core_feature_base.h 106 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523
  1. /*
  2. * Copyright (c) 2019 Nuclei Limited. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the License); you may
  7. * not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an AS IS BASIS, WITHOUT
  14. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. */
  18. #ifndef __CORE_FEATURE_BASE__
  19. #define __CORE_FEATURE_BASE__
  20. /*!
  21. * @file core_feature_base.h
  22. * @brief Base core feature API for Nuclei N/NX Core
  23. */
  24. /*
  25. * Core Base Feature Configuration Macro:
  26. * 1. __HARTID_OFFSET: Optional, define this macro when your cpu system first hart hartid and hart index is different.
  27. * eg. If your cpu system, first hart hartid is 2, hart index is 0, then set this macro to 2
  28. *
  29. */
  30. #include <stdint.h>
  31. #ifdef __cplusplus
  32. extern "C" {
  33. #endif
  34. #include "nmsis_compiler.h"
  35. /**
  36. * \defgroup NMSIS_Core_Registers Register Define and Type Definitions
  37. * \brief Type definitions and defines for core registers.
  38. *
  39. * @{
  40. */
#ifndef __RISCV_XLEN
/** \brief Refer to the width of an integer register in bits(either 32 or 64) */
#ifndef __riscv_xlen
/* Compiler did not provide __riscv_xlen: default to a 32-bit RISC-V target */
#define __RISCV_XLEN 32
#else
/* Mirror the compiler-provided XLEN so both spellings agree */
#define __RISCV_XLEN __riscv_xlen
#endif
#endif /* __RISCV_XLEN */
/** \brief Type of Control and Status Register(CSR), depends on the XLEN defined in RISC-V */
typedef unsigned long rv_csr_t;
/** @} */ /* End of Doxygen Group NMSIS_Core_Registers */
  52. /**
  53. * \defgroup NMSIS_Core_Base_Registers Base Register Define and Type Definitions
  54. * \ingroup NMSIS_Core_Registers
  55. * \brief Type definitions and defines for base core registers.
  56. *
  57. * @{
  58. */
/**
 * \brief Union type to access MISA CSR register.
 * \details One flag bit per ISA letter (bit 0 = 'A' .. bit 25 = 'Z'); mxl encodes the machine XLEN.
 */
typedef union {
    struct {
        rv_csr_t a:1;                        /*!< bit: 0  Atomic extension */
        rv_csr_t b:1;                        /*!< bit: 1  B extension */
        rv_csr_t c:1;                        /*!< bit: 2  Compressed extension */
        rv_csr_t d:1;                        /*!< bit: 3  Double-precision floating-point extension */
        rv_csr_t e:1;                        /*!< bit: 4  RV32E/64E base ISA */
        rv_csr_t f:1;                        /*!< bit: 5  Single-precision floating-point extension */
        rv_csr_t g:1;                        /*!< bit: 6  Reserved */
        rv_csr_t h:1;                        /*!< bit: 7  Hypervisor extension */
        rv_csr_t i:1;                        /*!< bit: 8  RV32I/64I/128I base ISA */
        rv_csr_t j:1;                        /*!< bit: 9  Reserved */
        rv_csr_t k:1;                        /*!< bit: 10 Reserved */
        rv_csr_t l:1;                        /*!< bit: 11 Reserved */
        rv_csr_t m:1;                        /*!< bit: 12 Integer Multiply/Divide extension */
        rv_csr_t n:1;                        /*!< bit: 13 Tentatively reserved for User-Level Interrupts extension */
        rv_csr_t o:1;                        /*!< bit: 14 Reserved */
        rv_csr_t p:1;                        /*!< bit: 15 Tentatively reserved for Packed-SIMD extension */
        rv_csr_t q:1;                        /*!< bit: 16 Quad-precision floating-point extension */
        rv_csr_t r:1;                        /*!< bit: 17 Reserved */
        rv_csr_t s:1;                        /*!< bit: 18 Supervisor mode implemented */
        rv_csr_t t:1;                        /*!< bit: 19 Reserved */
        rv_csr_t u:1;                        /*!< bit: 20 User mode implemented */
        rv_csr_t v:1;                        /*!< bit: 21 Vector extension */
        rv_csr_t w:1;                        /*!< bit: 22 Reserved */
        rv_csr_t x:1;                        /*!< bit: 23 Non-standard extensions present */
        rv_csr_t y:1;                        /*!< bit: 24 Reserved */
        rv_csr_t z:1;                        /*!< bit: 25 Reserved */
        rv_csr_t _reserved0:__RISCV_XLEN-28; /*!< bit: 26..XLEN-3 Reserved */
        rv_csr_t mxl:2;                      /*!< bit: XLEN-2..XLEN-1 Machine XLEN */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MISA_Type;
/**
 * \brief Union type to access MSTATUS CSR register.
 * \details Machine status: global interrupt enables, previous-privilege state,
 *          FS/VS/XS extension dirty-state tracking, and (on RV64) UXL/SXL/endianness fields.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:1;               /*!< bit: 0 Reserved */
        rv_csr_t sie:1;                      /*!< bit: 1 supervisor interrupt enable flag */
        rv_csr_t _reserved1:1;               /*!< bit: 2 Reserved */
        rv_csr_t mie:1;                      /*!< bit: 3 machine mode interrupt enable flag */
        rv_csr_t _reserved2:1;               /*!< bit: 4 Reserved */
        rv_csr_t spie:1;                     /*!< bit: 5 supervisor mode previous interrupt enable flag */
        rv_csr_t ube:1;                      /*!< bit: 6 U-mode non-instruction-fetch memory access big-endian enable flag */
        rv_csr_t mpie:1;                     /*!< bit: 7 machine mode previous interrupt enable flag */
        rv_csr_t spp:1;                      /*!< bit: 8 supervisor previous privilege mode */
        rv_csr_t vs:2;                       /*!< bit: 9..10 vector status flag */
        rv_csr_t mpp:2;                      /*!< bit: 11..12 machine previous privilege mode */
        rv_csr_t fs:2;                       /*!< bit: 13..14 FS status flag */
        rv_csr_t xs:2;                       /*!< bit: 15..16 XS status flag */
        rv_csr_t mprv:1;                     /*!< bit: 17 Modify PRiVilege */
        rv_csr_t sum:1;                      /*!< bit: 18 Supervisor Mode load and store protection */
        rv_csr_t mxr:1;                      /*!< bit: 19 Make eXecutable Readable */
        rv_csr_t tvm:1;                      /*!< bit: 20 Trap Virtual Memory */
        rv_csr_t tw:1;                       /*!< bit: 21 Timeout Wait */
        rv_csr_t tsr:1;                      /*!< bit: 22 Trap SRET */
        rv_csr_t spelp:1;                    /*!< bit: 23 Supervisor mode Previous Expected Landing Pad (ELP) State */
        rv_csr_t sdt:1;                      /*!< bit: 24 S-mode-disable-trap */
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
        /* RV64 layout: UXL/SXL, endianness controls and M-mode trap state live above bit 24 */
        rv_csr_t _reserved3:7;               /*!< bit: 25..31 Reserved */
        rv_csr_t uxl:2;                      /*!< bit: 32..33 U-mode XLEN */
        rv_csr_t sxl:2;                      /*!< bit: 34..35 S-mode XLEN */
        rv_csr_t sbe:1;                      /*!< bit: 36 S-mode non-instruction-fetch memory access big-endian enable flag */
        rv_csr_t mbe:1;                      /*!< bit: 37 M-mode non-instruction-fetch memory access big-endian enable flag */
        rv_csr_t gva:1;                      /*!< bit: 38 Guest Virtual Address */
        rv_csr_t mpv:1;                      /*!< bit: 39 Machine Previous Virtualization Mode */
        rv_csr_t _reserved4:1;               /*!< bit: 40 Reserved */
        rv_csr_t mpelp:1;                    /*!< bit: 41 Machine mode Previous Expected Landing Pad (ELP) State */
        rv_csr_t mdt:1;                      /*!< bit: 42 M-mode-disable-trap */
        rv_csr_t _reserved5:20;              /*!< bit: 43..62 Reserved */
        rv_csr_t sd:1;                       /*!< bit: 63 Dirty status for XS or FS */
#else
        /* RV32 layout: those fields sit in the separate MSTATUSH CSR instead */
        rv_csr_t _reserved3:6;               /*!< bit: 25..30 Reserved */
        rv_csr_t sd:1;                       /*!< bit: 31 Dirty status for XS or FS */
#endif
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MSTATUS_Type;
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 32
/**
 * \brief Union type to access MSTATUSH CSR register.
 * \details RV32 only: holds the upper mstatus fields (endianness, virtualization,
 *          ELP and M-mode trap state) that RV64 keeps in mstatus bits 32+.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:4;               /*!< bit: 0..3 Reserved */
        rv_csr_t sbe:1;                      /*!< bit: 4 S-mode non-instruction-fetch memory access big-endian enable flag */
        rv_csr_t mbe:1;                      /*!< bit: 5 M-mode non-instruction-fetch memory access big-endian enable flag */
        rv_csr_t gva:1;                      /*!< bit: 6 Guest Virtual Address */
        rv_csr_t mpv:1;                      /*!< bit: 7 Machine Previous Virtualization Mode */
        rv_csr_t _reserved1:1;               /*!< bit: 8 Reserved */
        rv_csr_t mpelp:1;                    /*!< bit: 9 Machine mode Previous Expected Landing Pad (ELP) State */
        rv_csr_t mdt:1;                      /*!< bit: 10 M-mode-disable-trap */
        rv_csr_t _reserved5:21;              /*!< bit: 11..31 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MSTATUSH_Type;
#endif
/**
 * \brief Union type to access MTVEC CSR register.
 * \details Machine trap-vector base: low 6 bits select the interrupt handling mode,
 *          the remaining bits hold the (aligned) vector base address.
 */
typedef union {
    struct {
        rv_csr_t mode:6;                     /*!< bit: 0..5 interrupt mode control */
        rv_csr_t addr:__RISCV_XLEN-6;        /*!< bit: 6..XLEN-1 mtvec address */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MTVEC_Type;
/**
 * \brief Union type to access MCAUSE CSR register.
 * \details Trap cause: exception/interrupt code plus Nuclei-specific saved state
 *          (previous interrupt level, MPIE/MPP snapshot); the top bit distinguishes
 *          interrupts from exceptions.
 */
typedef union {
    struct {
        rv_csr_t exccode:12;                 /*!< bit: 0..11 exception or interrupt code */
        rv_csr_t _reserved0:4;               /*!< bit: 12..15 Reserved */
        rv_csr_t mpil:8;                     /*!< bit: 16..23 Previous interrupt level */
        rv_csr_t _reserved1:3;               /*!< bit: 24..26 Reserved */
        rv_csr_t mpie:1;                     /*!< bit: 27 Interrupt enable flag before enter interrupt */
        rv_csr_t mpp:2;                      /*!< bit: 28..29 Privilege mode flag before enter interrupt */
        rv_csr_t minhv:1;                    /*!< bit: 30 Machine interrupt vector table */
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
        rv_csr_t _reserved2:__RISCV_XLEN-32; /*!< bit: 31..XLEN-2 Reserved */
#endif
        rv_csr_t interrupt:1;                /*!< bit: XLEN-1 trap type. 0 means exception and 1 means interrupt */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MCAUSE_Type;
/**
 * \brief Union type to access MCOUNTINHIBIT CSR register.
 * \details Setting a bit stops the corresponding hardware counter from incrementing.
 */
typedef union {
    struct {
        rv_csr_t cy:1;                       /*!< bit: 0 1 means disable mcycle counter */
        rv_csr_t _reserved0:1;               /*!< bit: 1 Reserved */
        rv_csr_t ir:1;                       /*!< bit: 2 1 means disable minstret counter */
        rv_csr_t _reserved1:__RISCV_XLEN-3;  /*!< bit: 3..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MCOUNTINHIBIT_Type;
/**
 * \brief Union type to access MSUBM CSR register.
 * \details Nuclei machine sub-mode register: records the current and previous
 *          trap type (e.g. interrupt/exception/NMI nesting state).
 */
typedef union {
    struct {
        rv_csr_t _reserved0:6;               /*!< bit: 0..5 Reserved */
        rv_csr_t typ:2;                      /*!< bit: 6..7 current trap type */
        rv_csr_t ptyp:2;                     /*!< bit: 8..9 previous trap type */
        rv_csr_t _reserved1:__RISCV_XLEN-10; /*!< bit: 10..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MSUBM_Type;
/**
 * \brief Union type to access MDCAUSE CSR register.
 * \details Supplies more detailed exception information as a supplement to MCAUSE.
 */
typedef union {
    struct {
        rv_csr_t mdcause:3;                  /*!< bit: 0..2 More detailed exception information as MCAUSE supplement */
        rv_csr_t _reserved0:__RISCV_XLEN-3;  /*!< bit: 3..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MDCAUSE_Type;
/**
 * \brief Union type to access MMISC_CTRL CSR register.
 * \details Miscellaneous core controls: branch prediction, misaligned access,
 *          NMI handling, trace modes and bus-error behavior.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:1;               /*!< bit: 0 Reserved */
        rv_csr_t zclsd_en:1;                 /*!< bit: 1 Control the Zclsd will uses the Zcf extension encoding or not */
        rv_csr_t _reserved1:1;               /*!< bit: 2 Reserved */
        rv_csr_t bpu:1;                      /*!< bit: 3 dynamic prediction enable flag */
        rv_csr_t _reserved2:2;               /*!< bit: 4..5 Reserved */
        rv_csr_t misalign:1;                 /*!< bit: 6 misaligned access support flag */
        rv_csr_t zcmt_zcmp:1;                /*!< bit: 7 Zc Ext uses the cfdsp of D Ext's encoding or not */
        rv_csr_t core_buserr:1;              /*!< bit: 8 core bus error exception or interrupt */
        rv_csr_t nmi_cause:1;                /*!< bit: 9 mnvec control and nmi mcause exccode */
        rv_csr_t imreturn_en:1;              /*!< bit: 10 IMRETURN mode of trace */
        rv_csr_t sijump_en:1;                /*!< bit: 11 SIJUMP mode of trace */
        rv_csr_t ldspec_en:1;                /*!< bit: 12 enable load speculative goes to mem interface */
        rv_csr_t _reserved3:1;               /*!< bit: 13 Reserved */
        rv_csr_t dbg_sec:1;                  /*!< bit: 14 debug access mode, removed in latest releases */
        rv_csr_t _reserved4:2;               /*!< bit: 15..16 Reserved */
        rv_csr_t csr_excl_enable:1;          /*!< bit: 17 Exclusive instruction(lr,sc) on Non-cacheable/Device memory can send exclusive flag in memory bus */
        rv_csr_t _reserved5:__RISCV_XLEN-18; /*!< bit: 18..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MMISCCTRL_Type;
/* Legacy spellings of the same type, kept for source compatibility */
typedef CSR_MMISCCTRL_Type CSR_MMISCCTL_Type;
typedef CSR_MMISCCTRL_Type CSR_MMISC_CTL_Type;
/**
 * \brief Union type to access MCACHE_CTL CSR register.
 * \details I-Cache controls occupy bits 0..10, D-Cache controls bits 16..23.
 */
typedef union {
    struct {
        rv_csr_t ic_en:1;                    /*!< bit: 0 I-Cache enable */
        rv_csr_t ic_scpd_mod:1;              /*!< bit: 1 Scratchpad mode, 0: Scratchpad as ICache Data RAM, 1: Scratchpad as ILM SRAM */
        rv_csr_t ic_ecc_en:1;                /*!< bit: 2 I-Cache ECC enable */
        rv_csr_t ic_ecc_excp_en:1;           /*!< bit: 3 I-Cache 2bit ECC error exception enable */
        rv_csr_t ic_rwtecc:1;                /*!< bit: 4 Control I-Cache Tag Ram ECC code injection */
        rv_csr_t ic_rwdecc:1;                /*!< bit: 5 Control I-Cache Data Ram ECC code injection */
        rv_csr_t ic_pf_en:1;                 /*!< bit: 6 I-Cache prefetch enable */
        rv_csr_t ic_cancel_en:1;             /*!< bit: 7 I-Cache change flow canceling enable control */
        rv_csr_t ic_ecc_chk_en:1;            /*!< bit: 8 I-Cache check ECC codes enable */
        rv_csr_t ic_prefetch_en:1;           /*!< bit: 9 I-Cache CMO prefetch enable control */
        rv_csr_t ic_burst_type:1;            /*!< bit: 10 I-Cache Burst type control */
        rv_csr_t _reserved0:5;               /*!< bit: 11..15 Reserved */
        rv_csr_t dc_en:1;                    /*!< bit: 16 DCache enable */
        rv_csr_t dc_ecc_en:1;                /*!< bit: 17 D-Cache ECC enable */
        rv_csr_t dc_ecc_excp_en:1;           /*!< bit: 18 D-Cache 2bit ECC error exception enable */
        rv_csr_t dc_rwtecc:1;                /*!< bit: 19 Control D-Cache Tag Ram ECC code injection */
        rv_csr_t dc_rwdecc:1;                /*!< bit: 20 Control D-Cache Data Ram ECC code injection */
        rv_csr_t dc_ecc_chk_en:1;            /*!< bit: 21 D-Cache check ECC codes enable */
        rv_csr_t dc_prefetch_en:1;           /*!< bit: 22 D-Cache CMO prefetch enable control */
        rv_csr_t dc_burst_type:1;            /*!< bit: 23 D-Cache Burst type control */
        rv_csr_t _reserved1:__RISCV_XLEN-24; /*!< bit: 24..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MCACHECTL_Type;
/* Legacy spelling of the same type, kept for source compatibility */
typedef CSR_MCACHECTL_Type CSR_MCACHE_CTL_Type;
/**
 * \brief Union type to access MSAVESTATUS CSR register.
 * \details Saved state for two levels of NMI/exception nesting
 *          (interrupt enable, privilege mode and trap type per level).
 */
typedef union {
    struct {
        rv_csr_t mpie1:1;                    /*!< bit: 0 interrupt enable flag of first level NMI/exception nesting */
        rv_csr_t mpp1:2;                     /*!< bit: 1..2 privilege mode of first level NMI/exception nesting */
        rv_csr_t _reserved0:3;               /*!< bit: 3..5 Reserved */
        rv_csr_t ptyp1:2;                    /*!< bit: 6..7 NMI/exception type of before first nesting */
        rv_csr_t mpie2:1;                    /*!< bit: 8 interrupt enable flag of second level NMI/exception nesting */
        rv_csr_t mpp2:2;                     /*!< bit: 9..10 privilege mode of second level NMI/exception nesting */
        rv_csr_t _reserved1:3;               /*!< bit: 11..13 Reserved */
        rv_csr_t ptyp2:2;                    /*!< bit: 14..15 NMI/exception type of before second nesting */
        rv_csr_t _reserved2:__RISCV_XLEN-16; /*!< bit: 16..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t w;                              /*!< Type used for csr data access (NOTE: named 'w' here, unlike the 'd' member of the other CSR union types) */
} CSR_MSAVESTATUS_Type;
/**
 * \brief Union type to access MILM_CTL CSR register.
 * \details ILM (Instruction Local Memory) enable/ECC controls plus its base address.
 */
typedef union {
    struct {
        rv_csr_t ilm_en:1;                   /*!< bit: 0 ILM enable */
        rv_csr_t ilm_ecc_en:1;               /*!< bit: 1 ILM ECC enable */
        rv_csr_t ilm_ecc_excp_en:1;          /*!< bit: 2 ILM ECC exception enable */
        rv_csr_t ilm_rwecc:1;                /*!< bit: 3 Control mecc_code write to ilm, simulate error injection */
        rv_csr_t ilm_ecc_chk_en:1;           /*!< bit: 4 ILM check ECC codes enable */
        rv_csr_t ilm_va_en:1;                /*!< bit: 5 Using virtual address to judge ILM access */
        rv_csr_t _reserved0:4;               /*!< bit: 6..9 Reserved */
        rv_csr_t ilm_bpa:__RISCV_XLEN-10;    /*!< bit: 10..XLEN-1 ILM base address */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MILMCTL_Type;
/* Legacy spelling of the same type, kept for source compatibility */
typedef CSR_MILMCTL_Type CSR_MILM_CTL_Type;
  314. /**
  315. * \brief Union type to access MDLM_CTL CSR register.
  316. */
  317. typedef union {
  318. struct {
  319. rv_csr_t dlm_en:1; /*!< bit: 0 DLM enable */
  320. rv_csr_t dlm_ecc_en:1; /*!< bit: 1 DLM ECC eanble */
  321. rv_csr_t dlm_ecc_excp_en:1; /*!< bit: 2 DLM ECC exception enable */
  322. rv_csr_t dlm_rwecc:1; /*!< bit: 3 Control mecc_code write to dlm, simulate error injection */
  323. rv_csr_t dlm_ecc_chk_en:1; /*!< bit: 4 DLM check ECC codes enable */
  324. rv_csr_t _reserved0:5; /*!< bit: 5..9 Reserved */
  325. rv_csr_t dlm_bpa:__RISCV_XLEN-10; /*!< bit: 10..XLEN-1 DLM base address */
  326. } b; /*!< Structure used for bit access */
  327. rv_csr_t d; /*!< Type used for csr data access */
  328. } CSR_MDLMCTL_Type;
  329. typedef CSR_MDLMCTL_Type CSR_DILM_CTL_Type;
/**
 * \brief Union type to access MCFG_INFO CSR register.
 * \details Read-only feature-presence bits describing which optional units
 *          (TEE, CLIC, local memories, caches, DSP, trace, ...) this core implements.
 */
typedef union {
    struct {
        rv_csr_t tee:1;                      /*!< bit: 0 TEE present */
        rv_csr_t ecc:1;                      /*!< bit: 1 ECC present */
        rv_csr_t clic:1;                     /*!< bit: 2 CLIC present */
        rv_csr_t plic:1;                     /*!< bit: 3 PLIC present */
        rv_csr_t fio:1;                      /*!< bit: 4 FIO present */
        rv_csr_t ppi:1;                      /*!< bit: 5 PPI present */
        rv_csr_t nice:1;                     /*!< bit: 6 NICE present */
        rv_csr_t ilm:1;                      /*!< bit: 7 ILM present */
        rv_csr_t dlm:1;                      /*!< bit: 8 DLM present */
        rv_csr_t icache:1;                   /*!< bit: 9 ICache present */
        rv_csr_t dcache:1;                   /*!< bit: 10 DCache present */
        rv_csr_t smp:1;                      /*!< bit: 11 SMP present */
        rv_csr_t dsp_n1:1;                   /*!< bit: 12 DSP N1 present */
        rv_csr_t dsp_n2:1;                   /*!< bit: 13 DSP N2 present */
        rv_csr_t dsp_n3:1;                   /*!< bit: 14 DSP N3 present */
        rv_csr_t zc_xlcz:1;                  /*!< bit: 15 Zc and xlcz extension present */
        rv_csr_t iregion:1;                  /*!< bit: 16 IREGION present */
        rv_csr_t vpu_degree:2;               /*!< bit: 17..18 Indicate the VPU degree of parallel */
        rv_csr_t sec_mode:1;                 /*!< bit: 19 Smwg extension present */
        rv_csr_t etrace:1;                   /*!< bit: 20 Etrace present */
        rv_csr_t safety_mecha:2;             /*!< bit: 21..22 Indicate Core's safety mechanism */
        rv_csr_t vnice:1;                    /*!< bit: 23 VNICE present */
        rv_csr_t xlcz:1;                     /*!< bit: 24 XLCZ extension present */
        rv_csr_t zilsd:1;                    /*!< bit: 25 Zilsd/Zclsd extension present */
        rv_csr_t sstc:1;                     /*!< bit: 26 SSTC extension present */
        rv_csr_t _reserved1:__RISCV_XLEN-27; /*!< bit: 27..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MCFGINFO_Type;
/* Legacy spelling of the same type, kept for source compatibility */
typedef CSR_MCFGINFO_Type CSR_MCFG_INFO_Type;
/**
 * \brief Union type to access MICFG_INFO CSR register.
 * \details Read-only geometry of the I-Cache (sets/ways/line size/ECC) and the ILM.
 */
typedef union {
    struct {
        rv_csr_t set:4;                      /*!< bit: 0..3 I-Cache sets per way */
        rv_csr_t way:3;                      /*!< bit: 4..6 I-Cache way */
        rv_csr_t lsize:3;                    /*!< bit: 7..9 I-Cache line size */
        rv_csr_t ecc:1;                      /*!< bit: 10 I-Cache ECC support */
        rv_csr_t _reserved0:5;               /*!< bit: 11..15 Reserved */
        rv_csr_t lm_size:5;                  /*!< bit: 16..20 ILM size, need to be 2^n size */
        rv_csr_t lm_xonly:1;                 /*!< bit: 21 ILM Execute only permission or Reserved */
        rv_csr_t lm_ecc:1;                   /*!< bit: 22 ILM ECC support */
        rv_csr_t _reserved1:__RISCV_XLEN-23; /*!< bit: 23..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MICFGINFO_Type;
/* Legacy spelling of the same type, kept for source compatibility */
typedef CSR_MICFGINFO_Type CSR_MICFG_INFO_Type;
/**
 * \brief Union type to access MDCFG_INFO CSR register.
 * \details Read-only geometry of the D-Cache (sets/ways/line size/ECC) and the DLM.
 */
typedef union {
    struct {
        rv_csr_t set:4;                      /*!< bit: 0..3 D-Cache sets per way */
        rv_csr_t way:3;                      /*!< bit: 4..6 D-Cache way */
        rv_csr_t lsize:3;                    /*!< bit: 7..9 D-Cache line size */
        rv_csr_t ecc:1;                      /*!< bit: 10 D-Cache ECC support */
        rv_csr_t _reserved0:5;               /*!< bit: 11..15 Reserved */
        rv_csr_t lm_size:5;                  /*!< bit: 16..20 DLM size, need to be 2^n size */
        rv_csr_t lm_ecc:1;                   /*!< bit: 21 DLM ECC present */
        rv_csr_t _reserved1:__RISCV_XLEN-22; /*!< bit: 22..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MDCFGINFO_Type;
/* Legacy spelling of the same type, kept for source compatibility */
typedef CSR_MDCFGINFO_Type CSR_MDCFG_INFO_Type;
/**
 * \brief Union type to access MTLBCFG_INFO CSR register.
 * \details Read-only geometry of the main TLB plus the ITLB/DTLB micro-TLB sizes.
 */
typedef union {
    struct {
        rv_csr_t set:4;                      /*!< bit: 0..3 Main TLB entry per way */
        rv_csr_t way:3;                      /*!< bit: 4..6 Main TLB ways */
        rv_csr_t lsize:3;                    /*!< bit: 7..9 Main TLB line size or Reserved */
        rv_csr_t ecc:1;                      /*!< bit: 10 Main TLB supports ECC or not */
        rv_csr_t napot:1;                    /*!< bit: 11 TLB supports Svnapot or not */
        rv_csr_t _reserved1:4;               /*!< bit: 12..15 Reserved 0 */
        rv_csr_t i_size:3;                   /*!< bit: 16..18 ITLB size */
        rv_csr_t d_size:3;                   /*!< bit: 19..21 DTLB size */
        rv_csr_t _reserved2:__RISCV_XLEN-22; /*!< bit: 22..XLEN-1 Reserved 0 */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MTLBCFGINFO_Type;
/* Legacy spelling of the same type, kept for source compatibility */
typedef CSR_MTLBCFGINFO_Type CSR_MTLBCFG_INFO_Type;
/**
 * \brief Union type to access MPPICFG_INFO CSR register.
 * \details PPI (Peripheral Port Interface) size, enable control and base address.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:1;               /*!< bit: 0 Reserved 1 */
        rv_csr_t ppi_size:5;                 /*!< bit: 1..5 PPI size, need to be 2^n size */
        rv_csr_t _reserved1:3;               /*!< bit: 6..8 Reserved 0 */
        rv_csr_t ppi_en:1;                   /*!< bit: 9 PPI Enable. Software can write this bit to control PPI */
        rv_csr_t ppi_bpa:__RISCV_XLEN-10;    /*!< bit: 10..XLEN-1 PPI base address */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MPPICFGINFO_Type;
/* Legacy spelling of the same type, kept for source compatibility */
typedef CSR_MPPICFGINFO_Type CSR_MPPICFG_INFO_Type;
/**
 * \brief Union type to access MFIOCFG_INFO CSR register.
 * \details FIO (Fast I/O) region size and base address.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:1;               /*!< bit: 0 Reserved */
        rv_csr_t fio_size:5;                 /*!< bit: 1..5 FIO size, need to be 2^n size */
        rv_csr_t _reserved1:4;               /*!< bit: 6..9 Reserved */
        rv_csr_t fio_bpa:__RISCV_XLEN-10;    /*!< bit: 10..XLEN-1 FIO base address */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MFIOCFGINFO_Type;
/* Legacy spelling of the same type, kept for source compatibility */
typedef CSR_MFIOCFGINFO_Type CSR_MFIOCFG_INFO_Type;
/**
 * \brief Union type to access MECC_LOCK CSR register.
 * \details Single lock bit guarding the ECC configuration.
 */
typedef union {
    struct {
        rv_csr_t ecc_lock:1;                 /*!< bit: 0 RW permission, ECC Lock configure */
        rv_csr_t _reserved0:__RISCV_XLEN-1;  /*!< bit: 1..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MECCLOCK_Type;
/* Legacy spelling of the same type, kept for source compatibility */
typedef CSR_MECCLOCK_Type CSR_MECC_LOCK_Type;
/**
 * \brief Union type to access MECC_CODE CSR register.
 */
typedef union {
    struct {
        rv_csr_t code:9;                      /*!< bit: 0..8 Used to inject ECC check code */
        rv_csr_t _reserved0:7;                /*!< bit: 9..15 Reserved 0 */
        rv_csr_t ramid:5;                     /*!< bit: 16..20 The ID of RAM that has 2bit ECC error, software can clear these bits */
        rv_csr_t _reserved1:3;                /*!< bit: 21..23 Reserved 0 */
        rv_csr_t sramid:5;                    /*!< bit: 24..28 The ID of RAM that has 1bit ECC error, software can clear these bits */
        rv_csr_t _reserved2:__RISCV_XLEN-29;  /*!< bit: 29..XLEN-1 Reserved 0 */
    } b;                                      /*!< Structure used for bit access */
    rv_csr_t d;                               /*!< Type used for csr data access */
} CSR_MECCCODE_Type;
typedef CSR_MECCCODE_Type CSR_MECC_CODE_Type; /*!< Alias of \ref CSR_MECCCODE_Type */
/**
 * \brief Union type to access MECC_CTL CSR register.
 * \details Each mask bit, when written 1, stops the corresponding ECC fatal
 * error source from being aggregated into the safety_error output.
 */
typedef union {
    struct {
        rv_csr_t ilm_fch_msk:1;               /*!< bit: 0 Write 1 to disable aggregate ILM fetch ECC fatal error to safety_error output */
        rv_csr_t ilm_acc_msk:1;               /*!< bit: 1 Write 1 to disable aggregate ILM load/store access ECC fatal error to safety_error output */
        rv_csr_t dlm_acc_msk:1;               /*!< bit: 2 Write 1 to disable aggregate DLM access ECC fatal error to safety_error output */
        rv_csr_t ic_fch_msk:1;                /*!< bit: 3 Write 1 to disable aggregate ICache fetch ECC fatal error to safety_error output */
        rv_csr_t dc_acc_msk:1;                /*!< bit: 4 Write 1 to disable aggregate DCache access ECC fatal error to safety_error output */
        rv_csr_t ilm_ext_msk:1;               /*!< bit: 5 Write 1 to disable aggregate ILM external access ECC fatal error to safety_error output */
        rv_csr_t dlm_ext_msk:1;               /*!< bit: 6 Write 1 to disable aggregate DLM external access ECC fatal error to safety_error output */
        rv_csr_t ic_ccm_msk:1;                /*!< bit: 7 Write 1 to disable aggregate ICache CCM ECC fatal error to safety_error output */
        rv_csr_t dc_ccm_msk:1;                /*!< bit: 8 Write 1 to disable aggregate DCache CCM ECC fatal error to safety_error output */
        rv_csr_t dc_cpbk_msk:1;               /*!< bit: 9 Write 1 to disable aggregate DCache CPBK ECC fatal error to safety_error output */
        rv_csr_t _reserved0:__RISCV_XLEN-10;  /*!< bit: 10..XLEN-1 Reserved 0 */
    } b;                                      /*!< Structure used for bit access */
    rv_csr_t d;                               /*!< Type used for csr data access */
} CSR_MECC_CTL_Type;
/**
 * \brief Union type to access MECC_STATUS CSR register.
 * \details Status bits mirror the mask bits in \ref CSR_MECC_CTL_Type; each
 * flag records that the corresponding ECC fatal error has occurred.
 */
typedef union {
    struct {
        rv_csr_t ilm_fch_err:1;               /*!< bit: 0 ILM fetch ECC fatal error has occurred */
        rv_csr_t ilm_acc_err:1;               /*!< bit: 1 ILM load/store access ECC fatal error has occurred */
        rv_csr_t dlm_acc_err:1;               /*!< bit: 2 DLM access ECC fatal error has occurred */
        rv_csr_t ic_fch_err:1;                /*!< bit: 3 ICache fetch ECC fatal error has occurred */
        rv_csr_t dc_acc_err:1;                /*!< bit: 4 DCache access ECC fatal error has occurred */
        rv_csr_t ilm_ext_err:1;               /*!< bit: 5 ILM external access ECC fatal error has occurred */
        rv_csr_t dlm_ext_err:1;               /*!< bit: 6 DLM external access ECC fatal error has occurred */
        rv_csr_t ic_ccm_err:1;                /*!< bit: 7 ICache CCM ECC fatal error has occurred */
        rv_csr_t dc_ccm_err:1;                /*!< bit: 8 DCache CCM ECC fatal error has occurred */
        rv_csr_t dc_cpbk_err:1;               /*!< bit: 9 DCache CPBK ECC fatal error has occurred */
        rv_csr_t _reserved0:__RISCV_XLEN-10;  /*!< bit: 10..XLEN-1 Reserved 0 */
    } b;                                      /*!< Structure used for bit access */
    rv_csr_t d;                               /*!< Type used for csr data access */
} CSR_MECC_STATUS_Type;
/**
 * \brief Union type to access MIRGB_INFO CSR register.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:1;                   /*!< bit: 0 Reserved */
        rv_csr_t iregion_size:5;                 /*!< bit: 1..5 Indicates the size of IREGION and it should be power of 2 */
        rv_csr_t _reserved1:4;                   /*!< bit: 6..9 Reserved */
        rv_csr_t iregion_base:__RISCV_XLEN-10;   /*!< bit: 10..PA_SIZE IREGION Base Address */
    } b;                                         /*!< Structure used for bit access */
    rv_csr_t d;                                  /*!< Type used for csr data access */
} CSR_MIRGB_INFO_Type;
/**
 * \brief Union type to access MSTACK_CTL CSR register.
 */
typedef union {
    struct {
        rv_csr_t ovf_track_en:1;                 /*!< bit: 0 Stack overflow check or track enable */
        rv_csr_t udf_en:1;                       /*!< bit: 1 Stack underflow check enable */
        rv_csr_t mode:1;                         /*!< bit: 2 Mode of stack checking */
        rv_csr_t _reserved0:__RISCV_XLEN-3;      /*!< bit: 3..XLEN-1 Reserved */
    } b;                                         /*!< Structure used for bit access */
    rv_csr_t d;                                  /*!< Type used for csr data access */
} CSR_MSTACK_CTL_Type;
/**
 * \brief Union type to access MTLB_CTL CSR register.
 */
typedef union {
    struct {
        rv_csr_t tlb_ecc_en:1;                   /*!< bit: 0 MTLB ECC enable */
        rv_csr_t tlb_ecc_excp_en:1;              /*!< bit: 1 MTLB double bit ECC exception enable control */
        rv_csr_t tlb_tram_ecc_inj_en:1;          /*!< bit: 2 Controls to inject the ECC Code in CSR mecc_code to MTLB tag rams */
        rv_csr_t tlb_dram_ecc_inj_en:1;          /*!< bit: 3 Controls to inject the ECC Code in CSR mecc_code to MTLB data rams */
        rv_csr_t _reserved0:2;                   /*!< bit: 4..5 Reserved */
        rv_csr_t tlb_ecc_chk_en:1;               /*!< bit: 6 Controls to check the ECC when core access to MTLB */
        rv_csr_t napot_en:1;                     /*!< bit: 7 NAPOT page enable */
        rv_csr_t _reserved1:__RISCV_XLEN-8;      /*!< bit: 8..XLEN-1 Reserved */
    } b;                                         /*!< Structure used for bit access */
    rv_csr_t d;                                  /*!< Type used for csr data access */
} CSR_MTLB_CTL_Type;
  549. /** @} */ /* End of Doxygen Group NMSIS_Core_Base_Registers */
  550. /* ########################### Core Function Access ########################### */
  551. /**
  552. * \defgroup NMSIS_Core_CSR_Register_Access Core CSR Register Access
  553. * \ingroup NMSIS_Core
  554. * \brief Functions to access the Core CSR Registers
  555. * \details
  556. *
  557. * The following functions or macros provide access to Core CSR registers.
  558. * - \ref NMSIS_Core_CSR_Encoding
  559. * - \ref NMSIS_Core_CSR_Registers
  560. * @{
  561. */
  562. #ifndef __ASSEMBLER__
  563. #ifndef __ICCRISCV__
  564. /**
  565. * \brief CSR operation Macro for csrrw instruction.
  566. * \details
  567. * Read the content of csr register to __v,
  568. * then write content of val into csr register, then return __v
  569. * \param csr CSR macro definition defined in
  570. * \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
  571. * \param val value to store into the CSR register
  572. * \return the CSR register value before written
  573. */
  574. #define __RV_CSR_SWAP(csr, val) \
  575. ({ \
  576. rv_csr_t __v = (unsigned long)(val); \
  577. __ASM volatile("csrrw %0, " STRINGIFY(csr) ", %1" \
  578. : "=r"(__v) \
  579. : "rK"(__v) \
  580. : "memory"); \
  581. __v; \
  582. })
/**
 * \brief CSR operation Macro for csrr instruction.
 * \details
 * Read the content of csr register to __v and return it
 * \param csr CSR macro definition defined in
 *            \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \return the CSR register value
 */
#define __RV_CSR_READ(csr)                          \
    ({                                              \
        rv_csr_t __v;                               \
        __ASM volatile("csrr %0, " STRINGIFY(csr)   \
                       : "=r"(__v)                  \
                       :                            \
                       : "memory");                 \
        __v;                                        \
    })
/**
 * \brief CSR operation Macro for csrw instruction.
 * \details
 * Write the content of val to csr register
 * \param csr CSR macro definition defined in
 *            \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val value to store into the CSR register
 * \remarks The "rK" constraint lets the compiler pick the immediate form
 * (csrwi) when val is a 5-bit constant.
 */
#define __RV_CSR_WRITE(csr, val)                        \
    ({                                                  \
        rv_csr_t __v = (rv_csr_t)(val);                 \
        __ASM volatile("csrw " STRINGIFY(csr) ", %0"    \
                       :                                \
                       : "rK"(__v)                      \
                       : "memory");                     \
    })
/**
 * \brief CSR operation Macro for csrrs instruction.
 * \details
 * Read the content of csr register to __v,
 * then set csr register to be __v | val, then return __v
 * \param csr CSR macro definition defined in
 *            \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val Mask value to be used with csrrs instruction
 * \return the CSR register value before written
 */
#define __RV_CSR_READ_SET(csr, val)                             \
    ({                                                          \
        rv_csr_t __v = (rv_csr_t)(val);                         \
        __ASM volatile("csrrs %0, " STRINGIFY(csr) ", %1"       \
                       : "=r"(__v)                              \
                       : "rK"(__v)                              \
                       : "memory");                             \
        __v;                                                    \
    })
/**
 * \brief CSR operation Macro for csrs instruction.
 * \details
 * Set csr register to be csr_content | val
 * \param csr CSR macro definition defined in
 *            \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val Mask value to be used with csrs instruction
 */
#define __RV_CSR_SET(csr, val)                          \
    ({                                                  \
        rv_csr_t __v = (rv_csr_t)(val);                 \
        __ASM volatile("csrs " STRINGIFY(csr) ", %0"    \
                       :                                \
                       : "rK"(__v)                      \
                       : "memory");                     \
    })
/**
 * \brief CSR operation Macro for csrrc instruction.
 * \details
 * Read the content of csr register to __v,
 * then set csr register to be __v & ~val, then return __v
 * \param csr CSR macro definition defined in
 *            \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val Mask value to be used with csrrc instruction
 * \return the CSR register value before written
 */
#define __RV_CSR_READ_CLEAR(csr, val)                           \
    ({                                                          \
        rv_csr_t __v = (rv_csr_t)(val);                         \
        __ASM volatile("csrrc %0, " STRINGIFY(csr) ", %1"       \
                       : "=r"(__v)                              \
                       : "rK"(__v)                              \
                       : "memory");                             \
        __v;                                                    \
    })
/**
 * \brief CSR operation Macro for csrc instruction.
 * \details
 * Set csr register to be csr_content & ~val
 * \param csr CSR macro definition defined in
 *            \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val Mask value to be used with csrc instruction
 */
#define __RV_CSR_CLEAR(csr, val)                        \
    ({                                                  \
        rv_csr_t __v = (rv_csr_t)(val);                 \
        __ASM volatile("csrc " STRINGIFY(csr) ", %0"    \
                       :                                \
                       : "rK"(__v)                      \
                       : "memory");                     \
    })
#else
/* IAR toolchain: map the generic CSR macros onto IAR's CSR intrinsics. */
#include <intrinsics.h>
#define __RV_CSR_SWAP       __write_csr      /* NOTE(review): assumes IAR __write_csr returns the previous CSR value - confirm against IAR intrinsics docs */
#define __RV_CSR_READ       __read_csr
#define __RV_CSR_WRITE      __write_csr
#define __RV_CSR_READ_SET   __set_bits_csr   /* set bits, returns old value */
#define __RV_CSR_SET        __set_bits_csr
#define __RV_CSR_READ_CLEAR __clear_bits_csr /* clear bits, returns old value */
#define __RV_CSR_CLEAR      __clear_bits_csr
#endif /* __ICCRISCV__ */
#endif /* __ASSEMBLER__ */
/**
 * \brief switch privilege from machine mode to others.
 * \details
 * Execute into \ref entry_point in \ref mode (supervisor or user) with the given stack
 * \param mode privilege mode to enter (written into mstatus.MPP)
 * \param stack predefined stack (stack top address), size should be set large enough
 * \param entry_point a function pointer to execute in the target mode
 * \remarks Does not return to the caller: mret transfers control to entry_point.
 */
__STATIC_INLINE void __switch_mode(uint8_t mode, uintptr_t stack, void(*entry_point)(void))
{
    unsigned long val = 0;

    /* Set MPP to the requested privilege mode, so mret drops into that mode */
    val = __RV_CSR_READ(CSR_MSTATUS);
    val = __RV_INSERT_FIELD(val, MSTATUS_MPP, mode);
    /* Clear MPIE: after mret, MIE is restored from MPIE, so interrupts start
     * disabled in the new mode */
    val = __RV_INSERT_FIELD(val, MSTATUS_MPIE, 0);
    __RV_CSR_WRITE(CSR_MSTATUS, val);
    /* Set the entry point in MEPC; mret jumps to this address */
    __RV_CSR_WRITE(CSR_MEPC, (unsigned long)entry_point);
    /* Install the new stack pointer, then perform the privilege switch */
    __ASM volatile("mv sp, %0" ::"r"(stack));
    __ASM volatile("mret");
}
/**
 * \brief Enable IRQ Interrupts
 * \details Enables IRQ interrupts by setting the MIE-bit in the MSTATUS Register.
 * \remarks
 * Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_irq(void)
{
    __RV_CSR_SET(CSR_MSTATUS, MSTATUS_MIE);
}
/**
 * \brief Disable IRQ Interrupts
 * \details Disables IRQ interrupts by clearing the MIE-bit in the MSTATUS Register.
 * \remarks
 * Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_irq(void)
{
    __RV_CSR_CLEAR(CSR_MSTATUS, MSTATUS_MIE);
}
/**
 * \brief Enable External IRQ Interrupts
 * \details Enables External IRQ interrupts by setting the MEIE-bit in the MIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __enable_ext_irq(void)
{
    __RV_CSR_SET(CSR_MIE, MIE_MEIE);
}
/**
 * \brief Disable External IRQ Interrupts
 * \details Disables External IRQ interrupts by clearing the MEIE-bit in the MIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __disable_ext_irq(void)
{
    __RV_CSR_CLEAR(CSR_MIE, MIE_MEIE);
}
/**
 * \brief Enable Timer IRQ Interrupts
 * \details Enables Timer IRQ interrupts by setting the MTIE-bit in the MIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __enable_timer_irq(void)
{
    __RV_CSR_SET(CSR_MIE, MIE_MTIE);
}
/**
 * \brief Disable Timer IRQ Interrupts
 * \details Disables Timer IRQ interrupts by clearing the MTIE-bit in the MIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __disable_timer_irq(void)
{
    __RV_CSR_CLEAR(CSR_MIE, MIE_MTIE);
}
/**
 * \brief Enable software IRQ Interrupts
 * \details Enables software IRQ interrupts by setting the MSIE-bit in the MIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __enable_sw_irq(void)
{
    __RV_CSR_SET(CSR_MIE, MIE_MSIE);
}
/**
 * \brief Disable software IRQ Interrupts
 * \details Disables software IRQ interrupts by clearing the MSIE-bit in the MIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __disable_sw_irq(void)
{
    __RV_CSR_CLEAR(CSR_MIE, MIE_MSIE);
}
/**
 * \brief Disable Core IRQ Interrupt
 * \details Disable Core IRQ interrupt by clearing the irq bit in the MIE Register.
 * \param irq interrupt number (bit position in MIE, must be < XLEN)
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __disable_core_irq(uint32_t irq)
{
    __RV_CSR_CLEAR(CSR_MIE, 1UL << irq);
}
/**
 * \brief Enable Core IRQ Interrupt
 * \details Enable Core IRQ interrupt by setting the irq bit in the MIE Register.
 * \param irq interrupt number (bit position in MIE, must be < XLEN)
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __enable_core_irq(uint32_t irq)
{
    __RV_CSR_SET(CSR_MIE, 1UL << irq);
}
/**
 * \brief Get Core IRQ Interrupt Pending status
 * \details Get Core IRQ interrupt pending status of irq bit.
 * \param irq interrupt number (bit position in MIP)
 * \return 1 if the irq bit is pending in MIP, otherwise 0
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE uint32_t __get_core_irq_pending(uint32_t irq)
{
    return ((__RV_CSR_READ(CSR_MIP) >> irq) & 0x1);
}
/**
 * \brief Clear Core IRQ Interrupt Pending status
 * \details Clear Core IRQ interrupt pending status of irq bit.
 * \param irq interrupt number (bit position in MIP)
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __clear_core_irq_pending(uint32_t irq)
{
    __RV_CSR_CLEAR(CSR_MIP, 1UL << irq);
}
/**
 * \brief Enable IRQ Interrupts in supervisor mode
 * \details Enables IRQ interrupts by setting the SIE-bit in the SSTATUS Register.
 * \remarks
 * Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __enable_irq_s(void)
{
    __RV_CSR_SET(CSR_SSTATUS, SSTATUS_SIE);
}
/**
 * \brief Disable IRQ Interrupts in supervisor mode
 * \details Disables IRQ interrupts by clearing the SIE-bit in the SSTATUS Register.
 * \remarks
 * Can only be executed in Privileged modes.
 */
__STATIC_FORCEINLINE void __disable_irq_s(void)
{
    __RV_CSR_CLEAR(CSR_SSTATUS, SSTATUS_SIE);
}
/**
 * \brief Enable External IRQ Interrupts in supervisor mode
 * \details Enables External IRQ interrupts by setting the SEIE-bit in the SIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * The MIE_SEIE mask is reused here since the S-mode bit holds the same
 * position in both mie and sie.
 */
__STATIC_FORCEINLINE void __enable_ext_irq_s(void)
{
    __RV_CSR_SET(CSR_SIE, MIE_SEIE);
}
/**
 * \brief Disable External IRQ Interrupts in supervisor mode
 * \details Disables External IRQ interrupts by clearing the SEIE-bit in the SIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __disable_ext_irq_s(void)
{
    __RV_CSR_CLEAR(CSR_SIE, MIE_SEIE);
}
/**
 * \brief Enable Timer IRQ Interrupts in supervisor mode
 * \details Enables Timer IRQ interrupts by setting the STIE-bit in the SIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __enable_timer_irq_s(void)
{
    __RV_CSR_SET(CSR_SIE, MIE_STIE);
}
/**
 * \brief Disable Timer IRQ Interrupts in supervisor mode
 * \details Disables Timer IRQ interrupts by clearing the STIE-bit in the SIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __disable_timer_irq_s(void)
{
    __RV_CSR_CLEAR(CSR_SIE, MIE_STIE);
}
/**
 * \brief Enable software IRQ Interrupts in supervisor mode
 * \details Enables software IRQ interrupts by setting the SSIE-bit in the SIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __enable_sw_irq_s(void)
{
    __RV_CSR_SET(CSR_SIE, MIE_SSIE);
}
/**
 * \brief Disable software IRQ Interrupts in supervisor mode
 * \details Disables software IRQ interrupts by clearing the SSIE-bit in the SIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __disable_sw_irq_s(void)
{
    __RV_CSR_CLEAR(CSR_SIE, MIE_SSIE);
}
/**
 * \brief Disable Core IRQ Interrupt in supervisor mode
 * \details Disable Core IRQ interrupt by clearing the irq bit in the SIE Register.
 * \param irq interrupt number (bit position in SIE)
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __disable_core_irq_s(uint32_t irq)
{
    __RV_CSR_CLEAR(CSR_SIE, 1UL << irq);
}
/**
 * \brief Enable Core IRQ Interrupt in supervisor mode
 * \details Enable Core IRQ interrupt by setting the irq bit in the SIE Register.
 * \param irq interrupt number (bit position in SIE)
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __enable_core_irq_s(uint32_t irq)
{
    __RV_CSR_SET(CSR_SIE, 1UL << irq);
}
/**
 * \brief Get Core IRQ Interrupt Pending status in supervisor mode
 * \details Get Core IRQ interrupt pending status of irq bit.
 * \param irq interrupt number (bit position in SIP)
 * \return 1 if the irq bit is pending in SIP, otherwise 0
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE uint32_t __get_core_irq_pending_s(uint32_t irq)
{
    return ((__RV_CSR_READ(CSR_SIP) >> irq) & 0x1);
}
/**
 * \brief Clear Core IRQ Interrupt Pending status in supervisor mode
 * \details Clear Core IRQ interrupt pending status of irq bit.
 * \param irq interrupt number (bit position in SIP)
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 */
__STATIC_FORCEINLINE void __clear_core_irq_pending_s(uint32_t irq)
{
    __RV_CSR_CLEAR(CSR_SIP, 1UL << irq);
}
/**
 * \brief Read whole 64 bits value of mcycle counter
 * \details This function will read the whole 64 bits of MCYCLE register
 * \return The whole 64 bits value of MCYCLE
 * \remarks It will work for both RV32 and RV64 to get full 64bits value of MCYCLE
 */
__STATIC_INLINE uint64_t __get_rv_cycle(void)
{
#if __RISCV_XLEN == 32
    volatile uint32_t high0, low, high;
    uint64_t full;

    /* Read high-low-high: if the high word changed between the two reads,
     * the low word rolled over in between, so re-read low to pair it with
     * the second (current) high word. */
    high0 = __RV_CSR_READ(CSR_MCYCLEH);
    low = __RV_CSR_READ(CSR_MCYCLE);
    high = __RV_CSR_READ(CSR_MCYCLEH);
    if (high0 != high) {
        low = __RV_CSR_READ(CSR_MCYCLE);
    }
    full = (((uint64_t)high) << 32) | low;
    return full;
#elif __RISCV_XLEN == 64
    return (uint64_t)__RV_CSR_READ(CSR_MCYCLE);
#else // TODO Need cover for XLEN=128 case in future
    return (uint64_t)__RV_CSR_READ(CSR_MCYCLE);
#endif
}
/**
 * \brief Set whole 64 bits value of mcycle counter
 * \details This function will set the whole 64 bits of MCYCLE register
 * \param cycle new 64-bit counter value to install
 * \remarks It will work for both RV32 and RV64 to set full 64bits value of MCYCLE
 */
__STATIC_FORCEINLINE void __set_rv_cycle(uint64_t cycle)
{
#if __RISCV_XLEN == 32
    __RV_CSR_WRITE(CSR_MCYCLE, 0); // zero the low word first so no carry can ripple into MCYCLEH between the writes
    __RV_CSR_WRITE(CSR_MCYCLEH, (uint32_t)(cycle >> 32));
    __RV_CSR_WRITE(CSR_MCYCLE, (uint32_t)(cycle));
#elif __RISCV_XLEN == 64
    __RV_CSR_WRITE(CSR_MCYCLE, cycle);
#else // TODO Need cover for XLEN=128 case in future
#endif
}
/**
 * \brief Read whole 64 bits value of machine instruction-retired counter
 * \details This function will read the whole 64 bits of MINSTRET register
 * \return The whole 64 bits value of MINSTRET
 * \remarks It will work for both RV32 and RV64 to get full 64bits value of MINSTRET
 */
__STATIC_INLINE uint64_t __get_rv_instret(void)
{
#if __RISCV_XLEN == 32
    volatile uint32_t high0, low, high;
    uint64_t full;

    /* Read high-low-high: a change in the high word means the low word
     * rolled over mid-sequence; re-read low to match the second high. */
    high0 = __RV_CSR_READ(CSR_MINSTRETH);
    low = __RV_CSR_READ(CSR_MINSTRET);
    high = __RV_CSR_READ(CSR_MINSTRETH);
    if (high0 != high) {
        low = __RV_CSR_READ(CSR_MINSTRET);
    }
    full = (((uint64_t)high) << 32) | low;
    return full;
#elif __RISCV_XLEN == 64
    return (uint64_t)__RV_CSR_READ(CSR_MINSTRET);
#else // TODO Need cover for XLEN=128 case in future
    return (uint64_t)__RV_CSR_READ(CSR_MINSTRET);
#endif
}
/**
 * \brief Set whole 64 bits value of machine instruction-retired counter
 * \details This function will set the whole 64 bits of MINSTRET register
 * \param instret new 64-bit counter value to install
 * \remarks It will work for both RV32 and RV64 to set full 64bits value of MINSTRET
 */
__STATIC_FORCEINLINE void __set_rv_instret(uint64_t instret)
{
#if __RISCV_XLEN == 32
    __RV_CSR_WRITE(CSR_MINSTRET, 0); // zero the low word first so no carry can ripple into MINSTRETH between the writes
    __RV_CSR_WRITE(CSR_MINSTRETH, (uint32_t)(instret >> 32));
    __RV_CSR_WRITE(CSR_MINSTRET, (uint32_t)(instret));
#elif __RISCV_XLEN == 64
    __RV_CSR_WRITE(CSR_MINSTRET, instret);
#else // TODO Need cover for XLEN=128 case in future
#endif
}
/**
 * \brief Read whole 64 bits value of real-time clock
 * \details This function will read the whole 64 bits of TIME register
 * \return The whole 64 bits value of TIME CSR
 * \remarks It will work for both RV32 and RV64 to get full 64bits value of TIME
 * \attention only available when user mode available
 */
__STATIC_INLINE uint64_t __get_rv_time(void)
{
#if __RISCV_XLEN == 32
    volatile uint32_t high0, low, high;
    uint64_t full;

    /* Read high-low-high: a change in the high word means the low word
     * rolled over mid-sequence; re-read low to match the second high. */
    high0 = __RV_CSR_READ(CSR_TIMEH);
    low = __RV_CSR_READ(CSR_TIME);
    high = __RV_CSR_READ(CSR_TIMEH);
    if (high0 != high) {
        low = __RV_CSR_READ(CSR_TIME);
    }
    full = (((uint64_t)high) << 32) | low;
    return full;
#elif __RISCV_XLEN == 64
    return (uint64_t)__RV_CSR_READ(CSR_TIME);
#else // TODO Need cover for XLEN=128 case in future
    return (uint64_t)__RV_CSR_READ(CSR_TIME);
#endif
}
  1068. /**
  1069. * \brief Read the CYCLE register
  1070. * \details This function will read the CYCLE register without taking the
  1071. * CYCLEH register into account
  1072. * \return 32 bits value when XLEN=32
  1073. * 64 bits value when XLEN=64
  1074. * TODO: XLEN=128 need to be supported
  1075. */
  1076. __STATIC_FORCEINLINE unsigned long __read_cycle_csr()
  1077. {
  1078. return __RV_CSR_READ(CSR_CYCLE);
  1079. }
  1080. /**
  1081. * \brief Read the INSTRET register
  1082. * \details This function will read the INSTRET register without taking the
  1083. * INSTRETH register into account
  1084. * \return 32 bits value when XLEN=32
  1085. * 64 bits value when XLEN=64
  1086. * TODO: XLEN=128 need to be supported
  1087. */
  1088. __STATIC_FORCEINLINE unsigned long __read_instret_csr()
  1089. {
  1090. return __RV_CSR_READ(CSR_INSTRET);
  1091. }
  1092. /**
  1093. * \brief Read the TIME register
  1094. * \details This function will read the TIME register without taking the
  1095. * TIMEH register into account
  1096. * \return 32 bits value when XLEN=32
  1097. * 64 bits value when XLEN=64
  1098. * TODO: XLEN=128 need to be supported
  1099. */
  1100. __STATIC_FORCEINLINE unsigned long __read_time_csr()
  1101. {
  1102. return __RV_CSR_READ(CSR_TIME);
  1103. }
/**
 * \brief Get cluster id of current cluster
 * \details This function will get cluster id of current cluster in a multiple cluster system
 * \return The cluster id of current cluster
 * \remarks mhartid bit 15-8 is designed for cluster id in nuclei subsystem reference design
 * \attention function is allowed in machine mode only
 */
__STATIC_FORCEINLINE unsigned long __get_cluster_id(void)
{
    unsigned long id;

    /* Extract bits 15..8 of mhartid (cluster id field) */
    id = (__RV_CSR_READ(CSR_MHARTID) >> 8) & 0xFF;
    return id;
}
/**
 * \brief Get hart index of current cluster
 * \details This function will get hart index of current cluster in a multiple cluster system,
 * hart index is hartid - hartid offset, for example if your hartid is 1, and offset is 1, then
 * hart index is 0
 * \return The hart index of current cluster
 * \remarks __HARTID_OFFSET, when defined in <Device>.h, is subtracted from mhartid
 * \attention function is allowed in machine mode only
 */
__STATIC_FORCEINLINE unsigned long __get_hart_index(void)
{
    unsigned long id;

#ifdef __HARTID_OFFSET
    id = __RV_CSR_READ(CSR_MHARTID) - __HARTID_OFFSET;
#else
    id = __RV_CSR_READ(CSR_MHARTID);
#endif
    return id;
}
/**
 * \brief Get hart id of current cluster
 * \details This function will get hart id of current cluster in a multiple cluster system
 * \return The hart id of current cluster
 * \remarks it will return full hartid not part of it for reference subsystem design,
 * if your reference subsystem design has hartid offset, please define __HARTID_OFFSET in
 * <Device>.h
 * \attention function is allowed in machine mode only
 */
__STATIC_FORCEINLINE unsigned long __get_hart_id(void)
{
    unsigned long id;

    id = __RV_CSR_READ(CSR_MHARTID);
    return id;
}
/**
 * \brief Get cluster id of current cluster in supervisor mode
 * \details This function will get cluster id of current cluster in a multiple cluster system
 * \return The cluster id of current cluster
 * \remarks hartid bit 15-8 is designed for cluster id in nuclei subsystem reference design
 * \attention function is allowed in machine/supervisor mode,
 * currently only present in 600/900 series from 2024 released version
 */
__STATIC_FORCEINLINE unsigned long __get_cluster_id_s(void)
{
    unsigned long id;

    /* Extract bits 15..8 of shartid (cluster id field) */
    id = (__RV_CSR_READ(CSR_SHARTID) >> 8) & 0xFF;
    return id;
}
/**
 * \brief Get hart index of current cluster in supervisor mode
 * \details This function will get hart index of current cluster in a multiple cluster system,
 * hart index is hartid - hartid offset, for example if your hartid is 1, and offset is 1, then
 * hart index is 0
 * \return The hart index of current cluster
 * \remarks __HARTID_OFFSET, when defined in <Device>.h, is subtracted from shartid
 * \attention function is allowed in machine/supervisor mode,
 * currently only present in 600/900 series from 2024 released version
 */
__STATIC_FORCEINLINE unsigned long __get_hart_index_s(void)
{
    unsigned long id;

#ifdef __HARTID_OFFSET
    id = __RV_CSR_READ(CSR_SHARTID) - __HARTID_OFFSET;
#else
    id = __RV_CSR_READ(CSR_SHARTID);
#endif
    return id;
}
/**
 * \brief Get hart id of current cluster in supervisor mode
 * \details This function will get hart id of current cluster in a multiple cluster system
 * \return The hart id of current cluster
 * \remarks it will return full hartid not part of it for reference subsystem design,
 * if your reference subsystem design has hartid offset, please define __HARTID_OFFSET in
 * <Device>.h
 * \attention function is allowed in machine/supervisor mode,
 * currently only present in 600/900 series from 2024 released version
 */
__STATIC_FORCEINLINE unsigned long __get_hart_id_s(void)
{
    unsigned long id;

    id = __RV_CSR_READ(CSR_SHARTID);
    return id;
}
  1199. /** @} */ /* End of Doxygen Group NMSIS_Core_CSR_Register_Access */
  1200. /* ########################### CPU Intrinsic Functions ########################### */
  1201. /**
 * \defgroup NMSIS_Core_CPU_Intrinsic Intrinsic Functions for CPU Instructions
  1203. * \ingroup NMSIS_Core
  1204. * \brief Functions that generate RISC-V CPU instructions.
  1205. * \details
  1206. *
  1207. * The following functions generate specified RISC-V instructions that cannot be directly accessed by compiler.
  1208. * @{
  1209. */
  1210. /**
  1211. * \brief NOP Instruction
  1212. * \details
  1213. * No Operation does nothing.
  1214. * This instruction can be used for code alignment purposes.
  1215. */
  1216. __STATIC_FORCEINLINE void __NOP(void)
  1217. {
  1218. __ASM volatile("nop");
  1219. }
  1220. /**
  1221. * \brief Wait For Interrupt
  1222. * \details
  1223. * Wait For Interrupt is is executed using CSR_WFE.WFE=0 and WFI instruction.
  1224. * It will suspends execution until interrupt, NMI or Debug happened.
  1225. * When Core is waked up by interrupt, if
  1226. * 1. mstatus.MIE == 1(interrupt enabled), Core will enter ISR code
  1227. * 2. mstatus.MIE == 0(interrupt disabled), Core will resume previous execution
  1228. */
  1229. __STATIC_FORCEINLINE void __WFI(void)
  1230. {
  1231. __RV_CSR_CLEAR(CSR_WFE, WFE_WFE);
  1232. __ASM volatile("wfi");
  1233. }
  1234. /**
  1235. * \brief Wait For Event
  1236. * \details
  1237. * Wait For Event is executed using CSR_WFE.WFE=1 and WFI instruction.
  1238. * It will suspends execution until event, NMI or Debug happened.
  1239. * When Core is waked up, Core will resume previous execution
  1240. */
  1241. __STATIC_FORCEINLINE void __WFE(void)
  1242. {
  1243. __RV_CSR_SET(CSR_WFE, WFE_WFE);
  1244. __ASM volatile("wfi");
  1245. __RV_CSR_CLEAR(CSR_WFE, WFE_WFE);
  1246. }
  1247. /**
  1248. * \brief Breakpoint Instruction
  1249. * \details
  1250. * Causes the processor to enter Debug state.
  1251. * Debug tools can use this to investigate system state
  1252. * when the instruction at a particular address is reached.
  1253. */
  1254. __STATIC_FORCEINLINE void __EBREAK(void)
  1255. {
  1256. __ASM volatile("ebreak");
  1257. }
  1258. /**
  1259. * \brief Environment Call Instruction
  1260. * \details
  1261. * The ECALL instruction is used to make a service request to
  1262. * the execution environment.
  1263. */
  1264. __STATIC_FORCEINLINE void __ECALL(void)
  1265. {
  1266. __ASM volatile("ecall");
  1267. }
  1268. /**
  1269. * \brief WFI Sleep Mode enumeration
  1270. */
  1271. typedef enum WFI_SleepMode {
  1272. WFI_SHALLOW_SLEEP = 0, /*!< Shallow sleep mode, the core_clk will poweroff */
  1273. WFI_DEEP_SLEEP = 1 /*!< Deep sleep mode, the core_clk and core_ano_clk will poweroff */
  1274. } WFI_SleepMode_Type;
  1275. /**
  1276. * \brief Set Sleep mode of WFI
  1277. * \details
  1278. * Set the SLEEPVALUE CSR register to control the
  1279. * WFI Sleep mode.
  1280. * \param[in] mode The sleep mode to be set
  1281. */
  1282. __STATIC_FORCEINLINE void __set_wfi_sleepmode(WFI_SleepMode_Type mode)
  1283. {
  1284. __RV_CSR_WRITE(CSR_SLEEPVALUE, mode);
  1285. }
  1286. /**
  1287. * \brief Send TX Event
  1288. * \details
  1289. * Set the CSR TXEVT to control send a TX Event.
  1290. * The Core will output signal tx_evt as output event signal.
  1291. */
  1292. __STATIC_FORCEINLINE void __TXEVT(void)
  1293. {
  1294. __RV_CSR_SET(CSR_TXEVT, 0x1);
  1295. }
  1296. /**
  1297. * \brief Enable MCYCLE counter
  1298. * \details
  1299. * Clear the CY bit of MCOUNTINHIBIT to 0 to enable MCYCLE Counter
  1300. */
  1301. __STATIC_FORCEINLINE void __enable_mcycle_counter(void)
  1302. {
  1303. __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_CY);
  1304. }
  1305. /**
  1306. * \brief Disable MCYCLE counter
  1307. * \details
  1308. * Set the CY bit of MCOUNTINHIBIT to 1 to disable MCYCLE Counter
  1309. */
  1310. __STATIC_FORCEINLINE void __disable_mcycle_counter(void)
  1311. {
  1312. __RV_CSR_SET(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_CY);
  1313. }
  1314. /**
  1315. * \brief Enable MINSTRET counter
  1316. * \details
  1317. * Clear the IR bit of MCOUNTINHIBIT to 0 to enable MINSTRET Counter
  1318. */
  1319. __STATIC_FORCEINLINE void __enable_minstret_counter(void)
  1320. {
  1321. __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_IR);
  1322. }
  1323. /**
  1324. * \brief Disable MINSTRET counter
  1325. * \details
  1326. * Set the IR bit of MCOUNTINHIBIT to 1 to disable MINSTRET Counter
  1327. */
  1328. __STATIC_FORCEINLINE void __disable_minstret_counter(void)
  1329. {
  1330. __RV_CSR_SET(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_IR);
  1331. }
  1332. /**
  1333. * \brief Enable selected hardware performance monitor counter
  1334. * \param [in] idx the index of the hardware performance monitor counter
  1335. * \details
  1336. * enable selected hardware performance monitor counter mhpmcounterx.
  1337. */
  1338. __STATIC_FORCEINLINE void __enable_mhpm_counter(unsigned long idx)
  1339. {
  1340. __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, (1UL << idx));
  1341. }
  1342. /**
  1343. * \brief Disable selected hardware performance monitor counter
  1344. * \param [in] idx the index of the hardware performance monitor counter
  1345. * \details
  1346. * Disable selected hardware performance monitor counter mhpmcounterx.
  1347. */
  1348. __STATIC_FORCEINLINE void __disable_mhpm_counter(unsigned long idx)
  1349. {
  1350. __RV_CSR_SET(CSR_MCOUNTINHIBIT, (1UL << idx));
  1351. }
  1352. /**
  1353. * \brief Enable hardware performance counters with mask
  1354. * \param [in] mask mask of selected hardware performance monitor counters
  1355. * \details
  1356. * enable mhpmcounterx with mask, only the masked ones will be enabled.
  1357. * mhpmcounter3-mhpmcount31 are for high performance monitor counters.
  1358. */
  1359. __STATIC_FORCEINLINE void __enable_mhpm_counters(unsigned long mask)
  1360. {
  1361. __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, mask);
  1362. }
  1363. /**
  1364. * \brief Disable hardware performance counters with mask
  1365. * \param [in] mask mask of selected hardware performance monitor counters
  1366. * \details
  1367. * Disable mhpmcounterx with mask, only the masked ones will be disabled.
  1368. * mhpmcounter3-mhpmcount31 are for high performance monitor counters.
  1369. */
  1370. __STATIC_FORCEINLINE void __disable_mhpm_counters(unsigned long mask)
  1371. {
  1372. __RV_CSR_SET(CSR_MCOUNTINHIBIT, mask);
  1373. }
  1374. /**
  1375. * \brief Enable all MCYCLE & MINSTRET & MHPMCOUNTER counter
  1376. * \details
  1377. * Clear all to zero to enable all counters,
  1378. * such as cycle, instret, high performance monitor counters
  1379. */
  1380. __STATIC_FORCEINLINE void __enable_all_counter(void)
  1381. {
  1382. __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, 0xFFFFFFFF);
  1383. }
  1384. /**
  1385. * \brief Disable all MCYCLE & MINSTRET & MHPMCOUNTER counter
  1386. * \details
  1387. * Set all to one to disable all counters,
  1388. * such as cycle, instret, high performance monitor counters
  1389. */
  1390. __STATIC_FORCEINLINE void __disable_all_counter(void)
  1391. {
  1392. __RV_CSR_SET(CSR_MCOUNTINHIBIT, 0xFFFFFFFF);
  1393. }
  1394. /**
  1395. * \brief Set event for selected high performance monitor event
  1396. * \param [in] idx HPMEVENTx CSR index(3-31)
  1397. * \param [in] event HPMEVENTx Register value to set
  1398. * \details
  1399. * Set event for high performance monitor event register
  1400. */
  1401. __STATIC_INLINE void __set_hpm_event(unsigned long idx, unsigned long event)
  1402. {
  1403. switch (idx) {
  1404. case 3: __RV_CSR_WRITE(CSR_MHPMEVENT3, event); break;
  1405. case 4: __RV_CSR_WRITE(CSR_MHPMEVENT4, event); break;
  1406. case 5: __RV_CSR_WRITE(CSR_MHPMEVENT5, event); break;
  1407. case 6: __RV_CSR_WRITE(CSR_MHPMEVENT6, event); break;
  1408. case 7: __RV_CSR_WRITE(CSR_MHPMEVENT7, event); break;
  1409. case 8: __RV_CSR_WRITE(CSR_MHPMEVENT8, event); break;
  1410. case 9: __RV_CSR_WRITE(CSR_MHPMEVENT9, event); break;
  1411. case 10: __RV_CSR_WRITE(CSR_MHPMEVENT10, event); break;
  1412. case 11: __RV_CSR_WRITE(CSR_MHPMEVENT11, event); break;
  1413. case 12: __RV_CSR_WRITE(CSR_MHPMEVENT12, event); break;
  1414. case 13: __RV_CSR_WRITE(CSR_MHPMEVENT13, event); break;
  1415. case 14: __RV_CSR_WRITE(CSR_MHPMEVENT14, event); break;
  1416. case 15: __RV_CSR_WRITE(CSR_MHPMEVENT15, event); break;
  1417. case 16: __RV_CSR_WRITE(CSR_MHPMEVENT16, event); break;
  1418. case 17: __RV_CSR_WRITE(CSR_MHPMEVENT17, event); break;
  1419. case 18: __RV_CSR_WRITE(CSR_MHPMEVENT18, event); break;
  1420. case 19: __RV_CSR_WRITE(CSR_MHPMEVENT19, event); break;
  1421. case 20: __RV_CSR_WRITE(CSR_MHPMEVENT20, event); break;
  1422. case 21: __RV_CSR_WRITE(CSR_MHPMEVENT21, event); break;
  1423. case 22: __RV_CSR_WRITE(CSR_MHPMEVENT22, event); break;
  1424. case 23: __RV_CSR_WRITE(CSR_MHPMEVENT23, event); break;
  1425. case 24: __RV_CSR_WRITE(CSR_MHPMEVENT24, event); break;
  1426. case 25: __RV_CSR_WRITE(CSR_MHPMEVENT25, event); break;
  1427. case 26: __RV_CSR_WRITE(CSR_MHPMEVENT26, event); break;
  1428. case 27: __RV_CSR_WRITE(CSR_MHPMEVENT27, event); break;
  1429. case 28: __RV_CSR_WRITE(CSR_MHPMEVENT28, event); break;
  1430. case 29: __RV_CSR_WRITE(CSR_MHPMEVENT29, event); break;
  1431. case 30: __RV_CSR_WRITE(CSR_MHPMEVENT30, event); break;
  1432. case 31: __RV_CSR_WRITE(CSR_MHPMEVENT31, event); break;
  1433. default: break;
  1434. }
  1435. }
  1436. /**
  1437. * \brief Get event for selected high performance monitor event
  1438. * \param [in] idx HPMEVENTx CSR index(3-31)
  1439. * \param [in] event HPMEVENTx Register value to set
  1440. * \details
  1441. * Get high performance monitor event register value
  1442. * \return HPMEVENTx Register value
  1443. */
  1444. __STATIC_INLINE unsigned long __get_hpm_event(unsigned long idx)
  1445. {
  1446. switch (idx) {
  1447. case 3: return __RV_CSR_READ(CSR_MHPMEVENT3);
  1448. case 4: return __RV_CSR_READ(CSR_MHPMEVENT4);
  1449. case 5: return __RV_CSR_READ(CSR_MHPMEVENT5);
  1450. case 6: return __RV_CSR_READ(CSR_MHPMEVENT6);
  1451. case 7: return __RV_CSR_READ(CSR_MHPMEVENT7);
  1452. case 8: return __RV_CSR_READ(CSR_MHPMEVENT8);
  1453. case 9: return __RV_CSR_READ(CSR_MHPMEVENT9);
  1454. case 10: return __RV_CSR_READ(CSR_MHPMEVENT10);
  1455. case 11: return __RV_CSR_READ(CSR_MHPMEVENT11);
  1456. case 12: return __RV_CSR_READ(CSR_MHPMEVENT12);
  1457. case 13: return __RV_CSR_READ(CSR_MHPMEVENT13);
  1458. case 14: return __RV_CSR_READ(CSR_MHPMEVENT14);
  1459. case 15: return __RV_CSR_READ(CSR_MHPMEVENT15);
  1460. case 16: return __RV_CSR_READ(CSR_MHPMEVENT16);
  1461. case 17: return __RV_CSR_READ(CSR_MHPMEVENT17);
  1462. case 18: return __RV_CSR_READ(CSR_MHPMEVENT18);
  1463. case 19: return __RV_CSR_READ(CSR_MHPMEVENT19);
  1464. case 20: return __RV_CSR_READ(CSR_MHPMEVENT20);
  1465. case 21: return __RV_CSR_READ(CSR_MHPMEVENT21);
  1466. case 22: return __RV_CSR_READ(CSR_MHPMEVENT22);
  1467. case 23: return __RV_CSR_READ(CSR_MHPMEVENT23);
  1468. case 24: return __RV_CSR_READ(CSR_MHPMEVENT24);
  1469. case 25: return __RV_CSR_READ(CSR_MHPMEVENT25);
  1470. case 26: return __RV_CSR_READ(CSR_MHPMEVENT26);
  1471. case 27: return __RV_CSR_READ(CSR_MHPMEVENT27);
  1472. case 28: return __RV_CSR_READ(CSR_MHPMEVENT28);
  1473. case 29: return __RV_CSR_READ(CSR_MHPMEVENT29);
  1474. case 30: return __RV_CSR_READ(CSR_MHPMEVENT30);
  1475. case 31: return __RV_CSR_READ(CSR_MHPMEVENT31);
  1476. default: return 0;
  1477. }
  1478. }
  1479. /**
  1480. * \brief Set value for selected high performance monitor counter
  1481. * \param [in] idx HPMCOUNTERx CSR index(3-31)
  1482. * \param [in] value HPMCOUNTERx Register value to set
  1483. * \details
  1484. * Set value for high performance monitor couner register
  1485. */
  1486. __STATIC_INLINE void __set_hpm_counter(unsigned long idx, uint64_t value)
  1487. {
  1488. switch (idx) {
  1489. #if __RISCV_XLEN == 32
  1490. case 3: __RV_CSR_WRITE(CSR_MHPMCOUNTER3, 0); // prevent carry
  1491. __RV_CSR_WRITE(CSR_MHPMCOUNTER3H, (uint32_t)(value >> 32));
  1492. __RV_CSR_WRITE(CSR_MHPMCOUNTER3, (uint32_t)(value)); break;
  1493. case 4: __RV_CSR_WRITE(CSR_MHPMCOUNTER4, 0); // prevent carry
  1494. __RV_CSR_WRITE(CSR_MHPMCOUNTER4H, (uint32_t)(value >> 32));
  1495. __RV_CSR_WRITE(CSR_MHPMCOUNTER4, (uint32_t)(value)); break;
  1496. case 5: __RV_CSR_WRITE(CSR_MHPMCOUNTER5, 0); // prevent carry
  1497. __RV_CSR_WRITE(CSR_MHPMCOUNTER5H, (uint32_t)(value >> 32));
  1498. __RV_CSR_WRITE(CSR_MHPMCOUNTER5, (uint32_t)(value)); break;
  1499. case 6: __RV_CSR_WRITE(CSR_MHPMCOUNTER6, 0); // prevent carry
  1500. __RV_CSR_WRITE(CSR_MHPMCOUNTER6H, (uint32_t)(value >> 32));
  1501. __RV_CSR_WRITE(CSR_MHPMCOUNTER6, (uint32_t)(value)); break;
  1502. case 7: __RV_CSR_WRITE(CSR_MHPMCOUNTER7, 0); // prevent carry
  1503. __RV_CSR_WRITE(CSR_MHPMCOUNTER7H, (uint32_t)(value >> 32));
  1504. __RV_CSR_WRITE(CSR_MHPMCOUNTER7, (uint32_t)(value)); break;
  1505. case 8: __RV_CSR_WRITE(CSR_MHPMCOUNTER8, 0); // prevent carry
  1506. __RV_CSR_WRITE(CSR_MHPMCOUNTER8H, (uint32_t)(value >> 32));
  1507. __RV_CSR_WRITE(CSR_MHPMCOUNTER8, (uint32_t)(value)); break;
  1508. case 9: __RV_CSR_WRITE(CSR_MHPMCOUNTER9, 0); // prevent carry
  1509. __RV_CSR_WRITE(CSR_MHPMCOUNTER9H, (uint32_t)(value >> 32));
  1510. __RV_CSR_WRITE(CSR_MHPMCOUNTER9, (uint32_t)(value)); break;
  1511. case 10: __RV_CSR_WRITE(CSR_MHPMCOUNTER10, 0); // prevent carry
  1512. __RV_CSR_WRITE(CSR_MHPMCOUNTER10H, (uint32_t)(value >> 32));
  1513. __RV_CSR_WRITE(CSR_MHPMCOUNTER10, (uint32_t)(value)); break;
  1514. case 11: __RV_CSR_WRITE(CSR_MHPMCOUNTER11, 0); // prevent carry
  1515. __RV_CSR_WRITE(CSR_MHPMCOUNTER11H, (uint32_t)(value >> 32));
  1516. __RV_CSR_WRITE(CSR_MHPMCOUNTER11, (uint32_t)(value)); break;
  1517. case 12: __RV_CSR_WRITE(CSR_MHPMCOUNTER12, 0); // prevent carry
  1518. __RV_CSR_WRITE(CSR_MHPMCOUNTER12H, (uint32_t)(value >> 32));
  1519. __RV_CSR_WRITE(CSR_MHPMCOUNTER12, (uint32_t)(value)); break;
  1520. case 13: __RV_CSR_WRITE(CSR_MHPMCOUNTER13, 0); // prevent carry
  1521. __RV_CSR_WRITE(CSR_MHPMCOUNTER13H, (uint32_t)(value >> 32));
  1522. __RV_CSR_WRITE(CSR_MHPMCOUNTER13, (uint32_t)(value)); break;
  1523. case 14: __RV_CSR_WRITE(CSR_MHPMCOUNTER14, 0); // prevent carry
  1524. __RV_CSR_WRITE(CSR_MHPMCOUNTER14H, (uint32_t)(value >> 32));
  1525. __RV_CSR_WRITE(CSR_MHPMCOUNTER14, (uint32_t)(value)); break;
  1526. case 15: __RV_CSR_WRITE(CSR_MHPMCOUNTER15, 0); // prevent carry
  1527. __RV_CSR_WRITE(CSR_MHPMCOUNTER15H, (uint32_t)(value >> 32));
  1528. __RV_CSR_WRITE(CSR_MHPMCOUNTER15, (uint32_t)(value)); break;
  1529. case 16: __RV_CSR_WRITE(CSR_MHPMCOUNTER16, 0); // prevent carry
  1530. __RV_CSR_WRITE(CSR_MHPMCOUNTER16H, (uint32_t)(value >> 32));
  1531. __RV_CSR_WRITE(CSR_MHPMCOUNTER16, (uint32_t)(value)); break;
  1532. case 17: __RV_CSR_WRITE(CSR_MHPMCOUNTER17, 0); // prevent carry
  1533. __RV_CSR_WRITE(CSR_MHPMCOUNTER17H, (uint32_t)(value >> 32));
  1534. __RV_CSR_WRITE(CSR_MHPMCOUNTER17, (uint32_t)(value)); break;
  1535. case 18: __RV_CSR_WRITE(CSR_MHPMCOUNTER18, 0); // prevent carry
  1536. __RV_CSR_WRITE(CSR_MHPMCOUNTER18H, (uint32_t)(value >> 32));
  1537. __RV_CSR_WRITE(CSR_MHPMCOUNTER18, (uint32_t)(value)); break;
  1538. case 19: __RV_CSR_WRITE(CSR_MHPMCOUNTER19, 0); // prevent carry
  1539. __RV_CSR_WRITE(CSR_MHPMCOUNTER19H, (uint32_t)(value >> 32));
  1540. __RV_CSR_WRITE(CSR_MHPMCOUNTER19, (uint32_t)(value)); break;
  1541. case 20: __RV_CSR_WRITE(CSR_MHPMCOUNTER20, 0); // prevent carry
  1542. __RV_CSR_WRITE(CSR_MHPMCOUNTER20H, (uint32_t)(value >> 32));
  1543. __RV_CSR_WRITE(CSR_MHPMCOUNTER20, (uint32_t)(value)); break;
  1544. case 21: __RV_CSR_WRITE(CSR_MHPMCOUNTER21, 0); // prevent carry
  1545. __RV_CSR_WRITE(CSR_MHPMCOUNTER21H, (uint32_t)(value >> 32));
  1546. __RV_CSR_WRITE(CSR_MHPMCOUNTER21, (uint32_t)(value)); break;
  1547. case 22: __RV_CSR_WRITE(CSR_MHPMCOUNTER22, 0); // prevent carry
  1548. __RV_CSR_WRITE(CSR_MHPMCOUNTER22H, (uint32_t)(value >> 32));
  1549. __RV_CSR_WRITE(CSR_MHPMCOUNTER22, (uint32_t)(value)); break;
  1550. case 23: __RV_CSR_WRITE(CSR_MHPMCOUNTER23, 0); // prevent carry
  1551. __RV_CSR_WRITE(CSR_MHPMCOUNTER23H, (uint32_t)(value >> 32));
  1552. __RV_CSR_WRITE(CSR_MHPMCOUNTER23, (uint32_t)(value)); break;
  1553. case 24: __RV_CSR_WRITE(CSR_MHPMCOUNTER24, 0); // prevent carry
  1554. __RV_CSR_WRITE(CSR_MHPMCOUNTER24H, (uint32_t)(value >> 32));
  1555. __RV_CSR_WRITE(CSR_MHPMCOUNTER24, (uint32_t)(value)); break;
  1556. case 25: __RV_CSR_WRITE(CSR_MHPMCOUNTER25, 0); // prevent carry
  1557. __RV_CSR_WRITE(CSR_MHPMCOUNTER25H, (uint32_t)(value >> 32));
  1558. __RV_CSR_WRITE(CSR_MHPMCOUNTER25, (uint32_t)(value)); break;
  1559. case 26: __RV_CSR_WRITE(CSR_MHPMCOUNTER26, 0); // prevent carry
  1560. __RV_CSR_WRITE(CSR_MHPMCOUNTER26H, (uint32_t)(value >> 32));
  1561. __RV_CSR_WRITE(CSR_MHPMCOUNTER26, (uint32_t)(value)); break;
  1562. case 27: __RV_CSR_WRITE(CSR_MHPMCOUNTER27, 0); // prevent carry
  1563. __RV_CSR_WRITE(CSR_MHPMCOUNTER27H, (uint32_t)(value >> 32));
  1564. __RV_CSR_WRITE(CSR_MHPMCOUNTER27, (uint32_t)(value)); break;
  1565. case 28: __RV_CSR_WRITE(CSR_MHPMCOUNTER28, 0); // prevent carry
  1566. __RV_CSR_WRITE(CSR_MHPMCOUNTER28H, (uint32_t)(value >> 32));
  1567. __RV_CSR_WRITE(CSR_MHPMCOUNTER28, (uint32_t)(value)); break;
  1568. case 29: __RV_CSR_WRITE(CSR_MHPMCOUNTER29, 0); // prevent carry
  1569. __RV_CSR_WRITE(CSR_MHPMCOUNTER29H, (uint32_t)(value >> 32));
  1570. __RV_CSR_WRITE(CSR_MHPMCOUNTER29, (uint32_t)(value)); break;
  1571. case 30: __RV_CSR_WRITE(CSR_MHPMCOUNTER30, 0); // prevent carry
  1572. __RV_CSR_WRITE(CSR_MHPMCOUNTER30H, (uint32_t)(value >> 32));
  1573. __RV_CSR_WRITE(CSR_MHPMCOUNTER30, (uint32_t)(value)); break;
  1574. case 31: __RV_CSR_WRITE(CSR_MHPMCOUNTER31, 0); // prevent carry
  1575. __RV_CSR_WRITE(CSR_MHPMCOUNTER31H, (uint32_t)(value >> 32));
  1576. __RV_CSR_WRITE(CSR_MHPMCOUNTER31, (uint32_t)(value)); break;
  1577. #elif __RISCV_XLEN == 64
  1578. case 3: __RV_CSR_WRITE(CSR_MHPMCOUNTER3, (value)); break;
  1579. case 4: __RV_CSR_WRITE(CSR_MHPMCOUNTER4, (value)); break;
  1580. case 5: __RV_CSR_WRITE(CSR_MHPMCOUNTER5, (value)); break;
  1581. case 6: __RV_CSR_WRITE(CSR_MHPMCOUNTER6, (value)); break;
  1582. case 7: __RV_CSR_WRITE(CSR_MHPMCOUNTER7, (value)); break;
  1583. case 8: __RV_CSR_WRITE(CSR_MHPMCOUNTER8, (value)); break;
  1584. case 9: __RV_CSR_WRITE(CSR_MHPMCOUNTER9, (value)); break;
  1585. case 10: __RV_CSR_WRITE(CSR_MHPMCOUNTER10, (value)); break;
  1586. case 11: __RV_CSR_WRITE(CSR_MHPMCOUNTER11, (value)); break;
  1587. case 12: __RV_CSR_WRITE(CSR_MHPMCOUNTER12, (value)); break;
  1588. case 13: __RV_CSR_WRITE(CSR_MHPMCOUNTER13, (value)); break;
  1589. case 14: __RV_CSR_WRITE(CSR_MHPMCOUNTER14, (value)); break;
  1590. case 15: __RV_CSR_WRITE(CSR_MHPMCOUNTER15, (value)); break;
  1591. case 16: __RV_CSR_WRITE(CSR_MHPMCOUNTER16, (value)); break;
  1592. case 17: __RV_CSR_WRITE(CSR_MHPMCOUNTER17, (value)); break;
  1593. case 18: __RV_CSR_WRITE(CSR_MHPMCOUNTER18, (value)); break;
  1594. case 19: __RV_CSR_WRITE(CSR_MHPMCOUNTER19, (value)); break;
  1595. case 20: __RV_CSR_WRITE(CSR_MHPMCOUNTER20, (value)); break;
  1596. case 21: __RV_CSR_WRITE(CSR_MHPMCOUNTER21, (value)); break;
  1597. case 22: __RV_CSR_WRITE(CSR_MHPMCOUNTER22, (value)); break;
  1598. case 23: __RV_CSR_WRITE(CSR_MHPMCOUNTER23, (value)); break;
  1599. case 24: __RV_CSR_WRITE(CSR_MHPMCOUNTER24, (value)); break;
  1600. case 25: __RV_CSR_WRITE(CSR_MHPMCOUNTER25, (value)); break;
  1601. case 26: __RV_CSR_WRITE(CSR_MHPMCOUNTER26, (value)); break;
  1602. case 27: __RV_CSR_WRITE(CSR_MHPMCOUNTER27, (value)); break;
  1603. case 28: __RV_CSR_WRITE(CSR_MHPMCOUNTER28, (value)); break;
  1604. case 29: __RV_CSR_WRITE(CSR_MHPMCOUNTER29, (value)); break;
  1605. case 30: __RV_CSR_WRITE(CSR_MHPMCOUNTER30, (value)); break;
  1606. case 31: __RV_CSR_WRITE(CSR_MHPMCOUNTER31, (value)); break;
  1607. #else
  1608. #endif
  1609. default: break;
  1610. }
  1611. }
  1612. /**
  1613. * \brief Get value of selected high performance monitor counter
  1614. * \param [in] idx HPMCOUNTERx CSR index(3-31)
  1615. * \details
  1616. * Get high performance monitor counter register value
  1617. * \return HPMCOUNTERx Register value
  1618. */
  1619. __STATIC_INLINE uint64_t __get_hpm_counter(unsigned long idx)
  1620. {
  1621. #if __RISCV_XLEN == 32
  1622. volatile uint32_t high0, low, high;
  1623. uint64_t full;
  1624. switch (idx) {
  1625. case 0: return __get_rv_cycle();
  1626. case 2: return __get_rv_instret();
  1627. case 3: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER3H);
  1628. low = __RV_CSR_READ(CSR_MHPMCOUNTER3);
  1629. high = __RV_CSR_READ(CSR_MHPMCOUNTER3H);
  1630. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER3); }
  1631. full = (((uint64_t)high) << 32) | low; return full;
  1632. case 4: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER4H);
  1633. low = __RV_CSR_READ(CSR_MHPMCOUNTER4);
  1634. high = __RV_CSR_READ(CSR_MHPMCOUNTER4H);
  1635. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER4); }
  1636. full = (((uint64_t)high) << 32) | low; return full;
  1637. case 5: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER5H);
  1638. low = __RV_CSR_READ(CSR_MHPMCOUNTER5);
  1639. high = __RV_CSR_READ(CSR_MHPMCOUNTER5H);
  1640. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER5); }
  1641. full = (((uint64_t)high) << 32) | low; return full;
  1642. case 6: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER6H);
  1643. low = __RV_CSR_READ(CSR_MHPMCOUNTER6);
  1644. high = __RV_CSR_READ(CSR_MHPMCOUNTER6H);
  1645. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER6); }
  1646. full = (((uint64_t)high) << 32) | low; return full;
  1647. case 7: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER7H);
  1648. low = __RV_CSR_READ(CSR_MHPMCOUNTER7);
  1649. high = __RV_CSR_READ(CSR_MHPMCOUNTER7H);
  1650. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER7); }
  1651. full = (((uint64_t)high) << 32) | low; return full;
  1652. case 8: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER8H);
  1653. low = __RV_CSR_READ(CSR_MHPMCOUNTER8);
  1654. high = __RV_CSR_READ(CSR_MHPMCOUNTER8H);
  1655. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER8); }
  1656. full = (((uint64_t)high) << 32) | low; return full;
  1657. case 9: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER9H);
  1658. low = __RV_CSR_READ(CSR_MHPMCOUNTER9);
  1659. high = __RV_CSR_READ(CSR_MHPMCOUNTER9H);
  1660. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER9); }
  1661. full = (((uint64_t)high) << 32) | low; return full;
  1662. case 10: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER10H);
  1663. low = __RV_CSR_READ(CSR_MHPMCOUNTER10);
  1664. high = __RV_CSR_READ(CSR_MHPMCOUNTER10H);
  1665. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER10); }
  1666. full = (((uint64_t)high) << 32) | low; return full;
  1667. case 11: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER11H);
  1668. low = __RV_CSR_READ(CSR_MHPMCOUNTER11);
  1669. high = __RV_CSR_READ(CSR_MHPMCOUNTER11H);
  1670. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER11); }
  1671. full = (((uint64_t)high) << 32) | low; return full;
  1672. case 12: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER12H);
  1673. low = __RV_CSR_READ(CSR_MHPMCOUNTER12);
  1674. high = __RV_CSR_READ(CSR_MHPMCOUNTER12H);
  1675. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER12); }
  1676. full = (((uint64_t)high) << 32) | low; return full;
  1677. case 13: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER13H);
  1678. low = __RV_CSR_READ(CSR_MHPMCOUNTER13);
  1679. high = __RV_CSR_READ(CSR_MHPMCOUNTER13H);
  1680. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER13); }
  1681. full = (((uint64_t)high) << 32) | low; return full;
  1682. case 14: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER14H);
  1683. low = __RV_CSR_READ(CSR_MHPMCOUNTER14);
  1684. high = __RV_CSR_READ(CSR_MHPMCOUNTER14H);
  1685. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER14); }
  1686. full = (((uint64_t)high) << 32) | low; return full;
  1687. case 15: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER15H);
  1688. low = __RV_CSR_READ(CSR_MHPMCOUNTER15);
  1689. high = __RV_CSR_READ(CSR_MHPMCOUNTER15H);
  1690. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER15); }
  1691. full = (((uint64_t)high) << 32) | low; return full;
  1692. case 16: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER16H);
  1693. low = __RV_CSR_READ(CSR_MHPMCOUNTER16);
  1694. high = __RV_CSR_READ(CSR_MHPMCOUNTER16H);
  1695. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER16); }
  1696. full = (((uint64_t)high) << 32) | low; return full;
  1697. case 17: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER17H);
  1698. low = __RV_CSR_READ(CSR_MHPMCOUNTER17);
  1699. high = __RV_CSR_READ(CSR_MHPMCOUNTER17H);
  1700. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER17); }
  1701. full = (((uint64_t)high) << 32) | low; return full;
  1702. case 18: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER18H);
  1703. low = __RV_CSR_READ(CSR_MHPMCOUNTER18);
  1704. high = __RV_CSR_READ(CSR_MHPMCOUNTER18H);
  1705. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER18); }
  1706. full = (((uint64_t)high) << 32) | low; return full;
  1707. case 19: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER19H);
  1708. low = __RV_CSR_READ(CSR_MHPMCOUNTER19);
  1709. high = __RV_CSR_READ(CSR_MHPMCOUNTER19H);
  1710. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER19); }
  1711. full = (((uint64_t)high) << 32) | low; return full;
  1712. case 20: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER20H);
  1713. low = __RV_CSR_READ(CSR_MHPMCOUNTER20);
  1714. high = __RV_CSR_READ(CSR_MHPMCOUNTER20H);
  1715. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER20); }
  1716. full = (((uint64_t)high) << 32) | low; return full;
  1717. case 21: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER21H);
  1718. low = __RV_CSR_READ(CSR_MHPMCOUNTER21);
  1719. high = __RV_CSR_READ(CSR_MHPMCOUNTER21H);
  1720. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER21); }
  1721. full = (((uint64_t)high) << 32) | low; return full;
  1722. case 22: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER22H);
  1723. low = __RV_CSR_READ(CSR_MHPMCOUNTER22);
  1724. high = __RV_CSR_READ(CSR_MHPMCOUNTER22H);
  1725. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER22); }
  1726. full = (((uint64_t)high) << 32) | low; return full;
  1727. case 23: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER23H);
  1728. low = __RV_CSR_READ(CSR_MHPMCOUNTER23);
  1729. high = __RV_CSR_READ(CSR_MHPMCOUNTER23H);
  1730. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER23); }
  1731. full = (((uint64_t)high) << 32) | low; return full;
  1732. case 24: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER24H);
  1733. low = __RV_CSR_READ(CSR_MHPMCOUNTER24);
  1734. high = __RV_CSR_READ(CSR_MHPMCOUNTER24H);
  1735. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER24); }
  1736. full = (((uint64_t)high) << 32) | low; return full;
  1737. case 25: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER25H);
  1738. low = __RV_CSR_READ(CSR_MHPMCOUNTER25);
  1739. high = __RV_CSR_READ(CSR_MHPMCOUNTER25H);
  1740. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER25); }
  1741. full = (((uint64_t)high) << 32) | low; return full;
  1742. case 26: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER26H);
  1743. low = __RV_CSR_READ(CSR_MHPMCOUNTER26);
  1744. high = __RV_CSR_READ(CSR_MHPMCOUNTER26H);
  1745. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER26); }
  1746. full = (((uint64_t)high) << 32) | low; return full;
  1747. case 27: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER27H);
  1748. low = __RV_CSR_READ(CSR_MHPMCOUNTER27);
  1749. high = __RV_CSR_READ(CSR_MHPMCOUNTER27H);
  1750. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER27); }
  1751. full = (((uint64_t)high) << 32) | low; return full;
  1752. case 28: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER28H);
  1753. low = __RV_CSR_READ(CSR_MHPMCOUNTER28);
  1754. high = __RV_CSR_READ(CSR_MHPMCOUNTER28H);
  1755. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER28); }
  1756. full = (((uint64_t)high) << 32) | low; return full;
  1757. case 29: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER29H);
  1758. low = __RV_CSR_READ(CSR_MHPMCOUNTER29);
  1759. high = __RV_CSR_READ(CSR_MHPMCOUNTER29H);
  1760. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER29); }
  1761. full = (((uint64_t)high) << 32) | low; return full;
  1762. case 30: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER30H);
  1763. low = __RV_CSR_READ(CSR_MHPMCOUNTER30);
  1764. high = __RV_CSR_READ(CSR_MHPMCOUNTER30H);
  1765. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER30); }
  1766. full = (((uint64_t)high) << 32) | low; return full;
  1767. case 31: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER31H);
  1768. low = __RV_CSR_READ(CSR_MHPMCOUNTER31);
  1769. high = __RV_CSR_READ(CSR_MHPMCOUNTER31H);
  1770. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER31); }
  1771. full = (((uint64_t)high) << 32) | low; return full;
  1772. #elif __RISCV_XLEN == 64
  1773. switch (idx) {
  1774. case 0: return __get_rv_cycle();
  1775. case 2: return __get_rv_instret();
  1776. case 3: return __RV_CSR_READ(CSR_MHPMCOUNTER3);
  1777. case 4: return __RV_CSR_READ(CSR_MHPMCOUNTER4);
  1778. case 5: return __RV_CSR_READ(CSR_MHPMCOUNTER5);
  1779. case 6: return __RV_CSR_READ(CSR_MHPMCOUNTER6);
  1780. case 7: return __RV_CSR_READ(CSR_MHPMCOUNTER7);
  1781. case 8: return __RV_CSR_READ(CSR_MHPMCOUNTER8);
  1782. case 9: return __RV_CSR_READ(CSR_MHPMCOUNTER9);
  1783. case 10: return __RV_CSR_READ(CSR_MHPMCOUNTER10);
  1784. case 11: return __RV_CSR_READ(CSR_MHPMCOUNTER11);
  1785. case 12: return __RV_CSR_READ(CSR_MHPMCOUNTER12);
  1786. case 13: return __RV_CSR_READ(CSR_MHPMCOUNTER13);
  1787. case 14: return __RV_CSR_READ(CSR_MHPMCOUNTER14);
  1788. case 15: return __RV_CSR_READ(CSR_MHPMCOUNTER15);
  1789. case 16: return __RV_CSR_READ(CSR_MHPMCOUNTER16);
  1790. case 17: return __RV_CSR_READ(CSR_MHPMCOUNTER17);
  1791. case 18: return __RV_CSR_READ(CSR_MHPMCOUNTER18);
  1792. case 19: return __RV_CSR_READ(CSR_MHPMCOUNTER19);
  1793. case 20: return __RV_CSR_READ(CSR_MHPMCOUNTER20);
  1794. case 21: return __RV_CSR_READ(CSR_MHPMCOUNTER21);
  1795. case 22: return __RV_CSR_READ(CSR_MHPMCOUNTER22);
  1796. case 23: return __RV_CSR_READ(CSR_MHPMCOUNTER23);
  1797. case 24: return __RV_CSR_READ(CSR_MHPMCOUNTER24);
  1798. case 25: return __RV_CSR_READ(CSR_MHPMCOUNTER25);
  1799. case 26: return __RV_CSR_READ(CSR_MHPMCOUNTER26);
  1800. case 27: return __RV_CSR_READ(CSR_MHPMCOUNTER27);
  1801. case 28: return __RV_CSR_READ(CSR_MHPMCOUNTER28);
  1802. case 29: return __RV_CSR_READ(CSR_MHPMCOUNTER29);
  1803. case 30: return __RV_CSR_READ(CSR_MHPMCOUNTER30);
  1804. case 31: return __RV_CSR_READ(CSR_MHPMCOUNTER31);
  1805. #else
  1806. switch (idx) {
  1807. #endif
  1808. default: return 0;
  1809. }
  1810. }
  1811. /**
  1812. * \brief Get value of selected high performance monitor counter
  1813. * \param [in] idx HPMCOUNTERx CSR index(3-31)
  1814. * \details
  1815. * Get high performance monitor counter register value without high
  1816. * 32 bits when XLEN=32
  1817. * \return HPMCOUNTERx Register value
  1818. */
  1819. __STATIC_INLINE unsigned long __read_hpm_counter(unsigned long idx)
  1820. {
  1821. switch (idx) {
  1822. case 0: return __read_cycle_csr();
  1823. case 2: return __read_instret_csr();
  1824. case 3: return __RV_CSR_READ(CSR_MHPMCOUNTER3);
  1825. case 4: return __RV_CSR_READ(CSR_MHPMCOUNTER4);
  1826. case 5: return __RV_CSR_READ(CSR_MHPMCOUNTER5);
  1827. case 6: return __RV_CSR_READ(CSR_MHPMCOUNTER6);
  1828. case 7: return __RV_CSR_READ(CSR_MHPMCOUNTER7);
  1829. case 8: return __RV_CSR_READ(CSR_MHPMCOUNTER8);
  1830. case 9: return __RV_CSR_READ(CSR_MHPMCOUNTER9);
  1831. case 10: return __RV_CSR_READ(CSR_MHPMCOUNTER10);
  1832. case 11: return __RV_CSR_READ(CSR_MHPMCOUNTER11);
  1833. case 12: return __RV_CSR_READ(CSR_MHPMCOUNTER12);
  1834. case 13: return __RV_CSR_READ(CSR_MHPMCOUNTER13);
  1835. case 14: return __RV_CSR_READ(CSR_MHPMCOUNTER14);
  1836. case 15: return __RV_CSR_READ(CSR_MHPMCOUNTER15);
  1837. case 16: return __RV_CSR_READ(CSR_MHPMCOUNTER16);
  1838. case 17: return __RV_CSR_READ(CSR_MHPMCOUNTER17);
  1839. case 18: return __RV_CSR_READ(CSR_MHPMCOUNTER18);
  1840. case 19: return __RV_CSR_READ(CSR_MHPMCOUNTER19);
  1841. case 20: return __RV_CSR_READ(CSR_MHPMCOUNTER20);
  1842. case 21: return __RV_CSR_READ(CSR_MHPMCOUNTER21);
  1843. case 22: return __RV_CSR_READ(CSR_MHPMCOUNTER22);
  1844. case 23: return __RV_CSR_READ(CSR_MHPMCOUNTER23);
  1845. case 24: return __RV_CSR_READ(CSR_MHPMCOUNTER24);
  1846. case 25: return __RV_CSR_READ(CSR_MHPMCOUNTER25);
  1847. case 26: return __RV_CSR_READ(CSR_MHPMCOUNTER26);
  1848. case 27: return __RV_CSR_READ(CSR_MHPMCOUNTER27);
  1849. case 28: return __RV_CSR_READ(CSR_MHPMCOUNTER28);
  1850. case 29: return __RV_CSR_READ(CSR_MHPMCOUNTER29);
  1851. case 30: return __RV_CSR_READ(CSR_MHPMCOUNTER30);
  1852. case 31: return __RV_CSR_READ(CSR_MHPMCOUNTER31);
  1853. default: return 0;
  1854. }
  1855. }
/**
 * \brief Set exceptions delegation to S mode
 * \details Set certain exceptions of supervisor mode or user mode
 * delegated from machine mode to supervisor mode.
 * \param [in] mask Exception delegation bit mask, written whole to the
 *             MEDELEG CSR (overwrites any previous delegation setting)
 * \remarks
 * Exception should trigger in supervisor mode or user mode.
 */
__STATIC_FORCEINLINE void __set_medeleg(unsigned long mask)
{
    __RV_CSR_WRITE(CSR_MEDELEG, mask);
}
/**
 * \brief Set interrupt delegation to S mode
 * \details Set certain interrupt of supervisor mode or user mode
 * delegated from machine mode to supervisor mode.
 * \param [in] mask Interrupt delegation bit mask, written whole to the
 *             MIDELEG CSR (overwrites any previous delegation setting)
 * \remarks
 * Interrupt should trigger in supervisor mode or user mode.
 */
__STATIC_FORCEINLINE void __set_mideleg(unsigned long mask)
{
    __RV_CSR_WRITE(CSR_MIDELEG, mask);
}
/**
 * \brief Execute fence instruction, p -> pred, s -> succ
 * \details
 * The FENCE instruction ensures that all memory accesses from instructions preceding
 * the fence in program order (the `predecessor set`) appear earlier in the global memory order than
 * memory accesses from instructions appearing after the fence in program order (the `successor set`).
 * For details, please refer to The RISC-V Instruction Set Manual.
 * \param p predecessor set, such as iorw, rw, r, w (passed as a bare token,
 *          stringized into the instruction; also acts as a compiler barrier
 *          via the "memory" clobber)
 * \param s successor set, such as iorw, rw, r, w
 */
#define __FENCE(p, s) __ASM volatile ("fence " #p "," #s : : : "memory")
/**
 * \brief Fence.i Instruction
 * \details
 * The FENCE.I instruction is used to synchronize the instruction
 * and data streams, e.g. after writing instructions to memory
 * (self-modifying code, loaders).
 */
__STATIC_FORCEINLINE void __FENCE_I(void)
{
    __ASM volatile("fence.i");
}
/** \brief Read & Write Memory barrier (device I/O + normal memory) */
#define __RWMB() __FENCE(iorw,iorw)
/** \brief Read Memory barrier (device input + memory reads) */
#define __RMB() __FENCE(ir,ir)
/** \brief Write Memory barrier (device output + memory writes) */
#define __WMB() __FENCE(ow,ow)
/** \brief SMP Read & Write Memory barrier (normal memory only, orders accesses between harts) */
#define __SMP_RWMB() __FENCE(rw,rw)
/** \brief SMP Read Memory barrier */
#define __SMP_RMB() __FENCE(r,r)
/** \brief SMP Write Memory barrier */
#define __SMP_WMB() __FENCE(w,w)
/** \brief CPU relax for busy loop — compiler barrier only, emits no instruction */
#define __CPU_RELAX() __ASM volatile ("" : : : "memory")
  1913. /* ===== Load/Store Operations ===== */
/**
 * \brief Load 8bit value from address (8 bit)
 * \details Load 8 bit value with a single lb instruction.
 * \param [in] addr Address pointer to data
 * \return value of type uint8_t at (*addr)
 * \remarks lb sign-extends into the register; the uint8_t return type
 *          truncates back to the low 8 bits, so the result is unaffected.
 */
__STATIC_FORCEINLINE uint8_t __LB(volatile void *addr)
{
    uint8_t result;
    __ASM volatile ("lb %0, 0(%1)" : "=r" (result) : "r" (addr));
    return result;
}
/**
 * \brief Load 16bit value from address (16 bit)
 * \details Load 16 bit value with a single lh instruction.
 * \param [in] addr Address pointer to data; should be 2-byte aligned —
 *             TODO confirm whether this core allows misaligned access
 * \return value of type uint16_t at (*addr)
 * \remarks lh sign-extends into the register; the uint16_t return type
 *          truncates back to the low 16 bits, so the result is unaffected.
 */
__STATIC_FORCEINLINE uint16_t __LH(volatile void *addr)
{
    uint16_t result;
    __ASM volatile ("lh %0, 0(%1)" : "=r" (result) : "r" (addr));
    return result;
}
/**
 * \brief Load 32bit value from address (32 bit)
 * \details Load 32 bit value with a single lw instruction.
 * \param [in] addr Address pointer to data; should be 4-byte aligned —
 *             TODO confirm whether this core allows misaligned access
 * \return value of type uint32_t at (*addr)
 */
__STATIC_FORCEINLINE uint32_t __LW(volatile void *addr)
{
    uint32_t result;
    __ASM volatile ("lw %0, 0(%1)" : "=r" (result) : "r" (addr));
    return result;
}
#if __RISCV_XLEN != 32
/**
 * \brief Load 64bit value from address (64 bit)
 * \details Load 64 bit value with a single ld instruction.
 * \param [in] addr Address pointer to data
 * \return value of type uint64_t at (*addr)
 * \remarks RV64 only macro
 */
__STATIC_FORCEINLINE uint64_t __LD(volatile void *addr)
{
    uint64_t result;
    __ASM volatile ("ld %0, 0(%1)" : "=r" (result) : "r" (addr));
    return result;
}
#endif
/**
 * \brief Write 8bit value to address (8 bit)
 * \details Write 8 bit value with a single sb instruction.
 * \param [in] addr Address pointer to data
 * \param [in] val Value to set
 */
__STATIC_FORCEINLINE void __SB(volatile void *addr, uint8_t val)
{
    __ASM volatile ("sb %0, 0(%1)" : : "r" (val), "r" (addr));
}
/**
 * \brief Write 16bit value to address (16 bit)
 * \details Write 16 bit value with a single sh instruction.
 * \param [in] addr Address pointer to data
 * \param [in] val Value to set
 */
__STATIC_FORCEINLINE void __SH(volatile void *addr, uint16_t val)
{
    __ASM volatile ("sh %0, 0(%1)" : : "r" (val), "r" (addr));
}
/**
 * \brief Write 32bit value to address (32 bit)
 * \details Write 32 bit value with a single sw instruction.
 * \param [in] addr Address pointer to data
 * \param [in] val Value to set
 */
__STATIC_FORCEINLINE void __SW(volatile void *addr, uint32_t val)
{
    __ASM volatile ("sw %0, 0(%1)" : : "r" (val), "r" (addr));
}
#if __RISCV_XLEN != 32
/**
 * \brief Write 64bit value to address (64 bit)
 * \details Write 64 bit value with a single sd instruction.
 * \param [in] addr Address pointer to data
 * \param [in] val Value to set
 * \remarks RV64 only macro
 */
__STATIC_FORCEINLINE void __SD(volatile void *addr, uint64_t val)
{
    __ASM volatile ("sd %0, 0(%1)" : : "r" (val), "r" (addr));
}
#endif
/**
 * \brief Compare and Swap 32bit value using LR and SC
 * \details Compare old value with memory, if identical,
 * store new value in memory. Return the initial value in memory.
 * Success is indicated by comparing the return value with oldval:
 * equal means the swap took place, different means it did not.
 * The LR/SC loop retries on spurious store-conditional failure.
 * \param [in] addr Address pointer to data, address need to be 4byte aligned
 * \param [in] oldval Old value of the data in address
 * \param [in] newval New value to be stored into the address
 * \return return the initial value in memory
 */
__STATIC_INLINE uint32_t __CAS_W(volatile uint32_t *addr, uint32_t oldval, uint32_t newval)
{
    uint32_t result;
    uint32_t rc;
    /* 0: load-reserve; bail to 1 on mismatch; store-conditional and
     * retry from 0 while rc != 0 (reservation lost). */
    __ASM volatile ( \
        "0: lr.w %0, %2 \n" \
        " bne %0, %z3, 1f \n" \
        " sc.w %1, %z4, %2 \n" \
        " bnez %1, 0b \n" \
        "1:\n" \
        : "=&r"(result), "=&r"(rc), "+A"(*addr) \
        : "r"(oldval), "r"(newval) \
        : "memory");
    return result;
}
/**
 * \brief Atomic Swap 32bit value into memory
 * \details Atomically swap new 32bit value into memory using amoswap.w.
 * \param [in] addr Address pointer to data, address need to be 4byte aligned
 * \param [in] newval New value to be stored into the address
 * \return return the original value in memory
 */
__STATIC_FORCEINLINE uint32_t __AMOSWAP_W(volatile uint32_t *addr, uint32_t newval)
{
    uint32_t result;
    /* amoswap.w writes newval and places the previous memory value in result */
    __ASM volatile ("amoswap.w %0, %2, %1" : \
        "=r"(result), "+A"(*addr) : "r"(newval) : "memory");
    return result;
}
  2047. /**
  2048. * \brief Atomic Add with 32bit value
  2049. * \details Atomically ADD 32bit value with value in memory using amoadd.d.
  2050. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2051. * \param [in] value value to be ADDed
  2052. * \return return memory value + add value
  2053. */
  2054. __STATIC_FORCEINLINE int32_t __AMOADD_W(volatile int32_t *addr, int32_t value)
  2055. {
  2056. int32_t result;
  2057. __ASM volatile ("amoadd.w %0, %2, %1" : \
  2058. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2059. return *addr;
  2060. }
  2061. /**
  2062. * \brief Atomic And with 32bit value
  2063. * \details Atomically AND 32bit value with value in memory using amoand.d.
  2064. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2065. * \param [in] value value to be ANDed
  2066. * \return return memory value & and value
  2067. */
  2068. __STATIC_FORCEINLINE int32_t __AMOAND_W(volatile int32_t *addr, int32_t value)
  2069. {
  2070. int32_t result;
  2071. __ASM volatile ("amoand.w %0, %2, %1" : \
  2072. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2073. return *addr;
  2074. }
  2075. /**
  2076. * \brief Atomic OR with 32bit value
  2077. * \details Atomically OR 32bit value with value in memory using amoor.d.
  2078. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2079. * \param [in] value value to be ORed
  2080. * \return return memory value | and value
  2081. */
  2082. __STATIC_FORCEINLINE int32_t __AMOOR_W(volatile int32_t *addr, int32_t value)
  2083. {
  2084. int32_t result;
  2085. __ASM volatile ("amoor.w %0, %2, %1" : \
  2086. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2087. return *addr;
  2088. }
  2089. /**
  2090. * \brief Atomic XOR with 32bit value
  2091. * \details Atomically XOR 32bit value with value in memory using amoxor.d.
  2092. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2093. * \param [in] value value to be XORed
  2094. * \return return memory value ^ and value
  2095. */
  2096. __STATIC_FORCEINLINE int32_t __AMOXOR_W(volatile int32_t *addr, int32_t value)
  2097. {
  2098. int32_t result;
  2099. __ASM volatile ("amoxor.w %0, %2, %1" : \
  2100. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2101. return *addr;
  2102. }
  2103. /**
  2104. * \brief Atomic unsigned MAX with 32bit value
  2105. * \details Atomically unsigned max compare 32bit value with value in memory using amomaxu.d.
  2106. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2107. * \param [in] value value to be compared
  2108. * \return return the bigger value
  2109. */
  2110. __STATIC_FORCEINLINE uint32_t __AMOMAXU_W(volatile uint32_t *addr, uint32_t value)
  2111. {
  2112. uint32_t result;
  2113. __ASM volatile ("amomaxu.w %0, %2, %1" : \
  2114. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2115. return *addr;
  2116. }
  2117. /**
  2118. * \brief Atomic signed MAX with 32bit value
  2119. * \details Atomically signed max compare 32bit value with value in memory using amomax.d.
  2120. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2121. * \param [in] value value to be compared
  2122. * \return the bigger value
  2123. */
  2124. __STATIC_FORCEINLINE int32_t __AMOMAX_W(volatile int32_t *addr, int32_t value)
  2125. {
  2126. int32_t result;
  2127. __ASM volatile ("amomax.w %0, %2, %1" : \
  2128. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2129. return *addr;
  2130. }
  2131. /**
  2132. * \brief Atomic unsigned MIN with 32bit value
  2133. * \details Atomically unsigned min compare 32bit value with value in memory using amominu.d.
  2134. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2135. * \param [in] value value to be compared
  2136. * \return the smaller value
  2137. */
  2138. __STATIC_FORCEINLINE uint32_t __AMOMINU_W(volatile uint32_t *addr, uint32_t value)
  2139. {
  2140. uint32_t result;
  2141. __ASM volatile ("amominu.w %0, %2, %1" : \
  2142. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2143. return *addr;
  2144. }
  2145. /**
  2146. * \brief Atomic signed MIN with 32bit value
  2147. * \details Atomically signed min compare 32bit value with value in memory using amomin.d.
  2148. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2149. * \param [in] value value to be compared
  2150. * \return the smaller value
  2151. */
  2152. __STATIC_FORCEINLINE int32_t __AMOMIN_W(volatile int32_t *addr, int32_t value)
  2153. {
  2154. int32_t result;
  2155. __ASM volatile ("amomin.w %0, %2, %1" : \
  2156. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2157. return *addr;
  2158. }
  2159. #if __RISCV_XLEN == 64
/**
 * \brief Compare and Swap 64bit value using LR and SC
 * \details Compare old value with memory, if identical,
 * store new value in memory. Return the initial value in memory.
 * Success is indicated by comparing the return value with oldval:
 * equal means the swap took place, different means it did not.
 * The LR/SC loop retries on spurious store-conditional failure.
 * \param [in] addr Address pointer to data, address need to be 8byte aligned
 * \param [in] oldval Old value of the data in address
 * \param [in] newval New value to be stored into the address
 * \return return the initial value in memory
 */
__STATIC_INLINE uint64_t __CAS_D(volatile uint64_t *addr, uint64_t oldval, uint64_t newval)
{
    uint64_t result;
    uint64_t rc;
    /* 0: load-reserve; bail to 1 on mismatch; store-conditional and
     * retry from 0 while rc != 0 (reservation lost). */
    __ASM volatile ( \
        "0: lr.d %0, %2 \n" \
        " bne %0, %z3, 1f \n" \
        " sc.d %1, %z4, %2 \n" \
        " bnez %1, 0b \n" \
        "1:\n" \
        : "=&r"(result), "=&r"(rc), "+A"(*addr) \
        : "r"(oldval), "r"(newval) \
        : "memory");
    return result;
}
/**
 * \brief Atomic Swap 64bit value into memory
 * \details Atomically swap new 64bit value into memory using amoswap.d.
 * \param [in] addr Address pointer to data, address need to be 8byte aligned
 * \param [in] newval New value to be stored into the address
 * \return return the original value in memory
 */
__STATIC_FORCEINLINE uint64_t __AMOSWAP_D(volatile uint64_t *addr, uint64_t newval)
{
    uint64_t result;
    /* amoswap.d writes newval and places the previous memory value in result */
    __ASM volatile ("amoswap.d %0, %2, %1" : \
        "=r"(result), "+A"(*addr) : "r"(newval) : "memory");
    return result;
}
  2200. /**
  2201. * \brief Atomic Add with 64bit value
  2202. * \details Atomically ADD 64bit value with value in memory using amoadd.d.
  2203. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2204. * \param [in] value value to be ADDed
  2205. * \return return memory value + add value
  2206. */
  2207. __STATIC_FORCEINLINE int64_t __AMOADD_D(volatile int64_t *addr, int64_t value)
  2208. {
  2209. int64_t result;
  2210. __ASM volatile ("amoadd.d %0, %2, %1" : \
  2211. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2212. return *addr;
  2213. }
  2214. /**
  2215. * \brief Atomic And with 64bit value
  2216. * \details Atomically AND 64bit value with value in memory using amoand.d.
  2217. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2218. * \param [in] value value to be ANDed
  2219. * \return return memory value & and value
  2220. */
  2221. __STATIC_FORCEINLINE int64_t __AMOAND_D(volatile int64_t *addr, int64_t value)
  2222. {
  2223. int64_t result;
  2224. __ASM volatile ("amoand.d %0, %2, %1" : \
  2225. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2226. return *addr;
  2227. }
  2228. /**
  2229. * \brief Atomic OR with 64bit value
  2230. * \details Atomically OR 64bit value with value in memory using amoor.d.
  2231. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2232. * \param [in] value value to be ORed
  2233. * \return return memory value | and value
  2234. */
  2235. __STATIC_FORCEINLINE int64_t __AMOOR_D(volatile int64_t *addr, int64_t value)
  2236. {
  2237. int64_t result;
  2238. __ASM volatile ("amoor.d %0, %2, %1" : \
  2239. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2240. return *addr;
  2241. }
  2242. /**
  2243. * \brief Atomic XOR with 64bit value
  2244. * \details Atomically XOR 64bit value with value in memory using amoxor.d.
  2245. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2246. * \param [in] value value to be XORed
  2247. * \return return memory value ^ and value
  2248. */
  2249. __STATIC_FORCEINLINE int64_t __AMOXOR_D(volatile int64_t *addr, int64_t value)
  2250. {
  2251. int64_t result;
  2252. __ASM volatile ("amoxor.d %0, %2, %1" : \
  2253. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2254. return *addr;
  2255. }
  2256. /**
  2257. * \brief Atomic unsigned MAX with 64bit value
  2258. * \details Atomically unsigned max compare 64bit value with value in memory using amomaxu.d.
  2259. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2260. * \param [in] value value to be compared
  2261. * \return return the bigger value
  2262. */
  2263. __STATIC_FORCEINLINE uint64_t __AMOMAXU_D(volatile uint64_t *addr, uint64_t value)
  2264. {
  2265. uint64_t result;
  2266. __ASM volatile ("amomaxu.d %0, %2, %1" : \
  2267. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2268. return *addr;
  2269. }
  2270. /**
  2271. * \brief Atomic signed MAX with 64bit value
  2272. * \details Atomically signed max compare 64bit value with value in memory using amomax.d.
  2273. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2274. * \param [in] value value to be compared
  2275. * \return the bigger value
  2276. */
  2277. __STATIC_FORCEINLINE int64_t __AMOMAX_D(volatile int64_t *addr, int64_t value)
  2278. {
  2279. int64_t result;
  2280. __ASM volatile ("amomax.d %0, %2, %1" : \
  2281. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2282. return *addr;
  2283. }
  2284. /**
  2285. * \brief Atomic unsigned MIN with 64bit value
  2286. * \details Atomically unsigned min compare 64bit value with value in memory using amominu.d.
  2287. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2288. * \param [in] value value to be compared
  2289. * \return the smaller value
  2290. */
  2291. __STATIC_FORCEINLINE uint64_t __AMOMINU_D(volatile uint64_t *addr, uint64_t value)
  2292. {
  2293. uint64_t result;
  2294. __ASM volatile ("amominu.d %0, %2, %1" : \
  2295. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2296. return *addr;
  2297. }
  2298. /**
  2299. * \brief Atomic signed MIN with 64bit value
  2300. * \details Atomically signed min compare 64bit value with value in memory using amomin.d.
  2301. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2302. * \param [in] value value to be compared
  2303. * \return the smaller value
  2304. */
  2305. __STATIC_FORCEINLINE int64_t __AMOMIN_D(volatile int64_t *addr, int64_t value)
  2306. {
  2307. int64_t result;
  2308. __ASM volatile ("amomin.d %0, %2, %1" : \
  2309. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2310. return *addr;
  2311. }
  2312. #endif /* __RISCV_XLEN == 64 */
  2313. /** @} */ /* End of Doxygen Group NMSIS_Core_CPU_Intrinsic */
  2314. #ifdef __cplusplus
  2315. }
  2316. #endif
  2317. #endif /* __CORE_FEATURE_BASE__ */