/* core_feature_base.h — page/line-number residue from document extraction removed. */
  1. /*
  2. * Copyright (c) 2019 Nuclei Limited. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Licensed under the Apache License, Version 2.0 (the License); you may
  7. * not use this file except in compliance with the License.
  8. * You may obtain a copy of the License at
  9. *
  10. * www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an AS IS BASIS, WITHOUT
  14. * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. */
  18. #ifndef __CORE_FEATURE_BASE__
  19. #define __CORE_FEATURE_BASE__
  20. /*!
  21. * @file core_feature_base.h
  22. * @brief Base core feature API for Nuclei N/NX Core
  23. */
  24. /*
  25. * Core Base Feature Configuration Macro:
  26. * 1. __HARTID_OFFSET: Optional, define this macro when your cpu system first hart hartid and hart index is different.
  27. * eg. If your cpu system, first hart hartid is 2, hart index is 0, then set this macro to 2
  28. *
  29. */
  30. #include <stdint.h>
  31. #ifdef __cplusplus
  32. extern "C" {
  33. #endif
  34. #include "nmsis_compiler.h"
  35. /**
  36. * \defgroup NMSIS_Core_Registers Register Define and Type Definitions
  37. * \brief Type definitions and defines for core registers.
  38. *
  39. * @{
  40. */
  41. #ifndef __RISCV_XLEN
  42. /** \brief Refer to the width of an integer register in bits(either 32 or 64) */
  43. #ifndef __riscv_xlen
  44. #define __RISCV_XLEN 32
  45. #else
  46. #define __RISCV_XLEN __riscv_xlen
  47. #endif
  48. #endif /* __RISCV_XLEN */
  49. /** \brief Type of Control and Status Register(CSR), depends on the XLEN defined in RISC-V */
  50. typedef unsigned long rv_csr_t;
  51. /** \brief Type of RISC-V Counter such as cycle, instret, time, depends on the XLEN defined in RISC-V, but for n100, it will be 32bit max */
  52. #if defined(CPU_SERIES) && CPU_SERIES == 100
  53. typedef uint32_t rv_counter_t;
  54. #else
  55. typedef uint64_t rv_counter_t;
  56. #endif
  57. /** @} */ /* End of Doxygen Group NMSIS_Core_Registers */
  58. /**
  59. * \defgroup NMSIS_Core_Base_Registers Base Register Define and Type Definitions
  60. * \ingroup NMSIS_Core_Registers
  61. * \brief Type definitions and defines for base core registers.
  62. *
  63. * @{
  64. */
/**
 * \brief Union type to access the MISA CSR register (machine ISA register).
 *        Each low-order bit flags the presence of one single-letter ISA
 *        extension; `mxl` in the top two bits encodes the machine XLEN.
 */
typedef union {
    struct {
        rv_csr_t a:1;                        /*!< bit: 0  Atomic extension */
        rv_csr_t b:1;                        /*!< bit: 1  B extension */
        rv_csr_t c:1;                        /*!< bit: 2  Compressed extension */
        rv_csr_t d:1;                        /*!< bit: 3  Double-precision floating-point extension */
        rv_csr_t e:1;                        /*!< bit: 4  RV32E/64E base ISA */
        rv_csr_t f:1;                        /*!< bit: 5  Single-precision floating-point extension */
        rv_csr_t g:1;                        /*!< bit: 6  Reserved */
        rv_csr_t h:1;                        /*!< bit: 7  Hypervisor extension */
        rv_csr_t i:1;                        /*!< bit: 8  RV32I/64I/128I base ISA */
        rv_csr_t j:1;                        /*!< bit: 9  Reserved */
        rv_csr_t k:1;                        /*!< bit: 10 Reserved */
        rv_csr_t l:1;                        /*!< bit: 11 Reserved */
        rv_csr_t m:1;                        /*!< bit: 12 Integer Multiply/Divide extension */
        rv_csr_t n:1;                        /*!< bit: 13 Tentatively reserved for User-Level Interrupts extension */
        rv_csr_t o:1;                        /*!< bit: 14 Reserved */
        rv_csr_t p:1;                        /*!< bit: 15 Tentatively reserved for Packed-SIMD extension */
        rv_csr_t q:1;                        /*!< bit: 16 Quad-precision floating-point extension */
        rv_csr_t r:1;                        /*!< bit: 17 Reserved */
        rv_csr_t s:1;                        /*!< bit: 18 Supervisor mode implemented */
        rv_csr_t t:1;                        /*!< bit: 19 Reserved */
        rv_csr_t u:1;                        /*!< bit: 20 User mode implemented */
        rv_csr_t v:1;                        /*!< bit: 21 Vector extension */
        rv_csr_t w:1;                        /*!< bit: 22 Reserved */
        rv_csr_t x:1;                        /*!< bit: 23 Non-standard extensions present */
        rv_csr_t y:1;                        /*!< bit: 24 Reserved */
        rv_csr_t z:1;                        /*!< bit: 25 Reserved */
        rv_csr_t _reserved0:__RISCV_XLEN-28; /*!< bit: 26..XLEN-3 Reserved */
        rv_csr_t mxl:2;                      /*!< bit: XLEN-2..XLEN-1 Machine XLEN */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MISA_Type;
/**
 * \brief Union type to access the MSTATUS CSR register (machine status).
 *        The upper-half layout differs between RV32 and RV64; on RV32 the
 *        additional fields live in the separate MSTATUSH CSR.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:1;  /*!< bit: 0  Reserved */
        rv_csr_t sie:1;         /*!< bit: 1  Supervisor interrupt enable flag */
        rv_csr_t _reserved1:1;  /*!< bit: 2  Reserved */
        rv_csr_t mie:1;         /*!< bit: 3  Machine mode interrupt enable flag */
        rv_csr_t _reserved2:1;  /*!< bit: 4  Reserved */
        rv_csr_t spie:1;        /*!< bit: 5  Supervisor mode previous interrupt enable flag */
        rv_csr_t ube:1;         /*!< bit: 6  U-mode non-instruction-fetch memory access big-endian enable flag */
        rv_csr_t mpie:1;        /*!< bit: 7  Machine mode previous interrupt enable flag */
        rv_csr_t spp:1;         /*!< bit: 8  Supervisor previous privilege mode */
        rv_csr_t vs:2;          /*!< bit: 9..10  Vector status flag */
        rv_csr_t mpp:2;         /*!< bit: 11..12 Machine previous privilege mode */
        rv_csr_t fs:2;          /*!< bit: 13..14 FS status flag (floating-point unit state) */
        rv_csr_t xs:2;          /*!< bit: 15..16 XS status flag (user-extension state) */
        rv_csr_t mprv:1;        /*!< bit: 17 Modify PRiVilege */
        rv_csr_t sum:1;         /*!< bit: 18 Supervisor Mode load and store protection */
        rv_csr_t mxr:1;         /*!< bit: 19 Make eXecutable Readable */
        rv_csr_t tvm:1;         /*!< bit: 20 Trap Virtual Memory */
        rv_csr_t tw:1;          /*!< bit: 21 Timeout Wait */
        rv_csr_t tsr:1;         /*!< bit: 22 Trap SRET */
        rv_csr_t spelp:1;       /*!< bit: 23 Supervisor mode Previous Expected Landing Pad (ELP) State */
        rv_csr_t sdt:1;         /*!< bit: 24 S-mode-disable-trap */
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
        rv_csr_t _reserved3:7;  /*!< bit: 25..31 Reserved */
        rv_csr_t uxl:2;         /*!< bit: 32..33 U-mode XLEN */
        rv_csr_t sxl:2;         /*!< bit: 34..35 S-mode XLEN */
        rv_csr_t sbe:1;         /*!< bit: 36 S-mode non-instruction-fetch memory access big-endian enable flag */
        rv_csr_t mbe:1;         /*!< bit: 37 M-mode non-instruction-fetch memory access big-endian enable flag */
        rv_csr_t gva:1;         /*!< bit: 38 Guest Virtual Address */
        rv_csr_t mpv:1;         /*!< bit: 39 Machine Previous Virtualization Mode */
        rv_csr_t _reserved4:1;  /*!< bit: 40 Reserved */
        rv_csr_t mpelp:1;       /*!< bit: 41 Machine mode Previous Expected Landing Pad (ELP) State */
        rv_csr_t mdt:1;         /*!< bit: 42 M-mode-disable-trap */
        rv_csr_t _reserved5:20; /*!< bit: 43..62 Reserved */
        rv_csr_t sd:1;          /*!< bit: 63 Dirty status for XS or FS */
#else
        rv_csr_t _reserved3:6;  /*!< bit: 25..30 Reserved */
        rv_csr_t sd:1;          /*!< bit: 31 Dirty status for XS or FS */
#endif
    } b;                        /*!< Structure used for bit access */
    rv_csr_t d;                 /*!< Type used for csr data access */
} CSR_MSTATUS_Type;
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 32
/**
 * \brief Union type to access the MSTATUSH CSR register.
 *        RV32 only: holds the machine-status fields that on RV64 occupy
 *        bits 32 and above of MSTATUS.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:4;  /*!< bit: 0..3  Reserved */
        rv_csr_t sbe:1;         /*!< bit: 4  S-mode non-instruction-fetch memory access big-endian enable flag */
        rv_csr_t mbe:1;         /*!< bit: 5  M-mode non-instruction-fetch memory access big-endian enable flag */
        rv_csr_t gva:1;         /*!< bit: 6  Guest Virtual Address */
        rv_csr_t mpv:1;         /*!< bit: 7  Machine Previous Virtualization Mode */
        rv_csr_t _reserved1:1;  /*!< bit: 8  Reserved */
        rv_csr_t mpelp:1;       /*!< bit: 9  Machine mode Previous Expected Landing Pad (ELP) State */
        rv_csr_t mdt:1;         /*!< bit: 10 M-mode-disable-trap */
        rv_csr_t _reserved5:21; /*!< bit: 11..31 Reserved */
    } b;                        /*!< Structure used for bit access */
    rv_csr_t d;                 /*!< Type used for csr data access */
} CSR_MSTATUSH_Type;
#endif
/**
 * \brief Union type to access the MTVEC CSR register (machine trap vector).
 */
typedef union {
    struct {
        rv_csr_t mode:6;               /*!< bit: 0..5 Interrupt mode control */
        rv_csr_t addr:__RISCV_XLEN-6;  /*!< bit: 6..XLEN-1 mtvec base address (upper bits of the trap-vector address) */
    } b;                               /*!< Structure used for bit access */
    rv_csr_t d;                        /*!< Type used for csr data access */
} CSR_MTVEC_Type;
/**
 * \brief Union type to access the MCAUSE CSR register (machine trap cause).
 *        `interrupt` (the top bit) distinguishes interrupts from exceptions;
 *        `exccode` identifies the specific trap.
 */
typedef union {
    struct {
        rv_csr_t exccode:12;    /*!< bit: 0..11  Exception or interrupt code */
        rv_csr_t _reserved0:4;  /*!< bit: 12..15 Reserved */
        rv_csr_t mpil:8;        /*!< bit: 16..23 Previous interrupt level */
        rv_csr_t _reserved1:3;  /*!< bit: 24..26 Reserved */
        rv_csr_t mpie:1;        /*!< bit: 27 Interrupt enable flag before entering the interrupt */
        rv_csr_t mpp:2;         /*!< bit: 28..29 Privilege mode flag before entering the interrupt */
        rv_csr_t minhv:1;       /*!< bit: 30 Machine interrupt vector table fetch in progress */
#if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
        rv_csr_t _reserved2:__RISCV_XLEN-32; /*!< bit: 31..XLEN-2 Reserved */
#endif
        rv_csr_t interrupt:1;   /*!< bit: XLEN-1 Trap type: 0 means exception, 1 means interrupt */
    } b;                        /*!< Structure used for bit access */
    rv_csr_t d;                 /*!< Type used for csr data access */
} CSR_MCAUSE_Type;
/**
 * \brief Union type to access the MCOUNTINHIBIT CSR register
 *        (counter-inhibit: setting a bit stops the matching counter).
 */
typedef union {
    struct {
        rv_csr_t cy:1;                       /*!< bit: 0 1 means disable the mcycle counter */
        rv_csr_t _reserved0:1;               /*!< bit: 1 Reserved */
        rv_csr_t ir:1;                       /*!< bit: 2 1 means disable the minstret counter */
        rv_csr_t _reserved1:__RISCV_XLEN-3;  /*!< bit: 3..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MCOUNTINHIBIT_Type;
/**
 * \brief Union type to access the MSUBM CSR register (machine sub-mode).
 *        Records the current and previous trap sub-mode of the core.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:6;  /*!< bit: 0..5 Reserved 0 */
        rv_csr_t typ:2;         /*!< bit: 6..7 Current sub-mode:
                                     0: Normal Machine Mode;
                                     1: Interrupt Handling Mode;
                                     2: Exception Handling Mode;
                                     3: NMI Handling Mode. */
        rv_csr_t ptyp:2;        /*!< bit: 8..9 Sub-mode before entering the trap:
                                     0: Normal Machine Mode;
                                     1: Interrupt Handling Mode;
                                     2: Exception Handling Mode;
                                     3: NMI Handling Mode. */
        rv_csr_t gpridx:5;      /*!< bit: 10..14 Current Register Group Select */
        rv_csr_t pgpridx:5;     /*!< bit: 15..19 Previous Register Group Select */
        rv_csr_t _reserved1:__RISCV_XLEN-20; /*!< bit: 20..XLEN-1 Reserved 0 */
    } b;                        /*!< Structure used for bit access */
    rv_csr_t d;                 /*!< Type used for csr data access */
} CSR_MSUBM_Type;
/**
 * \brief Union type to access the MDCAUSE CSR register
 *        (detailed trap cause, supplementing MCAUSE).
 */
typedef union {
    struct {
        rv_csr_t mdcause:3;                  /*!< bit: 0..2 More detailed exception information as MCAUSE supplement */
        rv_csr_t _reserved0:__RISCV_XLEN-3;  /*!< bit: 3..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MDCAUSE_Type;
/**
 * \brief Union type to access the MMISC_CTRL CSR register
 *        (miscellaneous core feature control bits).
 */
typedef union {
    struct {
        rv_csr_t _reserved0:1;         /*!< bit: 0 Reserved */
        rv_csr_t zclsd_en:1;           /*!< bit: 1 Control whether Zclsd uses the Zcf extension encoding or not */
        rv_csr_t _reserved1:1;         /*!< bit: 2 Reserved */
        rv_csr_t bpu:1;                /*!< bit: 3 Dynamic branch prediction enable flag */
        rv_csr_t _reserved2:2;         /*!< bit: 4..5 Reserved */
        rv_csr_t misalign:1;           /*!< bit: 6 Misaligned access support flag */
        rv_csr_t zcmt_zcmp:1;          /*!< bit: 7 Whether the Zc extension uses the cfdsp encodings of the D extension or not */
        rv_csr_t core_buserr:1;        /*!< bit: 8 Core bus error reported as exception or interrupt */
        rv_csr_t nmi_cause:1;          /*!< bit: 9 mnvec control and NMI mcause exccode */
        rv_csr_t imreturn_en:1;        /*!< bit: 10 IMRETURN mode of trace */
        rv_csr_t sijump_en:1;          /*!< bit: 11 SIJUMP mode of trace */
        rv_csr_t ldspec_en:1;          /*!< bit: 12 Enable speculative load issue to the memory interface */
        rv_csr_t _reserved3:1;         /*!< bit: 13 Reserved */
        rv_csr_t dbg_sec:1;            /*!< bit: 14 Debug access mode, removed in latest releases */
        rv_csr_t _reserved4:2;         /*!< bit: 15..16 Reserved */
        rv_csr_t csr_excl_enable:1;    /*!< bit: 17 Exclusive instructions (lr, sc) on Non-cacheable/Device memory can send the exclusive flag on the memory bus */
        rv_csr_t _reserved5:2;         /*!< bit: 18..19 Reserved */
        rv_csr_t lsu_allow_diff_en:1;  /*!< bit: 20 LSU allows the next operation to issue outstanding transactions while the current transaction is incomplete */
        rv_csr_t hw_auto_context:1;    /*!< bit: 21 Hardware automatic context save/restore enable */
        rv_csr_t _reserved6:__RISCV_XLEN-22; /*!< bit: 22..XLEN-1 Reserved */
    } b;                               /*!< Structure used for bit access */
    rv_csr_t d;                        /*!< Type used for csr data access */
} CSR_MMISCCTRL_Type;
/* Backward-compatible aliases for the same register type. */
typedef CSR_MMISCCTRL_Type CSR_MMISCCTL_Type;
typedef CSR_MMISCCTRL_Type CSR_MMISC_CTL_Type;
/**
 * \brief Union type to access the MMISC_CTL1 CSR register
 *        (additional miscellaneous control, mainly VPU/VLSU related).
 */
typedef union {
    struct {
        rv_csr_t fp16mode:1;             /*!< bit: 0 16-bit float precision mode */
        rv_csr_t vlsu_ooo_4k_mode:1;     /*!< bit: 1 Control the size of the address check region for VLSU out-of-order */
        rv_csr_t vlsu_ooo_force_va_4k:1; /*!< bit: 2 Control the size of the virtual address check region for VLSU out-of-order */
        rv_csr_t vlsu_ooo_en:1;          /*!< bit: 3 Control the enable of the VLSU out-of-order feature */
        rv_csr_t vlsu_cof_en:1;          /*!< bit: 4 Control the enable of the VLSU check-only-first feature */
        rv_csr_t vlm_path_en:1;          /*!< bit: 5 Control vlm dedicated path enable */
        rv_csr_t rvv_v1_0_cmpt:1;        /*!< bit: 6 Control whether some VPU instruction behavior is compatible with RVV 1.0 */
        rv_csr_t _reserved0:__RISCV_XLEN-7; /*!< bit: 7..XLEN-1 Reserved */
    } b;                                 /*!< Structure used for bit access */
    rv_csr_t d;                          /*!< Type used for csr data access */
} CSR_MMISC_CTL1_Type;
/**
 * \brief Union type to access the MCACHE_CTL CSR register.
 *        Bits 0..10 control the I-Cache, bits 16..23 the D-Cache.
 */
typedef union {
    struct {
        rv_csr_t ic_en:1;          /*!< bit: 0 I-Cache enable */
        rv_csr_t ic_scpd_mod:1;    /*!< bit: 1 Scratchpad mode, 0: Scratchpad as ICache Data RAM, 1: Scratchpad as ILM SRAM */
        rv_csr_t ic_ecc_en:1;      /*!< bit: 2 I-Cache ECC enable */
        rv_csr_t ic_ecc_excp_en:1; /*!< bit: 3 I-Cache 2-bit ECC error exception enable */
        rv_csr_t ic_rwtecc:1;      /*!< bit: 4 Control I-Cache Tag RAM ECC code injection */
        rv_csr_t ic_rwdecc:1;      /*!< bit: 5 Control I-Cache Data RAM ECC code injection */
        rv_csr_t ic_pf_en:1;       /*!< bit: 6 I-Cache prefetch enable */
        rv_csr_t ic_cancel_en:1;   /*!< bit: 7 I-Cache change-flow canceling enable control */
        rv_csr_t ic_ecc_chk_en:1;  /*!< bit: 8 I-Cache check ECC codes enable */
        rv_csr_t ic_prefetch_en:1; /*!< bit: 9 I-Cache CMO prefetch enable control */
        rv_csr_t ic_burst_type:1;  /*!< bit: 10 I-Cache burst type control */
        rv_csr_t _reserved0:5;     /*!< bit: 11..15 Reserved */
        rv_csr_t dc_en:1;          /*!< bit: 16 D-Cache enable */
        rv_csr_t dc_ecc_en:1;      /*!< bit: 17 D-Cache ECC enable */
        rv_csr_t dc_ecc_excp_en:1; /*!< bit: 18 D-Cache 2-bit ECC error exception enable */
        rv_csr_t dc_rwtecc:1;      /*!< bit: 19 Control D-Cache Tag RAM ECC code injection */
        rv_csr_t dc_rwdecc:1;      /*!< bit: 20 Control D-Cache Data RAM ECC code injection */
        rv_csr_t dc_ecc_chk_en:1;  /*!< bit: 21 D-Cache check ECC codes enable */
        rv_csr_t dc_prefetch_en:1; /*!< bit: 22 D-Cache CMO prefetch enable control */
        rv_csr_t dc_burst_type:1;  /*!< bit: 23 D-Cache burst type control */
        rv_csr_t _reserved1:__RISCV_XLEN-24; /*!< bit: 24..XLEN-1 Reserved */
    } b;                           /*!< Structure used for bit access */
    rv_csr_t d;                    /*!< Type used for csr data access */
} CSR_MCACHECTL_Type;
/* Backward-compatible alias for the same register type. */
typedef CSR_MCACHECTL_Type CSR_MCACHE_CTL_Type;
/**
 * \brief Union type to access the MSAVESTATUS CSR register
 *        (saved status for two levels of NMI/exception nesting).
 *        Note: unlike the other CSR union types here, the raw-data member
 *        is named `w` rather than `d`.
 */
typedef union {
    struct {
        rv_csr_t mpie1:1;       /*!< bit: 0 Interrupt enable flag of the first level of NMI/exception nesting */
        rv_csr_t mpp1:2;        /*!< bit: 1..2 Privilege mode of the first level of NMI/exception nesting */
        rv_csr_t _reserved0:3;  /*!< bit: 3..5 Reserved */
        rv_csr_t ptyp1:2;       /*!< bit: 6..7 NMI/exception type before the first nesting */
        rv_csr_t mpie2:1;       /*!< bit: 8 Interrupt enable flag of the second level of NMI/exception nesting */
        rv_csr_t mpp2:2;        /*!< bit: 9..10 Privilege mode of the second level of NMI/exception nesting */
        rv_csr_t _reserved1:3;  /*!< bit: 11..13 Reserved */
        rv_csr_t ptyp2:2;       /*!< bit: 14..15 NMI/exception type before the second nesting */
        rv_csr_t _reserved2:__RISCV_XLEN-16; /*!< bit: 16..XLEN-1 Reserved */
    } b;                        /*!< Structure used for bit access */
    rv_csr_t w;                 /*!< Type used for csr data access */
} CSR_MSAVESTATUS_Type;
/**
 * \brief Union type to access the MILM_CTL CSR register
 *        (Instruction Local Memory control).
 */
typedef union {
    struct {
        rv_csr_t ilm_en:1;          /*!< bit: 0 ILM enable */
        rv_csr_t ilm_ecc_en:1;      /*!< bit: 1 ILM ECC enable */
        rv_csr_t ilm_ecc_excp_en:1; /*!< bit: 2 ILM ECC exception enable */
        rv_csr_t ilm_rwecc:1;       /*!< bit: 3 Control mecc_code write to ILM, simulating error injection */
        rv_csr_t ilm_ecc_chk_en:1;  /*!< bit: 4 ILM check ECC codes enable */
        rv_csr_t ilm_va_en:1;       /*!< bit: 5 Use virtual address to judge ILM access */
        rv_csr_t dis_lsu_ilm:1;     /*!< bit: 6 Disable LSU access to ILM */
        rv_csr_t _reserved0:3;      /*!< bit: 7..9 Reserved */
        rv_csr_t ilm_bpa:__RISCV_XLEN-10; /*!< bit: 10..XLEN-1 ILM base physical address */
    } b;                            /*!< Structure used for bit access */
    rv_csr_t d;                     /*!< Type used for csr data access */
} CSR_MILMCTL_Type;
/* Backward-compatible alias for the same register type. */
typedef CSR_MILMCTL_Type CSR_MILM_CTL_Type;
/**
 * \brief Union type to access the MDLM_CTL CSR register
 *        (Data Local Memory control).
 */
typedef union {
    struct {
        rv_csr_t dlm_en:1;          /*!< bit: 0 DLM enable */
        rv_csr_t dlm_ecc_en:1;      /*!< bit: 1 DLM ECC enable */
        rv_csr_t dlm_ecc_excp_en:1; /*!< bit: 2 DLM ECC exception enable */
        rv_csr_t dlm_rwecc:1;       /*!< bit: 3 Control mecc_code write to DLM, simulating error injection */
        rv_csr_t dlm_ecc_chk_en:1;  /*!< bit: 4 DLM check ECC codes enable */
        rv_csr_t dlm_va_en:1;       /*!< bit: 5 Use virtual address to judge DLM access */
        rv_csr_t dis_lsu_dlm:1;     /*!< bit: 6 Disable LSU access to DLM */
        rv_csr_t _reserved0:3;      /*!< bit: 7..9 Reserved */
        rv_csr_t dlm_bpa:__RISCV_XLEN-10; /*!< bit: 10..XLEN-1 DLM base address */
    } b;                            /*!< Structure used for bit access */
    rv_csr_t d;                     /*!< Type used for csr data access */
} CSR_MDLMCTL_Type;
/* Backward-compatible alias; note the historical name is DILM, not MDLM. */
typedef CSR_MDLMCTL_Type CSR_DILM_CTL_Type;
/**
 * \brief Union type to access the MCFG_INFO CSR register
 *        (read-only feature-presence information for this core).
 */
typedef union {
    struct {
        rv_csr_t tee:1;          /*!< bit: 0 TEE present */
        rv_csr_t ecc:1;          /*!< bit: 1 ECC present */
        rv_csr_t clic:1;         /*!< bit: 2 CLIC present */
        rv_csr_t plic:1;         /*!< bit: 3 PLIC present */
        rv_csr_t fio:1;          /*!< bit: 4 FIO present */
        rv_csr_t ppi:1;          /*!< bit: 5 PPI present */
        rv_csr_t nice:1;         /*!< bit: 6 NICE present */
        rv_csr_t ilm:1;          /*!< bit: 7 ILM present */
        rv_csr_t dlm:1;          /*!< bit: 8 DLM present */
        rv_csr_t icache:1;       /*!< bit: 9 ICache present */
        rv_csr_t dcache:1;       /*!< bit: 10 DCache present */
        rv_csr_t smp:1;          /*!< bit: 11 SMP present */
        rv_csr_t dsp_n1:1;       /*!< bit: 12 DSP N1 present */
        rv_csr_t dsp_n2:1;       /*!< bit: 13 DSP N2 present */
        rv_csr_t dsp_n3:1;       /*!< bit: 14 DSP N3 present */
        rv_csr_t zc_xlcz:1;      /*!< bit: 15 Zc and xlcz extension present */
        rv_csr_t iregion:1;      /*!< bit: 16 IREGION present */
        rv_csr_t vpu_degree:2;   /*!< bit: 17..18 Indicate the VPU degree of parallelism */
        rv_csr_t sec_mode:1;     /*!< bit: 19 Smwg extension present */
        rv_csr_t etrace:1;       /*!< bit: 20 Etrace present */
        rv_csr_t safety_mecha:2; /*!< bit: 21..22 Indicate the core's safety mechanism */
        rv_csr_t vnice:1;        /*!< bit: 23 VNICE present */
        rv_csr_t xlcz:1;         /*!< bit: 24 XLCZ extension present */
        rv_csr_t zilsd:1;        /*!< bit: 25 Zilsd/Zclsd extension present */
        rv_csr_t sstc:1;         /*!< bit: 26 SSTC extension present */
        rv_csr_t _reserved1:__RISCV_XLEN-27; /*!< bit: 27..XLEN-1 Reserved */
    } b;                         /*!< Structure used for bit access */
    rv_csr_t d;                  /*!< Type used for csr data access */
} CSR_MCFGINFO_Type;
/* Backward-compatible alias for the same register type. */
typedef CSR_MCFGINFO_Type CSR_MCFG_INFO_Type;
/**
 * \brief Union type to access the MICFG_INFO CSR register
 *        (read-only I-Cache and ILM configuration information).
 */
typedef union {
    struct {
        rv_csr_t set:4;         /*!< bit: 0..3 I-Cache sets per way */
        rv_csr_t way:3;         /*!< bit: 4..6 I-Cache ways */
        rv_csr_t lsize:3;       /*!< bit: 7..9 I-Cache line size */
        rv_csr_t ecc:1;         /*!< bit: 10 I-Cache ECC support */
        rv_csr_t _reserved0:5;  /*!< bit: 11..15 Reserved */
        rv_csr_t lm_size:5;     /*!< bit: 16..20 ILM size, must be a power-of-two size */
        rv_csr_t lm_xonly:1;    /*!< bit: 21 ILM execute-only permission, or Reserved */
        rv_csr_t lm_ecc:1;      /*!< bit: 22 ILM ECC support */
        rv_csr_t i_share_dlm:1; /*!< bit: 23 Support for the IFU fetching instructions from DLM */
        rv_csr_t _reserved1:__RISCV_XLEN-24; /*!< bit: 24..XLEN-1 Reserved */
    } b;                        /*!< Structure used for bit access */
    rv_csr_t d;                 /*!< Type used for csr data access */
} CSR_MICFGINFO_Type;
/* Backward-compatible alias for the same register type. */
typedef CSR_MICFGINFO_Type CSR_MICFG_INFO_Type;
/**
 * \brief Union type to access MDCFG_INFO CSR register.
 * \details Describes the D-Cache and DLM configuration. Use the \c b member
 * for individual bit-field access or \c d to read/write the whole register.
 */
typedef union {
    struct {
        rv_csr_t set:4;                      /*!< bit: 0..3 D-Cache sets per way */
        rv_csr_t way:3;                      /*!< bit: 4..6 D-Cache way */
        rv_csr_t lsize:3;                    /*!< bit: 7..9 D-Cache line size */
        rv_csr_t ecc:1;                      /*!< bit: 10 D-Cache ECC support */
        rv_csr_t _reserved0:5;               /*!< bit: 11..15 Reserved */
        rv_csr_t lm_size:5;                  /*!< bit: 16..20 DLM size, need to be 2^n size */
        rv_csr_t lm_ecc:1;                   /*!< bit: 21 DLM ECC present */
        rv_csr_t _reserved1:__RISCV_XLEN-22; /*!< bit: 22..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MDCFGINFO_Type;
typedef CSR_MDCFGINFO_Type CSR_MDCFG_INFO_Type; /*!< Alias kept for source compatibility */
/**
 * \brief Union type to access MTLBCFG_INFO CSR register.
 * \details
 * NOTE: the MTLBCFG_INFO CSR supports two different mapping layouts.
 * Use the `b.mapping` or `nb.mapping` field (bit XLEN-1, identical
 * position in both layouts) to determine the active mapping type.
 * If the `mapping` field is set, use the `nb` structure for field
 * access. Otherwise, use the `b` structure for field access.
 */
typedef union {
    struct {
        rv_csr_t set:4;                      /*!< bit: 0..3 Main TLB entry per way */
        rv_csr_t way:3;                      /*!< bit: 4..6 Main TLB ways */
        rv_csr_t lsize:3;                    /*!< bit: 7..9 Main TLB line size or Reserved */
        rv_csr_t ecc:1;                      /*!< bit: 10 Main TLB supports ECC or not */
        rv_csr_t napot:1;                    /*!< bit: 11 TLB supports Svnapot or not */
        rv_csr_t _reserved1:4;               /*!< bit: 12..15 Reserved 0 */
        rv_csr_t i_size:3;                   /*!< bit: 16..18 ITLB size */
        rv_csr_t d_size:3;                   /*!< bit: 19..21 DTLB size */
        rv_csr_t _reserved2:__RISCV_XLEN-23; /*!< bit: 22..XLEN-2 Reserved 0 */
        rv_csr_t mapping:1;                  /*!< bit: XLEN-1 mapping type */
    } b;                                     /*!< Structure used for bit access (legacy mapping) */
    struct {
        rv_csr_t set:4;                      /*!< bit: 0..3 Main TLB entry per way */
        rv_csr_t way:3;                      /*!< bit: 4..6 Main TLB ways */
        rv_csr_t lsize:3;                    /*!< bit: 7..9 Main TLB line size or Reserved */
        rv_csr_t ecc:1;                      /*!< bit: 10 Main TLB supports ECC or not */
        rv_csr_t napot:1;                    /*!< bit: 11 TLB supports Svnapot or not */
        rv_csr_t i_size:7;                   /*!< bit: 12..18 ITLB size (wider than legacy layout) */
        rv_csr_t d_size:8;                   /*!< bit: 19..26 DTLB size (wider than legacy layout) */
        rv_csr_t _reserved0:__RISCV_XLEN-28; /*!< bit: 27..XLEN-2 Reserved 0 */
        rv_csr_t mapping:1;                  /*!< bit: XLEN-1 TLB mapping type */
    } nb;                                    /*!< Structure used for new mapping bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MTLBCFGINFO_Type;
typedef CSR_MTLBCFGINFO_Type CSR_MTLBCFG_INFO_Type; /*!< Alias kept for source compatibility */
/**
 * \brief Union type to access MPPICFG_INFO CSR register.
 * \details Describes the Peripheral Port Interface (PPI) region: size,
 * enable control and base address.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:1;              /*!< bit: 0 Reserved 1 */
        rv_csr_t ppi_size:5;                /*!< bit: 1..5 PPI size, need to be 2^n size */
        rv_csr_t _reserved1:3;              /*!< bit: 6..8 Reserved 0 */
        rv_csr_t ppi_en:1;                  /*!< bit: 9 PPI Enable. Software can write this bit to control PPI */
        rv_csr_t ppi_bpa:__RISCV_XLEN-10;   /*!< bit: 10..XLEN-1 PPI base address */
    } b;                                    /*!< Structure used for bit access */
    rv_csr_t d;                             /*!< Type used for csr data access */
} CSR_MPPICFGINFO_Type;
typedef CSR_MPPICFGINFO_Type CSR_MPPICFG_INFO_Type; /*!< Alias kept for source compatibility */
/**
 * \brief Union type to access MFIOCFG_INFO CSR register.
 * \details Describes the Fast I/O (FIO) region: size and base address.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:1;              /*!< bit: 0 Reserved */
        rv_csr_t fio_size:5;                /*!< bit: 1..5 FIO size, need to be 2^n size */
        rv_csr_t _reserved1:4;              /*!< bit: 6..9 Reserved */
        rv_csr_t fio_bpa:__RISCV_XLEN-10;   /*!< bit: 10..XLEN-1 FIO base address */
    } b;                                    /*!< Structure used for bit access */
    rv_csr_t d;                             /*!< Type used for csr data access */
} CSR_MFIOCFGINFO_Type;
typedef CSR_MFIOCFGINFO_Type CSR_MFIOCFG_INFO_Type; /*!< Alias kept for source compatibility */
/**
 * \brief Union type to access MECC_LOCK CSR register.
 * \details Single lock bit guarding the ECC configuration.
 */
typedef union {
    struct {
        rv_csr_t ecc_lock:1;                 /*!< bit: 0 RW permission, ECC Lock configure */
        rv_csr_t _reserved0:__RISCV_XLEN-1;  /*!< bit: 1..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MECCLOCK_Type;
typedef CSR_MECCLOCK_Type CSR_MECC_LOCK_Type; /*!< Alias kept for source compatibility */
/**
 * \brief Union type to access MECC_CODE CSR register.
 * \details Carries the ECC check code used for error injection plus the RAM
 * IDs reporting 1-bit (correctable) and 2-bit (fatal) ECC errors.
 */
typedef union {
    struct {
        rv_csr_t code:9;                     /*!< bit: 0..8 Used to inject ECC check code */
        rv_csr_t _reserved0:7;               /*!< bit: 9..15 Reserved 0 */
        rv_csr_t ramid:5;                    /*!< bit: 16..20 The ID of RAM that has 2bit ECC error, software can clear these bits */
        rv_csr_t _reserved1:3;               /*!< bit: 21..23 Reserved 0 */
        rv_csr_t sramid:5;                   /*!< bit: 24..28 The ID of RAM that has 1bit ECC error, software can clear these bits */
        rv_csr_t _reserved2:2;               /*!< bit: 29..30 Reserved 0 */
        rv_csr_t ecc_inj_mode:1;             /*!< bit: 31 ECC injection mode */
#if __RISCV_XLEN == 64
        rv_csr_t _reserved3:__RISCV_XLEN-32; /*!< bit: 32..XLEN-1 Reserved 0 (upper half only exists on RV64) */
#endif
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MECCCODE_Type;
typedef CSR_MECCCODE_Type CSR_MECC_CODE_Type; /*!< Alias kept for source compatibility */
  530. /**
  531. * \brief Union type to access MECC_CTL CSR register.
  532. */
  533. typedef union {
  534. struct {
  535. rv_csr_t ilm_fch_msk:1; /*!< bit: 0 Write 1 to disable aggregate ILM fetch ECC fatal error to safety_error output */
  536. rv_csr_t ilm_acc_msk:1; /*!< bit: 1 Write 1 to disable aggregate ILM load/store access ECC fatal error to safety_error output */
  537. rv_csr_t dlm_acc_msk:1; /*!< bit: 2 Write 1 to disable aggregate DLM access ECC fatal error to safety_error output */
  538. rv_csr_t ic_fch_msk:1; /*!< bit: 3 Write 1 to disable aggregate ICache fetch ECC fatal error to safety_error output */
  539. rv_csr_t dc_acc_msk:1; /*!< bit: 4 Write 1 to disable aggregate DCache access ECC fatal error to safety_error output */
  540. rv_csr_t ilm_ext_msk:1; /*!< bit: 5 Write 1 to disable aggregate ILM external access ECC fatal error to safety_error output */
  541. rv_csr_t dlm_ext_msk:1; /*!< bit: 6 Write 1 to disable aggregate DLM external access ECC fatal error to safety_error output */
  542. rv_csr_t ic_ccm_msk:1; /*!< bit: 7 Write 1 to disable aggregate ICache CCM ECC fatal error to safety_error output */
  543. rv_csr_t dc_ccm_msk:1; /*!< bit: 8 Write 1 to disable aggregate DCache CCM ECC fatal error to safety_error output */
  544. rv_csr_t dc_cpbk_msk:1; /*!< bit: 9 Write 1 to disable aggregate DCache CPBK ECC fatal error to safety_error output */
  545. rv_csr_t _reserved0:21; /*!< bit: 10..30 Reserved 0 */
  546. rv_csr_t io_prot_chk_en:1; /*!< bit: 31 Controls to check the IO interface */
  547. #if defined(__RISCV_XLEN) && __RISCV_XLEN == 64
  548. rv_csr_t _reserved1:__RISCV_XLEN-32; /*!< bit: 32..63 Reserved 0 */
  549. #endif
  550. } b; /*!< Structure used for bit access */
  551. rv_csr_t d; /*!< Type used for csr data access */
  552. } CSR_MECC_CTL_Type;
/**
 * \brief Union type to access MECC_STATUS CSR register.
 * \details Sticky per-source flags recording that an ECC fatal error has
 * occurred; bit positions mirror the mask bits in MECC_CTL.
 */
typedef union {
    struct {
        rv_csr_t ilm_fch_err:1;              /*!< bit: 0 ILM fetch ECC fatal error has occurred */
        rv_csr_t ilm_acc_err:1;              /*!< bit: 1 ILM load/store access ECC fatal error has occurred */
        rv_csr_t dlm_acc_err:1;              /*!< bit: 2 DLM access ECC fatal error has occurred */
        rv_csr_t ic_fch_err:1;               /*!< bit: 3 ICache fetch ECC fatal error has occurred */
        rv_csr_t dc_acc_err:1;               /*!< bit: 4 DCache access ECC fatal error has occurred */
        rv_csr_t ilm_ext_err:1;              /*!< bit: 5 ILM external access ECC fatal error has occurred */
        rv_csr_t dlm_ext_err:1;              /*!< bit: 6 DLM external access ECC fatal error has occurred */
        rv_csr_t ic_ccm_err:1;               /*!< bit: 7 ICache CCM ECC fatal error has occurred */
        rv_csr_t dc_ccm_err:1;               /*!< bit: 8 DCache CCM ECC fatal error has occurred */
        rv_csr_t dc_cpbk_err:1;              /*!< bit: 9 DCache CPBK ECC fatal error has occurred */
        rv_csr_t _reserved0:__RISCV_XLEN-10; /*!< bit: 10..XLEN-1 Reserved 0 */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MECC_STATUS_Type;
/**
 * \brief Union type to access MIRGB_INFO CSR register.
 * \details Reports the IREGION (internal region) size and base address.
 */
typedef union {
    struct {
        rv_csr_t _reserved0:1;                  /*!< bit: 0 Reserved */
        rv_csr_t iregion_size:5;                /*!< bit: 1..5 Indicates the size of IREGION and it should be power of 2 */
        rv_csr_t _reserved1:4;                  /*!< bit: 6..9 Reserved */
        rv_csr_t iregion_base:__RISCV_XLEN-10;  /*!< bit: 10..PA_SIZE IREGION Base Address */
    } b;                                        /*!< Structure used for bit access */
    rv_csr_t d;                                 /*!< Type used for csr data access */
} CSR_MIRGB_INFO_Type;
/**
 * \brief Union type to access MSTACK_CTL CSR register.
 * \details Controls hardware stack overflow/underflow checking.
 */
typedef union {
    struct {
        rv_csr_t ovf_track_en:1;             /*!< bit: 0 Stack overflow check or track enable */
        rv_csr_t udf_en:1;                   /*!< bit: 1 Stack underflow check enable */
        rv_csr_t mode:1;                     /*!< bit: 2 Mode of stack checking */
        rv_csr_t _reserved0:__RISCV_XLEN-3;  /*!< bit: 3..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MSTACK_CTL_Type;
/**
 * \brief Union type to access MTLB_CTL CSR register.
 * \details Controls MTLB ECC behavior (enable, exception, injection, check)
 * and NAPOT page support.
 */
typedef union {
    struct {
        rv_csr_t tlb_ecc_en:1;               /*!< bit: 0 MTLB ECC enable */
        rv_csr_t tlb_ecc_excp_en:1;          /*!< bit: 1 MTLB double bit ECC exception enable control */
        rv_csr_t tlb_tram_ecc_inj_en:1;      /*!< bit: 2 Controls to inject the ECC Code in CSR mecc_code to MTLB tag rams */
        rv_csr_t tlb_dram_ecc_inj_en:1;      /*!< bit: 3 Controls to inject the ECC Code in CSR mecc_code to MTLB data rams */
        rv_csr_t _reserved0:2;               /*!< bit: 4..5 Reserved */
        rv_csr_t tlb_ecc_chk_en:1;           /*!< bit: 6 Controls to check the ECC when core access to MTLB */
        rv_csr_t napot_en:1;                 /*!< bit: 7 NAPOT page enable */
        rv_csr_t _reserved1:__RISCV_XLEN-8;  /*!< bit: 8..XLEN-1 Reserved */
    } b;                                     /*!< Structure used for bit access */
    rv_csr_t d;                              /*!< Type used for csr data access */
} CSR_MTLB_CTL_Type;
  612. /** @} */ /* End of Doxygen Group NMSIS_Core_Base_Registers */
  613. /* ########################### Core Function Access ########################### */
  614. /**
  615. * \defgroup NMSIS_Core_CSR_Register_Access Core CSR Register Access
  616. * \ingroup NMSIS_Core
  617. * \brief Functions to access the Core CSR Registers
  618. * \details
  619. *
  620. * The following functions or macros provide access to Core CSR registers.
  621. * - \ref NMSIS_Core_CSR_Encoding
  622. * - \ref NMSIS_Core_CSR_Registers
  623. * @{
  624. */
  625. #ifndef __ASSEMBLER__
  626. #ifndef __ICCRISCV__
  627. /**
  628. * \brief CSR operation Macro for csrrw instruction.
  629. * \details
  630. * Read the content of csr register to __v,
  631. * then write content of val into csr register, then return __v
  632. * \param csr CSR macro definition defined in
  633. * \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
  634. * \param val value to store into the CSR register
  635. * \return the CSR register value before written
  636. */
  637. #define __RV_CSR_SWAP(csr, val) \
  638. ({ \
  639. rv_csr_t __v = (unsigned long)(val); \
  640. __ASM volatile("csrrw %0, " STRINGIFY(csr) ", %1" \
  641. : "=r"(__v) \
  642. : "rK"(__v) \
  643. : "memory"); \
  644. __v; \
  645. })
/**
 * \brief CSR operation Macro for csrr instruction.
 * \details
 * Read the content of csr register to __v and return it
 * \param csr CSR macro definition defined in
 *        \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \return the CSR register value
 */
#define __RV_CSR_READ(csr)                                          \
    ({                                                              \
        rv_csr_t __v;                                               \
        /* "memory" clobber: keep ordering vs. surrounding code,    \
         * since CSR reads may have side effects. */                \
        __ASM volatile("csrr %0, " STRINGIFY(csr)                   \
                       : "=r"(__v)                                  \
                       :                                            \
                       : "memory");                                 \
        __v;                                                        \
    })
/**
 * \brief CSR operation Macro for csrw instruction.
 * \details
 * Write the content of val to csr register
 * \param csr CSR macro definition defined in
 *        \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val value to store into the CSR register
 */
#define __RV_CSR_WRITE(csr, val)                                    \
    ({                                                              \
        rv_csr_t __v = (rv_csr_t)(val);                             \
        /* "rK": let small constants use the immediate csrwi form. */ \
        __ASM volatile("csrw " STRINGIFY(csr) ", %0"                \
                       :                                            \
                       : "rK"(__v)                                  \
                       : "memory");                                 \
    })
/**
 * \brief CSR operation Macro for csrrs instruction.
 * \details
 * Read the content of csr register to __v,
 * then set csr register to be __v | val, then return __v
 * \param csr CSR macro definition defined in
 *        \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val Mask value to be used with csrrs instruction
 * \return the CSR register value before written
 */
#define __RV_CSR_READ_SET(csr, val)                                 \
    ({                                                              \
        rv_csr_t __v = (rv_csr_t)(val);                             \
        __ASM volatile("csrrs %0, " STRINGIFY(csr) ", %1"           \
                       : "=r"(__v)                                  \
                       : "rK"(__v)                                  \
                       : "memory");                                 \
        __v;                                                        \
    })
/**
 * \brief CSR operation Macro for csrs instruction.
 * \details
 * Set csr register to be csr_content | val
 * \param csr CSR macro definition defined in
 *        \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val Mask value to be used with csrs instruction
 */
#define __RV_CSR_SET(csr, val)                                      \
    ({                                                              \
        rv_csr_t __v = (rv_csr_t)(val);                             \
        __ASM volatile("csrs " STRINGIFY(csr) ", %0"                \
                       :                                            \
                       : "rK"(__v)                                  \
                       : "memory");                                 \
    })
/**
 * \brief CSR operation Macro for csrrc instruction.
 * \details
 * Read the content of csr register to __v,
 * then set csr register to be __v & ~val, then return __v
 * \param csr CSR macro definition defined in
 *        \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val Mask value to be used with csrrc instruction
 * \return the CSR register value before written
 */
#define __RV_CSR_READ_CLEAR(csr, val)                               \
    ({                                                              \
        rv_csr_t __v = (rv_csr_t)(val);                             \
        __ASM volatile("csrrc %0, " STRINGIFY(csr) ", %1"           \
                       : "=r"(__v)                                  \
                       : "rK"(__v)                                  \
                       : "memory");                                 \
        __v;                                                        \
    })
/**
 * \brief CSR operation Macro for csrc instruction.
 * \details
 * Set csr register to be csr_content & ~val
 * \param csr CSR macro definition defined in
 *        \ref NMSIS_Core_CSR_Registers, eg. \ref CSR_MSTATUS
 * \param val Mask value to be used with csrc instruction
 */
#define __RV_CSR_CLEAR(csr, val)                                    \
    ({                                                              \
        rv_csr_t __v = (rv_csr_t)(val);                             \
        __ASM volatile("csrc " STRINGIFY(csr) ", %0"                \
                       :                                            \
                       : "rK"(__v)                                  \
                       : "memory");                                 \
    })
#else
/* IAR RISC-V compiler: map the CSR access macros onto IAR CSR intrinsics. */
#include <intrinsics.h>
/* NOTE(review): __RV_CSR_SWAP is mapped to __write_csr; confirm the IAR
 * intrinsic returns the previous CSR value, as the GCC csrrw path does. */
#define __RV_CSR_SWAP __write_csr
#define __RV_CSR_READ __read_csr
#define __RV_CSR_WRITE __write_csr
#define __RV_CSR_READ_SET __set_bits_csr
#define __RV_CSR_SET __set_bits_csr
#define __RV_CSR_READ_CLEAR __clear_bits_csr
#define __RV_CSR_CLEAR __clear_bits_csr
#endif /* __ICCRISCV__ */
#endif /* __ASSEMBLER__ */
/**
 * \brief Execute fence instruction, p -> pred, s -> succ
 * \details
 * the FENCE instruction ensures that all memory accesses from instructions preceding
 * the fence in program order (the `predecessor set`) appear earlier in the global memory order than
 * memory accesses from instructions appearing after the fence in program order (the `successor set`).
 * For details, please refer to The RISC-V Instruction Set Manual
 * \param p predecessor set, such as iorw, rw, r, w
 * \param s successor set, such as iorw, rw, r, w
 **/
#define __FENCE(p, s) __ASM volatile ("fence " #p "," #s : : : "memory")
/**
 * \brief Fence.i Instruction
 * \details
 * The FENCE.I instruction is used to synchronize the instruction
 * and data streams.
 */
__STATIC_FORCEINLINE void __FENCE_I(void)
{
#if defined(CPU_SERIES) && CPU_SERIES == 100
    /* No-op on the 100 series — presumably fence.i is unsupported or
     * unnecessary on this core; TODO confirm against the 100-series manual. */
#else
    __ASM volatile("fence.i");
#endif
}
/* Memory barrier helpers built on __FENCE(pred, succ). The non-SMP variants
 * include device I/O ("i"/"o") in their ordering sets; the SMP variants order
 * normal memory ("r"/"w") only. */
/** \brief Read & Write Memory barrier */
#define __RWMB() __FENCE(iorw,iorw)
/** \brief Read Memory barrier */
#define __RMB() __FENCE(ir,ir)
/** \brief Write Memory barrier */
#define __WMB() __FENCE(ow,ow)
/** \brief SMP Read & Write Memory barrier */
#define __SMP_RWMB() __FENCE(rw,rw)
/** \brief SMP Read Memory barrier */
#define __SMP_RMB() __FENCE(r,r)
/** \brief SMP Write Memory barrier */
#define __SMP_WMB() __FENCE(w,w)
/** \brief CPU relax for busy loop (compiler barrier only, emits no instruction) */
#define __CPU_RELAX() __ASM volatile ("" : : : "memory")
/**
 * \brief switch privilege from machine mode to others.
 * \details
 * Execute into \ref entry_point in \ref mode(supervisor or user) with given stack
 * \param mode privilege mode
 * \param stack predefined stack, size should set enough
 * \param entry_point a function pointer to execute
 * \remarks Must be called from machine mode; this function does not return —
 * the trailing mret transfers control to entry_point in the requested mode.
 */
__STATIC_INLINE void __switch_mode(uint8_t mode, uintptr_t stack, void(*entry_point)(void))
{
    unsigned long val = 0;
    /* Set MPP to the requested privilege mode */
    val = __RV_CSR_READ(CSR_MSTATUS);
    val = __RV_INSERT_FIELD(val, MSTATUS_MPP, mode);
    /* Set previous MIE disabled */
    val = __RV_INSERT_FIELD(val, MSTATUS_MPIE, 0);
    __RV_CSR_WRITE(CSR_MSTATUS, val);
    /* Set the entry point in MEPC */
    __RV_CSR_WRITE(CSR_MEPC, (unsigned long)entry_point);
    /* Set the register file */
    /* Switch to the new stack just before mret; no locals may be
     * touched after this point since sp no longer matches this frame. */
    __ASM volatile("mv sp, %0" ::"r"(stack));
    __ASM volatile("mret");
}
/**
 * \brief Enable IRQ Interrupts
 * \details Enables IRQ interrupts by setting the MIE-bit in the MSTATUS Register.
 * \remarks
 * Can only be executed in Privileged modes.
 * \sa __disable_irq
 */
__STATIC_FORCEINLINE void __enable_irq(void)
{
    __RV_CSR_SET(CSR_MSTATUS, MSTATUS_MIE);
}
/**
 * \brief Disable IRQ Interrupts
 * \details Disables IRQ interrupts by clearing the MIE-bit in the MSTATUS Register.
 * \remarks
 * Can only be executed in Privileged modes.
 * \sa __enable_irq
 */
__STATIC_FORCEINLINE void __disable_irq(void)
{
    __RV_CSR_CLEAR(CSR_MSTATUS, MSTATUS_MIE);
}
/**
 * \brief Enable External IRQ Interrupts
 * \details Enables External IRQ interrupts by setting the MEIE-bit in the MIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * \sa __disable_ext_irq
 */
__STATIC_FORCEINLINE void __enable_ext_irq(void)
{
    __RV_CSR_SET(CSR_MIE, MIE_MEIE);
}
/**
 * \brief Disable External IRQ Interrupts
 * \details Disables External IRQ interrupts by clearing the MEIE-bit in the MIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * \sa __enable_ext_irq
 */
__STATIC_FORCEINLINE void __disable_ext_irq(void)
{
    __RV_CSR_CLEAR(CSR_MIE, MIE_MEIE);
}
/**
 * \brief Enable Timer IRQ Interrupts
 * \details Enables Timer IRQ interrupts by setting the MTIE-bit in the MIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * \sa __disable_timer_irq
 */
__STATIC_FORCEINLINE void __enable_timer_irq(void)
{
    __RV_CSR_SET(CSR_MIE, MIE_MTIE);
}
/**
 * \brief Disable Timer IRQ Interrupts
 * \details Disables Timer IRQ interrupts by clearing the MTIE-bit in the MIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * \sa __enable_timer_irq
 */
__STATIC_FORCEINLINE void __disable_timer_irq(void)
{
    __RV_CSR_CLEAR(CSR_MIE, MIE_MTIE);
}
/**
 * \brief Enable software IRQ Interrupts
 * \details Enables software IRQ interrupts by setting the MSIE-bit in the MIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * \sa __disable_sw_irq
 */
__STATIC_FORCEINLINE void __enable_sw_irq(void)
{
    __RV_CSR_SET(CSR_MIE, MIE_MSIE);
}
/**
 * \brief Disable software IRQ Interrupts
 * \details Disables software IRQ interrupts by clearing the MSIE-bit in the MIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * \sa __enable_sw_irq
 */
__STATIC_FORCEINLINE void __disable_sw_irq(void)
{
    __RV_CSR_CLEAR(CSR_MIE, MIE_MSIE);
}
  901. /**
  902. * \brief Disable Core IRQ Interrupt
  903. * \details Disable Core IRQ interrupt by clearing the irq bit in the MIE Register.
  904. * \remarks
  905. * Can only be executed in Privileged modes, available for plic interrupt mode.
  906. */
  907. __STATIC_FORCEINLINE void __disable_core_irq(uint32_t irq)
  908. {
  909. __RV_CSR_CLEAR(CSR_MIE, 1UL << irq);
  910. }
  911. /**
  912. * \brief Enable Core IRQ Interrupt
  913. * \details Enable Core IRQ interrupt by setting the irq bit in the MIE Register.
  914. * \remarks
  915. * Can only be executed in Privileged modes, available for plic interrupt mode.
  916. */
  917. __STATIC_FORCEINLINE void __enable_core_irq(uint32_t irq)
  918. {
  919. __RV_CSR_SET(CSR_MIE, 1UL << irq);
  920. }
  921. /**
  922. * \brief Get Core IRQ Interrupt Pending status
  923. * \details Get Core IRQ interrupt pending status of irq bit.
  924. * \remarks
  925. * Can only be executed in Privileged modes, available for plic interrupt mode.
  926. */
  927. __STATIC_FORCEINLINE uint32_t __get_core_irq_pending(uint32_t irq)
  928. {
  929. return ((__RV_CSR_READ(CSR_MIP) >> irq) & 0x1);
  930. }
  931. /**
  932. * \brief Clear Core IRQ Interrupt Pending status
  933. * \details Clear Core IRQ interrupt pending status of irq bit.
  934. * \remarks
  935. * Can only be executed in Privileged modes, available for plic interrupt mode.
  936. */
  937. __STATIC_FORCEINLINE void __clear_core_irq_pending(uint32_t irq)
  938. {
  939. __RV_CSR_CLEAR(CSR_MIP, 1UL << irq);
  940. }
/**
 * \brief Enable IRQ Interrupts in supervisor mode
 * \details Enables IRQ interrupts by setting the SIE-bit in the SSTATUS Register.
 * \remarks
 * Can only be executed in Privileged modes.
 * \sa __disable_irq_s
 */
__STATIC_FORCEINLINE void __enable_irq_s(void)
{
    __RV_CSR_SET(CSR_SSTATUS, SSTATUS_SIE);
}
/**
 * \brief Disable IRQ Interrupts in supervisor mode
 * \details Disables IRQ interrupts by clearing the SIE-bit in the SSTATUS Register.
 * \remarks
 * Can only be executed in Privileged modes.
 * \sa __enable_irq_s
 */
__STATIC_FORCEINLINE void __disable_irq_s(void)
{
    __RV_CSR_CLEAR(CSR_SSTATUS, SSTATUS_SIE);
}
/**
 * \brief Enable External IRQ Interrupts in supervisor mode
 * \details Enables External IRQ interrupts by setting the SEIE-bit in the SIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * \sa __disable_ext_irq_s
 */
__STATIC_FORCEINLINE void __enable_ext_irq_s(void)
{
    __RV_CSR_SET(CSR_SIE, SIE_SEIE);
}
/**
 * \brief Disable External IRQ Interrupts in supervisor mode
 * \details Disables External IRQ interrupts by clearing the SEIE-bit in the SIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * \sa __enable_ext_irq_s
 */
__STATIC_FORCEINLINE void __disable_ext_irq_s(void)
{
    __RV_CSR_CLEAR(CSR_SIE, SIE_SEIE);
}
/**
 * \brief Enable Timer IRQ Interrupts in supervisor mode
 * \details Enables Timer IRQ interrupts by setting the STIE-bit in the SIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * \sa __disable_timer_irq_s
 */
__STATIC_FORCEINLINE void __enable_timer_irq_s(void)
{
    __RV_CSR_SET(CSR_SIE, SIE_STIE);
}
/**
 * \brief Disable Timer IRQ Interrupts in supervisor mode
 * \details Disables Timer IRQ interrupts by clearing the STIE-bit in the SIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * \sa __enable_timer_irq_s
 */
__STATIC_FORCEINLINE void __disable_timer_irq_s(void)
{
    __RV_CSR_CLEAR(CSR_SIE, SIE_STIE);
}
/**
 * \brief Enable software IRQ Interrupts in supervisor mode
 * \details Enables software IRQ interrupts by setting the SSIE-bit in the SIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * \sa __disable_sw_irq_s
 */
__STATIC_FORCEINLINE void __enable_sw_irq_s(void)
{
    __RV_CSR_SET(CSR_SIE, SIE_SSIE);
}
/**
 * \brief Disable software IRQ Interrupts in supervisor mode
 * \details Disables software IRQ interrupts by clearing the SSIE-bit in the SIE Register.
 * \remarks
 * Can only be executed in Privileged modes, available for plic interrupt mode.
 * \sa __enable_sw_irq_s
 */
__STATIC_FORCEINLINE void __disable_sw_irq_s(void)
{
    __RV_CSR_CLEAR(CSR_SIE, SIE_SSIE);
}
  1021. /**
  1022. * \brief Disable Core IRQ Interrupt in supervisor mode
  1023. * \details Disable Core IRQ interrupt by clearing the irq bit in the SIE Register.
  1024. * \remarks
  1025. * Can only be executed in Privileged modes, available for plic interrupt mode.
  1026. */
  1027. __STATIC_FORCEINLINE void __disable_core_irq_s(uint32_t irq)
  1028. {
  1029. __RV_CSR_CLEAR(CSR_SIE, 1UL << irq);
  1030. }
  1031. /**
  1032. * \brief Enable Core IRQ Interrupt in supervisor mode
  1033. * \details Enable Core IRQ interrupt by setting the irq bit in the MIE Register.
  1034. * \remarks
  1035. * Can only be executed in Privileged modes, available for plic interrupt mode.
  1036. */
  1037. __STATIC_FORCEINLINE void __enable_core_irq_s(uint32_t irq)
  1038. {
  1039. __RV_CSR_SET(CSR_SIE, 1UL << irq);
  1040. }
  1041. /**
  1042. * \brief Get Core IRQ Interrupt Pending status in supervisor mode
  1043. * \details Get Core IRQ interrupt pending status of irq bit.
  1044. * \remarks
  1045. * Can only be executed in Privileged modes, available for plic interrupt mode.
  1046. */
  1047. __STATIC_FORCEINLINE uint32_t __get_core_irq_pending_s(uint32_t irq)
  1048. {
  1049. return ((__RV_CSR_READ(CSR_SIP) >> irq) & 0x1);
  1050. }
  1051. /**
  1052. * \brief Clear Core IRQ Interrupt Pending status in supervisor mode
  1053. * \details Clear Core IRQ interrupt pending status of irq bit.
  1054. * \remarks
  1055. * Can only be executed in Privileged modes, available for plic interrupt mode.
  1056. */
  1057. __STATIC_FORCEINLINE void __clear_core_irq_pending_s(uint32_t irq)
  1058. {
  1059. __RV_CSR_CLEAR(CSR_SIP, 1UL << irq);
  1060. }
/**
 * \brief Read whole 64 bits value of mcycle counter
 * \details This function will read the whole 64 bits of MCYCLE register
 * \return The whole 64 bits value of MCYCLE
 * \remarks It will work for both RV32 and RV64 to get full 64bits value of MCYCLE
 */
__STATIC_INLINE rv_counter_t __get_rv_cycle(void)
{
    __RWMB(); // Make sure previous memory and io operation finished
#if __RISCV_XLEN == 32
#if defined(CPU_SERIES) && CPU_SERIES == 100
    /* 100 series: only the low MCYCLE word is read here. */
    return __RV_CSR_READ(CSR_MCYCLE);
#else
    /* hi/lo/hi read sequence: if the high word changed between the two
     * reads, the low word wrapped in between, so re-read it once to get
     * a coherent 64-bit value. */
    volatile uint32_t high0, low, high;
    uint64_t full;
    high0 = __RV_CSR_READ(CSR_MCYCLEH);
    low = __RV_CSR_READ(CSR_MCYCLE);
    high = __RV_CSR_READ(CSR_MCYCLEH);
    if (high0 != high) {
        low = __RV_CSR_READ(CSR_MCYCLE);
    }
    full = (((uint64_t)high) << 32) | low;
    return full;
#endif
#elif __RISCV_XLEN == 64
    /* RV64: MCYCLE is a single 64-bit CSR. */
    return (uint64_t)__RV_CSR_READ(CSR_MCYCLE);
#else // TODO Need cover for XLEN=128 case in future
    return (uint64_t)__RV_CSR_READ(CSR_MCYCLE);
#endif
}
/**
 * \brief Set whole 64 bits value of mcycle counter
 * \details This function will set the whole 64 bits of MCYCLE register
 * \param cycle the 64-bit value to load into MCYCLE
 * \remarks It will work for both RV32 and RV64 to set full 64bits value of MCYCLE
 */
__STATIC_FORCEINLINE void __set_rv_cycle(rv_counter_t cycle)
{
#if __RISCV_XLEN == 32
#if defined(CPU_SERIES) && CPU_SERIES == 100
    /* 100 series: only the low MCYCLE word is written here. */
    __RV_CSR_WRITE(CSR_MCYCLE, (uint32_t)(cycle));
#else
    /* Zero the low word first so a carry into MCYCLEH cannot occur
     * between the two following writes. */
    __RV_CSR_WRITE(CSR_MCYCLE, 0); // prevent carry
    __RV_CSR_WRITE(CSR_MCYCLEH, (uint32_t)(cycle >> 32));
    __RV_CSR_WRITE(CSR_MCYCLE, (uint32_t)(cycle));
#endif
#elif __RISCV_XLEN == 64
    __RV_CSR_WRITE(CSR_MCYCLE, cycle);
#else // TODO Need cover for XLEN=128 case in future
#endif
}
/**
 * \brief Read whole 64 bits value of machine instruction-retired counter
 * \details This function will read the whole 64 bits of MINSTRET register
 * \return The whole 64 bits value of MINSTRET
 * \remarks It will work for both RV32 and RV64 to get full 64bits value of MINSTRET
 */
__STATIC_INLINE rv_counter_t __get_rv_instret(void)
{
    __RWMB(); // Make sure previous memory and io operation finished
#if __RISCV_XLEN == 32
#if defined(CPU_SERIES) && CPU_SERIES == 100
    /* 100 series: only the low MINSTRET word is read here. */
    return __RV_CSR_READ(CSR_MINSTRET);
#else
    /* hi/lo/hi read sequence: if the high word changed between the two
     * reads, the low word wrapped in between, so re-read it once to get
     * a coherent 64-bit value. */
    volatile uint32_t high0, low, high;
    uint64_t full;
    high0 = __RV_CSR_READ(CSR_MINSTRETH);
    low = __RV_CSR_READ(CSR_MINSTRET);
    high = __RV_CSR_READ(CSR_MINSTRETH);
    if (high0 != high) {
        low = __RV_CSR_READ(CSR_MINSTRET);
    }
    full = (((uint64_t)high) << 32) | low;
    return full;
#endif
#elif __RISCV_XLEN == 64
    /* RV64: MINSTRET is a single 64-bit CSR. */
    return (uint64_t)__RV_CSR_READ(CSR_MINSTRET);
#else // TODO Need cover for XLEN=128 case in future
    return (uint64_t)__RV_CSR_READ(CSR_MINSTRET);
#endif
}
/**
 * \brief Set whole 64 bits value of machine instruction-retired counter
 * \details This function will set the whole 64 bits of MINSTRET register
 * \param instret the 64-bit value to load into MINSTRET
 * \remarks It will work for both RV32 and RV64 to set full 64bits value of MINSTRET
 */
__STATIC_FORCEINLINE void __set_rv_instret(rv_counter_t instret)
{
#if __RISCV_XLEN == 32
#if defined(CPU_SERIES) && CPU_SERIES == 100
    /* 100 series: only the low MINSTRET word is written here. */
    __RV_CSR_WRITE(CSR_MINSTRET, (uint32_t)(instret));
#else
    /* Zero the low word first so a carry into MINSTRETH cannot occur
     * between the two following writes. */
    __RV_CSR_WRITE(CSR_MINSTRET, 0); // prevent carry
    __RV_CSR_WRITE(CSR_MINSTRETH, (uint32_t)(instret >> 32));
    __RV_CSR_WRITE(CSR_MINSTRET, (uint32_t)(instret));
#endif
#elif __RISCV_XLEN == 64
    __RV_CSR_WRITE(CSR_MINSTRET, instret);
#else // TODO Need cover for XLEN=128 case in future
#endif
}
/**
 * \brief Read whole 64 bits value of real-time clock
 * \details This function will read the whole 64 bits of TIME register
 * \return The whole 64 bits value of TIME CSR
 * \remarks It will work for both RV32 and RV64 to get full 64bits value of TIME
 * \attention only available when user mode available
 */
__STATIC_INLINE rv_counter_t __get_rv_time(void)
{
    __RWMB(); // Make sure previous memory and io operation finished
#if __RISCV_XLEN == 32
#if defined(CPU_SERIES) && CPU_SERIES == 100
    // NOTE: when CSR_MIRGB_INFO CSR exist and not zero, it means eclic and systimer present
    if (__RV_CSR_READ(CSR_MIRGB_INFO) == 0) {
        return __RV_CSR_READ(CSR_MTIME);
    }
#if defined(__SYSTIMER_PRESENT) && (__SYSTIMER_PRESENT == 1)
    // Read the memory-mapped system timer directly.
    // NOTE(review): only a 32-bit word at the base address is read here,
    // so the upper half of the 64-bit time is not captured on this path — confirm intended
    return *(uint32_t *) (__SYSTIMER_BASEADDR);
#else
    return 0; // no systimer available: no time source on this path
#endif
#else
    volatile uint32_t high0, low, high;
    uint64_t full;
    // high/low/high read sequence: re-read low if the high word changed,
    // which indicates the low word wrapped mid-sequence.
    high0 = __RV_CSR_READ(CSR_TIMEH);
    low = __RV_CSR_READ(CSR_TIME);
    high = __RV_CSR_READ(CSR_TIMEH);
    if (high0 != high) {
        low = __RV_CSR_READ(CSR_TIME);
    }
    full = (((uint64_t)high) << 32) | low;
    return full;
#endif
#elif __RISCV_XLEN == 64
    return (uint64_t)__RV_CSR_READ(CSR_TIME);
#else // TODO Need cover for XLEN=128 case in future
    return (uint64_t)__RV_CSR_READ(CSR_TIME);
#endif
}
  1200. /**
  1201. * \brief Read the CYCLE register
  1202. * \details This function will read the CYCLE register without taking the
  1203. * CYCLEH register into account
  1204. * \return 32 bits value when XLEN=32
  1205. * 64 bits value when XLEN=64
  1206. * TODO: XLEN=128 need to be supported
  1207. */
  1208. __STATIC_FORCEINLINE unsigned long __read_cycle_csr(void)
  1209. {
  1210. __RWMB(); // Make sure previous memory and io operation finished
  1211. return __RV_CSR_READ(CSR_CYCLE);
  1212. }
  1213. /**
  1214. * \brief Read the INSTRET register
  1215. * \details This function will read the INSTRET register without taking the
  1216. * INSTRETH register into account
  1217. * \return 32 bits value when XLEN=32
  1218. * 64 bits value when XLEN=64
  1219. * TODO: XLEN=128 need to be supported
  1220. */
  1221. __STATIC_FORCEINLINE unsigned long __read_instret_csr(void)
  1222. {
  1223. __RWMB(); // Make sure previous memory and io operation finished
  1224. return __RV_CSR_READ(CSR_INSTRET);
  1225. }
  1226. /**
  1227. * \brief Read the TIME register
  1228. * \details This function will read the TIME register without taking the
  1229. * TIMEH register into account
  1230. * \return 32 bits value when XLEN=32
  1231. * 64 bits value when XLEN=64
  1232. * TODO: XLEN=128 need to be supported
  1233. */
  1234. __STATIC_FORCEINLINE unsigned long __read_time_csr(void)
  1235. {
  1236. __RWMB(); // Make sure previous memory and io operation finished
  1237. return __RV_CSR_READ(CSR_TIME);
  1238. }
  1239. /**
  1240. * \brief Get cluster id of current cluster
  1241. * \details This function will get cluster id of current cluster in a multiple cluster system
  1242. * \return The cluster id of current cluster
  1243. * \remarks mhartid bit 15-8 is designed for cluster id in nuclei subsystem reference design
  1244. * \attention function is allowed in machine mode only
  1245. */
  1246. __STATIC_FORCEINLINE unsigned long __get_cluster_id(void)
  1247. {
  1248. unsigned long id;
  1249. id = (__RV_CSR_READ(CSR_MHARTID) >> 8) & 0xFF;
  1250. return id;
  1251. }
  1252. /**
  1253. * \brief Get hart index of current cluster
  1254. * \details This function will get hart index of current cluster in a multiple cluster system,
  1255. * hart index is hartid - hartid offset, for example if your hartid is 1, and offset is 1, then
  1256. * hart index is 0
  1257. * \return The hart index of current cluster
  1258. * \attention function is allowed in machine mode only
  1259. */
  1260. __STATIC_FORCEINLINE unsigned long __get_hart_index(void)
  1261. {
  1262. unsigned long id;
  1263. #ifdef __HARTID_OFFSET
  1264. id = __RV_CSR_READ(CSR_MHARTID) - __HARTID_OFFSET;
  1265. #else
  1266. id = __RV_CSR_READ(CSR_MHARTID);
  1267. #endif
  1268. return id;
  1269. }
  1270. /**
  1271. * \brief Get hart id of current cluster
  1272. * \details This function will get hart id of current cluster in a multiple cluster system
  1273. * \return The hart id of current cluster
  1274. * \remarks it will return full hartid not part of it for reference subsystem design,
  1275. * if your reference subsystem design has hartid offset, please define __HARTID_OFFSET in
  1276. * <Device>.h
  1277. * \attention function is allowed in machine mode only
  1278. */
  1279. __STATIC_FORCEINLINE unsigned long __get_hart_id(void)
  1280. {
  1281. unsigned long id;
  1282. id = __RV_CSR_READ(CSR_MHARTID);
  1283. return id;
  1284. }
  1285. /**
  1286. * \brief Get cluster id of current cluster in supervisor mode
  1287. * \details This function will get cluster id of current cluster in a multiple cluster system
  1288. * \return The cluster id of current cluster
  1289. * \remarks hartid bit 15-8 is designed for cluster id in nuclei subsystem reference design
  1290. * \attention function is allowed in machine/supervisor mode,
  1291. * currently only present in 600/900 series from 2024 released version
  1292. */
  1293. __STATIC_FORCEINLINE unsigned long __get_cluster_id_s(void)
  1294. {
  1295. unsigned long id;
  1296. id = (__RV_CSR_READ(CSR_SHARTID) >> 8) & 0xFF;
  1297. return id;
  1298. }
  1299. /**
  1300. * \brief Get hart index of current cluster in supervisor mode
  1301. * \details This function will get hart index of current cluster in a multiple cluster system,
  1302. * hart index is hartid - hartid offset, for example if your hartid is 1, and offset is 1, then
  1303. * hart index is 0
  1304. * \return The hart index of current cluster
  1305. * \attention function is allowed in machine/supervisor mode,
  1306. * currently only present in 600/900 series from 2024 released version
  1307. */
  1308. __STATIC_FORCEINLINE unsigned long __get_hart_index_s(void)
  1309. {
  1310. unsigned long id;
  1311. #ifdef __HARTID_OFFSET
  1312. id = __RV_CSR_READ(CSR_SHARTID) - __HARTID_OFFSET;
  1313. #else
  1314. id = __RV_CSR_READ(CSR_SHARTID);
  1315. #endif
  1316. return id;
  1317. }
  1318. /**
  1319. * \brief Get hart id of current cluster in supervisor mode
  1320. * \details This function will get hart id of current cluster in a multiple cluster system
  1321. * \return The hart id of current cluster
  1322. * \remarks it will return full hartid not part of it for reference subsystem design,
  1323. * if your reference subsystem design has hartid offset, please define __HARTID_OFFSET in
  1324. * <Device>.h
  1325. * \attention function is allowed in machine/supervisor mode,
  1326. * currently only present in 600/900 series from 2024 released version
  1327. */
  1328. __STATIC_FORCEINLINE unsigned long __get_hart_id_s(void)
  1329. {
  1330. unsigned long id;
  1331. id = __RV_CSR_READ(CSR_SHARTID);
  1332. return id;
  1333. }
  1334. /** @} */ /* End of Doxygen Group NMSIS_Core_CSR_Register_Access */
  1335. /* ########################### CPU Intrinsic Functions ########################### */
  1336. /**
 * \defgroup NMSIS_Core_CPU_Intrinsic Intrinsic Functions for CPU Instructions
  1338. * \ingroup NMSIS_Core
  1339. * \brief Functions that generate RISC-V CPU instructions.
  1340. * \details
  1341. *
  1342. * The following functions generate specified RISC-V instructions that cannot be directly accessed by compiler.
  1343. * @{
  1344. */
/**
 * \brief NOP Instruction
 * \details
 * No Operation does nothing.
 * This instruction can be used for code alignment purposes.
 */
__STATIC_FORCEINLINE void __NOP(void)
{
    __ASM volatile("nop"); // emit a single machine-level nop
}
/**
 * \brief Wait For Interrupt
 * \details
 * Wait For Interrupt is executed using CSR_WFE.WFE=0 and WFI instruction.
 * It will suspend execution until interrupt, NMI or Debug happened.
 * When Core is waked up by interrupt, if
 * 1. mstatus.MIE == 1(interrupt enabled), Core will enter ISR code
 * 2. mstatus.MIE == 0(interrupt disabled), Core will resume previous execution
 */
__STATIC_FORCEINLINE void __WFI(void)
{
    __RV_CSR_CLEAR(CSR_WFE, WFE_WFE); // WFE bit = 0 selects interrupt-wakeup mode
    __ASM volatile("wfi");
}
/**
 * \brief Wait For Event
 * \details
 * Wait For Event is executed using CSR_WFE.WFE=1 and WFI instruction.
 * It will suspend execution until event, NMI or Debug happened.
 * When Core is waked up, Core will resume previous execution
 */
__STATIC_FORCEINLINE void __WFE(void)
{
    __RV_CSR_SET(CSR_WFE, WFE_WFE);   // WFE bit = 1 selects event-wakeup mode
    __ASM volatile("wfi");
    __RV_CSR_CLEAR(CSR_WFE, WFE_WFE); // restore default interrupt-wakeup mode
}
/**
 * \brief Breakpoint Instruction
 * \details
 * Causes the processor to enter Debug state.
 * Debug tools can use this to investigate system state
 * when the instruction at a particular address is reached.
 */
__STATIC_FORCEINLINE void __EBREAK(void)
{
    __ASM volatile("ebreak");
}
/**
 * \brief Environment Call Instruction
 * \details
 * The ECALL instruction is used to make a service request to
 * the execution environment.
 */
__STATIC_FORCEINLINE void __ECALL(void)
{
    __ASM volatile("ecall");
}
/**
 * \brief WFI Sleep Mode enumeration
 * \details Values written to the SLEEPVALUE CSR via \ref __set_wfi_sleepmode
 */
typedef enum WFI_SleepMode {
    WFI_SHALLOW_SLEEP = 0, /*!< Shallow sleep mode, the core_clk will poweroff */
    WFI_DEEP_SLEEP = 1     /*!< Deep sleep mode, the core_clk and core_ano_clk will poweroff */
} WFI_SleepMode_Type;
/**
 * \brief Set Sleep mode of WFI
 * \details
 * Set the SLEEPVALUE CSR register to control the
 * WFI Sleep mode.
 * \param [in] mode The sleep mode to be set, see \ref WFI_SleepMode_Type
 */
__STATIC_FORCEINLINE void __set_wfi_sleepmode(WFI_SleepMode_Type mode)
{
    __RV_CSR_WRITE(CSR_SLEEPVALUE, mode);
}
/**
 * \brief Send TX Event
 * \details
 * Set the CSR TXEVT to control send a TX Event.
 * The Core will output signal tx_evt as output event signal.
 */
__STATIC_FORCEINLINE void __TXEVT(void)
{
    __RV_CSR_SET(CSR_TXEVT, 0x1); // set bit 0 to trigger the event pulse
}
/**
 * \brief Enable MCYCLE counter
 * \details
 * Clear the CY bit of MCOUNTINHIBIT to 0 to enable MCYCLE Counter
 */
__STATIC_FORCEINLINE void __enable_mcycle_counter(void)
{
    __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_CY);
}
/**
 * \brief Disable MCYCLE counter
 * \details
 * Set the CY bit of MCOUNTINHIBIT to 1 to disable MCYCLE Counter
 */
__STATIC_FORCEINLINE void __disable_mcycle_counter(void)
{
    __RV_CSR_SET(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_CY);
}
/**
 * \brief Enable MINSTRET counter
 * \details
 * Clear the IR bit of MCOUNTINHIBIT to 0 to enable MINSTRET Counter
 */
__STATIC_FORCEINLINE void __enable_minstret_counter(void)
{
    __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_IR);
}
/**
 * \brief Disable MINSTRET counter
 * \details
 * Set the IR bit of MCOUNTINHIBIT to 1 to disable MINSTRET Counter
 */
__STATIC_FORCEINLINE void __disable_minstret_counter(void)
{
    __RV_CSR_SET(CSR_MCOUNTINHIBIT, MCOUNTINHIBIT_IR);
}
  1467. /**
  1468. * \brief Enable selected hardware performance monitor counter
  1469. * \param [in] idx the index of the hardware performance monitor counter
  1470. * \details
  1471. * enable selected hardware performance monitor counter mhpmcounterx.
  1472. */
  1473. __STATIC_FORCEINLINE void __enable_mhpm_counter(unsigned long idx)
  1474. {
  1475. __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, (1UL << idx));
  1476. }
  1477. /**
  1478. * \brief Disable selected hardware performance monitor counter
  1479. * \param [in] idx the index of the hardware performance monitor counter
  1480. * \details
  1481. * Disable selected hardware performance monitor counter mhpmcounterx.
  1482. */
  1483. __STATIC_FORCEINLINE void __disable_mhpm_counter(unsigned long idx)
  1484. {
  1485. __RV_CSR_SET(CSR_MCOUNTINHIBIT, (1UL << idx));
  1486. }
/**
 * \brief Enable hardware performance counters with mask
 * \param [in] mask mask of selected hardware performance monitor counters
 * \details
 * enable mhpmcounterx with mask, only the masked ones will be enabled.
 * mhpmcounter3-mhpmcounter31 are for high performance monitor counters.
 */
__STATIC_FORCEINLINE void __enable_mhpm_counters(unsigned long mask)
{
    __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, mask);
}
/**
 * \brief Disable hardware performance counters with mask
 * \param [in] mask mask of selected hardware performance monitor counters
 * \details
 * Disable mhpmcounterx with mask, only the masked ones will be disabled.
 * mhpmcounter3-mhpmcounter31 are for high performance monitor counters.
 */
__STATIC_FORCEINLINE void __disable_mhpm_counters(unsigned long mask)
{
    __RV_CSR_SET(CSR_MCOUNTINHIBIT, mask);
}
/**
 * \brief Enable all MCYCLE & MINSTRET & MHPMCOUNTER counter
 * \details
 * Clear all to zero to enable all counters,
 * such as cycle, instret, high performance monitor counters
 */
__STATIC_FORCEINLINE void __enable_all_counter(void)
{
    // Clear all 32 defined inhibit bits in one CSR operation
    __RV_CSR_CLEAR(CSR_MCOUNTINHIBIT, 0xFFFFFFFF);
}
/**
 * \brief Disable all MCYCLE & MINSTRET & MHPMCOUNTER counter
 * \details
 * Set all to one to disable all counters,
 * such as cycle, instret, high performance monitor counters
 */
__STATIC_FORCEINLINE void __disable_all_counter(void)
{
    // Set all 32 defined inhibit bits in one CSR operation
    __RV_CSR_SET(CSR_MCOUNTINHIBIT, 0xFFFFFFFF);
}
/**
 * \brief Set event for selected high performance monitor event
 * \param [in] idx HPMEVENTx CSR index(3-31); out-of-range values are ignored
 * \param [in] event HPMEVENTx Register value to set
 * \details
 * Set event for high performance monitor event register.
 * CSR numbers cannot be computed at runtime, hence the explicit
 * switch over every MHPMEVENT3..MHPMEVENT31 CSR.
 */
__STATIC_INLINE void __set_hpm_event(unsigned long idx, unsigned long event)
{
    switch (idx) {
        case 3: __RV_CSR_WRITE(CSR_MHPMEVENT3, event); break;
        case 4: __RV_CSR_WRITE(CSR_MHPMEVENT4, event); break;
        case 5: __RV_CSR_WRITE(CSR_MHPMEVENT5, event); break;
        case 6: __RV_CSR_WRITE(CSR_MHPMEVENT6, event); break;
        case 7: __RV_CSR_WRITE(CSR_MHPMEVENT7, event); break;
        case 8: __RV_CSR_WRITE(CSR_MHPMEVENT8, event); break;
        case 9: __RV_CSR_WRITE(CSR_MHPMEVENT9, event); break;
        case 10: __RV_CSR_WRITE(CSR_MHPMEVENT10, event); break;
        case 11: __RV_CSR_WRITE(CSR_MHPMEVENT11, event); break;
        case 12: __RV_CSR_WRITE(CSR_MHPMEVENT12, event); break;
        case 13: __RV_CSR_WRITE(CSR_MHPMEVENT13, event); break;
        case 14: __RV_CSR_WRITE(CSR_MHPMEVENT14, event); break;
        case 15: __RV_CSR_WRITE(CSR_MHPMEVENT15, event); break;
        case 16: __RV_CSR_WRITE(CSR_MHPMEVENT16, event); break;
        case 17: __RV_CSR_WRITE(CSR_MHPMEVENT17, event); break;
        case 18: __RV_CSR_WRITE(CSR_MHPMEVENT18, event); break;
        case 19: __RV_CSR_WRITE(CSR_MHPMEVENT19, event); break;
        case 20: __RV_CSR_WRITE(CSR_MHPMEVENT20, event); break;
        case 21: __RV_CSR_WRITE(CSR_MHPMEVENT21, event); break;
        case 22: __RV_CSR_WRITE(CSR_MHPMEVENT22, event); break;
        case 23: __RV_CSR_WRITE(CSR_MHPMEVENT23, event); break;
        case 24: __RV_CSR_WRITE(CSR_MHPMEVENT24, event); break;
        case 25: __RV_CSR_WRITE(CSR_MHPMEVENT25, event); break;
        case 26: __RV_CSR_WRITE(CSR_MHPMEVENT26, event); break;
        case 27: __RV_CSR_WRITE(CSR_MHPMEVENT27, event); break;
        case 28: __RV_CSR_WRITE(CSR_MHPMEVENT28, event); break;
        case 29: __RV_CSR_WRITE(CSR_MHPMEVENT29, event); break;
        case 30: __RV_CSR_WRITE(CSR_MHPMEVENT30, event); break;
        case 31: __RV_CSR_WRITE(CSR_MHPMEVENT31, event); break;
        default: break; // idx outside 3-31: silently ignored
    }
}
/**
 * \brief Get event for selected high performance monitor event
 * \param [in] idx HPMEVENTx CSR index(3-31); out-of-range values return 0
 * \details
 * Get high performance monitor event register value.
 * CSR numbers cannot be computed at runtime, hence the explicit
 * switch over every MHPMEVENT3..MHPMEVENT31 CSR.
 * \return HPMEVENTx Register value, or 0 when idx is out of range
 */
__STATIC_INLINE unsigned long __get_hpm_event(unsigned long idx)
{
    switch (idx) {
        case 3: return __RV_CSR_READ(CSR_MHPMEVENT3);
        case 4: return __RV_CSR_READ(CSR_MHPMEVENT4);
        case 5: return __RV_CSR_READ(CSR_MHPMEVENT5);
        case 6: return __RV_CSR_READ(CSR_MHPMEVENT6);
        case 7: return __RV_CSR_READ(CSR_MHPMEVENT7);
        case 8: return __RV_CSR_READ(CSR_MHPMEVENT8);
        case 9: return __RV_CSR_READ(CSR_MHPMEVENT9);
        case 10: return __RV_CSR_READ(CSR_MHPMEVENT10);
        case 11: return __RV_CSR_READ(CSR_MHPMEVENT11);
        case 12: return __RV_CSR_READ(CSR_MHPMEVENT12);
        case 13: return __RV_CSR_READ(CSR_MHPMEVENT13);
        case 14: return __RV_CSR_READ(CSR_MHPMEVENT14);
        case 15: return __RV_CSR_READ(CSR_MHPMEVENT15);
        case 16: return __RV_CSR_READ(CSR_MHPMEVENT16);
        case 17: return __RV_CSR_READ(CSR_MHPMEVENT17);
        case 18: return __RV_CSR_READ(CSR_MHPMEVENT18);
        case 19: return __RV_CSR_READ(CSR_MHPMEVENT19);
        case 20: return __RV_CSR_READ(CSR_MHPMEVENT20);
        case 21: return __RV_CSR_READ(CSR_MHPMEVENT21);
        case 22: return __RV_CSR_READ(CSR_MHPMEVENT22);
        case 23: return __RV_CSR_READ(CSR_MHPMEVENT23);
        case 24: return __RV_CSR_READ(CSR_MHPMEVENT24);
        case 25: return __RV_CSR_READ(CSR_MHPMEVENT25);
        case 26: return __RV_CSR_READ(CSR_MHPMEVENT26);
        case 27: return __RV_CSR_READ(CSR_MHPMEVENT27);
        case 28: return __RV_CSR_READ(CSR_MHPMEVENT28);
        case 29: return __RV_CSR_READ(CSR_MHPMEVENT29);
        case 30: return __RV_CSR_READ(CSR_MHPMEVENT30);
        case 31: return __RV_CSR_READ(CSR_MHPMEVENT31);
        default: return 0;
    }
}
/**
 * \brief Set value for selected high performance monitor counter
 * \param [in] idx HPMCOUNTERx CSR index(3-31); out-of-range values are ignored
 * \param [in] value HPMCOUNTERx Register value to set
 * \details
 * Set value for high performance monitor counter register.
 * On RV32 each 64-bit counter spans an MHPMCOUNTERx/MHPMCOUNTERxH CSR pair:
 * the low word is cleared first so a running counter cannot carry into the
 * high word between the two writes, then high is written, then low.
 * CSR numbers cannot be computed at runtime, hence the explicit switch.
 */
__STATIC_INLINE void __set_hpm_counter(unsigned long idx, uint64_t value)
{
    switch (idx) {
#if __RISCV_XLEN == 32
        case 3: __RV_CSR_WRITE(CSR_MHPMCOUNTER3, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER3H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER3, (uint32_t)(value)); break;
        case 4: __RV_CSR_WRITE(CSR_MHPMCOUNTER4, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER4H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER4, (uint32_t)(value)); break;
        case 5: __RV_CSR_WRITE(CSR_MHPMCOUNTER5, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER5H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER5, (uint32_t)(value)); break;
        case 6: __RV_CSR_WRITE(CSR_MHPMCOUNTER6, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER6H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER6, (uint32_t)(value)); break;
        case 7: __RV_CSR_WRITE(CSR_MHPMCOUNTER7, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER7H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER7, (uint32_t)(value)); break;
        case 8: __RV_CSR_WRITE(CSR_MHPMCOUNTER8, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER8H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER8, (uint32_t)(value)); break;
        case 9: __RV_CSR_WRITE(CSR_MHPMCOUNTER9, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER9H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER9, (uint32_t)(value)); break;
        case 10: __RV_CSR_WRITE(CSR_MHPMCOUNTER10, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER10H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER10, (uint32_t)(value)); break;
        case 11: __RV_CSR_WRITE(CSR_MHPMCOUNTER11, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER11H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER11, (uint32_t)(value)); break;
        case 12: __RV_CSR_WRITE(CSR_MHPMCOUNTER12, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER12H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER12, (uint32_t)(value)); break;
        case 13: __RV_CSR_WRITE(CSR_MHPMCOUNTER13, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER13H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER13, (uint32_t)(value)); break;
        case 14: __RV_CSR_WRITE(CSR_MHPMCOUNTER14, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER14H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER14, (uint32_t)(value)); break;
        case 15: __RV_CSR_WRITE(CSR_MHPMCOUNTER15, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER15H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER15, (uint32_t)(value)); break;
        case 16: __RV_CSR_WRITE(CSR_MHPMCOUNTER16, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER16H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER16, (uint32_t)(value)); break;
        case 17: __RV_CSR_WRITE(CSR_MHPMCOUNTER17, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER17H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER17, (uint32_t)(value)); break;
        case 18: __RV_CSR_WRITE(CSR_MHPMCOUNTER18, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER18H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER18, (uint32_t)(value)); break;
        case 19: __RV_CSR_WRITE(CSR_MHPMCOUNTER19, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER19H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER19, (uint32_t)(value)); break;
        case 20: __RV_CSR_WRITE(CSR_MHPMCOUNTER20, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER20H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER20, (uint32_t)(value)); break;
        case 21: __RV_CSR_WRITE(CSR_MHPMCOUNTER21, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER21H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER21, (uint32_t)(value)); break;
        case 22: __RV_CSR_WRITE(CSR_MHPMCOUNTER22, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER22H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER22, (uint32_t)(value)); break;
        case 23: __RV_CSR_WRITE(CSR_MHPMCOUNTER23, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER23H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER23, (uint32_t)(value)); break;
        case 24: __RV_CSR_WRITE(CSR_MHPMCOUNTER24, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER24H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER24, (uint32_t)(value)); break;
        case 25: __RV_CSR_WRITE(CSR_MHPMCOUNTER25, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER25H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER25, (uint32_t)(value)); break;
        case 26: __RV_CSR_WRITE(CSR_MHPMCOUNTER26, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER26H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER26, (uint32_t)(value)); break;
        case 27: __RV_CSR_WRITE(CSR_MHPMCOUNTER27, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER27H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER27, (uint32_t)(value)); break;
        case 28: __RV_CSR_WRITE(CSR_MHPMCOUNTER28, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER28H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER28, (uint32_t)(value)); break;
        case 29: __RV_CSR_WRITE(CSR_MHPMCOUNTER29, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER29H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER29, (uint32_t)(value)); break;
        case 30: __RV_CSR_WRITE(CSR_MHPMCOUNTER30, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER30H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER30, (uint32_t)(value)); break;
        case 31: __RV_CSR_WRITE(CSR_MHPMCOUNTER31, 0); // prevent carry
            __RV_CSR_WRITE(CSR_MHPMCOUNTER31H, (uint32_t)(value >> 32));
            __RV_CSR_WRITE(CSR_MHPMCOUNTER31, (uint32_t)(value)); break;
#elif __RISCV_XLEN == 64
        // RV64: each counter is a single 64-bit CSR, one write suffices
        case 3: __RV_CSR_WRITE(CSR_MHPMCOUNTER3, (value)); break;
        case 4: __RV_CSR_WRITE(CSR_MHPMCOUNTER4, (value)); break;
        case 5: __RV_CSR_WRITE(CSR_MHPMCOUNTER5, (value)); break;
        case 6: __RV_CSR_WRITE(CSR_MHPMCOUNTER6, (value)); break;
        case 7: __RV_CSR_WRITE(CSR_MHPMCOUNTER7, (value)); break;
        case 8: __RV_CSR_WRITE(CSR_MHPMCOUNTER8, (value)); break;
        case 9: __RV_CSR_WRITE(CSR_MHPMCOUNTER9, (value)); break;
        case 10: __RV_CSR_WRITE(CSR_MHPMCOUNTER10, (value)); break;
        case 11: __RV_CSR_WRITE(CSR_MHPMCOUNTER11, (value)); break;
        case 12: __RV_CSR_WRITE(CSR_MHPMCOUNTER12, (value)); break;
        case 13: __RV_CSR_WRITE(CSR_MHPMCOUNTER13, (value)); break;
        case 14: __RV_CSR_WRITE(CSR_MHPMCOUNTER14, (value)); break;
        case 15: __RV_CSR_WRITE(CSR_MHPMCOUNTER15, (value)); break;
        case 16: __RV_CSR_WRITE(CSR_MHPMCOUNTER16, (value)); break;
        case 17: __RV_CSR_WRITE(CSR_MHPMCOUNTER17, (value)); break;
        case 18: __RV_CSR_WRITE(CSR_MHPMCOUNTER18, (value)); break;
        case 19: __RV_CSR_WRITE(CSR_MHPMCOUNTER19, (value)); break;
        case 20: __RV_CSR_WRITE(CSR_MHPMCOUNTER20, (value)); break;
        case 21: __RV_CSR_WRITE(CSR_MHPMCOUNTER21, (value)); break;
        case 22: __RV_CSR_WRITE(CSR_MHPMCOUNTER22, (value)); break;
        case 23: __RV_CSR_WRITE(CSR_MHPMCOUNTER23, (value)); break;
        case 24: __RV_CSR_WRITE(CSR_MHPMCOUNTER24, (value)); break;
        case 25: __RV_CSR_WRITE(CSR_MHPMCOUNTER25, (value)); break;
        case 26: __RV_CSR_WRITE(CSR_MHPMCOUNTER26, (value)); break;
        case 27: __RV_CSR_WRITE(CSR_MHPMCOUNTER27, (value)); break;
        case 28: __RV_CSR_WRITE(CSR_MHPMCOUNTER28, (value)); break;
        case 29: __RV_CSR_WRITE(CSR_MHPMCOUNTER29, (value)); break;
        case 30: __RV_CSR_WRITE(CSR_MHPMCOUNTER30, (value)); break;
        case 31: __RV_CSR_WRITE(CSR_MHPMCOUNTER31, (value)); break;
#else
#endif
        default: break; // idx outside 3-31: silently ignored
    }
}
  1747. /**
  1748. * \brief Get value of selected high performance monitor counter
  1749. * \param [in] idx HPMCOUNTERx CSR index(3-31)
  1750. * \details
  1751. * Get high performance monitor counter register value
  1752. * \return HPMCOUNTERx Register value
  1753. */
  1754. __STATIC_INLINE uint64_t __get_hpm_counter(unsigned long idx)
  1755. {
  1756. __RWMB(); // Make sure previous memory and io operation finished
  1757. #if __RISCV_XLEN == 32
  1758. volatile uint32_t high0, low, high;
  1759. uint64_t full;
  1760. switch (idx) {
  1761. case 0: return __get_rv_cycle();
  1762. case 2: return __get_rv_instret();
  1763. case 3: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER3H);
  1764. low = __RV_CSR_READ(CSR_MHPMCOUNTER3);
  1765. high = __RV_CSR_READ(CSR_MHPMCOUNTER3H);
  1766. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER3); }
  1767. full = (((uint64_t)high) << 32) | low; return full;
  1768. case 4: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER4H);
  1769. low = __RV_CSR_READ(CSR_MHPMCOUNTER4);
  1770. high = __RV_CSR_READ(CSR_MHPMCOUNTER4H);
  1771. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER4); }
  1772. full = (((uint64_t)high) << 32) | low; return full;
  1773. case 5: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER5H);
  1774. low = __RV_CSR_READ(CSR_MHPMCOUNTER5);
  1775. high = __RV_CSR_READ(CSR_MHPMCOUNTER5H);
  1776. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER5); }
  1777. full = (((uint64_t)high) << 32) | low; return full;
  1778. case 6: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER6H);
  1779. low = __RV_CSR_READ(CSR_MHPMCOUNTER6);
  1780. high = __RV_CSR_READ(CSR_MHPMCOUNTER6H);
  1781. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER6); }
  1782. full = (((uint64_t)high) << 32) | low; return full;
  1783. case 7: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER7H);
  1784. low = __RV_CSR_READ(CSR_MHPMCOUNTER7);
  1785. high = __RV_CSR_READ(CSR_MHPMCOUNTER7H);
  1786. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER7); }
  1787. full = (((uint64_t)high) << 32) | low; return full;
  1788. case 8: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER8H);
  1789. low = __RV_CSR_READ(CSR_MHPMCOUNTER8);
  1790. high = __RV_CSR_READ(CSR_MHPMCOUNTER8H);
  1791. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER8); }
  1792. full = (((uint64_t)high) << 32) | low; return full;
  1793. case 9: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER9H);
  1794. low = __RV_CSR_READ(CSR_MHPMCOUNTER9);
  1795. high = __RV_CSR_READ(CSR_MHPMCOUNTER9H);
  1796. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER9); }
  1797. full = (((uint64_t)high) << 32) | low; return full;
  1798. case 10: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER10H);
  1799. low = __RV_CSR_READ(CSR_MHPMCOUNTER10);
  1800. high = __RV_CSR_READ(CSR_MHPMCOUNTER10H);
  1801. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER10); }
  1802. full = (((uint64_t)high) << 32) | low; return full;
  1803. case 11: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER11H);
  1804. low = __RV_CSR_READ(CSR_MHPMCOUNTER11);
  1805. high = __RV_CSR_READ(CSR_MHPMCOUNTER11H);
  1806. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER11); }
  1807. full = (((uint64_t)high) << 32) | low; return full;
  1808. case 12: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER12H);
  1809. low = __RV_CSR_READ(CSR_MHPMCOUNTER12);
  1810. high = __RV_CSR_READ(CSR_MHPMCOUNTER12H);
  1811. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER12); }
  1812. full = (((uint64_t)high) << 32) | low; return full;
  1813. case 13: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER13H);
  1814. low = __RV_CSR_READ(CSR_MHPMCOUNTER13);
  1815. high = __RV_CSR_READ(CSR_MHPMCOUNTER13H);
  1816. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER13); }
  1817. full = (((uint64_t)high) << 32) | low; return full;
  1818. case 14: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER14H);
  1819. low = __RV_CSR_READ(CSR_MHPMCOUNTER14);
  1820. high = __RV_CSR_READ(CSR_MHPMCOUNTER14H);
  1821. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER14); }
  1822. full = (((uint64_t)high) << 32) | low; return full;
  1823. case 15: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER15H);
  1824. low = __RV_CSR_READ(CSR_MHPMCOUNTER15);
  1825. high = __RV_CSR_READ(CSR_MHPMCOUNTER15H);
  1826. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER15); }
  1827. full = (((uint64_t)high) << 32) | low; return full;
  1828. case 16: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER16H);
  1829. low = __RV_CSR_READ(CSR_MHPMCOUNTER16);
  1830. high = __RV_CSR_READ(CSR_MHPMCOUNTER16H);
  1831. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER16); }
  1832. full = (((uint64_t)high) << 32) | low; return full;
  1833. case 17: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER17H);
  1834. low = __RV_CSR_READ(CSR_MHPMCOUNTER17);
  1835. high = __RV_CSR_READ(CSR_MHPMCOUNTER17H);
  1836. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER17); }
  1837. full = (((uint64_t)high) << 32) | low; return full;
  1838. case 18: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER18H);
  1839. low = __RV_CSR_READ(CSR_MHPMCOUNTER18);
  1840. high = __RV_CSR_READ(CSR_MHPMCOUNTER18H);
  1841. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER18); }
  1842. full = (((uint64_t)high) << 32) | low; return full;
  1843. case 19: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER19H);
  1844. low = __RV_CSR_READ(CSR_MHPMCOUNTER19);
  1845. high = __RV_CSR_READ(CSR_MHPMCOUNTER19H);
  1846. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER19); }
  1847. full = (((uint64_t)high) << 32) | low; return full;
  1848. case 20: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER20H);
  1849. low = __RV_CSR_READ(CSR_MHPMCOUNTER20);
  1850. high = __RV_CSR_READ(CSR_MHPMCOUNTER20H);
  1851. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER20); }
  1852. full = (((uint64_t)high) << 32) | low; return full;
  1853. case 21: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER21H);
  1854. low = __RV_CSR_READ(CSR_MHPMCOUNTER21);
  1855. high = __RV_CSR_READ(CSR_MHPMCOUNTER21H);
  1856. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER21); }
  1857. full = (((uint64_t)high) << 32) | low; return full;
  1858. case 22: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER22H);
  1859. low = __RV_CSR_READ(CSR_MHPMCOUNTER22);
  1860. high = __RV_CSR_READ(CSR_MHPMCOUNTER22H);
  1861. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER22); }
  1862. full = (((uint64_t)high) << 32) | low; return full;
  1863. case 23: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER23H);
  1864. low = __RV_CSR_READ(CSR_MHPMCOUNTER23);
  1865. high = __RV_CSR_READ(CSR_MHPMCOUNTER23H);
  1866. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER23); }
  1867. full = (((uint64_t)high) << 32) | low; return full;
  1868. case 24: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER24H);
  1869. low = __RV_CSR_READ(CSR_MHPMCOUNTER24);
  1870. high = __RV_CSR_READ(CSR_MHPMCOUNTER24H);
  1871. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER24); }
  1872. full = (((uint64_t)high) << 32) | low; return full;
  1873. case 25: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER25H);
  1874. low = __RV_CSR_READ(CSR_MHPMCOUNTER25);
  1875. high = __RV_CSR_READ(CSR_MHPMCOUNTER25H);
  1876. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER25); }
  1877. full = (((uint64_t)high) << 32) | low; return full;
  1878. case 26: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER26H);
  1879. low = __RV_CSR_READ(CSR_MHPMCOUNTER26);
  1880. high = __RV_CSR_READ(CSR_MHPMCOUNTER26H);
  1881. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER26); }
  1882. full = (((uint64_t)high) << 32) | low; return full;
  1883. case 27: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER27H);
  1884. low = __RV_CSR_READ(CSR_MHPMCOUNTER27);
  1885. high = __RV_CSR_READ(CSR_MHPMCOUNTER27H);
  1886. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER27); }
  1887. full = (((uint64_t)high) << 32) | low; return full;
  1888. case 28: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER28H);
  1889. low = __RV_CSR_READ(CSR_MHPMCOUNTER28);
  1890. high = __RV_CSR_READ(CSR_MHPMCOUNTER28H);
  1891. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER28); }
  1892. full = (((uint64_t)high) << 32) | low; return full;
  1893. case 29: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER29H);
  1894. low = __RV_CSR_READ(CSR_MHPMCOUNTER29);
  1895. high = __RV_CSR_READ(CSR_MHPMCOUNTER29H);
  1896. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER29); }
  1897. full = (((uint64_t)high) << 32) | low; return full;
  1898. case 30: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER30H);
  1899. low = __RV_CSR_READ(CSR_MHPMCOUNTER30);
  1900. high = __RV_CSR_READ(CSR_MHPMCOUNTER30H);
  1901. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER30); }
  1902. full = (((uint64_t)high) << 32) | low; return full;
  1903. case 31: high0 = __RV_CSR_READ(CSR_MHPMCOUNTER31H);
  1904. low = __RV_CSR_READ(CSR_MHPMCOUNTER31);
  1905. high = __RV_CSR_READ(CSR_MHPMCOUNTER31H);
  1906. if (high0 != high) { low = __RV_CSR_READ(CSR_MHPMCOUNTER31); }
  1907. full = (((uint64_t)high) << 32) | low; return full;
  1908. #elif __RISCV_XLEN == 64
  1909. switch (idx) {
  1910. case 0: return __get_rv_cycle();
  1911. case 2: return __get_rv_instret();
  1912. case 3: return __RV_CSR_READ(CSR_MHPMCOUNTER3);
  1913. case 4: return __RV_CSR_READ(CSR_MHPMCOUNTER4);
  1914. case 5: return __RV_CSR_READ(CSR_MHPMCOUNTER5);
  1915. case 6: return __RV_CSR_READ(CSR_MHPMCOUNTER6);
  1916. case 7: return __RV_CSR_READ(CSR_MHPMCOUNTER7);
  1917. case 8: return __RV_CSR_READ(CSR_MHPMCOUNTER8);
  1918. case 9: return __RV_CSR_READ(CSR_MHPMCOUNTER9);
  1919. case 10: return __RV_CSR_READ(CSR_MHPMCOUNTER10);
  1920. case 11: return __RV_CSR_READ(CSR_MHPMCOUNTER11);
  1921. case 12: return __RV_CSR_READ(CSR_MHPMCOUNTER12);
  1922. case 13: return __RV_CSR_READ(CSR_MHPMCOUNTER13);
  1923. case 14: return __RV_CSR_READ(CSR_MHPMCOUNTER14);
  1924. case 15: return __RV_CSR_READ(CSR_MHPMCOUNTER15);
  1925. case 16: return __RV_CSR_READ(CSR_MHPMCOUNTER16);
  1926. case 17: return __RV_CSR_READ(CSR_MHPMCOUNTER17);
  1927. case 18: return __RV_CSR_READ(CSR_MHPMCOUNTER18);
  1928. case 19: return __RV_CSR_READ(CSR_MHPMCOUNTER19);
  1929. case 20: return __RV_CSR_READ(CSR_MHPMCOUNTER20);
  1930. case 21: return __RV_CSR_READ(CSR_MHPMCOUNTER21);
  1931. case 22: return __RV_CSR_READ(CSR_MHPMCOUNTER22);
  1932. case 23: return __RV_CSR_READ(CSR_MHPMCOUNTER23);
  1933. case 24: return __RV_CSR_READ(CSR_MHPMCOUNTER24);
  1934. case 25: return __RV_CSR_READ(CSR_MHPMCOUNTER25);
  1935. case 26: return __RV_CSR_READ(CSR_MHPMCOUNTER26);
  1936. case 27: return __RV_CSR_READ(CSR_MHPMCOUNTER27);
  1937. case 28: return __RV_CSR_READ(CSR_MHPMCOUNTER28);
  1938. case 29: return __RV_CSR_READ(CSR_MHPMCOUNTER29);
  1939. case 30: return __RV_CSR_READ(CSR_MHPMCOUNTER30);
  1940. case 31: return __RV_CSR_READ(CSR_MHPMCOUNTER31);
  1941. #else
  1942. switch (idx) {
  1943. #endif
  1944. default: return 0;
  1945. }
  1946. }
/**
 * \brief Get value of selected high performance monitor counter
 * \param [in] idx HPMCOUNTERx CSR index. Accepted values are 0 (cycle
 *             counter), 2 (instret counter) and 3-31 (MHPMCOUNTER3-31);
 *             any other value returns 0
 * \details
 * Get high performance monitor counter register value without high
 * 32 bits when XLEN=32
 * \return HPMCOUNTERx Register value, or 0 for an unsupported index
 */
__STATIC_INLINE unsigned long __read_hpm_counter(unsigned long idx)
{
    switch (idx) {
        /* idx 0/2 map to the architectural cycle/instret counters; idx 1
         * (time) is intentionally not handled and falls to default */
        case 0: return __read_cycle_csr();
        case 2: return __read_instret_csr();
        case 3: return __RV_CSR_READ(CSR_MHPMCOUNTER3);
        case 4: return __RV_CSR_READ(CSR_MHPMCOUNTER4);
        case 5: return __RV_CSR_READ(CSR_MHPMCOUNTER5);
        case 6: return __RV_CSR_READ(CSR_MHPMCOUNTER6);
        case 7: return __RV_CSR_READ(CSR_MHPMCOUNTER7);
        case 8: return __RV_CSR_READ(CSR_MHPMCOUNTER8);
        case 9: return __RV_CSR_READ(CSR_MHPMCOUNTER9);
        case 10: return __RV_CSR_READ(CSR_MHPMCOUNTER10);
        case 11: return __RV_CSR_READ(CSR_MHPMCOUNTER11);
        case 12: return __RV_CSR_READ(CSR_MHPMCOUNTER12);
        case 13: return __RV_CSR_READ(CSR_MHPMCOUNTER13);
        case 14: return __RV_CSR_READ(CSR_MHPMCOUNTER14);
        case 15: return __RV_CSR_READ(CSR_MHPMCOUNTER15);
        case 16: return __RV_CSR_READ(CSR_MHPMCOUNTER16);
        case 17: return __RV_CSR_READ(CSR_MHPMCOUNTER17);
        case 18: return __RV_CSR_READ(CSR_MHPMCOUNTER18);
        case 19: return __RV_CSR_READ(CSR_MHPMCOUNTER19);
        case 20: return __RV_CSR_READ(CSR_MHPMCOUNTER20);
        case 21: return __RV_CSR_READ(CSR_MHPMCOUNTER21);
        case 22: return __RV_CSR_READ(CSR_MHPMCOUNTER22);
        case 23: return __RV_CSR_READ(CSR_MHPMCOUNTER23);
        case 24: return __RV_CSR_READ(CSR_MHPMCOUNTER24);
        case 25: return __RV_CSR_READ(CSR_MHPMCOUNTER25);
        case 26: return __RV_CSR_READ(CSR_MHPMCOUNTER26);
        case 27: return __RV_CSR_READ(CSR_MHPMCOUNTER27);
        case 28: return __RV_CSR_READ(CSR_MHPMCOUNTER28);
        case 29: return __RV_CSR_READ(CSR_MHPMCOUNTER29);
        case 30: return __RV_CSR_READ(CSR_MHPMCOUNTER30);
        case 31: return __RV_CSR_READ(CSR_MHPMCOUNTER31);
        default: return 0;
    }
}
/**
 * \brief Set exceptions delegation to S mode
 * \details Set certain exceptions of supervisor mode or user mode
 * delegated from machine mode to supervisor mode.
 * \param [in] mask Bit mask written to the MEDELEG CSR; each set bit
 *             delegates the corresponding exception cause to S mode
 * \remarks
 * Exception should trigger in supervisor mode or user mode.
 * Note this overwrites the whole MEDELEG CSR (it does not OR bits in).
 */
__STATIC_FORCEINLINE void __set_medeleg(unsigned long mask)
{
    __RV_CSR_WRITE(CSR_MEDELEG, mask);
}
/**
 * \brief Set interrupt delegation to S mode
 * \details Set certain interrupt of supervisor mode or user mode
 * delegated from machine mode to supervisor mode.
 * \param [in] mask Bit mask written to the MIDELEG CSR; each set bit
 *             delegates the corresponding interrupt to S mode
 * \remarks
 * interrupt should trigger in supervisor mode or user mode.
 * Note this overwrites the whole MIDELEG CSR (it does not OR bits in).
 */
__STATIC_FORCEINLINE void __set_mideleg(unsigned long mask)
{
    __RV_CSR_WRITE(CSR_MIDELEG, mask);
}
  2014. /* ===== Load/Store Operations ===== */
  2015. /**
  2016. * \brief Load 8bit value from address (8 bit)
  2017. * \details Load 8 bit value.
  2018. * \param [in] addr Address pointer to data
  2019. * \return value of type uint8_t at (*addr)
  2020. */
  2021. __STATIC_FORCEINLINE uint8_t __LB(volatile void *addr)
  2022. {
  2023. uint8_t result;
  2024. __ASM volatile ("lb %0, 0(%1)" : "=r" (result) : "r" (addr));
  2025. return result;
  2026. }
  2027. /**
  2028. * \brief Load 16bit value from address (16 bit)
  2029. * \details Load 16 bit value.
  2030. * \param [in] addr Address pointer to data
  2031. * \return value of type uint16_t at (*addr)
  2032. */
  2033. __STATIC_FORCEINLINE uint16_t __LH(volatile void *addr)
  2034. {
  2035. uint16_t result;
  2036. __ASM volatile ("lh %0, 0(%1)" : "=r" (result) : "r" (addr));
  2037. return result;
  2038. }
  2039. /**
  2040. * \brief Load 32bit value from address (32 bit)
  2041. * \details Load 32 bit value.
  2042. * \param [in] addr Address pointer to data
  2043. * \return value of type uint32_t at (*addr)
  2044. */
  2045. __STATIC_FORCEINLINE uint32_t __LW(volatile void *addr)
  2046. {
  2047. uint32_t result;
  2048. __ASM volatile ("lw %0, 0(%1)" : "=r" (result) : "r" (addr));
  2049. return result;
  2050. }
  2051. #if (__RISCV_XLEN != 32) || defined(__riscv_zilsd)
  2052. /**
  2053. * \brief Load 64bit value from address (64 bit)
  2054. * \details Load 64 bit value.
  2055. * \param [in] addr Address pointer to data
  2056. * \return value of type uint64_t at (*addr)
  2057. * \remarks RV64 only macro
  2058. */
  2059. __STATIC_FORCEINLINE uint64_t __LD(volatile void *addr)
  2060. {
  2061. uint64_t result;
  2062. __ASM volatile ("ld %0, 0(%1)" : "=r" (result) : "r" (addr));
  2063. return result;
  2064. }
  2065. #endif
/**
 * \brief Write 8bit value to address (8 bit)
 * \details Write 8 bit value.
 * \param [in] addr Address pointer to data
 * \param [in] val Value to set
 * \remarks NOTE(review): the asm has no "memory" clobber or output
 * operand for *addr, so the compiler is not told memory changed —
 * verify callers do not rely on ordering against other accesses.
 */
__STATIC_FORCEINLINE void __SB(volatile void *addr, uint8_t val)
{
    __ASM volatile ("sb %0, 0(%1)" : : "r" (val), "r" (addr));
}
/**
 * \brief Write 16bit value to address (16 bit)
 * \details Write 16 bit value.
 * \param [in] addr Address pointer to data
 * \param [in] val Value to set
 * \remarks NOTE(review): the asm has no "memory" clobber or output
 * operand for *addr, so the compiler is not told memory changed —
 * verify callers do not rely on ordering against other accesses.
 */
__STATIC_FORCEINLINE void __SH(volatile void *addr, uint16_t val)
{
    __ASM volatile ("sh %0, 0(%1)" : : "r" (val), "r" (addr));
}
/**
 * \brief Write 32bit value to address (32 bit)
 * \details Write 32 bit value.
 * \param [in] addr Address pointer to data
 * \param [in] val Value to set
 * \remarks NOTE(review): the asm has no "memory" clobber or output
 * operand for *addr, so the compiler is not told memory changed —
 * verify callers do not rely on ordering against other accesses.
 */
__STATIC_FORCEINLINE void __SW(volatile void *addr, uint32_t val)
{
    __ASM volatile ("sw %0, 0(%1)" : : "r" (val), "r" (addr));
}
#if (__RISCV_XLEN != 32) || defined(__riscv_zilsd)
/**
 * \brief Write 64bit value to address (64 bit)
 * \details Write 64 bit value.
 * \param [in] addr Address pointer to data
 * \param [in] val Value to set
 * \remarks Available on RV64, or RV32 with the Zilsd extension.
 * NOTE(review): the asm has no "memory" clobber or output operand for
 * *addr, so the compiler is not told memory changed — verify callers
 * do not rely on ordering against other accesses.
 */
__STATIC_FORCEINLINE void __SD(volatile void *addr, uint64_t val)
{
    __ASM volatile ("sd %0, 0(%1)" : : "r" (val), "r" (addr));
}
#endif
/**
 * \brief Compare and Swap 32bit value using LR and SC
 * \details Compare old value with memory, if identical,
 * store new value in memory. Return the initial value in memory.
 * Success is indicated by comparing return value with OLD.
 * \param [in] addr Address pointer to data, address need to be 4byte aligned
 * \param [in] oldval Old value of the data in address
 * \param [in] newval New value to be stored into the address
 * \return return the initial value in memory
 */
__STATIC_INLINE uint32_t __CAS_W(volatile uint32_t *addr, uint32_t oldval, uint32_t newval)
{
    uint32_t result;
    uint32_t rc;

    /* LR/SC retry loop: load-reserved the word; if it differs from oldval
     * bail out, otherwise store-conditional newval and retry from the top
     * whenever the SC fails (rc != 0, reservation lost). */
    __ASM volatile ( \
            "0: lr.w %0, %2 \n" \
            " bne %0, %z3, 1f \n" \
            " sc.w %1, %z4, %2 \n" \
            " bnez %1, 0b \n" \
            "1:\n" \
            : "=&r"(result), "=&r"(rc), "+A"(*addr) \
            : "r"(oldval), "r"(newval) \
            : "memory");
    return result;
}
/**
 * \brief Atomic Swap 32bit value into memory
 * \details Atomically swap new 32bit value into memory using amoswap.w.
 * \param [in] addr Address pointer to data, address need to be 4byte aligned
 * \param [in] newval New value to be stored into the address
 * \return return the original value in memory
 */
__STATIC_FORCEINLINE uint32_t __AMOSWAP_W(volatile uint32_t *addr, uint32_t newval)
{
    uint32_t result;

    /* result receives the old memory value produced by the AMO itself */
    __ASM volatile ("amoswap.w %0, %2, %1" : \
            "=r"(result), "+A"(*addr) : "r"(newval) : "memory");
    return result;
}
  2148. /**
  2149. * \brief Atomic Add with 32bit value
  2150. * \details Atomically ADD 32bit value with value in memory using amoadd.d.
  2151. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2152. * \param [in] value value to be ADDed
  2153. * \return return memory value + add value
  2154. */
  2155. __STATIC_FORCEINLINE int32_t __AMOADD_W(volatile int32_t *addr, int32_t value)
  2156. {
  2157. int32_t result;
  2158. __ASM volatile ("amoadd.w %0, %2, %1" : \
  2159. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2160. return *addr;
  2161. }
  2162. /**
  2163. * \brief Atomic And with 32bit value
  2164. * \details Atomically AND 32bit value with value in memory using amoand.d.
  2165. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2166. * \param [in] value value to be ANDed
  2167. * \return return memory value & and value
  2168. */
  2169. __STATIC_FORCEINLINE int32_t __AMOAND_W(volatile int32_t *addr, int32_t value)
  2170. {
  2171. int32_t result;
  2172. __ASM volatile ("amoand.w %0, %2, %1" : \
  2173. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2174. return *addr;
  2175. }
  2176. /**
  2177. * \brief Atomic OR with 32bit value
  2178. * \details Atomically OR 32bit value with value in memory using amoor.d.
  2179. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2180. * \param [in] value value to be ORed
  2181. * \return return memory value | and value
  2182. */
  2183. __STATIC_FORCEINLINE int32_t __AMOOR_W(volatile int32_t *addr, int32_t value)
  2184. {
  2185. int32_t result;
  2186. __ASM volatile ("amoor.w %0, %2, %1" : \
  2187. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2188. return *addr;
  2189. }
  2190. /**
  2191. * \brief Atomic XOR with 32bit value
  2192. * \details Atomically XOR 32bit value with value in memory using amoxor.d.
  2193. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2194. * \param [in] value value to be XORed
  2195. * \return return memory value ^ and value
  2196. */
  2197. __STATIC_FORCEINLINE int32_t __AMOXOR_W(volatile int32_t *addr, int32_t value)
  2198. {
  2199. int32_t result;
  2200. __ASM volatile ("amoxor.w %0, %2, %1" : \
  2201. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2202. return *addr;
  2203. }
  2204. /**
  2205. * \brief Atomic unsigned MAX with 32bit value
  2206. * \details Atomically unsigned max compare 32bit value with value in memory using amomaxu.d.
  2207. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2208. * \param [in] value value to be compared
  2209. * \return return the bigger value
  2210. */
  2211. __STATIC_FORCEINLINE uint32_t __AMOMAXU_W(volatile uint32_t *addr, uint32_t value)
  2212. {
  2213. uint32_t result;
  2214. __ASM volatile ("amomaxu.w %0, %2, %1" : \
  2215. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2216. return *addr;
  2217. }
  2218. /**
  2219. * \brief Atomic signed MAX with 32bit value
  2220. * \details Atomically signed max compare 32bit value with value in memory using amomax.d.
  2221. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2222. * \param [in] value value to be compared
  2223. * \return the bigger value
  2224. */
  2225. __STATIC_FORCEINLINE int32_t __AMOMAX_W(volatile int32_t *addr, int32_t value)
  2226. {
  2227. int32_t result;
  2228. __ASM volatile ("amomax.w %0, %2, %1" : \
  2229. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2230. return *addr;
  2231. }
  2232. /**
  2233. * \brief Atomic unsigned MIN with 32bit value
  2234. * \details Atomically unsigned min compare 32bit value with value in memory using amominu.d.
  2235. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2236. * \param [in] value value to be compared
  2237. * \return the smaller value
  2238. */
  2239. __STATIC_FORCEINLINE uint32_t __AMOMINU_W(volatile uint32_t *addr, uint32_t value)
  2240. {
  2241. uint32_t result;
  2242. __ASM volatile ("amominu.w %0, %2, %1" : \
  2243. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2244. return *addr;
  2245. }
  2246. /**
  2247. * \brief Atomic signed MIN with 32bit value
  2248. * \details Atomically signed min compare 32bit value with value in memory using amomin.d.
  2249. * \param [in] addr Address pointer to data, address need to be 4byte aligned
  2250. * \param [in] value value to be compared
  2251. * \return the smaller value
  2252. */
  2253. __STATIC_FORCEINLINE int32_t __AMOMIN_W(volatile int32_t *addr, int32_t value)
  2254. {
  2255. int32_t result;
  2256. __ASM volatile ("amomin.w %0, %2, %1" : \
  2257. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2258. return *addr;
  2259. }
  2260. #if __RISCV_XLEN == 64
/**
 * \brief Compare and Swap 64bit value using LR and SC
 * \details Compare old value with memory, if identical,
 * store new value in memory. Return the initial value in memory.
 * Success is indicated by comparing return value with OLD.
 * \param [in] addr Address pointer to data, address need to be 8byte aligned
 * \param [in] oldval Old value of the data in address
 * \param [in] newval New value to be stored into the address
 * \return return the initial value in memory
 */
__STATIC_INLINE uint64_t __CAS_D(volatile uint64_t *addr, uint64_t oldval, uint64_t newval)
{
    uint64_t result;
    uint64_t rc;

    /* LR/SC retry loop: load-reserved the doubleword; if it differs from
     * oldval bail out, otherwise store-conditional newval and retry from
     * the top whenever the SC fails (rc != 0, reservation lost). */
    __ASM volatile ( \
            "0: lr.d %0, %2 \n" \
            " bne %0, %z3, 1f \n" \
            " sc.d %1, %z4, %2 \n" \
            " bnez %1, 0b \n" \
            "1:\n" \
            : "=&r"(result), "=&r"(rc), "+A"(*addr) \
            : "r"(oldval), "r"(newval) \
            : "memory");
    return result;
}
  2287. /**
  2288. * \brief Atomic Swap 64bit value into memory
  2289. * \details Atomically swap new 64bit value into memory using amoswap.d.
  2290. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2291. * \param [in] newval New value to be stored into the address
  2292. * \return return the original value in memory
  2293. */
  2294. __STATIC_FORCEINLINE uint64_t __AMOSWAP_D(volatile uint64_t *addr, uint64_t newval)
  2295. {
  2296. uint64_t result;
  2297. __ASM volatile ("amoswap.d %0, %2, %1" : \
  2298. "=r"(result), "+A"(*addr) : "r"(newval) : "memory");
  2299. return result;
  2300. }
  2301. /**
  2302. * \brief Atomic Add with 64bit value
  2303. * \details Atomically ADD 64bit value with value in memory using amoadd.d.
  2304. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2305. * \param [in] value value to be ADDed
  2306. * \return return memory value + add value
  2307. */
  2308. __STATIC_FORCEINLINE int64_t __AMOADD_D(volatile int64_t *addr, int64_t value)
  2309. {
  2310. int64_t result;
  2311. __ASM volatile ("amoadd.d %0, %2, %1" : \
  2312. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2313. return *addr;
  2314. }
  2315. /**
  2316. * \brief Atomic And with 64bit value
  2317. * \details Atomically AND 64bit value with value in memory using amoand.d.
  2318. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2319. * \param [in] value value to be ANDed
  2320. * \return return memory value & and value
  2321. */
  2322. __STATIC_FORCEINLINE int64_t __AMOAND_D(volatile int64_t *addr, int64_t value)
  2323. {
  2324. int64_t result;
  2325. __ASM volatile ("amoand.d %0, %2, %1" : \
  2326. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2327. return *addr;
  2328. }
  2329. /**
  2330. * \brief Atomic OR with 64bit value
  2331. * \details Atomically OR 64bit value with value in memory using amoor.d.
  2332. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2333. * \param [in] value value to be ORed
  2334. * \return return memory value | and value
  2335. */
  2336. __STATIC_FORCEINLINE int64_t __AMOOR_D(volatile int64_t *addr, int64_t value)
  2337. {
  2338. int64_t result;
  2339. __ASM volatile ("amoor.d %0, %2, %1" : \
  2340. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2341. return *addr;
  2342. }
  2343. /**
  2344. * \brief Atomic XOR with 64bit value
  2345. * \details Atomically XOR 64bit value with value in memory using amoxor.d.
  2346. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2347. * \param [in] value value to be XORed
  2348. * \return return memory value ^ and value
  2349. */
  2350. __STATIC_FORCEINLINE int64_t __AMOXOR_D(volatile int64_t *addr, int64_t value)
  2351. {
  2352. int64_t result;
  2353. __ASM volatile ("amoxor.d %0, %2, %1" : \
  2354. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2355. return *addr;
  2356. }
  2357. /**
  2358. * \brief Atomic unsigned MAX with 64bit value
  2359. * \details Atomically unsigned max compare 64bit value with value in memory using amomaxu.d.
  2360. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2361. * \param [in] value value to be compared
  2362. * \return return the bigger value
  2363. */
  2364. __STATIC_FORCEINLINE uint64_t __AMOMAXU_D(volatile uint64_t *addr, uint64_t value)
  2365. {
  2366. uint64_t result;
  2367. __ASM volatile ("amomaxu.d %0, %2, %1" : \
  2368. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2369. return *addr;
  2370. }
  2371. /**
  2372. * \brief Atomic signed MAX with 64bit value
  2373. * \details Atomically signed max compare 64bit value with value in memory using amomax.d.
  2374. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2375. * \param [in] value value to be compared
  2376. * \return the bigger value
  2377. */
  2378. __STATIC_FORCEINLINE int64_t __AMOMAX_D(volatile int64_t *addr, int64_t value)
  2379. {
  2380. int64_t result;
  2381. __ASM volatile ("amomax.d %0, %2, %1" : \
  2382. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2383. return *addr;
  2384. }
  2385. /**
  2386. * \brief Atomic unsigned MIN with 64bit value
  2387. * \details Atomically unsigned min compare 64bit value with value in memory using amominu.d.
  2388. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2389. * \param [in] value value to be compared
  2390. * \return the smaller value
  2391. */
  2392. __STATIC_FORCEINLINE uint64_t __AMOMINU_D(volatile uint64_t *addr, uint64_t value)
  2393. {
  2394. uint64_t result;
  2395. __ASM volatile ("amominu.d %0, %2, %1" : \
  2396. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2397. return *addr;
  2398. }
  2399. /**
  2400. * \brief Atomic signed MIN with 64bit value
  2401. * \details Atomically signed min compare 64bit value with value in memory using amomin.d.
  2402. * \param [in] addr Address pointer to data, address need to be 8byte aligned
  2403. * \param [in] value value to be compared
  2404. * \return the smaller value
  2405. */
  2406. __STATIC_FORCEINLINE int64_t __AMOMIN_D(volatile int64_t *addr, int64_t value)
  2407. {
  2408. int64_t result;
  2409. __ASM volatile ("amomin.d %0, %2, %1" : \
  2410. "=r"(result), "+A"(*addr) : "r"(value) : "memory");
  2411. return *addr;
  2412. }
  2413. #endif /* __RISCV_XLEN == 64 */
/**
 * \brief Enable ICache prefetch
 * \details Set the IC_PF_EN bit in the MCACHE_CTL CSR to enable
 * ICache prefetch.
 * \remarks NOTE(review): MCACHE_CTL appears to be a vendor-specific
 * machine-mode CSR — confirm it must be accessed from M mode.
 */
__STATIC_FORCEINLINE void __enable_ic_prefetch(void)
{
    __RV_CSR_SET(CSR_MCACHE_CTL, MCACHE_CTL_IC_PF_EN);
}
/**
 * \brief Disable ICache prefetch
 * \details Clear the IC_PF_EN bit in the MCACHE_CTL CSR to disable
 * ICache prefetch.
 * \remarks NOTE(review): MCACHE_CTL appears to be a vendor-specific
 * machine-mode CSR — confirm it must be accessed from M mode.
 */
__STATIC_FORCEINLINE void __disable_ic_prefetch(void)
{
    __RV_CSR_CLEAR(CSR_MCACHE_CTL, MCACHE_CTL_IC_PF_EN);
}
/**
 * \brief Enable ICache CMO prefetch
 * \details Set the IC_CMO_PF_EN bit in the MCACHE_CTL CSR to enable
 * ICache CMO prefetch.
 */
__STATIC_FORCEINLINE void __enable_ic_cmo_prefetch(void)
{
    __RV_CSR_SET(CSR_MCACHE_CTL, MCACHE_CTL_IC_CMO_PF_EN);
}
/**
 * \brief Disable ICache CMO prefetch
 * \details Clear the IC_CMO_PF_EN bit in the MCACHE_CTL CSR to disable
 * ICache CMO prefetch.
 */
__STATIC_FORCEINLINE void __disable_ic_cmo_prefetch(void)
{
    __RV_CSR_CLEAR(CSR_MCACHE_CTL, MCACHE_CTL_IC_CMO_PF_EN);
}
/**
 * \brief Enable DCache CMO prefetch
 * \details Set the DC_CMO_PF_EN bit in the MCACHE_CTL CSR to enable
 * DCache CMO prefetch.
 */
__STATIC_FORCEINLINE void __enable_dc_cmo_prefetch(void)
{
    __RV_CSR_SET(CSR_MCACHE_CTL, MCACHE_CTL_DC_CMO_PF_EN);
}
/**
 * \brief Disable DCache CMO prefetch
 * \details Clear the DC_CMO_PF_EN bit in the MCACHE_CTL CSR to disable
 * DCache CMO prefetch.
 */
__STATIC_FORCEINLINE void __disable_dc_cmo_prefetch(void)
{
    __RV_CSR_CLEAR(CSR_MCACHE_CTL, MCACHE_CTL_DC_CMO_PF_EN);
}
/**
 * \brief Instruction prefetch operation
 * \details Performs an instruction prefetch operation for the specified address
 * using the RISC-V prefetch.i instruction.
 * \param[in] addr Address to prefetch
 * \remarks Before calling this function, ensure that the hardware supports CMO prefetch
 * and that the code is compiled with the `_zicbop` extension enabled.
 * Prefetch is a performance hint only; it has no architecturally visible
 * effect. The "memory" clobber keeps the compiler from reordering it
 * across surrounding memory accesses.
 * Here is a code example to check if CMO prefetch is supported:
 *
 * \code
 * if (IINFO_IsCMOPrefetchSupported()) {
 *     __cmo_prefetch_i(func);
 * }
 * \endcode
 *
 * \sa
 * - \ref IINFO_IsCMOPrefetchSupported
 *
 */
__STATIC_FORCEINLINE void __cmo_prefetch_i(const void *addr)
{
    __ASM volatile ("prefetch.i 0(%0)" : : "r" (addr) : "memory");
}
/**
 * \brief Read prefetch operation
 * \details Performs a read prefetch operation for the specified address
 * using the RISC-V prefetch.r instruction.
 * \param[in] addr Address to prefetch
 * \remarks Before calling this function, ensure that the hardware supports CMO prefetch
 * and that the code is compiled with the `_zicbop` extension enabled.
 * Prefetch is a performance hint only; it has no architecturally visible
 * effect. The "memory" clobber keeps the compiler from reordering it
 * across surrounding memory accesses.
 * Here is a code example to check if CMO prefetch is supported:
 *
 * \code
 * if (IINFO_IsCMOPrefetchSupported()) {
 *     __cmo_prefetch_r(data);
 * }
 * \endcode
 *
 * \sa
 * - \ref IINFO_IsCMOPrefetchSupported
 *
 */
__STATIC_FORCEINLINE void __cmo_prefetch_r(const void *addr)
{
    __ASM volatile ("prefetch.r 0(%0)" : : "r" (addr) : "memory");
}
/**
 * \brief Write prefetch operation
 * \details Performs a write prefetch operation for the specified address
 * using the RISC-V prefetch.w instruction.
 * \param[in] addr Address to prefetch
 * \remarks Before calling this function, ensure that the hardware supports CMO prefetch
 * and that the code is compiled with the `_zicbop` extension enabled.
 * Prefetch is a performance hint only; it has no architecturally visible
 * effect. The "memory" clobber keeps the compiler from reordering it
 * across surrounding memory accesses.
 * Here is a code example to check if CMO prefetch is supported:
 *
 * \code
 * if (IINFO_IsCMOPrefetchSupported()) {
 *     __cmo_prefetch_w(data);
 * }
 * \endcode
 *
 * \sa
 * - \ref IINFO_IsCMOPrefetchSupported
 *
 */
__STATIC_FORCEINLINE void __cmo_prefetch_w(const void *addr)
{
    __ASM volatile ("prefetch.w 0(%0)" : : "r" (addr) : "memory");
}
  2537. /** @} */ /* End of Doxygen Group NMSIS_Core_CPU_Intrinsic */
  2538. #ifdef __cplusplus
  2539. }
  2540. #endif
  2541. #endif /* __CORE_FEATURE_BASE__ */