
/*
 * Copyright (c) 2019 Nuclei Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __CORE_FEATURE_SPMP_H__
#define __CORE_FEATURE_SPMP_H__
/*!
 * @file     core_feature_spmp.h
 * @brief    sPMP (upgraded to the S-mode Memory Protection Unit, renamed SMPU) feature API header file for the Nuclei N/NX Core
 */
/*
 * sPMP Feature Configuration Macros:
 * 1. __SPMP_PRESENT / __SMPU_PRESENT: Define whether the sPMP / SMPU is present or not
 *    * 0: Not present
 *    * 1: Present
 * 2. __SPMP_ENTRY_NUM / __SMPU_ENTRY_NUM: Define the number of sPMP / SMPU entries; only 8 or 16 is configurable.
 *    __SMPU_ENTRY_NUM is the same as __SPMP_ENTRY_NUM.
 */
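/*
 * For reference, a device header would typically provide these macros.
 * This is only an illustrative sketch; the values below are assumptions,
 * not taken from any particular <Device.h>:
 *
 *   #define __SPMP_PRESENT      1
 *   #define __SPMP_ENTRY_NUM    16
 */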
#ifdef __cplusplus
extern "C" {
#endif

#include "core_feature_base.h"
#include "core_compatiable.h"

#if defined(__SPMP_PRESENT) && (__SPMP_PRESENT == 1)

/* ===== sPMP Operations ===== */
/**
 * \defgroup NMSIS_Core_SPMP    sPMP or sMPU Functions
 * \ingroup  NMSIS_Core
 * \brief    Functions related to the RISC-V supervisor-mode Physical Memory Protection.
 * \details
 * The optional supervisor physical memory protection (sPMP) unit provides per-hart supervisor-mode
 * control registers that allow physical memory access privileges (read, write, execute)
 * to be specified for each physical memory region. The sPMP values are checked after the physical
 * address to be accessed passes the PMP checks described in the RISC-V privileged spec.
 *
 * Like PMP, the sPMP supports region access control settings as small as four bytes.
 *
 * @{
 */
#ifndef __SPMP_ENTRY_NUM
/* __SPMP_ENTRY_NUM should be defined in <Device.h> */
#error "__SPMP_ENTRY_NUM is not defined, please check!"
#endif

typedef struct SPMP_CONFIG {
    /**
     * Set permissions using the macros \ref SMPU_S / \ref SMPU_R / \ref SMPU_W / \ref SMPU_X for SMPU,
     * or \ref SPMP_L / \ref SPMP_U / \ref SPMP_R / \ref SPMP_W / \ref SPMP_X for sPMP;
     * see the RISC-V SMPU/sPMP specifications for details.
     */
    unsigned int protection;
    /**
     * Size of the memory region as a power of 2. It must be at least 2 and at most \ref __RISCV_XLEN.
     * With a hardwired granularity of 2^N bytes (e.g. N = 12), the order must be at least N;
     * if you configure a value smaller than N, the order read back is still N.
     */
    unsigned long order;
    /**
     * Base address of the memory region; it must be aligned to 2^order bytes.
     */
    unsigned long base_addr;
} spmp_config;

/**
 * \brief   Get sPMPCFGx Register by CSR index
 * \details Return the content of the sPMPCFGx Register.
 * \param [in]    csr_idx    sPMPCFG CSR index (0-3)
 * \return        sPMPCFGx Register value
 * \remark
 * - For RV64, only csr_idx = 0 and csr_idx = 2 are allowed.
 *   spmpcfg0 and spmpcfg2 hold the configurations
 *   for the 16 sPMP entries; spmpcfg1 and spmpcfg3 are illegal.
 * - For RV32, spmpcfg0-spmpcfg3 hold the configurations
 *   spmp0cfg-spmp15cfg for the 16 sPMP entries.
 */
__STATIC_INLINE rv_csr_t __get_sPMPCFGx(uint32_t csr_idx)
{
    switch (csr_idx) {
        case 0: return __RV_CSR_READ(CSR_SPMPCFG0);
        case 1: return __RV_CSR_READ(CSR_SPMPCFG1);
        case 2: return __RV_CSR_READ(CSR_SPMPCFG2);
        case 3: return __RV_CSR_READ(CSR_SPMPCFG3);
        default: return 0;
    }
}

/**
 * \brief   Set sPMPCFGx by CSR index
 * \details Write the given value to the sPMPCFGx Register.
 * \param [in]    csr_idx    sPMPCFG CSR index (0-3)
 * \param [in]    spmpcfg    sPMPCFGx Register value to set
 * \remark
 * - For RV64, only csr_idx = 0 and csr_idx = 2 are allowed.
 *   spmpcfg0 and spmpcfg2 hold the configurations
 *   for the 16 sPMP entries; spmpcfg1 and spmpcfg3 are illegal.
 * - For RV32, spmpcfg0-spmpcfg3 hold the configurations
 *   spmp0cfg-spmp15cfg for the 16 sPMP entries.
 */
__STATIC_INLINE void __set_sPMPCFGx(uint32_t csr_idx, rv_csr_t spmpcfg)
{
    switch (csr_idx) {
        case 0: __RV_CSR_WRITE(CSR_SPMPCFG0, spmpcfg); break;
        case 1: __RV_CSR_WRITE(CSR_SPMPCFG1, spmpcfg); break;
        case 2: __RV_CSR_WRITE(CSR_SPMPCFG2, spmpcfg); break;
        case 3: __RV_CSR_WRITE(CSR_SPMPCFG3, spmpcfg); break;
        default: return;
    }
}

/**
 * \brief   Get 8-bit sPMPxCFG Register by sPMP entry index
 * \details Return the content of the sPMPxCFG Register.
 * \param [in]    entry_idx    sPMP region index (0-15)
 * \return        sPMPxCFG Register value
 */
__STATIC_INLINE uint8_t __get_sPMPxCFG(uint32_t entry_idx)
{
    rv_csr_t spmpcfgx = 0;
    uint8_t csr_cfg_num = 0;
    uint16_t csr_idx = 0;
    uint16_t cfg_shift = 0;

    if (entry_idx >= __SPMP_ENTRY_NUM) return 0;

#if __RISCV_XLEN == 32
    csr_cfg_num = 4;
    csr_idx = entry_idx >> 2;
#elif __RISCV_XLEN == 64
    csr_cfg_num = 8;
    /* For RV64, spmpcfg0 and spmpcfg2 each hold 8 sPMP entries, so align the CSR index down to an even value */
    csr_idx = (entry_idx >> 2) & ~1;
#else
    // TODO Add RV128 Handling
    return 0;
#endif
    spmpcfgx = __get_sPMPCFGx(csr_idx);
    /*
     * First get the specific spmpxcfg's position within one CSR composed of csr_cfg_num spmpxcfgs,
     * then get the spmpxcfg's bit position within that CSR by left-shifting by 3 (each spmpxcfg is one byte).
     */
    cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
    /* read the specific spmpxcfg value; use an unsigned long mask so the shift is well defined for cfg_shift >= 24 */
    return (uint8_t)(__RV_EXTRACT_FIELD(spmpcfgx, 0xFFUL << cfg_shift));
}

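/*
 * Worked example of the packing arithmetic above: for entry_idx = 5 on RV64,
 * csr_idx = (5 >> 2) & ~1 = 0 and cfg_shift = (5 & 7) << 3 = 40, so
 * __get_sPMPxCFG(5) returns bits [47:40] of spmpcfg0. On RV32 the same entry
 * maps to csr_idx = 5 >> 2 = 1 and cfg_shift = (5 & 3) << 3 = 8, i.e.
 * bits [15:8] of spmpcfg1.
 */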
/**
 * \brief   Set 8-bit sPMPxCFG by sPMP entry index
 * \details Set the given spmpxcfg value in the sPMPxCFG Register.
 * \param [in]    entry_idx    sPMP region index (0-15)
 * \param [in]    spmpxcfg     sPMPxCFG register value to set
 * \remark
 * - For RV32, 4 spmpxcfgs are densely packed into one CSR in order.
 * - For RV64, 8 spmpxcfgs are densely packed into one CSR in order.
 */
__STATIC_INLINE void __set_sPMPxCFG(uint32_t entry_idx, uint8_t spmpxcfg)
{
    rv_csr_t spmpcfgx = 0;
    uint8_t csr_cfg_num = 0;
    uint16_t csr_idx = 0;
    uint16_t cfg_shift = 0;

    if (entry_idx >= __SPMP_ENTRY_NUM) return;

#if __RISCV_XLEN == 32
    csr_cfg_num = 4;
    csr_idx = entry_idx >> 2;
#elif __RISCV_XLEN == 64
    csr_cfg_num = 8;
    /* For RV64, spmpcfg0 and spmpcfg2 each hold 8 sPMP entries, so align the CSR index down to an even value */
    csr_idx = (entry_idx >> 2) & ~1;
#else
    // TODO Add RV128 Handling
    return;
#endif
    /* read the current spmpcfgx register value */
    spmpcfgx = __get_sPMPCFGx(csr_idx);
    /*
     * First get the specific spmpxcfg's position within one CSR composed of csr_cfg_num spmpxcfgs,
     * then get the spmpxcfg's bit position within that CSR by left-shifting by 3 (each spmpxcfg is one byte).
     */
    cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
    spmpcfgx = __RV_INSERT_FIELD(spmpcfgx, 0xFFUL << cfg_shift, spmpxcfg);
    __set_sPMPCFGx(csr_idx, spmpcfgx);
}

/**
 * \brief   Get sPMPADDRx Register by CSR index
 * \details Return the content of the sPMPADDRx Register.
 * \param [in]    csr_idx    sPMP region CSR index (0-15)
 * \return        sPMPADDRx Register value
 */
__STATIC_INLINE rv_csr_t __get_sPMPADDRx(uint32_t csr_idx)
{
    switch (csr_idx) {
        case 0: return __RV_CSR_READ(CSR_SPMPADDR0);
        case 1: return __RV_CSR_READ(CSR_SPMPADDR1);
        case 2: return __RV_CSR_READ(CSR_SPMPADDR2);
        case 3: return __RV_CSR_READ(CSR_SPMPADDR3);
        case 4: return __RV_CSR_READ(CSR_SPMPADDR4);
        case 5: return __RV_CSR_READ(CSR_SPMPADDR5);
        case 6: return __RV_CSR_READ(CSR_SPMPADDR6);
        case 7: return __RV_CSR_READ(CSR_SPMPADDR7);
        case 8: return __RV_CSR_READ(CSR_SPMPADDR8);
        case 9: return __RV_CSR_READ(CSR_SPMPADDR9);
        case 10: return __RV_CSR_READ(CSR_SPMPADDR10);
        case 11: return __RV_CSR_READ(CSR_SPMPADDR11);
        case 12: return __RV_CSR_READ(CSR_SPMPADDR12);
        case 13: return __RV_CSR_READ(CSR_SPMPADDR13);
        case 14: return __RV_CSR_READ(CSR_SPMPADDR14);
        case 15: return __RV_CSR_READ(CSR_SPMPADDR15);
        default: return 0;
    }
}

/**
 * \brief   Set sPMPADDRx by CSR index
 * \details Write the given value to the sPMPADDRx Register.
 * \param [in]    csr_idx     sPMP region CSR index (0-15)
 * \param [in]    spmpaddr    sPMPADDRx Register value to set
 */
__STATIC_INLINE void __set_sPMPADDRx(uint32_t csr_idx, rv_csr_t spmpaddr)
{
    switch (csr_idx) {
        case 0: __RV_CSR_WRITE(CSR_SPMPADDR0, spmpaddr); break;
        case 1: __RV_CSR_WRITE(CSR_SPMPADDR1, spmpaddr); break;
        case 2: __RV_CSR_WRITE(CSR_SPMPADDR2, spmpaddr); break;
        case 3: __RV_CSR_WRITE(CSR_SPMPADDR3, spmpaddr); break;
        case 4: __RV_CSR_WRITE(CSR_SPMPADDR4, spmpaddr); break;
        case 5: __RV_CSR_WRITE(CSR_SPMPADDR5, spmpaddr); break;
        case 6: __RV_CSR_WRITE(CSR_SPMPADDR6, spmpaddr); break;
        case 7: __RV_CSR_WRITE(CSR_SPMPADDR7, spmpaddr); break;
        case 8: __RV_CSR_WRITE(CSR_SPMPADDR8, spmpaddr); break;
        case 9: __RV_CSR_WRITE(CSR_SPMPADDR9, spmpaddr); break;
        case 10: __RV_CSR_WRITE(CSR_SPMPADDR10, spmpaddr); break;
        case 11: __RV_CSR_WRITE(CSR_SPMPADDR11, spmpaddr); break;
        case 12: __RV_CSR_WRITE(CSR_SPMPADDR12, spmpaddr); break;
        case 13: __RV_CSR_WRITE(CSR_SPMPADDR13, spmpaddr); break;
        case 14: __RV_CSR_WRITE(CSR_SPMPADDR14, spmpaddr); break;
        case 15: __RV_CSR_WRITE(CSR_SPMPADDR15, spmpaddr); break;
        default: return;
    }
}

/**
 * \brief   Set sPMP entry by entry index
 * \details Write the given value to the sPMPxCFG and sPMPADDRx Registers.
 * \param [in]    entry_idx    sPMP entry index (0-15)
 * \param [in]    spmp_cfg     structure holding the L, U, X, W, R fields of the sPMP configuration register,
 *                             the memory region base address, and the region size as a power of 2
 * \remark
 * - If the memory region size is 2^12 (4KB), set spmp_cfg->order to 12, and so on.
 * - For a memory region of 2^X bytes, if X >= 3, NA4 mode is not selectable; NAPOT is selected.
 * - The TOR mode of the A field in the sPMP configuration register is not considered here.
 */
__STATIC_INLINE void __set_sPMPENTRYx(uint32_t entry_idx, const spmp_config *spmp_cfg)
{
    unsigned int cfg_shift, cfg_csr_idx, addr_csr_idx = 0;
    unsigned long cfgmask, addrmask = 0;
    unsigned long spmpcfg, spmpaddr = 0;
    unsigned long protection, csr_cfg_num = 0;

    /* check parameters */
    if (entry_idx >= __SPMP_ENTRY_NUM || spmp_cfg->order > __RISCV_XLEN || spmp_cfg->order < SPMP_SHIFT) return;

    /* calculate sPMP register and offset */
#if __RISCV_XLEN == 32
    csr_cfg_num = 4;
    cfg_csr_idx = entry_idx >> 2;
#elif __RISCV_XLEN == 64
    csr_cfg_num = 8;
    cfg_csr_idx = (entry_idx >> 2) & ~1;
#else
    // TODO Add RV128 Handling
    return;
#endif
    /*
     * First get the specific spmpxcfg's position within one CSR composed of csr_cfg_num spmpxcfgs,
     * then get the spmpxcfg's bit position within that CSR by left-shifting by 3 (each spmpxcfg is one byte).
     */
    cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
    addr_csr_idx = entry_idx;

    /* encode sPMP config */
    protection = (unsigned long)spmp_cfg->protection;
    protection |= (SPMP_SHIFT == spmp_cfg->order) ? SPMP_A_NA4 : SPMP_A_NAPOT;
    cfgmask = ~(0xFFUL << cfg_shift);
    spmpcfg = (__get_sPMPCFGx(cfg_csr_idx) & cfgmask);
    spmpcfg |= ((protection << cfg_shift) & ~cfgmask);

    /* encode sPMP address */
    if (SPMP_SHIFT == spmp_cfg->order) { /* NA4 */
        spmpaddr = (spmp_cfg->base_addr >> SPMP_SHIFT);
    } else { /* NAPOT */
        addrmask = (1UL << (spmp_cfg->order - SPMP_SHIFT)) - 1;
        spmpaddr = ((spmp_cfg->base_addr >> SPMP_SHIFT) & ~addrmask);
        spmpaddr |= (addrmask >> 1);
    }
    /*
     * Write the CSRs, updating the address first, in case the entry is locked and
     * we would not be able to modify it after setting the config CSR.
     */
    __set_sPMPADDRx(addr_csr_idx, spmpaddr);
    __set_sPMPCFGx(cfg_csr_idx, spmpcfg);
}

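/*
 * Usage sketch (illustrative only): configure sPMP entry 0 as a read/write
 * 4KB region. The base address 0x20000000 and the chosen permission macros
 * are placeholder assumptions for this example, not values mandated by the API:
 *
 *   spmp_config cfg;
 *   cfg.protection = SPMP_R | SPMP_W;    // readable and writable, not executable
 *   cfg.order = 12;                      // 2^12 = 4KB region, so NAPOT mode is used
 *   cfg.base_addr = 0x20000000UL;        // must be aligned to 2^order bytes
 *   __set_sPMPENTRYx(0, &cfg);
 */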
/**
 * \brief   Get sPMP entry by entry index
 * \details Read the sPMPxCFG and sPMPADDRx Registers of the given entry.
 * \param [in]     entry_idx    sPMP entry index (0-15)
 * \param [out]    spmp_cfg     structure holding the L, U, X, W, R, A fields of the sPMP configuration register,
 *                              the memory region base address, and the region size as a power of 2
 * \return  0 on success, -1 on failure
 * \remark
 * - If the memory region size is 2^12 (4KB), spmp_cfg->order is 12, and so on.
 * - The TOR mode of the A field in the sPMP configuration register is not considered here.
 */
__STATIC_INLINE int __get_sPMPENTRYx(unsigned int entry_idx, spmp_config *spmp_cfg)
{
    unsigned int cfg_shift, cfg_csr_idx, addr_csr_idx = 0;
    unsigned long cfgmask, spmpcfg, prot = 0;
    unsigned long t1, addr, spmpaddr, len = 0;
    uint8_t csr_cfg_num = 0;

    /* check parameters */
    if (entry_idx >= __SPMP_ENTRY_NUM || !spmp_cfg) return -1;

    /* calculate sPMP register and offset */
#if __RISCV_XLEN == 32
    csr_cfg_num = 4;
    cfg_csr_idx = entry_idx >> 2;
#elif __RISCV_XLEN == 64
    csr_cfg_num = 8;
    cfg_csr_idx = (entry_idx >> 2) & ~1;
#else
    // TODO Add RV128 Handling
    return -1;
#endif
    cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
    addr_csr_idx = entry_idx;

    /* decode sPMP config */
    cfgmask = (0xFFUL << cfg_shift);
    spmpcfg = (__get_sPMPCFGx(cfg_csr_idx) & cfgmask);
    prot = spmpcfg >> cfg_shift;

    /* decode sPMP address */
    spmpaddr = __get_sPMPADDRx(addr_csr_idx);
    if (SPMP_A_NAPOT == (prot & SPMP_A)) {
        t1 = __CTZ(~spmpaddr);
        addr = (spmpaddr & ~((1UL << t1) - 1)) << SPMP_SHIFT;
        len = (t1 + SPMP_SHIFT + 1);
    } else {
        addr = spmpaddr << SPMP_SHIFT;
        len = SPMP_SHIFT;
    }

    /* return details */
    spmp_cfg->protection = prot;
    spmp_cfg->base_addr = addr;
    spmp_cfg->order = len;
    return 0;
}

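/*
 * Read-back sketch (illustrative only): retrieve the configuration of entry 0
 * and recover the region size in bytes from the decoded order:
 *
 *   spmp_config readback;
 *   if (__get_sPMPENTRYx(0, &readback) == 0) {
 *       unsigned long region_size = 1UL << readback.order;  // e.g. 4096 for order 12
 *   }
 */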
#if defined(__SMPU_PRESENT) && (__SMPU_PRESENT == 1)
/**
 * The sPMP has been upgraded to the S-mode Memory Protection Unit, renamed SMPU,
 * but it still shares its APIs with the sPMP.
 */
typedef spmp_config smpu_config;

#define __get_SMPUCFGx      __get_sPMPCFGx
#define __set_SMPUCFGx      __set_sPMPCFGx
#define __get_SMPUxCFG      __get_sPMPxCFG
#define __set_SMPUxCFG      __set_sPMPxCFG
#define __get_SMPUADDRx     __get_sPMPADDRx
#define __set_SMPUADDRx     __set_sPMPADDRx
#define __set_SMPUENTRYx    __set_sPMPENTRYx
#define __get_SMPUENTRYx    __get_sPMPENTRYx

/**
 * \brief   Set each SMPU entry's on/off status
 * \details Write the given value to the SMPUSWITCHx Register.
 * \param [in]    val    bitmask that activates or deactivates each entry (up to 64 entries)
 * \remark
 * - Each bit of this register holds the on/off status of the corresponding SMPU entry.
 * - An SMPU entry is activated only when both the corresponding bit in smpuswitch and
 *   the A field of smpu[i]cfg are set (i.e., smpuswitch[i] & smpu[i]cfg.A).
 */
__STATIC_INLINE void __set_SMPUSWITCHx(uint64_t val)
{
#if __RISCV_XLEN == 32
    __RV_CSR_WRITE(CSR_SMPUSWITCH0, (uint32_t)val);
    __RV_CSR_WRITE(CSR_SMPUSWITCH1, (uint32_t)(val >> 32));
#elif __RISCV_XLEN == 64
    __RV_CSR_WRITE(CSR_SMPUSWITCH0, val);
#else
    // TODO Add RV128 Handling
#endif
}

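/*
 * Usage sketch (illustrative only): activate SMPU entries 0 and 3 while
 * leaving the other entries untouched, using a read-modify-write of the
 * switch bitmask; entry i is controlled by bit i of the mask:
 *
 *   uint64_t sw = __get_SMPUSWITCHx();
 *   sw |= (1ULL << 0) | (1ULL << 3);
 *   __set_SMPUSWITCHx(sw);
 */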
/**
 * \brief   Get each SMPU entry's on/off status
 * \details Get the value of the SMPUSWITCHx Register.
 * \return  SMPUSWITCHx Register value (one status bit per entry)
 * \remark
 * - Each bit of this register holds the on/off status of the corresponding SMPU entry.
 * - An SMPU entry is activated only when both the corresponding bit in smpuswitch and
 *   the A field of smpu[i]cfg are set (i.e., smpuswitch[i] & smpu[i]cfg.A).
 */
__STATIC_INLINE uint64_t __get_SMPUSWITCHx(void)
{
#if __RISCV_XLEN == 32
    uint32_t lo = 0, hi = 0;

    lo = __RV_CSR_READ(CSR_SMPUSWITCH0);
    hi = __RV_CSR_READ(CSR_SMPUSWITCH1);
    return (uint64_t)((((uint64_t)hi) << 32) | lo);
#elif __RISCV_XLEN == 64
    return (uint64_t)__RV_CSR_READ(CSR_SMPUSWITCH0);
#else
    // TODO Add RV128 Handling
    return 0;
#endif
}
#endif /* defined(__SMPU_PRESENT) && (__SMPU_PRESENT == 1) */

/** @} */ /* End of Doxygen Group NMSIS_Core_SPMP */

#endif /* defined(__SPMP_PRESENT) && (__SPMP_PRESENT == 1) */

#ifdef __cplusplus
}
#endif
#endif /* __CORE_FEATURE_SPMP_H__ */