/*
 * Copyright (c) 2019 Nuclei Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __CORE_FEATURE_SPMP_H__
#define __CORE_FEATURE_SPMP_H__
/*!
 * @file core_feature_spmp.h
 * @brief sPMP (upgraded to the S-mode Memory Protection Unit and renamed SMPU) feature API header file for Nuclei N/NX Core
 */
/*
 * sPMP Feature Configuration Macros:
 * 1. __SPMP_PRESENT / __SMPU_PRESENT: Define whether sPMP / SMPU is present or not
 *    * 0: Not present
 *    * 1: Present
 * 2. __SPMP_ENTRY_NUM / __SMPU_ENTRY_NUM: Define the number of sPMP / SMPU entries;
 *    only 8 or 16 is configurable, and __SMPU_ENTRY_NUM is the same as __SPMP_ENTRY_NUM
 */
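/*
 * Illustrative configuration sketch (the values below are assumptions and
 * normally come from the device header, e.g. <Device.h>):
 *
 *     #define __SPMP_PRESENT   1
 *     #define __SPMP_ENTRY_NUM 16
 */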
#ifdef __cplusplus
extern "C" {
#endif

#include "core_feature_base.h"
#include "core_compatiable.h"

#if defined(__SPMP_PRESENT) && (__SPMP_PRESENT == 1)

/* ===== sPMP Operations ===== */
/**
 * \defgroup NMSIS_Core_SPMP sPMP or sMPU Functions
 * \ingroup NMSIS_Core
 * \brief Functions related to the RISC-V supervisor-mode Physical Memory Protection.
 * \details
 * The optional supervisor physical memory protection (sPMP) unit provides per-hart supervisor-mode
 * control registers that allow physical memory access privileges (read, write, execute)
 * to be specified for each physical memory region. The sPMP values are checked after the physical
 * address to be accessed passes the PMP checks described in the RISC-V privileged spec.
 *
 * Like PMP, the sPMP supports region access control settings as small as four bytes.
 *
 * @{
 */
#ifndef __SPMP_ENTRY_NUM
/* __SPMP_ENTRY_NUM should be defined in <Device.h> */
#error "__SPMP_ENTRY_NUM is not defined, please check!"
#endif
typedef struct SPMP_CONFIG {
    /**
     * Set permissions using macros \ref SMPU_S/\ref SMPU_R/\ref SMPU_W/\ref SMPU_X for SMPU,
     * or \ref SPMP_L/\ref SPMP_U/\ref SPMP_R/\ref SPMP_W/\ref SPMP_X for sPMP;
     * see the RISC-V SMPU/sPMP specifications for details
     */
    unsigned int protection;
    /**
     * Size of the memory region as a power of 2. It must be at least 2 and at most \ref __RISCV_XLEN.
     * If the granularity is hardwired to 2^N bytes (e.g. N = 12), the order must be at least N;
     * if you configure a smaller order, the value read back is still N.
     */
    unsigned long order;
    /**
     * Base address of the memory region;
     * it must be aligned to 2^order bytes
     */
    unsigned long base_addr;
} spmp_config;
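/*
 * Example instance (illustrative sketch; the region address and size are
 * assumptions): a 64 KB (2^16) read/execute code region based at 0x10000000.
 *
 *     spmp_config code_region = {
 *         .protection = SPMP_R | SPMP_X,
 *         .order      = 16,            // 2^16 = 64 KB
 *         .base_addr  = 0x10000000UL,  // must be 64 KB aligned
 *     };
 */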
/**
 * \brief Get sPMPCFGx Register by csr index
 * \details Return the content of the sPMPCFGx Register.
 * \param [in] csr_idx sPMPCFG CSR index (0-3)
 * \return sPMPCFGx Register value
 * \remark
 * - For RV64, only csr_idx = 0 and csr_idx = 2 are allowed:
 *   spmpcfg0 and spmpcfg2 hold the configurations
 *   for the 16 sPMP entries, while spmpcfg1 and spmpcfg3 are illegal
 * - For RV32, spmpcfg0-spmpcfg3 hold the configurations
 *   spmp0cfg-spmp15cfg for the 16 sPMP entries
 */
__STATIC_INLINE rv_csr_t __get_sPMPCFGx(uint32_t csr_idx)
{
    switch (csr_idx) {
        case 0: return __RV_CSR_READ(CSR_SPMPCFG0);
        case 1: return __RV_CSR_READ(CSR_SPMPCFG1);
#if __SPMP_ENTRY_NUM > 8
        case 2: return __RV_CSR_READ(CSR_SPMPCFG2);
        case 3: return __RV_CSR_READ(CSR_SPMPCFG3);
#endif
        default: return 0;
    }
}
/**
 * \brief Set sPMPCFGx by csr index
 * \details Write the given value to the sPMPCFGx Register.
 * \param [in] csr_idx sPMPCFG CSR index (0-3)
 * \param [in] spmpcfg sPMPCFGx Register value to set
 * \remark
 * - For RV64, only csr_idx = 0 and csr_idx = 2 are allowed:
 *   spmpcfg0 and spmpcfg2 hold the configurations
 *   for the 16 sPMP entries, while spmpcfg1 and spmpcfg3 are illegal
 * - For RV32, spmpcfg0-spmpcfg3 hold the configurations
 *   spmp0cfg-spmp15cfg for the 16 sPMP entries
 */
__STATIC_INLINE void __set_sPMPCFGx(uint32_t csr_idx, rv_csr_t spmpcfg)
{
    switch (csr_idx) {
        case 0: __RV_CSR_WRITE(CSR_SPMPCFG0, spmpcfg); break;
        case 1: __RV_CSR_WRITE(CSR_SPMPCFG1, spmpcfg); break;
#if __SPMP_ENTRY_NUM > 8
        case 2: __RV_CSR_WRITE(CSR_SPMPCFG2, spmpcfg); break;
        case 3: __RV_CSR_WRITE(CSR_SPMPCFG3, spmpcfg); break;
#endif
        default: return;
    }
}
/**
 * \brief Get 8-bit sPMPxCFG Register by sPMP entry index
 * \details Return the content of the sPMPxCFG Register.
 * \param [in] entry_idx sPMP region index (0-15)
 * \return sPMPxCFG Register value
 */
__STATIC_INLINE uint8_t __get_sPMPxCFG(uint32_t entry_idx)
{
    rv_csr_t spmpcfgx = 0;
    uint8_t csr_cfg_num = 0;
    uint16_t csr_idx = 0;
    uint16_t cfg_shift = 0;

    if (entry_idx >= __SPMP_ENTRY_NUM) return 0;

#if __RISCV_XLEN == 32
    csr_cfg_num = 4;
    csr_idx = entry_idx >> 2;
#elif __RISCV_XLEN == 64
    csr_cfg_num = 8;
    /* For RV64, spmpcfg0 and spmpcfg2 each hold 8 sPMP entries, align by 2 */
    csr_idx = (entry_idx >> 2) & ~1;
#else
    // TODO Add RV128 Handling
    return 0;
#endif
    spmpcfgx = __get_sPMPCFGx(csr_idx);
    /*
     * First locate this spmpxcfg's position within the CSR (one CSR packs
     * csr_cfg_num spmpxcfg fields), then convert it to a bit offset by
     * shifting left by 3 (each spmpxcfg field is one byte)
     */
    cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
    /* extract the specific spmpxcfg value */
    return (uint8_t)(__RV_EXTRACT_FIELD(spmpcfgx, 0xFFUL << cfg_shift));
}
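/*
 * Worked example (illustrative): on RV64, entry_idx = 10 gives
 * csr_idx = (10 >> 2) & ~1 = 2 (spmpcfg2) and cfg_shift = (10 & 7) << 3 = 16,
 * so spmp10cfg is byte 2 of spmpcfg2:
 *
 *     uint8_t cfg = __get_sPMPxCFG(10);  // reads bits [23:16] of spmpcfg2
 */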
/**
 * \brief Set 8-bit sPMPxCFG by sPMP entry index
 * \details Set the given spmpxcfg value in the sPMPxCFG Register.
 * \param [in] entry_idx sPMPx region index (0-15)
 * \param [in] spmpxcfg sPMPxCFG register value to set
 * \remark
 * - For RV32, 4 spmpxcfgs are densely packed into one CSR in order;
 *   for RV64, 8 spmpxcfgs are densely packed into one CSR in order
 */
__STATIC_INLINE void __set_sPMPxCFG(uint32_t entry_idx, uint8_t spmpxcfg)
{
    rv_csr_t spmpcfgx = 0;
    uint8_t csr_cfg_num = 0;
    uint16_t csr_idx = 0;
    uint16_t cfg_shift = 0;

    if (entry_idx >= __SPMP_ENTRY_NUM) return;

#if __RISCV_XLEN == 32
    csr_cfg_num = 4;
    csr_idx = entry_idx >> 2;
#elif __RISCV_XLEN == 64
    csr_cfg_num = 8;
    /* For RV64, spmpcfg0 and spmpcfg2 each hold 8 sPMP entries, align by 2 */
    csr_idx = (entry_idx >> 2) & ~1;
#else
    // TODO Add RV128 Handling
    return;
#endif
    /* read the current spmpcfgx register value */
    spmpcfgx = __get_sPMPCFGx(csr_idx);
    /*
     * First locate this spmpxcfg's position within the CSR (one CSR packs
     * csr_cfg_num spmpxcfg fields), then convert it to a bit offset by
     * shifting left by 3 (each spmpxcfg field is one byte)
     */
    cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
    spmpcfgx = __RV_INSERT_FIELD(spmpcfgx, 0xFFUL << cfg_shift, spmpxcfg);
    __set_sPMPCFGx(csr_idx, spmpcfgx);
}
/**
 * \brief Get sPMPADDRx Register by CSR index
 * \details Return the content of the sPMPADDRx Register.
 * \param [in] csr_idx sPMP region CSR index (0-15)
 * \return sPMPADDRx Register value
 */
__STATIC_INLINE rv_csr_t __get_sPMPADDRx(uint32_t csr_idx)
{
    switch (csr_idx) {
        case 0: return __RV_CSR_READ(CSR_SPMPADDR0);
        case 1: return __RV_CSR_READ(CSR_SPMPADDR1);
        case 2: return __RV_CSR_READ(CSR_SPMPADDR2);
        case 3: return __RV_CSR_READ(CSR_SPMPADDR3);
        case 4: return __RV_CSR_READ(CSR_SPMPADDR4);
        case 5: return __RV_CSR_READ(CSR_SPMPADDR5);
        case 6: return __RV_CSR_READ(CSR_SPMPADDR6);
        case 7: return __RV_CSR_READ(CSR_SPMPADDR7);
#if __SPMP_ENTRY_NUM > 8
        case 8: return __RV_CSR_READ(CSR_SPMPADDR8);
        case 9: return __RV_CSR_READ(CSR_SPMPADDR9);
        case 10: return __RV_CSR_READ(CSR_SPMPADDR10);
        case 11: return __RV_CSR_READ(CSR_SPMPADDR11);
        case 12: return __RV_CSR_READ(CSR_SPMPADDR12);
        case 13: return __RV_CSR_READ(CSR_SPMPADDR13);
        case 14: return __RV_CSR_READ(CSR_SPMPADDR14);
        case 15: return __RV_CSR_READ(CSR_SPMPADDR15);
#endif
        default: return 0;
    }
}
/**
 * \brief Set sPMPADDRx by CSR index
 * \details Write the given value to the sPMPADDRx Register.
 * \param [in] csr_idx sPMP region CSR index (0-15)
 * \param [in] spmpaddr sPMPADDRx Register value to set
 */
__STATIC_INLINE void __set_sPMPADDRx(uint32_t csr_idx, rv_csr_t spmpaddr)
{
    switch (csr_idx) {
        case 0: __RV_CSR_WRITE(CSR_SPMPADDR0, spmpaddr); break;
        case 1: __RV_CSR_WRITE(CSR_SPMPADDR1, spmpaddr); break;
        case 2: __RV_CSR_WRITE(CSR_SPMPADDR2, spmpaddr); break;
        case 3: __RV_CSR_WRITE(CSR_SPMPADDR3, spmpaddr); break;
        case 4: __RV_CSR_WRITE(CSR_SPMPADDR4, spmpaddr); break;
        case 5: __RV_CSR_WRITE(CSR_SPMPADDR5, spmpaddr); break;
        case 6: __RV_CSR_WRITE(CSR_SPMPADDR6, spmpaddr); break;
        case 7: __RV_CSR_WRITE(CSR_SPMPADDR7, spmpaddr); break;
#if __SPMP_ENTRY_NUM > 8
        case 8: __RV_CSR_WRITE(CSR_SPMPADDR8, spmpaddr); break;
        case 9: __RV_CSR_WRITE(CSR_SPMPADDR9, spmpaddr); break;
        case 10: __RV_CSR_WRITE(CSR_SPMPADDR10, spmpaddr); break;
        case 11: __RV_CSR_WRITE(CSR_SPMPADDR11, spmpaddr); break;
        case 12: __RV_CSR_WRITE(CSR_SPMPADDR12, spmpaddr); break;
        case 13: __RV_CSR_WRITE(CSR_SPMPADDR13, spmpaddr); break;
        case 14: __RV_CSR_WRITE(CSR_SPMPADDR14, spmpaddr); break;
        case 15: __RV_CSR_WRITE(CSR_SPMPADDR15, spmpaddr); break;
#endif
        default: return;
    }
}
/**
 * \brief Set sPMP entry by entry idx
 * \details Write the given value to the sPMPxCFG Register and sPMPADDRx.
 * \param [in] entry_idx sPMP entry index (0-15)
 * \param [in] spmp_cfg structure holding the L, U, X, W, R fields of the sPMP configuration register,
 *   the memory region base address, and the size of the memory region as a power of 2
 * \remark
 * - If the size of the memory region is a 2^12 (4KB) range, spmp_cfg->order is 12, and so on.
 * - Suppose the size of the memory region is a 2^X-byte range: if X >= 3, NA4 mode is not selectable and NAPOT is selected.
 * - TOR mode of the A field in the sPMP configuration register is not considered here.
 */
__STATIC_INLINE void __set_sPMPENTRYx(uint32_t entry_idx, const spmp_config *spmp_cfg)
{
    unsigned int cfg_shift, cfg_csr_idx, addr_csr_idx = 0;
    unsigned long cfgmask, addrmask = 0;
    unsigned long spmpcfg, spmpaddr = 0;
    unsigned long protection, csr_cfg_num = 0;

    /* check parameters */
    if (entry_idx >= __SPMP_ENTRY_NUM || spmp_cfg->order > __RISCV_XLEN || spmp_cfg->order < SPMP_SHIFT) return;

    /* calculate sPMP register and offset */
#if __RISCV_XLEN == 32
    csr_cfg_num = 4;
    cfg_csr_idx = entry_idx >> 2;
#elif __RISCV_XLEN == 64
    csr_cfg_num = 8;
    cfg_csr_idx = (entry_idx >> 2) & ~1;
#else
    // TODO Add RV128 Handling
    return;
#endif
    /*
     * First locate this spmpxcfg's position within the CSR (one CSR packs
     * csr_cfg_num spmpxcfg fields), then convert it to a bit offset by
     * shifting left by 3 (each spmpxcfg field is one byte)
     */
    cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
    addr_csr_idx = entry_idx;

    /* encode sPMP config */
    protection = (unsigned long)spmp_cfg->protection;
    protection |= (SPMP_SHIFT == spmp_cfg->order) ? SPMP_A_NA4 : SPMP_A_NAPOT;
    cfgmask = ~(0xFFUL << cfg_shift);
    spmpcfg = (__get_sPMPCFGx(cfg_csr_idx) & cfgmask);
    spmpcfg |= ((protection << cfg_shift) & ~cfgmask);

    /* encode sPMP address */
    if (SPMP_SHIFT == spmp_cfg->order) { /* NA4 */
        spmpaddr = (spmp_cfg->base_addr >> SPMP_SHIFT);
    } else { /* NAPOT */
        addrmask = (1UL << (spmp_cfg->order - SPMP_SHIFT)) - 1;
        spmpaddr = ((spmp_cfg->base_addr >> SPMP_SHIFT) & ~addrmask);
        spmpaddr |= (addrmask >> 1);
    }
    /*
     * Write the CSRs, updating the address first, in case the entry is locked
     * and we would not be able to modify it after setting the config CSR.
     */
    __set_sPMPADDRx(addr_csr_idx, spmpaddr);
    __set_sPMPCFGx(cfg_csr_idx, spmpcfg);
}
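/*
 * Usage sketch (illustrative; the entry index, address and permissions are
 * assumptions): protect a 4 KB U-mode read/write data region at 0x20001000
 * with sPMP entry 1.
 *
 *     spmp_config dram_cfg = {
 *         .protection = SPMP_U | SPMP_R | SPMP_W,
 *         .order      = 12,            // 2^12 = 4 KB, so NAPOT mode is used
 *         .base_addr  = 0x20001000UL,  // must be 4 KB aligned
 *     };
 *     __set_sPMPENTRYx(1, &dram_cfg);
 *     // NAPOT encoding written to spmpaddr1:
 *     // (0x20001000 >> 2) | (((1UL << 10) - 1) >> 1) = 0x08000400 | 0x1FF = 0x080005FF
 */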
/**
 * \brief Get sPMP entry by entry idx
 * \details Read the sPMPxCFG Register and sPMPADDRx and decode them into the given structure.
 * \param [in] entry_idx sPMP entry index (0-15)
 * \param [out] spmp_cfg structure holding the L, U, X, W, R, A fields of the sPMP configuration register,
 *   the memory region base address, and the size of the memory region as a power of 2
 * \return -1 on failure, else 0 on success
 * \remark
 * - If the size of the memory region is a 2^12 (4KB) range, spmp_cfg->order is 12, and so on.
 * - TOR mode of the A field in the sPMP configuration register is not considered here.
 */
__STATIC_INLINE int __get_sPMPENTRYx(unsigned int entry_idx, spmp_config *spmp_cfg)
{
    unsigned int cfg_shift, cfg_csr_idx, addr_csr_idx = 0;
    unsigned long cfgmask, spmpcfg, prot = 0;
    unsigned long t1, addr, spmpaddr, len = 0;
    uint8_t csr_cfg_num = 0;

    /* check parameters */
    if (entry_idx >= __SPMP_ENTRY_NUM || !spmp_cfg) return -1;

    /* calculate sPMP register and offset */
#if __RISCV_XLEN == 32
    csr_cfg_num = 4;
    cfg_csr_idx = entry_idx >> 2;
#elif __RISCV_XLEN == 64
    csr_cfg_num = 8;
    cfg_csr_idx = (entry_idx >> 2) & ~1;
#else
    // TODO Add RV128 Handling
    return -1;
#endif
    cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
    addr_csr_idx = entry_idx;

    /* decode sPMP config */
    cfgmask = (0xFFUL << cfg_shift);
    spmpcfg = (__get_sPMPCFGx(cfg_csr_idx) & cfgmask);
    prot = spmpcfg >> cfg_shift;

    /* decode sPMP address */
    spmpaddr = __get_sPMPADDRx(addr_csr_idx);
    if (SPMP_A_NAPOT == (prot & SPMP_A)) {
        t1 = __CTZ(~spmpaddr);
        addr = (spmpaddr & ~((1UL << t1) - 1)) << SPMP_SHIFT;
        len = (t1 + SPMP_SHIFT + 1);
    } else {
        addr = spmpaddr << SPMP_SHIFT;
        len = SPMP_SHIFT;
    }

    /* return details */
    spmp_cfg->protection = prot;
    spmp_cfg->base_addr = addr;
    spmp_cfg->order = len;
    return 0;
}
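/*
 * Readback sketch (illustrative, continuing the example above): retrieve
 * entry 1 and verify the decoded region.
 *
 *     spmp_config chk;
 *     if (__get_sPMPENTRYx(1, &chk) == 0) {
 *         // chk.base_addr == 0x20001000, chk.order == 12, and chk.protection
 *         // holds the U/R/W bits plus the A (NAPOT) field read from spmp1cfg
 *     }
 */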
#if defined(__SMPU_PRESENT) && (__SMPU_PRESENT == 1)
/**
 * sPMP has been upgraded to the S-mode Memory Protection Unit, renamed SMPU,
 * which still shares the sPMP APIs
 */
typedef spmp_config smpu_config;
#define __get_SMPUCFGx      __get_sPMPCFGx
#define __set_SMPUCFGx      __set_sPMPCFGx
#define __get_SMPUxCFG      __get_sPMPxCFG
#define __set_SMPUxCFG      __set_sPMPxCFG
#define __get_SMPUADDRx     __get_sPMPADDRx
#define __set_SMPUADDRx     __set_sPMPADDRx
#define __set_SMPUENTRYx    __set_sPMPENTRYx
#define __get_SMPUENTRYx    __get_sPMPENTRYx
/**
 * \brief Set each SMPU entry's on/off status
 * \details Write the given value to the SMPUSWITCHx Register.
 * \param [in] val bitmask that activates each entry (up to 64) or not
 * \remark
 * - Each bit of this register holds the on/off status of the corresponding SMPU entry.
 * - An SMPU entry is activated only when both the corresponding bit in smpuswitch and
 *   the A field of smpu[i]cfg are set (i.e., smpuswitch[i] & smpu[i]cfg.A).
 */
__STATIC_INLINE void __set_SMPUSWITCHx(uint64_t val)
{
#if __RISCV_XLEN == 32
    __RV_CSR_WRITE(CSR_SMPUSWITCH0, (uint32_t)val);
    __RV_CSR_WRITE(CSR_SMPUSWITCH1, (uint32_t)(val >> 32));
#elif __RISCV_XLEN == 64
    __RV_CSR_WRITE(CSR_SMPUSWITCH0, val);
#else
    // TODO Add RV128 Handling
#endif
}
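/*
 * Illustrative sketch (the mask value is an assumption): activate SMPU
 * entries 0 and 1 only; each set bit enables the entry whose cfg A field
 * is also set.
 *
 *     __set_SMPUSWITCHx(0x3ULL);          // smpuswitch[1:0] = 0b11
 *     uint64_t sw = __get_SMPUSWITCHx();  // reads back 0x3
 */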
/**
 * \brief Get each SMPU entry's on/off status
 * \details Get the value of the SMPUSWITCHx Register.
 * \return SMPUSWITCHx Register value
 * \remark
 * - Each bit of this register holds the on/off status of the corresponding SMPU entry.
 * - An SMPU entry is activated only when both the corresponding bit in smpuswitch and
 *   the A field of smpu[i]cfg are set (i.e., smpuswitch[i] & smpu[i]cfg.A).
 */
__STATIC_INLINE uint64_t __get_SMPUSWITCHx(void)
{
#if __RISCV_XLEN == 32
    uint32_t lo, hi = 0;

    lo = __RV_CSR_READ(CSR_SMPUSWITCH0);
    hi = __RV_CSR_READ(CSR_SMPUSWITCH1);
    return (uint64_t)((((uint64_t)hi) << 32) | lo);
#elif __RISCV_XLEN == 64
    return (uint64_t)__RV_CSR_READ(CSR_SMPUSWITCH0);
#else
    // TODO Add RV128 Handling
    return 0;
#endif
}
#endif /* defined(__SMPU_PRESENT) && (__SMPU_PRESENT == 1) */

/** @} */ /* End of Doxygen Group NMSIS_Core_SPMP */
#endif /* defined(__SPMP_PRESENT) && (__SPMP_PRESENT == 1) */

#ifdef __cplusplus
}
#endif
#endif /* __CORE_FEATURE_SPMP_H__ */