/*
 * SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "sdkconfig.h"
#include <stdint.h>
#include <assert.h>
#include "soc/soc.h"
#include "soc/soc_caps.h"

// TODO: IDF-5645
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2
#include "soc/lp_aon_reg.h"
#include "soc/pcr_reg.h"
#define SYSTEM_CPU_PER_CONF_REG         PCR_CPU_WAITI_CONF_REG
#define SYSTEM_CPU_WAIT_MODE_FORCE_ON   PCR_CPU_WAIT_MODE_FORCE_ON
#else
#include "soc/rtc_cntl_reg.h"
#endif

#include "hal/soc_hal.h"
#include "hal/mpu_hal.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_cpu.h"
#include "esp_memory_utils.h"
#include "esp_fault.h"

#if __XTENSA__
#include "xtensa/config/core-isa.h"
#else
#include "soc/system_reg.h"     // For SYSTEM_CPU_PER_CONF_REG
#include "soc/dport_access.h"   // For Dport access
#include "riscv/semihosting.h"
#include "riscv/csr.h"          // For PMP_ENTRY. [refactor-todo] create PMP abstraction in rv_utils.h
#endif

#if SOC_CPU_HAS_FLEXIBLE_INTC
#include "riscv/instruction_decode.h"
#endif
/* --------------------------------------------------- CPU Control -----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

void esp_cpu_stall(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1   // We don't allow stalling of the current core
    /*
    We need to write the value "0x86" to stall a particular core. The write location is split into two separate
    bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has its own pair of
    "c0" and "c1" bit fields.

    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
    int rtc_cntl_c0_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_S : RTC_CNTL_SW_STALL_APPCPU_C0_S;
    int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
    int rtc_cntl_c1_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_S : RTC_CNTL_SW_STALL_APPCPU_C1_S;
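    /* The stall code 0x86 (0b10000110) is split across the two fields: the low 2 bits (0x2) go into the "c0"
       field and the upper 6 bits (0x21) go into the "c1" field, which is what the two SET_PERI_REG_MASK()
       writes below do: (0x21 << 2) | 0x2 == 0x86. */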
    CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
    SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s);
    CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
    SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s);
#endif
}
void esp_cpu_unstall(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1   // We don't allow stalling of the current core
    /*
    We need to clear the value "0x86" that was written to stall a particular core. The location of this value is split
    into two separate bit fields named "c0" and "c1", and the two fields are located in different registers. Each core
    has its own pair of "c0" and "c1" bit fields.

    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
    int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
    CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
    CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
#endif
}
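/*
 * Usage sketch (illustrative only, not part of this driver): stall the other core around an operation that must
 * not run concurrently, then release it. Assumes a dual-core target where core 1 is the "other" core.
 *
 *     esp_cpu_stall(1);      // core 1 stops executing
 *     // ... perform the exclusive operation ...
 *     esp_cpu_unstall(1);    // core 1 resumes execution
 */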
void esp_cpu_reset(int core_id)
{
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2 // TODO: IDF-5645
    SET_PERI_REG_MASK(LP_AON_CPUCORE0_CFG_REG, LP_AON_CPU_CORE0_SW_RESET);
#else
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1
    /*
    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_rst_m = (core_id == 0) ? RTC_CNTL_SW_PROCPU_RST_M : RTC_CNTL_SW_APPCPU_RST_M;
#else // SOC_CPU_CORES_NUM > 1
    int rtc_cntl_rst_m = RTC_CNTL_SW_PROCPU_RST_M;
#endif // SOC_CPU_CORES_NUM > 1
    SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_rst_m);
#endif
}
void esp_cpu_wait_for_intr(void)
{
#if __XTENSA__
    xt_utils_wait_for_intr();
#else
    // TODO: IDF-5645 (better to implement with LL) C6 register names are remapped in the #include section at the top
    if (esp_cpu_dbgr_is_attached() && DPORT_REG_GET_BIT(SYSTEM_CPU_PER_CONF_REG, SYSTEM_CPU_WAIT_MODE_FORCE_ON) == 0) {
        /* When SYSTEM_CPU_WAIT_MODE_FORCE_ON is disabled, SBA accesses to memory do not work for the debugger while
           the CPU is in WFI mode, so do not enter that mode when a debugger is connected. */
        return;
    }
    rv_utils_wait_for_intr();
#endif // __XTENSA__
}
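/*
 * Usage sketch (illustrative only): a bare-metal style idle loop could use this call to sleep until the next
 * interrupt; in an IDF application the FreeRTOS idle task normally takes care of this. "work_pending" is a
 * hypothetical flag set from an ISR.
 *
 *     while (!work_pending) {
 *         esp_cpu_wait_for_intr();
 *     }
 */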
/* -------------------------------------------------- CPU Registers ----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

// ---------------- Interrupt Descriptors ------------------

#if SOC_CPU_HAS_FLEXIBLE_INTC
static bool is_intr_num_resv(int intr_num)
{
    // Workaround to reserve interrupt number 1 for Wi-Fi, 5,8 for Bluetooth, 6 for "permanently disabled interrupt"
    // [TODO: IDF-2465]
    uint32_t reserved = BIT(1) | BIT(5) | BIT(6) | BIT(8);

    // Interrupt numbers 0, 3, 4, and 7 are unavailable for the PULP CPU
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2 // TODO: IDF-5728 replace with a better macro name
    reserved |= BIT(0) | BIT(3) | BIT(4) | BIT(7);
#endif

    if (reserved & BIT(intr_num)) {
        return true;
    }

    extern int _vector_table;
    extern int _interrupt_handler;
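    /* Each vector table entry is a single 32-bit JAL instruction. Since _vector_table is declared as an int,
       the pointer arithmetic below advances one 32-bit word per interrupt number, i.e. to the JAL instruction
       belonging to intr_num. */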
    const intptr_t pc = (intptr_t)(&_vector_table + intr_num);
    /* JAL instructions are relative to the PC they are executed from. */
    const intptr_t destination = pc + riscv_decode_offset_from_jal_instruction(pc);
    return destination != (intptr_t)&_interrupt_handler;
}
void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{
    intr_desc_ret->priority = 1;    // TODO: We should make this -1
    intr_desc_ret->type = ESP_CPU_INTR_TYPE_NA;
#if __riscv
    intr_desc_ret->flags = is_intr_num_resv(intr_num) ? ESP_CPU_INTR_DESC_FLAG_RESVD : 0;
#else
    intr_desc_ret->flags = 0;
#endif
}
#else // SOC_CPU_HAS_FLEXIBLE_INTC

typedef struct {
    int priority;
    esp_cpu_intr_type_t type;
    uint32_t flags[SOC_CPU_CORES_NUM];
} intr_desc_t;

#if SOC_CPU_CORES_NUM > 1
// Note: We currently only have dual core targets, so the table initializer is hard coded
const static intr_desc_t intr_desc_table[SOC_CPU_INTR_NUM] = {
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //0
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //1
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //2
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //3
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //4
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //5
#if CONFIG_FREERTOS_CORETIMER_0
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //6
#else
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //6
#endif
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //7
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //8
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //9
    { 1, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } }, //10
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //11
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //12
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //13
    { 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //15
#else
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //15
#endif
    { 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //16
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //17
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //18
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //19
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //20
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //21
    { 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //22
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //23
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //24
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //25
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { 0, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //26
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //27
    { 4, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } }, //28
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //29
    { 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //30
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //31
};
#else // SOC_CPU_CORES_NUM > 1
const static intr_desc_t intr_desc_table[SOC_CPU_INTR_NUM] = {
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //0
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //1
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //2
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //3
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //4
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //5
#if CONFIG_FREERTOS_CORETIMER_0
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //6
#else
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //6
#endif
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //7
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //8
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //9
    { 1, ESP_CPU_INTR_TYPE_EDGE, { 0 } }, //10
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //11
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //12
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //13
    { 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //15
#else
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //15
#endif
    { 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //16
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //17
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //18
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //19
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //20
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //21
    { 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //22
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //23
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //24
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //25
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //26
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //27
    { 4, ESP_CPU_INTR_TYPE_EDGE, { 0 } }, //28
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //29
    { 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //30
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //31
};
#endif // SOC_CPU_CORES_NUM > 1

void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM == 1
    core_id = 0;    // If this is a single core target, hard code CPU ID to 0
#endif
    intr_desc_ret->priority = intr_desc_table[intr_num].priority;
    intr_desc_ret->type = intr_desc_table[intr_num].type;
    intr_desc_ret->flags = intr_desc_table[intr_num].flags[core_id];
}
#endif // SOC_CPU_HAS_FLEXIBLE_INTC
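/*
 * Usage sketch (illustrative only): query the descriptor of an interrupt before allocating it. The core and
 * interrupt numbers below are arbitrary examples.
 *
 *     esp_cpu_intr_desc_t desc;
 *     esp_cpu_intr_get_desc(0, 3, &desc);
 *     if (!(desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD)) {
 *         // interrupt 3 on core 0 is not reserved and may be used
 *     }
 */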
/* -------------------------------------------------- Memory Ports -----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

#if SOC_CPU_HAS_PMA
static void esp_cpu_configure_invalid_regions(void)
{
    const unsigned PMA_NONE = PMA_EN;
    __attribute__((unused)) const unsigned PMA_RW  = PMA_EN | PMA_R | PMA_W;
    __attribute__((unused)) const unsigned PMA_RX  = PMA_EN | PMA_R | PMA_X;
    __attribute__((unused)) const unsigned PMA_RWX = PMA_EN | PMA_R | PMA_W | PMA_X;

    // 1. Gap at bottom of address space
    PMA_ENTRY_SET_TOR(0, SOC_DEBUG_LOW, PMA_TOR | PMA_NONE);

    // 2. Gap between debug region & IROM
    PMA_ENTRY_SET_TOR(1, SOC_DEBUG_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(2, SOC_IROM_MASK_LOW, PMA_TOR | PMA_NONE);

    // 3. Gap between ROM & RAM
    PMA_ENTRY_SET_TOR(3, SOC_DROM_MASK_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(4, SOC_IRAM_LOW, PMA_TOR | PMA_NONE);

    // 4. Gap between DRAM and I_Cache
    PMA_ENTRY_SET_TOR(5, SOC_IRAM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(6, SOC_IROM_LOW, PMA_TOR | PMA_NONE);

    // 5. Gap between D_Cache & LP_RAM
    PMA_ENTRY_SET_TOR(7, SOC_DROM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(8, SOC_RTC_IRAM_LOW, PMA_TOR | PMA_NONE);

    // 6. Gap between LP memory & peripheral addresses
    PMA_ENTRY_SET_TOR(9, SOC_RTC_IRAM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(10, SOC_PERIPHERAL_LOW, PMA_TOR | PMA_NONE);

    // 7. End of address space
    PMA_ENTRY_SET_TOR(11, SOC_PERIPHERAL_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(12, UINT32_MAX, PMA_TOR | PMA_NONE);
}
#endif
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2 || CONFIG_IDF_TARGET_ESP32S3
void esp_cpu_configure_region_protection(void)
{
    /* Note: currently this is configured the same on all Xtensa targets
     *
     * All of these chips divide the address space into 8 regions of 512 MB each.
     */
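    /* MPU region index i covers the 512 MB window starting at i * 0x20000000 (e.g. region 1 covers
       0x20000000..0x3FFFFFFF), which is how the indices below map to the addresses listed in the comment. */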
    const int illegal_regions[] = {0, 4, 5, 6, 7}; // 0x00000000, 0x80000000, 0xa0000000, 0xc0000000, 0xe0000000
    for (size_t i = 0; i < sizeof(illegal_regions) / sizeof(illegal_regions[0]); ++i) {
        mpu_hal_set_region_access(illegal_regions[i], MPU_REGION_ILLEGAL);
    }

    mpu_hal_set_region_access(1, MPU_REGION_RW); // 0x20000000
}
#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H4
void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) Note: ESP32-C3/H4 CPU doesn't support overlapping PMP regions
     *
     * 2) Therefore, we use TOR (top of range) entries to map the whole address
     * space, bottom to top.
     *
     * 3) There are not enough entries to describe all the memory regions 100% accurately.
     *
     * 4) This means some gaps (invalid memory) are accessible. Priority for extending regions
     * to cover gaps is to extend read-only or read-execute regions only
     * (executing unmapped addresses should always fault with an invalid instruction exception, and read-only means
     * stores will correctly fault even if reads may return some invalid value).
     *
     * 5) Entries are grouped in order with some static asserts to try and verify everything is
     * correct.
     */
    const unsigned NONE = PMP_L | PMP_TOR;
    const unsigned R    = PMP_L | PMP_TOR | PMP_R;
    const unsigned RW   = PMP_L | PMP_TOR | PMP_R | PMP_W;
    const unsigned RX   = PMP_L | PMP_TOR | PMP_R | PMP_X;
    const unsigned RWX  = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;
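    /* With TOR addressing, PMP entry i applies to addresses in the range [pmpaddr(i-1), pmpaddr(i)), so each
       PMP_ENTRY_SET() below defines the upper bound of one region and implicitly uses the previous entry's
       address as the lower bound. */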
    // 1. Gap at bottom of address space
    PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);

    // 2. Debug region
    PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 3. Gap between debug region & DROM (flash cache)
    PMP_ENTRY_SET(2, SOC_DROM_LOW, NONE);
    _Static_assert(SOC_DEBUG_HIGH < SOC_DROM_LOW, "Invalid PMP entry order");

    // 4. DROM (flash cache)
    // 5. Gap between DROM & DRAM
    // (Note: To save PMP entries these two are merged into one read-only region)
    PMP_ENTRY_SET(3, SOC_DRAM_LOW, R);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid DROM region");
    _Static_assert(SOC_DROM_HIGH < SOC_DRAM_LOW, "Invalid PMP entry order");

    // 6. DRAM
    PMP_ENTRY_SET(4, SOC_DRAM_HIGH, RW);
    _Static_assert(SOC_DRAM_LOW < SOC_DRAM_HIGH, "Invalid DRAM region");

    // 7. Gap between DRAM and Mask DROM
    // 8. Mask DROM
    // (Note: to save PMP entries these two are merged into one read-only region)
    PMP_ENTRY_SET(5, SOC_DROM_MASK_HIGH, R);
    _Static_assert(SOC_DRAM_HIGH < SOC_DROM_MASK_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid mask DROM region");

    // 9. Gap between mask DROM and mask IROM
    // 10. Mask IROM
    // (Note: to save PMP entries these two are merged into one RX region)
    PMP_ENTRY_SET(6, SOC_IROM_MASK_HIGH, RX);
    _Static_assert(SOC_DROM_MASK_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid mask IROM region");

    // 11. Gap between mask IROM & IRAM
    PMP_ENTRY_SET(7, SOC_IRAM_LOW, NONE);
    _Static_assert(SOC_IROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");

    // 12. IRAM
    PMP_ENTRY_SET(8, SOC_IRAM_HIGH, RWX);
    _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid IRAM region");

    // 13. Gap between IRAM and IROM
    // 14. IROM (flash cache)
    // (Note: to save PMP entries these two are merged into one RX region)
    PMP_ENTRY_SET(9, SOC_IROM_HIGH, RX);
    _Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid IROM region");

    // 15. Gap between IROM & RTC slow memory
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
    _Static_assert(SOC_IROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");

    // 16. RTC fast memory
    PMP_ENTRY_SET(11, SOC_RTC_IRAM_HIGH, RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");

    // 17. Gap between RTC fast memory & peripheral addresses
    PMP_ENTRY_SET(12, SOC_PERIPHERAL_LOW, NONE);
    _Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");

    // 18. Peripheral addresses
    PMP_ENTRY_SET(13, SOC_PERIPHERAL_HIGH, RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");

    // 19. End of address space
    PMP_ENTRY_SET(14, UINT32_MAX, NONE);            // all but last 4 bytes
    PMP_ENTRY_SET(15, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}
#elif CONFIG_IDF_TARGET_ESP32C2

#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
extern int _iram_end;
extern int _data_start;
#define IRAM_END    (int)&_iram_end
#define DRAM_START  (int)&_data_start
#else
#define IRAM_END    SOC_DIRAM_IRAM_HIGH
#define DRAM_START  SOC_DIRAM_DRAM_LOW
#endif

#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE    0x0
#define CONDITIONAL_RX      PMP_R | PMP_X
#define CONDITIONAL_RW      PMP_R | PMP_W
#define CONDITIONAL_RWX     PMP_R | PMP_W | PMP_X
#else
// With L bit set
#define CONDITIONAL_NONE    NONE
#define CONDITIONAL_RX      RX
#define CONDITIONAL_RW      RW
#define CONDITIONAL_RWX     RWX
#endif
void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) The ESP32-C2 CPU supports overlapping PMP regions; configuration relies on the static priority
     * feature (the lowest numbered entry has the highest priority).
     *
     * 2) Therefore, we use TOR (top of range) and NAPOT entries to map the effective areas, and finally
     * deny access to any address not covered by them.
     *
     * 3) PMPADDR entries 3-15 are hardcoded to fixed values, while PMPADDR entries 0-2 are programmed to split
     * the internal SRAM into IRAM and DRAM. All PMPCFG entries are available.
     *
     * 4) Ideally, PMPADDR 0-2 entries should be configured twice, once during bootloader startup and again during
     * app startup. However, the CPU currently always executes in machine mode, and to enforce these permissions
     * in machine mode we need to set the Lock (L) bit, which once set cannot be reconfigured. So we only configure
     * PMPADDR 0-2 during app startup.
     */
    const unsigned NONE = PMP_L;
    const unsigned R    = PMP_L | PMP_R;
    const unsigned X    = PMP_L | PMP_X;
    const unsigned RW   = PMP_L | PMP_R | PMP_W;
    const unsigned RX   = PMP_L | PMP_R | PMP_X;
    const unsigned RWX  = PMP_L | PMP_R | PMP_W | PMP_X;

    /* There are 4 configuration scenarios for PMPADDR 0-2
     *
     * 1. Bootloader build:
     *    - We cannot set the lock bit as we need to reconfigure it again for the application.
     *      We configure PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire valid DRAM range.
     *
     * 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled
     *    - We split the SRAM into IRAM and DRAM such that the IRAM region cannot be accessed via the DBUS
     *      and the DRAM region cannot be accessed via the IBUS. We use the _iram_end and _data_start markers to set the boundaries.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled
     *    - The IRAM-DRAM split is not enabled, so we just need to ensure that accesses succeed only for valid address ranges.
     *      For that we set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 4. CPU is in OCD debug mode
     *    - The IRAM-DRAM split is not enabled so that OpenOCD can write to and execute from IRAM.
     *      We set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * PMPADDR 3-15 are hardcoded and are applicable to both the bootloader and the application, so we configure and
     * lock them during the bootloader build itself. During the application build, reconfiguration of these PMPADDR
     * entries is silently ignored by the CPU.
     */
    if (esp_cpu_dbgr_is_attached()) {
        // Anti-FI check that the CPU is really in OCD mode
        ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());

        // 1. IRAM
        PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, NONE);
        PMP_ENTRY_SET(1, SOC_DIRAM_IRAM_HIGH, PMP_TOR | RWX);

        // 2. DRAM
        PMP_ENTRY_SET(2, SOC_DIRAM_DRAM_LOW, NONE);
        PMP_ENTRY_CFG_SET(3, PMP_TOR | RW);
    } else {
        // 1. IRAM
        PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, CONDITIONAL_NONE);
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT
        PMP_ENTRY_SET(1, IRAM_END, PMP_TOR | CONDITIONAL_RX);
#else
        PMP_ENTRY_SET(1, IRAM_END, PMP_TOR | CONDITIONAL_RWX);
#endif
        // 2. DRAM
        PMP_ENTRY_SET(2, DRAM_START, CONDITIONAL_NONE);
        PMP_ENTRY_CFG_SET(3, PMP_TOR | CONDITIONAL_RW);
    }

    // 3. Debug region
    PMP_ENTRY_CFG_SET(4, PMP_NAPOT | RWX);

    // 4. DROM (flash dcache)
    PMP_ENTRY_CFG_SET(5, PMP_NAPOT | R);

    // 5. DROM_MASK
    PMP_ENTRY_CFG_SET(6, NONE);
    PMP_ENTRY_CFG_SET(7, PMP_TOR | R);

    // 6. IROM_MASK
    PMP_ENTRY_CFG_SET(8, NONE);
    PMP_ENTRY_CFG_SET(9, PMP_TOR | RX);

    // 7. IROM (flash icache)
    PMP_ENTRY_CFG_SET(10, PMP_NAPOT | RX);

    // 8. Peripheral addresses
    PMP_ENTRY_CFG_SET(11, PMP_NAPOT | RW);

    // 9. SRAM (used as ICache)
    PMP_ENTRY_CFG_SET(12, PMP_NAPOT | X);

    // 10. No access to any remaining address range (0x0 - 0xFFFF_FFFF)
    PMP_ENTRY_CFG_SET(13, PMP_NA4 | NONE);  // last 4 bytes (0xFFFFFFFC)
    PMP_ENTRY_CFG_SET(14, NONE);
    PMP_ENTRY_CFG_SET(15, PMP_TOR | NONE);
}
#elif CONFIG_IDF_TARGET_ESP32C6

#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE    0x0
#define CONDITIONAL_RX      PMP_R | PMP_X
#define CONDITIONAL_RW      PMP_R | PMP_W
#define CONDITIONAL_RWX     PMP_R | PMP_W | PMP_X
#else
// With L bit set
#define CONDITIONAL_NONE    PMP_NONE
#define CONDITIONAL_RX      PMP_RX
#define CONDITIONAL_RW      PMP_RW
#define CONDITIONAL_RWX     PMP_RWX
#endif
void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) Note: ESP32-C6 CPU doesn't support overlapping PMP regions
     *
     * 2) ESP32-C6 supports 16 PMA regions, so we use this feature to block all the invalid address ranges
     *
     * 3) We use a combination of NAPOT (Naturally Aligned Power Of Two) and TOR (top of range)
     * entries to map all the valid address space, bottom to top. This leaves us with some extra PMP entries
     * which can be used to provide more granular access
     *
     * 4) Entries are grouped in order with some static asserts to try and verify everything is
     * correct.
     */

    /* There are 4 configuration scenarios for SRAM
     *
     * 1. Bootloader build:
     *    - We cannot set the lock bit as we need to reconfigure it again for the application.
     *      We configure the PMP to cover the entire valid IRAM and DRAM range.
     *
     * 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled
     *    - We split the SRAM into IRAM and DRAM such that the IRAM region cannot be written to
     *      and the DRAM region cannot be executed. We use the _iram_end and _data_start markers to set the boundaries.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled
     *    - The IRAM-DRAM split is not enabled, so we just need to ensure that accesses succeed only for valid address ranges.
     *      For that we set the PMP to cover the entire valid IRAM and DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 4. CPU is in OCD debug mode
     *    - The IRAM-DRAM split is not enabled so that OpenOCD can write to and execute from IRAM.
     *      We set the PMP to cover the entire valid IRAM and DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     */
    const unsigned PMP_NONE = PMP_L;
    const unsigned PMP_RW   = PMP_L | PMP_R | PMP_W;
    const unsigned PMP_RX   = PMP_L | PMP_R | PMP_X;
    const unsigned PMP_RWX  = PMP_L | PMP_R | PMP_W | PMP_X;
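    /* NAPOT entries describe a naturally aligned, power-of-two sized region encoded in a single pmpaddr value
       (computed here with PMPADDR_NAPOT()), whereas TOR entries use the previous entry's address as the lower
       bound of the region. */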
    //
    // Configure all the invalid address regions using PMA
    //
    esp_cpu_configure_invalid_regions();

    //
    // Configure all the valid address regions using PMP
    //

    // 1. Debug region
    const uint32_t pmpaddr0 = PMPADDR_NAPOT(SOC_DEBUG_LOW, SOC_DEBUG_HIGH);
    PMP_ENTRY_SET(0, pmpaddr0, PMP_NAPOT | PMP_RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 2.1 I-ROM
    PMP_ENTRY_SET(1, SOC_IROM_MASK_LOW, PMP_NONE);
    PMP_ENTRY_SET(2, SOC_IROM_MASK_HIGH, PMP_TOR | PMP_RX);
    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid I-ROM region");

    // 2.2 D-ROM
    PMP_ENTRY_SET(3, SOC_DROM_MASK_LOW, PMP_NONE);
    PMP_ENTRY_SET(4, SOC_DROM_MASK_HIGH, PMP_TOR | PMP_R);
    _Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid D-ROM region");

    if (esp_cpu_dbgr_is_attached()) {
        // Anti-FI check that the CPU is really in OCD mode
        ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());

        // 3. IRAM and DRAM
        const uint32_t pmpaddr5 = PMPADDR_NAPOT(SOC_IRAM_LOW, SOC_IRAM_HIGH);
        PMP_ENTRY_SET(5, pmpaddr5, PMP_NAPOT | PMP_RWX);
        _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
    } else {
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
        extern int _iram_end;
        // 3. IRAM and DRAM
        PMP_ENTRY_SET(5, SOC_IRAM_LOW, PMP_NONE);
        PMP_ENTRY_SET(6, (int)&_iram_end, PMP_TOR | PMP_RX);
        PMP_ENTRY_SET(7, SOC_DRAM_HIGH, PMP_TOR | PMP_RW);
#else
        // 3. IRAM and DRAM
        const uint32_t pmpaddr5 = PMPADDR_NAPOT(SOC_IRAM_LOW, SOC_IRAM_HIGH);
        PMP_ENTRY_SET(5, pmpaddr5, PMP_NAPOT | CONDITIONAL_RWX);
        _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
#endif
    }

    // 4. I_Cache (flash)
    const uint32_t pmpaddr8 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
    PMP_ENTRY_SET(8, pmpaddr8, PMP_NAPOT | PMP_RX);
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid I_Cache region");

    // 5. D_Cache (flash)
    const uint32_t pmpaddr9 = PMPADDR_NAPOT(SOC_DROM_LOW, SOC_DROM_HIGH);
    PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | PMP_R);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid D_Cache region");

    // 6. LP memory
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
    extern int _rtc_text_end;
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, PMP_NONE);
    PMP_ENTRY_SET(11, (int)&_rtc_text_end, PMP_TOR | PMP_RX);
    PMP_ENTRY_SET(12, SOC_RTC_IRAM_HIGH, PMP_TOR | PMP_RW);
#else
    const uint32_t pmpaddr10 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
    PMP_ENTRY_SET(10, pmpaddr10, PMP_NAPOT | CONDITIONAL_RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
#endif

    // 7. Peripheral addresses
    const uint32_t pmpaddr13 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
    PMP_ENTRY_SET(13, pmpaddr13, PMP_NAPOT | PMP_RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
}
#elif CONFIG_IDF_TARGET_ESP32H2
// ESP32H2-TODO: IDF-6452
void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) Note: ESP32-H2 CPU doesn't support overlapping PMP regions
     *
     * 2) Therefore, we use TOR (top of range) entries to map the whole address
     * space, bottom to top.
     *
     * 3) There are not enough entries to describe all the memory regions 100% accurately.
     *
     * 4) This means some gaps (invalid memory) are accessible. Priority for extending regions
     * to cover gaps is to extend read-only or read-execute regions only
     * (executing unmapped addresses should always fault with an invalid instruction exception, and read-only means
     * stores will correctly fault even if reads may return some invalid value).
     *
     * 5) Entries are grouped in order with some static asserts to try and verify everything is
     * correct.
     */
    const unsigned NONE = PMP_L | PMP_TOR;
    const unsigned RW   = PMP_L | PMP_TOR | PMP_R | PMP_W;
    const unsigned RX   = PMP_L | PMP_TOR | PMP_R | PMP_X;
    const unsigned RWX  = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;

    // 1. Gap at bottom of address space
    PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);

    // 2. Debug region
    PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 3. Gap between debug region & IROM
    PMP_ENTRY_SET(2, SOC_IROM_MASK_LOW, NONE);
    _Static_assert(SOC_DEBUG_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");

    // 4. ROM
    PMP_ENTRY_SET(3, SOC_DROM_MASK_HIGH, RX);
    _Static_assert(SOC_IROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid ROM region");

    // 5. Gap between ROM & RAM
    PMP_ENTRY_SET(4, SOC_IRAM_LOW, NONE);
    _Static_assert(SOC_DROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");

    // 6. RAM
    PMP_ENTRY_SET(5, SOC_IRAM_HIGH, RWX);
    _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");

    // 7. Gap between DRAM and I_Cache
    PMP_ENTRY_SET(6, SOC_IROM_LOW, NONE);
    _Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");

    // 8. I_Cache (flash)
    PMP_ENTRY_SET(7, SOC_IROM_HIGH, RWX);
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid I_Cache region");

    // 9. D_Cache (flash)
    PMP_ENTRY_SET(8, SOC_DROM_HIGH, RW);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid D_Cache region");

    // 10. Gap between D_Cache & LP_RAM
    PMP_ENTRY_SET(9, SOC_RTC_IRAM_LOW, NONE);
    _Static_assert(SOC_DROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");

    // 11. LP memory
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_HIGH, RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");

    // 12. Gap between LP memory & peripheral addresses
    PMP_ENTRY_SET(11, SOC_PERIPHERAL_LOW, NONE);
    _Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");

    // 13. Peripheral addresses
    PMP_ENTRY_SET(12, SOC_PERIPHERAL_HIGH, RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");

    // 14. End of address space
    PMP_ENTRY_SET(13, UINT32_MAX, NONE);            // all but last 4 bytes
    PMP_ENTRY_SET(14, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}
#endif
/* ---------------------------------------------------- Debugging ------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

// --------------- Breakpoints/Watchpoints -----------------

#if SOC_CPU_BREAKPOINTS_NUM > 0
esp_err_t esp_cpu_set_breakpoint(int bp_num, const void *bp_addr)
{
    /*
    Todo:
    - Check that bp_num is in range
    */
#if __XTENSA__
    xt_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#else
    if (esp_cpu_dbgr_is_attached()) {
        /* If we want to set a breakpoint which, when hit, transfers control to the debugger,
         * we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
         * That `action` value is supported only when `dmode` of `tdata1` is set.
         * But `dmode` can be modified by the debugger only (from Debug Mode).
         *
         * So when a debugger is connected we use a special syscall to ask it to set the breakpoint for us.
         */
        long args[] = {true, bp_num, (long)bp_addr};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#endif // __XTENSA__
    return ESP_OK;
}
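/*
 * Usage sketch (illustrative only): set a temporary breakpoint at the entry of a function and remove it later.
 * "my_handler" is a hypothetical function used purely for illustration.
 *
 *     extern void my_handler(void);
 *     esp_cpu_set_breakpoint(0, (const void *)&my_handler);
 *     // ...
 *     esp_cpu_clear_breakpoint(0);
 */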
esp_err_t esp_cpu_clear_breakpoint(int bp_num)
{
    /*
    Todo:
    - Check if the bp_num is valid
    */
#if __XTENSA__
    xt_utils_clear_breakpoint(bp_num);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {false, bp_num};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_clear_breakpoint(bp_num);
#endif // __XTENSA__
    return ESP_OK;
}
#endif // SOC_CPU_BREAKPOINTS_NUM > 0
#if SOC_CPU_WATCHPOINTS_NUM > 0
esp_err_t esp_cpu_set_watchpoint(int wp_num, const void *wp_addr, size_t size, esp_cpu_watchpoint_trigger_t trigger)
{
    /*
    Todo:
    - Check that wp_num is in range
    - Check if the wp_num is already in use
    */
    // Check if size is 2^n, where n is in [0...6]
    if (size < 1 || size > 64 || (size & (size - 1)) != 0) {
        return ESP_ERR_INVALID_ARG;
    }
    bool on_read = (trigger == ESP_CPU_WATCHPOINT_LOAD || trigger == ESP_CPU_WATCHPOINT_ACCESS);
    bool on_write = (trigger == ESP_CPU_WATCHPOINT_STORE || trigger == ESP_CPU_WATCHPOINT_ACCESS);
#if __XTENSA__
    xt_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {true, wp_num, (long)wp_addr, (long)size,
                       (long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))
                      };
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#endif // __XTENSA__
    return ESP_OK;
}
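/*
 * Usage sketch (illustrative only): trap writes to a 4-byte variable. "s_flag" is a hypothetical variable
 * used purely for illustration.
 *
 *     static volatile uint32_t s_flag;
 *     esp_cpu_set_watchpoint(0, (const void *)&s_flag, sizeof(s_flag), ESP_CPU_WATCHPOINT_STORE);
 *     // ... any store to s_flag now raises a debug exception (or stops the attached debugger) ...
 *     esp_cpu_clear_watchpoint(0);
 */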
esp_err_t esp_cpu_clear_watchpoint(int wp_num)
{
    /*
    Todo:
    - Check if the wp_num is valid
    */
#if __XTENSA__
    xt_utils_clear_watchpoint(wp_num);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {false, wp_num};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_clear_watchpoint(wp_num);
#endif // __XTENSA__
    return ESP_OK;
}
#endif // SOC_CPU_WATCHPOINTS_NUM > 0
/* ------------------------------------------------------ Misc ---------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

#if __XTENSA__ && XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
static DRAM_ATTR uint32_t external_ram_cas_lock = 0;
#endif

bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
#if __XTENSA__
    bool ret;
#if XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    // Check if the target address is in external RAM
    if ((uint32_t)addr >= SOC_EXTRAM_DATA_LOW && (uint32_t)addr < SOC_EXTRAM_DATA_HIGH) {
        /* The target address is in external RAM, thus the native CAS instruction cannot be used. Instead, we achieve
           atomicity by disabling interrupts and then acquiring an external RAM CAS lock. */
        uint32_t intr_level;
        __asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
                              : "=r"(intr_level));
        if (!xt_utils_compare_and_set(&external_ram_cas_lock, 0, 1)) {
            // External RAM CAS lock already taken. Exit
            ret = false;
            goto exit;
        }
        // Now we compare and set the target address
        ret = (*addr == compare_value);
        if (ret) {
            *addr = new_value;
        }
        // Release the external RAM CAS lock
        external_ram_cas_lock = 0;
exit:
        // Re-enable interrupts
        __asm__ __volatile__ ("memw \n"
                              "wsr %0, ps\n"
                              :: "r"(intr_level));
    } else
#endif // XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    {
        // The target address is in internal RAM. Use the CPU's native CAS instruction
        ret = xt_utils_compare_and_set(addr, compare_value, new_value);
    }
    return ret;
#else // __XTENSA__
    // Single core targets don't have an atomic CAS instruction, so the access method is the same for internal
    // and external RAM
    return rv_utils_compare_and_set(addr, compare_value, new_value);
#endif
}
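/*
 * Usage sketch (illustrative only): a minimal spinlock built on compare-and-set. "s_lock" is a hypothetical
 * lock word used purely for illustration.
 *
 *     static volatile uint32_t s_lock = 0;
 *     while (!esp_cpu_compare_and_set(&s_lock, 0, 1)) {
 *         // busy-wait until the lock word changes from 0 to 1 atomically
 *     }
 *     // ... critical section ...
 *     s_lock = 0;    // release the lock
 */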