/*
 * SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "sdkconfig.h"
#include <stdint.h>
#include <assert.h>
#include "soc/soc.h"
#include "soc/soc_caps.h"
#include "soc/rtc_cntl_reg.h"
#include "hal/soc_hal.h"
#include "hal/mpu_hal.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_cpu.h"
#include "esp_memory_utils.h"
#include "esp_fault.h"
#if __XTENSA__
#include "xtensa/config/core-isa.h"
#else
#include "soc/system_reg.h"     // For SYSTEM_CPU_PER_CONF_REG
#include "soc/dport_access.h"   // For DPORT access
#include "riscv/semihosting.h"
#include "riscv/csr.h"          // For PMP_ENTRY. [refactor-todo] create PMP abstraction in rv_utils.h
#endif
#if SOC_CPU_HAS_FLEXIBLE_INTC
#include "riscv/instruction_decode.h"
#endif

/* --------------------------------------------------- CPU Control -----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */
void esp_cpu_stall(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1 // We don't allow stalling of the current core
    /*
    We need to write the value "0x86" to stall a particular core. The write location is split into two separate
    bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has its own
    pair of "c0" and "c1" bit fields.

    Note: This function can be called when the cache is disabled. We use a ternary operator instead of an array so
    that the register masks/shifts are stored in this function's "rodata" section, instead of the source file's
    "rodata" section (see IDF-5214).
    */
    int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
    int rtc_cntl_c0_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_S : RTC_CNTL_SW_STALL_APPCPU_C0_S;
    int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
    int rtc_cntl_c1_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_S : RTC_CNTL_SW_STALL_APPCPU_C1_S;
    CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
    SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s);
    CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
    SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s);
#endif
}
void esp_cpu_unstall(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1 // We don't allow stalling of the current core
    /*
    We need to clear the value "0x86" to unstall a particular core. The location of this value is split into
    two separate bit fields named "c0" and "c1", and the two fields are located in different registers. Each core
    has its own pair of "c0" and "c1" bit fields.

    Note: This function can be called when the cache is disabled. We use a ternary operator instead of an array so
    that the register masks/shifts are stored in this function's "rodata" section, instead of the source file's
    "rodata" section (see IDF-5214).
    */
    int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
    int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
    CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
    CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
#endif
}
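
/*
 * Usage sketch (illustrative only, not part of this file's API): a caller that
 * must freeze the other core around a critical operation could pair the two
 * calls like this. do_critical_work() is a hypothetical placeholder.
 *
 *     int other_core = !esp_cpu_get_core_id();
 *     esp_cpu_stall(other_core);   // other core halts in hardware
 *     do_critical_work();          // runs while the other core is frozen
 *     esp_cpu_unstall(other_core); // other core resumes where it stopped
 */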
void esp_cpu_reset(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1
    /*
    Note: This function can be called when the cache is disabled. We use a ternary operator instead of an array so
    that the register masks/shifts are stored in this function's "rodata" section, instead of the source file's
    "rodata" section (see IDF-5214).
    */
    int rtc_cntl_rst_m = (core_id == 0) ? RTC_CNTL_SW_PROCPU_RST_M : RTC_CNTL_SW_APPCPU_RST_M;
#else // SOC_CPU_CORES_NUM > 1
    int rtc_cntl_rst_m = RTC_CNTL_SW_PROCPU_RST_M;
#endif // SOC_CPU_CORES_NUM > 1
    SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_rst_m);
}
void esp_cpu_wait_for_intr(void)
{
#if __XTENSA__
    xt_utils_wait_for_intr();
#else
    if (esp_cpu_dbgr_is_attached() && DPORT_REG_GET_BIT(SYSTEM_CPU_PER_CONF_REG, SYSTEM_CPU_WAIT_MODE_FORCE_ON) == 0) {
        /* When SYSTEM_CPU_WAIT_MODE_FORCE_ON is disabled, the debugger's SBA access to memory does not work while
         * the CPU is in WFI mode, so do not enter WFI when a debugger is connected. */
        return;
    }
    rv_utils_wait_for_intr();
#endif // __XTENSA__
}
/* -------------------------------------------------- CPU Registers ----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

// ---------------- Interrupt Descriptors ------------------

#if SOC_CPU_HAS_FLEXIBLE_INTC
static bool is_intr_num_resv(int intr_num)
{
    // Workaround to reserve interrupt number 1 for Wi-Fi, 5 and 8 for Bluetooth, and 6 for the "permanently
    // disabled interrupt" [TODO: IDF-2465]
    const uint32_t reserved = BIT(1) | BIT(5) | BIT(6) | BIT(8);
    if (reserved & BIT(intr_num)) {
        return true;
    }

    extern int _vector_table;
    extern int _interrupt_handler;
    const intptr_t pc = (intptr_t)(&_vector_table + intr_num);

    /* JAL instructions are relative to the PC they are executed from. */
    const intptr_t destination = pc + riscv_decode_offset_from_jal_instruction(pc);

    return destination != (intptr_t)&_interrupt_handler;
}
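
/*
 * For reference, the decode above works because each vector table entry is a
 * 32-bit RV32I JAL instruction, whose J-type immediate is scattered across the
 * instruction word as imm[20|10:1|11|19:12]. A minimal sketch of the offset
 * extraction (illustrative only; the real helper comes from
 * riscv/instruction_decode.h):
 *
 *     static inline int32_t jal_offset(uint32_t insn)
 *     {
 *         uint32_t imm = (((insn >> 31) & 0x1)   << 20)  // imm[20]
 *                      | (((insn >> 21) & 0x3FF) << 1)   // imm[10:1]
 *                      | (((insn >> 20) & 0x1)   << 11)  // imm[11]
 *                      | (((insn >> 12) & 0xFF)  << 12); // imm[19:12]
 *         return ((int32_t)(imm << 11)) >> 11;           // sign-extend 21 bits
 *     }
 */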

void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{
    intr_desc_ret->priority = 1; // Todo: We should make this -1
    intr_desc_ret->type = ESP_CPU_INTR_TYPE_NA;
#if __riscv
    intr_desc_ret->flags = is_intr_num_resv(intr_num) ? ESP_CPU_INTR_DESC_FLAG_RESVD : 0;
#else
    intr_desc_ret->flags = 0;
#endif
}
#else // SOC_CPU_HAS_FLEXIBLE_INTC

typedef struct {
    int priority;
    esp_cpu_intr_type_t type;
    uint32_t flags[SOC_CPU_CORES_NUM];
} intr_desc_t;

#if SOC_CPU_CORES_NUM > 1
// Note: We currently only have dual core targets, so the table initializer is hard coded
const static intr_desc_t intr_desc_table[SOC_CPU_INTR_NUM] = {
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //0
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //1
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //2
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //3
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //4
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //5
#if CONFIG_FREERTOS_CORETIMER_0
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //6
#else
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //6
#endif
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //7
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //8
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //9
    { 1, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } }, //10
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //11
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //12
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //13
    { 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //15
#else
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //15
#endif
    { 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //16
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //17
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //18
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //19
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //20
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //21
    { 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //22
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //23
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //24
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //25
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { 0, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //26
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //27
    { 4, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } }, //28
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //29
    { 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //30
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //31
};
#else // SOC_CPU_CORES_NUM > 1
const static intr_desc_t intr_desc_table[SOC_CPU_INTR_NUM] = {
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //0
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //1
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //2
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //3
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //4
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //5
#if CONFIG_FREERTOS_CORETIMER_0
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //6
#else
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //6
#endif
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //7
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //8
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //9
    { 1, ESP_CPU_INTR_TYPE_EDGE, { 0 } }, //10
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //11
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //12
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //13
    { 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //15
#else
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //15
#endif
    { 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //16
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //17
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //18
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //19
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //20
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //21
    { 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //22
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //23
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //24
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //25
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //26
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //27
    { 4, ESP_CPU_INTR_TYPE_EDGE, { 0 } }, //28
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //29
    { 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //30
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //31
};
#endif // SOC_CPU_CORES_NUM > 1

void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM == 1
    core_id = 0; // If this is a single core target, hard code the CPU ID to 0
#endif
    intr_desc_ret->priority = intr_desc_table[intr_num].priority;
    intr_desc_ret->type = intr_desc_table[intr_num].type;
    intr_desc_ret->flags = intr_desc_table[intr_num].flags[core_id];
}
#endif // SOC_CPU_HAS_FLEXIBLE_INTC
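
/*
 * Usage sketch (illustrative only): a caller probing for a usable level-type
 * interrupt on the current core might filter on the descriptor like this:
 *
 *     esp_cpu_intr_desc_t desc;
 *     esp_cpu_intr_get_desc(esp_cpu_get_core_id(), intr_num, &desc);
 *     bool usable = ((desc.flags & ESP_CPU_INTR_DESC_FLAG_RESVD) == 0)
 *                   && (desc.type == ESP_CPU_INTR_TYPE_LEVEL);
 */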

/* -------------------------------------------------- Memory Ports -----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2 || CONFIG_IDF_TARGET_ESP32S3
void esp_cpu_configure_region_protection(void)
{
    /* Note: currently this is configured the same on all Xtensa targets
     *
     * All of these chips have the address space divided into 8 regions, 512MB each.
     */
    const int illegal_regions[] = {0, 4, 5, 6, 7}; // 0x00000000, 0x80000000, 0xa0000000, 0xc0000000, 0xe0000000
    for (size_t i = 0; i < sizeof(illegal_regions) / sizeof(illegal_regions[0]); ++i) {
        mpu_hal_set_region_access(illegal_regions[i], MPU_REGION_ILLEGAL);
    }

    mpu_hal_set_region_access(1, MPU_REGION_RW); // 0x20000000
}
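
/*
 * With eight 512 MB regions, the region index for an address is just its top
 * three address bits. A sketch of the mapping (illustrative only, not a HAL
 * API):
 *
 *     static inline int mpu_region_of(uint32_t addr)
 *     {
 *         return addr >> 29; // e.g. 0x3FFB0000 -> region 1 (kept RW above)
 *     }
 */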
#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H2
void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) The ESP32-C3/H2 CPU doesn't support overlapping PMP regions.
     *
     * 2) Therefore, we use TOR (top of range) entries to map the whole address space, bottom to top.
     *
     * 3) There are not enough entries to describe all the memory regions 100% accurately.
     *
     * 4) This means some gaps (invalid memory) are accessible. When extending regions to cover gaps, we prefer
     *    read-only or read-execute permissions (executing unmapped addresses should always fault with an illegal
     *    instruction, and read-only means stores will correctly fault even if reads may return some invalid value).
     *
     * 5) Entries are grouped in order, with some static asserts to try and verify everything is correct.
     */
    const unsigned NONE = PMP_L | PMP_TOR;
    const unsigned R    = PMP_L | PMP_TOR | PMP_R;
    const unsigned RW   = PMP_L | PMP_TOR | PMP_R | PMP_W;
    const unsigned RX   = PMP_L | PMP_TOR | PMP_R | PMP_X;
    const unsigned RWX  = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;
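
    /*
     * For reference: with TOR, entry i grants its permissions to addresses in
     * [pmpaddr[i-1], pmpaddr[i]) (entry 0 uses address 0 as its lower bound).
     * A sketch of the range check the hardware effectively performs
     * (illustrative only):
     *
     *     // true if 'addr' matches TOR entry i, given the previous entry's top
     *     static inline bool pmp_tor_match(uint32_t prev_top, uint32_t top, uint32_t addr)
     *     {
     *         return (addr >= prev_top) && (addr < top);
     *     }
     */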

    // 1. Gap at bottom of address space
    PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);

    // 2. Debug region
    PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 3. Gap between debug region & DROM (flash cache)
    PMP_ENTRY_SET(2, SOC_DROM_LOW, NONE);
    _Static_assert(SOC_DEBUG_HIGH < SOC_DROM_LOW, "Invalid PMP entry order");

    // 4. DROM (flash cache)
    // 5. Gap between DROM & DRAM
    // (Note: To save PMP entries these two are merged into one read-only region)
    PMP_ENTRY_SET(3, SOC_DRAM_LOW, R);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid DROM region");
    _Static_assert(SOC_DROM_HIGH < SOC_DRAM_LOW, "Invalid PMP entry order");

    // 6. DRAM
    PMP_ENTRY_SET(4, SOC_DRAM_HIGH, RW);
    _Static_assert(SOC_DRAM_LOW < SOC_DRAM_HIGH, "Invalid DRAM region");

    // 7. Gap between DRAM and Mask DROM
    // 8. Mask DROM
    // (Note: to save PMP entries these two are merged into one read-only region)
    PMP_ENTRY_SET(5, SOC_DROM_MASK_HIGH, R);
    _Static_assert(SOC_DRAM_HIGH < SOC_DROM_MASK_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid mask DROM region");

    // 9. Gap between mask DROM and mask IROM
    // 10. Mask IROM
    // (Note: to save PMP entries these two are merged into one RX region)
    PMP_ENTRY_SET(6, SOC_IROM_MASK_HIGH, RX);
    _Static_assert(SOC_DROM_MASK_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid mask IROM region");

    // 11. Gap between mask IROM & IRAM
    PMP_ENTRY_SET(7, SOC_IRAM_LOW, NONE);
    _Static_assert(SOC_IROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");

    // 12. IRAM
    PMP_ENTRY_SET(8, SOC_IRAM_HIGH, RWX);
    _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid IRAM region");

    // 13. Gap between IRAM and IROM
    // 14. IROM (flash cache)
    // (Note: to save PMP entries these two are merged into one RX region)
    PMP_ENTRY_SET(9, SOC_IROM_HIGH, RX);
    _Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid IROM region");

    // 15. Gap between IROM & RTC slow memory
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
    _Static_assert(SOC_IROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");

    // 16. RTC fast memory
    PMP_ENTRY_SET(11, SOC_RTC_IRAM_HIGH, RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");

    // 17. Gap between RTC fast memory & peripheral addresses
    PMP_ENTRY_SET(12, SOC_PERIPHERAL_LOW, NONE);
    _Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");

    // 18. Peripheral addresses
    PMP_ENTRY_SET(13, SOC_PERIPHERAL_HIGH, RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");

    // 19. End of address space
    PMP_ENTRY_SET(14, UINT32_MAX, NONE);            // all but the last 4 bytes
    PMP_ENTRY_SET(15, UINT32_MAX, PMP_L | PMP_NA4); // the last 4 bytes
}
#elif CONFIG_IDF_TARGET_ESP32C2
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
extern int _iram_end;
extern int _data_start;
#define IRAM_END    (int)&_iram_end
#define DRAM_START  (int)&_data_start
#else
#define IRAM_END    SOC_DIRAM_IRAM_HIGH
#define DRAM_START  SOC_DIRAM_DRAM_LOW
#endif

#ifdef BOOTLOADER_BUILD
// Without the L bit set
#define CONDITIONAL_NONE    0x0
#define CONDITIONAL_RX      PMP_R | PMP_X
#define CONDITIONAL_RW      PMP_R | PMP_W
#else
// With the L bit set
#define CONDITIONAL_NONE    NONE
#define CONDITIONAL_RX      RX
#define CONDITIONAL_RW      RW
#endif

void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) The ESP32-C2 CPU supports overlapping PMP regions; configuration relies on the static priority
     *    feature (the lowest-numbered entry has the highest priority).
     *
     * 2) Therefore, we use TOR (top of range) and NAPOT entries to map the effective areas, and finally leave
     *    any remaining address without access permission.
     *
     * 3) PMPADDR entries 3-15 are hardcoded to fixed values, while PMPADDR entries 0-2 are programmed to split
     *    the internal SRAM into IRAM/DRAM. All PMPCFG entries are available.
     *
     * 4) Ideally, PMPADDR entries 0-2 should be configured twice, once during bootloader startup and again during
     *    app startup. However, the CPU currently always executes in machine mode, and to enforce these permissions
     *    in machine mode we need to set the Lock (L) bit, but once set, it cannot be reconfigured. So, we only
     *    configure PMPADDR entries 0-2 during app startup.
     */
    const unsigned NONE = PMP_L;
    const unsigned R    = PMP_L | PMP_R;
    const unsigned X    = PMP_L | PMP_X;
    const unsigned RW   = PMP_L | PMP_R | PMP_W;
    const unsigned RX   = PMP_L | PMP_R | PMP_X;
    const unsigned RWX  = PMP_L | PMP_R | PMP_W | PMP_X;

    /* There are 4 configuration scenarios for PMPADDR entries 0-2:
     *
     * 1. Bootloader build:
     *    - We cannot set the lock bit as we need to reconfigure the entries again for the application.
     *      We configure PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire
     *      valid DRAM range.
     *
     * 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled:
     *    - We split the SRAM into IRAM and DRAM such that the IRAM region cannot be accessed via the DBUS and
     *      the DRAM region cannot be accessed via the IBUS. We use the _iram_end and _data_start markers to set
     *      the boundaries. We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled:
     *    - The IRAM-DRAM split is not enabled, so we just need to ensure that only accesses to valid address
     *      ranges succeed. For that, we set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to
     *      cover the entire DRAM region. We also lock these entries so the R/W/X permissions are enforced even
     *      in machine mode.
     *
     * 4. CPU is in OCD debug mode:
     *    - The IRAM-DRAM split is not enabled so that OpenOCD can write to and execute from IRAM. We set
     *      PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * PMPADDR entries 3-15 are hardcoded and apply to both the bootloader and the application, so we configure
     * and lock them during the bootloader build itself. During the application build, reconfiguration of these
     * PMPADDR entries is silently ignored by the CPU.
     */
    if (esp_cpu_dbgr_is_attached()) {
        // Anti-FI check that the CPU is really in OCD mode
        ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());

        // 1. IRAM
        PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, NONE);
        PMP_ENTRY_SET(1, SOC_DIRAM_IRAM_HIGH, PMP_TOR | RWX);

        // 2. DRAM
        PMP_ENTRY_SET(2, SOC_DIRAM_DRAM_LOW, NONE);
        PMP_ENTRY_CFG_SET(3, PMP_TOR | RW);
    } else {
        // 1. IRAM
        PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, CONDITIONAL_NONE);
        PMP_ENTRY_SET(1, IRAM_END, PMP_TOR | CONDITIONAL_RX);

        // 2. DRAM
        PMP_ENTRY_SET(2, DRAM_START, CONDITIONAL_NONE);
        PMP_ENTRY_CFG_SET(3, PMP_TOR | CONDITIONAL_RW);
    }

    // 3. Debug region
    PMP_ENTRY_CFG_SET(4, PMP_NAPOT | RWX);

    // 4. DROM (flash dcache)
    PMP_ENTRY_CFG_SET(5, PMP_NAPOT | R);

    // 5. DROM_MASK
    PMP_ENTRY_CFG_SET(6, NONE);
    PMP_ENTRY_CFG_SET(7, PMP_TOR | R);

    // 6. IROM_MASK
    PMP_ENTRY_CFG_SET(8, NONE);
    PMP_ENTRY_CFG_SET(9, PMP_TOR | RX);

    // 7. IROM (flash icache)
    PMP_ENTRY_CFG_SET(10, PMP_NAPOT | RX);

    // 8. Peripheral addresses
    PMP_ENTRY_CFG_SET(11, PMP_NAPOT | RW);

    // 9. SRAM (used as ICache)
    PMP_ENTRY_CFG_SET(12, PMP_NAPOT | X);

    // 10. No access to any remaining address (0x0-0xFFFFFFFF)
    PMP_ENTRY_CFG_SET(13, PMP_NA4 | NONE); // the last 4 bytes (0xFFFFFFFC)
    PMP_ENTRY_CFG_SET(14, NONE);
    PMP_ENTRY_CFG_SET(15, PMP_TOR | NONE);
}
#endif
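
/*
 * For reference, the NAPOT entries above rely on the architectural pmpaddr
 * encoding: a naturally aligned power-of-two region of 'size' bytes
 * (size >= 8, base aligned to size) is encoded as
 * pmpaddr = (base >> 2) | ((size >> 3) - 1). A sketch of that encoding
 * (illustrative only; on this target the pmpaddr values for entries 3-15 are
 * hardcoded, which is why only the PMPCFG half is written above):
 *
 *     static inline uint32_t pmpaddr_napot(uint32_t base, uint32_t size)
 *     {
 *         return (base >> 2) | ((size >> 3) - 1);
 *     }
 */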

/* ---------------------------------------------------- Debugging ------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

// --------------- Breakpoints/Watchpoints -----------------

#if SOC_CPU_BREAKPOINTS_NUM > 0
esp_err_t esp_cpu_set_breakpoint(int bp_num, const void *bp_addr)
{
    /*
    Todo:
    - Check that bp_num is in range
    */
#if __XTENSA__
    xt_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#else
    if (esp_cpu_dbgr_is_attached()) {
        /* To set a breakpoint that transfers control to the debugger when hit, we need to set `action` in
         * `mcontrol` to 1 (Enter Debug Mode). That `action` value is supported only when `dmode` of `tdata1` is
         * set, but `dmode` can be modified only by the debugger (from Debug Mode).
         *
         * So when a debugger is connected, we use a special syscall to ask it to set the breakpoint for us.
         */
        long args[] = {true, bp_num, (long)bp_addr};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#endif // __XTENSA__
    return ESP_OK;
}

esp_err_t esp_cpu_clear_breakpoint(int bp_num)
{
    /*
    Todo:
    - Check if the bp_num is valid
    */
#if __XTENSA__
    xt_utils_clear_breakpoint(bp_num);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See the description in esp_cpu_set_breakpoint()
        long args[] = {false, bp_num};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_clear_breakpoint(bp_num);
#endif // __XTENSA__
    return ESP_OK;
}
#endif // SOC_CPU_BREAKPOINTS_NUM > 0

#if SOC_CPU_WATCHPOINTS_NUM > 0
esp_err_t esp_cpu_set_watchpoint(int wp_num, const void *wp_addr, size_t size, esp_cpu_watchpoint_trigger_t trigger)
{
    /*
    Todo:
    - Check that wp_num is in range
    - Check if the wp_num is already in use
    */
    // Check if size is 2^n, where n is in [0...6]
    if (size < 1 || size > 64 || (size & (size - 1)) != 0) {
        return ESP_ERR_INVALID_ARG;
    }
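    /* For reference: (size & (size - 1)) clears the lowest set bit and is zero only for powers of two.
     * E.g. size == 24 is rejected because 24 & 23 == 16, while size == 32 passes because 32 & 31 == 0. */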
    bool on_read = (trigger == ESP_CPU_WATCHPOINT_LOAD || trigger == ESP_CPU_WATCHPOINT_ACCESS);
    bool on_write = (trigger == ESP_CPU_WATCHPOINT_STORE || trigger == ESP_CPU_WATCHPOINT_ACCESS);
#if __XTENSA__
    xt_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See the description in esp_cpu_set_breakpoint()
        long args[] = {true, wp_num, (long)wp_addr, (long)size,
                       (long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))
                      };
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#endif // __XTENSA__
    return ESP_OK;
}

esp_err_t esp_cpu_clear_watchpoint(int wp_num)
{
    /*
    Todo:
    - Check if the wp_num is valid
    */
#if __XTENSA__
    xt_utils_clear_watchpoint(wp_num);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See the description in esp_cpu_set_breakpoint()
        long args[] = {false, wp_num};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_clear_watchpoint(wp_num);
#endif // __XTENSA__
    return ESP_OK;
}
#endif // SOC_CPU_WATCHPOINTS_NUM > 0

/* ------------------------------------------------------ Misc ---------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

#if __XTENSA__ && XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
static DRAM_ATTR uint32_t external_ram_cas_lock = 0;
#endif

bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
#if __XTENSA__
    bool ret;
#if XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    // Check if the target address is in external RAM
    if ((uint32_t)addr >= SOC_EXTRAM_DATA_LOW && (uint32_t)addr < SOC_EXTRAM_DATA_HIGH) {
        /* The target address is in external RAM, thus the native CAS instruction cannot be used. Instead, we
        achieve atomicity by disabling interrupts and then acquiring an external RAM CAS lock. */
        uint32_t intr_level;
        __asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
                              : "=r"(intr_level));
        if (!xt_utils_compare_and_set(&external_ram_cas_lock, 0, 1)) {
            // External RAM CAS lock already taken. Exit
            ret = false;
            goto exit;
        }
        // Now we compare and set the target address
        ret = (*addr == compare_value);
        if (ret) {
            *addr = new_value;
        }
        // Release the external RAM CAS lock
        external_ram_cas_lock = 0;
exit:
        // Re-enable interrupts
        __asm__ __volatile__ ("memw \n"
                              "wsr %0, ps\n"
                              :: "r"(intr_level));
    } else
#endif // XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    {
        // The target address is in internal RAM. Use the CPU's native CAS instruction
        ret = xt_utils_compare_and_set(addr, compare_value, new_value);
    }
    return ret;
#else // __XTENSA__
    // Our single-core RISC-V targets don't have an atomic CAS instruction, so the access method is the same for
    // internal and external RAM
    return rv_utils_compare_and_set(addr, compare_value, new_value);
#endif
}
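
/*
 * Usage sketch (illustrative only, not part of this file's API): a minimal
 * spinlock built on this CAS primitive. 'example_lock' is a hypothetical word
 * that may live in internal or external RAM.
 *
 *     static volatile uint32_t example_lock = 0;
 *
 *     static void example_lock_take(void)
 *     {
 *         while (!esp_cpu_compare_and_set(&example_lock, 0, 1)) {
 *             // spin until the 0 -> 1 transition succeeds
 *         }
 *     }
 *
 *     static void example_lock_give(void)
 *     {
 *         example_lock = 0;
 *     }
 */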