/*
 * SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "sdkconfig.h"
#include <stdint.h>
#include <assert.h>
#include "soc/soc.h"
#include "soc/soc_caps.h"

// TODO: IDF-5645
#if CONFIG_IDF_TARGET_ESP32C6
#include "soc/lp_aon_reg.h"
#include "soc/pcr_reg.h"
#define SYSTEM_CPU_PER_CONF_REG         PCR_CPU_WAITI_CONF_REG
#define SYSTEM_CPU_WAIT_MODE_FORCE_ON   PCR_CPU_WAIT_MODE_FORCE_ON
#else
#include "soc/rtc_cntl_reg.h"
#endif

#include "hal/soc_hal.h"
#include "hal/mpu_hal.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_cpu.h"
#include "esp_memory_utils.h"
#include "esp_fault.h"
#if __XTENSA__
#include "xtensa/config/core-isa.h"
#else
#include "soc/system_reg.h"     // For SYSTEM_CPU_PER_CONF_REG
#include "soc/dport_access.h"   // For Dport access
#include "riscv/semihosting.h"
#include "riscv/csr.h"          // For PMP_ENTRY. [refactor-todo] create PMP abstraction in rv_utils.h
#endif
#if SOC_CPU_HAS_FLEXIBLE_INTC
#include "riscv/instruction_decode.h"
#endif

/* --------------------------------------------------- CPU Control -----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

void esp_cpu_stall(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1   // We don't allow stalling of the current core
    /*
    We need to write the value "0x86" to stall a particular core. The write location is split into two separate
    bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has its own pair of
    "c0" and "c1" bit fields.
    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
    int rtc_cntl_c0_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_S : RTC_CNTL_SW_STALL_APPCPU_C0_S;
    int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
    int rtc_cntl_c1_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_S : RTC_CNTL_SW_STALL_APPCPU_C1_S;
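    /* Sanity note: the full stall code 0x86 decomposes into the two fields written below,
       with C1 = 0x21 in the upper bits and C0 = 0x2 in the lower bits ((0x21 << 2) | 0x2 == 0x86). */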
    CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
    SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s);
    CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
    SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s);
#endif
}

void esp_cpu_unstall(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1   // We don't allow stalling of the current core
    /*
    We need to clear the value "0x86" to unstall a particular core. The location of this value is split into
    two separate bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has
    its own pair of "c0" and "c1" bit fields.
    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
    int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
    CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
    CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
#endif
}

void esp_cpu_reset(int core_id)
{
#if CONFIG_IDF_TARGET_ESP32C6   // TODO: IDF-5645
    SET_PERI_REG_MASK(LP_AON_CPUCORE0_CFG_REG, LP_AON_CPU_CORE0_SW_RESET);
#else
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1
    /*
    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_rst_m = (core_id == 0) ? RTC_CNTL_SW_PROCPU_RST_M : RTC_CNTL_SW_APPCPU_RST_M;
#else // SOC_CPU_CORES_NUM > 1
    int rtc_cntl_rst_m = RTC_CNTL_SW_PROCPU_RST_M;
#endif // SOC_CPU_CORES_NUM > 1
    SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_rst_m);
#endif
}

void esp_cpu_wait_for_intr(void)
{
#if __XTENSA__
    xt_utils_wait_for_intr();
#else
    // TODO: IDF-5645 (better to implement with LL); the C6 register names are remapped in the #include section at the top
    if (esp_cpu_dbgr_is_attached() && DPORT_REG_GET_BIT(SYSTEM_CPU_PER_CONF_REG, SYSTEM_CPU_WAIT_MODE_FORCE_ON) == 0) {
        /* When SYSTEM_CPU_WAIT_MODE_FORCE_ON is disabled, the debugger's SBA access to memory does not work
           while the CPU is in WFI mode, so do not enter that mode when a debugger is connected. */
        return;
    }
    rv_utils_wait_for_intr();
#endif // __XTENSA__
}

/* -------------------------------------------------- CPU Registers ----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

// ---------------- Interrupt Descriptors ------------------

#if SOC_CPU_HAS_FLEXIBLE_INTC
static bool is_intr_num_resv(int intr_num)
{
    // Workaround to reserve interrupt number 1 for Wi-Fi, 5 and 8 for Bluetooth, 6 for the "permanently disabled interrupt"
    // [TODO: IDF-2465]
    uint32_t reserved = BIT(1) | BIT(5) | BIT(6) | BIT(8);
    // int_num 0, 3, 4, 7 are unavailable for the PULP CPU
#if CONFIG_IDF_TARGET_ESP32C6 // TODO: IDF-5728 replace with a better macro name
    reserved |= BIT(0) | BIT(3) | BIT(4) | BIT(7);
#endif
    if (reserved & BIT(intr_num)) {
        return true;
    }
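
    /* Beyond the statically reserved numbers, an interrupt is also treated as reserved if its
       vector table entry no longer jumps to the shared _interrupt_handler (typically because a
       dedicated vector has been installed in that slot). */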
    extern int _vector_table;
    extern int _interrupt_handler;
    const intptr_t pc = (intptr_t)(&_vector_table + intr_num);

    /* JAL instructions are relative to the PC they are executed from. */
    const intptr_t destination = pc + riscv_decode_offset_from_jal_instruction(pc);

    return destination != (intptr_t)&_interrupt_handler;
}

void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{
    intr_desc_ret->priority = 1;    // TODO: We should make this -1
    intr_desc_ret->type = ESP_CPU_INTR_TYPE_NA;
#if __riscv
    intr_desc_ret->flags = is_intr_num_resv(intr_num) ? ESP_CPU_INTR_DESC_FLAG_RESVD : 0;
#else
    intr_desc_ret->flags = 0;
#endif
}

#else // SOC_CPU_HAS_FLEXIBLE_INTC

typedef struct {
    int priority;
    esp_cpu_intr_type_t type;
    uint32_t flags[SOC_CPU_CORES_NUM];
} intr_desc_t;

#if SOC_CPU_CORES_NUM > 1
// Note: We currently only have dual core targets, so the table initializer is hard coded
const static intr_desc_t intr_desc_table[SOC_CPU_INTR_NUM] = {
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //0
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //1
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //2
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //3
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //4
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //5
#if CONFIG_FREERTOS_CORETIMER_0
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //6
#else
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //6
#endif
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //7
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //8
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //9
    { 1, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } }, //10
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //11
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //12
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //13
    { 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //15
#else
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //15
#endif
    { 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //16
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //17
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //18
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //19
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //20
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //21
    { 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //22
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //23
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //24
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //25
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { 0, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //26
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //27
    { 4, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } }, //28
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //29
    { 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //30
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //31
};
#else // SOC_CPU_CORES_NUM > 1
const static intr_desc_t intr_desc_table[SOC_CPU_INTR_NUM] = {
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //0
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //1
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //2
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //3
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //4
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //5
#if CONFIG_FREERTOS_CORETIMER_0
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //6
#else
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //6
#endif
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //7
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //8
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //9
    { 1, ESP_CPU_INTR_TYPE_EDGE, { 0 } }, //10
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //11
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //12
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //13
    { 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //15
#else
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //15
#endif
    { 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //16
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //17
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //18
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //19
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //20
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //21
    { 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //22
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //23
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //24
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //25
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //26
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //27
    { 4, ESP_CPU_INTR_TYPE_EDGE, { 0 } }, //28
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //29
    { 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //30
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //31
};
#endif // SOC_CPU_CORES_NUM > 1

void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM == 1
    core_id = 0; // If this is a single core target, hard code CPU ID to 0
#endif
    intr_desc_ret->priority = intr_desc_table[intr_num].priority;
    intr_desc_ret->type = intr_desc_table[intr_num].type;
    intr_desc_ret->flags = intr_desc_table[intr_num].flags[core_id];
}

#endif // SOC_CPU_HAS_FLEXIBLE_INTC

/* -------------------------------------------------- Memory Ports -----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

#if SOC_CPU_HAS_PMA
static void esp_cpu_configure_invalid_regions(void)
{
    const unsigned PMA_NONE = PMA_EN;
    __attribute__((unused)) const unsigned PMA_RW  = PMA_EN | PMA_R | PMA_W;
    __attribute__((unused)) const unsigned PMA_RX  = PMA_EN | PMA_R | PMA_X;
    __attribute__((unused)) const unsigned PMA_RWX = PMA_EN | PMA_R | PMA_W | PMA_X;
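
    /* Each invalid gap below is blocked with a TOR (top-of-range) entry: an entry programmed with
       PMA_TOR | PMA_NONE removes access to the addresses between the previous entry's address and
       its own, so most gaps take a pair of entries (an address marker followed by the TOR entry). */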

    // 1. Gap at bottom of address space
    PMA_ENTRY_SET_TOR(0, SOC_DEBUG_LOW, PMA_TOR | PMA_NONE);

    // 2. Gap between debug region & IROM
    PMA_ENTRY_SET_TOR(1, SOC_DEBUG_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(2, SOC_IROM_MASK_LOW, PMA_TOR | PMA_NONE);

    // 3. Gap between ROM & RAM
    PMA_ENTRY_SET_TOR(3, SOC_DROM_MASK_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(4, SOC_IRAM_LOW, PMA_TOR | PMA_NONE);

    // 4. Gap between DRAM and I_Cache
    PMA_ENTRY_SET_TOR(5, SOC_IRAM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(6, SOC_IROM_LOW, PMA_TOR | PMA_NONE);

    // 5. Gap between D_Cache & LP_RAM
    PMA_ENTRY_SET_TOR(7, SOC_DROM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(8, SOC_RTC_IRAM_LOW, PMA_TOR | PMA_NONE);

    // 6. Gap between LP memory & peripheral addresses
    PMA_ENTRY_SET_TOR(9, SOC_RTC_IRAM_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(10, SOC_PERIPHERAL_LOW, PMA_TOR | PMA_NONE);

    // 7. End of address space
    PMA_ENTRY_SET_TOR(11, SOC_PERIPHERAL_HIGH, PMA_NONE);
    PMA_ENTRY_SET_TOR(12, UINT32_MAX, PMA_TOR | PMA_NONE);
}
#endif

#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2 || CONFIG_IDF_TARGET_ESP32S3
void esp_cpu_configure_region_protection(void)
{
    /* Note: currently this is configured the same on all Xtensa targets
     *
     * These chips have the address space divided into 8 regions, 512 MB each.
     */
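    /* With 8 regions of 512 MB each, MPU region n covers the window starting at n * 0x20000000. */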
    const int illegal_regions[] = {0, 4, 5, 6, 7}; // 0x00000000, 0x80000000, 0xa0000000, 0xc0000000, 0xe0000000
    for (size_t i = 0; i < sizeof(illegal_regions) / sizeof(illegal_regions[0]); ++i) {
        mpu_hal_set_region_access(illegal_regions[i], MPU_REGION_ILLEGAL);
    }

    mpu_hal_set_region_access(1, MPU_REGION_RW); // 0x20000000
}

#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H4
void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) Note: ESP32-C3/H4 CPU doesn't support overlapping PMP regions
     *
     * 2) Therefore, we use TOR (top of range) entries to map the whole address
     *    space, bottom to top.
     *
     * 3) There are not enough entries to describe all the memory regions 100% accurately.
     *
     * 4) This means some gaps (invalid memory) are accessible. When extending regions to cover
     *    gaps, preference is given to extending read-only or read-execute regions (executing
     *    unmapped addresses should always fault with an invalid instruction, and read-only means
     *    stores will correctly fault even if reads may return some invalid value).
     *
     * 5) Entries are grouped in order with some static asserts to try and verify everything is
     *    correct.
     */
    const unsigned NONE = PMP_L | PMP_TOR;
    const unsigned R    = PMP_L | PMP_TOR | PMP_R;
    const unsigned RW   = PMP_L | PMP_TOR | PMP_R | PMP_W;
    const unsigned RX   = PMP_L | PMP_TOR | PMP_R | PMP_X;
    const unsigned RWX  = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;
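
    /* Reminder: with PMP_TOR, entry N applies its permissions to the address range
       [pmpaddr(N-1), pmpaddr(N)); for entry 0 the range starts at address 0. */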

    // 1. Gap at bottom of address space
    PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);

    // 2. Debug region
    PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 3. Gap between debug region & DROM (flash cache)
    PMP_ENTRY_SET(2, SOC_DROM_LOW, NONE);
    _Static_assert(SOC_DEBUG_HIGH < SOC_DROM_LOW, "Invalid PMP entry order");

    // 4. DROM (flash cache)
    // 5. Gap between DROM & DRAM
    // (Note: To save PMP entries these two are merged into one read-only region)
    PMP_ENTRY_SET(3, SOC_DRAM_LOW, R);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid DROM region");
    _Static_assert(SOC_DROM_HIGH < SOC_DRAM_LOW, "Invalid PMP entry order");

    // 6. DRAM
    PMP_ENTRY_SET(4, SOC_DRAM_HIGH, RW);
    _Static_assert(SOC_DRAM_LOW < SOC_DRAM_HIGH, "Invalid DRAM region");

    // 7. Gap between DRAM and Mask DROM
    // 8. Mask DROM
    // (Note: to save PMP entries these two are merged into one read-only region)
    PMP_ENTRY_SET(5, SOC_DROM_MASK_HIGH, R);
    _Static_assert(SOC_DRAM_HIGH < SOC_DROM_MASK_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid mask DROM region");

    // 9. Gap between mask DROM and mask IROM
    // 10. Mask IROM
    // (Note: to save PMP entries these two are merged into one RX region)
    PMP_ENTRY_SET(6, SOC_IROM_MASK_HIGH, RX);
    _Static_assert(SOC_DROM_MASK_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid mask IROM region");

    // 11. Gap between mask IROM & IRAM
    PMP_ENTRY_SET(7, SOC_IRAM_LOW, NONE);
    _Static_assert(SOC_IROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");

    // 12. IRAM
    PMP_ENTRY_SET(8, SOC_IRAM_HIGH, RWX);
    _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid IRAM region");

    // 13. Gap between IRAM and IROM
    // 14. IROM (flash cache)
    // (Note: to save PMP entries these two are merged into one RX region)
    PMP_ENTRY_SET(9, SOC_IROM_HIGH, RX);
    _Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid IROM region");

    // 15. Gap between IROM & RTC slow memory
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
    _Static_assert(SOC_IROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");

    // 16. RTC fast memory
    PMP_ENTRY_SET(11, SOC_RTC_IRAM_HIGH, RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");

    // 17. Gap between RTC fast memory & peripheral addresses
    PMP_ENTRY_SET(12, SOC_PERIPHERAL_LOW, NONE);
    _Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");

    // 18. Peripheral addresses
    PMP_ENTRY_SET(13, SOC_PERIPHERAL_HIGH, RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");

    // 19. End of address space
    PMP_ENTRY_SET(14, UINT32_MAX, NONE);            // all but last 4 bytes
    PMP_ENTRY_SET(15, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
}

#elif CONFIG_IDF_TARGET_ESP32C2
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
extern int _iram_end;
extern int _data_start;
#define IRAM_END    (int)&_iram_end
#define DRAM_START  (int)&_data_start
#else
#define IRAM_END    SOC_DIRAM_IRAM_HIGH
#define DRAM_START  SOC_DIRAM_DRAM_LOW
#endif
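
/* Note: when the IRAM/DRAM split is enabled for an application build, the split point comes from the
   linker-script symbols _iram_end / _data_start; otherwise the full D/I RAM boundaries are used. */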

#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE    0x0
#define CONDITIONAL_RX      PMP_R | PMP_X
#define CONDITIONAL_RW      PMP_R | PMP_W
#else
// With L bit set
#define CONDITIONAL_NONE    NONE
#define CONDITIONAL_RX      RX
#define CONDITIONAL_RW      RW
#endif

void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) The ESP32-C2 CPU supports overlapping PMP regions; the configuration relies on the static
     *    priority feature (the lowest numbered entry has the highest priority).
     *
     * 2) Therefore, we use TOR (top of range) and NAPOT entries to map the effective areas, and
     *    finally leave any remaining address without access permission.
     *
     * 3) PMPADDR entries 3-15 are hardcoded to fixed values, while PMPADDR entries 0-2 are programmed
     *    to split the shared I/D SRAM into IRAM and DRAM. All PMPCFG entries are available.
     *
     * 4) Ideally, PMPADDR entries 0-2 should be configured twice: once during bootloader startup and again
     *    during app startup. However, the CPU currently always executes in machine mode, and to enforce these
     *    permissions in machine mode we need to set the Lock (L) bit, which once set cannot be reconfigured.
     *    So we only configure PMPADDR 0-2 during app startup.
     */
    const unsigned NONE = PMP_L;
    const unsigned R    = PMP_L | PMP_R;
    const unsigned X    = PMP_L | PMP_X;
    const unsigned RW   = PMP_L | PMP_R | PMP_W;
    const unsigned RX   = PMP_L | PMP_R | PMP_X;
    const unsigned RWX  = PMP_L | PMP_R | PMP_W | PMP_X;

    /* There are 4 configuration scenarios for PMPADDR 0-2
     *
     * 1. Bootloader build:
     *    - We cannot set the lock bit, as we need to reconfigure these entries again for the application.
     *      We configure PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire valid DRAM range.
     *
     * 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled
     *    - We split the SRAM into IRAM and DRAM such that the IRAM region cannot be accessed via the DBUS
     *      and the DRAM region cannot be accessed via the IBUS. We use the _iram_end and _data_start markers to set the boundaries.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled
     *    - The IRAM-DRAM split is not enabled, so we just need to ensure that only accesses to valid address ranges succeed.
     *      For that we set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 4. CPU is in OCD debug mode
     *    - The IRAM-DRAM split is not enabled so that OpenOCD can write to and execute from IRAM.
     *      We set PMPADDR 0-1 to cover the entire valid IRAM range and PMPADDR 2-3 to cover the entire DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * PMPADDR 3-15 are hard-coded and are applicable to both the bootloader and the application, so we configure
     * and lock them during the BOOTLOADER build itself. During the application build, reconfiguration of these
     * PMPADDR entries is silently ignored by the CPU.
     */
    if (esp_cpu_dbgr_is_attached()) {
        // Anti-FI check that cpu is really in ocd mode
        ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());

        // 1. IRAM
        PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, NONE);
        PMP_ENTRY_SET(1, SOC_DIRAM_IRAM_HIGH, PMP_TOR | RWX);

        // 2. DRAM
        PMP_ENTRY_SET(2, SOC_DIRAM_DRAM_LOW, NONE);
        PMP_ENTRY_CFG_SET(3, PMP_TOR | RW);
    } else {
        // 1. IRAM
        PMP_ENTRY_SET(0, SOC_DIRAM_IRAM_LOW, CONDITIONAL_NONE);
        PMP_ENTRY_SET(1, IRAM_END, PMP_TOR | CONDITIONAL_RX);

        // 2. DRAM
        PMP_ENTRY_SET(2, DRAM_START, CONDITIONAL_NONE);
        PMP_ENTRY_CFG_SET(3, PMP_TOR | CONDITIONAL_RW);
    }
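
    /* The entries below only use PMP_ENTRY_CFG_SET(): per the note above, PMPADDR 3-15 hold hard-coded
       values, so only the configuration/permission bits need to be (re)written here. */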

    // 3. Debug region
    PMP_ENTRY_CFG_SET(4, PMP_NAPOT | RWX);

    // 4. DROM (flash dcache)
    PMP_ENTRY_CFG_SET(5, PMP_NAPOT | R);

    // 5. DROM_MASK
    PMP_ENTRY_CFG_SET(6, NONE);
    PMP_ENTRY_CFG_SET(7, PMP_TOR | R);

    // 6. IROM_MASK
    PMP_ENTRY_CFG_SET(8, NONE);
    PMP_ENTRY_CFG_SET(9, PMP_TOR | RX);

    // 7. IROM (flash icache)
    PMP_ENTRY_CFG_SET(10, PMP_NAPOT | RX);

    // 8. Peripheral addresses
    PMP_ENTRY_CFG_SET(11, PMP_NAPOT | RW);

    // 9. SRAM (used as ICache)
    PMP_ENTRY_CFG_SET(12, PMP_NAPOT | X);

    // 10. no access to any address below (0x0 - 0xFFFF_FFFF)
    PMP_ENTRY_CFG_SET(13, PMP_NA4 | NONE);  // last 4 bytes (0xFFFFFFFC)
    PMP_ENTRY_CFG_SET(14, NONE);
    PMP_ENTRY_CFG_SET(15, PMP_TOR | NONE);
}

#elif CONFIG_IDF_TARGET_ESP32C6

#ifdef BOOTLOADER_BUILD
// Without L bit set
#define CONDITIONAL_NONE    0x0
#define CONDITIONAL_RX      PMP_R | PMP_X
#define CONDITIONAL_RW      PMP_R | PMP_W
#define CONDITIONAL_RWX     PMP_R | PMP_W | PMP_X
#else
// With L bit set
#define CONDITIONAL_NONE    PMP_NONE
#define CONDITIONAL_RX      PMP_RX
#define CONDITIONAL_RW      PMP_RW
#define CONDITIONAL_RWX     PMP_RWX
#endif

void esp_cpu_configure_region_protection(void)
{
    /* Notes on implementation:
     *
     * 1) Note: the ESP32-C6 CPU doesn't support overlapping PMP regions
     *
     * 2) The ESP32-C6 supports 16 PMA regions, so we use this feature to block all the invalid address ranges
     *
     * 3) We use a combination of NAPOT (Naturally Aligned Power Of Two) and TOR (top of range)
     *    entries to map all the valid address space, bottom to top. This leaves us with some extra PMP entries
     *    which can be used to provide more granular access
     *
     * 4) Entries are grouped in order with some static asserts to try and verify everything is
     *    correct.
     */

    /* There are 4 configuration scenarios for SRAM
     *
     * 1. Bootloader build:
     *    - We cannot set the lock bit, as we need to reconfigure it again for the application.
     *      We configure the PMP to cover the entire valid IRAM and DRAM range.
     *
     * 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled
     *    - We split the SRAM into IRAM and DRAM such that the IRAM region cannot be written to
     *      and the DRAM region cannot be executed. We use the _iram_end and _data_start markers to set the boundaries.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled
     *    - The IRAM-DRAM split is not enabled, so we just need to ensure that only accesses to valid address ranges succeed.
     *      For that we set the PMP to cover the entire valid IRAM and DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     *
     * 4. CPU is in OCD debug mode
     *    - The IRAM-DRAM split is not enabled so that OpenOCD can write to and execute from IRAM.
     *      We set the PMP to cover the entire valid IRAM and DRAM region.
     *      We also lock these entries so the R/W/X permissions are enforced even in machine mode.
     */

    const unsigned PMP_NONE = PMP_L;
    const unsigned PMP_RW   = PMP_L | PMP_R | PMP_W;
    const unsigned PMP_RX   = PMP_L | PMP_R | PMP_X;
    const unsigned PMP_RWX  = PMP_L | PMP_R | PMP_W | PMP_X;
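
    /* Note: PMPADDR_NAPOT() packs a naturally-aligned power-of-two range into a single pmpaddr value,
       so one NAPOT entry can cover a whole region (e.g. the debug region below) without needing a TOR pair. */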

    //
    // Configure all the invalid address regions using PMA
    //
    esp_cpu_configure_invalid_regions();

    //
    // Configure all the valid address regions using PMP
    //

    // 1. Debug region
    const uint32_t pmpaddr0 = PMPADDR_NAPOT(SOC_DEBUG_LOW, SOC_DEBUG_HIGH);
    PMP_ENTRY_SET(0, pmpaddr0, PMP_NAPOT | PMP_RWX);
    _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");

    // 2.1 I-ROM
    PMP_ENTRY_SET(1, SOC_IROM_MASK_LOW, PMP_NONE);
    PMP_ENTRY_SET(2, SOC_IROM_MASK_HIGH, PMP_TOR | PMP_RX);
    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid I-ROM region");

    // 2.2 D-ROM
    PMP_ENTRY_SET(3, SOC_DROM_MASK_LOW, PMP_NONE);
    PMP_ENTRY_SET(4, SOC_DROM_MASK_HIGH, PMP_TOR | PMP_R);
    _Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid D-ROM region");

    if (esp_cpu_dbgr_is_attached()) {
        // Anti-FI check that cpu is really in ocd mode
        ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());

        // 5. IRAM and DRAM
        const uint32_t pmpaddr5 = PMPADDR_NAPOT(SOC_IRAM_LOW, SOC_IRAM_HIGH);
        PMP_ENTRY_SET(5, pmpaddr5, PMP_NAPOT | PMP_RWX);
        _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
    } else {
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
        extern int _iram_end;
        // 5. IRAM and DRAM
        PMP_ENTRY_SET(5, SOC_IRAM_LOW, PMP_NONE);
        PMP_ENTRY_SET(6, (int)&_iram_end, PMP_TOR | PMP_RX);
        PMP_ENTRY_SET(7, SOC_DRAM_HIGH, PMP_TOR | PMP_RW);
#else
        // 5. IRAM and DRAM
        const uint32_t pmpaddr5 = PMPADDR_NAPOT(SOC_IRAM_LOW, SOC_IRAM_HIGH);
        PMP_ENTRY_SET(5, pmpaddr5, PMP_NAPOT | CONDITIONAL_RWX);
        _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
#endif
    }

    // 4. I_Cache (flash)
    const uint32_t pmpaddr8 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
    PMP_ENTRY_SET(8, pmpaddr8, PMP_NAPOT | PMP_RX);
    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid I_Cache region");

    // 5. D_Cache (flash)
    const uint32_t pmpaddr9 = PMPADDR_NAPOT(SOC_DROM_LOW, SOC_DROM_HIGH);
    PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | PMP_R);
    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid D_Cache region");

    // 6. LP memory
#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
    extern int _rtc_text_end;
    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, PMP_NONE);
    PMP_ENTRY_SET(11, (int)&_rtc_text_end, PMP_TOR | PMP_RX);
    PMP_ENTRY_SET(12, SOC_RTC_IRAM_HIGH, PMP_TOR | PMP_RW);
#else
    const uint32_t pmpaddr10 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
    PMP_ENTRY_SET(10, pmpaddr10, PMP_NAPOT | CONDITIONAL_RWX);
    _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
#endif

    // 7. Peripheral addresses
    const uint32_t pmpaddr13 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
    PMP_ENTRY_SET(13, pmpaddr13, PMP_NAPOT | PMP_RW);
    _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
}
#endif

/* ---------------------------------------------------- Debugging ------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

// --------------- Breakpoints/Watchpoints -----------------

#if SOC_CPU_BREAKPOINTS_NUM > 0
esp_err_t esp_cpu_set_breakpoint(int bp_num, const void *bp_addr)
{
    /*
    Todo:
    - Check that bp_num is in range
    */
#if __XTENSA__
    xt_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#else
    if (esp_cpu_dbgr_is_attached()) {
        /* If we want to set a breakpoint which, when hit, transfers control to the debugger,
         * we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
         * That `action` value is supported only when `dmode` of `tdata1` is set.
         * But `dmode` can be modified only by the debugger (from Debug Mode).
         *
         * So when a debugger is connected we use a special syscall to ask it to set the breakpoint for us.
         */
        long args[] = {true, bp_num, (long)bp_addr};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#endif // __XTENSA__
    return ESP_OK;
}

esp_err_t esp_cpu_clear_breakpoint(int bp_num)
{
    /*
    Todo:
    - Check if the bp_num is valid
    */
#if __XTENSA__
    xt_utils_clear_breakpoint(bp_num);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {false, bp_num};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_clear_breakpoint(bp_num);
#endif // __XTENSA__
    return ESP_OK;
}
#endif // SOC_CPU_BREAKPOINTS_NUM > 0

#if SOC_CPU_WATCHPOINTS_NUM > 0
esp_err_t esp_cpu_set_watchpoint(int wp_num, const void *wp_addr, size_t size, esp_cpu_watchpoint_trigger_t trigger)
{
    /*
    Todo:
    - Check that wp_num is in range
    - Check if the wp_num is already in use
    */
    // Check if size is 2^n, where n is in [0...6]
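    // (e.g. size == 6 is rejected since 6 & 5 == 4, while size == 8 passes since 8 & 7 == 0)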
    if (size < 1 || size > 64 || (size & (size - 1)) != 0) {
        return ESP_ERR_INVALID_ARG;
    }
    bool on_read = (trigger == ESP_CPU_WATCHPOINT_LOAD || trigger == ESP_CPU_WATCHPOINT_ACCESS);
    bool on_write = (trigger == ESP_CPU_WATCHPOINT_STORE || trigger == ESP_CPU_WATCHPOINT_ACCESS);
#if __XTENSA__
    xt_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {true, wp_num, (long)wp_addr, (long)size,
                       (long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))
                      };
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#endif // __XTENSA__
    return ESP_OK;
}

esp_err_t esp_cpu_clear_watchpoint(int wp_num)
{
    /*
    Todo:
    - Check if the wp_num is valid
    */
#if __XTENSA__
    xt_utils_clear_watchpoint(wp_num);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {false, wp_num};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_clear_watchpoint(wp_num);
#endif // __XTENSA__
    return ESP_OK;
}
#endif // SOC_CPU_WATCHPOINTS_NUM > 0

/* ------------------------------------------------------ Misc ---------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

#if __XTENSA__ && XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
static DRAM_ATTR uint32_t external_ram_cas_lock = 0;
#endif
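
/* Illustrative sketch only (not part of this file's API surface): a caller could build a trivial
 * try-lock on top of this compare-and-set primitive, e.g.
 *
 *     static volatile uint32_t lock = 0;
 *     if (esp_cpu_compare_and_set(&lock, 0, 1)) {
 *         // ... critical section ...
 *         lock = 0;
 *     }
 *
 * Real code should normally use the higher-level spinlock / critical-section APIs instead.
 */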
bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
#if __XTENSA__
    bool ret;
#if XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    // Check if the target address is in external RAM
    if ((uint32_t)addr >= SOC_EXTRAM_DATA_LOW && (uint32_t)addr < SOC_EXTRAM_DATA_HIGH) {
        /* The target address is in external RAM, thus the native CAS instruction cannot be used. Instead, we achieve
        atomicity by disabling interrupts and then acquiring an external RAM CAS lock. */
        uint32_t intr_level;
        __asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
                              : "=r"(intr_level));
        if (!xt_utils_compare_and_set(&external_ram_cas_lock, 0, 1)) {
            // External RAM CAS lock already taken. Exit
            ret = false;
            goto exit;
        }
        // Now we compare and set the target address
        ret = (*addr == compare_value);
        if (ret) {
            *addr = new_value;
        }
        // Release the external RAM CAS lock
        external_ram_cas_lock = 0;
exit:
        // Reenable interrupts
        __asm__ __volatile__ ("memw \n"
                              "wsr %0, ps\n"
                              :: "r"(intr_level));
    } else
#endif  // XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    {
        // The target address is in internal RAM. Use the CPU's native CAS instruction
        ret = xt_utils_compare_and_set(addr, compare_value, new_value);
    }
    return ret;
#else // __XTENSA__
    // Single-core targets don't have an atomic CAS instruction, so the access method is the same for internal and external RAM
    return rv_utils_compare_and_set(addr, compare_value, new_value);
#endif
}