
/*
 * SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "sdkconfig.h"
#include <stdint.h>
#include <assert.h>
#include "soc/soc.h"
#include "soc/soc_caps.h"
// TODO: IDF-5645
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2
#include "soc/lp_aon_reg.h"
#include "soc/pcr_reg.h"
#define SYSTEM_CPU_PER_CONF_REG       PCR_CPU_WAITI_CONF_REG
#define SYSTEM_CPU_WAIT_MODE_FORCE_ON PCR_CPU_WAIT_MODE_FORCE_ON
#else
#include "soc/rtc_cntl_reg.h"
#endif
#include "hal/soc_hal.h"
#include "esp_bit_defs.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_cpu.h"
#if __XTENSA__
#include "xtensa/config/core-isa.h"
#else
#include "soc/system_reg.h"     // For SYSTEM_CPU_PER_CONF_REG
#include "soc/dport_access.h"   // For DPORT access
#include "riscv/semihosting.h"
#endif
#if SOC_CPU_HAS_FLEXIBLE_INTC
#include "riscv/instruction_decode.h"
#endif
/* --------------------------------------------------- CPU Control -----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

void esp_cpu_stall(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1   // We don't allow stalling of the current core
    /*
    We need to write the value "0x86" to stall a particular core. The write location is split into two separate
    bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has its own pair of
    "c0" and "c1" bit fields.

    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
    int rtc_cntl_c0_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_S : RTC_CNTL_SW_STALL_APPCPU_C0_S;
    int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
    int rtc_cntl_c1_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_S : RTC_CNTL_SW_STALL_APPCPU_C1_S;
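    // "0x86" is split across the two fields: the C0 field takes the low two bits (0x2) and the C1 field
    // takes the upper six bits (0x21), i.e. 0x86 == (0x21 << 2) | 0x2.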
    CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
    SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s);
    CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
    SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s);
#endif
}
void esp_cpu_unstall(int core_id)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1   // We don't allow stalling of the current core
    /*
    We need to clear the value "0x86" to unstall a particular core. The location of this value is split into
    two separate bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has
    its own pair of "c0" and "c1" bit fields.

    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
    int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
    CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
    CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
#endif
}
void esp_cpu_reset(int core_id)
{
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2 // TODO: IDF-5645
    SET_PERI_REG_MASK(LP_AON_CPUCORE0_CFG_REG, LP_AON_CPU_CORE0_SW_RESET);
#else
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM > 1
    /*
    Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
    "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
    file's "rodata" section (see IDF-5214).
    */
    int rtc_cntl_rst_m = (core_id == 0) ? RTC_CNTL_SW_PROCPU_RST_M : RTC_CNTL_SW_APPCPU_RST_M;
#else // SOC_CPU_CORES_NUM > 1
    int rtc_cntl_rst_m = RTC_CNTL_SW_PROCPU_RST_M;
#endif // SOC_CPU_CORES_NUM > 1
    SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_rst_m);
#endif
}
void esp_cpu_wait_for_intr(void)
{
#if __XTENSA__
    xt_utils_wait_for_intr();
#else
    // TODO: IDF-5645 (better to implement with LL). C6 register names are converted in the #include section at the top
    if (esp_cpu_dbgr_is_attached() && DPORT_REG_GET_BIT(SYSTEM_CPU_PER_CONF_REG, SYSTEM_CPU_WAIT_MODE_FORCE_ON) == 0) {
        /* When SYSTEM_CPU_WAIT_MODE_FORCE_ON is disabled, SBA access to memory does not work for the debugger
           in WFI mode, so do not enter that mode when a debugger is connected. */
        return;
    }
    rv_utils_wait_for_intr();
#endif // __XTENSA__
}
/* -------------------------------------------------- CPU Registers ----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

// ---------------- Interrupt Descriptors ------------------

#if SOC_CPU_HAS_FLEXIBLE_INTC
static bool is_intr_num_resv(int intr_num)
{
    // Workaround to reserve interrupt number 1 for Wi-Fi, 5 and 8 for Bluetooth, 6 for "permanently disabled interrupt"
    // [TODO: IDF-2465]
    uint32_t reserved = BIT(1) | BIT(5) | BIT(6) | BIT(8);

    // intr_num 0, 3, 4, 7 are unavailable for the PULP CPU
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2 // TODO: IDF-5728 replace with a better macro name
    reserved |= BIT(0) | BIT(3) | BIT(4) | BIT(7);
#endif

    if (reserved & BIT(intr_num)) {
        return true;
    }

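    // The vector table holds one 32-bit JAL instruction per interrupt number. Decode the jump offset of the
    // slot for this intr_num; if it no longer targets the common _interrupt_handler, the slot is considered reserved.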
    extern int _vector_table;
    extern int _interrupt_handler;
    const intptr_t pc = (intptr_t)(&_vector_table + intr_num);

    /* JAL instructions are relative to the PC they are executed from. */
    const intptr_t destination = pc + riscv_decode_offset_from_jal_instruction(pc);

    return destination != (intptr_t)&_interrupt_handler;
}
void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{
    intr_desc_ret->priority = 1; // TODO: We should make this -1
    intr_desc_ret->type = ESP_CPU_INTR_TYPE_NA;
#if __riscv
    intr_desc_ret->flags = is_intr_num_resv(intr_num) ? ESP_CPU_INTR_DESC_FLAG_RESVD : 0;
#else
    intr_desc_ret->flags = 0;
#endif
}
#else // SOC_CPU_HAS_FLEXIBLE_INTC

typedef struct {
    int priority;
    esp_cpu_intr_type_t type;
    uint32_t flags[SOC_CPU_CORES_NUM];
} intr_desc_t;

#if SOC_CPU_CORES_NUM > 1
// Note: We currently only have dual core targets, so the table initializer is hard coded
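// Each entry: { priority, trigger type, { flags for core 0, flags for core 1 } }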
const static intr_desc_t intr_desc_table[SOC_CPU_INTR_NUM] = {
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } },     //0
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } },     //1
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } },                                                           //2
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } },                                                           //3
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } },                                //4
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } },     //5
#if CONFIG_FREERTOS_CORETIMER_0
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } },        //6
#else
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } },    //6
#endif
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } },    //7
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } },     //8
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } },                                                           //9
    { 1, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } },                                                            //10
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } },    //11
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } },                                                           //12
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } },                                                           //13
    { 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } },     //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } },        //15
#else
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } },    //15
#endif
    { 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } },    //16
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } },                                                           //17
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } },                                                           //18
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } },                                                           //19
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } },                                                           //20
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } },                                                           //21
    { 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } },                                 //22
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } },                                                           //23
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } },                                //24
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } },     //25
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { 0, ESP_CPU_INTR_DESC_FLAG_RESVD } },                                //26
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } },     //27
    { 4, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } },                                                            //28
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } },    //29
    { 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } },      //30
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } },     //31
};
#else // SOC_CPU_CORES_NUM > 1
const static intr_desc_t intr_desc_table[SOC_CPU_INTR_NUM] = {
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } },     //0
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } },     //1
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } },                                //2
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } },                                //3
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } },     //4
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } },     //5
#if CONFIG_FREERTOS_CORETIMER_0
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } },        //6
#else
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } },      //6
#endif
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } },      //7
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } },     //8
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } },                                //9
    { 1, ESP_CPU_INTR_TYPE_EDGE, { 0 } },                                 //10
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } },      //11
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } },                                //12
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } },                                //13
    { 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } },     //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } },        //15
#else
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } },      //15
#endif
    { 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } },      //16
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } },                                //17
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } },                                //18
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } },                                //19
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } },                                //20
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } },                                //21
    { 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } },      //22
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { 0 } },                                //23
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } },     //24
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } },     //25
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { 0 } },                                //26
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } },     //27
    { 4, ESP_CPU_INTR_TYPE_EDGE, { 0 } },                                 //28
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } },      //29
    { 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } },      //30
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } },     //31
};
#endif // SOC_CPU_CORES_NUM > 1

void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{
    assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
#if SOC_CPU_CORES_NUM == 1
    core_id = 0; // If this is a single core target, hard code the CPU ID to 0
#endif
    intr_desc_ret->priority = intr_desc_table[intr_num].priority;
    intr_desc_ret->type = intr_desc_table[intr_num].type;
    intr_desc_ret->flags = intr_desc_table[intr_num].flags[core_id];
}

#endif // SOC_CPU_HAS_FLEXIBLE_INTC
/* ---------------------------------------------------- Debugging ------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

// --------------- Breakpoints/Watchpoints -----------------

#if SOC_CPU_BREAKPOINTS_NUM > 0
esp_err_t esp_cpu_set_breakpoint(int bp_num, const void *bp_addr)
{
    /*
    Todo:
    - Check that bp_num is in range
    */
#if __XTENSA__
    xt_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#else
    if (esp_cpu_dbgr_is_attached()) {
        /* If we want to set a breakpoint which, when hit, transfers control to the debugger,
         * we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
         * That `action` value is supported only when `dmode` of `tdata1` is set.
         * But `dmode` can be modified by the debugger only (from Debug Mode).
         *
         * So when a debugger is connected, we use a special syscall to ask it to set the breakpoint for us.
         */
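        // args: {set (true) / clear (false), breakpoint number, breakpoint address}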
        long args[] = {true, bp_num, (long)bp_addr};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
#endif // __XTENSA__
    return ESP_OK;
}
esp_err_t esp_cpu_clear_breakpoint(int bp_num)
{
    /*
    Todo:
    - Check if the bp_num is valid
    */
#if __XTENSA__
    xt_utils_clear_breakpoint(bp_num);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {false, bp_num};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_clear_breakpoint(bp_num);
#endif // __XTENSA__
    return ESP_OK;
}
#endif // SOC_CPU_BREAKPOINTS_NUM > 0
#if SOC_CPU_WATCHPOINTS_NUM > 0
esp_err_t esp_cpu_set_watchpoint(int wp_num, const void *wp_addr, size_t size, esp_cpu_watchpoint_trigger_t trigger)
{
    /*
    Todo:
    - Check that wp_num is in range
    - Check if the wp_num is already in use
    */
    // Check if size is 2^n, where n is in [0...6]
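    // (a power of two has exactly one bit set, so size & (size - 1) is zero only for powers of two)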
    if (size < 1 || size > 64 || (size & (size - 1)) != 0) {
        return ESP_ERR_INVALID_ARG;
    }
    bool on_read = (trigger == ESP_CPU_WATCHPOINT_LOAD || trigger == ESP_CPU_WATCHPOINT_ACCESS);
    bool on_write = (trigger == ESP_CPU_WATCHPOINT_STORE || trigger == ESP_CPU_WATCHPOINT_ACCESS);
#if __XTENSA__
    xt_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
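        // args: {set (true) / clear (false), watchpoint number, address, size, read/write trigger flags}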
        long args[] = {true, wp_num, (long)wp_addr, (long)size,
                       (long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))
                      };
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
#endif // __XTENSA__
    return ESP_OK;
}
esp_err_t esp_cpu_clear_watchpoint(int wp_num)
{
    /*
    Todo:
    - Check if the wp_num is valid
    */
#if __XTENSA__
    xt_utils_clear_watchpoint(wp_num);
#else
    if (esp_cpu_dbgr_is_attached()) {
        // See description in esp_cpu_set_breakpoint()
        long args[] = {false, wp_num};
        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
        if (ret == 0) {
            return ESP_ERR_INVALID_RESPONSE;
        }
    }
    rv_utils_clear_watchpoint(wp_num);
#endif // __XTENSA__
    return ESP_OK;
}
#endif // SOC_CPU_WATCHPOINTS_NUM > 0
/* ------------------------------------------------------ Misc ---------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

#if __XTENSA__ && XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
static DRAM_ATTR uint32_t external_ram_cas_lock = 0;
#endif

bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
#if __XTENSA__
    bool ret;
#if XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    // Check if the target address is in external RAM
    if ((uint32_t)addr >= SOC_EXTRAM_DATA_LOW && (uint32_t)addr < SOC_EXTRAM_DATA_HIGH) {
        /* The target address is in external RAM, thus the native CAS instruction cannot be used. Instead, we achieve
        atomicity by disabling interrupts and then acquiring an external RAM CAS lock. */
        uint32_t intr_level;
        __asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
                              : "=r"(intr_level));
        if (!xt_utils_compare_and_set(&external_ram_cas_lock, 0, 1)) {
            // External RAM CAS lock already taken. Exit
            ret = false;
            goto exit;
        }
        // Now we compare and set the target address
        ret = (*addr == compare_value);
        if (ret) {
            *addr = new_value;
        }
        // Release the external RAM CAS lock
        external_ram_cas_lock = 0;
exit:
        // Reenable interrupts
        __asm__ __volatile__ ("memw \n"
                              "wsr %0, ps\n"
                              :: "r"(intr_level));
    } else
#endif // XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    {
        // The target address is in internal RAM. Use the CPU's native CAS instruction
        ret = xt_utils_compare_and_set(addr, compare_value, new_value);
    }
    return ret;
#else // __XTENSA__
    // Single core targets don't have an atomic CAS instruction, so the access method is the same for internal and external RAM
    return rv_utils_compare_and_set(addr, compare_value, new_value);
#endif
}