cpu.c 22 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496
  1. /*
  2. * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. */
  6. #include "sdkconfig.h"
  7. #include <stdint.h>
  8. #include <assert.h>
  9. #include "soc/soc.h"
  10. #include "soc/soc_caps.h"
  11. // TODO: IDF-5645
  12. #if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2
  13. #include "soc/lp_aon_reg.h"
  14. #include "soc/pcr_reg.h"
  15. #define SYSTEM_CPU_PER_CONF_REG PCR_CPU_WAITI_CONF_REG
  16. #define SYSTEM_CPU_WAIT_MODE_FORCE_ON PCR_CPU_WAIT_MODE_FORCE_ON
  17. #elif CONFIG_IDF_TARGET_ESP32P4
  18. #include "soc/lp_clkrst_reg.h"
  19. #include "soc/pmu_reg.h"
  20. #else
  21. #include "soc/rtc_cntl_reg.h"
  22. #endif
  23. #include "hal/soc_hal.h"
  24. #include "esp_bit_defs.h"
  25. #include "esp_attr.h"
  26. #include "esp_err.h"
  27. #include "esp_cpu.h"
  28. #if __XTENSA__
  29. #include "xtensa/config/core-isa.h"
  30. #else
  31. #include "soc/system_reg.h" // For SYSTEM_CPU_PER_CONF_REG
  32. #include "soc/dport_access.h" // For Dport access
  33. #include "riscv/semihosting.h"
  34. #endif
  35. #if SOC_CPU_HAS_FLEXIBLE_INTC
  36. #include "riscv/instruction_decode.h"
  37. #endif
  38. /* --------------------------------------------------- CPU Control -----------------------------------------------------
  39. *
  40. * ------------------------------------------------------------------------------------------------------------------ */
  41. void esp_cpu_stall(int core_id)
  42. {
  43. assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
  44. #if SOC_CPU_CORES_NUM > 1 // We don't allow stalling of the current core
  45. #if CONFIG_IDF_TARGET_ESP32P4
  46. //TODO: IDF-7848
  47. REG_SET_FIELD(PMU_CPU_SW_STALL_REG, core_id ? PMU_HPCORE1_SW_STALL_CODE : PMU_HPCORE0_SW_STALL_CODE, 0x86);
  48. #else
  49. /*
  50. We need to write the value "0x86" to stall a particular core. The write location is split into two separate
  51. bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has its own pair of
  52. "c0" and "c1" bit fields.
  53. Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
  54. "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
  55. file's "rodata" section (see IDF-5214).
  56. */
  57. int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
  58. int rtc_cntl_c0_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_S : RTC_CNTL_SW_STALL_APPCPU_C0_S;
  59. int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
  60. int rtc_cntl_c1_s = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_S : RTC_CNTL_SW_STALL_APPCPU_C1_S;
  61. CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
  62. SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, 2 << rtc_cntl_c0_s);
  63. CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
  64. SET_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, 0x21 << rtc_cntl_c1_s);
  65. #endif // CONFIG_IDF_TARGET_ESP32P4
  66. #endif // SOC_CPU_CORES_NUM > 1
  67. }
  68. void esp_cpu_unstall(int core_id)
  69. {
  70. assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
  71. #if SOC_CPU_CORES_NUM > 1 // We don't allow stalling of the current core
  72. #if CONFIG_IDF_TARGET_ESP32P4
  73. //TODO: IDF-7848
  74. REG_SET_FIELD(PMU_CPU_SW_STALL_REG, core_id ? PMU_HPCORE1_SW_STALL_CODE : PMU_HPCORE0_SW_STALL_CODE, 0);
  75. #else
  76. /*
  77. We need to write clear the value "0x86" to unstall a particular core. The location of this value is split into
  78. two separate bit fields named "c0" and "c1", and the two fields are located in different registers. Each core has
  79. its own pair of "c0" and "c1" bit fields.
  80. Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
  81. "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
  82. file's "rodata" section (see IDF-5214).
  83. */
  84. int rtc_cntl_c0_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C0_M : RTC_CNTL_SW_STALL_APPCPU_C0_M;
  85. int rtc_cntl_c1_m = (core_id == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
  86. CLEAR_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_c0_m);
  87. CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1_m);
  88. #endif // CONFIG_IDF_TARGET_ESP32P4
  89. #endif // SOC_CPU_CORES_NUM > 1
  90. }
  91. void esp_cpu_reset(int core_id)
  92. {
  93. #if CONFIG_IDF_TARGET_ESP32P4
  94. //TODO: IDF-7848
  95. if (core_id == 0)
  96. REG_SET_BIT(LP_CLKRST_HPCPU_RESET_CTRL0_REG, LP_CLKRST_HPCORE0_SW_RESET);
  97. else
  98. REG_SET_BIT(LP_CLKRST_HPCPU_RESET_CTRL0_REG, LP_CLKRST_HPCORE1_SW_RESET);
  99. #else
  100. #if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2// TODO: IDF-5645
  101. SET_PERI_REG_MASK(LP_AON_CPUCORE0_CFG_REG, LP_AON_CPU_CORE0_SW_RESET);
  102. #else
  103. assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
  104. #if SOC_CPU_CORES_NUM > 1
  105. /*
  106. Note: This function can be called when the cache is disabled. We use "ternary if" instead of an array so that the
  107. "rodata" of the register masks/shifts will be stored in this function's "rodata" section, instead of the source
  108. file's "rodata" section (see IDF-5214).
  109. */
  110. int rtc_cntl_rst_m = (core_id == 0) ? RTC_CNTL_SW_PROCPU_RST_M : RTC_CNTL_SW_APPCPU_RST_M;
  111. #else // SOC_CPU_CORES_NUM > 1
  112. int rtc_cntl_rst_m = RTC_CNTL_SW_PROCPU_RST_M;
  113. #endif // SOC_CPU_CORES_NUM > 1
  114. SET_PERI_REG_MASK(RTC_CNTL_OPTIONS0_REG, rtc_cntl_rst_m);
  115. #endif
  116. #endif // CONFIG_IDF_TARGET_ESP32P4
  117. }
  118. void esp_cpu_wait_for_intr(void)
  119. {
  120. #if __XTENSA__
  121. xt_utils_wait_for_intr();
  122. #else
  123. //TODO: IDF-7848
  124. #if !CONFIG_IDF_TARGET_ESP32P4
  125. // TODO: IDF-5645 (better to implement with ll) C6 register names converted in the #include section at the top
  126. if (esp_cpu_dbgr_is_attached() && DPORT_REG_GET_BIT(SYSTEM_CPU_PER_CONF_REG, SYSTEM_CPU_WAIT_MODE_FORCE_ON) == 0) {
  127. /* when SYSTEM_CPU_WAIT_MODE_FORCE_ON is disabled in WFI mode SBA access to memory does not work for debugger,
  128. so do not enter that mode when debugger is connected */
  129. return;
  130. }
  131. #endif
  132. rv_utils_wait_for_intr();
  133. #endif // __XTENSA__
  134. }
  135. /* -------------------------------------------------- CPU Registers ----------------------------------------------------
  136. *
  137. * ------------------------------------------------------------------------------------------------------------------ */
  138. /* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
  139. *
  140. * ------------------------------------------------------------------------------------------------------------------ */
  141. // ---------------- Interrupt Descriptors ------------------
  142. #if SOC_CPU_HAS_FLEXIBLE_INTC
/*
 Returns true when intr_num is reserved and must not be allocated.
 An interrupt number counts as reserved when it is in the hard-coded reservation
 mask below, or when its vector-table entry no longer jumps to the common
 _interrupt_handler (i.e. something installed a custom vector for it).
*/
static bool is_intr_num_resv(int intr_num)
{
    // Workaround to reserve interrupt number 1 for Wi-Fi, 5,8 for Bluetooth, 6 for "permanently disabled interrupt"
    // [TODO: IDF-2465]
    uint32_t reserved = BIT(1) | BIT(5) | BIT(6) | BIT(8);
    // int_num 0,3,4,7 are unavailable for the PULP CPU
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2// TODO: IDF-5728 replace with a better macro name
    reserved |= BIT(0) | BIT(3) | BIT(4) | BIT(7);
#endif
#if SOC_INT_CLIC_SUPPORTED
    // CLIC targets: reservation tracking not implemented yet, so every number is
    // reported free. NOTE: this early return makes the code below unreachable on
    // these targets, including the reservation mask computed above.
    //TODO: IDF-7795
    return false;
#endif
    if (reserved & BIT(intr_num)) {
        return true;
    }
    extern int _vector_table;
    extern int _interrupt_handler;
    // &_vector_table + intr_num is int-pointer arithmetic, i.e. a stride of
    // sizeof(int) bytes per entry — assumes each vector slot is one 32-bit word
    // holding a single JAL instruction (TODO confirm against the vector layout).
    const intptr_t pc = (intptr_t)(&_vector_table + intr_num);
    /* JAL instructions are relative to the PC they are executed from. */
    const intptr_t destination = pc + riscv_decode_offset_from_jal_instruction(pc);
    return destination != (intptr_t)&_interrupt_handler;
}
  166. void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
  167. {
  168. intr_desc_ret->priority = 1; //Todo: We should make this -1
  169. intr_desc_ret->type = ESP_CPU_INTR_TYPE_NA;
  170. #if __riscv
  171. intr_desc_ret->flags = is_intr_num_resv(intr_num) ? ESP_CPU_INTR_DESC_FLAG_RESVD : 0;
  172. #else
  173. intr_desc_ret->flags = 0;
  174. #endif
  175. }
  176. #else // SOC_CPU_HAS_FLEXIBLE_INTC
// Static per-interrupt descriptor for targets with a fixed interrupt matrix.
typedef struct {
    int priority;                      // Fixed hardware priority level of this interrupt line
    esp_cpu_intr_type_t type;          // Trigger type (level/edge), or NA for special lines
    uint32_t flags[SOC_CPU_CORES_NUM]; // Per-core ESP_CPU_INTR_DESC_FLAG_* bits (reserved/special)
} intr_desc_t;
  182. #if SOC_CPU_CORES_NUM > 1
  183. // Note: We currently only have dual core targets, so the table initializer is hard coded
/*
 Interrupt descriptor table for dual-core fixed-matrix targets.
 Each row: { priority, trigger type, { core0 flags, core1 flags } }; the trailing
 comment is the interrupt number. Entries for the FreeRTOS tick timers (6, 15)
 switch between RESVD and SPECIAL depending on which core timer is configured.
*/
const static intr_desc_t intr_desc_table [SOC_CPU_INTR_NUM] = {
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //0
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //1
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //2
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //3
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //4
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //5
#if CONFIG_FREERTOS_CORETIMER_0
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //6
#else
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //6
#endif
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //7
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //8
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //9
    { 1, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } }, //10
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //11
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //12
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //13
    { 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //15
#else
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //15
#endif
    { 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //16
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //17
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //18
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //19
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //20
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //21
    { 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //22
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { 0, 0 } }, //23
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, 0 } }, //24
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //25
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { 0, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //26
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //27
    { 4, ESP_CPU_INTR_TYPE_EDGE, { 0, 0 } }, //28
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL, ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //29
    { 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //30
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD, ESP_CPU_INTR_DESC_FLAG_RESVD } }, //31
};
  226. #else // SOC_CPU_CORES_NUM > 1
/*
 Interrupt descriptor table for single-core fixed-matrix targets.
 Each row: { priority, trigger type, { core0 flags } }; the trailing comment is
 the interrupt number. Entries for the FreeRTOS tick timers (6, 15) switch
 between RESVD and SPECIAL depending on which core timer is configured.
*/
const static intr_desc_t intr_desc_table [SOC_CPU_INTR_NUM] = {
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //0
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //1
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //2
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //3
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //4
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //5
#if CONFIG_FREERTOS_CORETIMER_0
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //6
#else
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //6
#endif
    { 1, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //7
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //8
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //9
    { 1, ESP_CPU_INTR_TYPE_EDGE, { 0 } }, //10
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //11
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //12
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //13
    { 7, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //14, NMI
#if CONFIG_FREERTOS_CORETIMER_1
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //15
#else
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //15
#endif
    { 5, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //16
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //17
    { 1, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //18
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //19
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //20
    { 2, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //21
    { 3, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //22
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //23
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //24
    { 4, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //25
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { 0 } }, //26
    { 3, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //27
    { 4, ESP_CPU_INTR_TYPE_EDGE, { 0 } }, //28
    { 3, ESP_CPU_INTR_TYPE_NA, { ESP_CPU_INTR_DESC_FLAG_SPECIAL } }, //29
    { 4, ESP_CPU_INTR_TYPE_EDGE, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //30
    { 5, ESP_CPU_INTR_TYPE_LEVEL, { ESP_CPU_INTR_DESC_FLAG_RESVD } }, //31
};
  269. #endif // SOC_CPU_CORES_NUM > 1
  270. void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
  271. {
  272. assert(core_id >= 0 && core_id < SOC_CPU_CORES_NUM);
  273. #if SOC_CPU_CORES_NUM == 1
  274. core_id = 0; //If this is a single core target, hard code CPU ID to 0
  275. #endif
  276. intr_desc_ret->priority = intr_desc_table[intr_num].priority;
  277. intr_desc_ret->type = intr_desc_table[intr_num].type;
  278. intr_desc_ret->flags = intr_desc_table[intr_num].flags[core_id];
  279. }
  280. #endif // SOC_CPU_HAS_FLEXIBLE_INTC
  281. /* ---------------------------------------------------- Debugging ------------------------------------------------------
  282. *
  283. * ------------------------------------------------------------------------------------------------------------------ */
  284. // --------------- Breakpoints/Watchpoints -----------------
  285. #if SOC_CPU_BREAKPOINTS_NUM > 0
  286. esp_err_t esp_cpu_set_breakpoint(int bp_num, const void *bp_addr)
  287. {
  288. /*
  289. Todo:
  290. - Check that bp_num is in range
  291. */
  292. #if __XTENSA__
  293. xt_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
  294. #else
  295. if (esp_cpu_dbgr_is_attached()) {
  296. /* If we want to set breakpoint which when hit transfers control to debugger
  297. * we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
  298. * That `action` value is supported only when `dmode` of `tdata1` is set.
  299. * But `dmode` can be modified by debugger only (from Debug Mode).
  300. *
  301. * So when debugger is connected we use special syscall to ask it to set breakpoint for us.
  302. */
  303. long args[] = {true, bp_num, (long)bp_addr};
  304. int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
  305. if (ret == 0) {
  306. return ESP_ERR_INVALID_RESPONSE;
  307. }
  308. }
  309. rv_utils_set_breakpoint(bp_num, (uint32_t)bp_addr);
  310. #endif // __XTENSA__
  311. return ESP_OK;
  312. }
  313. esp_err_t esp_cpu_clear_breakpoint(int bp_num)
  314. {
  315. /*
  316. Todo:
  317. - Check if the bp_num is valid
  318. */
  319. #if __XTENSA__
  320. xt_utils_clear_breakpoint(bp_num);
  321. #else
  322. if (esp_cpu_dbgr_is_attached()) {
  323. // See description in esp_cpu_set_breakpoint()
  324. long args[] = {false, bp_num};
  325. int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
  326. if (ret == 0) {
  327. return ESP_ERR_INVALID_RESPONSE;
  328. }
  329. }
  330. rv_utils_clear_breakpoint(bp_num);
  331. #endif // __XTENSA__
  332. return ESP_OK;
  333. }
  334. #endif // SOC_CPU_BREAKPOINTS_NUM > 0
  335. #if SOC_CPU_WATCHPOINTS_NUM > 0
  336. esp_err_t esp_cpu_set_watchpoint(int wp_num, const void *wp_addr, size_t size, esp_cpu_watchpoint_trigger_t trigger)
  337. {
  338. /*
  339. Todo:
  340. - Check that wp_num is in range
  341. - Check if the wp_num is already in use
  342. */
  343. // Check if size is 2^n, where n is in [0...6]
  344. if (size < 1 || size > 64 || (size & (size - 1)) != 0) {
  345. return ESP_ERR_INVALID_ARG;
  346. }
  347. bool on_read = (trigger == ESP_CPU_WATCHPOINT_LOAD || trigger == ESP_CPU_WATCHPOINT_ACCESS);
  348. bool on_write = (trigger == ESP_CPU_WATCHPOINT_STORE || trigger == ESP_CPU_WATCHPOINT_ACCESS);
  349. #if __XTENSA__
  350. xt_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
  351. #else
  352. if (esp_cpu_dbgr_is_attached()) {
  353. // See description in esp_cpu_set_breakpoint()
  354. long args[] = {true, wp_num, (long)wp_addr, (long)size,
  355. (long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))
  356. };
  357. int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
  358. if (ret == 0) {
  359. return ESP_ERR_INVALID_RESPONSE;
  360. }
  361. }
  362. rv_utils_set_watchpoint(wp_num, (uint32_t)wp_addr, size, on_read, on_write);
  363. #endif // __XTENSA__
  364. return ESP_OK;
  365. }
  366. esp_err_t esp_cpu_clear_watchpoint(int wp_num)
  367. {
  368. /*
  369. Todo:
  370. - Check if the wp_num is valid
  371. */
  372. #if __XTENSA__
  373. xt_utils_clear_watchpoint(wp_num);
  374. #else
  375. if (esp_cpu_dbgr_is_attached()) {
  376. // See description in esp_cpu_dbgr_is_attached()
  377. long args[] = {false, wp_num};
  378. int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
  379. if (ret == 0) {
  380. return ESP_ERR_INVALID_RESPONSE;
  381. }
  382. }
  383. rv_utils_clear_watchpoint(wp_num);
  384. #endif // __XTENSA__
  385. return ESP_OK;
  386. }
  387. #endif // SOC_CPU_WATCHPOINTS_NUM > 0
  388. /* ------------------------------------------------------ Misc ---------------------------------------------------------
  389. *
  390. * ------------------------------------------------------------------------------------------------------------------ */
#if __XTENSA__ && XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
// Lock word (0 = free, 1 = taken) used by esp_cpu_compare_and_set() to emulate
// CAS on external-RAM addresses, since the native S32C1I instruction only works
// on internal RAM; DRAM_ATTR places it in internal data RAM.
static DRAM_ATTR uint32_t external_ram_cas_lock = 0;
#endif
/*
 Atomic compare-and-set: if *addr equals compare_value, writes new_value to *addr.
 Returns true when the swap happened, false otherwise.
 Caveat (Xtensa + SPIRAM path): also returns false — without comparing — when the
 external-RAM CAS lock is already held by another context, so callers must treat
 a false result as "retry", not "values differed".
*/
bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
{
#if __XTENSA__
    bool ret;
#if XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    // Check if the target address is in external RAM
    if ((uint32_t)addr >= SOC_EXTRAM_DATA_LOW && (uint32_t)addr < SOC_EXTRAM_DATA_HIGH) {
        /* The target address is in external RAM, thus the native CAS instruction cannot be used. Instead, we achieve
        atomicity by disabling interrupts and then acquiring an external RAM CAS lock. */
        uint32_t intr_level;
        // Raise the interrupt level to XCHAL_EXCM_LEVEL, saving the old PS value
        __asm__ __volatile__ ("rsil %0, " XTSTR(XCHAL_EXCM_LEVEL) "\n"
                              : "=r"(intr_level));
        if (!xt_utils_compare_and_set(&external_ram_cas_lock, 0, 1)) {
            // External RAM CAS lock already taken. Exit
            ret = false;
            goto exit;
        }
        // Now we compare and set the target address
        ret = (*addr == compare_value);
        if (ret) {
            *addr = new_value;
        }
        // Release the external RAM CAS lock
        external_ram_cas_lock = 0;
exit:
        // Reenable interrupts: memory barrier, then restore the saved PS
        __asm__ __volatile__ ("memw \n"
                              "wsr %0, ps\n"
                              :: "r"(intr_level));
    } else
#endif // XCHAL_HAVE_S32C1I && CONFIG_SPIRAM
    {
        // The target address is in internal RAM. Use the CPU's native CAS instruction
        ret = xt_utils_compare_and_set(addr, compare_value, new_value);
    }
    return ret;
    //TODO: IDF-7771
#else // __riscv
#if SOC_CPU_CORES_NUM > 1
    /* We use lr.w and sc.w pair for riscv TAS. lr.w will read the memory and register a cpu lock signal
     * The state of the lock signal is internal to core, and it is not possible for another core to
     * interfere. sc.w will assert the address is registered. Then write memory and release the lock
     * signal. During the lr.w and sc.w time, if other core acquires the same address, will wait
     */
    volatile uint32_t old_value = 0xB33FFFFF;  // Sentinel: != any compare_value until lr.w overwrites it
    volatile int error = 1;
    // Loop: load-reserve *addr; bail if it differs from compare_value; otherwise
    // store-conditional new_value, retrying from the top if the reservation was lost.
    __asm__ __volatile__(
        "0: lr.w %0, 0(%2) \n"
        "   bne %0, %3, 1f \n"
        "   sc.w %1, %4, 0(%2) \n"
        "   bnez %1, 0b \n"
        "1: \n"
        : "+r" (old_value), "+r" (error)
        : "r" (addr), "r" (compare_value), "r" (new_value)
    );
    return (old_value == compare_value);
#else
    // Single core targets don't have atomic CAS instruction. So access method is the same for internal and external RAM
    return rv_utils_compare_and_set(addr, compare_value, new_value);
#endif
#endif
}