esp_cpu.h

/*
 * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#pragma once

#include "sdkconfig.h"
#include <stdbool.h>
#include <stdint.h>
#include <assert.h>
#include "soc/soc_caps.h"
#ifdef __XTENSA__
#include "xtensa/xtensa_api.h"
#include "xt_utils.h"
#elif __riscv
#include "riscv/rv_utils.h"
#endif
#include "esp_intr_alloc.h"
#include "esp_err.h"
#include "esp_attr.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief CPU cycle count type
 *
 * This data type represents the CPU's clock cycle count
 */
typedef uint32_t esp_cpu_cycle_count_t;

/**
 * @brief CPU interrupt type
 */
typedef enum {
    ESP_CPU_INTR_TYPE_LEVEL = 0,
    ESP_CPU_INTR_TYPE_EDGE,
    ESP_CPU_INTR_TYPE_NA,
} esp_cpu_intr_type_t;

/**
 * @brief CPU interrupt descriptor
 *
 * Each particular CPU interrupt has an associated descriptor describing that
 * particular interrupt's characteristics. Call esp_cpu_intr_get_desc() to get
 * the descriptor of a particular interrupt.
 */
typedef struct {
    int priority;               /**< Priority of the interrupt if it has a fixed priority, (-1) if the priority is configurable. */
    esp_cpu_intr_type_t type;   /**< Whether the interrupt is an edge or level type interrupt, ESP_CPU_INTR_TYPE_NA if the type is configurable. */
    uint32_t flags;             /**< Flags indicating extra details. */
} esp_cpu_intr_desc_t;

/**
 * @brief Interrupt descriptor flags of esp_cpu_intr_desc_t
 */
#define ESP_CPU_INTR_DESC_FLAG_SPECIAL  0x01    /**< The interrupt is a special interrupt (e.g., a CPU timer interrupt) */
#define ESP_CPU_INTR_DESC_FLAG_RESVD    0x02    /**< The interrupt is reserved for internal use */

/**
 * @brief CPU interrupt handler type
 */
typedef void (*esp_cpu_intr_handler_t)(void *arg);

/**
 * @brief CPU watchpoint trigger type
 */
typedef enum {
    ESP_CPU_WATCHPOINT_LOAD,
    ESP_CPU_WATCHPOINT_STORE,
    ESP_CPU_WATCHPOINT_ACCESS,
} esp_cpu_watchpoint_trigger_t;

/* --------------------------------------------------- CPU Control -----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

/**
 * @brief Stall a CPU core
 *
 * @param core_id The core's ID
 */
void esp_cpu_stall(int core_id);

/**
 * @brief Resume a previously stalled CPU core
 *
 * @param core_id The core's ID
 */
void esp_cpu_unstall(int core_id);
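
/*
 * Usage sketch (illustrative, assumptions noted): on a multi-core target,
 * temporarily stall the other core while performing work that it must not
 * interfere with, then resume it. The helper function and the "work" are
 * hypothetical placeholders.
 *
 *     void do_exclusive_work(void)
 *     {
 *         const int other_core = (esp_cpu_get_core_id() == 0) ? 1 : 0;
 *         esp_cpu_stall(other_core);
 *         // ... perform the operation that requires the other core to be halted ...
 *         esp_cpu_unstall(other_core);
 *     }
 */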

/**
 * @brief Reset a CPU core
 *
 * @param core_id The core's ID
 */
void esp_cpu_reset(int core_id);

/**
 * @brief Wait for Interrupt
 *
 * This function causes the current CPU core to execute its Wait For Interrupt
 * (WFI or equivalent) instruction. After executing this function, the CPU core
 * will stop execution until an interrupt occurs.
 */
void esp_cpu_wait_for_intr(void);

/* -------------------------------------------------- CPU Registers ----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

/**
 * @brief Get the current core's ID
 *
 * This function will return the ID of the current CPU (i.e., the CPU that calls
 * this function).
 *
 * @return The current core's ID [0..SOC_CPU_CORES_NUM - 1]
 */
FORCE_INLINE_ATTR __attribute__((pure)) int esp_cpu_get_core_id(void)
{
    // Note: Made "pure" to optimize for single core target
#ifdef __XTENSA__
    return (int)xt_utils_get_core_id();
#else
    return (int)rv_utils_get_core_id();
#endif
}

/**
 * @brief Read the current stack pointer address
 *
 * @return Stack pointer address
 */
FORCE_INLINE_ATTR void *esp_cpu_get_sp(void)
{
#ifdef __XTENSA__
    return xt_utils_get_sp();
#else
    return rv_utils_get_sp();
#endif
}

/**
 * @brief Get the current CPU core's cycle count
 *
 * Each CPU core maintains an internal counter (i.e., cycle count) that increments
 * every CPU clock cycle.
 *
 * @return Current CPU's cycle count, 0 if not supported.
 */
FORCE_INLINE_ATTR esp_cpu_cycle_count_t esp_cpu_get_cycle_count(void)
{
#ifdef __XTENSA__
    return (esp_cpu_cycle_count_t)xt_utils_get_cycle_count();
#else
    return (esp_cpu_cycle_count_t)rv_utils_get_cycle_count();
#endif
}

/**
 * @brief Set the current CPU core's cycle count
 *
 * Set the given value into the internal counter that increments every
 * CPU clock cycle.
 *
 * @param cycle_count CPU cycle count
 */
FORCE_INLINE_ATTR void esp_cpu_set_cycle_count(esp_cpu_cycle_count_t cycle_count)
{
#ifdef __XTENSA__
    xt_utils_set_cycle_count((uint32_t)cycle_count);
#else
    rv_utils_set_cycle_count((uint32_t)cycle_count);
#endif
}
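
/*
 * Usage sketch (illustrative): measure how many CPU cycles a piece of work
 * takes. The workload function is a hypothetical placeholder; because the
 * counter is a 32-bit value, the unsigned subtraction below remains correct
 * across a single wrap-around.
 *
 *     esp_cpu_cycle_count_t start = esp_cpu_get_cycle_count();
 *     do_some_work();                                          // hypothetical workload
 *     esp_cpu_cycle_count_t elapsed = esp_cpu_get_cycle_count() - start;
 */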

/**
 * @brief Convert a program counter (PC) value to an address
 *
 * If the architecture does not store the true virtual address in the CPU's PC
 * or return addresses, this function will convert the PC value to a virtual
 * address. Otherwise, the PC is just returned.
 *
 * @param pc PC value
 * @return Virtual address
 */
FORCE_INLINE_ATTR __attribute__((pure)) void *esp_cpu_pc_to_addr(uint32_t pc)
{
#ifdef __XTENSA__
    // Xtensa stores window rotation in PC[31:30]
    return (void *)((pc & 0x3fffffffU) | 0x40000000U);
#else
    return (void *)pc;
#endif
}
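
/*
 * Usage sketch (illustrative): turn a raw return address, whose top bits hold
 * the window rotation on Xtensa, into a virtual address that can be looked up
 * against the ELF file or printed in a backtrace.
 *
 *     void *caller = esp_cpu_pc_to_addr((uint32_t)(uintptr_t)__builtin_return_address(0));
 */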

/* ------------------------------------------------- CPU Interrupts ----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

// ---------------- Interrupt Descriptors ------------------

/**
 * @brief Get a CPU interrupt's descriptor
 *
 * Each CPU interrupt has a descriptor describing the interrupt's capabilities
 * and restrictions. This function gets the descriptor of a particular interrupt
 * on a particular CPU.
 *
 * @param[in] core_id The core's ID
 * @param[in] intr_num Interrupt number
 * @param[out] intr_desc_ret The interrupt's descriptor
 */
void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret);
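
/*
 * Usage sketch (illustrative): scan the current core's interrupt descriptors
 * for an interrupt that is neither reserved nor special. SOC_CPU_INTR_NUM is
 * used as the loop bound, matching the range checks elsewhere in this header.
 *
 *     int core_id = esp_cpu_get_core_id();
 *     for (int i = 0; i < SOC_CPU_INTR_NUM; i++) {
 *         esp_cpu_intr_desc_t desc;
 *         esp_cpu_intr_get_desc(core_id, i, &desc);
 *         if ((desc.flags & (ESP_CPU_INTR_DESC_FLAG_RESVD | ESP_CPU_INTR_DESC_FLAG_SPECIAL)) == 0) {
 *             // interrupt 'i' is a candidate for use
 *         }
 *     }
 */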

// --------------- Interrupt Configuration -----------------

/**
 * @brief Set the base address of the current CPU's Interrupt Vector Table (IVT)
 *
 * @param ivt_addr Interrupt Vector Table's base address
 */
FORCE_INLINE_ATTR void esp_cpu_intr_set_ivt_addr(const void *ivt_addr)
{
#ifdef __XTENSA__
    xt_utils_set_vecbase((uint32_t)ivt_addr);
#else
    rv_utils_set_mtvec((uint32_t)ivt_addr);
#endif
}

#if SOC_INT_CLIC_SUPPORTED
/**
 * @brief Set the base address of the current CPU's Interrupt Vector Table (MTVT)
 *
 * @param mtvt_addr Interrupt Vector Table's base address
 *
 * @note The MTVT table is only applicable when CLIC is supported
 */
FORCE_INLINE_ATTR void esp_cpu_intr_set_mtvt_addr(const void *mtvt_addr)
{
    rv_utils_set_mtvt((uint32_t)mtvt_addr);
}
#endif //#if SOC_INT_CLIC_SUPPORTED

#if SOC_CPU_HAS_FLEXIBLE_INTC
/**
 * @brief Set the interrupt type of a particular interrupt
 *
 * Set the interrupt type (Level or Edge) of a particular interrupt on the
 * current CPU.
 *
 * @param intr_num Interrupt number (from 0 to 31)
 * @param intr_type The interrupt's type
 */
FORCE_INLINE_ATTR void esp_cpu_intr_set_type(int intr_num, esp_cpu_intr_type_t intr_type)
{
    assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
    enum intr_type type = (intr_type == ESP_CPU_INTR_TYPE_LEVEL) ? INTR_TYPE_LEVEL : INTR_TYPE_EDGE;
    esprv_intc_int_set_type(intr_num, type);
}

/**
 * @brief Get the current configured type of a particular interrupt
 *
 * Get the currently configured type (i.e., level or edge) of a particular
 * interrupt on the current CPU.
 *
 * @param intr_num Interrupt number (from 0 to 31)
 * @return Interrupt type
 */
FORCE_INLINE_ATTR esp_cpu_intr_type_t esp_cpu_intr_get_type(int intr_num)
{
    assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
    enum intr_type type = esprv_intc_int_get_type(intr_num);
    return (type == INTR_TYPE_LEVEL) ? ESP_CPU_INTR_TYPE_LEVEL : ESP_CPU_INTR_TYPE_EDGE;
}

/**
 * @brief Set the priority of a particular interrupt
 *
 * Set the priority of a particular interrupt on the current CPU.
 *
 * @param intr_num Interrupt number (from 0 to 31)
 * @param intr_priority The interrupt's priority
 */
FORCE_INLINE_ATTR void esp_cpu_intr_set_priority(int intr_num, int intr_priority)
{
    assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
    esprv_intc_int_set_priority(intr_num, intr_priority);
}

/**
 * @brief Get the current configured priority of a particular interrupt
 *
 * Get the currently configured priority of a particular interrupt on the
 * current CPU.
 *
 * @param intr_num Interrupt number (from 0 to 31)
 * @return Interrupt's priority
 */
FORCE_INLINE_ATTR int esp_cpu_intr_get_priority(int intr_num)
{
    assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
    return esprv_intc_int_get_priority(intr_num);
}
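
/*
 * Usage sketch (illustrative, only on targets with a flexible interrupt
 * controller): configure an interrupt as level-triggered with a chosen
 * priority. The interrupt number and the priority value 3 are arbitrary
 * examples, not values mandated by this API.
 *
 *     esp_cpu_intr_set_type(intr_num, ESP_CPU_INTR_TYPE_LEVEL);
 *     esp_cpu_intr_set_priority(intr_num, 3);
 */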

#endif // SOC_CPU_HAS_FLEXIBLE_INTC

/**
 * @brief Check if a particular interrupt already has a handler function
 *
 * Check if a particular interrupt on the current CPU already has a handler
 * function assigned.
 *
 * @note This function simply checks if the IVT of the current CPU already has
 *       a handler assigned.
 * @param intr_num Interrupt number (from 0 to 31)
 * @return True if the interrupt has a handler function, false otherwise.
 */
FORCE_INLINE_ATTR bool esp_cpu_intr_has_handler(int intr_num)
{
    assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
    bool has_handler;
#ifdef __XTENSA__
    has_handler = xt_int_has_handler(intr_num, esp_cpu_get_core_id());
#else
    has_handler = intr_handler_get(intr_num);
#endif
    return has_handler;
}

/**
 * @brief Set the handler function of a particular interrupt
 *
 * Assign a handler function (i.e., ISR) to a particular interrupt on the
 * current CPU.
 *
 * @note This function simply sets the handler function (in the IVT) and does
 *       not actually enable the interrupt.
 * @param intr_num Interrupt number (from 0 to 31)
 * @param handler Handler function
 * @param handler_arg Argument passed to the handler function
 */
FORCE_INLINE_ATTR void esp_cpu_intr_set_handler(int intr_num, esp_cpu_intr_handler_t handler, void *handler_arg)
{
    assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
#ifdef __XTENSA__
    xt_set_interrupt_handler(intr_num, (xt_handler)handler, handler_arg);
#else
    intr_handler_set(intr_num, (intr_handler_t)handler, handler_arg);
#endif
}

/**
 * @brief Get a handler function's argument
 *
 * Get the argument of a previously assigned handler function on the current CPU.
 *
 * @param intr_num Interrupt number (from 0 to 31)
 * @return The argument passed to the handler function
 */
FORCE_INLINE_ATTR void *esp_cpu_intr_get_handler_arg(int intr_num)
{
    assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
    void *handler_arg;
#ifdef __XTENSA__
    handler_arg = xt_get_interrupt_handler_arg(intr_num);
#else
    handler_arg = intr_handler_get_arg(intr_num);
#endif
    return handler_arg;
}

// ------------------ Interrupt Control --------------------

/**
 * @brief Enable particular interrupts on the current CPU
 *
 * @param intr_mask Bit mask of the interrupts to enable
 */
FORCE_INLINE_ATTR void esp_cpu_intr_enable(uint32_t intr_mask)
{
#ifdef __XTENSA__
    xt_ints_on(intr_mask);
#else
    rv_utils_intr_enable(intr_mask);
#endif
}
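
/*
 * Usage sketch (illustrative): attach a handler to an interrupt and then
 * unmask it on the current core. The handler, its argument and the interrupt
 * number are hypothetical; applications normally use the higher-level
 * esp_intr_alloc API instead of these low-level calls.
 *
 *     static void my_isr(void *arg)
 *     {
 *         // handle the interrupt
 *     }
 *
 *     if (!esp_cpu_intr_has_handler(intr_num)) {
 *         esp_cpu_intr_set_handler(intr_num, my_isr, NULL);
 *         esp_cpu_intr_enable(1U << intr_num);
 *     }
 */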

/**
 * @brief Disable particular interrupts on the current CPU
 *
 * @param intr_mask Bit mask of the interrupts to disable
 */
FORCE_INLINE_ATTR void esp_cpu_intr_disable(uint32_t intr_mask)
{
#ifdef __XTENSA__
    xt_ints_off(intr_mask);
#else
    rv_utils_intr_disable(intr_mask);
#endif
}

/**
 * @brief Get the enabled interrupts on the current CPU
 *
 * @return Bit mask of the enabled interrupts
 */
FORCE_INLINE_ATTR uint32_t esp_cpu_intr_get_enabled_mask(void)
{
#ifdef __XTENSA__
    return xt_utils_intr_get_enabled_mask();
#else
    return rv_utils_intr_get_enabled_mask();
#endif
}

/**
 * @brief Acknowledge an edge interrupt
 *
 * @param intr_num Interrupt number (from 0 to 31)
 */
FORCE_INLINE_ATTR void esp_cpu_intr_edge_ack(int intr_num)
{
    assert(intr_num >= 0 && intr_num < SOC_CPU_INTR_NUM);
#ifdef __XTENSA__
    xthal_set_intclear((unsigned) (1 << intr_num));
#else
    rv_utils_intr_edge_ack((unsigned) intr_num);
#endif
}

/* -------------------------------------------------- Memory Ports -----------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

/**
 * @brief Configure the CPU to disable access to invalid memory regions
 */
void esp_cpu_configure_region_protection(void);

/* ---------------------------------------------------- Debugging ------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

// --------------- Breakpoints/Watchpoints -----------------

#if SOC_CPU_BREAKPOINTS_NUM > 0
/**
 * @brief Set and enable a hardware breakpoint on the current CPU
 *
 * @note This function is meant to be called by the panic handler to set a
 *       breakpoint for an attached debugger during a panic.
 * @note Overwrites previously set breakpoint with same breakpoint number.
 * @param bp_num Hardware breakpoint number [0..SOC_CPU_BREAKPOINTS_NUM - 1]
 * @param bp_addr Address to set a breakpoint on
 * @return ESP_OK if breakpoint is set. Failure otherwise
 */
esp_err_t esp_cpu_set_breakpoint(int bp_num, const void *bp_addr);

/**
 * @brief Clear a hardware breakpoint on the current CPU
 *
 * @note Clears a breakpoint regardless of whether it was previously set
 * @param bp_num Hardware breakpoint number [0..SOC_CPU_BREAKPOINTS_NUM - 1]
 * @return ESP_OK if breakpoint is cleared. Failure otherwise
 */
esp_err_t esp_cpu_clear_breakpoint(int bp_num);
#endif // SOC_CPU_BREAKPOINTS_NUM > 0

/**
 * @brief Set and enable a hardware watchpoint on the current CPU
 *
 * Set and enable a hardware watchpoint on the current CPU, specifying the
 * memory range and trigger operation. Watchpoints will break/panic the CPU when
 * the CPU accesses (according to the trigger type) a certain memory range.
 *
 * @note Overwrites previously set watchpoint with same watchpoint number.
 *       On RISC-V chips, this API uses method 0 (exact matching) and method 1 (NAPOT matching) from the
 *       riscv-debug-spec-0.13 specification for address matching.
 *       If the watched region size is 1 byte, exact matching (method 0) is used.
 *       If the watched region size is larger than 1 byte, NAPOT matching (method 1) is used. This mode
 *       requires the watched region's start address to be aligned to the region's size.
 *
 * @param wp_num Hardware watchpoint number [0..SOC_CPU_WATCHPOINTS_NUM - 1]
 * @param wp_addr Watchpoint's base address, must be naturally aligned to the size of the region
 * @param size Size of the region to watch. Must be a power of 2 and in the range of [1 ... SOC_CPU_WATCHPOINT_MAX_REGION_SIZE]
 * @param trigger Trigger type
 * @return ESP_ERR_INVALID_ARG on invalid arg, ESP_OK otherwise
 */
esp_err_t esp_cpu_set_watchpoint(int wp_num, const void *wp_addr, size_t size, esp_cpu_watchpoint_trigger_t trigger);
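
/*
 * Usage sketch (illustrative): panic when a particular 4-byte variable is
 * written. The variable and the watchpoint number 0 are hypothetical; a static
 * uint32_t is naturally 4-byte aligned, satisfying the alignment requirement.
 *
 *     static uint32_t s_guarded_value;
 *
 *     esp_err_t err = esp_cpu_set_watchpoint(0, &s_guarded_value, sizeof(s_guarded_value),
 *                                            ESP_CPU_WATCHPOINT_STORE);
 */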

/**
 * @brief Clear a hardware watchpoint on the current CPU
 *
 * @note Clears a watchpoint regardless of whether it was previously set
 * @param wp_num Hardware watchpoint number [0..SOC_CPU_WATCHPOINTS_NUM - 1]
 * @return ESP_OK if watchpoint was cleared. Failure otherwise.
 */
esp_err_t esp_cpu_clear_watchpoint(int wp_num);

// ---------------------- Debugger -------------------------

/**
 * @brief Check if the current CPU has a debugger attached
 *
 * @return True if debugger is attached, false otherwise
 */
FORCE_INLINE_ATTR bool esp_cpu_dbgr_is_attached(void)
{
#ifdef __XTENSA__
    return xt_utils_dbgr_is_attached();
#else
    return rv_utils_dbgr_is_attached();
#endif
}

/**
 * @brief Trigger a call to the current CPU's attached debugger
 */
FORCE_INLINE_ATTR void esp_cpu_dbgr_break(void)
{
#ifdef __XTENSA__
    xt_utils_dbgr_break();
#else
    rv_utils_dbgr_break();
#endif
}
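
/*
 * Usage sketch (illustrative): only raise a breakpoint exception when a
 * debugger is actually attached, so the check is harmless in normal operation.
 *
 *     if (esp_cpu_dbgr_is_attached()) {
 *         esp_cpu_dbgr_break();
 *     }
 */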

// ---------------------- Instructions -------------------------

/**
 * @brief Given the return address, calculate the address of the preceding call instruction
 *
 * This is typically used to answer the question "where was the function called from?"
 *
 * @param return_address The value of the return address register.
 *                       Typically set to the value of __builtin_return_address(0).
 * @return Address of the call instruction preceding the return address.
 */
FORCE_INLINE_ATTR intptr_t esp_cpu_get_call_addr(intptr_t return_address)
{
    /* Both Xtensa and RISC-V have 2-byte instructions, so to get this right we
     * should decode the preceding instruction as if it were 2 bytes long, check
     * whether it is a call, and otherwise treat it as a 3- or 4-byte instruction.
     * However, for the cases where this function is used, being off by one
     * instruction is usually okay, so this is kept simple for now.
     */
#ifdef __XTENSA__
    return return_address - 3;
#else
    return return_address - 4;
#endif
}
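
/*
 * Usage sketch (illustrative): record the address of the current function's
 * call site, e.g. for lightweight tracing, using the return address suggested
 * in the description above.
 *
 *     intptr_t call_site = esp_cpu_get_call_addr((intptr_t)__builtin_return_address(0));
 */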

/* ------------------------------------------------------ Misc ---------------------------------------------------------
 *
 * ------------------------------------------------------------------------------------------------------------------ */

/**
 * @brief Atomic compare-and-set operation
 *
 * @param addr Address of atomic variable
 * @param compare_value Value to compare the atomic variable to
 * @param new_value New value to set the atomic variable to
 * @return Whether the atomic variable was set or not
 */
bool esp_cpu_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value);
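
/*
 * Usage sketch (illustrative): a minimal spin-wait lock built on the
 * compare-and-set primitive. The lock variable and the 0/1 encoding are
 * assumptions for this example, not part of the API.
 *
 *     static volatile uint32_t s_lock = 0;
 *
 *     void lock_acquire(void)
 *     {
 *         while (!esp_cpu_compare_and_set(&s_lock, 0, 1)) {
 *             // spin until the lock is released
 *         }
 *     }
 *
 *     void lock_release(void)
 *     {
 *         s_lock = 0;
 *     }
 */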

#if SOC_BRANCH_PREDICTOR_SUPPORTED
/**
 * @brief Enable branch prediction
 */
FORCE_INLINE_ATTR void esp_cpu_branch_prediction_enable(void)
{
    rv_utils_en_branch_predictor();
}
#endif //#if SOC_BRANCH_PREDICTOR_SUPPORTED

#ifdef __cplusplus
}
#endif