/*
 * SPDX-FileCopyrightText: 2017-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "soc/soc.h"
#include "soc/interrupt_reg.h"
#include "riscv/rvruntime-frames.h"
#include "soc/soc_caps.h"
#include "sdkconfig.h"
#include "esp_private/vectors_const.h"
#include "esp_private/panic_reason.h"

    .equ SAVE_REGS, 32
    .equ CONTEXT_SIZE, (SAVE_REGS * 4)
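    /* The frame size above assumes RV32: SAVE_REGS general-purpose registers of 4 bytes each */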
    .equ EXC_ILLEGAL_INSTRUCTION, 0x2
    .equ panic_from_exception, xt_unhandled_exception
    .equ panic_from_isr, panicHandler

#if ( SOC_CPU_COPROC_NUM > 0 )
/* Targets with coprocessors provide a special CSR to get the Illegal Instruction exception reason */
    .equ EXT_ILL_CSR, 0x7F0

/* EXT_ILL CSR reasons are stored as follows:
 * - Bit 0: FPU core instruction (Load/Store instructions NOT concerned)
 * - Bit 1: Low-power core
 * - Bit 2: PIE core */
    .equ EXT_ILL_RSN_FPU, 1
    .equ EXT_ILL_RSN_LP,  2
    .equ EXT_ILL_RSN_PIE, 4
#endif /* SOC_CPU_COPROC_NUM > 0 */
/* Macro which first allocates space on the stack to save general
 * purpose registers, and then saves them. The GP register is excluded.
 * The default size allocated on the stack is CONTEXT_SIZE, but it
 * can be overridden. */
.macro save_general_regs cxt_size=CONTEXT_SIZE
    addi sp, sp, -\cxt_size
    sw ra, RV_STK_RA(sp)
    sw tp, RV_STK_TP(sp)
    sw t0, RV_STK_T0(sp)
    sw t1, RV_STK_T1(sp)
    sw t2, RV_STK_T2(sp)
    sw s0, RV_STK_S0(sp)
    sw s1, RV_STK_S1(sp)
    sw a0, RV_STK_A0(sp)
    sw a1, RV_STK_A1(sp)
    sw a2, RV_STK_A2(sp)
    sw a3, RV_STK_A3(sp)
    sw a4, RV_STK_A4(sp)
    sw a5, RV_STK_A5(sp)
    sw a6, RV_STK_A6(sp)
    sw a7, RV_STK_A7(sp)
    sw s2, RV_STK_S2(sp)
    sw s3, RV_STK_S3(sp)
    sw s4, RV_STK_S4(sp)
    sw s5, RV_STK_S5(sp)
    sw s6, RV_STK_S6(sp)
    sw s7, RV_STK_S7(sp)
    sw s8, RV_STK_S8(sp)
    sw s9, RV_STK_S9(sp)
    sw s10, RV_STK_S10(sp)
    sw s11, RV_STK_S11(sp)
    sw t3, RV_STK_T3(sp)
    sw t4, RV_STK_T4(sp)
    sw t5, RV_STK_T5(sp)
    sw t6, RV_STK_T6(sp)
.endm
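/* Usage example: `save_general_regs RV_STK_FRMSZ` allocates a full exception
 * frame (larger than the default CONTEXT_SIZE) so that CSRs can be stored
 * after the general-purpose registers, as done in _panic_handler below. */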

.macro save_mepc
    csrr t0, mepc
    sw t0, RV_STK_MEPC(sp)
.endm

/* Restore the general purpose registers (excluding gp) from the context on
 * the stack. The context is then deallocated. The default size is CONTEXT_SIZE
 * but it can be overridden. */
.macro restore_general_regs cxt_size=CONTEXT_SIZE
    lw ra, RV_STK_RA(sp)
    lw tp, RV_STK_TP(sp)
    lw t0, RV_STK_T0(sp)
    lw t1, RV_STK_T1(sp)
    lw t2, RV_STK_T2(sp)
    lw s0, RV_STK_S0(sp)
    lw s1, RV_STK_S1(sp)
    lw a0, RV_STK_A0(sp)
    lw a1, RV_STK_A1(sp)
    lw a2, RV_STK_A2(sp)
    lw a3, RV_STK_A3(sp)
    lw a4, RV_STK_A4(sp)
    lw a5, RV_STK_A5(sp)
    lw a6, RV_STK_A6(sp)
    lw a7, RV_STK_A7(sp)
    lw s2, RV_STK_S2(sp)
    lw s3, RV_STK_S3(sp)
    lw s4, RV_STK_S4(sp)
    lw s5, RV_STK_S5(sp)
    lw s6, RV_STK_S6(sp)
    lw s7, RV_STK_S7(sp)
    lw s8, RV_STK_S8(sp)
    lw s9, RV_STK_S9(sp)
    lw s10, RV_STK_S10(sp)
    lw s11, RV_STK_S11(sp)
    lw t3, RV_STK_T3(sp)
    lw t4, RV_STK_T4(sp)
    lw t5, RV_STK_T5(sp)
    lw t6, RV_STK_T6(sp)
    addi sp, sp, \cxt_size
.endm

.macro restore_mepc
    lw t0, RV_STK_MEPC(sp)
    csrw mepc, t0
.endm

    .global rtos_int_enter
    .global rtos_int_exit
    .global rtos_save_fpu_coproc
    .global _global_interrupt_handler
#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
    .global gdbstub_handle_debug_int
#endif

    .section .exception_vectors.text

/* Exception handler. */
    .type _panic_handler, @function
    .global _panic_handler
_panic_handler:
    /* Allocate space on the stack and store general purpose registers */
    save_general_regs RV_STK_FRMSZ

    /* As the gp register is not saved by the macro, save it here */
    sw gp, RV_STK_GP(sp)
    /* Same goes for the SP value before trapping */
    addi t0, sp, RV_STK_FRMSZ /* restore sp with the value when the trap happened */

    /* Save CSRs */
    sw t0, RV_STK_SP(sp)
    csrr t0, mepc
    sw t0, RV_STK_MEPC(sp)
    csrr t0, mstatus
    sw t0, RV_STK_MSTATUS(sp)
    csrr t0, mtvec
    sw t0, RV_STK_MTVEC(sp)
    csrr t0, mhartid
    sw t0, RV_STK_MHARTID(sp)
    csrr t0, mtval
    sw t0, RV_STK_MTVAL(sp)

    /* Keep mcause in s0, only the exception code and interrupt bit are relevant */
    csrr s0, mcause
    li t1, VECTORS_MCAUSE_INTBIT_MASK | VECTORS_MCAUSE_REASON_MASK
    and s0, s0, t1

#if ( SOC_CPU_COPROC_NUM > 0 )
    /* Check if the exception was caused by a coprocessor instruction. If it was, we have
     * to lazily save the coprocessor registers inside the current owner's save area */
    /* Check if the exception is an Illegal Instruction */
    li a1, EXC_ILLEGAL_INSTRUCTION
    bne s0, a1, _panic_handler_not_coproc
    /* In case this is due to a coprocessor, set ra right now to simplify the logic below */
    la ra, _return_from_exception
    /* The EXT_ILL CSR should contain the reason for the Illegal Instruction */
    csrr a0, EXT_ILL_CSR
    mv a2, a0
    /* Check if the FPU bit is set. On targets that have the FPU reason bug (SOC_CPU_HAS_FPU_EXT_ILL_BUG),
     * another bit may be set even though the faulting instruction is an FPU instruction:
     * for example, bit 1 may be set while bit 0 is not. */
#if SOC_CPU_HAS_FPU
    andi a1, a0, EXT_ILL_RSN_FPU
    bnez a1, rtos_save_fpu_coproc
#if SOC_CPU_HAS_FPU_EXT_ILL_BUG
    /* If the SoC has the hardware EXT_ILL CSR bug, it doesn't support FPU load/store detection,
     * so we have to check the instruction's opcode (in `mtval` = `t0`) */
    andi a0, t0, 0b1011111
    li a1, 0b0000111
    /* If the opcode is of the form 0b0x00111, the instruction is FLW or FSW */
    beq a0, a1, rtos_save_fpu_coproc
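    /* For reference: FLW has opcode 0b0000111 (LOAD-FP) and FSW has opcode 0b0100111
     * (STORE-FP); they differ only in bit 5, so masking it out with 0b1011111 lets a
     * single compare match both. */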
    /* Check the compressed instructions: C.FLW, C.FSW, C.FLWSP and C.FSWSP.
     * All of them have a funct3 (highest 3 bits) of the form x11 and their lowest bit set to 0 */
    li a0, 0x6001
    and a0, t0, a0 /* a0 = mtval & 0x6001 */
    li a1, 0x6000
    beq a0, a1, rtos_save_fpu_coproc
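    /* For reference: C.FLW/C.FLWSP use funct3 011 and C.FSW/C.FSWSP use funct3 111; in
     * all four encodings bits 14:13 are 11 and bit 0 is 0 (compressed quadrants C0 and C2),
     * hence the test (mtval & 0x6001) == 0x6000 matches exactly these instructions. */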
    /* Check if the instruction is CSR-related */
    andi a0, t0, 0b1111111
    li a1, 0b1110011
    bne a0, a1, _panic_handler_not_fpu
    /* Check if it's CSR number 1 (fflags), 2 (frm) or 3 (fcsr) */
    srli a0, t0, 20
    addi a0, a0, -1
    li a1, 3
    bltu a0, a1, rtos_save_fpu_coproc
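    /* The unsigned comparison implements a range check: after subtracting 1, CSR
     * numbers 1..3 map to 0..2, all below 3 when compared unsigned, while CSR
     * number 0 wraps around to 0xFFFFFFFF and fails the test. */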
    /* The instruction was not an FPU one, continue the exception */
_panic_handler_not_fpu:
#endif /* SOC_CPU_HAS_FPU_EXT_ILL_BUG */
#endif /* SOC_CPU_HAS_FPU */
    /* The other coprocessors' reasons would need to be checked now; the instruction is in register a2 */
    /* Ignore LP and PIE for now, continue the exception */
_panic_handler_not_coproc:
#endif /* ( SOC_CPU_COPROC_NUM > 0 ) */

    /* Call panic_from_exception(sp) or panic_from_isr(sp)
     * depending on whether we have a pseudo excause or not.
     * If mcause's highest bit is 1, then an interrupt called this routine,
     * so we have a pseudo excause. Else, it is due to an exception and we
     * don't have a pseudo excause */
    mv a0, sp
    mv a1, s0

    /* Branch instructions don't accept immediate values, so use t0 to
     * store our comparison value */
    li t0, 0x80000000
    bgeu a1, t0, _call_panic_handler
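    /* The unsigned comparison a1 >= 0x80000000 is true exactly when the most
     * significant bit of mcause (the interrupt bit) is set. */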
    sw a1, RV_STK_MCAUSE(sp)
#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
    li t0, 3
    beq a1, t0, _call_gdbstub_handler
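    /* Exception code 3 is Breakpoint (EBREAK), so hand it to the GDB stub */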
#endif
    call panic_from_exception
    /* We arrive here if the exception handler has returned. */
    j _return_from_exception

#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
_call_gdbstub_handler:
    call gdbstub_handle_debug_int
    j _return_from_exception
#endif

_call_panic_handler:
    /* Clear the highest bit of mcause (a1); the result is stored in the frame at _store_mcause below */
    not t0, t0
    and a1, a1, t0
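    /* t0 still holds 0x80000000 from the branch above, so after `not` it is
     * 0x7FFFFFFF and the `and` clears only the interrupt bit. */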
#if CONFIG_SOC_INT_CLIC_SUPPORTED
    /* When the CLIC is supported, external interrupts are shifted by 16; subtract this offset from mcause */
    add a1, a1, -16
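    /* This assumes the first 16 CLIC interrupt lines are reserved for CLINT-compatible
     * internal interrupts, so external interrupt numbers start at 16 and must be
     * rebased to 0 before being used as a panic reason. */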
#endif // CONFIG_SOC_INT_CLIC_SUPPORTED
#if CONFIG_ESP_INT_WDT_CHECK_CPU1
    /* Check if this was an INT WDT */
    li t0, PANIC_RSN_INTWDT_CPU0
    bne a1, t0, _store_mcause
    /* Check if the cause is the APP CPU failing to tick; if so, update mcause to reflect this */
    lw t0, int_wdt_cpu1_ticked
    bnez t0, _store_mcause
    li t0, PANIC_RSN_INTWDT_CPU1_FLAG
    add a1, a1, t0
#endif
_store_mcause:
    sw a1, RV_STK_MCAUSE(sp)
    call panic_from_isr

    /* We arrive here if the exception handler has returned. This means that
     * the exception was handled, and the execution flow should resume.
     * Restore the registers and return from the exception.
     */
_return_from_exception:
    restore_mepc
    /* MTVEC and SP are assumed to be unmodified.
     * MSTATUS, MHARTID and MTVAL are not restored here (MHARTID is read-only).
     */
    lw gp, RV_STK_GP(sp)
    restore_general_regs RV_STK_FRMSZ
    mret
    .size _panic_handler, .-_panic_handler

/* This is the interrupt handler.
 * It saves the registers on the stack, prepares for interrupt nesting, re-enables the interrupts,
 * then jumps to the C dispatcher in interrupt.c. Upon return, the register context will be restored
 * from the stack.
 */
    .global _interrupt_handler
    .type _interrupt_handler, @function
_interrupt_handler:
    /* Start by saving the general purpose registers and the PC value before
     * the interrupt happened. */
    save_general_regs
    save_mepc

    /* Though it is not strictly necessary, we save GP and SP here.
     * SP is needed to help GDB properly unwind
     * the backtrace of threads preempted by interrupts (OS tick etc.).
     * GP is saved just so it has its proper value in GDB. */
    /* As the gp register is not saved by the macro, save it here */
    sw gp, RV_STK_GP(sp)
    /* Same goes for the SP value before trapping */
    addi a0, sp, CONTEXT_SIZE /* restore sp with the value when the interrupt happened */
    /* Save the former SP value */
    sw a0, RV_STK_SP(sp)

    /* Notify the RTOS that an interrupt occurred; it will save the current stack pointer
     * in the running TCB, so there is no need to pass it as a parameter */
    call rtos_int_enter
    /* If this is a non-nested interrupt, SP now points to the interrupt stack */

    /* Before dispatching to the C handler, re-enable interrupts to allow nesting */
    csrr s1, mcause
    csrr s2, mstatus

#if !SOC_INT_HW_NESTED_SUPPORTED
    /* Save the interrupt threshold level */
    li t0, INTERRUPT_CURRENT_CORE_INT_THRESH_REG
    lw s3, 0(t0)

    /* Increase the interrupt threshold level */
    li t2, VECTORS_MCAUSE_REASON_MASK
    and t1, s1, t2   /* t1 = mcause & mask */
    slli t1, t1, 2   /* t1 = mcause * 4 */
    li t2, INTC_INT_PRIO_REG(0)
    add t1, t2, t1   /* t1 = INTC_INT_PRIO_REG + 4 * mcause */
    lw t2, 0(t1)     /* t2 = INTC_INT_PRIO_REG[mcause] */
    addi t2, t2, 1   /* t2 = t2 + 1 */
    sw t2, 0(t0)     /* INTERRUPT_CURRENT_CORE_INT_THRESH_REG = t2 */
    fence
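    /* Setting the threshold to the current interrupt's priority + 1 masks all
     * interrupts of the same or lower priority, so only strictly higher-priority
     * interrupts can nest once MIE is set below; the fence ensures the threshold
     * write has taken effect before interrupts are re-enabled. */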
#endif // !SOC_INT_HW_NESTED_SUPPORTED
    csrsi mstatus, 0x8
    /* MIE set. Nested interrupts can now occur */

#ifdef CONFIG_PM_TRACE
    li a0, 0 /* = ESP_PM_TRACE_IDLE */
#if SOC_CPU_CORES_NUM == 1
    li a1, 0 /* No need to check the core ID on single-core hardware */
#else
    csrr a1, mhartid
#endif
    la t0, esp_pm_trace_exit
    jalr t0 /* absolute jump, avoids the 1 MiB range constraint */
#endif

#ifdef CONFIG_PM_ENABLE
    la t0, esp_pm_impl_isr_hook
    jalr t0 /* absolute jump, avoids the 1 MiB range constraint */
#endif

    /* Call the C dispatcher */
    mv a0, sp /* argument 1, stack pointer */
    mv a1, s1 /* argument 2, interrupt number (mcause) */
    /* Mask off the interrupt flag of mcause */
    li t0, VECTORS_MCAUSE_REASON_MASK
    and a1, a1, t0
    jal _global_interrupt_handler

    /* After the C handler returns, disable interrupts so that FreeRTOS can perform a context switch */
    csrci mstatus, 0x8
    /* MIE cleared. Nested interrupts are disabled */

#if !SOC_INT_HW_NESTED_SUPPORTED
    /* Restore the interrupt threshold level */
    li t0, INTERRUPT_CURRENT_CORE_INT_THRESH_REG
    sw s3, 0(t0)
    fence
#endif // !SOC_INT_HW_NESTED_SUPPORTED

    /* The RTOS will restore the current TCB stack pointer. This routine preserves s1 and s2
     * and returns the new `mstatus` value. */
    mv a0, s2 /* a0 = mstatus */
    call rtos_int_exit

    /* Restore the rest of the registers.
     * In case the target uses the CLIC, it is mandatory to restore the `mcause` register since it
     * contains the former CPU priority: when executing `mret`, the hardware restores the former
     * threshold from `mcause` into the `mintstatus` CSR */
    csrw mcause, s1
    csrw mstatus, a0
    restore_mepc
    restore_general_regs
    /* Exit; this will also re-enable the interrupts */
    mret
    .size _interrupt_handler, .-_interrupt_handler