/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-20     Bernard      first version
 */

#include <rtthread.h>
#include <rthw.h>
#include <board.h>
#include <armv8.h>
#include "interrupt.h"
#include "mm_aspace.h"
#include <backtrace.h>

#define DBG_TAG "libcpu.trap"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>

void rt_unwind(struct rt_hw_exp_stack *regs, int pc_adj)
{
}

#ifdef RT_USING_FINSH
extern long list_thread(void);
#endif

#ifdef RT_USING_LWP
#include <lwp.h>
#include <lwp_arch.h>

#ifdef LWP_USING_CORE_DUMP
#include <lwp_core_dump.h>
#endif
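
/*
 * Handle an unrecoverable fault. If the exception was raised in user mode
 * (SPSR M[4:0] == 0, i.e. EL0t), dump a user backtrace, optionally write a
 * core dump, and terminate the offending process; otherwise dump both the
 * user and kernel backtraces so the kernel fault can be diagnosed.
 */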
static void _check_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *info)
{
    uint32_t mode = regs->cpsr;

    if ((mode & 0x1f) == 0x00)
    {
        rt_kprintf("%s! pc = 0x%x\n", info, regs->pc - pc_adj);

        /* user stack backtrace */
#ifdef RT_USING_LWP
        {
            rt_thread_t th;

            th = rt_thread_self();
            if (th && th->lwp)
            {
                rt_backtrace_user_thread(th);
            }
        }
#endif

#ifdef LWP_USING_CORE_DUMP
        lwp_core_dump(regs, pc_adj);
#endif
        sys_exit_group(-1);
    }
    else
    {
        /* user stack backtrace */
#ifdef RT_USING_LWP
        {
            rt_thread_t th;

            th = rt_thread_self();
            if (th && th->lwp)
            {
                rt_backtrace_user_thread(th);
            }
        }
#endif

        /* kernel stack backtrace */
        backtrace((unsigned long)regs->pc, (unsigned long)regs->x30, (unsigned long)regs->x29);
    }
}
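
/*
 * Classify a fault from the fault status code (DFSC/IFSC) in ESR_EL1[5:0]:
 *   0x4-0x7  translation fault, level 0-3  -> page fault (mapping missing)
 *   0xc-0xf  permission fault, level 0-3   -> access fault
 *   0x8-0xb  access flag fault, level 0-3  -> generic
 * Any other status code is also reported as a generic fault.
 */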
rt_inline int _get_type(unsigned long esr)
{
    int ret;
    int fsc = esr & 0x3f;

    switch (fsc)
    {
    case 0x4:
    case 0x5:
    case 0x6:
    case 0x7:
        ret = MM_FAULT_TYPE_PAGE_FAULT;
        break;
    case 0xc:
    case 0xd:
    case 0xe:
    case 0xf:
        ret = MM_FAULT_TYPE_ACCESS_FAULT;
        break;
    case 0x8:
    case 0x9:
    case 0xa:
    case 0xb:
        /* access flag fault */
    default:
        ret = MM_FAULT_TYPE_GENERIC;
    }
    return ret;
}
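
/* the I (IRQ mask) flag is bit 7 of the saved PSTATE/SPSR */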
rt_inline long _irq_is_disable(long cpsr)
{
    return !!(cpsr & 0x80);
}
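
/*
 * Try to fix a fault by completing the missing mapping. The exception class
 * (EC, ESR_EL1[31:26]) tells what kind of abort occurred:
 *   0x20  instruction abort from a lower EL (user)
 *   0x21  instruction abort without a change of EL (kernel)
 *   0x24  data abort from a lower EL (user)
 *   0x25  data abort without a change of EL (kernel)
 * The faulting address is read from FAR_EL1 and handed to the aspace layer;
 * IRQ/FIQ are re-enabled around the fix so paging may block. Returns 1 if
 * the fault was fixed and the instruction can be retried, 0 otherwise.
 */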
static int user_fault_fixable(unsigned long esr, struct rt_hw_exp_stack *regs)
{
    rt_ubase_t level;
    unsigned char ec;
    void *dfar;
    int ret = 0;

    ec = (unsigned char)((esr >> 26) & 0x3fU);
    enum rt_mm_fault_op fault_op;
    enum rt_mm_fault_type fault_type;
    struct rt_lwp *lwp;

    switch (ec)
    {
    case 0x20:
        fault_op = MM_FAULT_OP_EXECUTE;
        fault_type = _get_type(esr);
        break;
    case 0x21:
    case 0x24:
    case 0x25:
        fault_op = MM_FAULT_OP_WRITE;
        fault_type = _get_type(esr);
        break;
    default:
        /* not a fault this handler can fix */
        fault_op = 0;
        break;
    }

    /* page faults can only be fixed for threads that own a user address space */
    lwp = lwp_self();
    if (lwp && fault_op)
    {
        __asm__ volatile("mrs %0, far_el1":"=r"(dfar));
        struct rt_aspace_fault_msg msg = {
            .fault_op = fault_op,
            .fault_type = fault_type,
            .fault_vaddr = dfar,
        };

        lwp_user_setting_save(rt_thread_self());
        /* save DAIF, then unmask IRQ and FIQ while the fault is being fixed */
        __asm__ volatile("mrs %0, daif\nmsr daifclr, 0x3\nisb\n":"=r"(level));
        if (rt_aspace_fault_try_fix(lwp->aspace, &msg))
        {
            ret = 1;
        }
        __asm__ volatile("msr daif, %0\nisb\n"::"r"(level));
    }
    return ret;
}
#endif

/**
 * this function shows the registers of the CPU
 *
 * @param regs the saved register frame of the exception
 */
void rt_hw_show_register(struct rt_hw_exp_stack *regs)
{
    rt_kprintf("Exception:\n");
    rt_kprintf("X00:0x%16.16p X01:0x%16.16p X02:0x%16.16p X03:0x%16.16p\n", (void *)regs->x0, (void *)regs->x1, (void *)regs->x2, (void *)regs->x3);
    rt_kprintf("X04:0x%16.16p X05:0x%16.16p X06:0x%16.16p X07:0x%16.16p\n", (void *)regs->x4, (void *)regs->x5, (void *)regs->x6, (void *)regs->x7);
    rt_kprintf("X08:0x%16.16p X09:0x%16.16p X10:0x%16.16p X11:0x%16.16p\n", (void *)regs->x8, (void *)regs->x9, (void *)regs->x10, (void *)regs->x11);
    rt_kprintf("X12:0x%16.16p X13:0x%16.16p X14:0x%16.16p X15:0x%16.16p\n", (void *)regs->x12, (void *)regs->x13, (void *)regs->x14, (void *)regs->x15);
    rt_kprintf("X16:0x%16.16p X17:0x%16.16p X18:0x%16.16p X19:0x%16.16p\n", (void *)regs->x16, (void *)regs->x17, (void *)regs->x18, (void *)regs->x19);
    rt_kprintf("X20:0x%16.16p X21:0x%16.16p X22:0x%16.16p X23:0x%16.16p\n", (void *)regs->x20, (void *)regs->x21, (void *)regs->x22, (void *)regs->x23);
    rt_kprintf("X24:0x%16.16p X25:0x%16.16p X26:0x%16.16p X27:0x%16.16p\n", (void *)regs->x24, (void *)regs->x25, (void *)regs->x26, (void *)regs->x27);
    rt_kprintf("X28:0x%16.16p X29:0x%16.16p X30:0x%16.16p\n", (void *)regs->x28, (void *)regs->x29, (void *)regs->x30);
    rt_kprintf("SP_EL0:0x%16.16p\n", (void *)regs->sp_el0);
    rt_kprintf("SPSR  :0x%16.16p\n", (void *)regs->cpsr);
    rt_kprintf("EPC   :0x%16.16p\n", (void *)regs->pc);
}
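
/*
 * IRQ entry. Two dispatch paths exist: the BCM283x path polls the SoC's
 * legacy interrupt controller pending registers directly, while the generic
 * path reads the active interrupt ID from the interrupt controller (GIC,
 * where ID 1023 means spurious) and acknowledges it after the handler runs.
 */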
void rt_hw_trap_irq(void)
{
#ifdef SOC_BCM283x
    extern rt_uint8_t core_timer_flag;
    void *param;
    uint32_t irq;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];
    uint32_t value = 0;

    value = IRQ_PEND_BASIC & 0x3ff;
    if (core_timer_flag != 0)
    {
        uint32_t cpu_id = rt_hw_cpu_id();
        uint32_t int_source = CORE_IRQSOURCE(cpu_id);
        if (int_source & 0x0f)
        {
            if (int_source & 0x08)
            {
                isr_func = isr_table[IRQ_ARM_TIMER].handler;
#ifdef RT_USING_INTERRUPT_INFO
                isr_table[IRQ_ARM_TIMER].counter++;
#endif
                if (isr_func)
                {
                    param = isr_table[IRQ_ARM_TIMER].param;
                    isr_func(IRQ_ARM_TIMER, param);
                }
            }
        }
    }

    /* local interrupt */
    if (value)
    {
        if (value & (1 << 8))
        {
            value = IRQ_PEND1;
            irq = __rt_ffs(value) - 1;
        }
        else if (value & (1 << 9))
        {
            value = IRQ_PEND2;
            irq = __rt_ffs(value) + 31;
        }
        else
        {
            value &= 0x0f;
            irq = __rt_ffs(value) + 63;
        }

        /* get interrupt service routine */
        isr_func = isr_table[irq].handler;
#ifdef RT_USING_INTERRUPT_INFO
        isr_table[irq].counter++;
#endif
        if (isr_func)
        {
            /* Interrupt for myself. */
            param = isr_table[irq].param;
            /* turn to interrupt service routine */
            isr_func(irq, param);
        }
    }
#else
    void *param;
    int ir, ir_self;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];

    ir = rt_hw_interrupt_get_irq();
    if (ir == 1023)
    {
        /* spurious interrupt */
        return;
    }

    /* bit 10~12 is cpuid, bit 0~9 is interrupt id */
    ir_self = ir & 0x3ffUL;

    /* get interrupt service routine */
    isr_func = isr_table[ir_self].handler;
#ifdef RT_USING_INTERRUPT_INFO
    isr_table[ir_self].counter++;
#endif
    if (isr_func)
    {
        /* Interrupt for myself. */
        param = isr_table[ir_self].param;
        /* turn to interrupt service routine */
        isr_func(ir_self, param);
    }

    /* end of interrupt */
    rt_hw_interrupt_ack(ir);
#endif
}

#ifdef RT_USING_SMART
#define DBG_CHECK_EVENT(regs, esr) dbg_check_event(regs, esr)
#else
#define DBG_CHECK_EVENT(regs, esr) (0)
#endif
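
/*
 * FIQ entry. Unlike the IRQ path, there is no spurious-interrupt or
 * NULL-handler check here: the registered handler is invoked
 * unconditionally, so a handler must be installed for any FIQ source.
 */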
void rt_hw_trap_fiq(void)
{
    void *param;
    int ir, ir_self;
    rt_isr_handler_t isr_func;
    extern struct rt_irq_desc isr_table[];

    ir = rt_hw_interrupt_get_irq();

    /* bit 10~12 is cpuid, bit 0~9 is interrupt id */
    ir_self = ir & 0x3ffUL;

    /* get interrupt service routine */
    isr_func = isr_table[ir_self].handler;
    param = isr_table[ir_self].param;

    /* turn to interrupt service routine */
    isr_func(ir_self, param);

    /* end of interrupt */
    rt_hw_interrupt_ack(ir);
}

void print_exception(unsigned long esr, unsigned long epc);
void SVC_Handler(struct rt_hw_exp_stack *regs);
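
/*
 * Synchronous exception entry. The exception class (EC) in ESR_EL1[31:26]
 * selects the handling path:
 *   - a debug event claimed by DBG_CHECK_EVENT() returns immediately;
 *   - EC 0x15 (SVC from AArch64) is routed to the system-call handler;
 *   - otherwise a page-fault fix is attempted, and if that fails the
 *     exception is fatal: registers are dumped and the CPU is shut down.
 */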
void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
{
    unsigned long esr;
    unsigned char ec;

    asm volatile("mrs %0, esr_el1":"=r"(esr));
    ec = (unsigned char)((esr >> 26) & 0x3fU);

    if (DBG_CHECK_EVENT(regs, esr))
    {
        return;
    }
    else if (ec == 0x15) /* 64-bit syscall (SVC from AArch64)? */
    {
        SVC_Handler(regs);
        /* never return here */
    }

#ifdef RT_USING_SMART
    /**
     * Note: the fault-fix path takes locks, so it can possibly dead-lock
     * if the exception comes from kernel context.
     */
    if ((regs->cpsr & 0x1f) == 0)
    {
        /* exception from EL0 (user mode) */
        if (user_fault_fixable(esr, regs))
            return;
    }
    else
    {
        if (_irq_is_disable(regs->cpsr))
        {
            LOG_E("Kernel fault from interrupt/critical section");
        }
        if (rt_critical_level() != 0)
        {
            LOG_E("scheduler is not available");
        }
        else if (user_fault_fixable(esr, regs))
            return;
    }
#endif

    print_exception(esr, regs->pc);
    rt_hw_show_register(regs);
    LOG_E("current thread: %s\n", rt_thread_self()->parent.name);

#ifdef RT_USING_FINSH
    list_thread();
#endif
#ifdef RT_USING_LWP
    _check_fault(regs, 0, "user fault");
#endif
    rt_hw_cpu_shutdown();
}
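
/*
 * SError (asynchronous system error) entry. SErrors are not recoverable
 * here, so the register frame and thread list are dumped and the CPU is
 * shut down.
 */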
void rt_hw_trap_serror(struct rt_hw_exp_stack *regs)
{
    rt_kprintf("SError\n");
    rt_hw_show_register(regs);
    rt_kprintf("current: %s\n", rt_thread_self()->parent.name);
#ifdef RT_USING_FINSH
    list_thread();
#endif
    rt_hw_cpu_shutdown();
}