interrupt_gcc.S

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018/10/02     Bernard      The first version
 * 2018/12/27     Jesven       Add SMP schedule
 * 2021/02/02     lizhirui     Add userspace support
 * 2021/12/24     JasonHu      Add user setting save/restore
 * 2022/10/22     Shell        Support kernel mode RVV;
 *                             Rewrite trap handling routine
 */
#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"

    .align 2
    .global trap_entry
    .global debug_check_sp

trap_entry:
    // distinguish whether the trap came from kernel mode or user mode
    csrrw sp, sscratch, sp
    bnez sp, _save_context
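
    // convention in this port: while in user mode, sscratch holds the
    // kernel stack pointer; while in kernel mode it holds zero. After
    // the swap above, a nonzero sp therefore means we trapped from
    // user mode and sp already points at the kernel stack.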

    // BE REALLY careful with sscratch: if it is wrong, we could loop
    // here forever, or access random memory and only watch things fall
    // apart much later without ever learning why
_from_kernel:
    csrr sp, sscratch
    j _save_context

_save_context:
    SAVE_ALL
    // clear sscratch to flag 'now in kernel mode', so a nested trap
    // taken from kernel mode goes down the _from_kernel path above
    csrw sscratch, zero
    RESTORE_SYS_GP

    // now we are ready to enter the interrupt / exception handler
_distinguish_syscall:
    csrr t0, scause
#ifdef RT_USING_SMART
    // TODO swap 8 with config macro name
    li t1, 8    /* scause 8: environment call from U-mode */
    bne t0, t1, _handle_interrupt_and_exception
    call syscall_entry
    // a syscall never returns here
#endif
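
    // note: ecall does not advance sepc past the trapping instruction;
    // the syscall path is presumably expected to bump sepc by 4 itself
    // and sret directly, which is why control never comes back here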

_handle_interrupt_and_exception:
    mv a0, t0
    // csrrc with a zero mask reads stval without modifying it
    csrrc a1, stval, zero
    csrr a2, sepc
    // pass sp as the exception frame pointer
    mv a3, sp
    call handle_trap
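
    // the C-side handler receives these roughly as follows (a sketch;
    // the exact prototype lives in the port's C sources):
    //   void handle_trap(rt_ubase_t scause, rt_ubase_t stval,
    //                    rt_ubase_t sepc, struct rt_hw_stack_frame *sp);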

_interrupt_exit:
#ifndef RT_USING_SMP
    // on UP, a context switch requested while servicing the interrupt
    // is deferred to this point via rt_thread_switch_interrupt_flag
    la s0, rt_thread_switch_interrupt_flag
    lw s2, 0(s0)
    beqz s2, _resume_execution
    // clear the flag and fall through to the context switch
    sw zero, 0(s0)
#else
    mv a0, sp
    call rt_scheduler_do_irq_switch
    // if no switch was performed, resume the interrupted context
    j _resume_execution
#endif /* RT_USING_SMP */
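
    // rt_interrupt_from_thread / rt_interrupt_to_thread are filled in
    // by the kernel when it requests a switch from interrupt context;
    // by RT-Thread convention they hold the addresses of the outgoing
    // and incoming threads' saved stack pointer slots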
_context_switch:
    la t0, rt_interrupt_from_thread
    LOAD a0, 0(t0)
    la t0, rt_interrupt_to_thread
    LOAD a1, 0(t0)
    // clear SPIE so a later sret will not re-enable interrupts
    csrr t0, sstatus
    andi t0, t0, ~SSTATUS_SPIE
    csrw sstatus, t0
    jal rt_hw_context_switch
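
    // rt_hw_context_switch(from, to) saves the outgoing context into
    // *from and resumes the incoming one from *to; execution continues
    // on the new thread's stack, so control only returns here once
    // this thread is scheduled again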

_resume_execution:
#ifdef RT_USING_SMART
    // check the saved SPP bit: was the trap taken from supervisor mode?
    LOAD t0, FRAME_OFF_SSTATUS(sp)
    andi t0, t0, SSTATUS_SPP
    bnez t0, _resume_kernel
    // trapped from user mode: take the return-to-user path instead
    call arch_ret_to_user
#endif
_resume_kernel:
    RESTORE_ALL
    // we are returning to kernel mode here, so sscratch stays zero
    // per the convention above
    csrw sscratch, zero
    sret

#ifndef RT_USING_SMP
    .global rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    csrs sstatus, a0    /* restore the SIE state saved by the matching disable */
    jr ra

    .global rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    csrrci a0, sstatus, 2    /* clear SIE (bit 1) and return the old sstatus */
    jr ra
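
    /*
     * Typical usage from C, following the standard RT-Thread critical
     * section pattern (a sketch, not part of this file):
     *
     *   rt_base_t level = rt_hw_interrupt_disable();
     *   ... critical section ...
     *   rt_hw_interrupt_enable(level);
     */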

#else
    .global rt_hw_local_irq_disable
rt_hw_local_irq_disable:
    csrrci a0, sstatus, 2    /* clear SIE and return the old sstatus */
    jr ra

    .global rt_hw_local_irq_enable
rt_hw_local_irq_enable:
    csrs sstatus, a0
    jr ra
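
    // called from the idle loop on secondary harts; this port has
    // nothing to do there, so it is a no-op hook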
    .global rt_hw_secondary_cpu_idle_exec
rt_hw_secondary_cpu_idle_exec:
    jr ra
#endif /* RT_USING_SMP */