lwp_gcc.S

/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-12-10     Jesven       first version
 * 2021-02-03     lizhirui     port to riscv64
 * 2021-02-19     lizhirui     port to new version of rt-smart
 * 2022-11-08     Wangxiaoyao  Cleanup codes;
 *                             Support new context switch
 * 2023-07-16     Shell        Move part of the codes to C from asm in signal handling
 */
#include "rtconfig.h"

#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif /* __ASSEMBLY__ */

#include "cpuport.h"
#include "encoding.h"
#include "stackframe.h"
#include "asm-generic.h"

.section .text.lwp

/*
 * void arch_start_umode(args, text, ustack, kstack);
 */
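/*
 * Note: per the RISC-V calling convention, args/text/ustack/kstack arrive in
 * a0-a3. The kernel stack top is parked in sscratch while the thread runs in
 * user mode, which is (presumably) the convention the trap entry in
 * stackframe.h relies on to find the kernel stack again.
 */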
.global arch_start_umode
.type arch_start_umode, %function
arch_start_umode:
    // load kstack for user process
    csrw sscratch, a3
    li t0, SSTATUS_SPP | SSTATUS_SIE    // set previous mode to user, disable interrupts
    csrc sstatus, t0
    li t0, SSTATUS_SPIE                 // enable interrupts on return to user mode
    csrs sstatus, t0
    csrw sepc, a1
    mv sp, a2
    sret // enter user mode

/*
 * void arch_crt_start_umode(args, text, ustack, kstack);
 */
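/*
 * In addition to arch_start_umode, this path copies a small return stub onto
 * the user stack (lwp_copy_return_code_to_user_stack) and fixes up the user
 * sp (lwp_fix_sp). The adjusted sp is installed both as the user sp and as
 * ra, so returning from the C runtime entry presumably falls into the copied
 * exit stub rather than into arbitrary memory.
 */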
.global arch_crt_start_umode
.type arch_crt_start_umode, %function
arch_crt_start_umode:
    li t0, SSTATUS_SPP | SSTATUS_SIE    // set previous mode to user, disable interrupts
    csrc sstatus, t0
    li t0, SSTATUS_SPIE                 // enable interrupts on return to user mode
    csrs sstatus, t0
    csrw sepc, a1
    mv s0, a0
    mv s1, a1
    mv s2, a2
    mv s3, a3
    mv a0, s2
    call lwp_copy_return_code_to_user_stack
    mv a0, s2
    call lwp_fix_sp
    mv sp, a0   // user_sp
    mv ra, a0   // return address
    mv a0, s0   // args
    csrw sscratch, s3
    sret // enter user mode

/**
 * Unified exit point from kernel mode back to user space.
 * The following is handled here:
 * 1. restoring user mode debug state (not supported yet)
 * 2. handling the thread's exit request
 * 3. handling POSIX signals (skipped on the signal-quit path)
 * 4. restoring the user context
 * 5. jumping to user mode
 */
.global arch_ret_to_user
arch_ret_to_user:
    li s0, 1    // flag = 1 (normal path)
    j arch_ret_to_user_impl

.global arch_signal_quit_ret_to_user
arch_signal_quit_ret_to_user:
    li s0, 0    // flag = 0 (signal-quit path)

arch_ret_to_user_impl:
    // TODO: kernel gdb server is not supported on RISC-V yet,
    // so no debug state is checked or handled here
    call lwp_check_exit_request
    beqz a0, 1f
    mv a0, x0
    call sys_exit

1:
    // skip signal handling if coming from arch_signal_quit
    beqz s0, ret_to_user_exit
    mv a0, sp
    call lwp_thread_signal_catch

ret_to_user_exit:
    RESTORE_ALL
    // `RESTORE_ALL` also resets sp to the user sp and sets up sscratch
    sret

/**
 * Restore the user context from the exception frame stored on the user stack
 * and handle pending signals.
 */
arch_signal_quit:
    LOAD a0, FRAME_OFF_SP(sp)
    call arch_signal_ucontext_restore

    /* reset kernel sp to the stack */
    addi sp, sp, CTX_REG_NR * REGBYTES
    STORE sp, FRAME_OFF_SP(a0)

    /* the return value is the user sp */
    mv sp, a0

    /* restore the user sp as it was before entering the trap */
    addi a0, sp, CTX_REG_NR * REGBYTES
    csrw sscratch, a0
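
    /*
     * The RESTORE_ALL/SAVE_ALL pair below round-trips the context recovered
     * from the user-stack frame so it becomes the live trap frame again, then
     * leaves through the signal-quit return path, which skips signal
     * handling. (A sketch of the intent only; the exact sp/sscratch juggling
     * is defined by SAVE_ALL/RESTORE_ALL in stackframe.h.)
     */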
    RESTORE_ALL
    SAVE_ALL
    j arch_signal_quit_ret_to_user

/**
 * rt_noreturn
 * void arch_thread_signal_enter(
 *     int signo,                      -> a0
 *     siginfo_t *psiginfo,            -> a1
 *     void *exp_frame,                -> a2
 *     void *entry_uaddr,              -> a3
 *     lwp_sigset_t *save_sig_mask,    -> a4
 * )
 */
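/*
 * Rough flow: stash signo/exp_frame/entry_uaddr in s-registers, let
 * arch_signal_ucontext_save push the signal context onto the user stack,
 * switch sp to the user stack while parking the kernel sp in sscratch, and
 * sret into handler(signo, psi, ucontext). If entry_uaddr is NULL, the
 * address returned by arch_signal_ucontext_save is used as the handler
 * instead (presumably a user-side sigreturn trampoline).
 */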
.global arch_thread_signal_enter
arch_thread_signal_enter:
    mv s3, a2
    mv s2, a0
    mv s1, a3

    LOAD t0, FRAME_OFF_SP(a2)
    mv a3, t0
    call arch_signal_ucontext_save

    /** restore kernel sp */
    addi sp, s3, CTX_REG_NR * REGBYTES

    /**
     * set register RA to the user signal handler
     * set sp to the user sp & save the kernel sp in sscratch
     */
    mv ra, a0
    csrw sscratch, sp
    mv sp, a0

    /**
     * s1 is the signal handler:
     * s1 = !s1 ? lwp_sigreturn : s1;
     */
    bnez s1, 1f
    mv s1, ra
1:
    /* enter user mode and enable interrupts on return to user mode */
    li t0, SSTATUS_SPP
    csrc sstatus, t0
    li t0, SSTATUS_SPIE
    csrs sstatus, t0

    /* sepc <- signal_handler */
    csrw sepc, s1
    /* a0 <- signal id */
    mv a0, s2
    /* a1 <- siginfo */
    add a1, sp, 16
    /* dummy a2 */
    mv a2, a1
    /* restore user GP */
    LOAD gp, FRAME_OFF_GP(s3)
    /* restore user TP */
    LOAD tp, FRAME_OFF_TP(s3)

    /**
     * handler(signo, psi, ucontext);
     */
    sret
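
/*
 * The small sequences below are user-mode trampolines: tiny stubs that only
 * raise an ecall with a reserved "syscall" number (0xff for debug return,
 * 0xfe for sigreturn). The *_end labels bound them so they can presumably be
 * copied out to user space. The syscall entry below diverts 0xfe to
 * arch_signal_quit instead of the regular syscall table.
 */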
.align 3
lwp_debugreturn:
    li a7, 0xff
    ecall

.align 3
.global lwp_sigreturn
lwp_sigreturn:
    li a7, 0xfe
    ecall

.align 3
lwp_sigreturn_end:

.align 3
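/*
 * Thread-exit trampoline: presumably syscall #1 is the exit call in this
 * port's syscall table, so this stub terminates the thread with status 0.
 */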
.global lwp_thread_return
lwp_thread_return:
    li a0, 0
    li a7, 1
    ecall

.align 3
.global lwp_thread_return_end
lwp_thread_return_end:

.globl arch_get_tidr
arch_get_tidr:
    mv a0, tp
    ret
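
/*
 * arch_set_thread_area and arch_set_tidr share one body: the thread
 * identifier / TLS pointer lives in tp on this port, so both simply load tp
 * from a0.
 */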
.global arch_set_thread_area
arch_set_thread_area:
.globl arch_set_tidr
arch_set_tidr:
    mv tp, a0
    ret
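
/*
 * fork()/clone() children are presumably set up to resume here; both aliases
 * fall through to the common syscall exit so the child returns to user space
 * along the same path as any other syscall.
 */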
.global arch_clone_exit
.global arch_fork_exit
arch_fork_exit:
arch_clone_exit:
    j arch_syscall_exit
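
/*
 * Syscall entry. On the legacy path (ARCH_USING_NEW_CTX_SWITCH not defined)
 * the trap frame is first copied onto the thread's kernel stack, or onto the
 * stack recorded in sscratch when the ecall came from kernel mode. The
 * syscall number is then read from the saved a7 slot: 0xfe is the sigreturn
 * pseudo-syscall and is diverted to arch_signal_quit; everything else is
 * dispatched to syscall_handler with the frame as argument.
 */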
START_POINT(syscall_entry)
#ifndef ARCH_USING_NEW_CTX_SWITCH
    // swap to the thread's kernel stack
    csrr t0, sstatus
    andi t0, t0, 0x100  // SSTATUS_SPP: set if the trap came from kernel mode
    beqz t0, __restore_sp_from_tcb

__restore_sp_from_sscratch: // from kernel
    csrr t0, sscratch
    j __move_stack_context

__restore_sp_from_tcb: // from user
    jal rt_thread_self
    jal get_thread_kernel_stack_top
    mv t0, a0

__move_stack_context:
    mv t1, sp   // src
    mv sp, t0   // switch stack
    addi sp, sp, -CTX_REG_NR * REGBYTES
    // copy context
    li s0, CTX_REG_NR   // cnt
    mv t2, sp   // dst

copy_context_loop:
    LOAD t0, 0(t1)
    STORE t0, 0(t2)
    addi s0, s0, -1
    addi t1, t1, 8
    addi t2, t2, 8
    bnez s0, copy_context_loop
#endif /* ARCH_USING_NEW_CTX_SWITCH */

    /* fetch SYSCALL ID */
    LOAD a7, 17 * REGBYTES(sp)
    addi a7, a7, -0xfe  // 0xfe is the sigreturn pseudo-syscall (see lwp_sigreturn)
    beqz a7, arch_signal_quit

#ifdef ARCH_MM_MMU
    /* save the user setting on syscall entry */
    call rt_thread_self
    call lwp_user_setting_save
#endif

    mv a0, sp
    OPEN_INTERRUPT
    call syscall_handler
    j arch_syscall_exit
START_POINT_END(syscall_entry)
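
/*
 * Common syscall exit. Interrupts are closed, then the SPP bit of the saved
 * sstatus decides the way back: if the syscall came from user mode, leave via
 * arch_ret_to_user (exit-request and signal handling); otherwise restore the
 * context and sret straight back to the kernel, clearing sscratch first
 * (presumably marking that the CPU is already running on a kernel stack).
 */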
.global arch_syscall_exit
arch_syscall_exit:
    CLOSE_INTERRUPT

#if defined(ARCH_MM_MMU)
    LOAD s0, FRAME_OFF_SSTATUS(sp)
    andi s0, s0, 0x100  // SSTATUS_SPP: set if the syscall came from kernel mode
    bnez s0, dont_ret_to_user
    j arch_ret_to_user
#endif

dont_ret_to_user:
#ifdef ARCH_MM_MMU
    /* restore the user setting on syscall exit */
    call rt_thread_self
    call lwp_user_setting_restore

    /* after restoring the reg `tp`, the saved context needs updating as well */
    STORE tp, 4 * REGBYTES(sp)
#endif

    // restore context
    RESTORE_ALL
    csrw sscratch, zero
    sret