/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Date           Author       Notes
 * 2018-10-06     ZhaoXiaowei  the first version (cpu_gcc.S)
 * 2021-05-18     Jesven       the first version (context_gcc.S)
 * 2024-01-06     Shell        Fix barrier on irq_disable/enable
 * 2024-01-18     Shell        fix implicit dependency of cpuid management
 * 2024-03-28     Shell        Move cpu codes from context_gcc.S
 */

#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif

#include "rtconfig.h"
#include "asm-generic.h"
#include "asm-fpu.h"
#include "armv8.h"

#ifdef RT_USING_SMP
#define rt_hw_interrupt_disable rt_hw_local_irq_disable
#define rt_hw_interrupt_enable  rt_hw_local_irq_enable
#endif /* RT_USING_SMP */

.text

/**
 * #ifdef RT_USING_OFW
 * void rt_hw_cpu_id_set(long cpuid)
 * #else
 * void rt_hw_cpu_id_set(void)
 * #endif
 */
.type rt_hw_cpu_id_set, @function
rt_hw_cpu_id_set:
#ifdef ARCH_USING_GENERIC_CPUID
    .globl  rt_hw_cpu_id_set
#else /* !ARCH_USING_GENERIC_CPUID */
    .weak   rt_hw_cpu_id_set
#endif /* ARCH_USING_GENERIC_CPUID */

#ifndef RT_USING_OFW
    mrs     x0, mpidr_el1       /* MPIDR_EL1: Multi-Processor Affinity Register */
#ifdef ARCH_ARM_CORTEX_A55
    lsr     x0, x0, #8          /* Cortex-A55 reports the core number in Aff1 */
#endif /* ARCH_ARM_CORTEX_A55 */
    and     x0, x0, #15         /* keep the low 4 affinity bits as the cpu id */
#endif /* !RT_USING_OFW */

#ifdef ARCH_USING_HW_THREAD_SELF
    msr     tpidrro_el0, x0     /* cache the cpu id in TPIDRRO_EL0 */
#else /* !ARCH_USING_HW_THREAD_SELF */
    msr     tpidr_el1, x0       /* cache the cpu id in TPIDR_EL1 */
#endif /* ARCH_USING_HW_THREAD_SELF */
    ret

/*
 * int rt_hw_cpu_id(void)
 */
.type rt_hw_cpu_id, @function
rt_hw_cpu_id:
#ifdef ARCH_USING_GENERIC_CPUID
    .globl  rt_hw_cpu_id
#else /* !ARCH_USING_GENERIC_CPUID */
    .weak   rt_hw_cpu_id
#endif /* ARCH_USING_GENERIC_CPUID */

#if RT_CPUS_NR > 1
#ifdef ARCH_USING_GENERIC_CPUID
    mrs     x0, tpidrro_el0     /* read back the id cached by rt_hw_cpu_id_set */
#else /* !ARCH_USING_GENERIC_CPUID */
    mrs     x0, tpidr_el1
#endif /* ARCH_USING_GENERIC_CPUID */
#else /* RT_CPUS_NR == 1 */
    mov     x0, xzr             /* single-core build: the cpu id is always 0 */
#endif /* RT_CPUS_NR > 1 */
    ret
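
/*
 * A usage sketch from C. The prototypes mirror the labels above; the
 * secondary-core entry hook is a hypothetical caller, not a symbol
 * defined in this file:
 *
 *     extern void rt_hw_cpu_id_set(long cpuid);  // no argument without RT_USING_OFW
 *     extern int  rt_hw_cpu_id(void);
 *
 *     void secondary_cpu_c_entry(long cpuid)     // hypothetical bring-up hook
 *     {
 *         rt_hw_cpu_id_set(cpuid);  // cache the id before anything calls rt_hw_cpu_id()
 *     }
 */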

/*
 * void rt_hw_set_process_id(size_t id)
 */
.global rt_hw_set_process_id
rt_hw_set_process_id:
    msr     CONTEXTIDR_EL1, x0  /* publish the process id for debug/trace tools */
    ret

/*
 * enable gtimer
 */
.globl rt_hw_gtimer_enable
rt_hw_gtimer_enable:
    mov     x0, #1
    msr     CNTP_CTL_EL0, x0    /* ENABLE = 1, IMASK = 0: start the EL1 physical timer */
    ret

/*
 * set gtimer CNTP_TVAL_EL0 value
 */
.globl rt_hw_set_gtimer_val
rt_hw_set_gtimer_val:
    msr     CNTP_TVAL_EL0, x0
    ret

/*
 * get gtimer CNTP_TVAL_EL0 value
 */
.globl rt_hw_get_gtimer_val
rt_hw_get_gtimer_val:
    mrs     x0, CNTP_TVAL_EL0
    ret

.globl rt_hw_get_cntpct_val
rt_hw_get_cntpct_val:
    mrs     x0, CNTPCT_EL0
    ret

/*
 * get gtimer frequency
 * According to the document `Arm A-profile Architecture Registers:
 * CNTFRQ_EL0, Counter-timer Frequency Register`,
 * bits [63:32] are reserved, so only the lower 32 bits take effect.
 */
.globl rt_hw_get_gtimer_frq
rt_hw_get_gtimer_frq:
    mrs     x0, CNTFRQ_EL0
    ret
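
/*
 * A tick-programming sketch from C, assuming a 1000 Hz tick (the divisor
 * is an assumption; the prototypes mirror the labels above):
 *
 *     unsigned long step = rt_hw_get_gtimer_frq() / 1000;
 *     rt_hw_set_gtimer_val(step);   // TVAL counts down; fires when it reaches 0
 *     rt_hw_gtimer_enable();
 *
 * The timer ISR would call rt_hw_set_gtimer_val(step) again to arm the
 * next tick.
 */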

.global rt_hw_interrupt_is_disabled
rt_hw_interrupt_is_disabled:
    mrs     x0, DAIF
    tst     x0, #0xc0           /* test the I (IRQ) and F (FIQ) mask bits */
    cset    x0, NE              /* return 1 if either one is masked */
    ret

/*
 * rt_base_t rt_hw_interrupt_disable();
 */
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mrs     x0, DAIF
    and     x0, x0, #0xc0
    cmp     x0, #0xc0
    /* branch if the I and F bits are not both set already */
    bne     1f
    ret
1:
    msr     DAIFSet, #3         /* mask IRQ and FIQ */
    dsb     nsh
    isb
    ret

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    and     x0, x0, #0xc0
    cmp     x0, #0xc0
    /* branch unless both I and F were masked in `level` */
    bne     1f
    ret
1:
    isb
    dsb     nsh
    and     x0, x0, #0xc0
    mrs     x1, DAIF
    bic     x1, x1, #0xc0       /* drop the current I/F bits ... */
    orr     x0, x0, x1          /* ... and restore the ones saved in `level` */
    msr     DAIF, x0
    ret
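
/*
 * The disable/enable pair forms a save/restore critical section from C:
 *
 *     rt_base_t level = rt_hw_interrupt_disable();  // returns the old I/F bits
 *     // ... critical section: IRQ and FIQ masked on this core ...
 *     rt_hw_interrupt_enable(level);  // restores, rather than forces, the mask
 */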

.globl rt_hw_get_current_el
rt_hw_get_current_el:
    mrs     x0, CurrentEL       /* the EL is encoded in bits [3:2] */
    cmp     x0, 0xc
    b.eq    3f
    cmp     x0, 0x8
    b.eq    2f
    cmp     x0, 0x4
    b.eq    1f

    ldr     x0, =0              /* unrecognized encoding: report EL0 */
    b       0f
3:
    ldr     x0, =3
    b       0f
2:
    ldr     x0, =2
    b       0f
1:
    ldr     x0, =1
    b       0f
0:
    ret

.globl rt_hw_set_current_vbar
rt_hw_set_current_vbar:
    mrs     x1, CurrentEL
    cmp     x1, 0xc
    b.eq    3f
    cmp     x1, 0x8
    b.eq    2f
    cmp     x1, 0x4
    b.eq    1f
    b       0f
3:
    msr     VBAR_EL3, x0
    b       0f
2:
    msr     VBAR_EL2, x0
    b       0f
1:
    msr     VBAR_EL1, x0
    b       0f
0:
    ret
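
/*
 * A bring-up sketch from C. `system_vectors` stands for the exception
 * vector table defined elsewhere in the port; the symbol name and the
 * rt_ubase_t cast are assumptions:
 *
 *     extern char system_vectors[];
 *     rt_hw_set_current_vbar((rt_ubase_t)&system_vectors);
 */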

.globl rt_hw_set_elx_env
rt_hw_set_elx_env:
    mrs     x1, CurrentEL
    cmp     x1, 0xc
    b.eq    3f
    cmp     x1, 0x8
    b.eq    2f
    cmp     x1, 0x4
    b.eq    1f
    b       0f
3:
    mrs     x0, SCR_EL3
    orr     x0, x0, #0xf        /* SCR_EL3.NS|IRQ|FIQ|EA */
    msr     SCR_EL3, x0
    b       0f
2:
    mrs     x0, HCR_EL2
    orr     x0, x0, #0x38       /* HCR_EL2.FMO|IMO|AMO: route FIQ/IRQ/SError to EL2 */
    msr     HCR_EL2, x0
    b       0f
1:
    b       0f
0:
    ret

.globl rt_cpu_vector_set_base
rt_cpu_vector_set_base:
    msr     VBAR_EL1, x0
    ret

/**
 * unsigned long rt_hw_ffz(unsigned long x)
 */
.globl rt_hw_ffz
rt_hw_ffz:
    mvn     x1, x0              /* invert: zero bits become one bits */
    clz     x0, x1
    mov     x1, #0x3f
    sub     x0, x1, x0          /* 63 - clz(~x): index of the highest zero bit */
    ret

.globl rt_hw_clz
rt_hw_clz:
    clz     x0, x0
    ret
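
/*
 * Worked examples on 64-bit inputs:
 *     rt_hw_clz(0x1)                == 63  // bit 0 set, 63 leading zeros
 *     rt_hw_ffz(0x7fffffffffffffff) == 63  // the only zero bit is bit 63
 * Note that rt_hw_ffz computes 63 - clz(~x), the index of the highest
 * zero bit; an all-ones input yields -1, since clz(0) is 64 on AArch64.
 */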

/**
 * Spinlock (fallback implementation)
 */
rt_hw_spin_lock_init:
.weak rt_hw_spin_lock_init
    stlr    wzr, [x0]           /* zero the whole lock word: owner = next = 0 */
    ret

rt_hw_spin_trylock:
.weak rt_hw_spin_trylock
    sub     sp, sp, #16         /* scratch slot for a local copy of the lock */
    ldar    w2, [x0]            /* atomically snapshot the {owner, next} word */
    add     x1, sp, 8
    stlr    w2, [x1]
    ldarh   w1, [x1]            /* w1 = owner (low halfword) */
    and     w1, w1, 65535
    add     x3, sp, 10
    ldarh   w3, [x3]            /* w3 = next (high halfword) */
    cmp     w1, w3, uxth
    beq     1f
    mov     w0, 0               /* lock is held: fail without spinning */
    add     sp, sp, 16
    ret
1:
    add     x1, sp, 10
2:
    ldaxrh  w3, [x1]            /* bump `next` in the local copy */
    add     w3, w3, 1
    stlxrh  w4, w3, [x1]
    cbnz    w4, 2b
    add     x1, sp, 8
    ldar    w1, [x1]
3:
    ldaxr   w3, [x0]            /* publish only if the lock word is unchanged */
    cmp     w3, w2
    bne     4f
    stxr    w4, w1, [x0]
    cbnz    w4, 3b
4:
    cset    w0, eq              /* 1 on success, 0 if another cpu raced us */
    add     sp, sp, 16
    ret

rt_hw_spin_lock:
.weak rt_hw_spin_lock
    add     x1, x0, 2           /* x1 -> the `next` ticket halfword */
1:
    ldxrh   w2, [x1]            /* take a ticket: w2 = next, next += 1 */
    add     w3, w2, 1
    stxrh   w4, w3, [x1]
    cbnz    w4, 1b
    and     w2, w2, 65535
    ldarh   w1, [x0]            /* w1 = current owner */
    cmp     w2, w1, uxth
    beq     3f                  /* our turn already: lock acquired */
    sevl
2:
    wfe                         /* sleep until our ticket comes up */
    ldaxrh  w1, [x0]
    cmp     w2, w1
    bne     2b
3:
    ret

rt_hw_spin_unlock:
.weak rt_hw_spin_unlock
    ldxrh   w1, [x0]            /* owner += 1: hand the lock to the next ticket */
    add     w1, w1, 1
    stlxrh  w2, w1, [x0]        /* release semantics order prior writes first */
    cbnz    w2, rt_hw_spin_unlock
    ret
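
/*
 * These routines implement a 16-bit ticket lock: the word at [x0] holds
 * {owner, next} as two halfwords. A layout sketch in C (the type name
 * and field names are assumptions inferred from the offsets used above):
 *
 *     typedef union {
 *         unsigned int slock;        // whole word, as loaded by ldar above
 *         struct {
 *             unsigned short owner;  // offset 0, polled by rt_hw_spin_lock
 *             unsigned short next;   // offset 2, bumped by ldxrh/stxrh
 *         } tickets;
 *     } rt_hw_spinlock_t;
 *
 * rt_hw_spin_lock() takes a ticket from `next` and waits in wfe until
 * `owner` reaches it; rt_hw_spin_unlock() increments `owner` with release
 * semantics to pass the lock to the next waiter in FIFO order.
 */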