entry_point.S

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Date           Author       Notes
 * 2020-01-15     bigmagic     the first version
 * 2020-08-10     SummerGift   support clang compiler
 * 2023-04-29     GuEe-GUI     support kernel's ARM64 boot header
 */
#ifndef __ASSEMBLY__
#define __ASSEMBLY__
#endif

#include <mmu.h>
#include <rtconfig.h>
#define ARM64_IMAGE_FLAG_BE_SHIFT           0
#define ARM64_IMAGE_FLAG_PAGE_SIZE_SHIFT    (ARM64_IMAGE_FLAG_BE_SHIFT + 1)
#define ARM64_IMAGE_FLAG_PHYS_BASE_SHIFT    (ARM64_IMAGE_FLAG_PAGE_SIZE_SHIFT + 2)

#define ARM64_IMAGE_FLAG_LE                 0
#define ARM64_IMAGE_FLAG_BE                 1
#define ARM64_IMAGE_FLAG_PAGE_SIZE_4K       1
#define ARM64_IMAGE_FLAG_PAGE_SIZE_16K      2
#define ARM64_IMAGE_FLAG_PAGE_SIZE_64K      3
#define ARM64_IMAGE_FLAG_PHYS_BASE          1

#define _HEAD_FLAG(field)   (_HEAD_FLAG_##field << ARM64_IMAGE_FLAG_##field##_SHIFT)

#ifdef ARCH_CPU_BIG_ENDIAN
#define _HEAD_FLAG_BE       ARM64_IMAGE_FLAG_BE
#else
#define _HEAD_FLAG_BE       ARM64_IMAGE_FLAG_LE
#endif

#define _HEAD_FLAG_PAGE_SIZE ((ARCH_PAGE_SHIFT - 10) / 2)
#define _HEAD_FLAG_PHYS_BASE 1

#define _HEAD_FLAGS (_HEAD_FLAG(BE) | _HEAD_FLAG(PAGE_SIZE) | _HEAD_FLAG(PHYS_BASE))
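
/*
 * Worked example (added; assumes a little-endian build with 4K pages, i.e.
 * ARCH_PAGE_SHIFT == 12): _HEAD_FLAG_PAGE_SIZE = (12 - 10) / 2 = 1, which is
 * ARM64_IMAGE_FLAG_PAGE_SIZE_4K, so
 *     _HEAD_FLAGS = (0 << 0) | (1 << 1) | (1 << 3) = 0xa
 */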
/* Load the run-time (physical) address of \symbol into \reg, PC-relative */
.macro get_phy, reg, symbol
    adrp \reg, \symbol
    add \reg, \reg, #:lo12:\symbol
.endm

/* Compute the physical-virtual offset: \out = phys - virt (\tmp is clobbered) */
.macro get_pvoff, tmp, out
    ldr \tmp, =.boot_cpu_stack_top
    get_phy \out, .boot_cpu_stack_top
    sub \out, \out, \tmp
.endm
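
/*
 * Usage sketch (illustrative, not from the original source; "some_symbol" is
 * hypothetical): before the MMU is on, a link-time virtual address can be
 * rebased to physical by adding the offset:
 *
 *     ldr x0, =some_symbol    // link-time (virtual) address
 *     get_pvoff x1, x2        // x2 = phys - virt
 *     add x0, x0, x2          // x0 is now the physical address
 */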
.section ".text.entrypoint","ax"

#ifdef RT_USING_OFW
/*
 * Our goal is to boot rt-thread without modifying the bootloader's config
 * wherever possible, so we reuse the Linux kernel's boot header for ARM64:
 * https://www.kernel.org/doc/html/latest/arm64/booting.html#call-the-kernel-image
 */
_head:
    b _start                /* Executable code */
    .long 0                 /* Executable code */
    .quad _text_offset      /* Image load offset from start of RAM, little endian */
    .quad _end - _head      /* Effective Image size, little endian (_end defined in link.lds) */
    .quad _HEAD_FLAGS       /* Kernel flags, little endian */
    .quad 0                 /* Reserved */
    .quad 0                 /* Reserved */
    .quad 0                 /* Reserved */
    .ascii "ARM\x64"        /* Magic number */
    .long 0                 /* Reserved (used for PE COFF offset) */
#endif /* RT_USING_OFW */
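
/*
 * Note (added for context): bootloaders that understand the Linux ARM64
 * Image format, e.g. U-Boot's "booti" command, check the "ARM\x64" magic
 * and use the load offset / image size fields above to place this image
 * in RAM and jump to it.
 */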
/* Variable registers: x21~x28 */
dtb_paddr .req x21
boot_arg0 .req x22
boot_arg1 .req x23
boot_arg2 .req x24
stack_top .req x25

.global _start
_start:
    /*
     * Boot CPU general-purpose register settings:
     *   x0 = physical address of device tree blob (dtb) in system RAM.
     *   x1 = 0 (reserved for future use)
     *   x2 = 0 (reserved for future use)
     *   x3 = 0 (reserved for future use)
     */
    mov dtb_paddr, x0
    mov boot_arg0, x1
    mov boot_arg1, x2
    mov boot_arg2, x3

    /* Save the boot CPU's stack top */
    get_phy stack_top, .boot_cpu_stack_top

    /* The boot CPU's id is 0; tpidr_el1 holds the cpu id globally */
    msr tpidr_el1, xzr

    bl init_cpu_el
    bl init_kernel_bss
    bl init_cpu_stack_early

#ifdef RT_USING_OFW
    /* Save the devicetree info */
    mov x0, dtb_paddr
    bl rt_hw_fdt_install_early
#endif

    /* End of the boot CPU path: stage the kernel entry in x8 and set up
     * the MMU. We never come back here. */
    ldr x8, =rtthread_startup
    b init_mmu_early
kernel_start:
    /* Jump to the PE's system entry staged in x8 */
    mov x29, xzr            /* Zero the frame pointer: stack traces stop here */
    mov x30, x8
    br x8

cpu_idle:
    wfe
    b cpu_idle
#ifdef RT_USING_SMP
.globl _secondary_cpu_entry
_secondary_cpu_entry:
#ifdef RT_USING_OFW
    /* Read this CPU's affinity value */
    mrs x5, mpidr_el1
    ldr x1, =rt_cpu_mpidr_table
    get_pvoff x4 x2
    add x1, x1, x2          /* x1 = physical address of rt_cpu_mpidr_table */

    mov x2, #0
    ldr x4, =0xff00ffffff   /* Mask of Aff3..Aff0 in mpidr_el1 */
    and x0, x5, x4

.cpu_id_confirm:
    add x2, x2, #1          /* Next cpu id inc */
    ldr x3, [x1], #8
    cmp x3, #0
    beq cpu_idle            /* mpidr not in the table: park this CPU */
    and x3, x3, x4
    cmp x3, x0
    bne .cpu_id_confirm

    /* Save this mpidr */
    str x5, [x1, #-8]

    /* Get cpu id success */
    sub x0, x2, #1
    msr tpidr_el1, x0       /* Save cpu id globally */
#else
    bl rt_hw_cpu_id_set
    mrs x0, tpidr_el1
#endif /* RT_USING_OFW */
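
    /*
     * Per-CPU stack lookup (explanatory note, added): secondary stacks are
     * laid out downward from .secondary_cpu_stack_top (see the
     * .bss.noclean.cpus_stack section below), so for cpu id i in 1..N-1:
     *     stack_top = .secondary_cpu_stack_top
     *                 - (i - 1) * ARCH_SECONDARY_CPU_STACK_SIZE
     * which is exactly what the msub below computes.
     */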
    /* Set the current CPU's stack top */
    sub x0, x0, #1
    mov x1, #ARCH_SECONDARY_CPU_STACK_SIZE
    get_phy x2, .secondary_cpu_stack_top
    msub stack_top, x0, x1, x2

    bl init_cpu_el
    bl init_cpu_stack_early

    /* Secondary CPU startup: stage its entry in x8 and enable the MMU */
    ldr x8, =rt_hw_secondary_cpu_bsp_start
    b enable_mmu_early
#endif /* RT_USING_SMP */
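
/*
 * init_cpu_el (summary comment, added): normalize the exception level to
 * EL1. If entered at EL3 the code drops to EL2, and from EL2 to EL1h, by
 * programming spsr/elr and executing eret onto the next label; FIQ, IRQ,
 * SError and Debug are masked across each transition. eret does not touch
 * x30, so the final ret still returns to the bl caller, now in EL1.
 */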
init_cpu_el:
    mrs x0, CurrentEL       /* CurrentEL register: EL in bits [3:2], others reserved */
    lsr x0, x0, #2
    and x0, x0, #3

    /* Running at EL3? */
    cmp x0, #3
    bne .init_cpu_hyp

    /* Should never be executed, just for completeness. (EL3) */
    mov x1, #(1 << 0)       /* EL0 and EL1 are in Non-Secure state */
    orr x1, x1, #(1 << 4)   /* RES1 */
    orr x1, x1, #(1 << 5)   /* RES1 */
/*  bic x1, x1, #(1 << 7)      Disable Secure Monitor Call */
    orr x1, x1, #(1 << 10)  /* The next lower level is AArch64 */
    msr scr_el3, x1

    mov x1, #9              /* Next level is 0b1001->EL2h */
    orr x1, x1, #(1 << 6)   /* Mask FIQ */
    orr x1, x1, #(1 << 7)   /* Mask IRQ */
    orr x1, x1, #(1 << 8)   /* Mask SError */
    orr x1, x1, #(1 << 9)   /* Mask Debug Exception */
    msr spsr_el3, x1

    get_phy x1, .init_cpu_hyp
    msr elr_el3, x1
    eret

.init_cpu_hyp:
    /* Running at EL2? */
    cmp x0, #2              /* EL2 = 0b10 */
    bne .init_cpu_sys

    /* Enable CNTP for EL1 */
    mrs x0, cnthctl_el2     /* Counter-timer Hypervisor Control register */
    orr x0, x0, #(1 << 0)   /* Don't trap NS EL0/1 accesses to the physical counter */
    orr x0, x0, #(1 << 1)   /* Don't trap NS EL0/1 accesses to the physical timer */
    msr cnthctl_el2, x0
    msr cntvoff_el2, xzr

    mov x0, #(1 << 31)      /* Enable AArch64 in EL1 */
    orr x0, x0, #(1 << 1)   /* SWIO hardwired */
    msr hcr_el2, x0

    mov x0, #5              /* Next level is 0b0101->EL1h */
    orr x0, x0, #(1 << 6)   /* Mask FIQ */
    orr x0, x0, #(1 << 7)   /* Mask IRQ */
    orr x0, x0, #(1 << 8)   /* Mask SError */
    orr x0, x0, #(1 << 9)   /* Mask Debug Exception */
    msr spsr_el2, x0

    get_phy x0, .init_cpu_sys
    msr elr_el2, x0
    eret

.init_cpu_sys:
    mrs x0, sctlr_el1
    bic x0, x0, #(3 << 3)   /* Disable SP alignment check */
    bic x0, x0, #(1 << 1)   /* Disable alignment check */
    msr sctlr_el1, x0

    /* Avoid traps from SIMD or floating-point instructions */
    mov x0, #0x00300000     /* Don't trap any SIMD/FP instructions in EL0 or EL1 */
    msr cpacr_el1, x0

    /* Apply the context change */
    dsb ish
    isb

    ret
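
/*
 * init_kernel_bss (summary comment, added): zero [__bss_start, __bss_end)
 * in 8-byte strides, then the sub-8-byte tail byte by byte. The
 * ".bss.noclean.*" sections at the bottom of this file (CPU stacks and
 * early page tables) are presumably placed outside this range by link.lds
 * so the wipe does not touch them.
 */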
init_kernel_bss:
    get_phy x1, __bss_start
    get_phy x2, __bss_end
    sub x2, x2, x1          /* Get bss size */

    and x3, x2, #7          /* x3 = size % 8 (0..7), the byte tail */
    ldr x4, =~0x7
    and x2, x2, x4          /* x2 = size rounded down to 8 bytes */

.clean_bss_loop_quad:
    cbz x2, .clean_bss_loop_byte
    str xzr, [x1], #8
    sub x2, x2, #8
    b .clean_bss_loop_quad

.clean_bss_loop_byte:
    cbz x3, .clean_bss_end
    strb wzr, [x1], #1
    sub x3, x3, #1
    b .clean_bss_loop_byte

.clean_bss_end:
    ret
init_cpu_stack_early:
    msr spsel, #1           /* Use SP_EL1 as the stack pointer */
    mov sp, stack_top

    ret
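
/*
 * init_mmu_early (summary comment, added): donate .early_page_array to the
 * early allocator (set_free_page), then have rt_hw_mem_setup_early build
 * the first page tables from x0/x1 (ttbr0/ttbr1 table pages), x2 = size
 * (0x40000000, i.e. 1G for kernel space) and x3 = the physical-virtual
 * offset from get_pvoff. Execution then falls through to enable_mmu_early.
 */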
init_mmu_early:
    get_phy x0, .early_tbl0_page
    get_phy x1, .early_tbl1_page
    get_pvoff x2 x3
    ldr x2, =0x40000000     /* Map 1G memory for kernel space */
    bl rt_hw_mem_setup_early

    b enable_mmu_early
enable_mmu_early:
    get_phy x0, .early_tbl0_page
    get_phy x1, .early_tbl1_page
    msr ttbr0_el1, x0
    msr ttbr1_el1, x1
    dsb sy

    bl mmu_tcr_init

    /*
     * OK, we don't use sp again before jumping to the kernel, so rebase sp
     * from the current CPU's physical stack top to its virtual address.
     */
    get_pvoff x1 x0
    mov x1, stack_top
    sub x1, x1, x0
    mov sp, x1

    ldr x30, =kernel_start  /* Set LR to kernel_start; it's a virtual address */

    /* Enable page table translation */
    mrs x1, sctlr_el1
    orr x1, x1, #(1 << 12)  /* Stage 1 instruction access cacheability control */
    orr x1, x1, #(1 << 2)   /* Cacheable Normal memory in stage 1 */
    orr x1, x1, #(1 << 0)   /* MMU enable */
    msr sctlr_el1, x1
    dsb ish
    isb

    ic ialluis              /* Invalidate all instruction caches in the Inner Shareable domain to PoU */
    dsb ish
    isb

    tlbi vmalle1            /* Invalidate all stage 1 translations used at EL1 with the current VMID */
    dsb ish
    isb

    ret
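
/*
 * Note (added): the ret above branches to x30, which both boot paths point
 * at kernel_start's link-time (virtual) address before the MMU comes on, so
 * this ret is what carries execution from the physical alias into the
 * virtual mapping; kernel_start then tail-jumps to the routine staged in x8
 * (rtthread_startup or rt_hw_secondary_cpu_bsp_start).
 */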
/*
 * CPU stack builtin
 */
.section ".bss.noclean.cpus_stack"
.align 12
.cpus_stack:
#if defined(RT_USING_SMP) && RT_CPUS_NR > 1
    .space (ARCH_SECONDARY_CPU_STACK_SIZE * (RT_CPUS_NR - 1))
#endif
.secondary_cpu_stack_top:
    .space ARCH_SECONDARY_CPU_STACK_SIZE
.boot_cpu_stack_top:

/*
 * Early page builtin
 */
.section ".bss.noclean.early_page"
.align 12
.early_tbl0_page:
    .space ARCH_PAGE_SIZE
.early_tbl1_page:
    /* Map 4G -> 2M * 512 entries */
    .space 4 * ARCH_PAGE_SIZE
.early_page_array:
    .space 24 * ARCH_PAGE_SIZE