  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2023-02-21 GuEe-GUI first version
  9. */
  10. #include <rtthread.h>
  11. #define DBG_TAG "cpu.aa64"
  12. #define DBG_LVL DBG_INFO
  13. #include <rtdbg.h>
  14. #include <smp_call.h>
  15. #include <cpu.h>
  16. #include <mmu.h>
  17. #include <cpuport.h>
  18. #include <interrupt.h>
  19. #include <gtimer.h>
  20. #include <setup.h>
  21. #include <stdlib.h>
  22. #include <ioremap.h>
  23. #include <rtdevice.h>
  24. #include <gic.h>
  25. #include <gicv3.h>
  26. #include <mm_memblock.h>
  27. #include <dt-bindings/size.h>
/* Kernel image bounds, provided by the linker script */
extern rt_ubase_t _start, _end;
/* Assembly entry executed by secondary cores before they enter C code */
extern void _secondary_cpu_entry(void);
/* Device tree blob linked into the image (RT_USING_BUILTIN_FDT builds) */
extern void rt_hw_builtin_fdt();
/* Early boot kernel page table, set up by the startup assembly */
extern size_t MMUTable[];
/* Exception vector table installed into VBAR_ELx */
extern void *system_vectors;

/* External DTB handed over by the bootloader: address and total size */
static void *fdt_ptr = RT_NULL;
static rt_size_t fdt_size = 0;

#ifdef RT_USING_SMP
extern struct cpu_ops_t cpu_psci_ops;
extern struct cpu_ops_t cpu_spin_table_ops;
#else
extern int rt_hw_cpu_id(void);
#endif

/*
 * MPIDR affinity value per logical CPU id.  The designated initializer at
 * [RT_CPUS_NR] sizes the array to RT_CPUS_NR + 1 entries, leaving a
 * trailing zero slot after the last CPU.
 */
rt_uint64_t rt_cpu_mpidr_table[] =
{
    [RT_CPUS_NR] = 0,
};

/* Supported bring-up methods, matched against "enable-method" in the DT */
static struct cpu_ops_t *cpu_ops[] =
{
#ifdef RT_USING_SMP
    &cpu_psci_ops,
    &cpu_spin_table_ops,
#endif
};

/* Device tree node of each CPU, populated by cpu_info_init() */
static struct rt_ofw_node *cpu_np[RT_CPUS_NR] = { };
  53. void rt_hw_fdt_install_early(void *fdt)
  54. {
  55. #ifndef RT_USING_BUILTIN_FDT
  56. if (fdt != RT_NULL && !fdt_check_header(fdt))
  57. {
  58. fdt_ptr = fdt;
  59. fdt_size = fdt_totalsize(fdt);
  60. }
  61. #else
  62. (void)fdt;
  63. #endif
  64. }
#ifdef RT_USING_HWTIMER
/* Calibrated busy-wait count per OS tick, one slot per CPU */
static rt_ubase_t loops_per_tick[RT_CPUS_NR];

/* Read the generic timer physical counter (CNTPCT_EL0). */
static rt_ubase_t cpu_get_cycles(void)
{
    rt_ubase_t cycles;

    rt_hw_sysreg_read(cntpct_el0, cycles);

    return cycles;
}
  73. static void cpu_loops_per_tick_init(void)
  74. {
  75. rt_ubase_t offset;
  76. volatile rt_ubase_t freq, step, cycles_end1, cycles_end2;
  77. volatile rt_uint32_t cycles_count1 = 0, cycles_count2 = 0;
  78. rt_hw_sysreg_read(cntfrq_el0, freq);
  79. step = freq / RT_TICK_PER_SECOND;
  80. cycles_end1 = cpu_get_cycles() + step;
  81. while (cpu_get_cycles() < cycles_end1)
  82. {
  83. __asm__ volatile ("nop");
  84. __asm__ volatile ("add %0, %0, #1":"=r"(cycles_count1));
  85. }
  86. cycles_end2 = cpu_get_cycles() + step;
  87. while (cpu_get_cycles() < cycles_end2)
  88. {
  89. __asm__ volatile ("add %0, %0, #1":"=r"(cycles_count2));
  90. }
  91. if ((rt_int32_t)(cycles_count2 - cycles_count1) > 0)
  92. {
  93. offset = cycles_count2 - cycles_count1;
  94. }
  95. else
  96. {
  97. /* Impossible, but prepared for any eventualities */
  98. offset = cycles_count2 / 4;
  99. }
  100. loops_per_tick[rt_hw_cpu_id()] = offset;
  101. }
/*
 * Busy-wait for approximately `us` microseconds on the calling CPU.
 *
 * 0x10c7 ~= 2^32 / 1000000, so the expression below computes
 * us / 1e6 * loops_per_tick * RT_TICK_PER_SECOND in 32.32 fixed point,
 * avoiding a runtime division.
 *
 * NOTE(review): the budget is derived from the calibrated loop count but is
 * compared against raw CNTPCT_EL0 deltas — confirm both sides are intended
 * to share the same unit.
 */
static void cpu_us_delay(rt_uint32_t us)
{
    volatile rt_base_t start = cpu_get_cycles(), cycles;

    cycles = ((us * 0x10c7UL) * loops_per_tick[rt_hw_cpu_id()] * RT_TICK_PER_SECOND) >> 32;

    while ((cpu_get_cycles() - start) < cycles)
    {
        rt_hw_cpu_relax();
    }
}
#endif /* RT_USING_HWTIMER */
/* Default idle hook: wait-for-interrupt, parks the core until the next IRQ. */
rt_weak void rt_hw_idle_wfi(void)
{
    __asm__ volatile ("wfi");
}
/* Point the calling CPU's vector base register at the kernel vector table. */
static void system_vectors_init(void)
{
    rt_hw_set_current_vbar((rt_ubase_t)&system_vectors);
}
  120. rt_inline void cpu_info_init(void)
  121. {
  122. int i = 0;
  123. rt_uint64_t mpidr;
  124. struct rt_ofw_node *np;
  125. /* get boot cpu info */
  126. rt_hw_sysreg_read(mpidr_el1, mpidr);
  127. rt_ofw_foreach_cpu_node(np)
  128. {
  129. rt_uint64_t hwid = rt_ofw_get_cpu_hwid(np, 0);
  130. if ((mpidr & MPIDR_AFFINITY_MASK) != hwid)
  131. {
  132. /* Only save affinity and res make smp boot can check */
  133. hwid |= 1ULL << 31;
  134. }
  135. else
  136. {
  137. hwid = mpidr;
  138. }
  139. cpu_np[i] = np;
  140. rt_cpu_mpidr_table[i] = hwid;
  141. for (int idx = 0; idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
  142. {
  143. struct cpu_ops_t *ops = cpu_ops[idx];
  144. if (ops->cpu_init)
  145. {
  146. ops->cpu_init(i, np);
  147. }
  148. }
  149. if (++i >= RT_CPUS_NR)
  150. {
  151. break;
  152. }
  153. }
  154. rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, rt_cpu_mpidr_table, sizeof(rt_cpu_mpidr_table));
  155. #ifdef RT_USING_HWTIMER
  156. cpu_loops_per_tick_init();
  157. if (!rt_device_hwtimer_us_delay)
  158. {
  159. rt_device_hwtimer_us_delay = &cpu_us_delay;
  160. }
  161. #endif /* RT_USING_HWTIMER */
  162. }
  163. rt_inline rt_size_t string_to_size(const char *string, const char *who)
  164. {
  165. char unit;
  166. rt_size_t size;
  167. const char *cp = string;
  168. size = atoi(cp);
  169. while (*cp >= '0' && *cp <= '9')
  170. {
  171. ++cp;
  172. }
  173. unit = *cp & '_';
  174. if (unit == 'M')
  175. {
  176. size *= SIZE_MB;
  177. }
  178. else if (unit == 'K')
  179. {
  180. size *= SIZE_KB;
  181. }
  182. else if (unit == 'G')
  183. {
  184. size *= SIZE_GB;
  185. }
  186. else
  187. {
  188. LOG_W("Unknown unit of '%c' in `%s`", unit, who);
  189. }
  190. return size;
  191. }
/*
 * Early platform bring-up for the boot CPU.
 *
 * Order matters throughout: vectors -> MMU map info -> physical layout ->
 * memblock reservations -> heap/page allocator -> kernel MMU mapping ->
 * FDT parsing -> interrupt controller -> timer -> board components.
 */
void rt_hw_common_setup(void)
{
    rt_uint64_t initrd_ranges[3];
    rt_size_t kernel_start, kernel_end;
    rt_size_t heap_start, heap_end;
    rt_size_t init_page_start, init_page_end;
    rt_size_t fdt_start, fdt_end;
    rt_region_t init_page_region = { 0 };
    rt_region_t platform_mem_region = { 0 };
    static struct mem_desc platform_mem_desc;
    const rt_ubase_t pv_off = PV_OFFSET;

    system_vectors_init();

#ifdef RT_USING_SMART
    rt_hw_mmu_map_init(&rt_kernel_space, (void*)0xffffffff00000000, 0x20000000, MMUTable, pv_off);
#else
    rt_hw_mmu_map_init(&rt_kernel_space, (void*)0xffffd0000000, 0x20000000, MMUTable, 0);
#endif

    /*
     * Physical layout, packed right after the kernel image:
     * [kernel][heap][init pages][relocated FDT]
     */
    kernel_start = RT_ALIGN_DOWN((rt_size_t)rt_kmem_v2p((void *)&_start) - 64, ARCH_PAGE_SIZE);
    kernel_end = RT_ALIGN((rt_size_t)rt_kmem_v2p((void *)&_end), ARCH_PAGE_SIZE);
    heap_start = kernel_end;
    heap_end = RT_ALIGN(heap_start + ARCH_HEAP_SIZE, ARCH_PAGE_SIZE);
    init_page_start = heap_end;
    init_page_end = RT_ALIGN(init_page_start + ARCH_INIT_PAGE_SIZE, ARCH_PAGE_SIZE);
    fdt_start = init_page_end;
    fdt_end = RT_ALIGN(fdt_start + fdt_size, ARCH_PAGE_SIZE);

    platform_mem_region.start = kernel_start;
#ifndef RT_USING_BUILTIN_FDT
    platform_mem_region.end = fdt_end;
#else
    /* Built-in DTB lives inside the kernel image, no extra region needed */
    platform_mem_region.end = init_page_end;
    (void)fdt_start;
    (void)fdt_end;
#endif

    /* Keep the allocator away from everything placed above */
    rt_memblock_reserve_memory("kernel", kernel_start, kernel_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("memheap", heap_start, heap_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("init-page", init_page_start, init_page_end, MEMBLOCK_NONE);
#ifndef RT_USING_BUILTIN_FDT
    rt_memblock_reserve_memory("fdt", fdt_start, fdt_end, MEMBLOCK_NONE);

    /* To virtual address */
    fdt_ptr = (void *)(fdt_ptr - pv_off);

#ifdef KERNEL_VADDR_START
    /* Blob lies outside the early 1:1 window: map it explicitly */
    if ((rt_ubase_t)fdt_ptr + fdt_size - KERNEL_VADDR_START > ARCH_EARLY_MAP_SIZE)
    {
        fdt_ptr = rt_ioremap_early(fdt_ptr + pv_off, fdt_size);

        RT_ASSERT(fdt_ptr != RT_NULL);
    }
#endif /* KERNEL_VADDR_START */

    /* Relocate the DTB into the reserved slot (regions may overlap) */
    rt_memmove((void *)(fdt_start - pv_off), fdt_ptr, fdt_size);
    fdt_ptr = (void *)fdt_start - pv_off;
#else
    fdt_ptr = &rt_hw_builtin_fdt;
    fdt_size = fdt_totalsize(fdt_ptr);
#endif /* RT_USING_BUILTIN_FDT */

    rt_system_heap_init((void *)(heap_start - pv_off), (void *)(heap_end - pv_off));

    init_page_region.start = init_page_start - pv_off;
    init_page_region.end = init_page_end - pv_off;
    rt_page_init(init_page_region);

    /* create MMU mapping of kernel memory */
    platform_mem_region.start = RT_ALIGN_DOWN(platform_mem_region.start, ARCH_PAGE_SIZE);
    platform_mem_region.end = RT_ALIGN(platform_mem_region.end, ARCH_PAGE_SIZE);
    platform_mem_desc.paddr_start = platform_mem_region.start;
    platform_mem_desc.vaddr_start = platform_mem_region.start - pv_off;
    platform_mem_desc.vaddr_end = platform_mem_region.end - pv_off - 1;
    platform_mem_desc.attr = NORMAL_MEM;
    rt_hw_mmu_setup(&rt_kernel_space, &platform_mem_desc, 1);

    if (rt_fdt_prefetch(fdt_ptr))
    {
        /* Platform cannot be initialized */
        RT_ASSERT(0);
    }

    rt_fdt_scan_chosen_stdout();
    rt_fdt_scan_initrd(initrd_ranges);
    rt_fdt_scan_memory();

#ifdef RT_USING_DMA
    do {
        const char *bootargs;
        rt_ubase_t dma_pool_base;
        rt_size_t cma_size = 0, coherent_pool_size = 0;

        /* Pool sizes may be overridden from the kernel command line */
        if (!rt_fdt_bootargs_select("cma=", 0, &bootargs))
        {
            cma_size = string_to_size(bootargs, "cma");
        }

        if (!rt_fdt_bootargs_select("coherent_pool=", 0, &bootargs))
        {
            coherent_pool_size = string_to_size(bootargs, "coherent-pool");
        }

        /*
         * NOTE(review): the condition is "<=" but the warning text says
         * ">" — confirm which comparison is intended.  Either way, both
         * sizes fall back to the 8M/2M defaults here.
         */
        if (cma_size <= coherent_pool_size)
        {
            if (cma_size || coherent_pool_size)
            {
                LOG_W("DMA pool %s=%u > %s=%u",
                        "CMA", cma_size, "coherent-pool", coherent_pool_size);
            }

            cma_size = 8 * SIZE_MB;
            coherent_pool_size = 2 * SIZE_MB;
        }

        dma_pool_base = platform_mem_region.end;
        rt_memblock_reserve_memory("dma-pool",
                dma_pool_base, dma_pool_base + cma_size + coherent_pool_size, MEMBLOCK_NONE);

        if (rt_dma_pool_extract(cma_size, coherent_pool_size))
        {
            LOG_E("Alloc DMA pool %s=%u, %s=%u fail",
                    "CMA", cma_size, "coherent-pool", coherent_pool_size);
        }
    } while (0);
#endif /* RT_USING_DMA */

    rt_memblock_setup_memory_environment();

    rt_fdt_earlycon_kick(FDT_EARLYCON_KICK_UPDATE);

    rt_fdt_unflatten();

    cpu_info_init();

#ifdef RT_USING_PIC
    rt_pic_init();
    rt_pic_irq_init();
#else
    /* initialize hardware interrupt */
    rt_hw_interrupt_init();

    /* initialize uart */
    rt_hw_uart_init();
#endif

#ifndef RT_HWTIMER_ARM_ARCH
    /* initialize timer for os tick */
    rt_hw_gtimer_init();
#endif /* !RT_HWTIMER_ARM_ARCH */

#ifdef RT_USING_COMPONENTS_INIT
    rt_components_board_init();
#endif

#if defined(RT_USING_CONSOLE) && defined(RT_USING_DEVICE)
    rt_ofw_console_setup();
#endif

    rt_thread_idle_sethook(rt_hw_idle_wfi);

#ifdef RT_USING_SMP
    rt_smp_call_init();

    /* Install the IPI handle */
    rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_STOP_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_SMP_CALL_IPI, rt_smp_call_ipi_handler);
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);
    rt_hw_interrupt_umask(RT_SMP_CALL_IPI);
#endif
}
  333. #ifdef RT_USING_SMP
/*
 * Kick every secondary CPU: look up each CPU node's "enable-method" and
 * let the matching cpu_ops_t boot it at the physical address of
 * _secondary_cpu_entry.  Skips the calling CPU and unpopulated slots.
 */
rt_weak void rt_hw_secondary_cpu_up(void)
{
    int cpu_id = rt_hw_cpu_id();
    rt_uint64_t entry = (rt_uint64_t)rt_kmem_v2p(_secondary_cpu_entry);

    if (!entry)
    {
        LOG_E("Failed to translate '_secondary_cpu_entry' to physical address");
        RT_ASSERT(0);
    }

    /* Maybe we are not in the first cpu */
    for (int i = 0; i < RT_ARRAY_SIZE(cpu_np); ++i)
    {
        int err;
        const char *enable_method;

        if (!cpu_np[i] || i == cpu_id)
        {
            continue;
        }

        err = rt_ofw_prop_read_string(cpu_np[i], "enable-method", &enable_method);

        /* Try each registered bring-up method until one matches */
        for (int idx = 0; !err && idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];

            if (ops->method && !rt_strcmp(ops->method, enable_method) && ops->cpu_boot)
            {
                err = ops->cpu_boot(i, entry);

                break;
            }
        }

        if (err)
        {
            LOG_W("Call cpu %d on %s", i, "failed");
        }
    }
}
/*
 * C entry point for a secondary CPU after _secondary_cpu_entry: install
 * vectors, take the cpus lock, record this core's MPIDR, switch to the
 * kernel page table, bring up its interrupt controller and tick timer,
 * then hand control to the scheduler (does not return).
 */
rt_weak void rt_hw_secondary_cpu_bsp_start(void)
{
    int cpu_id = rt_hw_cpu_id();

    system_vectors_init();

    rt_hw_spin_lock(&_cpus_lock);

    /* Save all mpidr */
    rt_hw_sysreg_read(mpidr_el1, rt_cpu_mpidr_table[cpu_id]);

    rt_hw_mmu_ktbl_set((unsigned long)MMUTable);

#ifdef RT_USING_PIC
    rt_pic_irq_init();
#else
    /* initialize vector table */
    rt_hw_vector_init();

    arm_gic_cpu_init(0, 0);
#ifdef BSP_USING_GICV3
    arm_gic_redist_init(0, 0);
#endif /* BSP_USING_GICV3 */
#endif

#ifndef RT_HWTIMER_ARM_ARCH
    /* initialize timer for os tick */
    rt_hw_gtimer_local_enable();
#endif /* !RT_HWTIMER_ARM_ARCH */

    rt_dm_secondary_cpu_init();

    /* Accept the IPIs the boot CPU installed handlers for */
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);
    rt_hw_interrupt_umask(RT_SMP_CALL_IPI);

    LOG_I("Call cpu %d on %s", cpu_id, "success");

#ifdef RT_USING_HWTIMER
    /* Re-calibrate the busy-wait delay for this core if it is in use */
    if (rt_device_hwtimer_us_delay == &cpu_us_delay)
    {
        cpu_loops_per_tick_init();
    }
#endif

    rt_system_scheduler_start();
}
/* Idle body for parked secondary cores: wait-for-event until released. */
rt_weak void rt_hw_secondary_cpu_idle_exec(void)
{
    rt_hw_wfe();
}
  407. #endif
/* Kernel console output hook: forward the string to the FDT earlycon. */
void rt_hw_console_output(const char *str)
{
    rt_fdt_earlycon_output(str);
}