/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-02-21     GuEe-GUI     first version
 */
  10. #include <rtthread.h>
  11. #define DBG_TAG "cpu.aa64"
  12. #define DBG_LVL DBG_INFO
  13. #include <rtdbg.h>
  14. #include <cpu.h>
  15. #include <mmu.h>
  16. #include <cpuport.h>
  17. #include <interrupt.h>
  18. #include <stdlib.h>
  19. #include <ioremap.h>
  20. #include <drivers/ofw.h>
  21. #include <drivers/ofw_fdt.h>
  22. #include <drivers/ofw_raw.h>
  23. #include <drivers/core/dm.h>
  24. #define rt_sysreg_write(sysreg, val) \
  25. __asm__ volatile ("msr "RT_STRINGIFY(sysreg)", %0"::"r"((rt_uint64_t)(val)))
  26. #define rt_sysreg_read(sysreg, val) \
  27. __asm__ volatile ("mrs %0, "RT_STRINGIFY(sysreg)"":"=r"((val)))
  28. extern void _secondary_cpu_entry(void);
  29. extern size_t MMUTable[];
  30. extern void *system_vectors;
  31. static void *fdt_ptr = RT_NULL;
  32. static rt_size_t fdt_size = 0;
  33. #ifdef RT_USING_SMP
  34. extern struct cpu_ops_t cpu_psci_ops;
  35. extern struct cpu_ops_t cpu_spin_table_ops;
  36. #else
  37. extern int rt_hw_cpu_id(void);
  38. #endif
  39. rt_uint64_t rt_cpu_mpidr_table[] =
  40. {
  41. [RT_CPUS_NR] = 0,
  42. };
  43. static struct cpu_ops_t *cpu_ops[] =
  44. {
  45. #ifdef RT_USING_SMP
  46. &cpu_psci_ops,
  47. &cpu_spin_table_ops,
  48. #endif
  49. };
  50. static struct rt_ofw_node *cpu_np[RT_CPUS_NR] = { };
  51. void rt_hw_fdt_install_early(void *fdt)
  52. {
  53. void *fdt_vaddr = fdt - PV_OFFSET;
  54. if (fdt != RT_NULL && !fdt_check_header(fdt_vaddr))
  55. {
  56. fdt_ptr = fdt_vaddr;
  57. fdt_size = fdt_totalsize(fdt_vaddr);
  58. }
  59. }
/* Point the current core's vector base register at the kernel exception vectors */
static void system_vectors_init(void)
{
    rt_hw_set_current_vbar((rt_ubase_t)&system_vectors);
}
/*
 * Collect per-CPU information from the device tree.
 *
 * Walks the /cpus nodes, recording each CPU's OFW node in cpu_np[] and its
 * hardware id in rt_cpu_mpidr_table[], then gives every registered
 * cpu_ops_t backend a chance to initialize for that CPU. At most
 * RT_CPUS_NR nodes are processed. The table is flushed from the dcache at
 * the end so secondary cores can read it before enabling their caches.
 */
rt_inline void cpu_info_init(void)
{
    int i = 0;
    rt_uint64_t mpidr;
    struct rt_ofw_node *np;

    /* Read the boot CPU's MPIDR so it can be matched against the DT hwids */
    rt_sysreg_read(mpidr_el1, mpidr);

    rt_ofw_foreach_cpu_node(np)
    {
        rt_uint64_t hwid = rt_ofw_get_cpu_hwid(np, 0);

        if ((mpidr & MPIDR_AFFINITY_MASK) != hwid)
        {
            /*
             * Not the boot CPU: keep only the affinity fields and set a
             * marker bit so the SMP boot path can tell this slot has not
             * yet been overwritten with a live MPIDR value.
             */
            hwid |= 1ULL << 31;
        }
        else
        {
            /* Boot CPU: store the full MPIDR exactly as read */
            hwid = mpidr;
        }

        cpu_np[i] = np;
        rt_cpu_mpidr_table[i] = hwid;
        /* Stash the hwid in the node's data pointer for later lookup */
        rt_ofw_data(np) = (void *)hwid;

        for (int idx = 0; idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];

            if (ops->cpu_init)
            {
                ops->cpu_init(i, np);
            }
        }

        if (++i >= RT_CPUS_NR)
        {
            break;
        }
    }

    /* Secondary cores may read this table with caches off; push it to memory */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, rt_cpu_mpidr_table, sizeof(rt_cpu_mpidr_table));
}
  101. void rt_hw_common_setup(void)
  102. {
  103. if (rt_fdt_prefetch(fdt_ptr))
  104. {
  105. /* Platform cannot be initialized */
  106. RT_ASSERT(0);
  107. }
  108. rt_fdt_unflatten();
  109. cpu_info_init();
  110. }
  111. #ifdef RT_USING_SMP
  112. rt_weak void rt_hw_secondary_cpu_up(void)
  113. {
  114. int cpu_id = rt_hw_cpu_id();
  115. rt_uint64_t entry = (rt_uint64_t)rt_kmem_v2p(_secondary_cpu_entry);
  116. if (!entry)
  117. {
  118. LOG_E("Failed to translate '_secondary_cpu_entry' to physical address");
  119. RT_ASSERT(0);
  120. }
  121. /* Maybe we are no in the first cpu */
  122. for (int i = 0; i < RT_ARRAY_SIZE(cpu_np); ++i)
  123. {
  124. int err;
  125. const char *enable_method;
  126. if (!cpu_np[i] || i == cpu_id)
  127. {
  128. continue;
  129. }
  130. err = rt_ofw_prop_read_string(cpu_np[i], "enable-method", &enable_method);
  131. for (int idx = 0; !err && idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
  132. {
  133. struct cpu_ops_t *ops = cpu_ops[idx];
  134. if (ops->method && !rt_strcmp(ops->method, enable_method) && ops->cpu_boot)
  135. {
  136. err = ops->cpu_boot(i, entry);
  137. break;
  138. }
  139. }
  140. if (err)
  141. {
  142. LOG_W("Call cpu %d on %s", i, "failed");
  143. }
  144. }
  145. }
/*
 * C entry for a secondary CPU after the assembly bring-up: install the
 * exception vectors, record this core's MPIDR, switch to the shared
 * kernel page table, set up interrupts and IPIs, then enter the scheduler.
 * This function does not return.
 */
rt_weak void rt_hw_secondary_cpu_bsp_start(void)
{
    int cpu_id = rt_hw_cpu_id();

    /* Install this core's exception vector table */
    system_vectors_init();

    rt_hw_spin_lock(&_cpus_lock);

    /* Save all mpidr: overwrite our table slot with this core's real MPIDR */
    rt_sysreg_read(mpidr_el1, rt_cpu_mpidr_table[cpu_id]);

    /* Switch to the shared kernel page table */
    rt_hw_mmu_ktbl_set((unsigned long)MMUTable);

    rt_hw_interrupt_init();
    rt_dm_secondary_cpu_init();

    /* Accept scheduler and stop IPIs on this core */
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);

    LOG_I("Call cpu %d on %s", cpu_id, "success");

#ifdef RT_USING_HWTIMER
    /* Calibrate the busy-wait delay loop if the generic CPU delay is in use */
    if (rt_device_hwtimer_us_delay == &cpu_us_delay)
    {
        cpu_loops_per_tick_init();
    }
#endif

    rt_system_scheduler_start();
}
/* Idle action for a secondary CPU: wait-for-event until woken */
rt_weak void rt_hw_secondary_cpu_idle_exec(void)
{
    rt_hw_wfe();
}
#endif /* RT_USING_SMP */