cpuport.c
/*
 * Copyright (c) 2006-2024, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018/10/28     Bernard      The unify RISC-V porting code.
 * 2021-02-11     lizhirui     add gp support
 * 2021-11-19     JasonHu      add fpu support
 */

#include <rthw.h>
#include <rtthread.h>

#include "cpuport.h"
#include "stack.h"

#include <sbi.h>
#include <encoding.h>

#ifdef ARCH_MM_MMU
#include "mmu.h"
#endif

#ifdef RT_USING_SMP
#include "tick.h"
#include "interrupt.h"
#endif /* RT_USING_SMP */

#ifdef ARCH_RISCV_FPU
#define K_SSTATUS_DEFAULT_BASE (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM | SSTATUS_FS)
#else
#define K_SSTATUS_DEFAULT_BASE (SSTATUS_SPP | SSTATUS_SPIE | SSTATUS_SUM)
#endif

#ifdef ARCH_RISCV_VECTOR
#define K_SSTATUS_DEFAULT (K_SSTATUS_DEFAULT_BASE | SSTATUS_VS)
#else
#define K_SSTATUS_DEFAULT K_SSTATUS_DEFAULT_BASE
#endif

#ifdef RT_USING_SMART
#include <lwp_arch.h>
#endif
/**
 * @brief 'from' thread context used by the interrupt-driven context switch
 */
volatile rt_ubase_t rt_interrupt_from_thread = 0;

/**
 * @brief 'to' thread context used by the interrupt-driven context switch
 */
volatile rt_ubase_t rt_interrupt_to_thread = 0;

/**
 * @brief flag indicating whether a context switch is pending from interrupt context
 */
volatile rt_ubase_t rt_thread_switch_interrupt_flag = 0;
#ifdef ARCH_MM_MMU
static rt_ubase_t *percpu_hartid;
#endif
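
/**
 * @brief build the initial switch frame for a new thread
 *
 * The frame is placed immediately below @p sp and zero-filled; only ra and
 * sstatus are set, so the first switch into the thread "returns" to @p ra
 * with the given sstatus value.
 *
 * @param sp      current stack top of the thread
 * @param ra      return address restored together with the frame
 * @param sstatus initial sstatus for the thread
 *
 * @return the new stack pointer (the address of the frame)
 */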
void *_rt_hw_stack_init(rt_ubase_t *sp, rt_ubase_t ra, rt_ubase_t sstatus)
{
    rt_hw_switch_frame_t frame = (rt_hw_switch_frame_t)((rt_ubase_t)sp - sizeof(struct rt_hw_switch_frame));

    rt_memset(frame, 0, sizeof(struct rt_hw_switch_frame));

    frame->regs[RT_HW_SWITCH_CONTEXT_RA]      = ra;
    frame->regs[RT_HW_SWITCH_CONTEXT_SSTATUS] = sstatus;

    return (void *)frame;
}
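
/**
 * @brief get the id of the current CPU (hart)
 *
 * On UP builds this is always 0. On SMP builds the id is read from the
 * per-hart private data area when the kernel runs at a virtual offset;
 * otherwise it is taken from the satp register (see the comment inside).
 */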
int rt_hw_cpu_id(void)
{
#ifndef RT_USING_SMP
    return 0;
#else
    if (rt_kmem_pvoff() != 0)
    {
        return *percpu_hartid;
    }
    else
    {
        /* if the MMU is not enabled or pvoff == 0, read the hartid from the satp register */
        rt_ubase_t hartid;

        asm volatile("csrr %0, satp" : "=r"(hartid));

        return hartid & 0xFFFF; /* assuming the hartid fits in the lower 16 bits */
    }
#endif /* RT_USING_SMP */
}
/**
 * This function initializes a thread stack. We assume that when the
 * scheduler restores this new thread, the restored context enters the
 * thread entry for the first time.
 *
 * Saved registers: s0-s11, ra, sstatus, a0
 *
 * @param tentry the entry of the thread
 * @param parameter the parameter of the entry function
 * @param stack_addr the beginning stack address
 * @param texit the function called when the thread exits
 *
 * @return stack address
 */
rt_uint8_t *rt_hw_stack_init(void *tentry,
                             void *parameter,
                             rt_uint8_t *stack_addr,
                             void *texit)
{
    rt_ubase_t *sp = (rt_ubase_t *)stack_addr;

    /* we use a strict 16-byte alignment requirement for the Q extension */
    sp = (rt_ubase_t *)RT_ALIGN_DOWN((rt_ubase_t)sp, 16);

    (*--sp) = (rt_ubase_t)tentry;
    (*--sp) = (rt_ubase_t)parameter;
    (*--sp) = (rt_ubase_t)texit;
    --sp; /* alignment */

    /* compatible to RESTORE_CONTEXT */
    extern void _rt_thread_entry(void);

    return (rt_uint8_t *)_rt_hw_stack_init(sp, (rt_ubase_t)_rt_thread_entry, K_SSTATUS_DEFAULT);
}
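
/*
 * A hedged sketch of the expected caller (the generic kernel, not this file):
 * rt_thread_init()/rt_thread_create() do roughly
 *
 *     thread->sp = (void *)rt_hw_stack_init(thread->entry, thread->parameter,
 *                      (rt_uint8_t *)((char *)thread->stack_addr +
 *                                     thread->stack_size - sizeof(rt_ubase_t)),
 *                      (void *)rt_thread_exit);
 *
 * and the returned pointer is what RESTORE_CONTEXT reloads on the first
 * switch into the thread.
 */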
/*
 * #ifdef RT_USING_SMP
 * void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
 * #else
 * void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to);
 * #endif
 */
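
/*
 * On UP builds the switch request is only recorded here (from/to plus the
 * pending flag) and is expected to be performed on the interrupt exit path;
 * on SMP builds the switch is carried out immediately via rt_hw_context_switch().
 */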
#ifndef RT_USING_SMP
void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t from_thread, rt_thread_t to_thread)
{
    if (rt_thread_switch_interrupt_flag == 0)
        rt_interrupt_from_thread = from;

    rt_interrupt_to_thread = to;
    rt_thread_switch_interrupt_flag = 1;

    return;
}
#else
void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread)
{
    /* Perform the architecture-specific context switch. This call restores
     * the target thread context and should not return when a switch is
     * performed. The caller (scheduler) invokes this function in a context
     * where local IRQs are disabled. */
    rt_base_t level;

    level = rt_hw_local_irq_disable();
    rt_hw_context_switch((rt_ubase_t)from, (rt_ubase_t)to, to_thread);
    rt_hw_local_irq_enable(level);
}
#endif /* end of RT_USING_SMP */
/** shutdown CPU */
void rt_hw_cpu_shutdown(void)
{
    rt_base_t level;

    rt_kprintf("shutdown...\n");

    level = rt_hw_interrupt_disable();
    sbi_shutdown();

    while (1)
        ;
}

void rt_hw_set_process_id(int pid)
{
    /* TODO */
}
#ifdef RT_USING_SMP
extern void _start(void);
extern int boot_hartid;

/* Boot secondary harts using the SBI HSM hart_start call. */
void rt_hw_secondary_cpu_up(void)
{
    rt_uint64_t entry_pa;
    int hart, ret;

    /* translate the kernel-virtual _start to a physical address */
#ifdef ARCH_MM_MMU
    if (rt_kmem_pvoff() != 0)
    {
        entry_pa = (rt_uint64_t)rt_kmem_v2p(&_start);
    }
    else
    {
        entry_pa = (rt_uint64_t)&_start;
    }
#else
    entry_pa = (rt_uint64_t)&_start;
#endif /* ARCH_MM_MMU */

    /* Assumes hart IDs are in the range [0, RT_CPUS_NR) */
    RT_ASSERT(boot_hartid < RT_CPUS_NR);

    for (hart = 0; hart < RT_CPUS_NR; hart++)
    {
        if (hart == boot_hartid)
            continue;

        ret = sbi_hsm_hart_start((unsigned long)hart,
                                 (unsigned long)entry_pa,
                                 0UL);
        if (ret)
        {
            rt_kprintf("sbi_hsm_hart_start failed for hart %d: %d\n", hart, ret);
        }
    }
}
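
/*
 * Note: per the SBI HSM extension, a hart started this way begins executing
 * at entry_pa in S-mode with the MMU off, a0 = its hartid and a1 = the opaque
 * argument (0UL here); the assembly entry _start can use a0 to tell the
 * secondary harts apart.
 */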
#ifdef ARCH_MM_MMU
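/**
 * @brief store a hart's id into its slot of the per-hart data area
 *
 * The caller passes the kernel-virtual address of the per-CPU hartid
 * variable; it is remembered for rt_hw_cpu_id() and the id itself is written
 * through the physical alias of the area (virtual address + rt_kmem_pvoff()),
 * offset by hartid times the size of one per-hart region.
 */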
void rt_hw_percpu_hartid_init(rt_ubase_t *percpu_ptr, rt_ubase_t hartid)
{
    RT_ASSERT(hartid < RT_CPUS_NR);

    rt_ubase_t *percpu_hartid_paddr;
    /* __percpu_start/__percpu_end are linker-script symbols bounding one per-hart region */
    rt_size_t percpu_size = (rt_size_t)((rt_ubase_t)&__percpu_end - (rt_ubase_t)&__percpu_start);

    percpu_hartid = percpu_ptr;

    /* from virtual address to physical address */
    percpu_ptr = (rt_ubase_t *)((rt_ubase_t)percpu_ptr + (rt_ubase_t)rt_kmem_pvoff());
    percpu_hartid_paddr = percpu_ptr;

    /* save to the real (per-hart) area */
    *(rt_ubase_t *)((void *)percpu_hartid_paddr + hartid * percpu_size) = hartid;
}
#endif /* ARCH_MM_MMU */
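
/**
 * @brief C entry point for a secondary hart
 *
 * Expected to be reached from the startup code of a hart brought up by
 * rt_hw_secondary_cpu_up(): switch to the kernel address space (on Smart
 * builds), enable the per-hart tick and IPIs, then hand over to the
 * scheduler.
 */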
void secondary_cpu_entry(void)
{
#ifdef RT_USING_SMART
    /* switch to the kernel address space */
    rt_hw_aspace_switch(&rt_kernel_space);
#endif

    /* The PLIC peripheral interrupts are currently handled by the boot hart. */
    /* Enable the Supervisor-Timer bit in SIE */
    rt_hw_tick_init();

    /* ipi init */
    rt_hw_ipi_init();

    rt_hw_spin_lock(&_cpus_lock);

    /* invoke the system scheduler start for this secondary CPU */
    rt_system_scheduler_start();
}
#endif /* RT_USING_SMP */