cpu.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-10-30     Bernard      The first version
 */

#include <rthw.h>
#include <rtthread.h>

#ifdef RT_USING_SMART
#include <lwp.h>
#endif

#ifdef RT_USING_SMP
static struct rt_cpu _cpus[RT_CPUS_NR];
rt_hw_spinlock_t _cpus_lock;

/*
 * disable scheduler
 */
static void _cpu_preempt_disable(void)
{
    rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_thread_self();
    if (!current_thread)
    {
        rt_hw_local_irq_enable(level);
        return;
    }

    /* lock scheduler for local cpu */
    current_thread->scheduler_lock_nest ++;

    /* enable interrupt */
    rt_hw_local_irq_enable(level);
}

/*
 * enable scheduler
 */
static void _cpu_preempt_enable(void)
{
    rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_thread_self();
    if (!current_thread)
    {
        rt_hw_local_irq_enable(level);
        return;
    }

    /* unlock scheduler for local cpu */
    current_thread->scheduler_lock_nest --;
    rt_schedule();

    /* enable interrupt */
    rt_hw_local_irq_enable(level);
}
#endif /* RT_USING_SMP */

/**
 * @brief Initialize a static spinlock object.
 *
 * @param lock is a pointer to the spinlock to initialize.
 */
void rt_spin_lock_init(struct rt_spinlock *lock)
{
#ifdef RT_USING_SMP
    rt_hw_spin_lock_init(&lock->lock);
#endif
}
RTM_EXPORT(rt_spin_lock_init)

/**
 * @brief This function will lock the spinlock.
 *
 * @note If the spinlock is locked, the current CPU will keep polling the spinlock state
 *       until the spinlock is unlocked.
 *
 * @param lock is a pointer to the spinlock.
 */
void rt_spin_lock(struct rt_spinlock *lock)
{
#ifdef RT_USING_SMP
    _cpu_preempt_disable();
    rt_hw_spin_lock(&lock->lock);
#else
    rt_enter_critical();
#endif
}
RTM_EXPORT(rt_spin_lock)

/**
 * @brief This function will unlock the spinlock.
 *
 * @param lock is a pointer to the spinlock.
 */
void rt_spin_unlock(struct rt_spinlock *lock)
{
#ifdef RT_USING_SMP
    rt_hw_spin_unlock(&lock->lock);
    _cpu_preempt_enable();
#else
    rt_exit_critical();
#endif
}
RTM_EXPORT(rt_spin_unlock)
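
/*
 * Usage sketch (illustrative only, not part of this file): a minimal critical
 * section built on rt_spin_lock()/rt_spin_unlock(). The lock, counter, and
 * function names below are hypothetical.
 *
 *     static struct rt_spinlock demo_lock;   // initialized once via rt_spin_lock_init()
 *     static rt_uint32_t demo_counter;
 *
 *     void demo_increment(void)
 *     {
 *         rt_spin_lock(&demo_lock);          // spin until the lock is taken; preemption disabled
 *         demo_counter++;                    // shared data is safe to modify here
 *         rt_spin_unlock(&demo_lock);        // release the lock; preemption re-enabled
 *     }
 */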

/**
 * @brief This function will disable the local interrupt and then lock the spinlock.
 *
 * @note If the spinlock is locked, the current CPU will keep polling the spinlock state
 *       until the spinlock is unlocked.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @return Return current cpu interrupt status.
 */
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
#ifdef RT_USING_SMP
    unsigned long level;

    _cpu_preempt_disable();

    level = rt_hw_local_irq_disable();
    rt_hw_spin_lock(&lock->lock);

    return level;
#else
    return rt_hw_interrupt_disable();
#endif
}
RTM_EXPORT(rt_spin_lock_irqsave)

/**
 * @brief This function will unlock the spinlock and then restore current cpu interrupt status.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @param level is interrupt status returned by rt_spin_lock_irqsave().
 */
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
#ifdef RT_USING_SMP
    rt_hw_spin_unlock(&lock->lock);
    rt_hw_local_irq_enable(level);
    _cpu_preempt_enable();
#else
    rt_hw_interrupt_enable(level);
#endif
}
RTM_EXPORT(rt_spin_unlock_irqrestore)
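
/*
 * Usage sketch (illustrative only, not part of this file): the irqsave variant
 * is the form to use when the protected data is also touched from interrupt
 * context on the same CPU. The names below are hypothetical.
 *
 *     static struct rt_spinlock demo_lock;
 *     static rt_uint32_t demo_pending;
 *
 *     void demo_post_event(void)
 *     {
 *         rt_base_t level;
 *
 *         level = rt_spin_lock_irqsave(&demo_lock);     // local IRQs off, lock held
 *         demo_pending++;
 *         rt_spin_unlock_irqrestore(&demo_lock, level); // lock released, IRQ state restored
 *     }
 */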

/**
 * @brief This function will return the current cpu object.
 *
 * @return Return a pointer to the current cpu object.
 */
struct rt_cpu *rt_cpu_self(void)
{
    return &_cpus[rt_hw_cpu_id()];
}

/**
 * @brief This function will return the cpu object corresponding to index.
 *
 * @return Return a pointer to the cpu object corresponding to index.
 */
struct rt_cpu *rt_cpu_index(int index)
{
    return &_cpus[index];
}
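
/*
 * Usage sketch (illustrative only, not part of this file): walking every
 * per-cpu object with rt_cpu_index() to see which thread each core is
 * running. Assumes an SMP build (RT_CPUS_NR defined); the demo function
 * name is hypothetical.
 *
 *     void demo_dump_cpus(void)
 *     {
 *         int i;
 *
 *         for (i = 0; i < RT_CPUS_NR; i++)
 *         {
 *             struct rt_cpu *pcpu = rt_cpu_index(i);
 *
 *             if (pcpu->current_thread != RT_NULL)
 *                 rt_kprintf("cpu %d: %s\n", i, pcpu->current_thread->name);
 *         }
 *     }
 */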

/**
 * @brief This function will lock the scheduler on all cpus and disable the local irq.
 *
 * @return Return current cpu interrupt status.
 */
rt_base_t rt_cpus_lock(void)
{
    rt_base_t level;
    struct rt_cpu* pcpu;

    level = rt_hw_local_irq_disable();

    pcpu = rt_cpu_self();
    if (pcpu->current_thread != RT_NULL)
    {
        register rt_ubase_t lock_nest = pcpu->current_thread->cpus_lock_nest;

        pcpu->current_thread->cpus_lock_nest++;
        if (lock_nest == 0)
        {
            pcpu->current_thread->scheduler_lock_nest++;
            rt_hw_spin_lock(&_cpus_lock);
        }
    }

    return level;
}
RTM_EXPORT(rt_cpus_lock);

/**
 * @brief This function will unlock the scheduler on all cpus and restore the local irq status.
 *
 * @param level is interrupt status returned by rt_cpus_lock().
 */
void rt_cpus_unlock(rt_base_t level)
{
    struct rt_cpu* pcpu = rt_cpu_self();

    if (pcpu->current_thread != RT_NULL)
    {
        pcpu->current_thread->cpus_lock_nest--;

        if (pcpu->current_thread->cpus_lock_nest == 0)
        {
            pcpu->current_thread->scheduler_lock_nest--;
            rt_hw_spin_unlock(&_cpus_lock);
        }
    }

    rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_cpus_unlock);
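
/*
 * Usage sketch (illustrative only, not part of this file): rt_cpus_lock() and
 * rt_cpus_unlock() bracket a section that must not race with the scheduler on
 * any core. The demo function name is hypothetical.
 *
 *     void demo_touch_kernel_wide_state(void)
 *     {
 *         rt_base_t level;
 *
 *         level = rt_cpus_lock();    // local IRQs off, global scheduler lock held
 *         // ... examine or update kernel-wide state here ...
 *         rt_cpus_unlock(level);     // drop the lock, restore the IRQ state
 *     }
 */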

/**
 * This function is invoked by the scheduler.
 * It will restore the lock state to whatever the thread's counter expects.
 * If the target thread does not hold the cpus lock, the cpus lock is released.
 */
void rt_cpus_lock_status_restore(struct rt_thread *thread)
{
    struct rt_cpu* pcpu = rt_cpu_self();

#if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
    lwp_mmu_switch(thread);
#endif
    pcpu->current_thread = thread;
    if (!thread->cpus_lock_nest)
    {
        rt_hw_spin_unlock(&_cpus_lock);
    }
}
RTM_EXPORT(rt_cpus_lock_status_restore);