
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-10-30     Bernard      The first version
 * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
 * 2023-12-10     xqyjlj       spinlock should lock sched
 * 2024-01-25     Shell        Using rt_exit_critical_safe
 */
#include <rthw.h>
#include <rtthread.h>

#ifdef RT_USING_SMART
#include <lwp.h>
#endif

#ifdef RT_USING_DEBUG
rt_base_t _cpus_critical_level;
#endif /* RT_USING_DEBUG */

static struct rt_cpu _cpus[RT_CPUS_NR];
rt_hw_spinlock_t _cpus_lock;

#if defined(RT_DEBUGING_SPINLOCK)
void *_cpus_lock_owner = 0;
void *_cpus_lock_pc    = 0;
#endif /* RT_DEBUGING_SPINLOCK */
/**
 * @addtogroup group_thread_comm
 *
 * @cond DOXYGEN_SMP
 *
 * @{
 */
/**
 * @brief Initialize a static spinlock object.
 *
 * @param lock is a pointer to the spinlock to initialize.
 *
 * @note This function has a UP version and an MP version.
 */
void rt_spin_lock_init(struct rt_spinlock *lock)
{
    rt_hw_spin_lock_init(&lock->lock);
}
RTM_EXPORT(rt_spin_lock_init)
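
/*
 * Usage sketch (illustrative only; `demo_lock` and `demo_init` are
 * hypothetical names, not part of this file):
 *
 * @code
 * static struct rt_spinlock demo_lock;
 *
 * static int demo_init(void)
 * {
 *     rt_spin_lock_init(&demo_lock); // must run before the first lock/unlock
 *     return 0;
 * }
 * INIT_DEVICE_EXPORT(demo_init);
 * @endcode
 */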
/**
 * @brief This function will lock the spinlock and lock the thread scheduler.
 *
 * If the spinlock is already locked, the current CPU keeps polling the
 * spinlock state until the spinlock is unlocked.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @note This function has a UP version and an MP version.
 */
void rt_spin_lock(struct rt_spinlock *lock)
{
    rt_enter_critical();
    rt_hw_spin_lock(&lock->lock);
    RT_SPIN_LOCK_DEBUG(lock);
}
RTM_EXPORT(rt_spin_lock)
/**
 * @brief This function will unlock the spinlock and unlock the thread scheduler.
 *
 * If a reschedule was requested while the lock was held, it takes place in
 * this function once the critical section is exited.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @note This function has a UP version and an MP version.
 */
void rt_spin_unlock(struct rt_spinlock *lock)
{
    rt_base_t critical_level;

    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
    rt_hw_spin_unlock(&lock->lock);
    rt_exit_critical_safe(critical_level);
}
RTM_EXPORT(rt_spin_unlock)
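
/*
 * Usage sketch for the lock/unlock pair (illustrative only; the protected
 * counter and the function name are hypothetical):
 *
 * @code
 * static struct rt_spinlock counter_lock; // initialized elsewhere
 * static rt_uint32_t shared_counter;
 *
 * void counter_increment(void)
 * {
 *     rt_spin_lock(&counter_lock);   // spins until acquired; scheduler locked
 *     shared_counter++;              // keep the critical section short
 *     rt_spin_unlock(&counter_lock); // any pending reschedule runs here
 * }
 * @endcode
 */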
/**
 * @brief This function will disable local interrupts, then lock the spinlock
 *        and lock the thread scheduler.
 *
 * If the spinlock is already locked, the current CPU keeps polling the
 * spinlock state until the spinlock is unlocked.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @return the interrupt status of the current CPU before it was disabled.
 *
 * @note This function has a UP version and an MP version.
 */
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
    rt_base_t level;

    level = rt_hw_local_irq_disable();
    rt_enter_critical();
    rt_hw_spin_lock(&lock->lock);
    RT_SPIN_LOCK_DEBUG(lock);
    return level;
}
RTM_EXPORT(rt_spin_lock_irqsave)
/**
 * @brief This function will unlock the spinlock, unlock the thread scheduler,
 *        and then restore the interrupt status of the current CPU.
 *
 * If a reschedule was requested while the lock was held, it takes place in
 * this function once the critical section is exited.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @param level is the interrupt status returned by rt_spin_lock_irqsave().
 *
 * @note This function has a UP version and an MP version.
 */
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
    rt_base_t critical_level;

    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
    rt_hw_spin_unlock(&lock->lock);
    rt_exit_critical_safe(critical_level);
    rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_spin_unlock_irqrestore)
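
/*
 * Usage sketch for data shared with interrupt handlers (illustrative;
 * `isr_lock` and `event_count` are hypothetical names):
 *
 * @code
 * static struct rt_spinlock isr_lock; // initialized elsewhere
 * static rt_uint32_t event_count;
 *
 * rt_uint32_t event_count_read_and_clear(void)
 * {
 *     rt_base_t level;
 *     rt_uint32_t count;
 *
 *     level = rt_spin_lock_irqsave(&isr_lock); // also masks local IRQs
 *     count = event_count;
 *     event_count = 0;
 *     rt_spin_unlock_irqrestore(&isr_lock, level); // restores saved status
 *     return count;
 * }
 * @endcode
 */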
/**
 * @brief This function will return the current cpu object.
 *
 * @return a pointer to the current cpu object.
 *
 * @note This function has a UP version and an MP version.
 */
struct rt_cpu *rt_cpu_self(void)
{
    return &_cpus[rt_hw_cpu_id()];
}
/**
 * @brief This function will return the cpu object corresponding to an index.
 *
 * @param index is the index of the target cpu object.
 *
 * @return a pointer to the cpu object corresponding to index.
 *
 * @note This function has a UP version and an MP version.
 */
struct rt_cpu *rt_cpu_index(int index)
{
    return &_cpus[index];
}
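
/*
 * Usage sketch (illustrative; this walk over the per-CPU objects is an
 * assumption about how a caller might use the two accessors above):
 *
 * @code
 * void dump_cpu_threads(void)
 * {
 *     int i;
 *
 *     for (i = 0; i < RT_CPUS_NR; i++)
 *     {
 *         struct rt_cpu *pcpu = rt_cpu_index(i);
 *         rt_kprintf("cpu %d: current thread %p\n", i, pcpu->current_thread);
 *     }
 * }
 * @endcode
 */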
/**
 * @brief This function will lock the scheduler on all CPUs and disable local
 *        interrupts.
 *
 * @return the interrupt status of the current CPU before it was disabled.
 *
 * @note This function only has an MP version.
 */
rt_base_t rt_cpus_lock(void)
{
    rt_base_t level;
    struct rt_cpu *pcpu;

    level = rt_hw_local_irq_disable();
    pcpu  = rt_cpu_self();
    if (pcpu->current_thread != RT_NULL)
    {
        rt_ubase_t lock_nest = rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest));

        rt_atomic_add(&(pcpu->current_thread->cpus_lock_nest), 1);
        if (lock_nest == 0)
        {
            rt_enter_critical();
            rt_hw_spin_lock(&_cpus_lock);
#ifdef RT_USING_DEBUG
            _cpus_critical_level = rt_critical_level();
#endif /* RT_USING_DEBUG */
#ifdef RT_DEBUGING_SPINLOCK
            _cpus_lock_owner = pcpu->current_thread;
            _cpus_lock_pc    = __GET_RETURN_ADDRESS;
#endif /* RT_DEBUGING_SPINLOCK */
        }
    }

    return level;
}
RTM_EXPORT(rt_cpus_lock);
/**
 * @brief This function will unlock the scheduler on all CPUs and restore the
 *        local interrupt status.
 *
 * @param level is the interrupt status returned by rt_cpus_lock().
 *
 * @note This function only has an MP version.
 */
void rt_cpus_unlock(rt_base_t level)
{
    struct rt_cpu *pcpu = rt_cpu_self();

    if (pcpu->current_thread != RT_NULL)
    {
        rt_base_t critical_level = 0;

        RT_ASSERT(rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest)) > 0);
        rt_atomic_sub(&(pcpu->current_thread->cpus_lock_nest), 1);
        if (pcpu->current_thread->cpus_lock_nest == 0)
        {
#if defined(RT_DEBUGING_SPINLOCK)
            _cpus_lock_owner = __OWNER_MAGIC;
            _cpus_lock_pc    = RT_NULL;
#endif /* RT_DEBUGING_SPINLOCK */
#ifdef RT_USING_DEBUG
            critical_level       = _cpus_critical_level;
            _cpus_critical_level = 0;
#endif /* RT_USING_DEBUG */
            rt_hw_spin_unlock(&_cpus_lock);
            rt_exit_critical_safe(critical_level);
        }
    }
    rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_cpus_unlock);
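
/*
 * Usage sketch (illustrative; the globally shared data being protected is
 * hypothetical). Since rt_cpus_lock() nests per thread, this is safe even
 * if a caller up the stack already holds the lock:
 *
 * @code
 * void global_list_update(void)
 * {
 *     rt_base_t level;
 *
 *     level = rt_cpus_lock();  // IRQs off, scheduler locked on all CPUs
 *     // ... modify data that every CPU may touch ...
 *     rt_cpus_unlock(level);   // releases only when nesting reaches zero
 * }
 * @endcode
 */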
/**
 * This function is invoked by the scheduler.
 * It restores the lock state to whatever the thread's counter expects.
 * If the target thread has not locked the cpus, the cpus lock is released.
 *
 * @param thread is a pointer to the target thread.
 *
 * @note This function only has an MP version.
 */
void rt_cpus_lock_status_restore(struct rt_thread *thread)
{
#if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
    lwp_aspace_switch(thread);
#endif
    rt_sched_post_ctx_switch(thread);
}
RTM_EXPORT(rt_cpus_lock_status_restore);
/* A safe API with debugging features that can be called from most code */
#undef rt_cpu_get_id

/**
 * @brief Get the logical CPU ID.
 *
 * @return the logical CPU ID.
 *
 * @note This function only has an MP version.
 */
rt_base_t rt_cpu_get_id(void)
{
    RT_ASSERT(rt_sched_thread_is_binding(RT_NULL) ||
              rt_hw_interrupt_is_disabled() ||
              !rt_scheduler_is_available());

    return rt_hw_cpu_id();
}
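
/*
 * Usage sketch (illustrative): the assertion above means a stable answer
 * requires the caller to be bound to one CPU, or to have interrupts or the
 * scheduler disabled, e.g.:
 *
 * @code
 * rt_base_t level = rt_hw_local_irq_disable();
 * rt_base_t cpu   = rt_cpu_get_id(); // cannot migrate while IRQs are off
 * rt_hw_local_irq_enable(level);
 * @endcode
 */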
/**
 * @}
 *
 * @endcond
 */