cpu_mp.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-10-30     Bernard      The first version
 * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
 * 2023-12-10     xqyjlj       spinlock should lock sched
 * 2024-01-25     Shell        Using rt_exit_critical_safe
 */
#include <rthw.h>
#include <rtthread.h>

#ifdef RT_USING_SMART
#include <lwp.h>
#endif

#ifdef RT_USING_DEBUG
rt_base_t _cpus_critical_level;
#endif /* RT_USING_DEBUG */

static struct rt_cpu _cpus[RT_CPUS_NR];
rt_hw_spinlock_t _cpus_lock;

#if defined(RT_DEBUGING_SPINLOCK)
void *_cpus_lock_owner = 0;
void *_cpus_lock_pc = 0;
#endif /* RT_DEBUGING_SPINLOCK */

/**
 * @brief Initialize a static spinlock object.
 *
 * @param lock is a pointer to the spinlock to initialize.
 */
void rt_spin_lock_init(struct rt_spinlock *lock)
{
    rt_hw_spin_lock_init(&lock->lock);
}
RTM_EXPORT(rt_spin_lock_init)

/**
 * @brief This function will lock the spinlock and also lock the thread scheduler.
 *
 * @note If the spinlock is already locked, the current CPU will keep polling the spinlock state
 *       until the spinlock is unlocked.
 *
 * @param lock is a pointer to the spinlock.
 */
void rt_spin_lock(struct rt_spinlock *lock)
{
    rt_enter_critical();
    rt_hw_spin_lock(&lock->lock);
    RT_SPIN_LOCK_DEBUG(lock);
}
RTM_EXPORT(rt_spin_lock)

/**
 * @brief This function will unlock the spinlock and also unlock the thread scheduler.
 *
 * @param lock is a pointer to the spinlock.
 */
void rt_spin_unlock(struct rt_spinlock *lock)
{
    rt_base_t critical_level;

    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
    rt_hw_spin_unlock(&lock->lock);
    rt_exit_critical_safe(critical_level);
}
RTM_EXPORT(rt_spin_unlock)
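
/*
 * Usage example (illustrative sketch, not part of this file): a hypothetical
 * module protecting a shared counter with the plain spinlock API above.
 * demo_lock, demo_counter and the demo_* functions are made-up names.
 *
 *     static struct rt_spinlock demo_lock;
 *     static rt_uint32_t demo_counter;
 *
 *     void demo_init(void)
 *     {
 *         rt_spin_lock_init(&demo_lock);
 *     }
 *
 *     void demo_add(void)
 *     {
 *         rt_spin_lock(&demo_lock);    // also locks the thread scheduler
 *         demo_counter++;
 *         rt_spin_unlock(&demo_lock);  // releases the lock and the scheduler
 *     }
 */
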
/**
 * @brief This function will disable local interrupts and then lock the spinlock; it also locks the thread scheduler.
 *
 * @note If the spinlock is already locked, the current CPU will keep polling the spinlock state
 *       until the spinlock is unlocked.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @return Return the current cpu interrupt status.
 */
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
    rt_base_t level;

    level = rt_hw_local_irq_disable();
    rt_enter_critical();
    rt_hw_spin_lock(&lock->lock);
    RT_SPIN_LOCK_DEBUG(lock);
    return level;
}
RTM_EXPORT(rt_spin_lock_irqsave)

/**
 * @brief This function will unlock the spinlock and then restore the cpu interrupt status; it also unlocks the thread scheduler.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @param level is the interrupt status returned by rt_spin_lock_irqsave().
 */
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
    rt_base_t critical_level;

    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
    rt_hw_spin_unlock(&lock->lock);
    rt_exit_critical_safe(critical_level);
    rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_spin_unlock_irqrestore)
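
/*
 * Usage example (illustrative sketch, not part of this file): when data is
 * shared with an interrupt handler, thread-side code should take the lock
 * with local interrupts disabled, which is what the IRQ-saving variant does.
 * demo_dev_lock, demo_dev_events and demo_dev_push() are made-up names.
 *
 *     static struct rt_spinlock demo_dev_lock;
 *     static rt_uint32_t demo_dev_events;
 *
 *     void demo_dev_push(void)
 *     {
 *         rt_base_t level;
 *
 *         level = rt_spin_lock_irqsave(&demo_dev_lock);
 *         demo_dev_events++;                       // also updated from an ISR
 *         rt_spin_unlock_irqrestore(&demo_dev_lock, level);
 *     }
 */
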
/**
 * @brief This function will return the current cpu object.
 *
 * @return Return a pointer to the current cpu object.
 */
struct rt_cpu *rt_cpu_self(void)
{
    return &_cpus[rt_hw_cpu_id()];
}

/**
 * @brief This function will return the cpu object corresponding to index.
 *
 * @param index is the index of the target cpu object.
 *
 * @return Return a pointer to the cpu object corresponding to index.
 */
struct rt_cpu *rt_cpu_index(int index)
{
    return &_cpus[index];
}
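
/*
 * Usage example (illustrative sketch, not part of this file): walking the
 * per-CPU objects and comparing them with the current CPU's object, assuming
 * the caller already prevents migration (see rt_cpu_get_id() below).
 * demo_dump_cpus() is a made-up name.
 *
 *     void demo_dump_cpus(void)
 *     {
 *         int i;
 *         struct rt_cpu *self = rt_cpu_self();
 *
 *         for (i = 0; i < RT_CPUS_NR; i++)
 *         {
 *             struct rt_cpu *pcpu = rt_cpu_index(i);
 *
 *             rt_kprintf("cpu %d%s: current_thread=%p\n", i,
 *                        (pcpu == self) ? " (this cpu)" : "",
 *                        pcpu->current_thread);
 *         }
 *     }
 */
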
/**
 * @brief This function will lock all CPUs' schedulers and disable the local irq.
 *
 * @return Return the current cpu interrupt status.
 */
rt_base_t rt_cpus_lock(void)
{
    rt_base_t level;
    struct rt_cpu *pcpu;

    level = rt_hw_local_irq_disable();
    pcpu = rt_cpu_self();
    if (pcpu->current_thread != RT_NULL)
    {
        rt_ubase_t lock_nest = rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest));

        rt_atomic_add(&(pcpu->current_thread->cpus_lock_nest), 1);
        if (lock_nest == 0)
        {
            rt_enter_critical();
            rt_hw_spin_lock(&_cpus_lock);
#ifdef RT_USING_DEBUG
            _cpus_critical_level = rt_critical_level();
#endif /* RT_USING_DEBUG */
#ifdef RT_DEBUGING_SPINLOCK
            _cpus_lock_owner = pcpu->current_thread;
            _cpus_lock_pc = __GET_RETURN_ADDRESS;
#endif /* RT_DEBUGING_SPINLOCK */
        }
    }

    return level;
}
RTM_EXPORT(rt_cpus_lock);

/**
 * @brief This function will unlock all CPUs' schedulers and restore the local irq.
 *
 * @param level is the interrupt status returned by rt_cpus_lock().
 */
void rt_cpus_unlock(rt_base_t level)
{
    struct rt_cpu *pcpu = rt_cpu_self();

    if (pcpu->current_thread != RT_NULL)
    {
        rt_base_t critical_level = 0;

        RT_ASSERT(rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest)) > 0);
        rt_atomic_sub(&(pcpu->current_thread->cpus_lock_nest), 1);

        if (pcpu->current_thread->cpus_lock_nest == 0)
        {
#if defined(RT_DEBUGING_SPINLOCK)
            _cpus_lock_owner = __OWNER_MAGIC;
            _cpus_lock_pc = RT_NULL;
#endif /* RT_DEBUGING_SPINLOCK */
#ifdef RT_USING_DEBUG
            critical_level = _cpus_critical_level;
            _cpus_critical_level = 0;
#endif /* RT_USING_DEBUG */
            rt_hw_spin_unlock(&_cpus_lock);
            rt_exit_critical_safe(critical_level);
        }
    }
    rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_cpus_unlock);
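
/*
 * Usage example (illustrative sketch, not part of this file): a hypothetical
 * routine that must keep every CPU's scheduler locked while it updates
 * kernel-wide state. demo_update_global() is a made-up name.
 *
 *     void demo_update_global(void)
 *     {
 *         rt_base_t level;
 *
 *         level = rt_cpus_lock();   // lock all CPUs' schedulers, local IRQs off
 *         // ... modify state that any CPU may observe ...
 *         rt_cpus_unlock(level);    // drop the lock, restore local IRQ status
 *     }
 *
 * The lock nests per thread via cpus_lock_nest, so a thread that already
 * holds it can call rt_cpus_lock() again without deadlocking.
 */
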
/**
 * This function is invoked by the scheduler.
 * It will restore the lock state to whatever the thread's counter expects.
 * If the target thread has not locked the cpus, the cpus lock is released.
 *
 * @param thread is a pointer to the target thread.
 */
void rt_cpus_lock_status_restore(struct rt_thread *thread)
{
#if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
    lwp_aspace_switch(thread);
#endif
    rt_sched_post_ctx_switch(thread);
}
RTM_EXPORT(rt_cpus_lock_status_restore);

/* A safe API with a debugging check, intended to be called from most code */
#undef rt_cpu_get_id

/**
 * @brief Get the logical CPU ID.
 *
 * @return the logical CPU ID.
 */
rt_base_t rt_cpu_get_id(void)
{
    RT_ASSERT(rt_sched_thread_is_binding(RT_NULL) ||
              rt_hw_interrupt_is_disabled() ||
              !rt_scheduler_is_available());

    return rt_hw_cpu_id();
}
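
/*
 * Usage example (illustrative sketch, not part of this file): reading the
 * logical CPU ID safely. The assertion above requires that the caller cannot
 * migrate, e.g. because local interrupts are disabled, the thread is bound
 * to one CPU, or the scheduler is not yet available.
 *
 *     rt_base_t level = rt_hw_local_irq_disable();
 *     rt_base_t id    = rt_cpu_get_id();        // stable while IRQs are off
 *     struct rt_cpu *pcpu = rt_cpu_index(id);
 *     // ... access per-CPU state via pcpu ...
 *     rt_hw_local_irq_enable(level);
 */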