cpu.c

/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-10-30     Bernard      The first version
 * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
 * 2023-12-10     xqyjlj       spinlock should lock sched
 * 2024-01-25     Shell        Using rt_exit_critical_safe
 */

#include <rthw.h>
#include <rtthread.h>

#ifdef RT_USING_SMART
#include <lwp.h>
#endif

#ifdef RT_USING_DEBUG
rt_base_t _cpus_critical_level;
#endif /* RT_USING_DEBUG */

#ifdef RT_USING_SMP
static struct rt_cpu _cpus[RT_CPUS_NR];
rt_hw_spinlock_t _cpus_lock;
#if defined(RT_DEBUGING_SPINLOCK)
void *_cpus_lock_owner = 0;
void *_cpus_lock_pc = 0;
#endif /* RT_DEBUGING_SPINLOCK */

/**
 * @brief Initialize a static spinlock object.
 *
 * @param lock is a pointer to the spinlock to initialize.
 */
void rt_spin_lock_init(struct rt_spinlock *lock)
{
    rt_hw_spin_lock_init(&lock->lock);
}
RTM_EXPORT(rt_spin_lock_init)
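
/*
 * A minimal usage sketch (not part of the original file): a driver would
 * typically embed a spinlock in its own context structure and initialize it
 * once before the lock is first taken. The demo_dev and demo_init names are
 * hypothetical.
 *
 *     struct demo_dev
 *     {
 *         struct rt_spinlock lock;
 *         rt_uint32_t        counter;
 *     };
 *
 *     static void demo_init(struct demo_dev *dev)
 *     {
 *         rt_spin_lock_init(&dev->lock);
 *         dev->counter = 0;
 *     }
 */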

/**
 * @brief This function will lock the spinlock and lock the thread scheduler.
 *
 * @note If the spinlock is already locked, the current CPU will keep polling the spinlock state
 *       until the spinlock is unlocked.
 *
 * @param lock is a pointer to the spinlock.
 */
void rt_spin_lock(struct rt_spinlock *lock)
{
    rt_enter_critical();
    rt_hw_spin_lock(&lock->lock);
    RT_SPIN_LOCK_DEBUG(lock);
}
RTM_EXPORT(rt_spin_lock)

/**
 * @brief This function will unlock the spinlock and unlock the thread scheduler.
 *
 * @param lock is a pointer to the spinlock.
 */
void rt_spin_unlock(struct rt_spinlock *lock)
{
    rt_base_t critical_level;

    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
    rt_hw_spin_unlock(&lock->lock);
    rt_exit_critical_safe(critical_level);
}
RTM_EXPORT(rt_spin_unlock)
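
/*
 * A minimal usage sketch (an assumption, not part of the original file):
 * guard a shared counter that is only touched from thread context, so the
 * local interrupt does not need to be masked. demo_dev reuses the
 * hypothetical structure sketched above.
 *
 *     static void demo_increment(struct demo_dev *dev)
 *     {
 *         rt_spin_lock(&dev->lock);
 *         dev->counter++;
 *         rt_spin_unlock(&dev->lock);
 *     }
 */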

/**
 * @brief This function will disable the local interrupt and then lock the spinlock; it also locks the thread scheduler.
 *
 * @note If the spinlock is already locked, the current CPU will keep polling the spinlock state
 *       until the spinlock is unlocked.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @return Return the current cpu interrupt status.
 */
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
{
    rt_base_t level;

    level = rt_hw_local_irq_disable();
    rt_enter_critical();
    rt_hw_spin_lock(&lock->lock);
    RT_SPIN_LOCK_DEBUG(lock);
    return level;
}
RTM_EXPORT(rt_spin_lock_irqsave)

/**
 * @brief This function will unlock the spinlock and then restore the current cpu interrupt status; it also unlocks the thread scheduler.
 *
 * @param lock is a pointer to the spinlock.
 *
 * @param level is the interrupt status returned by rt_spin_lock_irqsave().
 */
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
{
    rt_base_t critical_level;

    RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
    rt_hw_spin_unlock(&lock->lock);
    rt_hw_local_irq_enable(level);
    rt_exit_critical_safe(critical_level);
}
RTM_EXPORT(rt_spin_unlock_irqrestore)
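
/*
 * A minimal usage sketch (an assumption, not part of the original file): when
 * the protected data is also touched from an interrupt handler, the
 * irqsave/irqrestore pair is used so the lock cannot deadlock against a local
 * ISR. demo_dev and demo_store are hypothetical names.
 *
 *     static void demo_store(struct demo_dev *dev, rt_uint32_t value)
 *     {
 *         rt_base_t level;
 *
 *         level = rt_spin_lock_irqsave(&dev->lock);
 *         dev->counter = value;
 *         rt_spin_unlock_irqrestore(&dev->lock, level);
 *     }
 */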

/**
 * @brief This function will return the current cpu object.
 *
 * @return Return a pointer to the current cpu object.
 */
struct rt_cpu *rt_cpu_self(void)
{
    return &_cpus[rt_hw_cpu_id()];
}

/**
 * @brief This function will return the cpu object corresponding to index.
 *
 * @param index is the index of the target cpu object.
 *
 * @return Return a pointer to the cpu object corresponding to index.
 */
struct rt_cpu *rt_cpu_index(int index)
{
    return &_cpus[index];
}
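
/*
 * A minimal usage sketch (an assumption, not part of the original file): walk
 * every per-CPU object and inspect the thread currently running on it, using
 * only a field this file itself touches (current_thread). demo_dump_cpus is a
 * hypothetical helper.
 *
 *     void demo_dump_cpus(void)
 *     {
 *         int i;
 *
 *         for (i = 0; i < RT_CPUS_NR; i++)
 *         {
 *             struct rt_cpu *pcpu = rt_cpu_index(i);
 *
 *             if (pcpu->current_thread != RT_NULL)
 *             {
 *                 rt_kprintf("cpu%d: thread %p\n", i, pcpu->current_thread);
 *             }
 *         }
 *     }
 */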

/**
 * @brief This function will lock the scheduler on all cpus and disable the local interrupt.
 *
 * @return Return the current cpu interrupt status.
 */
rt_base_t rt_cpus_lock(void)
{
    rt_base_t level;
    struct rt_cpu *pcpu;

    level = rt_hw_local_irq_disable();
    pcpu = rt_cpu_self();
    if (pcpu->current_thread != RT_NULL)
    {
        register rt_ubase_t lock_nest = rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest));

        rt_atomic_add(&(pcpu->current_thread->cpus_lock_nest), 1);
        if (lock_nest == 0)
        {
            rt_enter_critical();
            rt_hw_spin_lock(&_cpus_lock);
#ifdef RT_USING_DEBUG
            _cpus_critical_level = rt_critical_level();
#endif /* RT_USING_DEBUG */
#ifdef RT_DEBUGING_SPINLOCK
            _cpus_lock_owner = pcpu->current_thread;
            _cpus_lock_pc = __GET_RETURN_ADDRESS;
#endif /* RT_DEBUGING_SPINLOCK */
        }
    }

    return level;
}
RTM_EXPORT(rt_cpus_lock);

/**
 * @brief This function will unlock the scheduler on all cpus and restore the local interrupt status.
 *
 * @param level is the interrupt status returned by rt_cpus_lock().
 */
void rt_cpus_unlock(rt_base_t level)
{
    struct rt_cpu *pcpu = rt_cpu_self();

    if (pcpu->current_thread != RT_NULL)
    {
        rt_base_t critical_level = 0;

        RT_ASSERT(rt_atomic_load(&(pcpu->current_thread->cpus_lock_nest)) > 0);
        rt_atomic_sub(&(pcpu->current_thread->cpus_lock_nest), 1);
        if (pcpu->current_thread->cpus_lock_nest == 0)
        {
#if defined(RT_DEBUGING_SPINLOCK)
            _cpus_lock_owner = __OWNER_MAGIC;
            _cpus_lock_pc = RT_NULL;
#endif /* RT_DEBUGING_SPINLOCK */
#ifdef RT_USING_DEBUG
            critical_level = _cpus_critical_level;
            _cpus_critical_level = 0;
#endif /* RT_USING_DEBUG */
            rt_hw_spin_unlock(&_cpus_lock);
            rt_exit_critical_safe(critical_level);
        }
    }
    rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_cpus_unlock);
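
/*
 * A minimal usage sketch (an assumption, not part of the original file): take
 * the global cpus lock around a short system-wide critical section. The lock
 * nests per thread, so code that already holds it may take it again safely.
 * demo_global_update and demo_update_shared_state are hypothetical names.
 *
 *     static void demo_global_update(void)
 *     {
 *         rt_base_t level;
 *
 *         level = rt_cpus_lock();
 *         demo_update_shared_state();
 *         rt_cpus_unlock(level);
 *     }
 */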

/**
 * This function is invoked by the scheduler.
 * It will restore the lock state to whatever the thread's counter expects.
 * If the target thread has not locked the cpus, the cpus lock is released.
 *
 * @param thread is a pointer to the target thread.
 */
void rt_cpus_lock_status_restore(struct rt_thread *thread)
{
#if defined(ARCH_MM_MMU) && defined(RT_USING_SMART)
    lwp_aspace_switch(thread);
#endif
    rt_sched_post_ctx_switch(thread);
}
RTM_EXPORT(rt_cpus_lock_status_restore);

#endif /* RT_USING_SMP */