cpu.c

/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2018-10-30     Bernard      The first version
 */

#include <rtthread.h>
#include <rthw.h>

#ifdef RT_USING_SMP
/***********************************
 * disable scheduler
 ***********************************/
static void rt_preempt_disable(void)
{
    register rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_cpu_self()->current_thread;
    if (!current_thread)
    {
        rt_hw_local_irq_enable(level);
        return;
    }

    /* lock scheduler for local cpu */
    current_thread->scheduler_lock_nest ++;

    /* enable interrupt */
    rt_hw_local_irq_enable(level);
}

/***********************************
 * restore scheduler
 ***********************************/
static void rt_preempt_enable(void)
{
    register rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_cpu_self()->current_thread;
    if (!current_thread)
    {
        rt_hw_local_irq_enable(level);
        return;
    }

    /* unlock scheduler for local cpu */
    current_thread->scheduler_lock_nest --;

    rt_schedule();

    /* enable interrupt */
    rt_hw_local_irq_enable(level);
}
#endif
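
/*
 * Note: the rt_spin_lock()/rt_spin_unlock() wrappers below bracket the
 * hardware spinlock with the per-thread scheduler_lock_nest counter above,
 * so the local scheduler will not switch away from a thread while it is
 * holding a spinlock.
 */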

void rt_spin_lock_init(rt_spinlock_t *lock)
{
#ifdef RT_USING_SMP
    rt_hw_spin_lock_init(&lock->lock);
#endif
}
RTM_EXPORT(rt_spin_lock_init)

void rt_spin_lock(rt_spinlock_t *lock)
{
#ifdef RT_USING_SMP
    rt_preempt_disable();
    rt_hw_spin_lock(&lock->lock);
#else
    rt_enter_critical();
#endif
}
RTM_EXPORT(rt_spin_lock)

void rt_spin_unlock(rt_spinlock_t *lock)
{
#ifdef RT_USING_SMP
    rt_hw_spin_unlock(&lock->lock);
    rt_preempt_enable();
#else
    rt_exit_critical();
#endif
}
RTM_EXPORT(rt_spin_unlock)
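
/*
 * Usage sketch (illustrative, not part of this file): a spinlock is
 * typically a static or member object, initialized once and then used to
 * protect short critical sections shared between CPUs.  The names
 * demo_lock and shared_counter below are hypothetical.
 *
 *     static rt_spinlock_t demo_lock;
 *     static int shared_counter;
 *
 *     void demo_init(void)
 *     {
 *         rt_spin_lock_init(&demo_lock);
 *     }
 *
 *     void demo_increment(void)
 *     {
 *         rt_spin_lock(&demo_lock);
 *         shared_counter++;
 *         rt_spin_unlock(&demo_lock);
 *     }
 */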

rt_base_t rt_spin_lock_irqsave(rt_spinlock_t *lock)
{
    unsigned long level;

#ifdef RT_USING_SMP
    rt_preempt_disable();

    level = rt_hw_local_irq_disable();
    rt_hw_spin_lock(&lock->lock);

    return level;
#else
    return rt_hw_interrupt_disable();
#endif
}
RTM_EXPORT(rt_spin_lock_irqsave)

void rt_spin_unlock_irqrestore(rt_spinlock_t *lock, rt_base_t level)
{
#ifdef RT_USING_SMP
    rt_hw_spin_unlock(&lock->lock);
    rt_hw_local_irq_enable(level);

    rt_preempt_enable();
#else
    rt_hw_interrupt_enable(level);
#endif
}
RTM_EXPORT(rt_spin_unlock_irqrestore)
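
/*
 * Usage sketch (illustrative, not part of this file): the *_irqsave variant
 * also masks local interrupts and returns the previous interrupt state,
 * which must be handed back to rt_spin_unlock_irqrestore().  Use it when
 * the protected data is also touched from interrupt context.  The critical
 * section below runs with local IRQs masked; demo_lock and shared_counter
 * are the same hypothetical names as above.
 *
 *     rt_base_t level;
 *
 *     level = rt_spin_lock_irqsave(&demo_lock);
 *     shared_counter++;
 *     rt_spin_unlock_irqrestore(&demo_lock, level);
 */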

#ifdef RT_USING_SMP

static struct rt_cpu rt_cpus[RT_CPUS_NR];
rt_hw_spinlock_t _cpus_lock;

/**
 * This function will return the current cpu.
 */
struct rt_cpu *rt_cpu_self(void)
{
    return &rt_cpus[rt_hw_cpu_id()];
}

struct rt_cpu *rt_cpu_index(int index)
{
    return &rt_cpus[index];
}
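
/*
 * Usage sketch (illustrative): per-CPU state can be inspected by index,
 * for example to check which thread each CPU is currently running.
 *
 *     int i;
 *
 *     for (i = 0; i < RT_CPUS_NR; i++)
 *     {
 *         struct rt_cpu *pcpu = rt_cpu_index(i);
 *
 *         if (pcpu->current_thread != RT_NULL)
 *         {
 *             ... inspect pcpu->current_thread ...
 *         }
 *     }
 */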

/**
 * This function will lock the scheduler on all CPUs and disable the local irq.
 */
rt_base_t rt_cpus_lock(void)
{
    rt_base_t level;
    struct rt_cpu* pcpu;

    level = rt_hw_local_irq_disable();

    pcpu = rt_cpu_self();
    if (pcpu->current_thread != RT_NULL)
    {
        register rt_uint16_t lock_nest = pcpu->current_thread->cpus_lock_nest;

        pcpu->current_thread->cpus_lock_nest++;
        if (lock_nest == 0)
        {
            pcpu->current_thread->scheduler_lock_nest++;
            rt_hw_spin_lock(&_cpus_lock);
        }
    }

    return level;
}
RTM_EXPORT(rt_cpus_lock);

/**
 * This function will unlock the scheduler on all CPUs and restore the local irq.
 */
void rt_cpus_unlock(rt_base_t level)
{
    struct rt_cpu* pcpu = rt_cpu_self();

    if (pcpu->current_thread != RT_NULL)
    {
        pcpu->current_thread->cpus_lock_nest--;

        if (pcpu->current_thread->cpus_lock_nest == 0)
        {
            pcpu->current_thread->scheduler_lock_nest--;
            rt_hw_spin_unlock(&_cpus_lock);
        }
    }
    rt_hw_local_irq_enable(level);
}
RTM_EXPORT(rt_cpus_unlock);
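
/*
 * Usage sketch (illustrative): rt_cpus_lock()/rt_cpus_unlock() protect data
 * shared by every CPU (e.g. scheduler-wide bookkeeping).  The calls nest via
 * cpus_lock_nest, so a caller does not need to know whether the current
 * thread already holds the lock.
 *
 *     rt_base_t level;
 *
 *     level = rt_cpus_lock();
 *     ... touch data shared by all CPUs ...
 *     rt_cpus_unlock(level);
 */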

/**
 * This function is invoked by the scheduler.
 * It will restore the lock state to whatever the thread's counter expects.
 * If the target thread has not locked the cpus, it unlocks the cpus lock.
 */
void rt_cpus_lock_status_restore(struct rt_thread *thread)
{
    struct rt_cpu* pcpu = rt_cpu_self();

    pcpu->current_thread = thread;
    if (!thread->cpus_lock_nest)
    {
        rt_hw_spin_unlock(&_cpus_lock);
    }
}
RTM_EXPORT(rt_cpus_lock_status_restore);

#endif