/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * (scheduler_comm.c) Common API of scheduling routines.
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-18     Shell        Separate scheduling related codes from thread.c, scheduler_.*
 */

#define DBG_TAG           "kernel.sched"
#define DBG_LVL           DBG_INFO
#include <rtdbg.h>

#include <rtthread.h>
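
/**
 * @brief Initialize the scheduling context of a thread
 *
 * @param thread is the thread to be initialized
 * @param tick is the initial time slice of the thread
 * @param priority is the initial priority of the thread
 */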
void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
{
    /* setup thread status */
    RT_SCHED_CTX(thread).stat = RT_THREAD_INIT;

#ifdef RT_USING_SMP
    /* not bound to any cpu */
    RT_SCHED_CTX(thread).bind_cpu = RT_CPUS_NR;
    RT_SCHED_CTX(thread).oncpu = RT_CPU_DETACHED;
#endif /* RT_USING_SMP */

    rt_sched_thread_init_priv(thread, tick, priority);
}
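
/**
 * @brief Mark the thread timer of the target thread as started
 *
 * @note Caller must hold the scheduler lock
 *
 * @param thread is the target thread
 *
 * @return RT_EOK
 */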
rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    RT_SCHED_CTX(thread).sched_flag_ttmr_set = 1;
    return RT_EOK;
}
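
/**
 * @brief Stop the thread timer of the target thread if it was started;
 *        the timer flag is cleared whether or not the stop succeeds
 *
 * @note Caller must hold the scheduler lock
 *
 * @param thread is the target thread
 *
 * @return RT_EOK on success, otherwise the error code from rt_timer_stop()
 */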
rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread)
{
    rt_err_t error;
    RT_SCHED_DEBUG_IS_LOCKED;

    if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
    {
        error = rt_timer_stop(&thread->thread_timer);

        /* clear the timer flag whether or not the stop succeeded */
        RT_SCHED_CTX(thread).sched_flag_ttmr_set = 0;
    }
    else
    {
        error = RT_EOK;
    }
    return error;
}
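
/**
 * @brief Get the scheduling status of the target thread
 *
 * @note Caller must hold the scheduler lock
 *
 * @param thread is the target thread
 *
 * @return the thread status masked by RT_THREAD_STAT_MASK
 */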
rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    return RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK;
}
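
/**
 * @brief Get the current priority of the target thread
 *
 * @note Caller must hold the scheduler lock
 *
 * @param thread is the target thread
 *
 * @return the current priority
 */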
rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    return RT_SCHED_PRIV(thread).current_priority;
}
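
/**
 * @brief Get the initial priority of the target thread
 *
 * @param thread is the target thread
 *
 * @return the initial priority
 */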
rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread)
{
    /* read-only field, so locking is unnecessary */
    return RT_SCHED_PRIV(thread).init_priority;
}

/**
 * @note Caller must hold the scheduler lock
 */
rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    return (RT_SCHED_CTX(thread).stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK;
}
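
/**
 * @brief Mark the target thread as closed
 *
 * @note Caller must hold the scheduler lock
 *
 * @param thread is the target thread
 *
 * @return RT_EOK
 */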
rt_err_t rt_sched_thread_close(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    RT_SCHED_CTX(thread).stat = RT_THREAD_CLOSE;
    return RT_EOK;
}
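
/**
 * @brief Reload the time slice of the target thread and mark it as yielded
 *
 * @note Caller must hold the scheduler lock
 *
 * @param thread is the target thread
 *
 * @return RT_EOK
 */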
rt_err_t rt_sched_thread_yield(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;

    RT_SCHED_PRIV(thread).remaining_tick = RT_SCHED_PRIV(thread).init_tick;
    RT_SCHED_CTX(thread).stat |= RT_THREAD_STAT_YIELD;

    return RT_EOK;
}
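
/**
 * @brief Resume a suspended thread and insert it into the ready queue
 *
 * @note Caller must hold the scheduler lock
 *
 * @param thread is the target thread
 *
 * @return RT_EOK on success; -RT_EINVAL if the thread is not suspended,
 *         or the error code from stopping its timeout timer
 */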
rt_err_t rt_sched_thread_ready(struct rt_thread *thread)
{
    rt_err_t error;

    RT_SCHED_DEBUG_IS_LOCKED;

    if (!rt_sched_thread_is_suspended(thread))
    {
        /* failed to proceed, possibly due to a race condition */
        error = -RT_EINVAL;
    }
    else
    {
        if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
        {
            /**
             * Quiet the timeout timer first if it is set. Do not continue if
             * this fails, because it probably means a timeout ISR is racing
             * to resume the thread before us.
             */
            error = rt_sched_thread_timer_stop(thread);
        }
        else
        {
            error = RT_EOK;
        }

        if (!error)
        {
            /* remove from suspend list */
            rt_list_remove(&RT_THREAD_LIST_NODE(thread));

#ifdef RT_USING_SMART
            thread->wakeup_handle.func = RT_NULL;
#endif

            /* insert into the ready list of the scheduler */
            rt_sched_insert_thread(thread);
        }
    }
    return error;
}
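
/**
 * @brief Consume the time slice of the current thread; when the time slice
 *        is exhausted, yield the thread and request a rescheduling
 *
 * @param tick is the number of ticks to consume
 *
 * @return RT_EOK
 */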
rt_err_t rt_sched_tick_increase(rt_tick_t tick)
{
    struct rt_thread *thread;
    rt_sched_lock_level_t slvl;

    thread = rt_thread_self();

    rt_sched_lock(&slvl);

    if (RT_SCHED_PRIV(thread).remaining_tick > tick)
    {
        RT_SCHED_PRIV(thread).remaining_tick -= tick;
    }
    else
    {
        RT_SCHED_PRIV(thread).remaining_tick = 0;
    }

    if (RT_SCHED_PRIV(thread).remaining_tick)
    {
        rt_sched_unlock(slvl);
    }
    else
    {
        rt_sched_thread_yield(thread);

        /* request a rescheduling even though we are probably in an ISR */
        rt_sched_unlock_n_resched(slvl);
    }

    return RT_EOK;
}

/**
 * @brief Update priority of the target thread
 */
rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority)
{
    RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
    RT_SCHED_DEBUG_IS_LOCKED;

    /* for ready thread, change queue; otherwise simply update the priority */
    if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
    {
        /* remove thread from schedule queue first */
        rt_sched_remove_thread(thread);

        /* change thread priority */
        RT_SCHED_PRIV(thread).current_priority = priority;

        /* recalculate priority attribute */
#if RT_THREAD_PRIORITY_MAX > 32
        RT_SCHED_PRIV(thread).number      = RT_SCHED_PRIV(thread).current_priority >> 3;            /* 5 bit */
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
        RT_SCHED_PRIV(thread).high_mask   = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07);   /* 3 bit */
#else
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        RT_SCHED_CTX(thread).stat = RT_THREAD_INIT;

        /* insert thread to schedule queue again */
        rt_sched_insert_thread(thread);
    }
    else
    {
        RT_SCHED_PRIV(thread).current_priority = priority;

        /* recalculate priority attribute */
#if RT_THREAD_PRIORITY_MAX > 32
        RT_SCHED_PRIV(thread).number      = RT_SCHED_PRIV(thread).current_priority >> 3;            /* 5 bit */
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
        RT_SCHED_PRIV(thread).high_mask   = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07);   /* 3 bit */
#else
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
    }

    return RT_EOK;
}

#ifdef RT_USING_OVERFLOW_CHECK
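/**
 * @brief Check the stack of the target thread for overflow and warn when the
 *        stack pointer gets close to the stack boundary
 *
 * @param thread is the target thread
 */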
void rt_scheduler_stack_check(struct rt_thread *thread)
{
    RT_ASSERT(thread != RT_NULL);

#ifdef RT_USING_SMART
#ifndef ARCH_MM_MMU
    struct rt_lwp *lwp = thread ? (struct rt_lwp *)thread->lwp : 0;

    /* if the stack pointer is located in the user data section, skip the stack check */
    if (lwp && ((rt_uint32_t)thread->sp > (rt_uint32_t)lwp->data_entry &&
        (rt_uint32_t)thread->sp <= (rt_uint32_t)lwp->data_entry + (rt_uint32_t)lwp->data_size))
    {
        return;
    }
#endif /* not defined ARCH_MM_MMU */
#endif /* RT_USING_SMART */

#ifndef RT_USING_HW_STACK_GUARD
#ifdef ARCH_CPU_STACK_GROWS_UPWARD
    if (*((rt_uint8_t *)((rt_uintptr_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
#else
    if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
        (rt_uintptr_t)thread->sp <= (rt_uintptr_t)thread->stack_addr ||
        (rt_uintptr_t)thread->sp >
        (rt_uintptr_t)thread->stack_addr + (rt_uintptr_t)thread->stack_size)
    {
        rt_base_t dummy = 1;

        LOG_E("thread:%s stack overflow\n", thread->parent.name);

        while (dummy);
    }
#endif /* RT_USING_HW_STACK_GUARD */
#ifdef ARCH_CPU_STACK_GROWS_UPWARD
#ifndef RT_USING_HW_STACK_GUARD
    else if ((rt_uintptr_t)thread->sp > ((rt_uintptr_t)thread->stack_addr + thread->stack_size))
#else
    if ((rt_uintptr_t)thread->sp > ((rt_uintptr_t)thread->stack_addr + thread->stack_size))
#endif
    {
        LOG_W("warning: %s stack is close to the top of stack address.\n",
              thread->parent.name);
    }
#else
#ifndef RT_USING_HW_STACK_GUARD
    else if ((rt_uintptr_t)thread->sp <= ((rt_uintptr_t)thread->stack_addr + 32))
#else
    if ((rt_uintptr_t)thread->sp <= ((rt_uintptr_t)thread->stack_addr + 32))
#endif
    {
        LOG_W("warning: %s stack is close to end of stack address.\n",
              thread->parent.name);
    }
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
}
#endif /* RT_USING_OVERFLOW_CHECK */