scheduler_comm.c

/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * (scheduler_comm.c) Common API of scheduling routines.
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-18     Shell        Separate scheduling related codes from thread.c, scheduler_.*
 */

#define DBG_TAG           "kernel.sched"
#define DBG_LVL           DBG_INFO
#include <rtdbg.h>

#include <rtthread.h>

void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
{
    /* setup thread status */
    RT_SCHED_CTX(thread).stat = RT_THREAD_INIT;

#ifdef RT_USING_SMP
    /* not bound to any cpu */
    RT_SCHED_CTX(thread).bind_cpu = RT_CPUS_NR;
    RT_SCHED_CTX(thread).oncpu = RT_CPU_DETACHED;
#endif /* RT_USING_SMP */

    rt_sched_thread_init_priv(thread, tick, priority);
}
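
/*
 * Illustrative note (the caller named here is an assumption, not part of this
 * file): rt_sched_thread_init_ctx() is intended to run once from the thread
 * setup path, after the stack and entry point have been prepared, e.g.
 *
 *     // inside a thread-creation helper such as rt_thread_init()
 *     rt_sched_thread_init_ctx(thread, tick, priority);   // INIT state, unbound on SMP
 */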

rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    RT_SCHED_CTX(thread).sched_flag_ttmr_set = 1;
    return RT_EOK;
}

rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread)
{
    rt_err_t error;
    RT_SCHED_DEBUG_IS_LOCKED;

    if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
    {
        error = rt_timer_stop(&thread->thread_timer);

        /* clear the timer flag regardless of whether the stop succeeded */
        RT_SCHED_CTX(thread).sched_flag_ttmr_set = 0;
    }
    else
    {
        error = RT_EOK;
    }
    return error;
}

rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    return RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK;
}

rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    return RT_SCHED_PRIV(thread).current_priority;
}

rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread)
{
    /* read-only field, so the lock is unnecessary */
    return RT_SCHED_PRIV(thread).init_priority;
}

/**
 * @note Caller must hold the scheduler lock
 */
rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    return (RT_SCHED_CTX(thread).stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK;
}

rt_err_t rt_sched_thread_close(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    RT_SCHED_CTX(thread).stat = RT_THREAD_CLOSE;
    return RT_EOK;
}

rt_err_t rt_sched_thread_yield(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;

    RT_SCHED_PRIV(thread).remaining_tick = RT_SCHED_PRIV(thread).init_tick;
    RT_SCHED_CTX(thread).stat |= RT_THREAD_STAT_YIELD;

    return RT_EOK;
}

rt_err_t rt_sched_thread_ready(struct rt_thread *thread)
{
    rt_err_t error;

    RT_SCHED_DEBUG_IS_LOCKED;

    if (!rt_sched_thread_is_suspended(thread))
    {
        /* failed to proceed, possibly due to a race condition */
        error = -RT_EINVAL;
    }
    else
    {
        if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
        {
            /**
             * Stop the timeout timer first if it is set, and don't continue if
             * that fails, because it probably means a timeout ISR is racing to
             * resume the thread before us.
             */
            error = rt_sched_thread_timer_stop(thread);
        }
        else
        {
            error = RT_EOK;
        }

        if (!error)
        {
            /* remove from the suspend list */
            rt_list_remove(&RT_THREAD_LIST_NODE(thread));

#ifdef RT_USING_SMART
            thread->wakeup_handle.func = RT_NULL;
#endif

            /* insert into the schedule ready queue */
            rt_sched_insert_thread(thread);
        }
    }
    return error;
}
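
/*
 * Illustrative usage (not from this file; the caller shown is an assumption):
 * a resume path is expected to take the scheduler lock, mark the thread ready,
 * then release the lock before requesting a reschedule, e.g.
 *
 *     rt_sched_lock_level_t slvl;
 *     rt_sched_lock(&slvl);
 *     error = rt_sched_thread_ready(thread);   // move thread to the ready queue
 *     rt_sched_unlock(slvl);
 */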

rt_err_t rt_sched_tick_increase(rt_tick_t tick)
{
    struct rt_thread *thread;
    rt_sched_lock_level_t slvl;

    thread = rt_thread_self();

    rt_sched_lock(&slvl);

    if (RT_SCHED_PRIV(thread).remaining_tick > tick)
    {
        RT_SCHED_PRIV(thread).remaining_tick -= tick;
    }
    else
    {
        RT_SCHED_PRIV(thread).remaining_tick = 0;
    }

    if (RT_SCHED_PRIV(thread).remaining_tick)
    {
        rt_sched_unlock(slvl);
    }
    else
    {
        rt_sched_thread_yield(thread);

        /* request a rescheduling even though we are probably in an ISR */
        rt_sched_unlock_n_resched(slvl);
    }

    return RT_EOK;
}
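
/*
 * Illustrative usage (an assumption, not part of this file): the kernel tick
 * handler is expected to charge elapsed ticks against the running thread's
 * time slice, roughly:
 *
 *     void rt_tick_increase(void)        // hypothetical caller for illustration
 *     {
 *         // ... advance the global tick counter ...
 *         rt_sched_tick_increase(1);     // consume one tick; yields when the slice is used up
 *     }
 */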

/**
 * @brief Update priority of the target thread
 */
static rt_err_t _rt_sched_update_priority(struct rt_thread *thread, rt_uint8_t priority, rt_bool_t update_init_prio)
{
    RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
    RT_SCHED_DEBUG_IS_LOCKED;

    /* for a ready thread, change the queue; otherwise simply update the priority */
    if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
    {
        /* remove thread from schedule queue first */
        rt_sched_remove_thread(thread);

        /* change thread priority */
        if (update_init_prio)
        {
            RT_SCHED_PRIV(thread).init_priority = priority;
        }
        RT_SCHED_PRIV(thread).current_priority = priority;

        /* recalculate priority attribute */
#if RT_THREAD_PRIORITY_MAX > 32
        RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3;             /* 5bit */
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
        RT_SCHED_PRIV(thread).high_mask = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
#else
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        RT_SCHED_CTX(thread).stat = RT_THREAD_INIT;

        /* insert thread to schedule queue again */
        rt_sched_insert_thread(thread);
    }
    else
    {
        if (update_init_prio)
        {
            RT_SCHED_PRIV(thread).init_priority = priority;
        }
        RT_SCHED_PRIV(thread).current_priority = priority;

        /* recalculate priority attribute */
#if RT_THREAD_PRIORITY_MAX > 32
        RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3;             /* 5bit */
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
        RT_SCHED_PRIV(thread).high_mask = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07); /* 3bit */
#else
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
    }

    return RT_EOK;
}

/**
 * @brief Update the current priority of the target thread
 */
rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority)
{
    return _rt_sched_update_priority(thread, priority, RT_FALSE);
}

/**
 * @brief Reset the priority of the target thread (updates both the initial and current priority)
 */
rt_err_t rt_sched_thread_reset_priority(struct rt_thread *thread, rt_uint8_t priority)
{
    return _rt_sched_update_priority(thread, priority, RT_TRUE);
}
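
/*
 * Illustrative usage (the caller shown is an assumption): both helpers expect
 * the scheduler lock to be held, e.g.
 *
 *     rt_sched_lock_level_t slvl;
 *     rt_sched_lock(&slvl);
 *     rt_sched_thread_change_priority(thread, prio);   // temporary change, e.g. priority inheritance
 *     rt_sched_unlock(slvl);
 *
 * rt_sched_thread_reset_priority() differs only in that it also rewrites
 * init_priority, so the new value survives a later reset to the initial
 * priority.
 */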

#ifdef RT_USING_OVERFLOW_CHECK
void rt_scheduler_stack_check(struct rt_thread *thread)
{
    RT_ASSERT(thread != RT_NULL);

#ifdef RT_USING_SMART
#ifndef ARCH_MM_MMU
    struct rt_lwp *lwp = thread ? (struct rt_lwp *)thread->lwp : 0;

    /* if the stack pointer is located in the user data section, skip the stack check */
    if (lwp && ((rt_uint32_t)thread->sp > (rt_uint32_t)lwp->data_entry &&
                (rt_uint32_t)thread->sp <= (rt_uint32_t)lwp->data_entry + (rt_uint32_t)lwp->data_size))
    {
        return;
    }
#endif /* not defined ARCH_MM_MMU */
#endif /* RT_USING_SMART */

#ifndef RT_USING_HW_STACK_GUARD
#ifdef ARCH_CPU_STACK_GROWS_UPWARD
    if (*((rt_uint8_t *)((rt_uintptr_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
#else
    if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
        (rt_uintptr_t)thread->sp <= (rt_uintptr_t)thread->stack_addr ||
        (rt_uintptr_t)thread->sp >
        (rt_uintptr_t)thread->stack_addr + (rt_uintptr_t)thread->stack_size)
    {
        rt_base_t dummy = 1;

        LOG_E("thread:%s stack overflow\n", thread->parent.name);

        while (dummy);
    }
#endif /* RT_USING_HW_STACK_GUARD */

#ifdef ARCH_CPU_STACK_GROWS_UPWARD
#ifndef RT_USING_HW_STACK_GUARD
    else if ((rt_uintptr_t)thread->sp > ((rt_uintptr_t)thread->stack_addr + thread->stack_size))
#else
    if ((rt_uintptr_t)thread->sp > ((rt_uintptr_t)thread->stack_addr + thread->stack_size))
#endif
    {
        LOG_W("warning: %s stack is close to the top of stack address.\n",
              thread->parent.name);
    }
#else
#ifndef RT_USING_HW_STACK_GUARD
    else if ((rt_uintptr_t)thread->sp <= ((rt_uintptr_t)thread->stack_addr + 32))
#else
    if ((rt_uintptr_t)thread->sp <= ((rt_uintptr_t)thread->stack_addr + 32))
#endif
    {
        LOG_W("warning: %s stack is close to end of stack address.\n",
              thread->parent.name);
    }
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
}
#endif /* RT_USING_OVERFLOW_CHECK */