/* idle.c */

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-23     Bernard      the first version
 * 2010-11-10     Bernard      add cleanup callback function in thread exit.
 * 2012-12-29     Bernard      fix compiling warning.
 * 2013-12-21     Grissiom     let rt_thread_idle_excute loop until there is no
 *                             dead thread.
 * 2016-08-09     ArdaFu       add method to get the handler of the idle thread.
 * 2018-02-07     Bernard      lock scheduler to protect tid->cleanup.
 * 2018-07-14     armink       add idle hook list
 * 2018-11-22     Jesven       add per cpu idle task
 *                             combine the code of primary and secondary cpu
 */
#include <rthw.h>
#include <rtthread.h>

#ifdef RT_USING_MODULE
#include <dlmodule.h>
#endif /* RT_USING_MODULE */

#ifdef RT_USING_HOOK
#ifndef RT_USING_IDLE_HOOK
#define RT_USING_IDLE_HOOK
#endif /* RT_USING_IDLE_HOOK */
#endif /* RT_USING_HOOK */

#ifndef IDLE_THREAD_STACK_SIZE
#if defined (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP)
#define IDLE_THREAD_STACK_SIZE  256
#else
#define IDLE_THREAD_STACK_SIZE  128
#endif /* (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP) */
#endif /* IDLE_THREAD_STACK_SIZE */

#ifdef RT_USING_SMP
#define _CPUS_NR                RT_CPUS_NR
#else
#define _CPUS_NR                1
#endif /* RT_USING_SMP */
static rt_list_t _rt_thread_defunct = RT_LIST_OBJECT_INIT(_rt_thread_defunct);

static struct rt_thread idle[_CPUS_NR];
ALIGN(RT_ALIGN_SIZE)
static rt_uint8_t rt_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE];

#ifdef RT_USING_SMP
#ifndef SYSTEM_THREAD_STACK_SIZE
#define SYSTEM_THREAD_STACK_SIZE IDLE_THREAD_STACK_SIZE
#endif
static struct rt_thread rt_system_thread;
ALIGN(RT_ALIGN_SIZE)
static rt_uint8_t rt_system_stack[SYSTEM_THREAD_STACK_SIZE];
static struct rt_semaphore system_sem;
#endif
#ifdef RT_USING_IDLE_HOOK
#ifndef RT_IDLE_HOOK_LIST_SIZE
#define RT_IDLE_HOOK_LIST_SIZE  4
#endif /* RT_IDLE_HOOK_LIST_SIZE */

static void (*idle_hook_list[RT_IDLE_HOOK_LIST_SIZE])(void);

/**
 * @ingroup Hook
 * This function sets a hook function to the idle thread loop. When the system
 * performs the idle loop, this hook function will be invoked.
 *
 * @param hook the specified hook function
 *
 * @return RT_EOK: set OK
 *         -RT_EFULL: hook list is full
 *
 * @note the hook function must be simple and must never block or suspend.
 */
rt_err_t rt_thread_idle_sethook(void (*hook)(void))
{
    rt_size_t i;
    rt_base_t level;
    rt_err_t ret = -RT_EFULL;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
    {
        if (idle_hook_list[i] == RT_NULL)
        {
            idle_hook_list[i] = hook;
            ret = RT_EOK;
            break;
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    return ret;
}
/**
 * delete the idle hook on hook list
 *
 * @param hook the specified hook function
 *
 * @return RT_EOK: delete OK
 *         -RT_ENOSYS: hook was not found
 */
rt_err_t rt_thread_idle_delhook(void (*hook)(void))
{
    rt_size_t i;
    rt_base_t level;
    rt_err_t ret = -RT_ENOSYS;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
    {
        if (idle_hook_list[i] == hook)
        {
            idle_hook_list[i] = RT_NULL;
            ret = RT_EOK;
            break;
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    return ret;
}
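
/* Usage sketch (illustrative, not part of the kernel): an application can
 * register a lightweight callback that runs on every pass of the idle loop,
 * for example to feed a watchdog. The name my_idle_hook is hypothetical.
 *
 *     static void my_idle_hook(void)
 *     {
 *         // keep it short: it runs in the idle thread and must never block
 *     }
 *
 *     rt_thread_idle_sethook(my_idle_hook);   // -RT_EFULL if the hook list is full
 *     ...
 *     rt_thread_idle_delhook(my_idle_hook);   // -RT_ENOSYS if the hook was not found
 */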
#endif /* RT_USING_IDLE_HOOK */

#ifdef RT_USING_MODULE
/* Return whether there is a defunct thread to be deleted. */
rt_inline int _idle_has_defunct_thread(void)
{
    /* The rt_list_isempty has prototype of "int rt_list_isempty(const rt_list_t *l)",
     * so the compiler may assume that the _rt_thread_defunct list does not
     * change within rt_defunct_execute and optimize the "while" loop into
     * an "if".
     *
     * So add the volatile qualifier here. */
    const volatile rt_list_t *l = (const volatile rt_list_t *)&_rt_thread_defunct;

    return l->next != l;
}
#endif /* RT_USING_MODULE */
/* enqueue a thread to the defunct queue.
 * It must be called between rt_hw_interrupt_disable and rt_hw_interrupt_enable.
 */
void rt_thread_defunct_enqueue(rt_thread_t thread)
{
    rt_list_insert_after(&_rt_thread_defunct, &thread->tlist);
#ifdef RT_USING_SMP
    rt_sem_release(&system_sem);
#endif
}
/* dequeue a thread from the defunct queue.
 * It must be called between rt_hw_interrupt_disable and rt_hw_interrupt_enable.
 */
rt_thread_t rt_thread_defunct_dequeue(void)
{
    rt_thread_t thread = RT_NULL;
    rt_list_t *l = &_rt_thread_defunct;

    if (l->next != l)
    {
        thread = rt_list_entry(l->next,
                               struct rt_thread,
                               tlist);
        rt_list_remove(&(thread->tlist));
    }
    return thread;
}
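
/* Caller pattern sketch (illustrative): both queue helpers above expect the
 * caller to hold the interrupt lock, e.g.
 *
 *     rt_base_t level = rt_hw_interrupt_disable();
 *     rt_thread_defunct_enqueue(thread);      // or rt_thread_defunct_dequeue()
 *     rt_hw_interrupt_enable(level);
 *
 * rt_defunct_execute() below dequeues under the same lock.
 */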
/**
 * @ingroup Thread
 *
 * This function will perform the system background job when the system is idle.
 */
static void rt_defunct_execute(void)
{
    /* Loop until there is no dead thread. So one call to rt_defunct_execute
     * will do all the cleanups. */
    while (1)
    {
        rt_base_t lock;
        rt_thread_t thread;
        void (*cleanup)(struct rt_thread *tid);

#ifdef RT_USING_MODULE
        struct rt_dlmodule *module = RT_NULL;
#endif
        RT_DEBUG_NOT_IN_INTERRUPT;

        /* disable interrupt */
        lock = rt_hw_interrupt_disable();

#ifdef RT_USING_MODULE
        /* check whether the list is empty */
        if (!_idle_has_defunct_thread())
        {
            rt_hw_interrupt_enable(lock);
            break;
        }
        /* get the defunct thread */
        thread = rt_list_entry(_rt_thread_defunct.next,
                               struct rt_thread,
                               tlist);
        module = (struct rt_dlmodule *)thread->module_id;
        if (module)
        {
            dlmodule_destroy(module);
        }
        /* remove the defunct thread */
        rt_list_remove(&(thread->tlist));
#else
        thread = rt_thread_defunct_dequeue();
        if (!thread)
        {
            rt_hw_interrupt_enable(lock);
            break;
        }
#endif
        /* invoke thread cleanup */
        cleanup = thread->cleanup;
        if (cleanup != RT_NULL)
        {
            rt_hw_interrupt_enable(lock);
            cleanup(thread);
            lock = rt_hw_interrupt_disable();
        }

#ifdef RT_USING_SIGNALS
        rt_thread_free_sig(thread);
#endif

        /* if it's a system object, do not delete it, only detach it */
        if (rt_object_is_systemobject((rt_object_t)thread) == RT_TRUE)
        {
            /* detach this object */
            rt_object_detach((rt_object_t)thread);
            /* enable interrupt */
            rt_hw_interrupt_enable(lock);
        }
        else
        {
            rt_hw_interrupt_enable(lock);
#ifdef RT_USING_HEAP
            /* release the thread's stack */
            RT_KERNEL_FREE(thread->stack_addr);
            /* delete the thread object */
            rt_object_delete((rt_object_t)thread);
#endif
        }
    }
}
extern void rt_system_power_manager(void);
static void rt_thread_idle_entry(void *parameter)
{
#ifdef RT_USING_SMP
    if (rt_hw_cpu_id() != 0)
    {
        while (1)
        {
            rt_hw_secondary_cpu_idle_exec();
        }
    }
#endif /* RT_USING_SMP */

    while (1)
    {
#ifdef RT_USING_IDLE_HOOK
        rt_size_t i;
        void (*idle_hook)(void);

        for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
        {
            idle_hook = idle_hook_list[i];
            if (idle_hook != RT_NULL)
            {
                idle_hook();
            }
        }
#endif /* RT_USING_IDLE_HOOK */

#ifndef RT_USING_SMP
        rt_defunct_execute();
#endif /* RT_USING_SMP */

#ifdef RT_USING_PM
        rt_system_power_manager();
#endif /* RT_USING_PM */
    }
}
#ifdef RT_USING_SMP
static void rt_thread_system_entry(void *parameter)
{
    while (1)
    {
        rt_sem_take(&system_sem, RT_WAITING_FOREVER);
        rt_defunct_execute();
    }
}
#endif
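
/* Design note (added for clarity): under RT_USING_SMP the defunct-thread cleanup
 * runs in the dedicated "tsystem" thread above instead of the idle loop, because
 * that thread may block on system_sem while an idle thread must never suspend;
 * rt_thread_defunct_enqueue() releases the semaphore to wake it up.
 */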
/**
 * @ingroup SystemInit
 *
 * This function will initialize the idle thread, then start it.
 *
 * @note this function must be invoked during system initialization.
 */
void rt_thread_idle_init(void)
{
    rt_ubase_t i;
    char tidle_name[RT_NAME_MAX];

    for (i = 0; i < _CPUS_NR; i++)
    {
        rt_sprintf(tidle_name, "tidle%d", i);
        rt_thread_init(&idle[i],
                       tidle_name,
                       rt_thread_idle_entry,
                       RT_NULL,
                       &rt_thread_stack[i][0],
                       sizeof(rt_thread_stack[i]),
                       RT_THREAD_PRIORITY_MAX - 1,
                       32);
#ifdef RT_USING_SMP
        rt_thread_control(&idle[i], RT_THREAD_CTRL_BIND_CPU, (void *)i);
#endif /* RT_USING_SMP */
        /* startup */
        rt_thread_startup(&idle[i]);
    }

#ifdef RT_USING_SMP
    RT_ASSERT(RT_THREAD_PRIORITY_MAX > 2);

    rt_sem_init(&system_sem, "defunct", 1, RT_IPC_FLAG_FIFO);

    /* create the defunct-handling system thread */
    rt_thread_init(&rt_system_thread,
                   "tsystem",
                   rt_thread_system_entry,
                   RT_NULL,
                   rt_system_stack,
                   sizeof(rt_system_stack),
                   RT_THREAD_PRIORITY_MAX - 2,
                   32);
    /* startup */
    rt_thread_startup(&rt_system_thread);
#endif
}
/**
 * @ingroup Thread
 *
 * This function will get the handler of the idle thread.
 */
rt_thread_t rt_thread_idle_gethandler(void)
{
#ifdef RT_USING_SMP
    register int id = rt_hw_cpu_id();
#else
    register int id = 0;
#endif /* RT_USING_SMP */

    return (rt_thread_t)(&idle[id]);
}
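
/* Usage sketch (illustrative): the returned handler is the idle thread of the
 * current CPU and can be inspected like any other thread object, e.g.
 *
 *     rt_thread_t tid = rt_thread_idle_gethandler();
 *     rt_kprintf("idle thread: %s\n", tid->name);
 */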