  1. /*
  2. * Copyright (c) 2006-2022, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2006-03-23 Bernard the first version
  9. * 2010-11-10 Bernard add cleanup callback function in thread exit.
  10. * 2012-12-29 Bernard fix compiling warning.
  11. * 2013-12-21 Grissiom let rt_thread_idle_excute loop until there is no
  12. * dead thread.
  13. * 2016-08-09 ArdaFu add method to get the handler of the idle thread.
  14. * 2018-02-07 Bernard lock scheduler to protect tid->cleanup.
  15. * 2018-07-14 armink add idle hook list
  16. * 2018-11-22 Jesven add per cpu idle task
  17. * combine the code of primary and secondary cpu
  18. * 2021-11-15 THEWON Remove duplicate work between idle and _thread_exit
  19. * 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
  20. * 2023-11-07 xqyjlj fix thread exit
  21. * 2023-12-10 xqyjlj add _hook_spinlock
  22. */
  23. #include <rthw.h>
  24. #include <rtthread.h>
  25. #ifdef RT_USING_MODULE
  26. #include <dlmodule.h>
  27. #endif /* RT_USING_MODULE */
/* enabling RT_USING_HOOK implies the idle-hook facility */
#ifdef RT_USING_HOOK
#ifndef RT_USING_IDLE_HOOK
#define RT_USING_IDLE_HOOK
#endif /* RT_USING_IDLE_HOOK */
#endif /* RT_USING_HOOK */

/* default idle stack size: larger when hooks or the heap cleanup path run on it */
#ifndef IDLE_THREAD_STACK_SIZE
#if defined (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP)
#define IDLE_THREAD_STACK_SIZE 256
#else
#define IDLE_THREAD_STACK_SIZE 128
#endif /* (RT_USING_IDLE_HOOK) || defined(RT_USING_HEAP) */
#endif /* IDLE_THREAD_STACK_SIZE */

#define _CPUS_NR RT_CPUS_NR

/* queue of exited ("defunct") threads waiting to be reclaimed */
static rt_list_t _rt_thread_defunct = RT_LIST_OBJECT_INIT(_rt_thread_defunct);
/* protects _rt_thread_defunct (initialized in rt_thread_idle_init on SMP builds) */
static struct rt_spinlock _defunct_spinlock;

/* one statically allocated idle thread (and stack) per CPU */
static struct rt_thread idle_thread[_CPUS_NR];
rt_align(RT_ALIGN_SIZE)
static rt_uint8_t idle_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE];

#ifdef RT_USING_SMP
#ifndef SYSTEM_THREAD_STACK_SIZE
#define SYSTEM_THREAD_STACK_SIZE IDLE_THREAD_STACK_SIZE
#endif
/* on SMP, a dedicated "tsystem" thread reclaims defunct threads,
 * woken via system_sem instead of polling from the idle loop */
static struct rt_thread rt_system_thread;
rt_align(RT_ALIGN_SIZE)
static rt_uint8_t rt_system_stack[SYSTEM_THREAD_STACK_SIZE];
static struct rt_semaphore system_sem;
#endif

#ifdef RT_USING_IDLE_HOOK
#ifndef RT_IDLE_HOOK_LIST_SIZE
#define RT_IDLE_HOOK_LIST_SIZE 4
#endif /* RT_IDLE_HOOK_LIST_SIZE */
/* fixed-size table of idle hooks; RT_NULL marks a free slot */
static void (*idle_hook_list[RT_IDLE_HOOK_LIST_SIZE])(void);
/* protects idle_hook_list registration/removal */
static struct rt_spinlock _hook_spinlock;
  61. /**
  62. * @brief This function sets a hook function to idle thread loop. When the system performs
  63. * idle loop, this hook function should be invoked.
  64. *
  65. * @param hook the specified hook function.
  66. *
  67. * @return RT_EOK: set OK.
  68. * -RT_EFULL: hook list is full.
  69. *
  70. * @note the hook function must be simple and never be blocked or suspend.
  71. */
  72. rt_err_t rt_thread_idle_sethook(void (*hook)(void))
  73. {
  74. rt_size_t i;
  75. rt_err_t ret = -RT_EFULL;
  76. rt_base_t level;
  77. level = rt_spin_lock_irqsave(&_hook_spinlock);
  78. for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
  79. {
  80. if (idle_hook_list[i] == RT_NULL)
  81. {
  82. idle_hook_list[i] = hook;
  83. ret = RT_EOK;
  84. break;
  85. }
  86. }
  87. rt_spin_unlock_irqrestore(&_hook_spinlock, level);
  88. return ret;
  89. }
  90. /**
  91. * @brief delete the idle hook on hook list.
  92. *
  93. * @param hook the specified hook function.
  94. *
  95. * @return RT_EOK: delete OK.
  96. * -RT_ENOSYS: hook was not found.
  97. */
  98. rt_err_t rt_thread_idle_delhook(void (*hook)(void))
  99. {
  100. rt_size_t i;
  101. rt_err_t ret = -RT_ENOSYS;
  102. rt_base_t level;
  103. level = rt_spin_lock_irqsave(&_hook_spinlock);
  104. for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
  105. {
  106. if (idle_hook_list[i] == hook)
  107. {
  108. idle_hook_list[i] = RT_NULL;
  109. ret = RT_EOK;
  110. break;
  111. }
  112. }
  113. rt_spin_unlock_irqrestore(&_hook_spinlock, level);
  114. return ret;
  115. }
  116. #endif /* RT_USING_IDLE_HOOK */
  117. /**
  118. * @brief Enqueue a thread to defunct queue.
  119. *
  120. * @param thread the thread to be enqueued.
  121. *
  122. * @note It must be called between rt_hw_interrupt_disable and rt_hw_interrupt_enable
  123. */
  124. void rt_thread_defunct_enqueue(rt_thread_t thread)
  125. {
  126. rt_base_t level;
  127. level = rt_spin_lock_irqsave(&_defunct_spinlock);
  128. rt_list_insert_after(&_rt_thread_defunct, &RT_THREAD_LIST_NODE(thread));
  129. rt_spin_unlock_irqrestore(&_defunct_spinlock, level);
  130. #ifdef RT_USING_SMP
  131. rt_sem_release(&system_sem);
  132. #endif
  133. }
  134. /**
  135. * @brief Dequeue a thread from defunct queue.
  136. */
  137. rt_thread_t rt_thread_defunct_dequeue(void)
  138. {
  139. rt_base_t level;
  140. rt_thread_t thread = RT_NULL;
  141. rt_list_t *l = &_rt_thread_defunct;
  142. #ifdef RT_USING_SMP
  143. level = rt_spin_lock_irqsave(&_defunct_spinlock);
  144. if (l->next != l)
  145. {
  146. thread = RT_THREAD_LIST_NODE_ENTRY(l->next);
  147. rt_list_remove(&RT_THREAD_LIST_NODE(thread));
  148. }
  149. rt_spin_unlock_irqrestore(&_defunct_spinlock, level);
  150. #else
  151. if (l->next != l)
  152. {
  153. thread = RT_THREAD_LIST_NODE_ENTRY(l->next);
  154. level = rt_hw_interrupt_disable();
  155. rt_list_remove(&RT_THREAD_LIST_NODE(thread));
  156. rt_hw_interrupt_enable(level);
  157. }
  158. #endif
  159. return thread;
  160. }
/**
 * @brief This function will perform system background job when system idle.
 *        It drains the defunct queue: for every dead thread it runs module
 *        and signal cleanup, invokes the thread's cleanup callback, and then
 *        detaches (static thread) or deletes (heap thread) the object.
 */
static void rt_defunct_execute(void)
{
    /* Loop until there is no dead thread. So one call to rt_defunct_execute
     * will do all the cleanups. */
    while (1)
    {
        rt_thread_t thread;
        rt_bool_t object_is_systemobject;
        void (*cleanup)(struct rt_thread *tid);

#ifdef RT_USING_MODULE
        struct rt_dlmodule *module = RT_NULL;
#endif
        /* get defunct thread */
        thread = rt_thread_defunct_dequeue();
        if (thread == RT_NULL)
        {
            break;
        }

#ifdef RT_USING_MODULE
        /* destroy the dynamic module this thread belongs to, if any */
        module = (struct rt_dlmodule*)thread->parent.module_id;
        if (module)
        {
            dlmodule_destroy(module);
        }
#endif

#ifdef RT_USING_SIGNALS
        /* release any pending signal resources held by the thread */
        rt_thread_free_sig(thread);
#endif

        /* store the point of "thread->cleanup" avoid to lose */
        /* (the thread object may be detached/freed below, so the callback
         * pointer must be cached before the object is torn down) */
        cleanup = thread->cleanup;

        /* if it's a system object, not delete it */
        /* record this BEFORE detaching: detach clears the object flags */
        object_is_systemobject = rt_object_is_systemobject((rt_object_t)thread);
        if (object_is_systemobject == RT_TRUE)
        {
            /* detach this object */
            rt_object_detach((rt_object_t)thread);
        }

        /* invoke thread cleanup */
        if (cleanup != RT_NULL)
        {
            cleanup(thread);
        }

#ifdef RT_USING_HEAP
#ifdef RT_USING_MEM_PROTECTION
        if (thread->mem_regions != RT_NULL)
        {
            RT_KERNEL_FREE(thread->mem_regions);
        }
#endif
        /* if need free, delete it */
        if (object_is_systemobject == RT_FALSE)
        {
            /* release thread's stack */
#ifdef RT_USING_HW_STACK_GUARD
            /* stack_buf is the original allocation when a HW guard offsets stack_addr */
            RT_KERNEL_FREE(thread->stack_buf);
#else
            RT_KERNEL_FREE(thread->stack_addr);
#endif
            /* delete thread object */
            rt_object_delete((rt_object_t)thread);
        }
#endif
    }
}
/**
 * @brief Entry function of the per-CPU idle thread. Runs the registered idle
 *        hooks, reclaims defunct threads (non-SMP builds), and drives power
 *        management. Never returns.
 *
 * @param parameter unused.
 */
static void idle_thread_entry(void *parameter)
{
    RT_UNUSED(parameter);
#ifdef RT_USING_SMP
    /* secondary cores only spin in the architecture-specific idle routine;
     * hooks/PM/defunct handling run on core 0 (or the system thread) */
    if (rt_hw_cpu_id() != 0)
    {
        while (1)
        {
            rt_hw_secondary_cpu_idle_exec();
        }
    }
#endif /* RT_USING_SMP */

    while (1)
    {
#ifdef RT_USING_IDLE_HOOK
        rt_size_t i;
        void (*idle_hook)(void);

        /* invoke every registered idle hook; the table is read without the
         * hook spinlock here — a concurrent set/del can race this snapshot */
        for (i = 0; i < RT_IDLE_HOOK_LIST_SIZE; i++)
        {
            idle_hook = idle_hook_list[i];
            if (idle_hook != RT_NULL)
            {
                idle_hook();
            }
        }
#endif /* RT_USING_IDLE_HOOK */

#ifndef RT_USING_SMP
        /* without SMP there is no system thread: the idle loop itself
         * reclaims dead threads */
        rt_defunct_execute();
#endif /* RT_USING_SMP */

#ifdef RT_USING_PM
        void rt_system_power_manager(void);
        rt_system_power_manager();
#endif /* RT_USING_PM */
    }
}
  263. #ifdef RT_USING_SMP
  264. static void rt_thread_system_entry(void *parameter)
  265. {
  266. RT_UNUSED(parameter);
  267. while (1)
  268. {
  269. int ret = rt_sem_take(&system_sem, RT_WAITING_FOREVER);
  270. if (ret != RT_EOK)
  271. {
  272. rt_kprintf("failed to sem_take() error %d\n", ret);
  273. RT_ASSERT(0);
  274. }
  275. rt_defunct_execute();
  276. }
  277. }
  278. #endif
/**
 * @brief This function will initialize idle thread, then start it.
 *
 * @note this function must be invoked when system init.
 */
void rt_thread_idle_init(void)
{
    rt_ubase_t i;
#if RT_NAME_MAX > 0
    char idle_thread_name[RT_NAME_MAX];
#endif /* RT_NAME_MAX > 0 */

    /* create one idle thread per CPU, each bound to its core on SMP */
    for (i = 0; i < _CPUS_NR; i++)
    {
#if RT_NAME_MAX > 0
        rt_snprintf(idle_thread_name, RT_NAME_MAX, "tidle%d", i);
#endif /* RT_NAME_MAX > 0 */
        /* lowest priority so the idle thread only runs when nothing else can */
        rt_thread_init(&idle_thread[i],
#if RT_NAME_MAX > 0
                       idle_thread_name,
#else
                       "tidle",
#endif /* RT_NAME_MAX > 0 */
                       idle_thread_entry,
                       RT_NULL,
                       &idle_thread_stack[i][0],
                       sizeof(idle_thread_stack[i]),
                       RT_THREAD_PRIORITY_MAX - 1,
                       32);
#ifdef RT_USING_SMP
        rt_thread_control(&idle_thread[i], RT_THREAD_CTRL_BIND_CPU, (void*)i);
        rt_cpu_index(i)->idle_thread = &idle_thread[i];
#endif /* RT_USING_SMP */
        /* startup */
        /* NOTE(review): startup happens before the SMP spinlock/semaphore
         * init below — presumably safe because the scheduler has not been
         * started yet at system-init time; confirm against rtthread_startup */
        rt_thread_startup(&idle_thread[i]);
    }

#ifdef RT_USING_SMP
    /* need room for both the idle and system thread priorities */
    RT_ASSERT(RT_THREAD_PRIORITY_MAX > 2);

    /* these spinlocks are only explicitly initialized on SMP builds;
     * on single-core builds the spinlock API presumably degenerates to
     * interrupt masking — TODO confirm against rt_spin_lock_irqsave */
    rt_spin_lock_init(&_defunct_spinlock);
    rt_spin_lock_init(&_hook_spinlock);

    /* counting semaphore used to wake the system thread per enqueued thread */
    rt_sem_init(&system_sem, "defunct", 0, RT_IPC_FLAG_FIFO);

    /* create defunct thread */
    /* one priority level above idle so reclamation preempts the idle loop */
    rt_thread_init(&rt_system_thread,
                   "tsystem",
                   rt_thread_system_entry,
                   RT_NULL,
                   rt_system_stack,
                   sizeof(rt_system_stack),
                   RT_THREAD_PRIORITY_MAX - 2,
                   32);
    /* startup */
    rt_thread_startup(&rt_system_thread);
#endif
}
  332. /**
  333. * @brief This function will get the handler of the idle thread.
  334. */
  335. rt_thread_t rt_thread_idle_gethandler(void)
  336. {
  337. #ifdef RT_USING_SMP
  338. int id = rt_hw_cpu_id();
  339. #else
  340. int id = 0;
  341. #endif /* RT_USING_SMP */
  342. return (rt_thread_t)(&idle_thread[id]);
  343. }