scheduler.c

/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-17     Bernard      the first version
 * 2006-04-28     Bernard      fix the scheduler algorithm
 * 2006-04-30     Bernard      add SCHEDULER_DEBUG
 * 2006-05-27     Bernard      fix the scheduler algorithm for same priority
 *                             thread schedule
 * 2006-06-04     Bernard      rewrite the scheduler algorithm
 * 2006-08-03     Bernard      add hook support
 * 2006-09-05     Bernard      add 32 priority level support
 * 2006-09-24     Bernard      add rt_system_scheduler_start function
 * 2009-09-16     Bernard      fix _rt_scheduler_stack_check
 * 2010-04-11     yi.qiu       add module feature
 * 2010-07-13     Bernard      fix the maximal number of rt_scheduler_lock_nest
 *                             issue found by kuronca
 * 2010-12-13     Bernard      add defunct list initialization even if not use heap.
 * 2011-05-10     Bernard      clean scheduler debug log.
 * 2013-12-21     Grissiom     add rt_critical_level
 * 2018-11-22     Jesven       remove the current task from ready queue
 *                             add per cpu ready queue
 *                             add _scheduler_get_highest_priority_thread to find highest priority task
 *                             rt_schedule_insert_thread won't insert current task to ready queue
 *                             in smp version, rt_hw_context_switch_interrupt maybe switch to
 *                             new task directly
 *
 */
#include <rtthread.h>
#include <rthw.h>

rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
rt_uint32_t rt_thread_ready_priority_group;

#if RT_THREAD_PRIORITY_MAX > 32
/* Maximum priority level, 256 */
rt_uint8_t rt_thread_ready_table[32];
#endif /* RT_THREAD_PRIORITY_MAX > 32 */

#ifndef RT_USING_SMP
extern volatile rt_uint8_t rt_interrupt_nest;
static rt_int16_t rt_scheduler_lock_nest;
struct rt_thread *rt_current_thread = RT_NULL;
rt_uint8_t rt_current_priority;
#endif /* RT_USING_SMP */
#ifdef RT_USING_HOOK
static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);
static void (*rt_scheduler_switch_hook)(struct rt_thread *tid);

/**
 * @addtogroup Hook
 */

/**@{*/

/**
 * @brief This function will set a hook function, which will be invoked when a thread
 *        switch happens.
 *
 * @param hook is the hook function.
 */
void rt_scheduler_sethook(void (*hook)(struct rt_thread *from, struct rt_thread *to))
{
    rt_scheduler_hook = hook;
}

/**
 * @brief This function will set a hook function, which will be invoked when a context
 *        switch happens.
 *
 * @param hook is the hook function.
 */
void rt_scheduler_switch_sethook(void (*hook)(struct rt_thread *tid))
{
    rt_scheduler_switch_hook = hook;
}

/**@}*/
#endif /* RT_USING_HOOK */
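/*
 * Usage sketch (illustrative, not part of this file): a simple trace hook can
 * be registered once during application init. `trace_switch` is a hypothetical
 * name chosen for the example.
 *
 *     static void trace_switch(struct rt_thread *from, struct rt_thread *to)
 *     {
 *         rt_kprintf("switch: %s -> %s\n", from->name, to->name);
 *     }
 *
 *     rt_scheduler_sethook(trace_switch);
 */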
#ifdef RT_USING_OVERFLOW_CHECK
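/*
 * Note: this check assumes the thread stack was filled with the '#' magic byte
 * when the thread was initialized (as the thread init code does), so a corrupted
 * sentinel byte at the stack boundary indicates an overflow.
 */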
static void _rt_scheduler_stack_check(struct rt_thread *thread)
{
    RT_ASSERT(thread != RT_NULL);

#ifdef ARCH_CPU_STACK_GROWS_UPWARD
    if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
#else
    if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
        (rt_ubase_t)thread->sp <= (rt_ubase_t)thread->stack_addr ||
        (rt_ubase_t)thread->sp >
        (rt_ubase_t)thread->stack_addr + (rt_ubase_t)thread->stack_size)
    {
        rt_ubase_t level;

        rt_kprintf("thread:%s stack overflow\n", thread->name);

        level = rt_hw_interrupt_disable();
        while (level);
    }
#ifdef ARCH_CPU_STACK_GROWS_UPWARD
    else if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size))
    {
        rt_kprintf("warning: %s stack is close to the top of stack address.\n",
                   thread->name);
    }
#else
    else if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32))
    {
        rt_kprintf("warning: %s stack is close to end of stack address.\n",
                   thread->name);
    }
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
}
#endif /* RT_USING_OVERFLOW_CHECK */
/*
 * get the highest priority thread in ready queue
 */
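/*
 * Lookup scheme (summary): rt_thread_ready_priority_group is a 32-bit bitmap of
 * ready priority groups, and __rt_ffs() returns the 1-based index of the lowest
 * set bit (0 when no bit is set). When RT_THREAD_PRIORITY_MAX > 32, each group
 * covers 8 priorities and rt_thread_ready_table[] holds the per-group bit masks,
 * so the highest ready priority is (group_index << 3) + bit_index_within_group.
 */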
#ifdef RT_USING_SMP
static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *highest_prio)
{
    register struct rt_thread *highest_priority_thread;
    register rt_ubase_t highest_ready_priority, local_highest_ready_priority;
    struct rt_cpu* pcpu = rt_cpu_self();

#if RT_THREAD_PRIORITY_MAX > 32
    register rt_ubase_t number;

    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
    number = __rt_ffs(pcpu->priority_group) - 1;
    local_highest_ready_priority = (number << 3) + __rt_ffs(pcpu->ready_table[number]) - 1;
#else
    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
    local_highest_ready_priority = __rt_ffs(pcpu->priority_group) - 1;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */

    /* get highest ready priority thread */
    if (highest_ready_priority < local_highest_ready_priority)
    {
        *highest_prio = highest_ready_priority;
        highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
                                                struct rt_thread,
                                                tlist);
    }
    else
    {
        *highest_prio = local_highest_ready_priority;
        highest_priority_thread = rt_list_entry(pcpu->priority_table[local_highest_ready_priority].next,
                                                struct rt_thread,
                                                tlist);
    }

    return highest_priority_thread;
}
#else
static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *highest_prio)
{
    register struct rt_thread *highest_priority_thread;
    register rt_ubase_t highest_ready_priority;

#if RT_THREAD_PRIORITY_MAX > 32
    register rt_ubase_t number;

    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
#else
    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */

    /* get highest ready priority thread */
    highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
                                            struct rt_thread,
                                            tlist);

    *highest_prio = highest_ready_priority;

    return highest_priority_thread;
}
#endif /* RT_USING_SMP */
/**
 * @brief This function will initialize the system scheduler.
 */
void rt_system_scheduler_init(void)
{
#ifdef RT_USING_SMP
    int cpu;
#endif /* RT_USING_SMP */
    register rt_base_t offset;

#ifndef RT_USING_SMP
    rt_scheduler_lock_nest = 0;
#endif /* RT_USING_SMP */

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("start scheduler: max priority 0x%02x\n",
                                      RT_THREAD_PRIORITY_MAX));

    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
    {
        rt_list_init(&rt_thread_priority_table[offset]);
    }
#ifdef RT_USING_SMP
    for (cpu = 0; cpu < RT_CPUS_NR; cpu++)
    {
        struct rt_cpu *pcpu = rt_cpu_index(cpu);
        for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
        {
            rt_list_init(&pcpu->priority_table[offset]);
        }

        pcpu->irq_switch_flag = 0;
        pcpu->current_priority = RT_THREAD_PRIORITY_MAX - 1;
        pcpu->current_thread = RT_NULL;
        pcpu->priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
        rt_memset(pcpu->ready_table, 0, sizeof(pcpu->ready_table));
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
    }
#endif /* RT_USING_SMP */

    /* initialize ready priority group */
    rt_thread_ready_priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
    /* initialize ready table */
    rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
}
/**
 * @brief This function will start up the scheduler. It will select the thread
 *        with the highest priority level, then switch to it.
 */
void rt_system_scheduler_start(void)
{
    register struct rt_thread *to_thread;
    rt_ubase_t highest_ready_priority;

    to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);

#ifdef RT_USING_SMP
    to_thread->oncpu = rt_hw_cpu_id();
#else
    rt_current_thread = to_thread;
#endif /* RT_USING_SMP */

    rt_schedule_remove_thread(to_thread);
    to_thread->stat = RT_THREAD_RUNNING;

    /* switch to new thread */
#ifdef RT_USING_SMP
    rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp, to_thread);
#else
    rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp);
#endif /* RT_USING_SMP */

    /* never come back */
}
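/*
 * Typical startup order (illustrative sketch; the exact calls live in the BSP /
 * components startup code, not in this file): the scheduler is initialized first,
 * application threads are created and started, then control is handed over and
 * rt_system_scheduler_start() never returns.
 *
 *     rt_system_scheduler_init();
 *     ...                          // create and start application threads
 *     rt_system_scheduler_start(); // switch to the highest priority thread
 */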
/**
 * @addtogroup Thread
 */

/**@{*/

#ifdef RT_USING_SMP
/**
 * @brief This function will handle the IPI interrupt and perform a scheduling in the system.
 *
 * @param vector is the number of the IPI interrupt for system scheduling.
 *
 * @param param is not used, and can be set to RT_NULL.
 *
 * @note this function should be invoked or registered as an ISR in the BSP.
 */
void rt_scheduler_ipi_handler(int vector, void *param)
{
    rt_schedule();
}
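/*
 * Registration sketch (assumption: the BSP provides rt_hw_ipi_handler_install(),
 * as SMP-enabled RT-Thread ports generally do). A BSP would typically wire the
 * scheduling IPI up during CPU bring-up:
 *
 *     rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler);
 */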
/**
 * @brief This function will perform one scheduling. It will select one thread
 *        with the highest priority level in the global ready queue or the local
 *        ready queue, then switch to it.
 */
void rt_schedule(void)
{
    rt_base_t level;
    struct rt_thread *to_thread;
    struct rt_thread *current_thread;
    struct rt_cpu    *pcpu;
    int cpu_id;

    /* disable interrupt */
    level  = rt_hw_interrupt_disable();

    cpu_id = rt_hw_cpu_id();
    pcpu   = rt_cpu_index(cpu_id);
    current_thread = pcpu->current_thread;

    /* whether to do the switch in interrupt context */
    if (pcpu->irq_nest)
    {
        pcpu->irq_switch_flag = 1;
        rt_hw_interrupt_enable(level);
        goto __exit;
    }

#ifdef RT_USING_SIGNALS
    if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND)
    {
        /* if current_thread signal is in pending */
        if ((current_thread->stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL_PENDING)
        {
            rt_thread_resume(current_thread);
        }
    }
#endif /* RT_USING_SIGNALS */

    if (current_thread->scheduler_lock_nest == 1) /* whether the scheduler is locked */
    {
        rt_ubase_t highest_ready_priority;

        if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
        {
            to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
            current_thread->oncpu = RT_CPU_DETACHED;
            if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (current_thread->current_priority < highest_ready_priority)
                {
                    to_thread = current_thread;
                }
                else if (current_thread->current_priority == highest_ready_priority && (current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0)
                {
                    to_thread = current_thread;
                }
                else
                {
                    rt_schedule_insert_thread(current_thread);
                }
                current_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
            }
            to_thread->oncpu = cpu_id;
            if (to_thread != current_thread)
            {
                /* if the destination thread is not the same as current thread */
                pcpu->current_priority = (rt_uint8_t)highest_ready_priority;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));

                rt_schedule_remove_thread(to_thread);
                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);

                /* switch to new thread */
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                             ("[%d]switch to priority#%d "
                              "thread:%.*s(sp:0x%08x), "
                              "from thread:%.*s(sp: 0x%08x)\n",
                              pcpu->irq_nest, highest_ready_priority,
                              RT_NAME_MAX, to_thread->name, to_thread->sp,
                              RT_NAME_MAX, current_thread->name, current_thread->sp));

#ifdef RT_USING_OVERFLOW_CHECK
                _rt_scheduler_stack_check(to_thread);
#endif /* RT_USING_OVERFLOW_CHECK */

                RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));

                rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
                                     (rt_ubase_t)&to_thread->sp, to_thread);
            }
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

#ifdef RT_USING_SIGNALS
    /* check stat of thread for signal */
    level = rt_hw_interrupt_disable();
    if (current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
    {
        extern void rt_thread_handle_sig(rt_bool_t clean_state);

        current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;

        rt_hw_interrupt_enable(level);

        /* check signal status */
        rt_thread_handle_sig(RT_TRUE);
    }
    else
    {
        rt_hw_interrupt_enable(level);
    }
#endif /* RT_USING_SIGNALS */

__exit:
    return ;
}
#else
/**
 * @brief This function will perform scheduling once. It will select one thread
 *        with the highest priority, and switch to it immediately.
 */
void rt_schedule(void)
{
    rt_base_t level;
    struct rt_thread *to_thread;
    struct rt_thread *from_thread;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* check the scheduler is enabled or not */
    if (rt_scheduler_lock_nest == 0)
    {
        rt_ubase_t highest_ready_priority;

        if (rt_thread_ready_priority_group != 0)
        {
            /* need_insert_from_thread: need to insert from_thread to ready queue */
            int need_insert_from_thread = 0;

            to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);

            if ((rt_current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (rt_current_thread->current_priority < highest_ready_priority)
                {
                    to_thread = rt_current_thread;
                }
                else if (rt_current_thread->current_priority == highest_ready_priority && (rt_current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0)
                {
                    to_thread = rt_current_thread;
                }
                else
                {
                    need_insert_from_thread = 1;
                }
                rt_current_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
            }

            if (to_thread != rt_current_thread)
            {
                /* if the destination thread is not the same as current thread */
                rt_current_priority = (rt_uint8_t)highest_ready_priority;
                from_thread         = rt_current_thread;
                rt_current_thread   = to_thread;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));

                if (need_insert_from_thread)
                {
                    rt_schedule_insert_thread(from_thread);
                }

                rt_schedule_remove_thread(to_thread);
                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);

                /* switch to new thread */
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                             ("[%d]switch to priority#%d "
                              "thread:%.*s(sp:0x%08x), "
                              "from thread:%.*s(sp: 0x%08x)\n",
                              rt_interrupt_nest, highest_ready_priority,
                              RT_NAME_MAX, to_thread->name, to_thread->sp,
                              RT_NAME_MAX, from_thread->name, from_thread->sp));

#ifdef RT_USING_OVERFLOW_CHECK
                _rt_scheduler_stack_check(to_thread);
#endif /* RT_USING_OVERFLOW_CHECK */

                if (rt_interrupt_nest == 0)
                {
                    extern void rt_thread_handle_sig(rt_bool_t clean_state);

                    RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (from_thread));

                    rt_hw_context_switch((rt_ubase_t)&from_thread->sp,
                                         (rt_ubase_t)&to_thread->sp);

                    /* enable interrupt */
                    rt_hw_interrupt_enable(level);

#ifdef RT_USING_SIGNALS
                    /* check stat of thread for signal */
                    level = rt_hw_interrupt_disable();
                    if (rt_current_thread->stat & RT_THREAD_STAT_SIGNAL_PENDING)
                    {
                        extern void rt_thread_handle_sig(rt_bool_t clean_state);

                        rt_current_thread->stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;

                        rt_hw_interrupt_enable(level);

                        /* check signal status */
                        rt_thread_handle_sig(RT_TRUE);
                    }
                    else
                    {
                        rt_hw_interrupt_enable(level);
                    }
#endif /* RT_USING_SIGNALS */

                    goto __exit;
                }
                else
                {
                    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n"));

                    rt_hw_context_switch_interrupt((rt_ubase_t)&from_thread->sp,
                                                   (rt_ubase_t)&to_thread->sp);
                }
            }
            else
            {
                rt_schedule_remove_thread(rt_current_thread);
                rt_current_thread->stat = RT_THREAD_RUNNING | (rt_current_thread->stat & ~RT_THREAD_STAT_MASK);
            }
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

__exit:
    return;
}
#endif /* RT_USING_SMP */
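/*
 * Note (informational): rt_schedule() is normally invoked by the kernel itself,
 * e.g. when a thread is woken up or suspended, when a time slice expires, when
 * rt_thread_yield() is called, or when the outermost rt_exit_critical() releases
 * the scheduler lock; application code rarely needs to call it directly.
 */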
/**
 * @brief This function checks whether a scheduling is needed after an IRQ context switch. If yes,
 *        it will select one thread with the highest priority level, and then switch
 *        to it.
 */
#ifdef RT_USING_SMP
void rt_scheduler_do_irq_switch(void *context)
{
    int cpu_id;
    rt_base_t level;
    struct rt_cpu* pcpu;
    struct rt_thread *to_thread;
    struct rt_thread *current_thread;

    level = rt_hw_interrupt_disable();

    cpu_id = rt_hw_cpu_id();
    pcpu   = rt_cpu_index(cpu_id);
    current_thread = pcpu->current_thread;

#ifdef RT_USING_SIGNALS
    if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_SUSPEND)
    {
        /* if current_thread signal is in pending */
        if ((current_thread->stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL_PENDING)
        {
            rt_thread_resume(current_thread);
        }
    }
#endif /* RT_USING_SIGNALS */

    if (pcpu->irq_switch_flag == 0)
    {
        rt_hw_interrupt_enable(level);
        return;
    }

    if (current_thread->scheduler_lock_nest == 1 && pcpu->irq_nest == 0)
    {
        rt_ubase_t highest_ready_priority;

        /* clear irq switch flag */
        pcpu->irq_switch_flag = 0;

        if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
        {
            to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
            current_thread->oncpu = RT_CPU_DETACHED;
            if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (current_thread->current_priority < highest_ready_priority)
                {
                    to_thread = current_thread;
                }
                else if (current_thread->current_priority == highest_ready_priority && (current_thread->stat & RT_THREAD_STAT_YIELD_MASK) == 0)
                {
                    to_thread = current_thread;
                }
                else
                {
                    rt_schedule_insert_thread(current_thread);
                }
                current_thread->stat &= ~RT_THREAD_STAT_YIELD_MASK;
            }
            to_thread->oncpu = cpu_id;
            if (to_thread != current_thread)
            {
                /* if the destination thread is not the same as current thread */
                pcpu->current_priority = (rt_uint8_t)highest_ready_priority;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));

                rt_schedule_remove_thread(to_thread);
                to_thread->stat = RT_THREAD_RUNNING | (to_thread->stat & ~RT_THREAD_STAT_MASK);

#ifdef RT_USING_OVERFLOW_CHECK
                _rt_scheduler_stack_check(to_thread);
#endif /* RT_USING_OVERFLOW_CHECK */
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n"));

                current_thread->cpus_lock_nest--;
                current_thread->scheduler_lock_nest--;

                RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));

                rt_hw_context_switch_interrupt(context, (rt_ubase_t)&current_thread->sp,
                                               (rt_ubase_t)&to_thread->sp, to_thread);
            }
        }
    }
    rt_hw_interrupt_enable(level);
}
#endif /* RT_USING_SMP */
/**
 * @brief This function will insert a thread into the system ready queue. The state of
 *        the thread will be set to READY and the thread will be removed from the suspend queue.
 *
 * @param thread is the thread to be inserted.
 *
 * @note Please do not invoke this function in user application.
 */
#ifdef RT_USING_SMP
void rt_schedule_insert_thread(struct rt_thread *thread)
{
    int cpu_id;
    int bind_cpu;
    rt_uint32_t cpu_mask;
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* it should be RUNNING thread */
    if (thread->oncpu != RT_CPU_DETACHED)
    {
        thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
        goto __exit;
    }

    /* READY thread, insert to ready queue */
    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);

    cpu_id   = rt_hw_cpu_id();
    bind_cpu = thread->bind_cpu;

    /* insert thread to ready list */
    if (bind_cpu == RT_CPUS_NR)
    {
#if RT_THREAD_PRIORITY_MAX > 32
        rt_thread_ready_table[thread->number] |= thread->high_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        rt_thread_ready_priority_group |= thread->number_mask;

        rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
                              &(thread->tlist));
        cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
        rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
    }
    else
    {
        struct rt_cpu *pcpu = rt_cpu_index(bind_cpu);

#if RT_THREAD_PRIORITY_MAX > 32
        pcpu->ready_table[thread->number] |= thread->high_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        pcpu->priority_group |= thread->number_mask;

        rt_list_insert_before(&(rt_cpu_index(bind_cpu)->priority_table[thread->current_priority]),
                              &(thread->tlist));

        if (cpu_id != bind_cpu)
        {
            cpu_mask = 1 << bind_cpu;
            rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
        }
    }

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name, thread->current_priority));

__exit:
    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#else
void rt_schedule_insert_thread(struct rt_thread *thread)
{
    register rt_base_t temp;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    temp = rt_hw_interrupt_disable();

    /* it's current thread, it should be RUNNING thread */
    if (thread == rt_current_thread)
    {
        thread->stat = RT_THREAD_RUNNING | (thread->stat & ~RT_THREAD_STAT_MASK);
        goto __exit;
    }

    /* READY thread, insert to ready queue */
    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);
    /* insert thread to ready list */
    rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
                          &(thread->tlist));

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name, thread->current_priority));

    /* set priority mask */
#if RT_THREAD_PRIORITY_MAX > 32
    rt_thread_ready_table[thread->number] |= thread->high_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
    rt_thread_ready_priority_group |= thread->number_mask;

__exit:
    /* enable interrupt */
    rt_hw_interrupt_enable(temp);
}
#endif /* RT_USING_SMP */
/**
 * @brief This function will remove a thread from system ready queue.
 *
 * @param thread is the thread to be removed.
 *
 * @note Please do not invoke this function in user application.
 */
#ifdef RT_USING_SMP
void rt_schedule_remove_thread(struct rt_thread *thread)
{
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name,
                                      thread->current_priority));

    /* remove thread from ready list */
    rt_list_remove(&(thread->tlist));
    if (thread->bind_cpu == RT_CPUS_NR)
    {
        if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
        {
#if RT_THREAD_PRIORITY_MAX > 32
            rt_thread_ready_table[thread->number] &= ~thread->high_mask;
            if (rt_thread_ready_table[thread->number] == 0)
            {
                rt_thread_ready_priority_group &= ~thread->number_mask;
            }
#else
            rt_thread_ready_priority_group &= ~thread->number_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        }
    }
    else
    {
        struct rt_cpu *pcpu = rt_cpu_index(thread->bind_cpu);

        if (rt_list_isempty(&(pcpu->priority_table[thread->current_priority])))
        {
#if RT_THREAD_PRIORITY_MAX > 32
            pcpu->ready_table[thread->number] &= ~thread->high_mask;
            if (pcpu->ready_table[thread->number] == 0)
            {
                pcpu->priority_group &= ~thread->number_mask;
            }
#else
            pcpu->priority_group &= ~thread->number_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        }
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#else
void rt_schedule_remove_thread(struct rt_thread *thread)
{
    register rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name,
                                      thread->current_priority));

    /* remove thread from ready list */
    rt_list_remove(&(thread->tlist));
    if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
    {
#if RT_THREAD_PRIORITY_MAX > 32
        rt_thread_ready_table[thread->number] &= ~thread->high_mask;
        if (rt_thread_ready_table[thread->number] == 0)
        {
            rt_thread_ready_priority_group &= ~thread->number_mask;
        }
#else
        rt_thread_ready_priority_group &= ~thread->number_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#endif /* RT_USING_SMP */
/**
 * @brief This function will lock the thread scheduler.
 */
#ifdef RT_USING_SMP
void rt_enter_critical(void)
{
    register rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_cpu_self()->current_thread;
    if (!current_thread)
    {
        rt_hw_local_irq_enable(level);
        return;
    }

    /*
     * the maximal number of nest is RT_UINT16_MAX, which is big
     * enough and does not check here
     */

    {
        register rt_uint16_t lock_nest = current_thread->cpus_lock_nest;
        current_thread->cpus_lock_nest++;
        if (lock_nest == 0)
        {
            current_thread->scheduler_lock_nest ++;
            rt_hw_spin_lock(&_cpus_lock);
        }
    }

    /* critical for local cpu */
    current_thread->critical_lock_nest ++;

    /* lock scheduler for local cpu */
    current_thread->scheduler_lock_nest ++;

    /* enable interrupt */
    rt_hw_local_irq_enable(level);
}
#else
void rt_enter_critical(void)
{
    register rt_base_t level;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /*
     * the maximal number of nest is RT_UINT16_MAX, which is big
     * enough and does not check here
     */
    rt_scheduler_lock_nest ++;

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
#endif /* RT_USING_SMP */
RTM_EXPORT(rt_enter_critical);
/**
 * @brief This function will unlock the thread scheduler.
 */
#ifdef RT_USING_SMP
void rt_exit_critical(void)
{
    register rt_base_t level;
    struct rt_thread *current_thread;

    /* disable interrupt */
    level = rt_hw_local_irq_disable();

    current_thread = rt_cpu_self()->current_thread;
    if (!current_thread)
    {
        rt_hw_local_irq_enable(level);
        return;
    }

    current_thread->scheduler_lock_nest --;

    current_thread->critical_lock_nest --;

    current_thread->cpus_lock_nest--;
    if (current_thread->cpus_lock_nest == 0)
    {
        current_thread->scheduler_lock_nest --;
        rt_hw_spin_unlock(&_cpus_lock);
    }

    if (current_thread->scheduler_lock_nest <= 0)
    {
        current_thread->scheduler_lock_nest = 0;
        /* enable interrupt */
        rt_hw_local_irq_enable(level);

        rt_schedule();
    }
    else
    {
        /* enable interrupt */
        rt_hw_local_irq_enable(level);
    }
}
#else
void rt_exit_critical(void)
{
    register rt_base_t level;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    rt_scheduler_lock_nest --;
    if (rt_scheduler_lock_nest <= 0)
    {
        rt_scheduler_lock_nest = 0;
        /* enable interrupt */
        rt_hw_interrupt_enable(level);

        if (rt_current_thread)
        {
            /* if scheduler is started, do a schedule */
            rt_schedule();
        }
    }
    else
    {
        /* enable interrupt */
        rt_hw_interrupt_enable(level);
    }
}
#endif /* RT_USING_SMP */
RTM_EXPORT(rt_exit_critical);
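/*
 * Usage sketch (illustrative): calls must be balanced, and the scheduler is only
 * unlocked again when the outermost rt_exit_critical() runs. `shared_counter` is
 * a hypothetical variable used only for this example.
 *
 *     rt_enter_critical();        // no thread switch can happen from here on
 *     shared_counter++;           // touch data shared with other threads
 *     rt_exit_critical();         // may trigger rt_schedule() when it unlocks
 */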
/**
 * @brief Get the scheduler lock level.
 *
 * @return the level of the scheduler lock. 0 means unlocked.
 */
rt_uint16_t rt_critical_level(void)
{
#ifdef RT_USING_SMP
    struct rt_thread *current_thread = rt_cpu_self()->current_thread;

    return current_thread->critical_lock_nest;
#else
    return rt_scheduler_lock_nest;
#endif /* RT_USING_SMP */
}
RTM_EXPORT(rt_critical_level);

/**@}*/