/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-18     Bernard      the first version
 * 2006-04-26     Bernard      add semaphore APIs
 * 2006-08-10     Bernard      add version information
 * 2007-01-28     Bernard      rename RT_OBJECT_Class_Static to RT_Object_Class_Static
 * 2007-03-03     Bernard      clean up the definitions to rtdef.h
 * 2010-04-11     yi.qiu       add module feature
 * 2013-06-24     Bernard      add rt_kprintf re-define when not use RT_USING_CONSOLE.
 * 2016-08-09     ArdaFu       add new thread and interrupt hook.
 * 2018-11-22     Jesven       add all cpu's lock and ipi handler
 * 2021-02-28     Meco Man     add RT_KSERVICE_USING_STDLIB
 * 2021-11-14     Meco Man     add rtlegacy.h for compatibility
 * 2022-06-04     Meco Man     remove strnlen
 * 2023-05-20     Bernard      add rtatomic.h header file to included files.
 * 2023-06-30     ChuShicheng  move debug check from the rtdebug.h
 * 2023-10-16     Shell        Support a new backtrace framework
 * 2023-12-10     xqyjlj       fix spinlock in up
 * 2024-01-25     Shell        Add rt_susp_list for IPC primitives
 * 2024-03-10     Meco Man     move std libc related functions to rtklibc
 */
#ifndef __RT_THREAD_H__
#define __RT_THREAD_H__

#include <rtconfig.h>
#include <rtdef.h>
#include <rtservice.h>
#include <rtm.h>
#include <rtatomic.h>
#include <rtklibc.h>

#ifdef RT_USING_LEGACY
#include <rtlegacy.h>
#endif
#ifdef RT_USING_FINSH
#include <finsh.h>
#endif /* RT_USING_FINSH */

#ifdef __cplusplus
extern "C" {
#endif

#ifdef __GNUC__
int entry(void);
#endif
/**
 * @addtogroup KernelObject
 * @{
 */

/*
 * kernel object interface
 */
struct rt_object_information *
rt_object_get_information(enum rt_object_class_type type);
int rt_object_get_length(enum rt_object_class_type type);
int rt_object_get_pointers(enum rt_object_class_type type, rt_object_t *pointers, int maxlen);

void rt_object_init(struct rt_object *object,
                    enum rt_object_class_type type,
                    const char *name);
void rt_object_detach(rt_object_t object);
#ifdef RT_USING_HEAP
rt_object_t rt_object_allocate(enum rt_object_class_type type, const char *name);
void rt_object_delete(rt_object_t object);

/* custom object */
rt_object_t rt_custom_object_create(const char *name, void *data, rt_err_t (*data_destroy)(void *));
rt_err_t rt_custom_object_destroy(rt_object_t obj);
#endif /* RT_USING_HEAP */
rt_bool_t rt_object_is_systemobject(rt_object_t object);
rt_uint8_t rt_object_get_type(rt_object_t object);
rt_err_t rt_object_for_each(rt_uint8_t type, rt_object_iter_t iter, void *data);
rt_object_t rt_object_find(const char *name, rt_uint8_t type);
rt_err_t rt_object_get_name(rt_object_t object, char *name, rt_uint8_t name_size);

#ifdef RT_USING_HOOK
void rt_object_attach_sethook(void (*hook)(struct rt_object *object));
void rt_object_detach_sethook(void (*hook)(struct rt_object *object));
void rt_object_trytake_sethook(void (*hook)(struct rt_object *object));
void rt_object_take_sethook(void (*hook)(struct rt_object *object));
void rt_object_put_sethook(void (*hook)(struct rt_object *object));
#endif /* RT_USING_HOOK */

/**@}*/
/**
 * @addtogroup Clock
 * @{
 */

/*
 * clock & timer interface
 */
rt_tick_t rt_tick_get(void);
void rt_tick_set(rt_tick_t tick);
void rt_tick_increase(void);
rt_tick_t rt_tick_from_millisecond(rt_int32_t ms);
rt_tick_t rt_tick_get_millisecond(void);
#ifdef RT_USING_HOOK
void rt_tick_sethook(void (*hook)(void));
#endif /* RT_USING_HOOK */

void rt_system_timer_init(void);
void rt_system_timer_thread_init(void);

void rt_timer_init(rt_timer_t timer,
                   const char *name,
                   void (*timeout)(void *parameter),
                   void *parameter,
                   rt_tick_t time,
                   rt_uint8_t flag);
rt_err_t rt_timer_detach(rt_timer_t timer);
#ifdef RT_USING_HEAP
rt_timer_t rt_timer_create(const char *name,
                           void (*timeout)(void *parameter),
                           void *parameter,
                           rt_tick_t time,
                           rt_uint8_t flag);
rt_err_t rt_timer_delete(rt_timer_t timer);
#endif /* RT_USING_HEAP */
rt_err_t rt_timer_start(rt_timer_t timer);
rt_err_t rt_timer_stop(rt_timer_t timer);
rt_err_t rt_timer_control(rt_timer_t timer, int cmd, void *arg);
rt_tick_t rt_timer_next_timeout_tick(void);
void rt_timer_check(void);

#ifdef RT_USING_HOOK
void rt_timer_enter_sethook(void (*hook)(struct rt_timer *timer));
void rt_timer_exit_sethook(void (*hook)(struct rt_timer *timer));
#endif /* RT_USING_HOOK */
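
/*
 * Usage sketch (illustrative only, not part of the original header): creating
 * a periodic software timer with the rt_timer_* API above, assuming
 * RT_USING_HEAP is enabled. The callback name "blink_timeout" and the 1000 ms
 * period are placeholders chosen for the example.
 *
 *     static void blink_timeout(void *parameter)
 *     {
 *         rt_kprintf("timer fired\n");
 *     }
 *
 *     rt_timer_t timer = rt_timer_create("blink",
 *                                        blink_timeout,
 *                                        RT_NULL,
 *                                        rt_tick_from_millisecond(1000),
 *                                        RT_TIMER_FLAG_PERIODIC | RT_TIMER_FLAG_SOFT_TIMER);
 *     if (timer != RT_NULL)
 *         rt_timer_start(timer);
 */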
/**@}*/

/**
 * @addtogroup Thread
 * @{
 */

/*
 * thread interface
 */
rt_err_t rt_thread_init(struct rt_thread *thread,
                        const char *name,
                        void (*entry)(void *parameter),
                        void *parameter,
                        void *stack_start,
                        rt_uint32_t stack_size,
                        rt_uint8_t priority,
                        rt_uint32_t tick);
rt_err_t rt_thread_detach(rt_thread_t thread);
#ifdef RT_USING_HEAP
rt_thread_t rt_thread_create(const char *name,
                             void (*entry)(void *parameter),
                             void *parameter,
                             rt_uint32_t stack_size,
                             rt_uint8_t priority,
                             rt_uint32_t tick);
rt_err_t rt_thread_delete(rt_thread_t thread);
#endif /* RT_USING_HEAP */
rt_err_t rt_thread_close(rt_thread_t thread);
rt_thread_t rt_thread_self(void);
rt_thread_t rt_thread_find(char *name);
rt_err_t rt_thread_startup(rt_thread_t thread);
rt_err_t rt_thread_yield(void);
rt_err_t rt_thread_delay(rt_tick_t tick);
rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick);
rt_err_t rt_thread_mdelay(rt_int32_t ms);
rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg);
rt_err_t rt_thread_suspend(rt_thread_t thread);
rt_err_t rt_thread_suspend_with_flag(rt_thread_t thread, int suspend_flag);
rt_err_t rt_thread_resume(rt_thread_t thread);
#ifdef RT_USING_SMART
rt_err_t rt_thread_wakeup(rt_thread_t thread);
void rt_thread_wakeup_set(struct rt_thread *thread, rt_wakeup_func_t func, void *user_data);
#endif /* RT_USING_SMART */
rt_err_t rt_thread_get_name(rt_thread_t thread, char *name, rt_uint8_t name_size);
#ifdef RT_USING_SIGNALS
void rt_thread_alloc_sig(rt_thread_t tid);
void rt_thread_free_sig(rt_thread_t tid);
int rt_thread_kill(rt_thread_t tid, int sig);
#endif /* RT_USING_SIGNALS */

#ifdef RT_USING_HOOK
void rt_thread_suspend_sethook(void (*hook)(rt_thread_t thread));
void rt_thread_resume_sethook (void (*hook)(rt_thread_t thread));
/**
 * @brief Sets a hook function to be called when a thread is initialized.
 *
 * @param thread is the target thread that is being initialized
 */
typedef void (*rt_thread_inited_hookproto_t)(rt_thread_t thread);
RT_OBJECT_HOOKLIST_DECLARE(rt_thread_inited_hookproto_t, rt_thread_inited);
#endif /* RT_USING_HOOK */
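
/*
 * Usage sketch (illustrative only, not part of the original header): creating
 * and starting a dynamic thread with rt_thread_create()/rt_thread_startup(),
 * assuming RT_USING_HEAP is enabled. The entry function "blink_entry", the
 * 1024-byte stack, the mid-range priority and the 10-tick time slice are
 * placeholder values for the example.
 *
 *     static void blink_entry(void *parameter)
 *     {
 *         while (1)
 *         {
 *             rt_kprintf("blink\n");
 *             rt_thread_mdelay(500);
 *         }
 *     }
 *
 *     rt_thread_t tid = rt_thread_create("blink", blink_entry, RT_NULL,
 *                                        1024, RT_THREAD_PRIORITY_MAX / 2, 10);
 *     if (tid != RT_NULL)
 *         rt_thread_startup(tid);
 */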
/*
 * idle thread interface
 */
void rt_thread_idle_init(void);
#if defined(RT_USING_HOOK) || defined(RT_USING_IDLE_HOOK)
rt_err_t rt_thread_idle_sethook(void (*hook)(void));
rt_err_t rt_thread_idle_delhook(void (*hook)(void));
#endif /* defined(RT_USING_HOOK) || defined(RT_USING_IDLE_HOOK) */
rt_thread_t rt_thread_idle_gethandler(void);

/*
 * schedule service
 */
void rt_system_scheduler_init(void);
void rt_system_scheduler_start(void);

void rt_schedule(void);
void rt_scheduler_do_irq_switch(void *context);

#ifdef RT_USING_OVERFLOW_CHECK
void rt_scheduler_stack_check(struct rt_thread *thread);
#define RT_SCHEDULER_STACK_CHECK(thr) rt_scheduler_stack_check(thr)
#else /* !RT_USING_OVERFLOW_CHECK */
#define RT_SCHEDULER_STACK_CHECK(thr)
#endif /* RT_USING_OVERFLOW_CHECK */

rt_base_t rt_enter_critical(void);
void rt_exit_critical(void);
void rt_exit_critical_safe(rt_base_t critical_level);
rt_uint16_t rt_critical_level(void);
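
/*
 * Usage sketch (illustrative only, not part of the original header): disabling
 * preemption around a short critical region. rt_enter_critical() returns the
 * current critical level, which rt_exit_critical_safe() checks against when
 * unlocking, as declared above.
 *
 *     rt_base_t level = rt_enter_critical();
 *     ... touch data shared with other threads (not with ISRs) ...
 *     rt_exit_critical_safe(level);
 */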
#ifdef RT_USING_HOOK
void rt_scheduler_sethook(void (*hook)(rt_thread_t from, rt_thread_t to));
void rt_scheduler_switch_sethook(void (*hook)(struct rt_thread *tid));
#endif /* RT_USING_HOOK */

#ifdef RT_USING_SMP
void rt_secondary_cpu_entry(void);
void rt_scheduler_ipi_handler(int vector, void *param);
#endif /* RT_USING_SMP */

/**@}*/

/**
 * @addtogroup Signals
 * @{
 */
#ifdef RT_USING_SIGNALS
void rt_signal_mask(int signo);
void rt_signal_unmask(int signo);
rt_sighandler_t rt_signal_install(int signo, rt_sighandler_t handler);
int rt_signal_wait(const rt_sigset_t *set, rt_siginfo_t *si, rt_int32_t timeout);
int rt_system_signal_init(void);
#endif /* RT_USING_SIGNALS */

/**@}*/

/**
 * @addtogroup MM
 * @{
 */

/*
 * memory management interface
 */
#ifdef RT_USING_MEMPOOL
/*
 * memory pool interface
 */
rt_err_t rt_mp_init(struct rt_mempool *mp,
                    const char *name,
                    void *start,
                    rt_size_t size,
                    rt_size_t block_size);
rt_err_t rt_mp_detach(struct rt_mempool *mp);
#ifdef RT_USING_HEAP
rt_mp_t rt_mp_create(const char *name,
                     rt_size_t block_count,
                     rt_size_t block_size);
rt_err_t rt_mp_delete(rt_mp_t mp);
#endif /* RT_USING_HEAP */

void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time);
void rt_mp_free(void *block);

#ifdef RT_USING_HOOK
void rt_mp_alloc_sethook(void (*hook)(struct rt_mempool *mp, void *block));
void rt_mp_free_sethook(void (*hook)(struct rt_mempool *mp, void *block));
#endif /* RT_USING_HOOK */
#endif /* RT_USING_MEMPOOL */
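
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * fixed-size block pool built with the rt_mp_* API above, assuming
 * RT_USING_MEMPOOL and RT_USING_HEAP are enabled. The pool name, block count
 * and block size are placeholder values.
 *
 *     rt_mp_t mp = rt_mp_create("pkt_pool", 16, 64);
 *     if (mp != RT_NULL)
 *     {
 *         void *block = rt_mp_alloc(mp, RT_WAITING_FOREVER);
 *         if (block != RT_NULL)
 *         {
 *             ... use the 64-byte block ...
 *             rt_mp_free(block);
 *         }
 *     }
 */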
#ifdef RT_USING_HEAP
/*
 * heap memory interface
 */
void rt_system_heap_init(void *begin_addr, void *end_addr);
void rt_system_heap_init_generic(void *begin_addr, void *end_addr);

void *rt_malloc(rt_size_t size);
void rt_free(void *ptr);
void *rt_realloc(void *ptr, rt_size_t newsize);
void *rt_calloc(rt_size_t count, rt_size_t size);
void *rt_malloc_align(rt_size_t size, rt_size_t align);
void rt_free_align(void *ptr);

void rt_memory_info(rt_size_t *total,
                    rt_size_t *used,
                    rt_size_t *max_used);

#if defined(RT_USING_SLAB) && defined(RT_USING_SLAB_AS_HEAP)
void *rt_page_alloc(rt_size_t npages);
void rt_page_free(void *addr, rt_size_t npages);
#endif /* defined(RT_USING_SLAB) && defined(RT_USING_SLAB_AS_HEAP) */

#ifdef RT_USING_HOOK
void rt_malloc_sethook(void (*hook)(void **ptr, rt_size_t size));
void rt_realloc_set_entry_hook(void (*hook)(void **ptr, rt_size_t size));
void rt_realloc_set_exit_hook(void (*hook)(void **ptr, rt_size_t size));
void rt_free_sethook(void (*hook)(void **ptr));
#endif /* RT_USING_HOOK */

#endif /* RT_USING_HEAP */
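
/*
 * Usage sketch (illustrative only, not part of the original header): dynamic
 * allocation from the system heap with rt_malloc()/rt_free(), and querying
 * heap statistics with rt_memory_info(). The 128-byte size is a placeholder.
 *
 *     void *buf = rt_malloc(128);
 *     if (buf != RT_NULL)
 *     {
 *         rt_size_t total, used, max_used;
 *
 *         rt_memory_info(&total, &used, &max_used);
 *         rt_kprintf("heap: total=%d used=%d max_used=%d\n",
 *                    (int)total, (int)used, (int)max_used);
 *         rt_free(buf);
 *     }
 */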
#ifdef RT_USING_SMALL_MEM
/**
 * small memory object interface
 */
rt_smem_t rt_smem_init(const char *name,
                       void *begin_addr,
                       rt_size_t size);
rt_err_t rt_smem_detach(rt_smem_t m);
void *rt_smem_alloc(rt_smem_t m, rt_size_t size);
void *rt_smem_realloc(rt_smem_t m, void *rmem, rt_size_t newsize);
void rt_smem_free(void *rmem);
#endif /* RT_USING_SMALL_MEM */

#ifdef RT_USING_MEMHEAP
/**
 * memory heap object interface
 */
rt_err_t rt_memheap_init(struct rt_memheap *memheap,
                         const char *name,
                         void *start_addr,
                         rt_size_t size);
rt_err_t rt_memheap_detach(struct rt_memheap *heap);
void *rt_memheap_alloc(struct rt_memheap *heap, rt_size_t size);
void *rt_memheap_realloc(struct rt_memheap *heap, void *ptr, rt_size_t newsize);
void rt_memheap_free(void *ptr);
void rt_memheap_info(struct rt_memheap *heap,
                     rt_size_t *total,
                     rt_size_t *used,
                     rt_size_t *max_used);
#endif /* RT_USING_MEMHEAP */

#ifdef RT_USING_MEMHEAP_AS_HEAP
/**
 * memory heap as heap
 */
void *_memheap_alloc(struct rt_memheap *heap, rt_size_t size);
void _memheap_free(void *rmem);
void *_memheap_realloc(struct rt_memheap *heap, void *rmem, rt_size_t newsize);
#endif

#ifdef RT_USING_SLAB
/**
 * slab object interface
 */
rt_slab_t rt_slab_init(const char *name, void *begin_addr, rt_size_t size);
rt_err_t rt_slab_detach(rt_slab_t m);
void *rt_slab_page_alloc(rt_slab_t m, rt_size_t npages);
void rt_slab_page_free(rt_slab_t m, void *addr, rt_size_t npages);
void *rt_slab_alloc(rt_slab_t m, rt_size_t size);
void *rt_slab_realloc(rt_slab_t m, void *ptr, rt_size_t size);
void rt_slab_free(rt_slab_t m, void *ptr);
#endif /* RT_USING_SLAB */

/**@}*/

/**
 * @addtogroup IPC
 * @{
 */
/**
 * Suspend list - a basic building block for IPC primitives that interacts with
 * the scheduler directly. Its API is similar to a FIFO list.
 *
 * Note: do not use this directly in application code.
 */
void rt_susp_list_print(rt_list_t *list);
/* reserve thread error while resuming it */
#define RT_THREAD_RESUME_RES_THR_ERR (-1)
struct rt_thread *rt_susp_list_dequeue(rt_list_t *susp_list, rt_err_t thread_error);
rt_err_t rt_susp_list_resume_all(rt_list_t *susp_list, rt_err_t thread_error);
rt_err_t rt_susp_list_resume_all_irq(rt_list_t *susp_list,
                                     rt_err_t thread_error,
                                     struct rt_spinlock *lock);

/* suspend and enqueue */
rt_err_t rt_thread_suspend_to_list(rt_thread_t thread, rt_list_t *susp_list, int ipc_flags, int suspend_flag);
/* only for a suspended thread, and caller must hold the scheduler lock */
rt_err_t rt_susp_list_enqueue(rt_list_t *susp_list, rt_thread_t thread, int ipc_flags);
#ifdef RT_USING_SEMAPHORE
/*
 * semaphore interface
 */
rt_err_t rt_sem_init(rt_sem_t sem,
                     const char *name,
                     rt_uint32_t value,
                     rt_uint8_t flag);
rt_err_t rt_sem_detach(rt_sem_t sem);
#ifdef RT_USING_HEAP
rt_sem_t rt_sem_create(const char *name, rt_uint32_t value, rt_uint8_t flag);
rt_err_t rt_sem_delete(rt_sem_t sem);
#endif /* RT_USING_HEAP */

rt_err_t rt_sem_take(rt_sem_t sem, rt_int32_t timeout);
rt_err_t rt_sem_take_interruptible(rt_sem_t sem, rt_int32_t timeout);
rt_err_t rt_sem_take_killable(rt_sem_t sem, rt_int32_t timeout);
rt_err_t rt_sem_trytake(rt_sem_t sem);
rt_err_t rt_sem_release(rt_sem_t sem);
rt_err_t rt_sem_control(rt_sem_t sem, int cmd, void *arg);
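
/*
 * Usage sketch (illustrative only, not part of the original header): a binary
 * semaphore used to signal a worker thread, assuming RT_USING_SEMAPHORE and
 * RT_USING_HEAP are enabled. The name and the initial value 0 are placeholders.
 *
 *     rt_sem_t sem = rt_sem_create("data_rdy", 0, RT_IPC_FLAG_PRIO);
 *
 *     producer side:
 *         rt_sem_release(sem);
 *
 *     consumer side:
 *         if (rt_sem_take(sem, RT_WAITING_FOREVER) == RT_EOK)
 *         {
 *             ... handle the signalled data ...
 *         }
 */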
#endif /* RT_USING_SEMAPHORE */

#ifdef RT_USING_MUTEX
/*
 * mutex interface
 */
rt_err_t rt_mutex_init(rt_mutex_t mutex, const char *name, rt_uint8_t flag);
rt_err_t rt_mutex_detach(rt_mutex_t mutex);
#ifdef RT_USING_HEAP
rt_mutex_t rt_mutex_create(const char *name, rt_uint8_t flag);
rt_err_t rt_mutex_delete(rt_mutex_t mutex);
#endif /* RT_USING_HEAP */

void rt_mutex_drop_thread(rt_mutex_t mutex, rt_thread_t thread);
rt_uint8_t rt_mutex_setprioceiling(rt_mutex_t mutex, rt_uint8_t priority);
rt_uint8_t rt_mutex_getprioceiling(rt_mutex_t mutex);

rt_err_t rt_mutex_take(rt_mutex_t mutex, rt_int32_t timeout);
rt_err_t rt_mutex_trytake(rt_mutex_t mutex);
rt_err_t rt_mutex_take_interruptible(rt_mutex_t mutex, rt_int32_t time);
rt_err_t rt_mutex_take_killable(rt_mutex_t mutex, rt_int32_t time);
rt_err_t rt_mutex_release(rt_mutex_t mutex);
rt_err_t rt_mutex_control(rt_mutex_t mutex, int cmd, void *arg);

rt_inline rt_thread_t rt_mutex_get_owner(rt_mutex_t mutex)
{
    return mutex->owner;
}
rt_inline rt_ubase_t rt_mutex_get_hold(rt_mutex_t mutex)
{
    return mutex->hold;
}
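
/*
 * Usage sketch (illustrative only, not part of the original header): guarding
 * shared state with a mutex, assuming RT_USING_MUTEX and RT_USING_HEAP are
 * enabled. The mutex name is a placeholder.
 *
 *     rt_mutex_t lock = rt_mutex_create("cfg_lock", RT_IPC_FLAG_PRIO);
 *
 *     if (rt_mutex_take(lock, RT_WAITING_FOREVER) == RT_EOK)
 *     {
 *         ... read or update the shared configuration ...
 *         rt_mutex_release(lock);
 *     }
 */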
#endif /* RT_USING_MUTEX */

#ifdef RT_USING_EVENT
/*
 * event interface
 */
rt_err_t rt_event_init(rt_event_t event, const char *name, rt_uint8_t flag);
rt_err_t rt_event_detach(rt_event_t event);
#ifdef RT_USING_HEAP
rt_event_t rt_event_create(const char *name, rt_uint8_t flag);
rt_err_t rt_event_delete(rt_event_t event);
#endif /* RT_USING_HEAP */

rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set);
rt_err_t rt_event_recv(rt_event_t event,
                       rt_uint32_t set,
                       rt_uint8_t opt,
                       rt_int32_t timeout,
                       rt_uint32_t *recved);
rt_err_t rt_event_recv_interruptible(rt_event_t event,
                                     rt_uint32_t set,
                                     rt_uint8_t opt,
                                     rt_int32_t timeout,
                                     rt_uint32_t *recved);
rt_err_t rt_event_recv_killable(rt_event_t event,
                                rt_uint32_t set,
                                rt_uint8_t opt,
                                rt_int32_t timeout,
                                rt_uint32_t *recved);
rt_err_t rt_event_control(rt_event_t event, int cmd, void *arg);
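
/*
 * Usage sketch (illustrative only, not part of the original header): waiting
 * for either of two event bits, assuming RT_USING_EVENT and RT_USING_HEAP are
 * enabled. The event name and the bit assignments are placeholders.
 *
 *     #define EVT_RX_DONE (1 << 0)
 *     #define EVT_TX_DONE (1 << 1)
 *
 *     rt_event_t evt = rt_event_create("io_evt", RT_IPC_FLAG_PRIO);
 *
 *     sender:   rt_event_send(evt, EVT_RX_DONE);
 *
 *     receiver:
 *         rt_uint32_t recved;
 *         if (rt_event_recv(evt, EVT_RX_DONE | EVT_TX_DONE,
 *                           RT_EVENT_FLAG_OR | RT_EVENT_FLAG_CLEAR,
 *                           RT_WAITING_FOREVER, &recved) == RT_EOK)
 *         {
 *             ... recved tells which bits were set ...
 *         }
 */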
#endif /* RT_USING_EVENT */

#ifdef RT_USING_MAILBOX
/*
 * mailbox interface
 */
rt_err_t rt_mb_init(rt_mailbox_t mb,
                    const char *name,
                    void *msgpool,
                    rt_size_t size,
                    rt_uint8_t flag);
rt_err_t rt_mb_detach(rt_mailbox_t mb);
#ifdef RT_USING_HEAP
rt_mailbox_t rt_mb_create(const char *name, rt_size_t size, rt_uint8_t flag);
rt_err_t rt_mb_delete(rt_mailbox_t mb);
#endif /* RT_USING_HEAP */

rt_err_t rt_mb_send(rt_mailbox_t mb, rt_ubase_t value);
rt_err_t rt_mb_send_interruptible(rt_mailbox_t mb, rt_ubase_t value);
rt_err_t rt_mb_send_killable(rt_mailbox_t mb, rt_ubase_t value);
rt_err_t rt_mb_send_wait(rt_mailbox_t mb,
                         rt_ubase_t value,
                         rt_int32_t timeout);
rt_err_t rt_mb_send_wait_interruptible(rt_mailbox_t mb,
                                       rt_ubase_t value,
                                       rt_int32_t timeout);
rt_err_t rt_mb_send_wait_killable(rt_mailbox_t mb,
                                  rt_ubase_t value,
                                  rt_int32_t timeout);
rt_err_t rt_mb_urgent(rt_mailbox_t mb, rt_ubase_t value);
rt_err_t rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout);
rt_err_t rt_mb_recv_interruptible(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout);
rt_err_t rt_mb_recv_killable(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout);
rt_err_t rt_mb_control(rt_mailbox_t mb, int cmd, void *arg);
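
/*
 * Usage sketch (illustrative only, not part of the original header): passing a
 * pointer between threads through a mailbox, assuming RT_USING_MAILBOX and
 * RT_USING_HEAP are enabled. The mailbox name and its 8-slot depth are
 * placeholders; each mail is a single rt_ubase_t, commonly used to carry a
 * pointer.
 *
 *     rt_mailbox_t mb = rt_mb_create("msg_mb", 8, RT_IPC_FLAG_FIFO);
 *
 *     sender:   rt_mb_send(mb, (rt_ubase_t)some_buffer);
 *
 *     receiver:
 *         rt_ubase_t value;
 *         if (rt_mb_recv(mb, &value, RT_WAITING_FOREVER) == RT_EOK)
 *         {
 *             char *buffer = (char *)value;
 *             ... consume buffer ...
 *         }
 */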
#endif /* RT_USING_MAILBOX */

#ifdef RT_USING_MESSAGEQUEUE
struct rt_mq_message
{
    struct rt_mq_message *next;
    rt_ssize_t length;
#ifdef RT_USING_MESSAGEQUEUE_PRIORITY
    rt_int32_t prio;
#endif /* RT_USING_MESSAGEQUEUE_PRIORITY */
};
#define RT_MQ_BUF_SIZE(msg_size, max_msgs) \
    ((RT_ALIGN((msg_size), RT_ALIGN_SIZE) + sizeof(struct rt_mq_message)) * (max_msgs))

/*
 * message queue interface
 */
rt_err_t rt_mq_init(rt_mq_t mq,
                    const char *name,
                    void *msgpool,
                    rt_size_t msg_size,
                    rt_size_t pool_size,
                    rt_uint8_t flag);
rt_err_t rt_mq_detach(rt_mq_t mq);
#ifdef RT_USING_HEAP
rt_mq_t rt_mq_create(const char *name,
                     rt_size_t msg_size,
                     rt_size_t max_msgs,
                     rt_uint8_t flag);
rt_err_t rt_mq_delete(rt_mq_t mq);
#endif /* RT_USING_HEAP */

rt_err_t rt_mq_send(rt_mq_t mq, const void *buffer, rt_size_t size);
rt_err_t rt_mq_send_interruptible(rt_mq_t mq, const void *buffer, rt_size_t size);
rt_err_t rt_mq_send_killable(rt_mq_t mq, const void *buffer, rt_size_t size);
rt_err_t rt_mq_send_wait(rt_mq_t mq,
                         const void *buffer,
                         rt_size_t size,
                         rt_int32_t timeout);
rt_err_t rt_mq_send_wait_interruptible(rt_mq_t mq,
                                       const void *buffer,
                                       rt_size_t size,
                                       rt_int32_t timeout);
rt_err_t rt_mq_send_wait_killable(rt_mq_t mq,
                                  const void *buffer,
                                  rt_size_t size,
                                  rt_int32_t timeout);
rt_err_t rt_mq_urgent(rt_mq_t mq, const void *buffer, rt_size_t size);
rt_ssize_t rt_mq_recv(rt_mq_t mq,
                      void *buffer,
                      rt_size_t size,
                      rt_int32_t timeout);
rt_ssize_t rt_mq_recv_interruptible(rt_mq_t mq,
                                    void *buffer,
                                    rt_size_t size,
                                    rt_int32_t timeout);
rt_ssize_t rt_mq_recv_killable(rt_mq_t mq,
                               void *buffer,
                               rt_size_t size,
                               rt_int32_t timeout);
rt_err_t rt_mq_control(rt_mq_t mq, int cmd, void *arg);
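
/*
 * Usage sketch (illustrative only, not part of the original header): a message
 * queue carrying small fixed-size structures, assuming RT_USING_MESSAGEQUEUE
 * and RT_USING_HEAP are enabled. The queue name, the message type "struct msg"
 * and the 16-message depth are placeholders; a negative return from
 * rt_mq_recv() indicates an error.
 *
 *     struct msg { rt_uint8_t cmd; rt_uint8_t arg; };
 *
 *     rt_mq_t mq = rt_mq_create("cmd_mq", sizeof(struct msg), 16, RT_IPC_FLAG_FIFO);
 *
 *     sender:
 *         struct msg m = { 0x01, 0x10 };
 *         rt_mq_send(mq, &m, sizeof(m));
 *
 *     receiver:
 *         struct msg m;
 *         if (rt_mq_recv(mq, &m, sizeof(m), RT_WAITING_FOREVER) >= 0)
 *         {
 *             ... dispatch on m.cmd ...
 *         }
 */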
#ifdef RT_USING_MESSAGEQUEUE_PRIORITY
rt_err_t rt_mq_send_wait_prio(rt_mq_t mq,
                              const void *buffer,
                              rt_size_t size,
                              rt_int32_t prio,
                              rt_int32_t timeout,
                              int suspend_flag);
rt_ssize_t rt_mq_recv_prio(rt_mq_t mq,
                           void *buffer,
                           rt_size_t size,
                           rt_int32_t *prio,
                           rt_int32_t timeout,
                           int suspend_flag);
#endif /* RT_USING_MESSAGEQUEUE_PRIORITY */
#endif /* RT_USING_MESSAGEQUEUE */

/* defunct */
void rt_thread_defunct_enqueue(rt_thread_t thread);
rt_thread_t rt_thread_defunct_dequeue(void);

/*
 * spinlock
 */
struct rt_spinlock;

void rt_spin_lock_init(struct rt_spinlock *lock);
void rt_spin_lock(struct rt_spinlock *lock);
void rt_spin_unlock(struct rt_spinlock *lock);
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock);
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level);
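
/*
 * Usage sketch (illustrative only, not part of the original header): protecting
 * data shared with an ISR using the IRQ-saving spinlock variant declared above.
 * The lock variable name is a placeholder.
 *
 *     static struct rt_spinlock s_lock;
 *
 *     rt_spin_lock_init(&s_lock);
 *
 *     rt_base_t level = rt_spin_lock_irqsave(&s_lock);
 *     ... touch data shared with the interrupt handler ...
 *     rt_spin_unlock_irqrestore(&s_lock, level);
 */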
/**@}*/

#ifdef RT_USING_DEVICE
/**
 * @addtogroup Device
 * @{
 */

/*
 * device (I/O) system interface
 */
rt_device_t rt_device_find(const char *name);

rt_err_t rt_device_register(rt_device_t dev,
                            const char *name,
                            rt_uint16_t flags);
rt_err_t rt_device_unregister(rt_device_t dev);

#ifdef RT_USING_HEAP
rt_device_t rt_device_create(int type, int attach_size);
void rt_device_destroy(rt_device_t device);
#endif /* RT_USING_HEAP */

rt_err_t
rt_device_set_rx_indicate(rt_device_t dev,
                          rt_err_t (*rx_ind)(rt_device_t dev, rt_size_t size));
rt_err_t
rt_device_set_tx_complete(rt_device_t dev,
                          rt_err_t (*tx_done)(rt_device_t dev, void *buffer));

rt_err_t rt_device_init (rt_device_t dev);
rt_err_t rt_device_open (rt_device_t dev, rt_uint16_t oflag);
rt_err_t rt_device_close(rt_device_t dev);
rt_ssize_t rt_device_read(rt_device_t dev,
                          rt_off_t pos,
                          void *buffer,
                          rt_size_t size);
rt_ssize_t rt_device_write(rt_device_t dev,
                           rt_off_t pos,
                           const void *buffer,
                           rt_size_t size);
rt_err_t rt_device_control(rt_device_t dev, int cmd, void *arg);
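
/*
 * Usage sketch (illustrative only, not part of the original header): finding,
 * opening and writing to a registered device, assuming RT_USING_DEVICE is
 * enabled. The device name "uart1" is a placeholder for whatever the BSP
 * registers.
 *
 *     rt_device_t dev = rt_device_find("uart1");
 *     if (dev != RT_NULL &&
 *         rt_device_open(dev, RT_DEVICE_OFLAG_RDWR) == RT_EOK)
 *     {
 *         const char msg[] = "hello\n";
 *
 *         rt_device_write(dev, 0, msg, sizeof(msg) - 1);
 *         rt_device_close(dev);
 *     }
 */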
/**@}*/

#endif /* RT_USING_DEVICE */

/*
 * interrupt service
 */

/*
 * rt_interrupt_enter and rt_interrupt_leave can only be called by the BSP
 */
void rt_interrupt_enter(void);
void rt_interrupt_leave(void);
/**
 * CPU object
 */
struct rt_cpu *rt_cpu_self(void);
struct rt_cpu *rt_cpu_index(int index);

#ifdef RT_USING_SMP
/*
 * smp cpus lock service
 */
rt_base_t rt_cpus_lock(void);
void rt_cpus_unlock(rt_base_t level);
void rt_cpus_lock_status_restore(struct rt_thread *thread);

#ifdef RT_USING_DEBUG
rt_base_t rt_cpu_get_id(void);
#else /* !RT_USING_DEBUG */
#define rt_cpu_get_id rt_hw_cpu_id
#endif /* RT_USING_DEBUG */

#else /* !RT_USING_SMP */
#define rt_cpu_get_id() (0)
#endif /* RT_USING_SMP */

/*
 * the number of nested interrupts.
 */
rt_uint8_t rt_interrupt_get_nest(void);

#ifdef RT_USING_HOOK
void rt_interrupt_enter_sethook(void (*hook)(void));
void rt_interrupt_leave_sethook(void (*hook)(void));
#endif /* RT_USING_HOOK */

#ifdef RT_USING_COMPONENTS_INIT
void rt_components_init(void);
void rt_components_board_init(void);
#endif /* RT_USING_COMPONENTS_INIT */
/**
 * @addtogroup KernelService
 * @{
 */

/*
 * general kernel service
 */
#ifndef RT_USING_CONSOLE
#define rt_kprintf(...)
#define rt_kputs(str)
#else
int rt_kprintf(const char *fmt, ...);
void rt_kputs(const char *str);
#endif /* RT_USING_CONSOLE */

rt_err_t rt_backtrace(void);
rt_err_t rt_backtrace_thread(rt_thread_t thread);
rt_err_t rt_backtrace_frame(rt_thread_t thread, struct rt_hw_backtrace_frame *frame);
rt_err_t rt_backtrace_formatted_print(rt_ubase_t *buffer, long buflen);
rt_err_t rt_backtrace_to_buffer(rt_thread_t thread, struct rt_hw_backtrace_frame *frame,
                                long skip, rt_ubase_t *buffer, long buflen);

#if defined(RT_USING_DEVICE) && defined(RT_USING_CONSOLE)
rt_device_t rt_console_set_device(const char *name);
rt_device_t rt_console_get_device(void);
#ifdef RT_USING_THREADSAFE_PRINTF
rt_thread_t rt_console_current_user(void);
#else
rt_inline void *rt_console_current_user(void) { return RT_NULL; }
#endif /* RT_USING_THREADSAFE_PRINTF */
#endif /* defined(RT_USING_DEVICE) && defined(RT_USING_CONSOLE) */

int __rt_ffs(int value);

void rt_show_version(void);

#ifdef RT_DEBUGING_ASSERT
extern void (*rt_assert_hook)(const char *ex, const char *func, rt_size_t line);
void rt_assert_set_hook(void (*hook)(const char *ex, const char *func, rt_size_t line));
void rt_assert_handler(const char *ex, const char *func, rt_size_t line);

#define RT_ASSERT(EX)                                           \
    if (!(EX))                                                  \
    {                                                           \
        rt_assert_handler(#EX, __FUNCTION__, __LINE__);         \
    }
#else
#define RT_ASSERT(EX)
#endif /* RT_DEBUGING_ASSERT */
#ifdef RT_DEBUGING_CONTEXT
/* Macro to check current context */
#define RT_DEBUG_NOT_IN_INTERRUPT                                             \
    do                                                                        \
    {                                                                         \
        if (rt_interrupt_get_nest() != 0)                                     \
        {                                                                     \
            rt_kprintf("Function[%s] shall not be used in ISR\n", __FUNCTION__); \
            RT_ASSERT(0)                                                      \
        }                                                                     \
    }                                                                         \
    while (0)

/* "In thread context" means:
 * 1) the scheduler has been started
 * 2) not in interrupt context.
 */
#define RT_DEBUG_IN_THREAD_CONTEXT                                            \
    do                                                                        \
    {                                                                         \
        if (rt_thread_self() == RT_NULL)                                      \
        {                                                                     \
            rt_kprintf("Function[%s] shall not be used before scheduler start\n", \
                       __FUNCTION__);                                         \
            RT_ASSERT(0)                                                      \
        }                                                                     \
        RT_DEBUG_NOT_IN_INTERRUPT;                                            \
    }                                                                         \
    while (0)

/* "scheduler available" means:
 * 1) the scheduler has been started.
 * 2) not in interrupt context.
 * 3) scheduler is not locked.
 */
#define RT_DEBUG_SCHEDULER_AVAILABLE(need_check)                              \
    do                                                                        \
    {                                                                         \
        if (need_check)                                                       \
        {                                                                     \
            if (rt_critical_level() != 0)                                     \
            {                                                                 \
                rt_kprintf("Function[%s]: scheduler is not available\n",      \
                           __FUNCTION__);                                     \
                RT_ASSERT(0)                                                  \
            }                                                                 \
            RT_DEBUG_IN_THREAD_CONTEXT;                                       \
        }                                                                     \
    }                                                                         \
    while (0)
#else
#define RT_DEBUG_NOT_IN_INTERRUPT
#define RT_DEBUG_IN_THREAD_CONTEXT
#define RT_DEBUG_SCHEDULER_AVAILABLE(need_check)
#endif /* RT_DEBUGING_CONTEXT */
rt_inline rt_bool_t rt_in_thread_context(void)
{
    return rt_thread_self() != RT_NULL && rt_interrupt_get_nest() == 0;
}

/* is scheduler available */
rt_inline rt_bool_t rt_scheduler_is_available(void)
{
    return rt_critical_level() == 0 && rt_in_thread_context();
}
#ifdef RT_USING_SMP
/* is thread bound to a core */
rt_inline rt_bool_t rt_sched_thread_is_binding(rt_thread_t thread)
{
    if (thread == RT_NULL)
    {
        thread = rt_thread_self();
    }
    return !thread || RT_SCHED_CTX(thread).bind_cpu != RT_CPUS_NR;
}
#else
#define rt_sched_thread_is_binding(thread) (RT_TRUE)
#endif
/**@}*/

#ifdef __cplusplus
}
#endif

#endif /* __RT_THREAD_H__ */