rtthread.h

/*
 * Copyright (c) 2006-2024 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-18     Bernard      the first version
 * 2006-04-26     Bernard      add semaphore APIs
 * 2006-08-10     Bernard      add version information
 * 2007-01-28     Bernard      rename RT_OBJECT_Class_Static to RT_Object_Class_Static
 * 2007-03-03     Bernard      clean up the definitions to rtdef.h
 * 2010-04-11     yi.qiu       add module feature
 * 2013-06-24     Bernard      add rt_kprintf re-define when not use RT_USING_CONSOLE.
 * 2016-08-09     ArdaFu       add new thread and interrupt hook.
 * 2018-11-22     Jesven       add all cpu's lock and ipi handler
 * 2021-02-28     Meco Man     add RT_KSERVICE_USING_STDLIB
 * 2021-11-14     Meco Man     add rtlegacy.h for compatibility
 * 2022-06-04     Meco Man     remove strnlen
 * 2023-05-20     Bernard      add rtatomic.h header file to included files.
 * 2023-06-30     ChuShicheng  move debug check from the rtdebug.h
 * 2023-10-16     Shell        Support a new backtrace framework
 * 2023-12-10     xqyjlj       fix spinlock in up
 * 2024-01-25     Shell        Add rt_susp_list for IPC primitives
 * 2024-03-10     Meco Man     move std libc related functions to rtklibc
 */

#ifndef __RT_THREAD_H__
#define __RT_THREAD_H__

#include <rtconfig.h>
#include <rtdef.h>
#include <rtservice.h>
#include <rtm.h>
#include <rtatomic.h>
#include <rtklibc.h>

#ifdef RT_USING_LEGACY
#include <rtlegacy.h>
#endif
#ifdef RT_USING_FINSH
#include <finsh.h>
#endif /* RT_USING_FINSH */

#ifdef __cplusplus
extern "C" {
#endif

#ifdef __GNUC__
int entry(void);
#endif

/*
 * kernel object interface
 */
struct rt_object_information *
rt_object_get_information(enum rt_object_class_type type);
int rt_object_get_length(enum rt_object_class_type type);
int rt_object_get_pointers(enum rt_object_class_type type, rt_object_t *pointers, int maxlen);
void rt_object_init(struct rt_object *object,
                    enum rt_object_class_type type,
                    const char *name);
void rt_object_detach(rt_object_t object);
#ifdef RT_USING_HEAP
rt_object_t rt_object_allocate(enum rt_object_class_type type, const char *name);
void rt_object_delete(rt_object_t object);

/* custom object */
rt_object_t rt_custom_object_create(const char *name, void *data, rt_err_t (*data_destroy)(void *));
rt_err_t rt_custom_object_destroy(rt_object_t obj);
#endif /* RT_USING_HEAP */
rt_bool_t rt_object_is_systemobject(rt_object_t object);
rt_uint8_t rt_object_get_type(rt_object_t object);
rt_err_t rt_object_for_each(rt_uint8_t type, rt_object_iter_t iter, void *data);
rt_object_t rt_object_find(const char *name, rt_uint8_t type);
rt_err_t rt_object_get_name(rt_object_t object, char *name, rt_uint8_t name_size);

#ifdef RT_USING_HOOK
void rt_object_attach_sethook(void (*hook)(struct rt_object *object));
void rt_object_detach_sethook(void (*hook)(struct rt_object *object));
void rt_object_trytake_sethook(void (*hook)(struct rt_object *object));
void rt_object_take_sethook(void (*hook)(struct rt_object *object));
void rt_object_put_sethook(void (*hook)(struct rt_object *object));
#endif /* RT_USING_HOOK */

/**
 * @addtogroup group_clock_management
 * @{
 */

/*
 * clock & timer interface
 */
rt_tick_t rt_tick_get(void);
rt_tick_t rt_tick_get_delta(rt_tick_t base);
void rt_tick_set(rt_tick_t tick);
void rt_tick_increase(void);
void rt_tick_increase_tick(rt_tick_t tick);
rt_tick_t rt_tick_from_millisecond(rt_int32_t ms);
rt_tick_t rt_tick_get_millisecond(void);
#ifdef RT_USING_HOOK
void rt_tick_sethook(void (*hook)(void));
#endif /* RT_USING_HOOK */

void rt_system_timer_init(void);
void rt_system_timer_thread_init(void);

void rt_timer_init(rt_timer_t timer,
                   const char *name,
                   void (*timeout)(void *parameter),
                   void *parameter,
                   rt_tick_t time,
                   rt_uint8_t flag);
rt_err_t rt_timer_detach(rt_timer_t timer);
#ifdef RT_USING_HEAP
rt_timer_t rt_timer_create(const char *name,
                           void (*timeout)(void *parameter),
                           void *parameter,
                           rt_tick_t time,
                           rt_uint8_t flag);
rt_err_t rt_timer_delete(rt_timer_t timer);
#endif /* RT_USING_HEAP */
rt_err_t rt_timer_start(rt_timer_t timer);
rt_err_t rt_timer_stop(rt_timer_t timer);
rt_err_t rt_timer_control(rt_timer_t timer, int cmd, void *arg);

rt_tick_t rt_timer_next_timeout_tick(void);
void rt_timer_check(void);

#ifdef RT_USING_HOOK
void rt_timer_enter_sethook(void (*hook)(struct rt_timer *timer));
void rt_timer_exit_sethook(void (*hook)(struct rt_timer *timer));
#endif /* RT_USING_HOOK */

/**@}*/

/*
 * thread interface
 */
rt_err_t rt_thread_init(struct rt_thread *thread,
                        const char *name,
                        void (*entry)(void *parameter),
                        void *parameter,
                        void *stack_start,
                        rt_uint32_t stack_size,
                        rt_uint8_t priority,
                        rt_uint32_t tick);
rt_err_t rt_thread_detach(rt_thread_t thread);
#ifdef RT_USING_HEAP
rt_thread_t rt_thread_create(const char *name,
                             void (*entry)(void *parameter),
                             void *parameter,
                             rt_uint32_t stack_size,
                             rt_uint8_t priority,
                             rt_uint32_t tick);
rt_err_t rt_thread_delete(rt_thread_t thread);
#endif /* RT_USING_HEAP */
rt_err_t rt_thread_close(rt_thread_t thread);
rt_thread_t rt_thread_self(void);
rt_thread_t rt_thread_find(char *name);
rt_err_t rt_thread_startup(rt_thread_t thread);
rt_err_t rt_thread_yield(void);
rt_err_t rt_thread_delay(rt_tick_t tick);
rt_err_t rt_thread_delay_until(rt_tick_t *tick, rt_tick_t inc_tick);
rt_err_t rt_thread_mdelay(rt_int32_t ms);
rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg);
rt_err_t rt_thread_suspend(rt_thread_t thread);
rt_err_t rt_thread_suspend_with_flag(rt_thread_t thread, int suspend_flag);
rt_err_t rt_thread_resume(rt_thread_t thread);
#ifdef RT_USING_SMART
rt_err_t rt_thread_wakeup(rt_thread_t thread);
void rt_thread_wakeup_set(struct rt_thread *thread, rt_wakeup_func_t func, void *user_data);
#endif /* RT_USING_SMART */
rt_err_t rt_thread_get_name(rt_thread_t thread, char *name, rt_uint8_t name_size);
#ifdef RT_USING_CPU_USAGE_TRACER
rt_uint8_t rt_thread_get_usage(rt_thread_t thread);
#endif /* RT_USING_CPU_USAGE_TRACER */

#ifdef RT_USING_SIGNALS
void rt_thread_alloc_sig(rt_thread_t tid);
void rt_thread_free_sig(rt_thread_t tid);
int rt_thread_kill(rt_thread_t tid, int sig);
#endif /* RT_USING_SIGNALS */

#ifdef RT_USING_HOOK
void rt_thread_suspend_sethook(void (*hook)(rt_thread_t thread));
void rt_thread_resume_sethook(void (*hook)(rt_thread_t thread));

/**
 * @ingroup group_thread_management
 *
 * @brief Sets a hook function to be called when a thread is initialized.
 *
 * @param thread is the target thread being initialized
 */
typedef void (*rt_thread_inited_hookproto_t)(rt_thread_t thread);
RT_OBJECT_HOOKLIST_DECLARE(rt_thread_inited_hookproto_t, rt_thread_inited);
#endif /* RT_USING_HOOK */

/*
 * idle thread interface
 */
void rt_thread_idle_init(void);
#if defined(RT_USING_HOOK) || defined(RT_USING_IDLE_HOOK)
// FIXME: The doxygen comment for rt_thread_idle_sethook has to be written here,
// not in src/idle.c, because the `rt_align(RT_ALIGN_SIZE)` in src/idle.c
// makes the doxygen build fail.
/**
 * @ingroup group_thread_management
 *
 * @brief This function sets a hook function for the idle thread loop. The hook
 * is invoked each time the system runs the idle loop.
 *
 * @param hook the specified hook function.
 *
 * @return `RT_EOK`: set OK.
 *         `-RT_EFULL`: hook list is full.
 *
 * @note the hook function must be simple and must never block or suspend.
 */
rt_err_t rt_thread_idle_sethook(void (*hook)(void));
rt_err_t rt_thread_idle_delhook(void (*hook)(void));
#endif /* defined(RT_USING_HOOK) || defined(RT_USING_IDLE_HOOK) */
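
/*
 * Usage sketch (illustrative only, not part of the original header): register
 * a short, non-blocking idle hook. feed_watchdog() is a hypothetical user
 * function standing in for any lightweight background work.
 *
 *   static void idle_hook(void)
 *   {
 *       feed_watchdog();  // the hook must never block or suspend
 *   }
 *
 *   rt_thread_idle_sethook(idle_hook);  // returns -RT_EFULL if the hook list is full
 *   // ... later, when no longer needed:
 *   rt_thread_idle_delhook(idle_hook);
 */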
rt_thread_t rt_thread_idle_gethandler(void);

/*
 * schedule service
 */
void rt_system_scheduler_init(void);
void rt_system_scheduler_start(void);

void rt_schedule(void);
void rt_scheduler_do_irq_switch(void *context);

#ifdef RT_USING_OVERFLOW_CHECK
void rt_scheduler_stack_check(struct rt_thread *thread);
#define RT_SCHEDULER_STACK_CHECK(thr) rt_scheduler_stack_check(thr)
#else /* !RT_USING_OVERFLOW_CHECK */
#define RT_SCHEDULER_STACK_CHECK(thr)
#endif /* RT_USING_OVERFLOW_CHECK */

rt_base_t rt_enter_critical(void);
void rt_exit_critical(void);
void rt_exit_critical_safe(rt_base_t critical_level);
rt_uint16_t rt_critical_level(void);

#ifdef RT_USING_HOOK
void rt_scheduler_stack_overflow_sethook(rt_err_t (*hook)(struct rt_thread *thread));
void rt_scheduler_sethook(void (*hook)(rt_thread_t from, rt_thread_t to));
void rt_scheduler_switch_sethook(void (*hook)(struct rt_thread *tid));
#endif /* RT_USING_HOOK */

#ifdef RT_USING_SMP
void rt_secondary_cpu_entry(void);
void rt_scheduler_ipi_handler(int vector, void *param);
#endif /* RT_USING_SMP */

/**
 * @addtogroup group_signal
 * @{
 */
#ifdef RT_USING_SIGNALS
void rt_signal_mask(int signo);
void rt_signal_unmask(int signo);
void *rt_signal_check(void *context);
rt_sighandler_t rt_signal_install(int signo, rt_sighandler_t handler);
int rt_signal_wait(const rt_sigset_t *set, rt_siginfo_t *si, rt_int32_t timeout);
int rt_system_signal_init(void);
#endif /* RT_USING_SIGNALS */
/**@}*/

/**
 * @addtogroup group_memory_management
 * @{
 */

/*
 * memory management interface
 */
#ifdef RT_USING_MEMPOOL
/*
 * memory pool interface
 */
rt_err_t rt_mp_init(struct rt_mempool *mp,
                    const char *name,
                    void *start,
                    rt_size_t size,
                    rt_size_t block_size);
rt_err_t rt_mp_detach(struct rt_mempool *mp);
#ifdef RT_USING_HEAP
rt_mp_t rt_mp_create(const char *name,
                     rt_size_t block_count,
                     rt_size_t block_size);
rt_err_t rt_mp_delete(rt_mp_t mp);
#endif /* RT_USING_HEAP */
void *rt_mp_alloc(rt_mp_t mp, rt_int32_t time);
void rt_mp_free(void *block);
#ifdef RT_USING_HOOK
void rt_mp_alloc_sethook(void (*hook)(struct rt_mempool *mp, void *block));
void rt_mp_free_sethook(void (*hook)(struct rt_mempool *mp, void *block));
#endif /* RT_USING_HOOK */
#endif /* RT_USING_MEMPOOL */

#ifdef RT_USING_HEAP
/*
 * heap memory interface
 */
void rt_system_heap_init(void *begin_addr, void *end_addr);
void rt_system_heap_init_generic(void *begin_addr, void *end_addr);

void *rt_malloc(rt_size_t size);
void rt_free(void *ptr);
void *rt_realloc(void *ptr, rt_size_t newsize);
void *rt_calloc(rt_size_t count, rt_size_t size);
void *rt_malloc_align(rt_size_t size, rt_size_t align);
void rt_free_align(void *ptr);

void rt_memory_info(rt_size_t *total,
                    rt_size_t *used,
                    rt_size_t *max_used);

#if defined(RT_USING_SLAB) && defined(RT_USING_SLAB_AS_HEAP)
void *rt_page_alloc(rt_size_t npages);
void rt_page_free(void *addr, rt_size_t npages);
#endif /* defined(RT_USING_SLAB) && defined(RT_USING_SLAB_AS_HEAP) */

/**
 * @ingroup group_hook
 * @{
 */
#ifdef RT_USING_HOOK
void rt_malloc_sethook(void (*hook)(void **ptr, rt_size_t size));
void rt_realloc_set_entry_hook(void (*hook)(void **ptr, rt_size_t size));
void rt_realloc_set_exit_hook(void (*hook)(void **ptr, rt_size_t size));
void rt_free_sethook(void (*hook)(void **ptr));
#endif /* RT_USING_HOOK */
/**@}*/
#endif /* RT_USING_HEAP */

#ifdef RT_USING_SMALL_MEM
/**
 * small memory object interface
 */
rt_smem_t rt_smem_init(const char *name,
                       void *begin_addr,
                       rt_size_t size);
rt_err_t rt_smem_detach(rt_smem_t m);
void *rt_smem_alloc(rt_smem_t m, rt_size_t size);
void *rt_smem_realloc(rt_smem_t m, void *rmem, rt_size_t newsize);
void rt_smem_free(void *rmem);
#endif /* RT_USING_SMALL_MEM */

#ifdef RT_USING_MEMHEAP
/**
 * memory heap object interface
 */
rt_err_t rt_memheap_init(struct rt_memheap *memheap,
                         const char *name,
                         void *start_addr,
                         rt_size_t size);
rt_err_t rt_memheap_detach(struct rt_memheap *heap);
void *rt_memheap_alloc(struct rt_memheap *heap, rt_size_t size);
void *rt_memheap_realloc(struct rt_memheap *heap, void *ptr, rt_size_t newsize);
void rt_memheap_free(void *ptr);
void rt_memheap_info(struct rt_memheap *heap,
                     rt_size_t *total,
                     rt_size_t *used,
                     rt_size_t *max_used);
#endif /* RT_USING_MEMHEAP */

#ifdef RT_USING_MEMHEAP_AS_HEAP
/**
 * memory heap as heap
 */
void *_memheap_alloc(struct rt_memheap *heap, rt_size_t size);
void _memheap_free(void *rmem);
void *_memheap_realloc(struct rt_memheap *heap, void *rmem, rt_size_t newsize);
#endif

#ifdef RT_USING_SLAB
/**
 * slab object interface
 */
rt_slab_t rt_slab_init(const char *name, void *begin_addr, rt_size_t size);
rt_err_t rt_slab_detach(rt_slab_t m);
void *rt_slab_page_alloc(rt_slab_t m, rt_size_t npages);
void rt_slab_page_free(rt_slab_t m, void *addr, rt_size_t npages);
void *rt_slab_alloc(rt_slab_t m, rt_size_t size);
void *rt_slab_realloc(rt_slab_t m, void *ptr, rt_size_t size);
void rt_slab_free(rt_slab_t m, void *ptr);
#endif /* RT_USING_SLAB */

/**@}*/

/**
 * @addtogroup group_thread_comm
 * @{
 */

/**
 * Suspend list - a basic building block for IPC primitives that interacts with
 * the scheduler directly. Its API is similar to a FIFO list.
 *
 * Note: do not use it directly in application code.
 */
void rt_susp_list_print(rt_list_t *list);
/* reserve thread error while resuming it */
#define RT_THREAD_RESUME_RES_THR_ERR (-1)
struct rt_thread *rt_susp_list_dequeue(rt_list_t *susp_list, rt_err_t thread_error);
rt_err_t rt_susp_list_resume_all(rt_list_t *susp_list, rt_err_t thread_error);
rt_err_t rt_susp_list_resume_all_irq(rt_list_t *susp_list,
                                     rt_err_t thread_error,
                                     struct rt_spinlock *lock);
/* suspend and enqueue */
rt_err_t rt_thread_suspend_to_list(rt_thread_t thread, rt_list_t *susp_list, int ipc_flags, int suspend_flag);
/* only for a suspended thread, and the caller must hold the scheduler lock */
rt_err_t rt_susp_list_enqueue(rt_list_t *susp_list, rt_thread_t thread, int ipc_flags);
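
/*
 * Kernel-side sketch (illustrative only; applications must not call these
 * directly): a blocking primitive parks the caller on a suspend list, and a
 * later signal wakes every waiter. `waiters` is a hypothetical rt_list_t;
 * locking and error handling are omitted.
 *
 *   // waiter side, inside an IPC primitive:
 *   rt_thread_suspend_to_list(rt_thread_self(), &waiters,
 *                             RT_IPC_FLAG_FIFO, RT_UNINTERRUPTIBLE);
 *   rt_schedule();                            // give up the CPU until resumed
 *
 *   // signaling side:
 *   rt_susp_list_resume_all(&waiters, RT_EOK);
 */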
/**
 * @addtogroup group_semaphore Semaphore
 * @{
 */
#ifdef RT_USING_SEMAPHORE
/*
 * semaphore interface
 */
rt_err_t rt_sem_init(rt_sem_t sem,
                     const char *name,
                     rt_uint32_t value,
                     rt_uint8_t flag);
rt_err_t rt_sem_detach(rt_sem_t sem);
#ifdef RT_USING_HEAP
rt_sem_t rt_sem_create(const char *name, rt_uint32_t value, rt_uint8_t flag);
rt_err_t rt_sem_delete(rt_sem_t sem);
#endif /* RT_USING_HEAP */
rt_err_t rt_sem_take(rt_sem_t sem, rt_int32_t timeout);
rt_err_t rt_sem_take_interruptible(rt_sem_t sem, rt_int32_t timeout);
rt_err_t rt_sem_take_killable(rt_sem_t sem, rt_int32_t timeout);
rt_err_t rt_sem_trytake(rt_sem_t sem);
rt_err_t rt_sem_release(rt_sem_t sem);
rt_err_t rt_sem_control(rt_sem_t sem, int cmd, void *arg);
#endif /* RT_USING_SEMAPHORE */
/**@}*/

/**
 * @addtogroup group_mutex Mutex
 * @{
 */
#ifdef RT_USING_MUTEX
/*
 * mutex interface
 */
rt_err_t rt_mutex_init(rt_mutex_t mutex, const char *name, rt_uint8_t flag);
rt_err_t rt_mutex_detach(rt_mutex_t mutex);
#ifdef RT_USING_HEAP
rt_mutex_t rt_mutex_create(const char *name, rt_uint8_t flag);
rt_err_t rt_mutex_delete(rt_mutex_t mutex);
#endif /* RT_USING_HEAP */
void rt_mutex_drop_thread(rt_mutex_t mutex, rt_thread_t thread);
rt_uint8_t rt_mutex_setprioceiling(rt_mutex_t mutex, rt_uint8_t priority);
rt_uint8_t rt_mutex_getprioceiling(rt_mutex_t mutex);
rt_err_t rt_mutex_take(rt_mutex_t mutex, rt_int32_t timeout);
rt_err_t rt_mutex_trytake(rt_mutex_t mutex);
rt_err_t rt_mutex_take_interruptible(rt_mutex_t mutex, rt_int32_t time);
rt_err_t rt_mutex_take_killable(rt_mutex_t mutex, rt_int32_t time);
rt_err_t rt_mutex_release(rt_mutex_t mutex);
rt_err_t rt_mutex_control(rt_mutex_t mutex, int cmd, void *arg);

rt_inline rt_thread_t rt_mutex_get_owner(rt_mutex_t mutex)
{
    return mutex->owner;
}
rt_inline rt_ubase_t rt_mutex_get_hold(rt_mutex_t mutex)
{
    return mutex->hold;
}
#endif /* RT_USING_MUTEX */
/**@}*/

/**
 * @addtogroup group_event Event
 * @{
 */
#ifdef RT_USING_EVENT
/*
 * event interface
 */
rt_err_t rt_event_init(rt_event_t event, const char *name, rt_uint8_t flag);
rt_err_t rt_event_detach(rt_event_t event);
#ifdef RT_USING_HEAP
rt_event_t rt_event_create(const char *name, rt_uint8_t flag);
rt_err_t rt_event_delete(rt_event_t event);
#endif /* RT_USING_HEAP */
rt_err_t rt_event_send(rt_event_t event, rt_uint32_t set);
rt_err_t rt_event_recv(rt_event_t event,
                       rt_uint32_t set,
                       rt_uint8_t opt,
                       rt_int32_t timeout,
                       rt_uint32_t *recved);
rt_err_t rt_event_recv_interruptible(rt_event_t event,
                                     rt_uint32_t set,
                                     rt_uint8_t opt,
                                     rt_int32_t timeout,
                                     rt_uint32_t *recved);
rt_err_t rt_event_recv_killable(rt_event_t event,
                                rt_uint32_t set,
                                rt_uint8_t opt,
                                rt_int32_t timeout,
                                rt_uint32_t *recved);
rt_err_t rt_event_control(rt_event_t event, int cmd, void *arg);
#endif /* RT_USING_EVENT */
/**@}*/

/**
 * @addtogroup group_mailbox MailBox
 * @{
 */
#ifdef RT_USING_MAILBOX
/*
 * mailbox interface
 */
rt_err_t rt_mb_init(rt_mailbox_t mb,
                    const char *name,
                    void *msgpool,
                    rt_size_t size,
                    rt_uint8_t flag);
rt_err_t rt_mb_detach(rt_mailbox_t mb);
#ifdef RT_USING_HEAP
rt_mailbox_t rt_mb_create(const char *name, rt_size_t size, rt_uint8_t flag);
rt_err_t rt_mb_delete(rt_mailbox_t mb);
#endif /* RT_USING_HEAP */
rt_err_t rt_mb_send(rt_mailbox_t mb, rt_ubase_t value);
rt_err_t rt_mb_send_interruptible(rt_mailbox_t mb, rt_ubase_t value);
rt_err_t rt_mb_send_killable(rt_mailbox_t mb, rt_ubase_t value);
rt_err_t rt_mb_send_wait(rt_mailbox_t mb,
                         rt_ubase_t value,
                         rt_int32_t timeout);
rt_err_t rt_mb_send_wait_interruptible(rt_mailbox_t mb,
                                       rt_ubase_t value,
                                       rt_int32_t timeout);
rt_err_t rt_mb_send_wait_killable(rt_mailbox_t mb,
                                  rt_ubase_t value,
                                  rt_int32_t timeout);
rt_err_t rt_mb_urgent(rt_mailbox_t mb, rt_ubase_t value);
rt_err_t rt_mb_recv(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout);
rt_err_t rt_mb_recv_interruptible(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout);
rt_err_t rt_mb_recv_killable(rt_mailbox_t mb, rt_ubase_t *value, rt_int32_t timeout);
rt_err_t rt_mb_control(rt_mailbox_t mb, int cmd, void *arg);
#endif /* RT_USING_MAILBOX */
/**@}*/

/**
 * @addtogroup group_messagequeue Message Queue
 * @{
 */
#ifdef RT_USING_MESSAGEQUEUE
struct rt_mq_message
{
    struct rt_mq_message *next;
    rt_ssize_t length;
#ifdef RT_USING_MESSAGEQUEUE_PRIORITY
    rt_int32_t prio;
#endif /* RT_USING_MESSAGEQUEUE_PRIORITY */
};

#define RT_MQ_BUF_SIZE(msg_size, max_msgs) \
    ((RT_ALIGN((msg_size), RT_ALIGN_SIZE) + sizeof(struct rt_mq_message)) * (max_msgs))
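
/*
 * Sizing sketch (illustrative only): RT_MQ_BUF_SIZE() computes the pool size
 * needed for max_msgs messages of msg_size bytes, including the per-message
 * rt_mq_message header. The names below are hypothetical.
 *
 *   #define DEMO_MSG_SIZE  32
 *   #define DEMO_MSG_COUNT 8
 *
 *   static struct rt_messagequeue demo_mq;
 *   static rt_uint8_t demo_mq_pool[RT_MQ_BUF_SIZE(DEMO_MSG_SIZE, DEMO_MSG_COUNT)];
 *
 *   rt_mq_init(&demo_mq, "demo", demo_mq_pool,
 *              DEMO_MSG_SIZE, sizeof(demo_mq_pool), RT_IPC_FLAG_FIFO);
 */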
/*
 * message queue interface
 */
rt_err_t rt_mq_init(rt_mq_t mq,
                    const char *name,
                    void *msgpool,
                    rt_size_t msg_size,
                    rt_size_t pool_size,
                    rt_uint8_t flag);
rt_err_t rt_mq_detach(rt_mq_t mq);
#ifdef RT_USING_HEAP
rt_mq_t rt_mq_create(const char *name,
                     rt_size_t msg_size,
                     rt_size_t max_msgs,
                     rt_uint8_t flag);
rt_err_t rt_mq_delete(rt_mq_t mq);
#endif /* RT_USING_HEAP */
rt_err_t rt_mq_send(rt_mq_t mq, const void *buffer, rt_size_t size);
rt_err_t rt_mq_send_interruptible(rt_mq_t mq, const void *buffer, rt_size_t size);
rt_err_t rt_mq_send_killable(rt_mq_t mq, const void *buffer, rt_size_t size);
rt_err_t rt_mq_send_wait(rt_mq_t mq,
                         const void *buffer,
                         rt_size_t size,
                         rt_int32_t timeout);
rt_err_t rt_mq_send_wait_interruptible(rt_mq_t mq,
                                       const void *buffer,
                                       rt_size_t size,
                                       rt_int32_t timeout);
rt_err_t rt_mq_send_wait_killable(rt_mq_t mq,
                                  const void *buffer,
                                  rt_size_t size,
                                  rt_int32_t timeout);
rt_err_t rt_mq_urgent(rt_mq_t mq, const void *buffer, rt_size_t size);
rt_ssize_t rt_mq_recv(rt_mq_t mq,
                      void *buffer,
                      rt_size_t size,
                      rt_int32_t timeout);
rt_ssize_t rt_mq_recv_interruptible(rt_mq_t mq,
                                    void *buffer,
                                    rt_size_t size,
                                    rt_int32_t timeout);
rt_ssize_t rt_mq_recv_killable(rt_mq_t mq,
                               void *buffer,
                               rt_size_t size,
                               rt_int32_t timeout);
rt_err_t rt_mq_control(rt_mq_t mq, int cmd, void *arg);

#ifdef RT_USING_MESSAGEQUEUE_PRIORITY
rt_err_t rt_mq_send_wait_prio(rt_mq_t mq,
                              const void *buffer,
                              rt_size_t size,
                              rt_int32_t prio,
                              rt_int32_t timeout,
                              int suspend_flag);
rt_ssize_t rt_mq_recv_prio(rt_mq_t mq,
                           void *buffer,
                           rt_size_t size,
                           rt_int32_t *prio,
                           rt_int32_t timeout,
                           int suspend_flag);
#endif /* RT_USING_MESSAGEQUEUE_PRIORITY */
#endif /* RT_USING_MESSAGEQUEUE */
/**@}*/

/* defunct */
void rt_thread_defunct_init(void);
void rt_thread_defunct_enqueue(rt_thread_t thread);
rt_thread_t rt_thread_defunct_dequeue(void);
void rt_defunct_execute(void);

/*
 * spinlock
 */
struct rt_spinlock;

void rt_spin_lock_init(struct rt_spinlock *lock);
void rt_spin_lock(struct rt_spinlock *lock);
void rt_spin_unlock(struct rt_spinlock *lock);
rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock);
void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level);

/**@}*/

#ifdef RT_USING_DEVICE
/**
 * @addtogroup group_device_driver
 * @{
 */

/*
 * device (I/O) system interface
 */
rt_device_t rt_device_find(const char *name);

rt_err_t rt_device_register(rt_device_t dev,
                            const char *name,
                            rt_uint16_t flags);
rt_err_t rt_device_unregister(rt_device_t dev);

#ifdef RT_USING_HEAP
rt_device_t rt_device_create(int type, int attach_size);
void rt_device_destroy(rt_device_t device);
#endif /* RT_USING_HEAP */

rt_err_t
rt_device_set_rx_indicate(rt_device_t dev,
                          rt_err_t (*rx_ind)(rt_device_t dev, rt_size_t size));
rt_err_t
rt_device_set_tx_complete(rt_device_t dev,
                          rt_err_t (*tx_done)(rt_device_t dev, void *buffer));

rt_err_t rt_device_init(rt_device_t dev);
rt_err_t rt_device_open(rt_device_t dev, rt_uint16_t oflag);
rt_err_t rt_device_close(rt_device_t dev);
rt_ssize_t rt_device_read(rt_device_t dev,
                          rt_off_t pos,
                          void *buffer,
                          rt_size_t size);
rt_ssize_t rt_device_write(rt_device_t dev,
                           rt_off_t pos,
                           const void *buffer,
                           rt_size_t size);
rt_err_t rt_device_control(rt_device_t dev, int cmd, void *arg);
/**@}*/
#endif /* RT_USING_DEVICE */

/*
 * interrupt service
 */

/*
 * rt_interrupt_enter and rt_interrupt_leave can only be called by the BSP
 */
void rt_interrupt_enter(void);
void rt_interrupt_leave(void);
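
/*
 * BSP-side sketch (illustrative only): an interrupt service routine brackets
 * its body with rt_interrupt_enter()/rt_interrupt_leave() so the kernel knows
 * it is running in interrupt context. The system tick ISR is a typical case;
 * the handler name below is hypothetical and port-specific.
 *
 *   void systick_handler(void)
 *   {
 *       rt_interrupt_enter();
 *       rt_tick_increase();   // advance the kernel tick
 *       rt_interrupt_leave();
 *   }
 */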
void rt_interrupt_context_push(rt_interrupt_context_t this_ctx);
void rt_interrupt_context_pop(void);
void *rt_interrupt_context_get(void);

/**
 * CPU object
 */
struct rt_cpu *rt_cpu_self(void);
struct rt_cpu *rt_cpu_index(int index);

#ifdef RT_USING_SMP
/*
 * smp cpus lock service
 */
rt_base_t rt_cpus_lock(void);
void rt_cpus_unlock(rt_base_t level);
void rt_cpus_lock_status_restore(struct rt_thread *thread);

#ifdef RT_USING_DEBUG
rt_base_t rt_cpu_get_id(void);
#else /* !RT_USING_DEBUG */
#define rt_cpu_get_id rt_hw_cpu_id
#endif /* RT_USING_DEBUG */

#else /* !RT_USING_SMP */
#define rt_cpu_get_id() (0)
#endif /* RT_USING_SMP */

/*
 * the number of nested interrupts.
 */
rt_uint8_t rt_interrupt_get_nest(void);

#ifdef RT_USING_HOOK
void rt_interrupt_enter_sethook(void (*hook)(void));
void rt_interrupt_leave_sethook(void (*hook)(void));
#endif /* RT_USING_HOOK */

#ifdef RT_USING_COMPONENTS_INIT
void rt_components_init(void);
void rt_components_board_init(void);
#endif /* RT_USING_COMPONENTS_INIT */

/**
 * @addtogroup group_kernel_service
 * @{
 */

/*
 * general kernel service
 */
#ifndef RT_USING_CONSOLE
#define rt_kprintf(...)
#define rt_kputs(str)
#else
int rt_kprintf(const char *fmt, ...);
void rt_kputs(const char *str);
#endif /* RT_USING_CONSOLE */

rt_err_t rt_backtrace(void);
rt_err_t rt_backtrace_thread(rt_thread_t thread);
rt_err_t rt_backtrace_frame(rt_thread_t thread, struct rt_hw_backtrace_frame *frame);
rt_err_t rt_backtrace_formatted_print(rt_ubase_t *buffer, long buflen);
rt_err_t rt_backtrace_to_buffer(rt_thread_t thread, struct rt_hw_backtrace_frame *frame,
                                long skip, rt_ubase_t *buffer, long buflen);

#if defined(RT_USING_DEVICE) && defined(RT_USING_CONSOLE)
rt_device_t rt_console_set_device(const char *name);
rt_device_t rt_console_get_device(void);
#ifdef RT_USING_THREADSAFE_PRINTF
rt_thread_t rt_console_current_user(void);
#else
rt_inline void *rt_console_current_user(void) { return RT_NULL; }
#endif /* RT_USING_THREADSAFE_PRINTF */
#endif /* defined(RT_USING_DEVICE) && defined(RT_USING_CONSOLE) */

int __rt_ffs(int value);
unsigned long __rt_ffsl(unsigned long value);
unsigned long __rt_clz(unsigned long value);

void rt_show_version(void);

#ifdef RT_DEBUGING_ASSERT
extern void (*rt_assert_hook)(const char *ex, const char *func, rt_size_t line);
void rt_assert_set_hook(void (*hook)(const char *ex, const char *func, rt_size_t line));
void rt_assert_handler(const char *ex, const char *func, rt_size_t line);

#define RT_ASSERT(EX)                                           \
if (!(EX))                                                      \
{                                                               \
    rt_assert_handler(#EX, __FUNCTION__, __LINE__);             \
}
#else
#define RT_ASSERT(EX) {RT_UNUSED(EX);}
#endif /* RT_DEBUGING_ASSERT */
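
/*
 * Usage sketch (illustrative only): RT_ASSERT() checks an invariant and, when
 * RT_DEBUGING_ASSERT is enabled, routes failures to rt_assert_handler() (or to
 * a hook installed with rt_assert_set_hook()); otherwise it only marks the
 * expression as unused. The function below is hypothetical.
 *
 *   void demo(rt_device_t dev)
 *   {
 *       RT_ASSERT(dev != RT_NULL);
 *       // ...
 *   }
 */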
#ifdef RT_DEBUGING_CONTEXT
/* Macro to check current context */
#define RT_DEBUG_NOT_IN_INTERRUPT                                             \
do                                                                            \
{                                                                             \
    if (rt_interrupt_get_nest() != 0)                                         \
    {                                                                         \
        rt_kprintf("Function[%s] shall not be used in ISR\n", __FUNCTION__);  \
        RT_ASSERT(0)                                                          \
    }                                                                         \
}                                                                             \
while (0)

/* "In thread context" means:
 *   1) the scheduler has been started
 *   2) not in interrupt context.
 */
#define RT_DEBUG_IN_THREAD_CONTEXT                                            \
do                                                                            \
{                                                                             \
    if (rt_thread_self() == RT_NULL)                                          \
    {                                                                         \
        rt_kprintf("Function[%s] shall not be used before scheduler start\n", \
                   __FUNCTION__);                                             \
        RT_ASSERT(0)                                                          \
    }                                                                         \
    RT_DEBUG_NOT_IN_INTERRUPT;                                                \
}                                                                             \
while (0)

/* "scheduler available" means:
 *   1) the scheduler has been started.
 *   2) not in interrupt context.
 *   3) scheduler is not locked.
 */
#define RT_DEBUG_SCHEDULER_AVAILABLE(need_check)                              \
do                                                                            \
{                                                                             \
    if (need_check)                                                           \
    {                                                                         \
        if (rt_critical_level() != 0)                                         \
        {                                                                     \
            rt_kprintf("Function[%s]: scheduler is not available\n",          \
                       __FUNCTION__);                                         \
            RT_ASSERT(0)                                                      \
        }                                                                     \
        RT_DEBUG_IN_THREAD_CONTEXT;                                           \
    }                                                                         \
}                                                                             \
while (0)
#else
#define RT_DEBUG_NOT_IN_INTERRUPT
#define RT_DEBUG_IN_THREAD_CONTEXT
#define RT_DEBUG_SCHEDULER_AVAILABLE(need_check)
#endif /* RT_DEBUGING_CONTEXT */
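
/*
 * Usage sketch (illustrative only): a function that may block can guard
 * itself with the context-check macros; they expand to nothing unless
 * RT_DEBUGING_CONTEXT is enabled. The function below is hypothetical.
 *
 *   void demo_blocking_call(void)
 *   {
 *       RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);  // must be able to reschedule here
 *       // ...
 *   }
 */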
rt_inline rt_bool_t rt_in_thread_context(void)
{
    return rt_thread_self() != RT_NULL && rt_interrupt_get_nest() == 0;
}

/* is the scheduler available */
rt_inline rt_bool_t rt_scheduler_is_available(void)
{
    return rt_critical_level() == 0 && rt_in_thread_context();
}

#ifdef RT_USING_SMP
/* is the thread bound to a core */
rt_inline rt_bool_t rt_sched_thread_is_binding(rt_thread_t thread)
{
    if (thread == RT_NULL)
    {
        thread = rt_thread_self();
    }
    return !thread || RT_SCHED_CTX(thread).bind_cpu != RT_CPUS_NR;
}
#else
#define rt_sched_thread_is_binding(thread) (RT_TRUE)
#endif

/**@}*/

#ifdef __cplusplus
}
#endif

#endif /* __RT_THREAD_H__ */