  1. /*
  2. * Copyright (c) 2006-2021, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2012-09-30 Bernard first version.
  9. * 2016-10-31 armink fix some resume push and pop thread bugs
  10. */
  11. #include <rtthread.h>
  12. #include <rtdevice.h>
  13. #include <rthw.h>
/* Magic value stamped on an initialized queue; every public API asserts it
 * so use of an uninitialized or deinitialized queue fails fast. */
#define DATAQUEUE_MAGIC 0xbead0e0e

/* One slot of the ring buffer: the queue stores a borrowed pointer plus its
 * length — it never copies the payload, so the producer must keep the data
 * alive until the consumer has popped it. */
struct rt_data_item
{
    const void *data_ptr;  /* payload pointer (not owned by the queue) */
    rt_size_t data_size;   /* payload size in bytes */
};
  20. rt_err_t
  21. rt_data_queue_init(struct rt_data_queue *queue,
  22. rt_uint16_t size,
  23. rt_uint16_t lwm,
  24. void (*evt_notify)(struct rt_data_queue *queue, rt_uint32_t event))
  25. {
  26. RT_ASSERT(queue != RT_NULL);
  27. RT_ASSERT(size > 0);
  28. queue->evt_notify = evt_notify;
  29. queue->magic = DATAQUEUE_MAGIC;
  30. queue->size = size;
  31. queue->lwm = lwm;
  32. queue->get_index = 0;
  33. queue->put_index = 0;
  34. queue->is_empty = 1;
  35. queue->is_full = 0;
  36. rt_list_init(&(queue->suspended_push_list));
  37. rt_list_init(&(queue->suspended_pop_list));
  38. queue->queue = (struct rt_data_item *)rt_malloc(sizeof(struct rt_data_item) * size);
  39. if (queue->queue == RT_NULL)
  40. {
  41. return -RT_ENOMEM;
  42. }
  43. return RT_EOK;
  44. }
  45. RTM_EXPORT(rt_data_queue_init);
  46. rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
  47. const void *data_ptr,
  48. rt_size_t data_size,
  49. rt_int32_t timeout)
  50. {
  51. rt_ubase_t level;
  52. rt_thread_t thread;
  53. rt_err_t result;
  54. RT_ASSERT(queue != RT_NULL);
  55. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  56. result = RT_EOK;
  57. thread = rt_thread_self();
  58. level = rt_hw_interrupt_disable();
  59. while (queue->is_full)
  60. {
  61. /* queue is full */
  62. if (timeout == 0)
  63. {
  64. result = -RT_ETIMEOUT;
  65. goto __exit;
  66. }
  67. /* current context checking */
  68. RT_DEBUG_NOT_IN_INTERRUPT;
  69. /* reset thread error number */
  70. thread->error = RT_EOK;
  71. /* suspend thread on the push list */
  72. rt_thread_suspend(thread);
  73. rt_list_insert_before(&(queue->suspended_push_list), &(thread->tlist));
  74. /* start timer */
  75. if (timeout > 0)
  76. {
  77. /* reset the timeout of thread timer and start it */
  78. rt_timer_control(&(thread->thread_timer),
  79. RT_TIMER_CTRL_SET_TIME,
  80. &timeout);
  81. rt_timer_start(&(thread->thread_timer));
  82. }
  83. /* enable interrupt */
  84. rt_hw_interrupt_enable(level);
  85. /* do schedule */
  86. rt_schedule();
  87. /* thread is waked up */
  88. result = thread->error;
  89. level = rt_hw_interrupt_disable();
  90. if (result != RT_EOK) goto __exit;
  91. }
  92. queue->queue[queue->put_index].data_ptr = data_ptr;
  93. queue->queue[queue->put_index].data_size = data_size;
  94. queue->put_index += 1;
  95. if (queue->put_index == queue->size)
  96. {
  97. queue->put_index = 0;
  98. }
  99. queue->is_empty = 0;
  100. if (queue->put_index == queue->get_index)
  101. {
  102. queue->is_full = 1;
  103. }
  104. /* there is at least one thread in suspended list */
  105. if (!rt_list_isempty(&(queue->suspended_pop_list)))
  106. {
  107. /* get thread entry */
  108. thread = rt_list_entry(queue->suspended_pop_list.next,
  109. struct rt_thread,
  110. tlist);
  111. /* resume it */
  112. rt_thread_resume(thread);
  113. rt_hw_interrupt_enable(level);
  114. /* perform a schedule */
  115. rt_schedule();
  116. return result;
  117. }
  118. __exit:
  119. rt_hw_interrupt_enable(level);
  120. if ((result == RT_EOK) && queue->evt_notify != RT_NULL)
  121. {
  122. queue->evt_notify(queue, RT_DATAQUEUE_EVENT_PUSH);
  123. }
  124. return result;
  125. }
  126. RTM_EXPORT(rt_data_queue_push);
  127. rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
  128. const void** data_ptr,
  129. rt_size_t *size,
  130. rt_int32_t timeout)
  131. {
  132. rt_ubase_t level;
  133. rt_thread_t thread;
  134. rt_err_t result;
  135. RT_ASSERT(queue != RT_NULL);
  136. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  137. RT_ASSERT(data_ptr != RT_NULL);
  138. RT_ASSERT(size != RT_NULL);
  139. result = RT_EOK;
  140. thread = rt_thread_self();
  141. level = rt_hw_interrupt_disable();
  142. while (queue->is_empty)
  143. {
  144. /* queue is empty */
  145. if (timeout == 0)
  146. {
  147. result = -RT_ETIMEOUT;
  148. goto __exit;
  149. }
  150. /* current context checking */
  151. RT_DEBUG_NOT_IN_INTERRUPT;
  152. /* reset thread error number */
  153. thread->error = RT_EOK;
  154. /* suspend thread on the pop list */
  155. rt_thread_suspend(thread);
  156. rt_list_insert_before(&(queue->suspended_pop_list), &(thread->tlist));
  157. /* start timer */
  158. if (timeout > 0)
  159. {
  160. /* reset the timeout of thread timer and start it */
  161. rt_timer_control(&(thread->thread_timer),
  162. RT_TIMER_CTRL_SET_TIME,
  163. &timeout);
  164. rt_timer_start(&(thread->thread_timer));
  165. }
  166. /* enable interrupt */
  167. rt_hw_interrupt_enable(level);
  168. /* do schedule */
  169. rt_schedule();
  170. /* thread is waked up */
  171. result = thread->error;
  172. level = rt_hw_interrupt_disable();
  173. if (result != RT_EOK)
  174. goto __exit;
  175. }
  176. *data_ptr = queue->queue[queue->get_index].data_ptr;
  177. *size = queue->queue[queue->get_index].data_size;
  178. queue->get_index += 1;
  179. if (queue->get_index == queue->size)
  180. {
  181. queue->get_index = 0;
  182. }
  183. queue->is_full = 0;
  184. if (queue->put_index == queue->get_index)
  185. {
  186. queue->is_empty = 1;
  187. }
  188. if (rt_data_queue_len(queue) <= queue->lwm)
  189. {
  190. /* there is at least one thread in suspended list */
  191. if (!rt_list_isempty(&(queue->suspended_push_list)))
  192. {
  193. /* get thread entry */
  194. thread = rt_list_entry(queue->suspended_push_list.next,
  195. struct rt_thread,
  196. tlist);
  197. /* resume it */
  198. rt_thread_resume(thread);
  199. rt_hw_interrupt_enable(level);
  200. /* perform a schedule */
  201. rt_schedule();
  202. }
  203. else
  204. {
  205. rt_hw_interrupt_enable(level);
  206. }
  207. if (queue->evt_notify != RT_NULL)
  208. queue->evt_notify(queue, RT_DATAQUEUE_EVENT_LWM);
  209. return result;
  210. }
  211. __exit:
  212. rt_hw_interrupt_enable(level);
  213. if ((result == RT_EOK) && (queue->evt_notify != RT_NULL))
  214. {
  215. queue->evt_notify(queue, RT_DATAQUEUE_EVENT_POP);
  216. }
  217. return result;
  218. }
  219. RTM_EXPORT(rt_data_queue_pop);
  220. rt_err_t rt_data_queue_peek(struct rt_data_queue *queue,
  221. const void** data_ptr,
  222. rt_size_t *size)
  223. {
  224. rt_ubase_t level;
  225. RT_ASSERT(queue != RT_NULL);
  226. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  227. if (queue->is_empty)
  228. {
  229. return -RT_EEMPTY;
  230. }
  231. level = rt_hw_interrupt_disable();
  232. *data_ptr = queue->queue[queue->get_index].data_ptr;
  233. *size = queue->queue[queue->get_index].data_size;
  234. rt_hw_interrupt_enable(level);
  235. return RT_EOK;
  236. }
  237. RTM_EXPORT(rt_data_queue_peek);
/**
 * @brief Empty the queue and wake every thread suspended on it.
 *
 * Queued item pointers are discarded (the payloads themselves are never
 * owned by the queue). Each woken thread receives thread->error = -RT_ERROR,
 * which push/pop propagate as their return value.
 *
 * NOTE(review): the index reset and the wakeup loops run in separate
 * critical sections under rt_enter_critical(); the ordering here is
 * deliberate — do not restructure without reviewing the scheduler-lock
 * semantics.
 *
 * @param queue the (initialized) data queue
 */
void rt_data_queue_reset(struct rt_data_queue *queue)
{
    rt_ubase_t level;
    struct rt_thread *thread;

    RT_ASSERT(queue != RT_NULL);
    RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);

    /* reset ring-buffer bookkeeping under interrupt lock */
    level = rt_hw_interrupt_disable();
    queue->get_index = 0;
    queue->put_index = 0;
    queue->is_empty = 1;
    queue->is_full = 0;
    rt_hw_interrupt_enable(level);

    /* scheduler locked so no woken thread runs until all are resumed */
    rt_enter_critical();
    /* wakeup all suspend threads */

    /* resume on pop list */
    while (!rt_list_isempty(&(queue->suspended_pop_list)))
    {
        /* disable interrupt */
        level = rt_hw_interrupt_disable();

        /* get next suspend thread */
        thread = rt_list_entry(queue->suspended_pop_list.next,
                               struct rt_thread,
                               tlist);
        /* set error code to RT_ERROR */
        thread->error = -RT_ERROR;

        /*
         * resume thread
         * In rt_thread_resume function, it will remove current thread from
         * suspend list
         */
        rt_thread_resume(thread);

        /* enable interrupt */
        rt_hw_interrupt_enable(level);
    }

    /* resume on push list */
    while (!rt_list_isempty(&(queue->suspended_push_list)))
    {
        /* disable interrupt */
        level = rt_hw_interrupt_disable();

        /* get next suspend thread */
        thread = rt_list_entry(queue->suspended_push_list.next,
                               struct rt_thread,
                               tlist);
        /* set error code to RT_ERROR */
        thread->error = -RT_ERROR;

        /*
         * resume thread
         * In rt_thread_resume function, it will remove current thread from
         * suspend list
         */
        rt_thread_resume(thread);

        /* enable interrupt */
        rt_hw_interrupt_enable(level);
    }
    rt_exit_critical();

    rt_schedule();
}
RTM_EXPORT(rt_data_queue_reset);
  296. rt_err_t rt_data_queue_deinit(struct rt_data_queue *queue)
  297. {
  298. rt_ubase_t level;
  299. RT_ASSERT(queue != RT_NULL);
  300. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  301. /* wakeup all suspend threads */
  302. rt_data_queue_reset(queue);
  303. level = rt_hw_interrupt_disable();
  304. queue->magic = 0;
  305. rt_hw_interrupt_enable(level);
  306. rt_free(queue->queue);
  307. return RT_EOK;
  308. }
  309. RTM_EXPORT(rt_data_queue_deinit);
  310. rt_uint16_t rt_data_queue_len(struct rt_data_queue *queue)
  311. {
  312. rt_ubase_t level;
  313. rt_int16_t len;
  314. RT_ASSERT(queue != RT_NULL);
  315. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  316. if (queue->is_empty)
  317. {
  318. return 0;
  319. }
  320. level = rt_hw_interrupt_disable();
  321. if (queue->put_index > queue->get_index)
  322. {
  323. len = queue->put_index - queue->get_index;
  324. }
  325. else
  326. {
  327. len = queue->size + queue->put_index - queue->get_index;
  328. }
  329. rt_hw_interrupt_enable(level);
  330. return len;
  331. }
  332. RTM_EXPORT(rt_data_queue_len);