/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-09-30     Bernard      first version.
 * 2016-10-31     armink       fix some resume push and pop thread bugs
 */
  11. #include <rtthread.h>
  12. #include <rtdevice.h>
  13. #include <rthw.h>
  14. #define DATAQUEUE_MAGIC 0xbead0e0e
/* one slot of the data queue ring buffer: the queue stores only a
 * pointer and a byte count, it never copies the user's data */
struct rt_data_item
{
    const void *data_ptr;   /* pointer supplied by the producer */
    rt_size_t data_size;    /* size in bytes reported by the producer */
};
  20. rt_err_t
  21. rt_data_queue_init(struct rt_data_queue *queue,
  22. rt_uint16_t size,
  23. rt_uint16_t lwm,
  24. void (*evt_notify)(struct rt_data_queue *queue, rt_uint32_t event))
  25. {
  26. RT_ASSERT(queue != RT_NULL);
  27. RT_ASSERT((0x10000 % size) == 0);
  28. queue->evt_notify = evt_notify;
  29. queue->magic = DATAQUEUE_MAGIC;
  30. queue->size = size;
  31. queue->lwm = lwm;
  32. queue->get_index = 0;
  33. queue->put_index = 0;
  34. rt_list_init(&(queue->suspended_push_list));
  35. rt_list_init(&(queue->suspended_pop_list));
  36. queue->queue = (struct rt_data_item *)rt_malloc(sizeof(struct rt_data_item) * size);
  37. if (queue->queue == RT_NULL)
  38. {
  39. return -RT_ENOMEM;
  40. }
  41. return RT_EOK;
  42. }
  43. RTM_EXPORT(rt_data_queue_init);
  44. rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
  45. const void *data_ptr,
  46. rt_size_t data_size,
  47. rt_int32_t timeout)
  48. {
  49. rt_ubase_t level;
  50. rt_thread_t thread;
  51. rt_err_t result;
  52. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  53. RT_ASSERT(queue != RT_NULL);
  54. result = RT_EOK;
  55. thread = rt_thread_self();
  56. level = rt_hw_interrupt_disable();
  57. while (queue->put_index - queue->get_index == queue->size)
  58. {
  59. /* queue is full */
  60. if (timeout == 0)
  61. {
  62. result = -RT_ETIMEOUT;
  63. goto __exit;
  64. }
  65. /* current context checking */
  66. RT_DEBUG_NOT_IN_INTERRUPT;
  67. /* reset thread error number */
  68. thread->error = RT_EOK;
  69. /* suspend thread on the push list */
  70. rt_thread_suspend(thread);
  71. rt_list_insert_before(&(queue->suspended_push_list), &(thread->tlist));
  72. /* start timer */
  73. if (timeout > 0)
  74. {
  75. /* reset the timeout of thread timer and start it */
  76. rt_timer_control(&(thread->thread_timer),
  77. RT_TIMER_CTRL_SET_TIME,
  78. &timeout);
  79. rt_timer_start(&(thread->thread_timer));
  80. }
  81. /* enable interrupt */
  82. rt_hw_interrupt_enable(level);
  83. /* do schedule */
  84. rt_schedule();
  85. /* thread is waked up */
  86. result = thread->error;
  87. level = rt_hw_interrupt_disable();
  88. if (result != RT_EOK) goto __exit;
  89. }
  90. queue->queue[queue->put_index % queue->size].data_ptr = data_ptr;
  91. queue->queue[queue->put_index % queue->size].data_size = data_size;
  92. queue->put_index += 1;
  93. /* there is at least one thread in suspended list */
  94. if (!rt_list_isempty(&(queue->suspended_pop_list)))
  95. {
  96. /* get thread entry */
  97. thread = rt_list_entry(queue->suspended_pop_list.next,
  98. struct rt_thread,
  99. tlist);
  100. /* resume it */
  101. rt_thread_resume(thread);
  102. rt_hw_interrupt_enable(level);
  103. /* perform a schedule */
  104. rt_schedule();
  105. return result;
  106. }
  107. __exit:
  108. rt_hw_interrupt_enable(level);
  109. if ((result == RT_EOK) && queue->evt_notify != RT_NULL)
  110. {
  111. queue->evt_notify(queue, RT_DATAQUEUE_EVENT_PUSH);
  112. }
  113. return result;
  114. }
  115. RTM_EXPORT(rt_data_queue_push);
  116. rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
  117. const void** data_ptr,
  118. rt_size_t *size,
  119. rt_int32_t timeout)
  120. {
  121. rt_ubase_t level;
  122. rt_thread_t thread;
  123. rt_err_t result;
  124. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  125. RT_ASSERT(queue != RT_NULL);
  126. RT_ASSERT(data_ptr != RT_NULL);
  127. RT_ASSERT(size != RT_NULL);
  128. result = RT_EOK;
  129. thread = rt_thread_self();
  130. level = rt_hw_interrupt_disable();
  131. while (queue->get_index == queue->put_index)
  132. {
  133. /* queue is empty */
  134. if (timeout == 0)
  135. {
  136. result = -RT_ETIMEOUT;
  137. goto __exit;
  138. }
  139. /* current context checking */
  140. RT_DEBUG_NOT_IN_INTERRUPT;
  141. /* reset thread error number */
  142. thread->error = RT_EOK;
  143. /* suspend thread on the pop list */
  144. rt_thread_suspend(thread);
  145. rt_list_insert_before(&(queue->suspended_pop_list), &(thread->tlist));
  146. /* start timer */
  147. if (timeout > 0)
  148. {
  149. /* reset the timeout of thread timer and start it */
  150. rt_timer_control(&(thread->thread_timer),
  151. RT_TIMER_CTRL_SET_TIME,
  152. &timeout);
  153. rt_timer_start(&(thread->thread_timer));
  154. }
  155. /* enable interrupt */
  156. rt_hw_interrupt_enable(level);
  157. /* do schedule */
  158. rt_schedule();
  159. /* thread is waked up */
  160. result = thread->error;
  161. level = rt_hw_interrupt_disable();
  162. if (result != RT_EOK)
  163. goto __exit;
  164. }
  165. *data_ptr = queue->queue[queue->get_index % queue->size].data_ptr;
  166. *size = queue->queue[queue->get_index % queue->size].data_size;
  167. queue->get_index += 1;
  168. if ((queue->put_index - queue->get_index) <= queue->lwm)
  169. {
  170. /* there is at least one thread in suspended list */
  171. if (!rt_list_isempty(&(queue->suspended_push_list)))
  172. {
  173. /* get thread entry */
  174. thread = rt_list_entry(queue->suspended_push_list.next,
  175. struct rt_thread,
  176. tlist);
  177. /* resume it */
  178. rt_thread_resume(thread);
  179. rt_hw_interrupt_enable(level);
  180. /* perform a schedule */
  181. rt_schedule();
  182. }
  183. else
  184. {
  185. rt_hw_interrupt_enable(level);
  186. }
  187. if (queue->evt_notify != RT_NULL)
  188. queue->evt_notify(queue, RT_DATAQUEUE_EVENT_LWM);
  189. return result;
  190. }
  191. __exit:
  192. rt_hw_interrupt_enable(level);
  193. if ((result == RT_EOK) && (queue->evt_notify != RT_NULL))
  194. {
  195. queue->evt_notify(queue, RT_DATAQUEUE_EVENT_POP);
  196. }
  197. return result;
  198. }
  199. RTM_EXPORT(rt_data_queue_pop);
  200. rt_err_t rt_data_queue_peak(struct rt_data_queue *queue,
  201. const void** data_ptr,
  202. rt_size_t *size)
  203. {
  204. rt_ubase_t level;
  205. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  206. RT_ASSERT(queue != RT_NULL);
  207. level = rt_hw_interrupt_disable();
  208. if (queue->get_index == queue->put_index)
  209. {
  210. rt_hw_interrupt_enable(level);
  211. return -RT_EEMPTY;
  212. }
  213. *data_ptr = queue->queue[queue->get_index % queue->size].data_ptr;
  214. *size = queue->queue[queue->get_index % queue->size].data_size;
  215. rt_hw_interrupt_enable(level);
  216. return RT_EOK;
  217. }
  218. RTM_EXPORT(rt_data_queue_peak);
  219. void rt_data_queue_reset(struct rt_data_queue *queue)
  220. {
  221. struct rt_thread *thread;
  222. register rt_ubase_t temp;
  223. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  224. rt_enter_critical();
  225. /* wakeup all suspend threads */
  226. /* resume on pop list */
  227. while (!rt_list_isempty(&(queue->suspended_pop_list)))
  228. {
  229. /* disable interrupt */
  230. temp = rt_hw_interrupt_disable();
  231. /* get next suspend thread */
  232. thread = rt_list_entry(queue->suspended_pop_list.next,
  233. struct rt_thread,
  234. tlist);
  235. /* set error code to RT_ERROR */
  236. thread->error = -RT_ERROR;
  237. /*
  238. * resume thread
  239. * In rt_thread_resume function, it will remove current thread from
  240. * suspend list
  241. */
  242. rt_thread_resume(thread);
  243. /* enable interrupt */
  244. rt_hw_interrupt_enable(temp);
  245. }
  246. /* resume on push list */
  247. while (!rt_list_isempty(&(queue->suspended_push_list)))
  248. {
  249. /* disable interrupt */
  250. temp = rt_hw_interrupt_disable();
  251. /* get next suspend thread */
  252. thread = rt_list_entry(queue->suspended_push_list.next,
  253. struct rt_thread,
  254. tlist);
  255. /* set error code to RT_ERROR */
  256. thread->error = -RT_ERROR;
  257. /*
  258. * resume thread
  259. * In rt_thread_resume function, it will remove current thread from
  260. * suspend list
  261. */
  262. rt_thread_resume(thread);
  263. /* enable interrupt */
  264. rt_hw_interrupt_enable(temp);
  265. }
  266. rt_exit_critical();
  267. rt_schedule();
  268. }
  269. RTM_EXPORT(rt_data_queue_reset);
  270. rt_err_t rt_data_queue_deinit(struct rt_data_queue *queue)
  271. {
  272. rt_ubase_t level;
  273. RT_ASSERT(queue->magic == DATAQUEUE_MAGIC);
  274. RT_ASSERT(queue != RT_NULL);
  275. level = rt_hw_interrupt_disable();
  276. /* wakeup all suspend threads */
  277. rt_data_queue_reset(queue);
  278. queue->magic = 0;
  279. rt_free(queue->queue);
  280. rt_hw_interrupt_enable(level);
  281. return RT_EOK;
  282. }
  283. RTM_EXPORT(rt_data_queue_deinit);