/* dataqueue.c */
  1. /*
  2. * Copyright (c) 2006-2018, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2012-09-30 Bernard first version.
  9. * 2016-10-31 armink fix some resume push and pop thread bugs
  10. */
  11. #include <rtthread.h>
  12. #include <rtdevice.h>
  13. #include <rthw.h>
/* One slot of the queue's ring buffer: the producer's pointer and the size it
 * advertised. The queue stores only the pointer -- the payload itself is never
 * copied, so the producer must keep it alive until the consumer is done. */
struct rt_data_item
{
    const void *data_ptr;   /* pointer supplied by rt_data_queue_push() */
    rt_size_t data_size;    /* size supplied alongside data_ptr */
};
  19. rt_err_t
  20. rt_data_queue_init(struct rt_data_queue *queue,
  21. rt_uint16_t size,
  22. rt_uint16_t lwm,
  23. void (*evt_notify)(struct rt_data_queue *queue, rt_uint32_t event))
  24. {
  25. RT_ASSERT(queue != RT_NULL);
  26. queue->evt_notify = evt_notify;
  27. queue->size = size;
  28. queue->lwm = lwm;
  29. queue->get_index = 0;
  30. queue->put_index = 0;
  31. rt_list_init(&(queue->suspended_push_list));
  32. rt_list_init(&(queue->suspended_pop_list));
  33. queue->queue = (struct rt_data_item *)rt_malloc(sizeof(struct rt_data_item) * size);
  34. if (queue->queue == RT_NULL)
  35. {
  36. return -RT_ENOMEM;
  37. }
  38. return RT_EOK;
  39. }
  40. RTM_EXPORT(rt_data_queue_init);
  41. rt_err_t rt_data_queue_push(struct rt_data_queue *queue,
  42. const void *data_ptr,
  43. rt_size_t data_size,
  44. rt_int32_t timeout)
  45. {
  46. rt_ubase_t level;
  47. rt_thread_t thread;
  48. rt_err_t result;
  49. RT_ASSERT(queue != RT_NULL);
  50. result = RT_EOK;
  51. thread = rt_thread_self();
  52. level = rt_hw_interrupt_disable();
  53. while (queue->put_index - queue->get_index == queue->size)
  54. {
  55. /* queue is full */
  56. if (timeout == 0)
  57. {
  58. result = -RT_ETIMEOUT;
  59. goto __exit;
  60. }
  61. /* current context checking */
  62. RT_DEBUG_NOT_IN_INTERRUPT;
  63. /* reset thread error number */
  64. thread->error = RT_EOK;
  65. /* suspend thread on the push list */
  66. rt_thread_suspend(thread);
  67. rt_list_insert_before(&(queue->suspended_push_list), &(thread->tlist));
  68. /* start timer */
  69. if (timeout > 0)
  70. {
  71. /* reset the timeout of thread timer and start it */
  72. rt_timer_control(&(thread->thread_timer),
  73. RT_TIMER_CTRL_SET_TIME,
  74. &timeout);
  75. rt_timer_start(&(thread->thread_timer));
  76. }
  77. /* enable interrupt */
  78. rt_hw_interrupt_enable(level);
  79. /* do schedule */
  80. rt_schedule();
  81. /* thread is waked up */
  82. result = thread->error;
  83. level = rt_hw_interrupt_disable();
  84. if (result != RT_EOK) goto __exit;
  85. }
  86. queue->queue[queue->put_index % queue->size].data_ptr = data_ptr;
  87. queue->queue[queue->put_index % queue->size].data_size = data_size;
  88. queue->put_index += 1;
  89. /* there is at least one thread in suspended list */
  90. if (!rt_list_isempty(&(queue->suspended_pop_list)))
  91. {
  92. /* get thread entry */
  93. thread = rt_list_entry(queue->suspended_pop_list.next,
  94. struct rt_thread,
  95. tlist);
  96. /* resume it */
  97. rt_thread_resume(thread);
  98. rt_hw_interrupt_enable(level);
  99. /* perform a schedule */
  100. rt_schedule();
  101. return result;
  102. }
  103. __exit:
  104. rt_hw_interrupt_enable(level);
  105. if ((result == RT_EOK) && queue->evt_notify != RT_NULL)
  106. {
  107. queue->evt_notify(queue, RT_DATAQUEUE_EVENT_PUSH);
  108. }
  109. return result;
  110. }
  111. RTM_EXPORT(rt_data_queue_push);
  112. rt_err_t rt_data_queue_pop(struct rt_data_queue *queue,
  113. const void** data_ptr,
  114. rt_size_t *size,
  115. rt_int32_t timeout)
  116. {
  117. rt_ubase_t level;
  118. rt_thread_t thread;
  119. rt_err_t result;
  120. RT_ASSERT(queue != RT_NULL);
  121. RT_ASSERT(data_ptr != RT_NULL);
  122. RT_ASSERT(size != RT_NULL);
  123. result = RT_EOK;
  124. thread = rt_thread_self();
  125. level = rt_hw_interrupt_disable();
  126. while (queue->get_index == queue->put_index)
  127. {
  128. /* queue is empty */
  129. if (timeout == 0)
  130. {
  131. result = -RT_ETIMEOUT;
  132. goto __exit;
  133. }
  134. /* current context checking */
  135. RT_DEBUG_NOT_IN_INTERRUPT;
  136. /* reset thread error number */
  137. thread->error = RT_EOK;
  138. /* suspend thread on the pop list */
  139. rt_thread_suspend(thread);
  140. rt_list_insert_before(&(queue->suspended_pop_list), &(thread->tlist));
  141. /* start timer */
  142. if (timeout > 0)
  143. {
  144. /* reset the timeout of thread timer and start it */
  145. rt_timer_control(&(thread->thread_timer),
  146. RT_TIMER_CTRL_SET_TIME,
  147. &timeout);
  148. rt_timer_start(&(thread->thread_timer));
  149. }
  150. /* enable interrupt */
  151. rt_hw_interrupt_enable(level);
  152. /* do schedule */
  153. rt_schedule();
  154. /* thread is waked up */
  155. result = thread->error;
  156. level = rt_hw_interrupt_disable();
  157. if (result != RT_EOK)
  158. goto __exit;
  159. }
  160. *data_ptr = queue->queue[queue->get_index % queue->size].data_ptr;
  161. *size = queue->queue[queue->get_index % queue->size].data_size;
  162. queue->get_index += 1;
  163. if ((queue->put_index - queue->get_index) <= queue->lwm)
  164. {
  165. /* there is at least one thread in suspended list */
  166. if (!rt_list_isempty(&(queue->suspended_push_list)))
  167. {
  168. /* get thread entry */
  169. thread = rt_list_entry(queue->suspended_push_list.next,
  170. struct rt_thread,
  171. tlist);
  172. /* resume it */
  173. rt_thread_resume(thread);
  174. rt_hw_interrupt_enable(level);
  175. /* perform a schedule */
  176. rt_schedule();
  177. }
  178. else
  179. {
  180. rt_hw_interrupt_enable(level);
  181. }
  182. if (queue->evt_notify != RT_NULL)
  183. queue->evt_notify(queue, RT_DATAQUEUE_EVENT_LWM);
  184. return result;
  185. }
  186. __exit:
  187. rt_hw_interrupt_enable(level);
  188. if ((result == RT_EOK) && (queue->evt_notify != RT_NULL))
  189. {
  190. queue->evt_notify(queue, RT_DATAQUEUE_EVENT_POP);
  191. }
  192. return result;
  193. }
  194. RTM_EXPORT(rt_data_queue_pop);
  195. rt_err_t rt_data_queue_peak(struct rt_data_queue *queue,
  196. const void** data_ptr,
  197. rt_size_t *size)
  198. {
  199. rt_ubase_t level;
  200. RT_ASSERT(queue != RT_NULL);
  201. level = rt_hw_interrupt_disable();
  202. if (queue->get_index == queue->put_index)
  203. {
  204. rt_hw_interrupt_enable(level);
  205. return -RT_EEMPTY;
  206. }
  207. *data_ptr = queue->queue[queue->get_index % queue->size].data_ptr;
  208. *size = queue->queue[queue->get_index % queue->size].data_size;
  209. rt_hw_interrupt_enable(level);
  210. return RT_EOK;
  211. }
  212. RTM_EXPORT(rt_data_queue_peak);
  213. void rt_data_queue_reset(struct rt_data_queue *queue)
  214. {
  215. struct rt_thread *thread;
  216. register rt_ubase_t temp;
  217. rt_enter_critical();
  218. /* wakeup all suspend threads */
  219. /* resume on pop list */
  220. while (!rt_list_isempty(&(queue->suspended_pop_list)))
  221. {
  222. /* disable interrupt */
  223. temp = rt_hw_interrupt_disable();
  224. /* get next suspend thread */
  225. thread = rt_list_entry(queue->suspended_pop_list.next,
  226. struct rt_thread,
  227. tlist);
  228. /* set error code to RT_ERROR */
  229. thread->error = -RT_ERROR;
  230. /*
  231. * resume thread
  232. * In rt_thread_resume function, it will remove current thread from
  233. * suspend list
  234. */
  235. rt_thread_resume(thread);
  236. /* enable interrupt */
  237. rt_hw_interrupt_enable(temp);
  238. }
  239. /* resume on push list */
  240. while (!rt_list_isempty(&(queue->suspended_push_list)))
  241. {
  242. /* disable interrupt */
  243. temp = rt_hw_interrupt_disable();
  244. /* get next suspend thread */
  245. thread = rt_list_entry(queue->suspended_push_list.next,
  246. struct rt_thread,
  247. tlist);
  248. /* set error code to RT_ERROR */
  249. thread->error = -RT_ERROR;
  250. /*
  251. * resume thread
  252. * In rt_thread_resume function, it will remove current thread from
  253. * suspend list
  254. */
  255. rt_thread_resume(thread);
  256. /* enable interrupt */
  257. rt_hw_interrupt_enable(temp);
  258. }
  259. rt_exit_critical();
  260. rt_schedule();
  261. }
  262. RTM_EXPORT(rt_data_queue_reset);