poll.c 4.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235
  1. /*
  2. * Copyright (c) 2006-2018, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2016-12-28 Bernard first version
  9. * 2018-03-09 Bernard Add protection for pt->triggered.
  10. */
  11. #include <stdint.h>
  12. #include <rthw.h>
  13. #include <rtdevice.h>
  14. #include <rtthread.h>
  15. #include <dfs.h>
  16. #include <dfs_file.h>
  17. #include <dfs_posix.h>
  18. #include <dfs_poll.h>
  19. #ifdef RT_USING_POSIX
struct rt_poll_node;

/* Per-poll() state; one table lives on the polling thread's stack for the
 * duration of a single poll() call. */
struct rt_poll_table
{
    rt_pollreq_t req;            /* request handed to each fd's poll op; _proc adds wait queues */
    rt_uint32_t triggered;       /* the waited thread whether triggered: set to 1 by wakeup callback */
    rt_thread_t polling_thread;  /* the thread performing this poll() */
    struct rt_poll_node *nodes;  /* singly-linked list of wait-queue nodes added so far */
};

/* One entry per wait queue the polling thread has been registered on. */
struct rt_poll_node
{
    struct rt_wqueue_node wqn;   /* node inserted into a device's wait queue */
    struct rt_poll_table *pt;    /* back-pointer to the owning poll table */
    struct rt_poll_node *next;   /* next node in pt->nodes */
};
  34. static int __wqueue_pollwake(struct rt_wqueue_node *wait, void *key)
  35. {
  36. struct rt_poll_node *pn;
  37. if (key && !((rt_ubase_t)key & wait->key))
  38. return -1;
  39. pn = rt_container_of(wait, struct rt_poll_node, wqn);
  40. pn->pt->triggered = 1;
  41. return __wqueue_default_wake(wait, key);
  42. }
  43. static void _poll_add(rt_wqueue_t *wq, rt_pollreq_t *req)
  44. {
  45. struct rt_poll_table *pt;
  46. struct rt_poll_node *node;
  47. node = (struct rt_poll_node *)rt_malloc(sizeof(struct rt_poll_node));
  48. if (node == RT_NULL)
  49. return;
  50. pt = rt_container_of(req, struct rt_poll_table, req);
  51. node->wqn.key = req->_key;
  52. rt_list_init(&(node->wqn.list));
  53. node->wqn.polling_thread = pt->polling_thread;
  54. node->wqn.wakeup = __wqueue_pollwake;
  55. node->next = pt->nodes;
  56. node->pt = pt;
  57. pt->nodes = node;
  58. rt_wqueue_add(wq, &node->wqn);
  59. }
  60. static void poll_table_init(struct rt_poll_table *pt)
  61. {
  62. pt->req._proc = _poll_add;
  63. pt->triggered = 0;
  64. pt->nodes = RT_NULL;
  65. pt->polling_thread = rt_thread_self();
  66. }
/* Block the polling thread for up to 'msec' milliseconds unless an event
 * has already been flagged via pt->triggered.
 * msec == 0: do not sleep at all; msec < 0: wait without a timeout.
 * Returns non-zero when the wait ended with no event triggered (i.e. a
 * timeout), zero when pt->triggered was set by a wakeup callback. */
static int poll_wait_timeout(struct rt_poll_table *pt, int msec)
{
    rt_int32_t timeout;
    int ret = 0;
    struct rt_thread *thread;
    rt_base_t level;

    thread = pt->polling_thread;

    timeout = rt_tick_from_millisecond(msec);

    /* Interrupts stay disabled across the triggered-check and the suspend:
     * __wqueue_pollwake may run from another context, and a wakeup landing
     * between the check and the suspend would otherwise be lost. */
    level = rt_hw_interrupt_disable();

    if (timeout != 0 && !pt->triggered)
    {
        rt_thread_suspend(thread);
        if (timeout > 0)
        {
            /* Arm the thread's built-in timer so the suspend is bounded. */
            rt_timer_control(&(thread->thread_timer),
                             RT_TIMER_CTRL_SET_TIME,
                             &timeout);
            rt_timer_start(&(thread->thread_timer));
        }

        rt_hw_interrupt_enable(level);

        /* Yield: we only run again after wakeup or timer expiry. */
        rt_schedule();

        level = rt_hw_interrupt_disable();
    }

    /* Re-read under interrupt protection for a consistent result. */
    ret = !pt->triggered;
    rt_hw_interrupt_enable(level);

    return ret;
}
  94. static int do_pollfd(struct pollfd *pollfd, rt_pollreq_t *req)
  95. {
  96. int mask = 0;
  97. int fd;
  98. fd = pollfd->fd;
  99. if (fd >= 0)
  100. {
  101. struct dfs_fd *f = fd_get(fd);
  102. mask = POLLNVAL;
  103. if (f)
  104. {
  105. mask = POLLMASK_DEFAULT;
  106. if (f->fops->poll)
  107. {
  108. req->_key = pollfd->events | POLLERR | POLLHUP;
  109. mask = f->fops->poll(f, req);
  110. /* dealwith the device return error -1*/
  111. if (mask < 0)
  112. {
  113. fd_put(f);
  114. pollfd->revents = 0;
  115. return mask;
  116. }
  117. }
  118. /* Mask out unneeded events. */
  119. mask &= pollfd->events | POLLERR | POLLHUP;
  120. fd_put(f);
  121. }
  122. }
  123. pollfd->revents = mask;
  124. return mask;
  125. }
  126. static int poll_do(struct pollfd *fds, nfds_t nfds, struct rt_poll_table *pt, int msec)
  127. {
  128. int num;
  129. int istimeout = 0;
  130. int n;
  131. struct pollfd *pf;
  132. int ret = 0;
  133. if (msec == 0)
  134. {
  135. pt->req._proc = RT_NULL;
  136. istimeout = 1;
  137. }
  138. while (1)
  139. {
  140. pf = fds;
  141. num = 0;
  142. for (n = 0; n < nfds; n ++)
  143. {
  144. ret = do_pollfd(pf, &pt->req);
  145. if(ret < 0)
  146. {
  147. /*dealwith the device return error -1 */
  148. pt->req._proc = RT_NULL;
  149. return ret;
  150. }
  151. else if(ret > 0)
  152. {
  153. num ++;
  154. pt->req._proc = RT_NULL;
  155. }
  156. pf ++;
  157. }
  158. pt->req._proc = RT_NULL;
  159. if (num || istimeout)
  160. break;
  161. if (poll_wait_timeout(pt, msec))
  162. istimeout = 1;
  163. }
  164. return num;
  165. }
  166. static void poll_teardown(struct rt_poll_table *pt)
  167. {
  168. struct rt_poll_node *node, *next;
  169. next = pt->nodes;
  170. while (next)
  171. {
  172. node = next;
  173. rt_wqueue_remove(&node->wqn);
  174. next = node->next;
  175. rt_free(node);
  176. }
  177. }
  178. int poll(struct pollfd *fds, nfds_t nfds, int timeout)
  179. {
  180. int num;
  181. struct rt_poll_table table;
  182. poll_table_init(&table);
  183. num = poll_do(fds, nfds, &table, timeout);
  184. poll_teardown(&table);
  185. return num;
  186. }
  187. #endif