poll.c

/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2016-12-28     Bernard      first version
 * 2018-03-09     Bernard      Add protection for pt->triggered.
 */

#include <stdint.h>

#include <rthw.h>
#include <rtdevice.h>
#include <rtthread.h>

#include <dfs.h>
#include <dfs_file.h>
#include <dfs_posix.h>
#include <dfs_poll.h>
struct rt_poll_node;

struct rt_poll_table
{
    rt_pollreq_t req;
    rt_uint32_t triggered; /* whether the waiting thread was triggered */
    rt_thread_t polling_thread;
    struct rt_poll_node *nodes;
};

struct rt_poll_node
{
    struct rt_wqueue_node wqn;
    struct rt_poll_table *pt;
    struct rt_poll_node *next;
};
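
/* Wait-queue wakeup callback shared by all poll nodes: ignore a wakeup whose
 * key has no overlap with the events this node waits on, otherwise mark the
 * owning poll table as triggered and resume the polling thread through the
 * default wakeup. */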
static int __wqueue_pollwake(struct rt_wqueue_node *wait, void *key)
{
    struct rt_poll_node *pn;

    if (key && !((rt_uint32_t)key & wait->key))
        return -1;

    pn = rt_container_of(wait, struct rt_poll_node, wqn);
    pn->pt->triggered = 1;

    return __wqueue_default_wake(wait, key);
}
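
/* Installed as req->_proc and invoked from a driver's poll hook (via
 * rt_poll_add): allocate a poll node, link it into the poll table and
 * register it on the device's wait queue. If the allocation fails the
 * registration is silently skipped, so the thread can still time out but
 * will not be woken by this queue. */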
static void _poll_add(rt_wqueue_t *wq, rt_pollreq_t *req)
{
    struct rt_poll_table *pt;
    struct rt_poll_node *node;

    node = rt_malloc(sizeof(struct rt_poll_node));
    if (node == RT_NULL)
        return;

    pt = rt_container_of(req, struct rt_poll_table, req);

    node->wqn.key = req->_key;
    rt_list_init(&(node->wqn.list));
    node->wqn.polling_thread = pt->polling_thread;
    node->wqn.wakeup = __wqueue_pollwake;
    node->next = pt->nodes;
    node->pt = pt;
    pt->nodes = node;
    rt_wqueue_add(wq, &node->wqn);
}
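
/* Prepare a poll table owned by the calling thread: no nodes yet, not
 * triggered, and _poll_add installed as the registration hook. */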
static void poll_table_init(struct rt_poll_table *pt)
{
    pt->req._proc = _poll_add;
    pt->triggered = 0;
    pt->nodes = RT_NULL;
    pt->polling_thread = rt_thread_self();
}
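
/* Suspend the polling thread until a wakeup or the timeout expires. The
 * triggered flag is checked and the thread suspended with interrupts
 * disabled, so a wakeup racing with the suspend cannot be lost (see the
 * 2018-03-09 change log entry). A negative msec waits forever. Returns
 * non-zero when the wait ended without a trigger, i.e. on timeout. */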
static int poll_wait_timeout(struct rt_poll_table *pt, int msec)
{
    rt_int32_t timeout;
    int ret = 0;
    struct rt_thread *thread;
    rt_base_t level;

    thread = pt->polling_thread;

    timeout = rt_tick_from_millisecond(msec);

    level = rt_hw_interrupt_disable();

    if (timeout != 0 && !pt->triggered)
    {
        rt_thread_suspend(thread);
        if (timeout > 0)
        {
            rt_timer_control(&(thread->thread_timer),
                             RT_TIMER_CTRL_SET_TIME,
                             &timeout);
            rt_timer_start(&(thread->thread_timer));
        }

        rt_hw_interrupt_enable(level);

        rt_schedule();

        level = rt_hw_interrupt_disable();
    }

    ret = !pt->triggered;

    rt_hw_interrupt_enable(level);

    return ret;
}
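
/* Poll a single descriptor. A negative fd contributes no events; a closed or
 * invalid fd reports POLLNVAL. Drivers without a poll hook are treated as
 * always ready (POLLMASK_DEFAULT). The returned mask is restricted to the
 * requested events plus POLLERR and POLLHUP, which are always reported. */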
static int do_pollfd(struct pollfd *pollfd, rt_pollreq_t *req)
{
    int mask = 0;
    int fd;

    fd = pollfd->fd;

    if (fd >= 0)
    {
        struct dfs_fd *f = fd_get(fd);
        mask = POLLNVAL;

        if (f)
        {
            mask = POLLMASK_DEFAULT;
            if (f->fops->poll)
            {
                req->_key = pollfd->events | POLLERR | POLLHUP;

                mask = f->fops->poll(f, req);
            }

            /* Mask out unneeded events. */
            mask &= pollfd->events | POLLERR | POLLHUP;
            fd_put(f);
        }
    }
    pollfd->revents = mask;

    return mask;
}
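
/* Core scan loop. On the first pass req->_proc still points at _poll_add, so
 * every driver poll hook also registers this thread on its wait queue; the
 * hook is cleared as soon as one descriptor reports events, and always after
 * the first full pass, so later passes only re-check readiness. msec == 0
 * turns the call into a single non-blocking scan. */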
static int poll_do(struct pollfd *fds, nfds_t nfds, struct rt_poll_table *pt, int msec)
{
    int num;
    int istimeout = 0;
    int n;
    struct pollfd *pf;

    if (msec == 0)
    {
        pt->req._proc = RT_NULL;
        istimeout = 1;
    }

    while (1)
    {
        pf = fds;
        num = 0;

        for (n = 0; n < nfds; n ++)
        {
            if (do_pollfd(pf, &pt->req))
            {
                num ++;
                pt->req._proc = RT_NULL;
            }
            pf ++;
        }

        pt->req._proc = RT_NULL;

        if (num || istimeout)
            break;

        if (poll_wait_timeout(pt, msec))
            istimeout = 1;
    }

    return num;
}
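
/* Remove every registered poll node from its wait queue and free it. Must
 * run before the stack-allocated poll table goes out of scope. */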
static void poll_teardown(struct rt_poll_table *pt)
{
    struct rt_poll_node *node, *next;

    next = pt->nodes;
    while (next)
    {
        node = next;
        rt_wqueue_remove(&node->wqn);
        next = node->next;
        rt_free(node);
    }
}
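
/* POSIX-style poll(): build a poll table on the caller's stack, scan and
 * wait, then unhook from all wait queues. Returns the number of descriptors
 * with events, or 0 on timeout. */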
int poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
    int num;
    struct rt_poll_table table;

    poll_table_init(&table);

    num = poll_do(fds, nfds, &table, timeout);

    poll_teardown(&table);

    return num;
}
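
/*
 * Usage sketch (illustrative only, not part of the original file; the device
 * path "/dev/uart2" and the 100 ms timeout are assumptions for the example):
 *
 *     struct pollfd pfd;
 *
 *     pfd.fd = open("/dev/uart2", O_RDONLY);   // any pollable DFS descriptor
 *     pfd.events = POLLIN;                     // wait until readable
 *
 *     if (poll(&pfd, 1, 100) > 0 && (pfd.revents & POLLIN))
 *     {
 *         // data is available; read() will not block
 *     }
 *
 *     close(pfd.fd);
 */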