poll.c

/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2016-12-28     Bernard      first version
 * 2018-03-09     Bernard      Add protection for pt->triggered.
 */
#include <stdint.h>
#include <rthw.h>
#include <rtdevice.h>
#include <rtthread.h>

#include <dfs.h>
#include <dfs_file.h>
#include <dfs_posix.h>
#include <dfs_poll.h>

#ifdef RT_USING_POSIX
struct rt_poll_node;

struct rt_poll_table
{
    rt_pollreq_t req;
    rt_uint32_t triggered; /* set to 1 once an event has woken the polling thread */
    rt_thread_t polling_thread;
    struct rt_poll_node *nodes;
};
struct rt_poll_node
{
    struct rt_wqueue_node wqn;
    struct rt_poll_table *pt;
    struct rt_poll_node *next;
};
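
/* Wakeup callback attached to every wait-queue node added by poll():
 * ignore events the caller did not ask for, otherwise mark the poll
 * table as triggered before waking the polling thread. */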
static int __wqueue_pollwake(struct rt_wqueue_node *wait, void *key)
{
    struct rt_poll_node *pn;

    if (key && !((rt_ubase_t)key & wait->key))
        return -1;

    pn = rt_container_of(wait, struct rt_poll_node, wqn);
    pn->pt->triggered = 1;

    return __wqueue_default_wake(wait, key);
}
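
/* _proc callback installed by poll_table_init(): called from a device
 * driver's poll() implementation to register the polling thread on the
 * device's wait queue. Allocates a poll node, chains it into the poll
 * table's node list and adds it to the wait queue. */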
static void _poll_add(rt_wqueue_t *wq, rt_pollreq_t *req)
{
    struct rt_poll_table *pt;
    struct rt_poll_node *node;

    node = (struct rt_poll_node *)rt_malloc(sizeof(struct rt_poll_node));
    if (node == RT_NULL)
        return;

    pt = rt_container_of(req, struct rt_poll_table, req);

    node->wqn.key = req->_key;
    rt_list_init(&(node->wqn.list));
    node->wqn.polling_thread = pt->polling_thread;
    node->wqn.wakeup = __wqueue_pollwake;
    node->next = pt->nodes;
    node->pt = pt;
    pt->nodes = node;

    rt_wqueue_add(wq, &node->wqn);
}
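
/* Prepare a poll table for the calling thread. */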
static void poll_table_init(struct rt_poll_table *pt)
{
    pt->req._proc = _poll_add;
    pt->triggered = 0;
    pt->nodes = RT_NULL;
    pt->polling_thread = rt_thread_self();
}
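
/* Suspend the polling thread until a wait queue wakes it or the timeout
 * expires; a negative msec blocks without a timer, msec == 0 returns
 * immediately. Returns non-zero when no event was triggered (timed out). */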
static int poll_wait_timeout(struct rt_poll_table *pt, int msec)
{
    rt_int32_t timeout;
    int ret = 0;
    struct rt_thread *thread;
    rt_base_t level;

    thread = pt->polling_thread;

    timeout = rt_tick_from_millisecond(msec);

    level = rt_hw_interrupt_disable();

    if (timeout != 0 && !pt->triggered)
    {
        rt_thread_suspend(thread);
        if (timeout > 0)
        {
            rt_timer_control(&(thread->thread_timer),
                             RT_TIMER_CTRL_SET_TIME,
                             &timeout);
            rt_timer_start(&(thread->thread_timer));
        }

        rt_hw_interrupt_enable(level);

        rt_schedule();

        level = rt_hw_interrupt_disable();
    }

    ret = !pt->triggered;
    rt_hw_interrupt_enable(level);

    return ret;
}
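
/* Query a single descriptor: call the file's poll operation (which may
 * register this thread on the device's wait queue via req), mask the
 * result with the requested events and record it in revents. An invalid
 * fd reports POLLNVAL. */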
static int do_pollfd(struct pollfd *pollfd, rt_pollreq_t *req)
{
    int mask = 0;
    int fd;

    fd = pollfd->fd;

    if (fd >= 0)
    {
        struct dfs_fd *f = fd_get(fd);
        mask = POLLNVAL;

        if (f)
        {
            mask = POLLMASK_DEFAULT;
            if (f->fops->poll)
            {
                req->_key = pollfd->events | POLLERR | POLLHUP;

                mask = f->fops->poll(f, req);
            }

            /* Mask out unneeded events. */
            mask &= pollfd->events | POLLERR | POLLHUP;
            fd_put(f);
        }
    }
    pollfd->revents = mask;

    return mask;
}
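
/* Core polling loop: scan every descriptor, and once any descriptor
 * reports an event stop registering further wait-queue nodes. Repeat
 * until at least one descriptor is ready or the wait times out; return
 * the number of descriptors with events from the final pass. */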
static int poll_do(struct pollfd *fds, nfds_t nfds, struct rt_poll_table *pt, int msec)
{
    int num;
    int istimeout = 0;
    int n;
    struct pollfd *pf;

    if (msec == 0)
    {
        pt->req._proc = RT_NULL;
        istimeout = 1;
    }

    while (1)
    {
        pf = fds;
        num = 0;

        for (n = 0; n < nfds; n ++)
        {
            if (do_pollfd(pf, &pt->req))
            {
                num ++;
                pt->req._proc = RT_NULL;
            }
            pf ++;
        }

        pt->req._proc = RT_NULL;

        if (num || istimeout)
            break;

        if (poll_wait_timeout(pt, msec))
            istimeout = 1;
    }

    return num;
}
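
/* Remove every wait-queue node registered during this poll call and free it. */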
static void poll_teardown(struct rt_poll_table *pt)
{
    struct rt_poll_node *node, *next;

    next = pt->nodes;
    while (next)
    {
        node = next;
        rt_wqueue_remove(&node->wqn);
        next = node->next;
        rt_free(node);
    }
}
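
/* POSIX poll(): wait for events on a set of file descriptors. The
 * timeout is in milliseconds; a negative value waits forever and zero
 * returns immediately. */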
int poll(struct pollfd *fds, nfds_t nfds, int timeout)
{
    int num;
    struct rt_poll_table table;

    poll_table_init(&table);

    num = poll_do(fds, nfds, &table, timeout);

    poll_teardown(&table);

    return num;
}

#endif
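
For reference, a minimal usage sketch of the poll() call implemented above, as it would appear in application code. The device path "/dev/uart1", the buffer size and the one-second timeout are illustrative assumptions, not part of this file.

#include <dfs_posix.h>
#include <dfs_poll.h>

/* Minimal sketch: wait up to one second for data on a device fd.
 * "/dev/uart1" is a hypothetical device name used only for illustration. */
static void poll_usage_example(void)
{
    struct pollfd pfd;
    int fd = open("/dev/uart1", O_RDONLY);

    if (fd < 0)
        return;

    pfd.fd = fd;
    pfd.events = POLLIN;
    pfd.revents = 0;

    /* poll() returns the number of ready descriptors, 0 on timeout. */
    if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
    {
        char buf[64];
        read(fd, buf, sizeof(buf)); /* data is ready to be read */
    }

    close(fd);
}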