epoll.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042
  1. /*
  2. * Copyright (c) 2006-2023, RT-Thread Development Team
  3. *
  4. * SPDX-License-Identifier: Apache-2.0
  5. *
  6. * Change Logs:
  7. * Date Author Notes
  8. * 2023-07-29 zmq810150896 first version
  9. * 2024-03-26 TroyMitchelle Add comments for all functions, members within structure members and fix incorrect naming of triggered
  10. * 2023-12-14 Shell When poll goes to sleep before the waitqueue has added a
  11. * record and finished enumerating all the fd's, it may be
  12. * incorrectly woken up. This is basically because the poll
  13. * mechanism wakeup algorithm does not correctly distinguish
  14. * the current wait state.
  15. */
  16. #include <rtthread.h>
  17. #include <fcntl.h>
  18. #include <stdint.h>
  19. #include <unistd.h>
  20. #include <dfs_file.h>
  21. #include "sys/epoll.h"
  22. #include "poll.h"
  23. #include <lwp_signal.h>
  24. #define EPOLL_MUTEX_NAME "EVENTEPOLL"
  25. #define EFD_SHARED_EPOLL_TYPE (EPOLL_CTL_ADD | EPOLL_CTL_DEL | EPOLL_CTL_MOD)
  26. #define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
  27. #define EPOLLEXCLUSIVE_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
  28. EPOLLET | EPOLLEXCLUSIVE)
struct rt_eventpoll;

/* Wait state of an epoll instance; written under ep->spinlock and used by
 * epoll_wait_timeout()/epoll_wqueue_callback() to distinguish "a thread is
 * really asleep" from "a trigger already arrived" (see 2023-12-14 changelog). */
enum rt_epoll_status {
    RT_EPOLL_STAT_INIT,     /**< idle: no pending trigger, no sleeping waiter */
    RT_EPOLL_STAT_TRIG,     /**< at least one event was triggered */
    RT_EPOLL_STAT_WAITING,  /**< a thread is suspended in epoll_wait() */
};

/* Monitor queue: one node per watched file descriptor */
struct rt_fd_list
{
    rt_uint32_t revents;          /**< Monitored events (EPOLL* mask) */
    struct epoll_event epev;      /**< Event + user data returned to epoll_wait() */
    rt_pollreq_t req;             /**< Poll request structure */
    struct rt_eventpoll *ep;      /**< Pointer to the associated event poll */
    struct rt_wqueue_node wqn;    /**< Wait queue node hooked into the target fd */
    int exclusive;                /**< Indicates if the event is exclusive */
    rt_bool_t is_rdl_node;        /**< Indicates if the node is in the ready list */
    int fd;                       /**< File descriptor being watched */
    struct rt_fd_list *next;      /**< Next node of the monitor list */
    rt_slist_t rdl_node;          /**< Ready list node */
};

struct rt_eventpoll
{
    rt_wqueue_t epoll_read;       /**< Wait queue for threads poll()ing the epoll fd itself */
    rt_thread_t polling_thread;   /**< Thread that waits on this instance */
    struct rt_mutex lock;         /**< Protects the monitor list */
    struct rt_fd_list *fdlist;    /**< Monitor list (head node represents the epoll fd) */
    int eventpoll_num;            /**< Number of entries on the ready list */
    rt_pollreq_t req;             /**< Poll request structure */
    struct rt_spinlock spinlock;  /**< Protects rdl_head, eventpoll_num and status */
    rt_slist_t rdl_head;          /**< Ready list head */
    enum rt_epoll_status status;  /**< Whether the waiting thread was triggered */
};
/* Forward declarations for the dfs hooks and internal helpers below. */
static int epoll_close(struct dfs_file *file);
static int epoll_poll(struct dfs_file *file, struct rt_pollreq *req);
static int epoll_get_event(struct rt_fd_list *fl, rt_pollreq_t *req);
static int epoll_do_ctl(int epfd, int op, int fd, struct epoll_event *event);

/* File operations of an epoll vnode: only close and poll are implemented. */
static const struct dfs_file_ops epoll_fops =
{
    .close = epoll_close,
    .poll = epoll_poll,
};
  70. /**
  71. * @brief Closes the file descriptor list associated with epoll.
  72. *
  73. * This function closes the file descriptor list associated with epoll and frees the allocated memory.
  74. *
  75. * @param fdlist Pointer to the file descriptor list.
  76. *
  77. * @return Returns 0 on success.
  78. */
  79. static int epoll_close_fdlist(struct rt_fd_list *fdlist)
  80. {
  81. struct rt_fd_list *fre_node, *list;
  82. if (fdlist != RT_NULL)
  83. {
  84. list = fdlist;
  85. while (list->next != RT_NULL)
  86. {
  87. fre_node = list->next;
  88. rt_wqueue_remove(&fre_node->wqn);
  89. list->next = fre_node->next;
  90. rt_free(fre_node);
  91. }
  92. rt_free(fdlist);
  93. }
  94. return 0;
  95. }
  96. /**
  97. * @brief Closes the epoll file descriptor.
  98. *
  99. * This function closes the epoll file descriptor and cleans up associated resources.
  100. *
  101. * @param file Pointer to the file structure.
  102. *
  103. * @return Returns 0 on success.
  104. */
  105. static int epoll_close(struct dfs_file *file)
  106. {
  107. struct rt_eventpoll *ep;
  108. if (file->vnode->ref_count != 1)
  109. return 0;
  110. if (file->vnode)
  111. {
  112. if (file->vnode->data)
  113. {
  114. ep = file->vnode->data;
  115. if (ep)
  116. {
  117. rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
  118. if (ep->fdlist)
  119. {
  120. epoll_close_fdlist(ep->fdlist);
  121. }
  122. rt_mutex_release(&ep->lock);
  123. rt_mutex_detach(&ep->lock);
  124. rt_free(ep);
  125. }
  126. }
  127. }
  128. return 0;
  129. }
  130. /**
  131. * @brief Polls the epoll file descriptor for events.
  132. *
  133. * This function polls the epoll file descriptor for events and updates the poll request accordingly.
  134. *
  135. * @param file Pointer to the file structure.
  136. * @param req Pointer to the poll request structure.
  137. *
  138. * @return Returns the events occurred on success.
  139. */
  140. static int epoll_poll(struct dfs_file *file, struct rt_pollreq *req)
  141. {
  142. struct rt_eventpoll *ep;
  143. int events = 0;
  144. rt_base_t level;
  145. if (file->vnode->data)
  146. {
  147. ep = file->vnode->data;
  148. ep->req._key = req->_key;
  149. rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
  150. rt_poll_add(&ep->epoll_read, req);
  151. level = rt_spin_lock_irqsave(&ep->spinlock);
  152. if (!rt_slist_isempty(&ep->rdl_head))
  153. events |= POLLIN | EPOLLRDNORM | POLLOUT;
  154. rt_spin_unlock_irqrestore(&ep->spinlock, level);
  155. rt_mutex_release(&ep->lock);
  156. }
  157. return events;
  158. }
/**
 * @brief Callback function for the wait queue.
 *
 * Invoked when a watched file's wait queue is woken with @p key events.
 * Moves the owning rt_fd_list onto the epoll ready list (unless already
 * queued) and wakes anything blocked in poll() on the epoll fd; the
 * suspended epoll_wait() thread is only resumed when it had actually
 * reached the WAITING state.
 *
 * @param wait Wait queue node embedded in a struct rt_fd_list.
 * @param key Bitmask of events that fired (cast to a pointer), or NULL.
 *
 * @return 0 when the default wake-up was performed, -1 when the event does
 *         not match the registered key or no thread was waiting.
 */
static int epoll_wqueue_callback(struct rt_wqueue_node *wait, void *key)
{
    struct rt_fd_list *fdlist;
    struct rt_eventpoll *ep;
    rt_base_t level;
    int is_waiting = 0;

    /* Ignore wake-ups whose events do not overlap the registered key. */
    if (key && !((rt_ubase_t)key & wait->key))
        return -1;

    fdlist = rt_container_of(wait, struct rt_fd_list, wqn);

    ep = fdlist->ep;
    if (ep)
    {
        level = rt_spin_lock_irqsave(&ep->spinlock);
        if (fdlist->is_rdl_node == RT_FALSE)
        {
            /* First notification for this fd: append it to the ready list. */
            rt_slist_append(&ep->rdl_head, &fdlist->rdl_node);
            fdlist->exclusive = 0;
            fdlist->is_rdl_node = RT_TRUE;
            ep->eventpoll_num++;
            /* Sample the state under the lock: only wake the epoll_wait()
             * thread if it had really gone to sleep (see file changelog). */
            is_waiting = (ep->status == RT_EPOLL_STAT_WAITING);
            ep->status = RT_EPOLL_STAT_TRIG;
            rt_wqueue_wakeup(&ep->epoll_read, (void *)POLLIN);
        }
        rt_spin_unlock_irqrestore(&ep->spinlock, level);
    }

    if (is_waiting)
    {
        return __wqueue_default_wake(wait, key);
    }

    return -1;
}
  200. /**
  201. * @brief Adds a callback function to the wait queue associated with epoll.
  202. *
  203. * This function adds a callback function to the wait queue associated with epoll.
  204. *
  205. * @param wq Pointer to the wait queue.
  206. * @param req Pointer to the poll request structure.
  207. */
  208. static void epoll_wqueue_add_callback(rt_wqueue_t *wq, rt_pollreq_t *req)
  209. {
  210. struct rt_fd_list *fdlist;
  211. struct rt_eventpoll *ep;
  212. fdlist = rt_container_of(req, struct rt_fd_list, req);
  213. ep = fdlist->ep;
  214. fdlist->wqn.key = req->_key;
  215. rt_list_init(&(fdlist->wqn.list));
  216. fdlist->wqn.polling_thread = ep->polling_thread;
  217. fdlist->wqn.wakeup = epoll_wqueue_callback;
  218. rt_wqueue_add(wq, &fdlist->wqn);
  219. }
  220. /**
  221. * @brief Installs a file descriptor list into the epoll control structure.
  222. *
  223. * This function installs a file descriptor list into the epoll control structure.
  224. *
  225. * @param fdlist Pointer to the file descriptor list.
  226. * @param ep Pointer to the epoll control structure.
  227. */
  228. static void epoll_ctl_install(struct rt_fd_list *fdlist, struct rt_eventpoll *ep)
  229. {
  230. rt_uint32_t mask = 0;
  231. rt_base_t level;
  232. fdlist->req._key = fdlist->revents;
  233. mask = epoll_get_event(fdlist, &fdlist->req);
  234. if (mask & fdlist->revents)
  235. {
  236. if (ep)
  237. {
  238. rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
  239. level = rt_spin_lock_irqsave(&ep->spinlock);
  240. rt_slist_append(&ep->rdl_head, &fdlist->rdl_node);
  241. fdlist->exclusive = 0;
  242. fdlist->is_rdl_node = RT_TRUE;
  243. ep->status = RT_EPOLL_STAT_TRIG;
  244. ep->eventpoll_num ++;
  245. rt_spin_unlock_irqrestore(&ep->spinlock, level);
  246. rt_mutex_release(&ep->lock);
  247. }
  248. }
  249. }
  250. /**
  251. * @brief Initializes the epoll control structure.
  252. *
  253. * This function initializes the epoll control structure.
  254. *
  255. * @param ep Pointer to the epoll control structure.
  256. */
  257. static void epoll_member_init(struct rt_eventpoll *ep)
  258. {
  259. ep->status = RT_EPOLL_STAT_INIT;
  260. ep->eventpoll_num = 0;
  261. ep->polling_thread = rt_thread_self();
  262. ep->fdlist = RT_NULL;
  263. ep->req._key = 0;
  264. rt_slist_init(&(ep->rdl_head));
  265. rt_wqueue_init(&ep->epoll_read);
  266. rt_mutex_init(&ep->lock, EPOLL_MUTEX_NAME, RT_IPC_FLAG_FIFO);
  267. rt_spin_lock_init(&ep->spinlock);
  268. }
  269. /**
  270. * @brief Initializes the epoll file descriptor.
  271. *
  272. * This function initializes the epoll file descriptor.
  273. *
  274. * @param fd File descriptor.
  275. *
  276. * @return Returns 0 on success.
  277. */
  278. static int epoll_epf_init(int fd)
  279. {
  280. struct dfs_file *df;
  281. struct rt_eventpoll *ep;
  282. rt_err_t ret = 0;
  283. df = fd_get(fd);
  284. if (df)
  285. {
  286. ep = (struct rt_eventpoll *)rt_malloc(sizeof(struct rt_eventpoll));
  287. if (ep)
  288. {
  289. epoll_member_init(ep);
  290. #ifdef RT_USING_DFS_V2
  291. df->fops = &epoll_fops;
  292. #endif
  293. df->vnode = (struct dfs_vnode *)rt_malloc(sizeof(struct dfs_vnode));
  294. if (df->vnode)
  295. {
  296. ep->fdlist = (struct rt_fd_list *)rt_malloc(sizeof(struct rt_fd_list));
  297. if (ep->fdlist)
  298. {
  299. ep->fdlist->next = RT_NULL;
  300. ep->fdlist->fd = fd;
  301. ep->fdlist->ep = ep;
  302. ep->fdlist->exclusive = 0;
  303. ep->fdlist->is_rdl_node = RT_FALSE;
  304. dfs_vnode_init(df->vnode, FT_REGULAR, &epoll_fops);
  305. df->vnode->data = ep;
  306. rt_slist_init(&ep->fdlist->rdl_node);
  307. }
  308. else
  309. {
  310. ret = -ENOMEM;
  311. rt_free(df->vnode);
  312. rt_free(ep);
  313. }
  314. }
  315. else
  316. {
  317. ret = -ENOMEM;
  318. rt_free(ep);
  319. }
  320. }
  321. else
  322. {
  323. ret = -ENOMEM;
  324. }
  325. }
  326. return ret;
  327. }
  328. /**
  329. * @brief Creates an epoll file descriptor.
  330. *
  331. * This function creates an epoll file descriptor.
  332. *
  333. * @param size Size of the epoll instance.
  334. *
  335. * @return Returns the file descriptor on success, or -1 on failure.
  336. */
  337. static int epoll_do_create(int size)
  338. {
  339. rt_err_t ret = -1;
  340. int status;
  341. int fd;
  342. if (size < 0)
  343. {
  344. rt_set_errno(EINVAL);
  345. }
  346. else
  347. {
  348. fd = fd_new();
  349. if (fd >= 0)
  350. {
  351. ret = fd;
  352. status = epoll_epf_init(fd);
  353. if (status < 0)
  354. {
  355. fd_release(fd);
  356. rt_set_errno(-status);
  357. }
  358. }
  359. else
  360. {
  361. rt_set_errno(-fd);
  362. }
  363. }
  364. return ret;
  365. }
/**
 * @brief Adds a file descriptor to the epoll instance.
 *
 * Registers @p fd on the epoll instance owned by @p df. Adding an fd that
 * is already monitored is a silent no-op returning 0. Otherwise a new
 * monitor node is allocated, inserted right after the list head, and
 * epoll_ctl_install() polls the target once so that an already-pending
 * event is queued immediately.
 *
 * @param df Pointer to the epoll file structure.
 * @param fd File descriptor to add.
 * @param event Epoll event structure (events mask + user data).
 *
 * @return Returns 0 on success, -EINVAL when the vnode carries no epoll
 *         data, or -ENOMEM when the monitor node cannot be allocated.
 */
static int epoll_ctl_add(struct dfs_file *df, int fd, struct epoll_event *event)
{
    struct rt_fd_list *fdlist;
    struct rt_eventpoll *ep;
    rt_err_t ret = -EINVAL;

    if (df->vnode->data)
    {
        ep = df->vnode->data;
        fdlist = ep->fdlist;
        ret = 0;

        /* Duplicate scan under the monitor-list mutex. */
        rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
        while (fdlist->next != RT_NULL)
        {
            if (fdlist->next->fd == fd)
            {
                rt_mutex_release(&ep->lock);
                return 0;
            }
            fdlist = fdlist->next;
        }
        rt_mutex_release(&ep->lock);

        /* NOTE(review): the mutex is dropped between the duplicate scan
         * above and the insertion below, so two concurrent ADDs of the
         * same fd could both pass the scan — confirm callers serialize
         * epoll_ctl per instance. */
        fdlist = (struct rt_fd_list *)rt_malloc(sizeof(struct rt_fd_list));
        if (fdlist)
        {
            fdlist->fd = fd;
            memcpy(&fdlist->epev.data, &event->data, sizeof(event->data));
            fdlist->epev.events = 0;
            fdlist->ep = ep;
            fdlist->exclusive = 0;
            fdlist->is_rdl_node = RT_FALSE;
            /* _proc is invoked by the target's poll() to hook our wqn
             * into its wait queue. */
            fdlist->req._proc = epoll_wqueue_add_callback;
            fdlist->revents = event->events;

            /* Insert right after the head node. */
            rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
            fdlist->next = ep->fdlist->next;
            ep->fdlist->next = fdlist;
            rt_mutex_release(&ep->lock);

            rt_slist_init(&fdlist->rdl_node);
            epoll_ctl_install(fdlist, ep);
        }
        else
        {
            ret = -ENOMEM;
        }
    }

    return ret;
}
  423. /**
  424. * @brief Removes a file descriptor from the epoll instance.
  425. *
  426. * This function removes a file descriptor from the epoll instance.
  427. *
  428. * @param df Pointer to the file structure.
  429. * @param fd File descriptor to remove.
  430. *
  431. * @return Returns 0 on success, or an error code on failure.
  432. */
  433. static int epoll_ctl_del(struct dfs_file *df, int fd)
  434. {
  435. struct rt_fd_list *fdlist, *fre_fd, *rdlist;
  436. struct rt_eventpoll *ep = RT_NULL;
  437. rt_slist_t *node = RT_NULL;
  438. rt_err_t ret = -EINVAL;
  439. rt_base_t level;
  440. if (df->vnode->data)
  441. {
  442. ep = df->vnode->data;
  443. if (ep)
  444. {
  445. rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
  446. level = rt_spin_lock_irqsave(&ep->spinlock);
  447. rt_slist_for_each(node, &ep->rdl_head)
  448. {
  449. rdlist = rt_slist_entry(node, struct rt_fd_list, rdl_node);
  450. if (rdlist->fd == fd)
  451. rt_slist_remove(&ep->rdl_head, node);
  452. }
  453. rt_spin_unlock_irqrestore(&ep->spinlock, level);
  454. fdlist = ep->fdlist;
  455. while (fdlist->next != RT_NULL)
  456. {
  457. if (fdlist->next->fd == fd)
  458. {
  459. fre_fd = fdlist->next;
  460. fdlist->next = fdlist->next->next;
  461. if (fre_fd->wqn.wqueue)
  462. rt_wqueue_remove(&fre_fd->wqn);
  463. rt_free(fre_fd);
  464. break;
  465. }
  466. else
  467. {
  468. fdlist = fdlist->next;
  469. }
  470. }
  471. rt_mutex_release(&ep->lock);
  472. }
  473. ret = 0;
  474. }
  475. return ret;
  476. }
  477. /**
  478. * @brief Modifies the events associated with a file descriptor in the epoll instance.
  479. *
  480. * This function modifies the events associated with a file descriptor in the epoll instance.
  481. *
  482. * @param df Pointer to the file structure.
  483. * @param fd File descriptor to modify.
  484. * @param event Pointer to the epoll event structure.
  485. *
  486. * @return Returns 0 on success, or an error code on failure.
  487. */
  488. static int epoll_ctl_mod(struct dfs_file *df, int fd, struct epoll_event *event)
  489. {
  490. struct rt_fd_list *fdlist;
  491. struct rt_eventpoll *ep = RT_NULL;
  492. rt_err_t ret = -EINVAL;
  493. if (df->vnode->data)
  494. {
  495. ep = df->vnode->data;
  496. fdlist = ep->fdlist;
  497. while (fdlist->next != RT_NULL)
  498. {
  499. if (fdlist->next->fd == fd)
  500. {
  501. rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);
  502. memcpy(&fdlist->next->epev.data, &event->data, sizeof(event->data));
  503. fdlist->next->revents = event->events;
  504. if (fdlist->next->wqn.wqueue)
  505. rt_wqueue_remove(&fdlist->next->wqn);
  506. rt_mutex_release(&ep->lock);
  507. epoll_ctl_install(fdlist->next, ep);
  508. break;
  509. }
  510. fdlist = fdlist->next;
  511. }
  512. ret = 0;
  513. }
  514. return ret;
  515. }
  516. /**
  517. * @brief Controls an epoll instance.
  518. *
  519. * This function controls an epoll instance, performing operations such as adding,
  520. * modifying, or removing file descriptors associated with the epoll instance.
  521. *
  522. * @param epfd File descriptor of the epoll instance.
  523. * @param op Operation to perform (EPOLL_CTL_ADD, EPOLL_CTL_DEL, or EPOLL_CTL_MOD).
  524. * @param fd File descriptor to add, modify, or remove.
  525. * @param event Pointer to the epoll event structure.
  526. *
  527. * @return Returns 0 on success, or -1 on failure with errno set appropriately.
  528. */
  529. static int epoll_do_ctl(int epfd, int op, int fd, struct epoll_event *event)
  530. {
  531. struct dfs_file *epdf;
  532. struct rt_eventpoll *ep;
  533. rt_err_t ret = 0;
  534. if (op & ~EFD_SHARED_EPOLL_TYPE)
  535. {
  536. rt_set_errno(EINVAL);
  537. return -1;
  538. }
  539. if ((epfd == fd) || (epfd < 0))
  540. {
  541. rt_set_errno(EINVAL);
  542. return -1;
  543. }
  544. if (!(op & EPOLL_CTL_DEL))
  545. {
  546. if (!(event->events & EPOLLEXCLUSIVE_BITS))
  547. {
  548. rt_set_errno(EINVAL);
  549. return -1;
  550. }
  551. event->events |= EPOLLERR | EPOLLHUP;
  552. }
  553. if (!fd_get(fd))
  554. {
  555. rt_set_errno(EBADF);
  556. return -1;
  557. }
  558. epdf = fd_get(epfd);
  559. if (epdf->vnode->data)
  560. {
  561. ep = epdf->vnode->data;
  562. switch (op)
  563. {
  564. case EPOLL_CTL_ADD:
  565. ret = epoll_ctl_add(epdf, fd, event);
  566. break;
  567. case EPOLL_CTL_DEL:
  568. ret = epoll_ctl_del(epdf, fd);
  569. break;
  570. case EPOLL_CTL_MOD:
  571. ret = epoll_ctl_mod(epdf, fd, event);
  572. break;
  573. default:
  574. rt_set_errno(EINVAL);
  575. break;
  576. }
  577. if (ret < 0)
  578. {
  579. rt_set_errno(-ret);
  580. ret = -1;
  581. }
  582. else
  583. {
  584. ep->polling_thread = rt_thread_self();
  585. }
  586. }
  587. return ret;
  588. }
/**
 * @brief Waits for events on an epoll instance with a specified timeout.
 *
 * Suspends the polling thread unless an event has already been triggered
 * (status == RT_EPOLL_STAT_TRIG) or @p msec converts to a zero-tick
 * timeout. A positive timeout arms the thread timer before scheduling;
 * a negative @p msec never arms the timer, i.e. the wait is unbounded.
 *
 * @param ep Pointer to the epoll instance.
 * @param msec Timeout in milliseconds (0 = don't sleep, < 0 = no timer).
 *
 * @return Returns 0 when an event was triggered, 1 when the wait ended
 *         without a trigger (timeout or other resume).
 */
static int epoll_wait_timeout(struct rt_eventpoll *ep, int msec)
{
    rt_int32_t timeout;
    struct rt_thread *thread;
    rt_base_t level;
    int ret = 0;

    thread = ep->polling_thread;

    timeout = rt_tick_from_millisecond(msec);

    level = rt_spin_lock_irqsave(&ep->spinlock);

    /* Skip the sleep entirely if a trigger already arrived. */
    if (timeout != 0 && ep->status != RT_EPOLL_STAT_TRIG)
    {
        if (rt_thread_suspend_with_flag(thread, RT_KILLABLE) == RT_EOK)
        {
            if (timeout > 0)
            {
                rt_tick_t timeout_tick = timeout;
                rt_timer_control(&(thread->thread_timer),
                                 RT_TIMER_CTRL_SET_TIME,
                                 &timeout_tick);
                rt_timer_start(&(thread->thread_timer));
            }

            /* Publish WAITING under the spinlock so the wakeup callback
             * can tell a real sleeper from a racing enumeration. */
            ep->status = RT_EPOLL_STAT_WAITING;
            rt_spin_unlock_irqrestore(&ep->spinlock, level);

            rt_schedule();

            /* Back from the sleep: if nobody moved us to TRIG, reset. */
            level = rt_spin_lock_irqsave(&ep->spinlock);
            if (ep->status == RT_EPOLL_STAT_WAITING)
                ep->status = RT_EPOLL_STAT_INIT;
        }
    }

    ret = !(ep->status == RT_EPOLL_STAT_TRIG);
    rt_spin_unlock_irqrestore(&ep->spinlock, level);

    return ret;
}
  632. /**
  633. * @brief Gets events associated with a file descriptor in the epoll instance.
  634. *
  635. * This function gets events associated with a file descriptor in the epoll instance.
  636. *
  637. * @param fl Pointer to the file descriptor list structure.
  638. * @param req Pointer to the poll request structure.
  639. *
  640. * @return Returns the bitmask of events associated with the file descriptor.
  641. */
  642. static int epoll_get_event(struct rt_fd_list *fl, rt_pollreq_t *req)
  643. {
  644. struct dfs_file *df;
  645. int mask = 0;
  646. int fd = 0;
  647. fd = fl->fd;
  648. if (fd >= 0)
  649. {
  650. df = fd_get(fd);
  651. if (df)
  652. {
  653. if (df->vnode->fops->poll)
  654. {
  655. req->_key = fl->revents | POLLERR | POLLHUP;
  656. mask = df->vnode->fops->poll(df, req);
  657. if (mask < 0)
  658. return mask;
  659. }
  660. mask &= fl->revents | EPOLLOUT | POLLERR;
  661. }
  662. }
  663. return mask;
  664. }
/**
 * @brief Performs epoll operation to get triggered events.
 *
 * Drains the ready list into @p events, re-polling each candidate to
 * confirm it is still pending. Level-triggered nodes are re-queued on the
 * ready list; EPOLLET and EPOLLONESHOT nodes are not (ONESHOT also clears
 * the monitored mask). Loops until at least one event is reported or
 * epoll_wait_timeout() signals a timeout.
 *
 * @param ep Pointer to the epoll instance.
 * @param events Pointer to the array to store triggered events.
 * @param maxevents Maximum number of events to store in the array.
 * @param timeout Timeout value in milliseconds.
 *
 * @return Returns the number of triggered events.
 */
static int epoll_do(struct rt_eventpoll *ep, struct epoll_event *events, int maxevents, int timeout)
{
    struct rt_fd_list *rdlist;
    rt_slist_t *node = RT_NULL;
    int event_num = 0;
    int istimeout = 0;
    int isn_add = 0;  /* don't copy this node to the user array */
    int isfree = 0;   /* don't re-queue this node on the ready list */
    int mask = 0;
    rt_base_t level;

    while (1)
    {
        rt_mutex_take(&ep->lock, RT_WAITING_FOREVER);

        /* The spinlock guards the ready list; it is dropped while calling
         * back into drivers and re-taken before each iteration step. */
        level = rt_spin_lock_irqsave(&ep->spinlock);
        if (ep->eventpoll_num > 0)
        {
            rt_slist_for_each(node,&ep->rdl_head)
            {
                rdlist = rt_slist_entry(node, struct rt_fd_list, rdl_node);

                /* Detach the candidate from the ready list before
                 * re-checking it without the spinlock held. */
                ep->eventpoll_num --;
                rt_slist_remove(&ep->rdl_head, &rdlist->rdl_node);
                rdlist->is_rdl_node = RT_FALSE;

                rt_spin_unlock_irqrestore(&ep->spinlock, level);

                isfree = 0;
                isn_add = 0;

                if (event_num < maxevents)
                {
                    /* Drop the stale wait-queue registration; the
                     * epoll_get_event() below re-registers via req->_proc. */
                    if (rdlist->wqn.wqueue)
                    {
                        rt_wqueue_remove(&rdlist->wqn);
                    }

                    /* Confirm the event is still pending. */
                    mask = epoll_get_event(rdlist, &rdlist->req);

                    if (mask & rdlist->revents)
                    {
                        rdlist->epev.events = mask & rdlist->revents;
                    }
                    else
                    {
                        /* Spurious entry: neither report nor re-queue. */
                        isfree = 1;
                        isn_add = 1;
                    }

                    if (rdlist->revents & EPOLLONESHOT)
                    {
                        /* One-shot: disarm the node after this report. */
                        rdlist->revents = 0;
                        isfree = 1;
                        if (rdlist->wqn.wqueue)
                            rt_wqueue_remove(&rdlist->wqn);
                    }
                    else
                    {
                        if (rdlist->revents & EPOLLET)
                        {
                            /* Edge-triggered: don't re-queue. */
                            isfree = 1;
                        }
                        else
                        {
                            /* Level-triggered: mark exclusive delivery. */
                            level = rt_spin_lock_irqsave(&ep->spinlock);
                            if (rdlist->exclusive != 1)
                            {
                                rdlist->exclusive = 1;
                            }
                            rt_spin_unlock_irqrestore(&ep->spinlock, level);
                        }
                    }

                    if (!isn_add)
                    {
                        memcpy(&events[event_num], &rdlist->epev, sizeof(rdlist->epev));
                        event_num ++;
                    }

                    if (!isfree)
                    {
                        if (rdlist->is_rdl_node == RT_FALSE)
                        {
                            /* Level-triggered and still pending: put it
                             * back for the next epoll_wait(). */
                            level = rt_spin_lock_irqsave(&ep->spinlock);
                            ep->eventpoll_num ++;
                            rt_slist_append(&ep->rdl_head, &rdlist->rdl_node);
                            rdlist->is_rdl_node = RT_TRUE;
                            rt_spin_unlock_irqrestore(&ep->spinlock, level);
                        }
                        else
                        {
                            /* The wakeup callback re-queued it meanwhile;
                             * just make sure the wqn is registered again. */
                            level = rt_spin_lock_irqsave(&ep->spinlock);
                            if (!rdlist->wqn.wqueue)
                            {
                                epoll_get_event(rdlist, &rdlist->req);
                            }
                            rt_spin_unlock_irqrestore(&ep->spinlock, level);
                        }
                    }
                }
                else
                {
                    /* User array full: re-take the lock and stop draining. */
                    level = rt_spin_lock_irqsave(&ep->spinlock);
                    break;
                }

                /* Re-take the lock for the next list step. */
                level = rt_spin_lock_irqsave(&ep->spinlock);
            }
        }

        rt_spin_unlock_irqrestore(&ep->spinlock, level);
        rt_mutex_release(&ep->lock);

        if (event_num || istimeout)
        {
            level = rt_spin_lock_irqsave(&ep->spinlock);
            ep->status = RT_EPOLL_STAT_INIT;
            rt_spin_unlock_irqrestore(&ep->spinlock, level);
            if ((timeout >= 0) || (event_num > 0))
                break;
        }

        /* Nothing reported yet: sleep until triggered or timed out. */
        if (epoll_wait_timeout(ep, timeout))
        {
            istimeout = 1;
        }
    }

    return event_num;
}
  792. /**
  793. * @brief Waits for events on an epoll instance with specified parameters.
  794. *
  795. * This function waits for events on the specified epoll instance within the given timeout, optionally blocking signals based on the provided signal set.
  796. *
  797. * @param epfd File descriptor referring to the epoll instance.
  798. * @param events Pointer to the array to store triggered events.
  799. * @param maxevents Maximum number of events to store in the array.
  800. * @param timeout Timeout value in milliseconds.
  801. * @param ss Pointer to the signal set indicating signals to block during the wait operation. Pass NULL if no signals need to be blocked.
  802. *
  803. * @return Returns the number of triggered events on success, or -1 on failure.
  804. */
  805. static int epoll_do_wait(int epfd, struct epoll_event *events, int maxevents, int timeout, const sigset_t *ss)
  806. {
  807. struct rt_eventpoll *ep;
  808. struct dfs_file *df;
  809. lwp_sigset_t old_sig, new_sig;
  810. rt_err_t ret = 0;
  811. if (ss)
  812. {
  813. memcpy(&new_sig, ss, sizeof(lwp_sigset_t));
  814. lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_BLOCK, &new_sig, &old_sig);
  815. }
  816. if ((maxevents > 0) && (epfd >= 0))
  817. {
  818. df = fd_get(epfd);
  819. if (df && df->vnode)
  820. {
  821. ep = (struct rt_eventpoll *)df->vnode->data;
  822. if (ep)
  823. {
  824. ret = epoll_do(ep, events, maxevents, timeout);
  825. }
  826. }
  827. }
  828. if (ss)
  829. {
  830. lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &old_sig, RT_NULL);
  831. }
  832. if (ret < 0)
  833. {
  834. rt_set_errno(-ret);
  835. ret = -1;
  836. }
  837. return ret;
  838. }
  839. /**
  840. * @brief Creates an epoll instance.
  841. *
  842. * This function creates an epoll instance with the specified size.
  843. *
  844. * @param size Size of the epoll instance.
  845. *
  846. * @return Returns the file descriptor referring to the created epoll instance on success, or -1 on failure.
  847. */
  848. int epoll_create(int size)
  849. {
  850. return epoll_do_create(size);
  851. }
  852. /**
  853. * @brief Modifies an epoll instance.
  854. *
  855. * This function modifies the epoll instance referred to by 'epfd' according to the specified operation 'op', associated with the file descriptor 'fd', and the event structure 'event'.
  856. *
  857. * @param epfd File descriptor referring to the epoll instance.
  858. * @param op Operation to perform (EPOLL_CTL_ADD, EPOLL_CTL_DEL, or EPOLL_CTL_MOD).
  859. * @param fd File descriptor to associate with the epoll instance or remove from it.
  860. * @param event Pointer to the event structure containing the events to modify.
  861. *
  862. * @return Returns 0 on success, or -1 on failure.
  863. */
  864. int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
  865. {
  866. return epoll_do_ctl(epfd, op, fd, event);
  867. }
  868. /**
  869. * @brief Waits for events on an epoll instance.
  870. *
  871. * This function waits for events on the epoll instance referred to by 'epfd' within the given timeout.
  872. *
  873. * @param epfd File descriptor referring to the epoll instance.
  874. * @param events Pointer to the array to store triggered events.
  875. * @param maxevents Maximum number of events to store in the array.
  876. * @param timeout Timeout value in milliseconds.
  877. *
  878. * @return Returns the number of triggered events on success, or -1 on failure.
  879. */
  880. int epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
  881. {
  882. return epoll_do_wait(epfd, events, maxevents, timeout, RT_NULL);
  883. }
  884. /**
  885. * @brief Waits for events on an epoll instance, blocking signals.
  886. *
  887. * This function waits for events on the epoll instance referred to by 'epfd' within the given timeout, blocking signals based on the provided signal set 'ss'.
  888. *
  889. * @param epfd File descriptor referring to the epoll instance.
  890. * @param events Pointer to the array to store triggered events.
  891. * @param maxevents Maximum number of events to store in the array.
  892. * @param timeout Timeout value in milliseconds.
  893. * @param ss Pointer to the signal set indicating signals to block during the wait operation.
  894. *
  895. * @return Returns the number of triggered events on success, or -1 on failure.
  896. */
  897. int epoll_pwait(int epfd, struct epoll_event *events, int maxevents, int timeout, const sigset_t *ss)
  898. {
  899. return epoll_do_wait(epfd, events, maxevents, timeout, ss);
  900. }
  901. /**
  902. * @brief Waits for events on an epoll instance, blocking signals.
  903. *
  904. * This function waits for events on the epoll instance referred to by 'epfd' within the given timeout, blocking signals based on the provided signal set 'ss'.
  905. *
  906. * @param epfd File descriptor referring to the epoll instance.
  907. * @param events Pointer to the array to store triggered events.
  908. * @param maxevents Maximum number of events to store in the array.
  909. * @param timeout Timeout value in milliseconds.
  910. * @param ss Pointer to the signal set indicating signals to block during the wait operation.
  911. *
  912. * @return Returns the number of triggered events on success, or -1 on failure.
  913. */
  914. int epoll_pwait2(int epfd, struct epoll_event *events, int maxevents, int timeout, const sigset_t *ss)
  915. {
  916. return epoll_do_wait(epfd, events, maxevents, timeout, ss);
  917. }