vfs_eventfd.c

// Copyright 2021 Espressif Systems (Shanghai) CO LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "esp_vfs_eventfd.h"

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/lock.h>
#include <sys/select.h>
#include <sys/types.h>

#include "esp_err.h"
#include "esp_log.h"
#include "esp_vfs.h"
#include "freertos/FreeRTOS.h"
#include "freertos/portmacro.h"
#include "soc/spinlock.h"

#define FD_INVALID -1
#define FD_PENDING_SELECT -2

/*
 * About the event_select_args_t linked lists
 *
 * Each event_select_args_t structure records one pending select on one file
 * descriptor from one select() call.
 *
 * For each select() call, the pending selects belonging to that call form a
 * singly linked list handed back through end_select_args.
 *
 * For each file descriptor, the pending selects on that descriptor from
 * different select() calls form a doubly linked list rooted at
 * event_context_t::select_args.
 */
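
/*
 * For example, a single select() watching event fds 0 and 2 allocates one node
 * per fd. Each node is chained into the per-call list (next_in_args) and into
 * its fd's list (prev_in_fd/next_in_fd), which may also hold nodes created by
 * other, concurrent select() calls:
 *
 *   end_select_args -> [node fd 2] -next_in_args-> [node fd 0] -> NULL
 *   s_events[2].select_args -> [node fd 2] <-> (nodes from other select() calls)
 *   s_events[0].select_args -> [node fd 0] <-> (nodes from other select() calls)
 */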

typedef struct event_select_args_t {
    int fd;
    fd_set *read_fds;
    fd_set *error_fds;
    esp_vfs_select_sem_t signal_sem;
    // doubly linked list node in event_context_t::select_args
    struct event_select_args_t *prev_in_fd;
    struct event_select_args_t *next_in_fd;
    // singly linked list node in end_select_args
    struct event_select_args_t *next_in_args;
} event_select_args_t;

typedef struct {
    int fd;
    bool support_isr;
    volatile bool is_set;
    volatile uint64_t value;
    // a doubly linked list of all pending select args on this fd
    event_select_args_t *select_args;
    _lock_t lock;
    // only used for event fds that support ISR access
    spinlock_t data_spin_lock;
} event_context_t;

static esp_vfs_id_t s_eventfd_vfs_id = -1;
static size_t s_event_size;
static event_context_t *s_events;
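
// Wakes every select() call currently pending on this event fd. Callers hold
// the event's lock (and, for ISR-capable fds, the data spinlock) around this
// call.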
static void trigger_select_for_event(event_context_t *event)
{
    event_select_args_t *select_args = event->select_args;
    while (select_args != NULL) {
        esp_vfs_select_triggered(select_args->signal_sem);
        select_args = select_args->next_in_fd;
    }
}
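
// ISR-safe variant of trigger_select_for_event(): notifies every pending
// select() and reports through task_woken whether a higher-priority task was
// unblocked, so the caller can request a context switch.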
static void trigger_select_for_event_isr(event_context_t *event, BaseType_t *task_woken)
{
    event_select_args_t *select_args = event->select_args;
    while (select_args != NULL) {
        BaseType_t local_woken;
        esp_vfs_select_triggered_isr(select_args->signal_sem, &local_woken);
        *task_woken = (local_woken || *task_woken);
        select_args = select_args->next_in_fd;
    }
}

#ifdef CONFIG_VFS_SUPPORT_SELECT
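
// start_select handler for the eventfd VFS: for every registered event fd the
// caller is watching, allocate an event_select_args_t node, link it into both
// pending-select lists, and remember the semaphore to signal later. If any
// watched fd is already readable (or is watched for write, since event fds are
// always writable), signal the semaphore immediately so select() returns
// without blocking.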
static esp_err_t event_start_select(int nfds,
                                    fd_set *readfds,
                                    fd_set *writefds,
                                    fd_set *exceptfds,
                                    esp_vfs_select_sem_t signal_sem,
                                    void **end_select_args)
{
    esp_err_t error = ESP_OK;
    bool should_trigger = false;
    nfds = nfds < s_event_size ? nfds : (int)s_event_size;
    event_select_args_t *select_args_list = NULL;

    // FIXME: end_select_args should be a list of all select args
    for (int i = 0; i < nfds; i++) {
        _lock_acquire_recursive(&s_events[i].lock);
        if (s_events[i].fd == i) {
            if (s_events[i].support_isr) {
                portENTER_CRITICAL(&s_events[i].data_spin_lock);
            }
            event_select_args_t *event_select_args =
                (event_select_args_t *)malloc(sizeof(event_select_args_t));
            event_select_args->fd = i;
            event_select_args->signal_sem = signal_sem;

            if (FD_ISSET(i, exceptfds)) {
                FD_CLR(i, exceptfds);
                event_select_args->error_fds = exceptfds;
            } else {
                event_select_args->error_fds = NULL;
            }
            // make sure the except bit is cleared; errors are only reported
            // from event_end_select() if the fd was closed while pending
            FD_CLR(i, exceptfds);
            // event fds are always writable
            if (FD_ISSET(i, writefds)) {
                should_trigger = true;
            }
            if (FD_ISSET(i, readfds)) {
                event_select_args->read_fds = readfds;
                if (s_events[i].is_set) {
                    should_trigger = true;
                } else {
                    FD_CLR(i, readfds);
                }
            } else {
                event_select_args->read_fds = NULL;
            }
            event_select_args->prev_in_fd = NULL;
            event_select_args->next_in_fd = s_events[i].select_args;
            if (s_events[i].select_args) {
                s_events[i].select_args->prev_in_fd = event_select_args;
            }
            event_select_args->next_in_args = select_args_list;
            select_args_list = event_select_args;
            s_events[i].select_args = event_select_args;
            if (s_events[i].support_isr) {
                portEXIT_CRITICAL(&s_events[i].data_spin_lock);
            }
        }
        _lock_release_recursive(&s_events[i].lock);
    }
    *end_select_args = select_args_list;
    if (should_trigger) {
        esp_vfs_select_triggered(signal_sem);
    }
    return error;
}
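
// end_select handler: walks the list built by event_start_select(), fills the
// result fd_sets (readable if the event is set, error if the fd was closed
// while the select was pending), then unlinks each node from its fd's list and
// frees it. When the last pending select on a closed fd goes away, the slot is
// released for reuse.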
static esp_err_t event_end_select(void *end_select_args)
{
    event_select_args_t *select_args = (event_select_args_t *)end_select_args;

    while (select_args != NULL) {
        event_context_t *event = &s_events[select_args->fd];
        _lock_acquire_recursive(&event->lock);
        if (event->support_isr) {
            portENTER_CRITICAL(&event->data_spin_lock);
        }

        if (event->fd != select_args->fd) { // already closed
            if (select_args->error_fds) {
                FD_SET(select_args->fd, select_args->error_fds);
            }
        } else {
            if (select_args->read_fds && event->is_set) {
                FD_SET(select_args->fd, select_args->read_fds);
            }
        }

        event_select_args_t *prev_in_fd = select_args->prev_in_fd;
        event_select_args_t *next_in_fd = select_args->next_in_fd;
        event_select_args_t *next_in_args = select_args->next_in_args;
        if (prev_in_fd != NULL) {
            prev_in_fd->next_in_fd = next_in_fd;
        } else {
            event->select_args = next_in_fd;
        }
        if (next_in_fd != NULL) {
            next_in_fd->prev_in_fd = prev_in_fd;
        }
        if (prev_in_fd == NULL && next_in_fd == NULL) { // the last pending select
            if (event->fd == FD_PENDING_SELECT) {
                event->fd = FD_INVALID;
            }
        }

        if (event->support_isr) {
            portEXIT_CRITICAL(&event->data_spin_lock);
        }
        _lock_release_recursive(&event->lock);
        free(select_args);
        select_args = next_in_args;
    }
    return ESP_OK;
}
#endif // CONFIG_VFS_SUPPORT_SELECT
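
// Signals an event fd from a context that must not block (interrupts disabled
// or running in an ISR): adds the written value under the data spinlock, wakes
// pending selects with the ISR-safe notifier, and yields from ISR if a
// higher-priority task was woken.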
static ssize_t signal_event_fd_from_isr(int fd, const void *data, size_t size)
{
    BaseType_t task_woken = pdFALSE;
    const uint64_t *val = (const uint64_t *)data;
    ssize_t ret = size;

    portENTER_CRITICAL_ISR(&s_events[fd].data_spin_lock);
    if (s_events[fd].fd == fd) {
        s_events[fd].is_set = true;
        s_events[fd].value += *val;
        trigger_select_for_event_isr(&s_events[fd], &task_woken);
    } else {
        errno = EBADF;
        ret = -1;
    }
    portEXIT_CRITICAL_ISR(&s_events[fd].data_spin_lock);

    if (task_woken) {
        portYIELD_FROM_ISR();
    }
    return ret;
}
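
// write() handler: adds the 8-byte value to the event counter, marks the event
// as set and wakes any pending selects. Dispatches to the ISR-safe path when
// called from a context that cannot yield (e.g. from an interrupt handler).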
static ssize_t event_write(int fd, const void *data, size_t size)
{
    ssize_t ret = -1;

    if (fd >= s_event_size || data == NULL || size != sizeof(uint64_t)) {
        errno = EINVAL;
        return ret;
    }

    if (!xPortCanYield()) {
        ret = signal_event_fd_from_isr(fd, data, size);
    } else {
        const uint64_t *val = (const uint64_t *)data;

        _lock_acquire_recursive(&s_events[fd].lock);
        if (s_events[fd].support_isr) {
            portENTER_CRITICAL(&s_events[fd].data_spin_lock);
        }
        if (s_events[fd].fd == fd) {
            s_events[fd].is_set = true;
            s_events[fd].value += *val;
            ret = size;
            trigger_select_for_event(&s_events[fd]);
        } else {
            errno = EBADF;
            ret = -1;
        }
        // leave the critical section on both the success and the EBADF path
        if (s_events[fd].support_isr) {
            portEXIT_CRITICAL(&s_events[fd].data_spin_lock);
        }
        _lock_release_recursive(&s_events[fd].lock);
    }
    return ret;
}
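
// read() handler: copies the current 64-bit counter to the caller, then clears
// the counter and the "set" flag. Note that, unlike a POSIX eventfd, the read
// does not block when the counter is zero; it returns immediately with a zero
// value.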
static ssize_t event_read(int fd, void *data, size_t size)
{
    ssize_t ret = -1;

    if (fd >= s_event_size || data == NULL || size != sizeof(uint64_t)) {
        errno = EINVAL;
        return ret;
    }

    uint64_t *val = (uint64_t *)data;
    _lock_acquire_recursive(&s_events[fd].lock);
    if (s_events[fd].support_isr) {
        portENTER_CRITICAL(&s_events[fd].data_spin_lock);
    }
    if (s_events[fd].fd == fd) {
        *val = s_events[fd].value;
        s_events[fd].is_set = false;
        ret = size;
        s_events[fd].value = 0;
    } else {
        errno = EBADF;
        ret = -1;
    }
    if (s_events[fd].support_isr) {
        portEXIT_CRITICAL(&s_events[fd].data_spin_lock);
    }
    _lock_release_recursive(&s_events[fd].lock);
    return ret;
}
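
// close() handler: releases the slot. If select() calls are still pending on
// this fd, the slot is parked in the FD_PENDING_SELECT state and the waiters
// are woken; it becomes reusable once the last pending select finishes in
// event_end_select().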
static int event_close(int fd)
{
    int ret = -1;

    if (fd >= s_event_size) {
        errno = EINVAL;
        return ret;
    }

    _lock_acquire_recursive(&s_events[fd].lock);
    if (s_events[fd].fd == fd) {
        if (s_events[fd].support_isr) {
            portENTER_CRITICAL(&s_events[fd].data_spin_lock);
        }
        if (s_events[fd].select_args == NULL) {
            s_events[fd].fd = FD_INVALID;
        } else {
            s_events[fd].fd = FD_PENDING_SELECT;
            trigger_select_for_event(&s_events[fd]);
        }
        s_events[fd].value = 0;
        if (s_events[fd].support_isr) {
            portEXIT_CRITICAL(&s_events[fd].data_spin_lock);
        }
        ret = 0;
    } else {
        errno = EBADF;
    }
    _lock_release_recursive(&s_events[fd].lock);
    return ret;
}
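
// Registers the eventfd virtual filesystem and allocates the table of event
// slots. Must be called once before eventfd() can be used; config->max_fds
// bounds the number of event fds that can exist at the same time.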
esp_err_t esp_vfs_eventfd_register(const esp_vfs_eventfd_config_t *config)
{
    if (config == NULL || config->max_fds >= MAX_FDS) {
        return ESP_ERR_INVALID_ARG;
    }
    if (s_eventfd_vfs_id != -1) {
        return ESP_ERR_INVALID_STATE;
    }
    s_event_size = config->max_fds;
    s_events = (event_context_t *)calloc(s_event_size, sizeof(event_context_t));
    for (size_t i = 0; i < s_event_size; i++) {
        _lock_init_recursive(&s_events[i].lock);
        s_events[i].fd = FD_INVALID;
    }

    esp_vfs_t vfs = {
        .flags = ESP_VFS_FLAG_DEFAULT,
        .write = &event_write,
        .close = &event_close,
        .read = &event_read,
#ifdef CONFIG_VFS_SUPPORT_SELECT
        .start_select = &event_start_select,
        .end_select = &event_end_select,
#endif
    };
    return esp_vfs_register_with_id(&vfs, NULL, &s_eventfd_vfs_id);
}
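
// Unregisters the eventfd virtual filesystem and frees the slot table; the
// caller is expected to have closed all event fds first.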
esp_err_t esp_vfs_eventfd_unregister(void)
{
    if (s_eventfd_vfs_id == -1) {
        return ESP_ERR_INVALID_STATE;
    }
    esp_err_t error = esp_vfs_unregister_with_id(s_eventfd_vfs_id);
    if (error == ESP_OK) {
        s_eventfd_vfs_id = -1;
    }
    for (size_t i = 0; i < s_event_size; i++) {
        _lock_close_recursive(&s_events[i].lock);
    }
    free(s_events);
    return error;
}
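
// Creates a new event fd. initval seeds the 64-bit counter; the only supported
// flag is EFD_SUPPORT_ISR, which makes the fd safe to signal from an interrupt
// handler at the cost of a critical section around every access.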
int eventfd(unsigned int initval, int flags)
{
    int fd = FD_INVALID;
    int global_fd = FD_INVALID;
    esp_err_t error = ESP_OK;

    if ((flags & (~EFD_SUPPORT_ISR)) != 0) {
        errno = EINVAL;
        return FD_INVALID;
    }
    if (s_eventfd_vfs_id == -1) {
        errno = EACCES;
        return FD_INVALID;
    }

    for (size_t i = 0; i < s_event_size; i++) {
        _lock_acquire_recursive(&s_events[i].lock);
        if (s_events[i].fd == FD_INVALID) {
            error = esp_vfs_register_fd_with_local_fd(s_eventfd_vfs_id, i, /*permanent=*/false, &global_fd);
            if (error != ESP_OK) {
                _lock_release_recursive(&s_events[i].lock);
                break;
            }
            bool support_isr = flags & EFD_SUPPORT_ISR;
            fd = i;
            s_events[i].fd = i;
            s_events[i].support_isr = support_isr;
            spinlock_initialize(&s_events[i].data_spin_lock);
            if (support_isr) {
                portENTER_CRITICAL(&s_events[i].data_spin_lock);
            }
            s_events[i].is_set = false;
            s_events[i].value = initval;
            s_events[i].select_args = NULL;
            if (support_isr) {
                portEXIT_CRITICAL(&s_events[i].data_spin_lock);
            }
            _lock_release_recursive(&s_events[i].lock);
            break;
        }
        _lock_release_recursive(&s_events[i].lock);
    }

    switch (error) {
    case ESP_OK:
        fd = global_fd;
        break;
    case ESP_ERR_NO_MEM:
        errno = ENOMEM;
        break;
    case ESP_ERR_INVALID_ARG:
        errno = EINVAL;
        break;
    default:
        errno = EIO;
        break;
    }
    return fd;
}
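
/*
 * Usage sketch (illustrative, not part of the driver): how an application is
 * typically expected to use this VFS from a FreeRTOS task. The function name
 * example_eventfd_usage and the max_fds value are placeholders chosen for the
 * example.
 *
 *   #include <stdint.h>
 *   #include <unistd.h>
 *   #include "esp_err.h"
 *   #include "esp_vfs_eventfd.h"
 *
 *   static void example_eventfd_usage(void)
 *   {
 *       esp_vfs_eventfd_config_t config = { .max_fds = 5 };
 *       ESP_ERROR_CHECK(esp_vfs_eventfd_register(&config));
 *
 *       int fd = eventfd(0, 0);          // pass EFD_SUPPORT_ISR to allow writes from ISRs
 *       uint64_t val = 1;
 *       write(fd, &val, sizeof(val));    // signal: counter += 1, wakes pending select()
 *       read(fd, &val, sizeof(val));     // consume: returns the counter, resets it to 0
 *       close(fd);
 *
 *       ESP_ERROR_CHECK(esp_vfs_eventfd_unregister());
 *   }
 */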