vfs.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206
  1. // Copyright 2015-2019 Espressif Systems (Shanghai) PTE LTD
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #include <stdlib.h>
  15. #include <string.h>
  16. #include <assert.h>
  17. #include <sys/errno.h>
  18. #include <sys/fcntl.h>
  19. #include <sys/ioctl.h>
  20. #include <sys/unistd.h>
  21. #include <sys/lock.h>
  22. #include <sys/param.h>
  23. #include <dirent.h>
  24. #include "freertos/FreeRTOS.h"
  25. #include "freertos/semphr.h"
  26. #include "esp_vfs.h"
  27. #include "sdkconfig.h"
  28. #ifdef CONFIG_SUPPRESS_SELECT_DEBUG_OUTPUT
  29. #define LOG_LOCAL_LEVEL ESP_LOG_NONE
  30. #endif //CONFIG_SUPPRESS_SELECT_DEBUG_OUTPUT
  31. #include "esp_log.h"
static const char *TAG = "vfs";

#define VFS_MAX_COUNT 8 /* max number of VFS entries (registered filesystems) */
#define LEN_PATH_PREFIX_IGNORED SIZE_MAX /* special length value for VFS which is never recognised by open() */

/* Sentinel value marking a free slot in s_fd_table. */
#define FD_TABLE_ENTRY_UNUSED (fd_table_t) { .permanent = false, .vfs_index = -1, .local_fd = -1 }

/* Driver-local file descriptor; must be wide enough to hold any value < MAX_FDS. */
typedef uint8_t local_fd_t;
_Static_assert((1 << (sizeof(local_fd_t)*8)) >= MAX_FDS, "file descriptor type too small");

/* Index into s_vfs; signed so that -1 can mean "unused". */
typedef int8_t vfs_index_t;
_Static_assert((1 << (sizeof(vfs_index_t)*8)) >= VFS_MAX_COUNT, "VFS index type too small");
_Static_assert(((vfs_index_t) -1) < 0, "vfs_index_t must be a signed type");

/* One entry of the global FD lookup table: maps a global FD number
 * to the owning VFS (vfs_index) and the driver-local FD (local_fd). */
typedef struct {
    bool permanent;         // true for FDs reserved via esp_vfs_register_fd[_range]
    vfs_index_t vfs_index;  // owning VFS, or -1 when the slot is free
    local_fd_t local_fd;    // FD number as seen by the owning driver
} fd_table_t;

/* One registered filesystem. */
typedef struct vfs_entry_ {
    esp_vfs_t vfs; // contains pointers to VFS functions
    char path_prefix[ESP_VFS_PATH_MAX]; // path prefix mapped to this VFS
    size_t path_prefix_len; // micro-optimization to avoid doing extra strlen
    void* ctx; // optional pointer which can be passed to VFS
    int offset; // index of this structure in s_vfs array
} vfs_entry_t;

/* Per-VFS group of fd_sets built by esp_vfs_select(). */
typedef struct {
    bool isset; // none or at least one bit is set in the following 3 fd sets
    fd_set readfds;
    fd_set writefds;
    fd_set errorfds;
} fds_triple_t;

static vfs_entry_t* s_vfs[VFS_MAX_COUNT] = { 0 };
static size_t s_vfs_count = 0;

// Global FD table; guarded by s_fd_table_lock for multi-word updates.
static fd_table_t s_fd_table[MAX_FDS] = { [0 ... MAX_FDS-1] = FD_TABLE_ENTRY_UNUSED };
static _lock_t s_fd_table_lock;
  63. static esp_err_t esp_vfs_register_common(const char* base_path, size_t len, const esp_vfs_t* vfs, void* ctx, int *vfs_index)
  64. {
  65. if (len != LEN_PATH_PREFIX_IGNORED) {
  66. if ((len != 0 && len < 2) || (len > ESP_VFS_PATH_MAX)) {
  67. return ESP_ERR_INVALID_ARG;
  68. }
  69. if ((len > 0 && base_path[0] != '/') || base_path[len - 1] == '/') {
  70. return ESP_ERR_INVALID_ARG;
  71. }
  72. }
  73. vfs_entry_t *entry = (vfs_entry_t*) malloc(sizeof(vfs_entry_t));
  74. if (entry == NULL) {
  75. return ESP_ERR_NO_MEM;
  76. }
  77. size_t index;
  78. for (index = 0; index < s_vfs_count; ++index) {
  79. if (s_vfs[index] == NULL) {
  80. break;
  81. }
  82. }
  83. if (index == s_vfs_count) {
  84. if (s_vfs_count >= VFS_MAX_COUNT) {
  85. free(entry);
  86. return ESP_ERR_NO_MEM;
  87. }
  88. ++s_vfs_count;
  89. }
  90. s_vfs[index] = entry;
  91. if (len != LEN_PATH_PREFIX_IGNORED) {
  92. strcpy(entry->path_prefix, base_path); // we have already verified argument length
  93. } else {
  94. bzero(entry->path_prefix, sizeof(entry->path_prefix));
  95. }
  96. memcpy(&entry->vfs, vfs, sizeof(esp_vfs_t));
  97. entry->path_prefix_len = len;
  98. entry->ctx = ctx;
  99. entry->offset = index;
  100. if (vfs_index) {
  101. *vfs_index = index;
  102. }
  103. return ESP_OK;
  104. }
  105. esp_err_t esp_vfs_register(const char* base_path, const esp_vfs_t* vfs, void* ctx)
  106. {
  107. return esp_vfs_register_common(base_path, strlen(base_path), vfs, ctx, NULL);
  108. }
  109. esp_err_t esp_vfs_register_fd_range(const esp_vfs_t *vfs, void *ctx, int min_fd, int max_fd)
  110. {
  111. if (min_fd < 0 || max_fd < 0 || min_fd > MAX_FDS || max_fd > MAX_FDS || min_fd > max_fd) {
  112. ESP_LOGD(TAG, "Invalid arguments: esp_vfs_register_fd_range(0x%x, 0x%x, %d, %d)", (int) vfs, (int) ctx, min_fd, max_fd);
  113. return ESP_ERR_INVALID_ARG;
  114. }
  115. int index = -1;
  116. esp_err_t ret = esp_vfs_register_common("", LEN_PATH_PREFIX_IGNORED, vfs, ctx, &index);
  117. if (ret == ESP_OK) {
  118. _lock_acquire(&s_fd_table_lock);
  119. for (int i = min_fd; i < max_fd; ++i) {
  120. if (s_fd_table[i].vfs_index != -1) {
  121. free(s_vfs[i]);
  122. s_vfs[i] = NULL;
  123. for (int j = min_fd; j < i; ++j) {
  124. if (s_fd_table[j].vfs_index == index) {
  125. s_fd_table[j] = FD_TABLE_ENTRY_UNUSED;
  126. }
  127. }
  128. _lock_release(&s_fd_table_lock);
  129. ESP_LOGD(TAG, "esp_vfs_register_fd_range cannot set fd %d (used by other VFS)", i);
  130. return ESP_ERR_INVALID_ARG;
  131. }
  132. s_fd_table[i].permanent = true;
  133. s_fd_table[i].vfs_index = index;
  134. s_fd_table[i].local_fd = i;
  135. }
  136. _lock_release(&s_fd_table_lock);
  137. }
  138. ESP_LOGD(TAG, "esp_vfs_register_fd_range is successful for range <%d; %d) and VFS ID %d", min_fd, max_fd, index);
  139. return ret;
  140. }
  141. esp_err_t esp_vfs_register_with_id(const esp_vfs_t *vfs, void *ctx, esp_vfs_id_t *vfs_id)
  142. {
  143. if (vfs_id == NULL) {
  144. return ESP_ERR_INVALID_ARG;
  145. }
  146. *vfs_id = -1;
  147. return esp_vfs_register_common("", LEN_PATH_PREFIX_IGNORED, vfs, ctx, vfs_id);
  148. }
  149. esp_err_t esp_vfs_unregister(const char* base_path)
  150. {
  151. const size_t base_path_len = strlen(base_path);
  152. for (size_t i = 0; i < s_vfs_count; ++i) {
  153. vfs_entry_t* vfs = s_vfs[i];
  154. if (vfs == NULL) {
  155. continue;
  156. }
  157. if (base_path_len == vfs->path_prefix_len &&
  158. memcmp(base_path, vfs->path_prefix, vfs->path_prefix_len) == 0) {
  159. free(vfs);
  160. s_vfs[i] = NULL;
  161. _lock_acquire(&s_fd_table_lock);
  162. // Delete all references from the FD lookup-table
  163. for (int j = 0; j < MAX_FDS; ++j) {
  164. if (s_fd_table[j].vfs_index == i) {
  165. s_fd_table[j] = FD_TABLE_ENTRY_UNUSED;
  166. }
  167. }
  168. _lock_release(&s_fd_table_lock);
  169. return ESP_OK;
  170. }
  171. }
  172. return ESP_ERR_INVALID_STATE;
  173. }
  174. esp_err_t esp_vfs_register_fd(esp_vfs_id_t vfs_id, int *fd)
  175. {
  176. if (vfs_id < 0 || vfs_id >= s_vfs_count || fd == NULL) {
  177. ESP_LOGD(TAG, "Invalid arguments for esp_vfs_register_fd(%d, 0x%x)", vfs_id, (int) fd);
  178. return ESP_ERR_INVALID_ARG;
  179. }
  180. esp_err_t ret = ESP_ERR_NO_MEM;
  181. _lock_acquire(&s_fd_table_lock);
  182. for (int i = 0; i < MAX_FDS; ++i) {
  183. if (s_fd_table[i].vfs_index == -1) {
  184. s_fd_table[i].permanent = true;
  185. s_fd_table[i].vfs_index = vfs_id;
  186. s_fd_table[i].local_fd = i;
  187. *fd = i;
  188. ret = ESP_OK;
  189. break;
  190. }
  191. }
  192. _lock_release(&s_fd_table_lock);
  193. ESP_LOGD(TAG, "esp_vfs_register_fd(%d, 0x%x) finished with %s", vfs_id, (int) fd, esp_err_to_name(ret));
  194. return ret;
  195. }
  196. esp_err_t esp_vfs_unregister_fd(esp_vfs_id_t vfs_id, int fd)
  197. {
  198. esp_err_t ret = ESP_ERR_INVALID_ARG;
  199. if (vfs_id < 0 || vfs_id >= s_vfs_count || fd < 0 || fd >= MAX_FDS) {
  200. ESP_LOGD(TAG, "Invalid arguments for esp_vfs_unregister_fd(%d, %d)", vfs_id, fd);
  201. return ret;
  202. }
  203. _lock_acquire(&s_fd_table_lock);
  204. fd_table_t *item = s_fd_table + fd;
  205. if (item->permanent == true && item->vfs_index == vfs_id && item->local_fd == fd) {
  206. *item = FD_TABLE_ENTRY_UNUSED;
  207. ret = ESP_OK;
  208. }
  209. _lock_release(&s_fd_table_lock);
  210. ESP_LOGD(TAG, "esp_vfs_unregister_fd(%d, %d) finished with %s", vfs_id, fd, esp_err_to_name(ret));
  211. return ret;
  212. }
  213. static inline const vfs_entry_t *get_vfs_for_index(int index)
  214. {
  215. if (index < 0 || index >= s_vfs_count) {
  216. return NULL;
  217. } else {
  218. return s_vfs[index];
  219. }
  220. }
  221. static inline bool fd_valid(int fd)
  222. {
  223. return (fd < MAX_FDS) && (fd >= 0);
  224. }
  225. static const vfs_entry_t *get_vfs_for_fd(int fd)
  226. {
  227. const vfs_entry_t *vfs = NULL;
  228. if (fd_valid(fd)) {
  229. const int index = s_fd_table[fd].vfs_index; // single read -> no locking is required
  230. vfs = get_vfs_for_index(index);
  231. }
  232. return vfs;
  233. }
  234. static inline int get_local_fd(const vfs_entry_t *vfs, int fd)
  235. {
  236. int local_fd = -1;
  237. if (vfs && fd_valid(fd)) {
  238. local_fd = s_fd_table[fd].local_fd; // single read -> no locking is required
  239. }
  240. return local_fd;
  241. }
  242. static const char* translate_path(const vfs_entry_t* vfs, const char* src_path)
  243. {
  244. assert(strncmp(src_path, vfs->path_prefix, vfs->path_prefix_len) == 0);
  245. if (strlen(src_path) == vfs->path_prefix_len) {
  246. // special case when src_path matches the path prefix exactly
  247. return "/";
  248. }
  249. return src_path + vfs->path_prefix_len;
  250. }
/* Find the registered VFS whose path prefix best matches `path`.
 * The longest matching prefix wins; a zero-length prefix (the "default"
 * VFS) matches everything but only when nothing better is found.
 * Returns NULL when no registered prefix matches. */
static const vfs_entry_t* get_vfs_for_path(const char* path)
{
    const vfs_entry_t* best_match = NULL;
    ssize_t best_match_prefix_len = -1;
    size_t len = strlen(path);
    for (size_t i = 0; i < s_vfs_count; ++i) {
        const vfs_entry_t* vfs = s_vfs[i];
        // skip empty slots and FD-only VFS entries which have no path prefix
        if (!vfs || vfs->path_prefix_len == LEN_PATH_PREFIX_IGNORED) {
            continue;
        }
        // match path prefix
        if (len < vfs->path_prefix_len ||
                memcmp(path, vfs->path_prefix, vfs->path_prefix_len) != 0) {
            continue;
        }
        // this is the default VFS and we don't have a better match yet.
        if (vfs->path_prefix_len == 0 && !best_match) {
            best_match = vfs;
            continue;
        }
        // if path is not equal to the prefix, expect to see a path separator
        // i.e. don't match "/data" prefix for "/data1/foo.txt" path
        if (len > vfs->path_prefix_len &&
                path[vfs->path_prefix_len] != '/') {
            continue;
        }
        // Out of all matching path prefixes, select the longest one;
        // i.e. if "/dev" and "/dev/uart" both match, for "/dev/uart/1" path,
        // choose "/dev/uart",
        // This causes all s_vfs_count VFS entries to be scanned when opening
        // a file by name. This can be optimized by introducing a table for
        // FS search order, sorted so that longer prefixes are checked first.
        if (best_match_prefix_len < (ssize_t) vfs->path_prefix_len) {
            best_match_prefix_len = (ssize_t) vfs->path_prefix_len;
            best_match = vfs;
        }
    }
    return best_match;
}
/*
 * Using huge multi-line macros is never nice, but in this case
 * the only alternative is to repeat this chunk of code (with different function names)
 * for each syscall being implemented. Given that this define is contained within a single
 * file, this looks like a good tradeoff.
 *
 * First we check if syscall is implemented by VFS (corresponding member is not NULL),
 * then call the right flavor of the method (e.g. open or open_p) depending on
 * ESP_VFS_FLAG_CONTEXT_PTR flag. If ESP_VFS_FLAG_CONTEXT_PTR is set, context is passed
 * in as first argument and _p variant is used for the call.
 * It is enough to check just one of them for NULL, as both variants are part of a union.
 */

/* Dispatch a syscall returning a value; on ENOSYS, `return -1` from the caller. */
#define CHECK_AND_CALL(ret, r, pvfs, func, ...) \
    if (pvfs->vfs.func == NULL) { \
        __errno_r(r) = ENOSYS; \
        return -1; \
    } \
    if (pvfs->vfs.flags & ESP_VFS_FLAG_CONTEXT_PTR) { \
        ret = (*pvfs->vfs.func ## _p)(pvfs->ctx, __VA_ARGS__); \
    } else { \
        ret = (*pvfs->vfs.func)(__VA_ARGS__);\
    }

/* Same as CHECK_AND_CALL but for void syscalls (plain `return` on ENOSYS). */
#define CHECK_AND_CALLV(r, pvfs, func, ...) \
    if (pvfs->vfs.func == NULL) { \
        __errno_r(r) = ENOSYS; \
        return; \
    } \
    if (pvfs->vfs.flags & ESP_VFS_FLAG_CONTEXT_PTR) { \
        (*pvfs->vfs.func ## _p)(pvfs->ctx, __VA_ARGS__); \
    } else { \
        (*pvfs->vfs.func)(__VA_ARGS__);\
    }

/* Same as CHECK_AND_CALL but for pointer-returning syscalls (`return NULL` on ENOSYS). */
#define CHECK_AND_CALLP(ret, r, pvfs, func, ...) \
    if (pvfs->vfs.func == NULL) { \
        __errno_r(r) = ENOSYS; \
        return NULL; \
    } \
    if (pvfs->vfs.flags & ESP_VFS_FLAG_CONTEXT_PTR) { \
        ret = (*pvfs->vfs.func ## _p)(pvfs->ctx, __VA_ARGS__); \
    } else { \
        ret = (*pvfs->vfs.func)(__VA_ARGS__);\
    }
  332. int esp_vfs_open(struct _reent *r, const char * path, int flags, int mode)
  333. {
  334. const vfs_entry_t *vfs = get_vfs_for_path(path);
  335. if (vfs == NULL) {
  336. __errno_r(r) = ENOENT;
  337. return -1;
  338. }
  339. const char *path_within_vfs = translate_path(vfs, path);
  340. int fd_within_vfs;
  341. CHECK_AND_CALL(fd_within_vfs, r, vfs, open, path_within_vfs, flags, mode);
  342. if (fd_within_vfs >= 0) {
  343. _lock_acquire(&s_fd_table_lock);
  344. for (int i = 0; i < MAX_FDS; ++i) {
  345. if (s_fd_table[i].vfs_index == -1) {
  346. s_fd_table[i].permanent = false;
  347. s_fd_table[i].vfs_index = vfs->offset;
  348. s_fd_table[i].local_fd = fd_within_vfs;
  349. _lock_release(&s_fd_table_lock);
  350. return i;
  351. }
  352. }
  353. _lock_release(&s_fd_table_lock);
  354. int ret;
  355. CHECK_AND_CALL(ret, r, vfs, close, fd_within_vfs);
  356. (void) ret; // remove "set but not used" warning
  357. __errno_r(r) = ENOMEM;
  358. return -1;
  359. }
  360. __errno_r(r) = ENOENT;
  361. return -1;
  362. }
  363. ssize_t esp_vfs_write(struct _reent *r, int fd, const void * data, size_t size)
  364. {
  365. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  366. const int local_fd = get_local_fd(vfs, fd);
  367. if (vfs == NULL || local_fd < 0) {
  368. __errno_r(r) = EBADF;
  369. return -1;
  370. }
  371. ssize_t ret;
  372. CHECK_AND_CALL(ret, r, vfs, write, local_fd, data, size);
  373. return ret;
  374. }
  375. off_t esp_vfs_lseek(struct _reent *r, int fd, off_t size, int mode)
  376. {
  377. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  378. const int local_fd = get_local_fd(vfs, fd);
  379. if (vfs == NULL || local_fd < 0) {
  380. __errno_r(r) = EBADF;
  381. return -1;
  382. }
  383. off_t ret;
  384. CHECK_AND_CALL(ret, r, vfs, lseek, local_fd, size, mode);
  385. return ret;
  386. }
  387. ssize_t esp_vfs_read(struct _reent *r, int fd, void * dst, size_t size)
  388. {
  389. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  390. const int local_fd = get_local_fd(vfs, fd);
  391. if (vfs == NULL || local_fd < 0) {
  392. __errno_r(r) = EBADF;
  393. return -1;
  394. }
  395. ssize_t ret;
  396. CHECK_AND_CALL(ret, r, vfs, read, local_fd, dst, size);
  397. return ret;
  398. }
  399. int esp_vfs_close(struct _reent *r, int fd)
  400. {
  401. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  402. const int local_fd = get_local_fd(vfs, fd);
  403. if (vfs == NULL || local_fd < 0) {
  404. __errno_r(r) = EBADF;
  405. return -1;
  406. }
  407. int ret;
  408. CHECK_AND_CALL(ret, r, vfs, close, local_fd);
  409. _lock_acquire(&s_fd_table_lock);
  410. if (!s_fd_table[fd].permanent) {
  411. s_fd_table[fd] = FD_TABLE_ENTRY_UNUSED;
  412. }
  413. _lock_release(&s_fd_table_lock);
  414. return ret;
  415. }
  416. int esp_vfs_fstat(struct _reent *r, int fd, struct stat * st)
  417. {
  418. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  419. const int local_fd = get_local_fd(vfs, fd);
  420. if (vfs == NULL || local_fd < 0) {
  421. __errno_r(r) = EBADF;
  422. return -1;
  423. }
  424. int ret;
  425. CHECK_AND_CALL(ret, r, vfs, fstat, local_fd, st);
  426. return ret;
  427. }
  428. int esp_vfs_stat(struct _reent *r, const char * path, struct stat * st)
  429. {
  430. const vfs_entry_t* vfs = get_vfs_for_path(path);
  431. if (vfs == NULL) {
  432. __errno_r(r) = ENOENT;
  433. return -1;
  434. }
  435. const char* path_within_vfs = translate_path(vfs, path);
  436. int ret;
  437. CHECK_AND_CALL(ret, r, vfs, stat, path_within_vfs, st);
  438. return ret;
  439. }
  440. int esp_vfs_link(struct _reent *r, const char* n1, const char* n2)
  441. {
  442. const vfs_entry_t* vfs = get_vfs_for_path(n1);
  443. if (vfs == NULL) {
  444. __errno_r(r) = ENOENT;
  445. return -1;
  446. }
  447. const vfs_entry_t* vfs2 = get_vfs_for_path(n2);
  448. if (vfs != vfs2) {
  449. __errno_r(r) = EXDEV;
  450. return -1;
  451. }
  452. const char* path1_within_vfs = translate_path(vfs, n1);
  453. const char* path2_within_vfs = translate_path(vfs, n2);
  454. int ret;
  455. CHECK_AND_CALL(ret, r, vfs, link, path1_within_vfs, path2_within_vfs);
  456. return ret;
  457. }
  458. int esp_vfs_unlink(struct _reent *r, const char *path)
  459. {
  460. const vfs_entry_t* vfs = get_vfs_for_path(path);
  461. if (vfs == NULL) {
  462. __errno_r(r) = ENOENT;
  463. return -1;
  464. }
  465. const char* path_within_vfs = translate_path(vfs, path);
  466. int ret;
  467. CHECK_AND_CALL(ret, r, vfs, unlink, path_within_vfs);
  468. return ret;
  469. }
  470. int esp_vfs_rename(struct _reent *r, const char *src, const char *dst)
  471. {
  472. const vfs_entry_t* vfs = get_vfs_for_path(src);
  473. if (vfs == NULL) {
  474. __errno_r(r) = ENOENT;
  475. return -1;
  476. }
  477. const vfs_entry_t* vfs_dst = get_vfs_for_path(dst);
  478. if (vfs != vfs_dst) {
  479. __errno_r(r) = EXDEV;
  480. return -1;
  481. }
  482. const char* src_within_vfs = translate_path(vfs, src);
  483. const char* dst_within_vfs = translate_path(vfs, dst);
  484. int ret;
  485. CHECK_AND_CALL(ret, r, vfs, rename, src_within_vfs, dst_within_vfs);
  486. return ret;
  487. }
  488. DIR* opendir(const char* name)
  489. {
  490. const vfs_entry_t* vfs = get_vfs_for_path(name);
  491. struct _reent* r = __getreent();
  492. if (vfs == NULL) {
  493. __errno_r(r) = ENOENT;
  494. return NULL;
  495. }
  496. const char* path_within_vfs = translate_path(vfs, name);
  497. DIR* ret;
  498. CHECK_AND_CALLP(ret, r, vfs, opendir, path_within_vfs);
  499. if (ret != NULL) {
  500. ret->dd_vfs_idx = vfs->offset;
  501. }
  502. return ret;
  503. }
  504. struct dirent* readdir(DIR* pdir)
  505. {
  506. const vfs_entry_t* vfs = get_vfs_for_index(pdir->dd_vfs_idx);
  507. struct _reent* r = __getreent();
  508. if (vfs == NULL) {
  509. __errno_r(r) = EBADF;
  510. return NULL;
  511. }
  512. struct dirent* ret;
  513. CHECK_AND_CALLP(ret, r, vfs, readdir, pdir);
  514. return ret;
  515. }
  516. int readdir_r(DIR* pdir, struct dirent* entry, struct dirent** out_dirent)
  517. {
  518. const vfs_entry_t* vfs = get_vfs_for_index(pdir->dd_vfs_idx);
  519. struct _reent* r = __getreent();
  520. if (vfs == NULL) {
  521. errno = EBADF;
  522. return -1;
  523. }
  524. int ret;
  525. CHECK_AND_CALL(ret, r, vfs, readdir_r, pdir, entry, out_dirent);
  526. return ret;
  527. }
  528. long telldir(DIR* pdir)
  529. {
  530. const vfs_entry_t* vfs = get_vfs_for_index(pdir->dd_vfs_idx);
  531. struct _reent* r = __getreent();
  532. if (vfs == NULL) {
  533. errno = EBADF;
  534. return -1;
  535. }
  536. long ret;
  537. CHECK_AND_CALL(ret, r, vfs, telldir, pdir);
  538. return ret;
  539. }
  540. void seekdir(DIR* pdir, long loc)
  541. {
  542. const vfs_entry_t* vfs = get_vfs_for_index(pdir->dd_vfs_idx);
  543. struct _reent* r = __getreent();
  544. if (vfs == NULL) {
  545. errno = EBADF;
  546. return;
  547. }
  548. CHECK_AND_CALLV(r, vfs, seekdir, pdir, loc);
  549. }
/* Reset a directory stream to its beginning. Implemented as seekdir(0),
 * so the owning VFS's seekdir callback performs the actual rewind. */
void rewinddir(DIR* pdir)
{
    seekdir(pdir, 0);
}
  554. int closedir(DIR* pdir)
  555. {
  556. const vfs_entry_t* vfs = get_vfs_for_index(pdir->dd_vfs_idx);
  557. struct _reent* r = __getreent();
  558. if (vfs == NULL) {
  559. errno = EBADF;
  560. return -1;
  561. }
  562. int ret;
  563. CHECK_AND_CALL(ret, r, vfs, closedir, pdir);
  564. return ret;
  565. }
  566. int mkdir(const char* name, mode_t mode)
  567. {
  568. const vfs_entry_t* vfs = get_vfs_for_path(name);
  569. struct _reent* r = __getreent();
  570. if (vfs == NULL) {
  571. __errno_r(r) = ENOENT;
  572. return -1;
  573. }
  574. const char* path_within_vfs = translate_path(vfs, name);
  575. int ret;
  576. CHECK_AND_CALL(ret, r, vfs, mkdir, path_within_vfs, mode);
  577. return ret;
  578. }
  579. int rmdir(const char* name)
  580. {
  581. const vfs_entry_t* vfs = get_vfs_for_path(name);
  582. struct _reent* r = __getreent();
  583. if (vfs == NULL) {
  584. __errno_r(r) = ENOENT;
  585. return -1;
  586. }
  587. const char* path_within_vfs = translate_path(vfs, name);
  588. int ret;
  589. CHECK_AND_CALL(ret, r, vfs, rmdir, path_within_vfs);
  590. return ret;
  591. }
  592. int fcntl(int fd, int cmd, ...)
  593. {
  594. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  595. const int local_fd = get_local_fd(vfs, fd);
  596. struct _reent* r = __getreent();
  597. if (vfs == NULL || local_fd < 0) {
  598. __errno_r(r) = EBADF;
  599. return -1;
  600. }
  601. int ret;
  602. va_list args;
  603. va_start(args, cmd);
  604. CHECK_AND_CALL(ret, r, vfs, fcntl, local_fd, cmd, args);
  605. va_end(args);
  606. return ret;
  607. }
  608. int ioctl(int fd, int cmd, ...)
  609. {
  610. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  611. const int local_fd = get_local_fd(vfs, fd);
  612. struct _reent* r = __getreent();
  613. if (vfs == NULL || local_fd < 0) {
  614. __errno_r(r) = EBADF;
  615. return -1;
  616. }
  617. int ret;
  618. va_list args;
  619. va_start(args, cmd);
  620. CHECK_AND_CALL(ret, r, vfs, ioctl, local_fd, cmd, args);
  621. va_end(args);
  622. return ret;
  623. }
  624. int fsync(int fd)
  625. {
  626. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  627. const int local_fd = get_local_fd(vfs, fd);
  628. struct _reent* r = __getreent();
  629. if (vfs == NULL || local_fd < 0) {
  630. __errno_r(r) = EBADF;
  631. return -1;
  632. }
  633. int ret;
  634. CHECK_AND_CALL(ret, r, vfs, fsync, local_fd);
  635. return ret;
  636. }
  637. int access(const char *path, int amode)
  638. {
  639. int ret;
  640. const vfs_entry_t* vfs = get_vfs_for_path(path);
  641. struct _reent* r = __getreent();
  642. if (vfs == NULL) {
  643. __errno_r(r) = ENOENT;
  644. return -1;
  645. }
  646. const char* path_within_vfs = translate_path(vfs, path);
  647. CHECK_AND_CALL(ret, r, vfs, access, path_within_vfs, amode);
  648. return ret;
  649. }
  650. int truncate(const char *path, off_t length)
  651. {
  652. int ret;
  653. const vfs_entry_t* vfs = get_vfs_for_path(path);
  654. struct _reent* r = __getreent();
  655. if (vfs == NULL) {
  656. __errno_r(r) = ENOENT;
  657. return -1;
  658. }
  659. const char* path_within_vfs = translate_path(vfs, path);
  660. CHECK_AND_CALL(ret, r, vfs, truncate, path_within_vfs, length);
  661. return ret;
  662. }
  663. static void call_end_selects(int end_index, const fds_triple_t *vfs_fds_triple)
  664. {
  665. for (int i = 0; i < end_index; ++i) {
  666. const vfs_entry_t *vfs = get_vfs_for_index(i);
  667. const fds_triple_t *item = &vfs_fds_triple[i];
  668. if (vfs && vfs->vfs.end_select && item->isset) {
  669. vfs->vfs.end_select();
  670. }
  671. }
  672. }
  673. static inline bool esp_vfs_safe_fd_isset(int fd, const fd_set *fds)
  674. {
  675. return fds && FD_ISSET(fd, fds);
  676. }
  677. static int set_global_fd_sets(const fds_triple_t *vfs_fds_triple, int size, fd_set *readfds, fd_set *writefds, fd_set *errorfds)
  678. {
  679. int ret = 0;
  680. for (int i = 0; i < size; ++i) {
  681. const fds_triple_t *item = &vfs_fds_triple[i];
  682. if (item->isset) {
  683. for (int fd = 0; fd < MAX_FDS; ++fd) {
  684. const int local_fd = s_fd_table[fd].local_fd; // single read -> no locking is required
  685. if (readfds && esp_vfs_safe_fd_isset(local_fd, &item->readfds)) {
  686. ESP_LOGD(TAG, "FD %d in readfds was set from VFS ID %d", fd, i);
  687. FD_SET(fd, readfds);
  688. ++ret;
  689. }
  690. if (writefds && esp_vfs_safe_fd_isset(local_fd, &item->writefds)) {
  691. ESP_LOGD(TAG, "FD %d in writefds was set from VFS ID %d", fd, i);
  692. FD_SET(fd, writefds);
  693. ++ret;
  694. }
  695. if (errorfds && esp_vfs_safe_fd_isset(local_fd, &item->errorfds)) {
  696. ESP_LOGD(TAG, "FD %d in errorfds was set from VFS ID %d", fd, i);
  697. FD_SET(fd, errorfds);
  698. ++ret;
  699. }
  700. }
  701. }
  702. }
  703. return ret;
  704. }
  705. static void esp_vfs_log_fd_set(const char *fds_name, const fd_set *fds)
  706. {
  707. if (fds_name && fds) {
  708. ESP_LOGD(TAG, "FDs in %s =", fds_name);
  709. for (int i = 0; i < MAX_FDS; ++i) {
  710. if (esp_vfs_safe_fd_isset(i, fds)) {
  711. ESP_LOGD(TAG, "%d", i);
  712. }
  713. }
  714. }
  715. }
  716. int esp_vfs_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *errorfds, struct timeval *timeout)
  717. {
  718. int ret = 0;
  719. struct _reent* r = __getreent();
  720. ESP_LOGD(TAG, "esp_vfs_select starts with nfds = %d", nfds);
  721. if (timeout) {
  722. ESP_LOGD(TAG, "timeout is %lds + %ldus", timeout->tv_sec, timeout->tv_usec);
  723. }
  724. esp_vfs_log_fd_set("readfds", readfds);
  725. esp_vfs_log_fd_set("writefds", writefds);
  726. esp_vfs_log_fd_set("errorfds", errorfds);
  727. if (nfds > MAX_FDS || nfds < 0) {
  728. ESP_LOGD(TAG, "incorrect nfds");
  729. __errno_r(r) = EINVAL;
  730. return -1;
  731. }
  732. // Capture s_vfs_count to a local variable in case a new driver is registered or removed during this actual select()
  733. // call. s_vfs_count cannot be protected with a mutex during a select() call (which can be one without a timeout)
  734. // because that could block the registration of new driver.
  735. const size_t vfs_count = s_vfs_count;
  736. fds_triple_t *vfs_fds_triple;
  737. if ((vfs_fds_triple = calloc(vfs_count, sizeof(fds_triple_t))) == NULL) {
  738. __errno_r(r) = ENOMEM;
  739. ESP_LOGD(TAG, "calloc is unsuccessful");
  740. return -1;
  741. }
  742. int (*socket_select)(int, fd_set *, fd_set *, fd_set *, struct timeval *) = NULL;
  743. for (int fd = 0; fd < nfds; ++fd) {
  744. _lock_acquire(&s_fd_table_lock);
  745. const bool is_socket_fd = s_fd_table[fd].permanent;
  746. const int vfs_index = s_fd_table[fd].vfs_index;
  747. const int local_fd = s_fd_table[fd].local_fd;
  748. _lock_release(&s_fd_table_lock);
  749. if (vfs_index < 0) {
  750. continue;
  751. }
  752. if (is_socket_fd) {
  753. if (!socket_select) {
  754. // no socket_select found yet so take a look
  755. if (esp_vfs_safe_fd_isset(fd, readfds) ||
  756. esp_vfs_safe_fd_isset(fd, writefds) ||
  757. esp_vfs_safe_fd_isset(fd, errorfds)) {
  758. const vfs_entry_t *vfs = s_vfs[vfs_index];
  759. socket_select = vfs->vfs.socket_select;
  760. // get_socket_select_semaphore needs to be set for a socket driver where semaphore can be
  761. // initialized outside interrupt handlers (ignoring this could result in unexpected failures)
  762. if (vfs->vfs.get_socket_select_semaphore != NULL) {
  763. vfs->vfs.get_socket_select_semaphore(); // Semaphore is returned and it was allocated if it
  764. // wasn't before. We don't use the return value just need to be sure that it doesn't get
  765. // allocated later from ISR.
  766. // Note: ESP-IDF v4.0 will start to use this callback differently with some breaking changes
  767. // in the VFS API.
  768. }
  769. }
  770. }
  771. continue;
  772. }
  773. fds_triple_t *item = &vfs_fds_triple[vfs_index]; // FD sets for VFS which belongs to fd
  774. if (esp_vfs_safe_fd_isset(fd, readfds)) {
  775. item->isset = true;
  776. FD_SET(local_fd, &item->readfds);
  777. FD_CLR(fd, readfds);
  778. ESP_LOGD(TAG, "removing %d from readfds and adding as local FD %d to fd_set of VFS ID %d", fd, local_fd, vfs_index);
  779. }
  780. if (esp_vfs_safe_fd_isset(fd, writefds)) {
  781. item->isset = true;
  782. FD_SET(local_fd, &item->writefds);
  783. FD_CLR(fd, writefds);
  784. ESP_LOGD(TAG, "removing %d from writefds and adding as local FD %d to fd_set of VFS ID %d", fd, local_fd, vfs_index);
  785. }
  786. if (esp_vfs_safe_fd_isset(fd, errorfds)) {
  787. item->isset = true;
  788. FD_SET(local_fd, &item->errorfds);
  789. FD_CLR(fd, errorfds);
  790. ESP_LOGD(TAG, "removing %d from errorfds and adding as local FD %d to fd_set of VFS ID %d", fd, local_fd, vfs_index);
  791. }
  792. }
  793. // all non-socket VFSs have their FD sets in vfs_fds_triple
  794. // the global readfds, writefds and errorfds contain only socket FDs (if
  795. // there any)
  796. /* Semaphore used for waiting select events from other VFS drivers when socket
  797. * select is not used (not registered or socket FDs are not observed by the
  798. * given call of select)
  799. */
  800. SemaphoreHandle_t select_sem = NULL;
  801. if (!socket_select) {
  802. // There is no socket VFS registered or select() wasn't called for
  803. // any socket. Therefore, we will use our own signalization.
  804. if ((select_sem = xSemaphoreCreateBinary()) == NULL) {
  805. free(vfs_fds_triple);
  806. __errno_r(r) = ENOMEM;
  807. ESP_LOGD(TAG, "cannot create select_sem");
  808. return -1;
  809. }
  810. }
  811. for (int i = 0; i < vfs_count; ++i) {
  812. const vfs_entry_t *vfs = get_vfs_for_index(i);
  813. fds_triple_t *item = &vfs_fds_triple[i];
  814. if (vfs && vfs->vfs.start_select && item->isset) {
  815. // call start_select for all non-socket VFSs with has at least one FD set in readfds, writefds, or errorfds
  816. // note: it can point to socket VFS but item->isset will be false for that
  817. ESP_LOGD(TAG, "calling start_select for VFS ID %d with the following local FDs", i);
  818. esp_vfs_log_fd_set("readfds", &item->readfds);
  819. esp_vfs_log_fd_set("writefds", &item->writefds);
  820. esp_vfs_log_fd_set("errorfds", &item->errorfds);
  821. esp_err_t err = vfs->vfs.start_select(nfds, &item->readfds, &item->writefds, &item->errorfds, &select_sem);
  822. if (err != ESP_OK) {
  823. call_end_selects(i, vfs_fds_triple);
  824. (void) set_global_fd_sets(vfs_fds_triple, vfs_count, readfds, writefds, errorfds);
  825. if (select_sem) {
  826. vSemaphoreDelete(select_sem);
  827. select_sem = NULL;
  828. }
  829. free(vfs_fds_triple);
  830. __errno_r(r) = EINTR;
  831. ESP_LOGD(TAG, "start_select failed");
  832. return -1;
  833. }
  834. }
  835. }
  836. if (socket_select) {
  837. ESP_LOGD(TAG, "calling socket_select with the following FDs");
  838. esp_vfs_log_fd_set("readfds", readfds);
  839. esp_vfs_log_fd_set("writefds", writefds);
  840. esp_vfs_log_fd_set("errorfds", errorfds);
  841. ret = socket_select(nfds, readfds, writefds, errorfds, timeout);
  842. ESP_LOGD(TAG, "socket_select returned %d and the FDs are the following", ret);
  843. esp_vfs_log_fd_set("readfds", readfds);
  844. esp_vfs_log_fd_set("writefds", writefds);
  845. esp_vfs_log_fd_set("errorfds", errorfds);
  846. } else {
  847. if (readfds) {
  848. FD_ZERO(readfds);
  849. }
  850. if (writefds) {
  851. FD_ZERO(writefds);
  852. }
  853. if (errorfds) {
  854. FD_ZERO(errorfds);
  855. }
  856. TickType_t ticks_to_wait = portMAX_DELAY;
  857. if (timeout) {
  858. uint32_t timeout_ms = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
  859. ticks_to_wait = timeout_ms / portTICK_PERIOD_MS;
  860. ESP_LOGD(TAG, "timeout is %dms", timeout_ms);
  861. }
  862. ESP_LOGD(TAG, "waiting without calling socket_select");
  863. xSemaphoreTake(select_sem, ticks_to_wait);
  864. }
  865. call_end_selects(vfs_count, vfs_fds_triple); // for VFSs for start_select was called before
  866. if (ret >= 0) {
  867. ret += set_global_fd_sets(vfs_fds_triple, vfs_count, readfds, writefds, errorfds);
  868. }
  869. if (select_sem) {
  870. vSemaphoreDelete(select_sem);
  871. select_sem = NULL;
  872. }
  873. free(vfs_fds_triple);
  874. ESP_LOGD(TAG, "esp_vfs_select returns %d", ret);
  875. esp_vfs_log_fd_set("readfds", readfds);
  876. esp_vfs_log_fd_set("writefds", writefds);
  877. esp_vfs_log_fd_set("errorfds", errorfds);
  878. return ret;
  879. }
  880. void esp_vfs_select_triggered(SemaphoreHandle_t *signal_sem)
  881. {
  882. if (signal_sem && (*signal_sem)) {
  883. xSemaphoreGive(*signal_sem);
  884. } else {
  885. // Another way would be to go through s_fd_table and find the VFS
  886. // which has a permanent FD. But in order to avoid to lock
  887. // s_fd_table_lock we go through the VFS table.
  888. for (int i = 0; i < s_vfs_count; ++i) {
  889. // Note: s_vfs_count could have changed since the start of vfs_select() call. However, that change doesn't
  890. // matter here stop_socket_select() will be called for only valid VFS drivers.
  891. const vfs_entry_t *vfs = s_vfs[i];
  892. if (vfs != NULL && vfs->vfs.stop_socket_select != NULL) {
  893. vfs->vfs.stop_socket_select();
  894. break;
  895. }
  896. }
  897. }
  898. }
  899. void esp_vfs_select_triggered_isr(SemaphoreHandle_t *signal_sem, BaseType_t *woken)
  900. {
  901. if (signal_sem && (*signal_sem)) {
  902. xSemaphoreGiveFromISR(*signal_sem, woken);
  903. } else {
  904. // Another way would be to go through s_fd_table and find the VFS
  905. // which has a permanent FD. But in order to avoid to lock
  906. // s_fd_table_lock we go through the VFS table.
  907. for (int i = 0; i < s_vfs_count; ++i) {
  908. // Note: s_vfs_count could have changed since the start of vfs_select() call. However, that change doesn't
  909. // matter here stop_socket_select() will be called for only valid VFS drivers.
  910. const vfs_entry_t *vfs = s_vfs[i];
  911. if (vfs != NULL && vfs->vfs.stop_socket_select_isr != NULL) {
  912. vfs->vfs.stop_socket_select_isr(woken);
  913. break;
  914. }
  915. }
  916. }
  917. }
  918. #ifdef CONFIG_SUPPORT_TERMIOS
  919. int tcgetattr(int fd, struct termios *p)
  920. {
  921. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  922. const int local_fd = get_local_fd(vfs, fd);
  923. struct _reent* r = __getreent();
  924. if (vfs == NULL || local_fd < 0) {
  925. __errno_r(r) = EBADF;
  926. return -1;
  927. }
  928. int ret;
  929. CHECK_AND_CALL(ret, r, vfs, tcgetattr, local_fd, p);
  930. return ret;
  931. }
  932. int tcsetattr(int fd, int optional_actions, const struct termios *p)
  933. {
  934. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  935. const int local_fd = get_local_fd(vfs, fd);
  936. struct _reent* r = __getreent();
  937. if (vfs == NULL || local_fd < 0) {
  938. __errno_r(r) = EBADF;
  939. return -1;
  940. }
  941. int ret;
  942. CHECK_AND_CALL(ret, r, vfs, tcsetattr, local_fd, optional_actions, p);
  943. return ret;
  944. }
  945. int tcdrain(int fd)
  946. {
  947. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  948. const int local_fd = get_local_fd(vfs, fd);
  949. struct _reent* r = __getreent();
  950. if (vfs == NULL || local_fd < 0) {
  951. __errno_r(r) = EBADF;
  952. return -1;
  953. }
  954. int ret;
  955. CHECK_AND_CALL(ret, r, vfs, tcdrain, local_fd);
  956. return ret;
  957. }
  958. int tcflush(int fd, int select)
  959. {
  960. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  961. const int local_fd = get_local_fd(vfs, fd);
  962. struct _reent* r = __getreent();
  963. if (vfs == NULL || local_fd < 0) {
  964. __errno_r(r) = EBADF;
  965. return -1;
  966. }
  967. int ret;
  968. CHECK_AND_CALL(ret, r, vfs, tcflush, local_fd, select);
  969. return ret;
  970. }
  971. int tcflow(int fd, int action)
  972. {
  973. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  974. const int local_fd = get_local_fd(vfs, fd);
  975. struct _reent* r = __getreent();
  976. if (vfs == NULL || local_fd < 0) {
  977. __errno_r(r) = EBADF;
  978. return -1;
  979. }
  980. int ret;
  981. CHECK_AND_CALL(ret, r, vfs, tcflow, local_fd, action);
  982. return ret;
  983. }
  984. pid_t tcgetsid(int fd)
  985. {
  986. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  987. const int local_fd = get_local_fd(vfs, fd);
  988. struct _reent* r = __getreent();
  989. if (vfs == NULL || local_fd < 0) {
  990. __errno_r(r) = EBADF;
  991. return -1;
  992. }
  993. int ret;
  994. CHECK_AND_CALL(ret, r, vfs, tcgetsid, local_fd);
  995. return ret;
  996. }
  997. int tcsendbreak(int fd, int duration)
  998. {
  999. const vfs_entry_t* vfs = get_vfs_for_fd(fd);
  1000. const int local_fd = get_local_fd(vfs, fd);
  1001. struct _reent* r = __getreent();
  1002. if (vfs == NULL || local_fd < 0) {
  1003. __errno_r(r) = EBADF;
  1004. return -1;
  1005. }
  1006. int ret;
  1007. CHECK_AND_CALL(ret, r, vfs, tcsendbreak, local_fd, duration);
  1008. return ret;
  1009. }
  1010. #endif // CONFIG_SUPPORT_TERMIOS
  1011. int esp_vfs_utime(const char *path, const struct utimbuf *times)
  1012. {
  1013. int ret;
  1014. const vfs_entry_t* vfs = get_vfs_for_path(path);
  1015. struct _reent* r = __getreent();
  1016. if (vfs == NULL) {
  1017. __errno_r(r) = ENOENT;
  1018. return -1;
  1019. }
  1020. const char* path_within_vfs = translate_path(vfs, path);
  1021. CHECK_AND_CALL(ret, r, vfs, utime, path_within_vfs, times);
  1022. return ret;
  1023. }
  1024. int esp_vfs_poll(struct pollfd *fds, nfds_t nfds, int timeout)
  1025. {
  1026. struct timeval tv = {
  1027. // timeout is in milliseconds
  1028. .tv_sec = timeout / 1000,
  1029. .tv_usec = (timeout % 1000) * 1000,
  1030. };
  1031. int max_fd = -1;
  1032. fd_set readfds;
  1033. fd_set writefds;
  1034. fd_set errorfds;
  1035. struct _reent* r = __getreent();
  1036. int ret = 0;
  1037. if (fds == NULL) {
  1038. __errno_r(r) = ENOENT;
  1039. return -1;
  1040. }
  1041. FD_ZERO(&readfds);
  1042. FD_ZERO(&writefds);
  1043. FD_ZERO(&errorfds);
  1044. for (int i = 0; i < nfds; ++i) {
  1045. fds[i].revents = 0;
  1046. if (fds[i].fd < 0) {
  1047. // revents should remain 0 and events ignored (according to the documentation of poll()).
  1048. continue;
  1049. }
  1050. if (fds[i].fd >= MAX_FDS) {
  1051. fds[i].revents |= POLLNVAL;
  1052. ++ret;
  1053. continue;
  1054. }
  1055. if (fds[i].events & (POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI)) {
  1056. FD_SET(fds[i].fd, &readfds);
  1057. FD_SET(fds[i].fd, &errorfds);
  1058. max_fd = MAX(max_fd, fds[i].fd);
  1059. }
  1060. if (fds[i].events & (POLLOUT | POLLWRNORM | POLLWRBAND)) {
  1061. FD_SET(fds[i].fd, &writefds);
  1062. FD_SET(fds[i].fd, &errorfds);
  1063. max_fd = MAX(max_fd, fds[i].fd);
  1064. }
  1065. }
  1066. const int select_ret = esp_vfs_select(max_fd + 1, &readfds, &writefds, &errorfds, timeout < 0 ? NULL: &tv);
  1067. if (select_ret > 0) {
  1068. ret += select_ret;
  1069. for (int i = 0; i < nfds; ++i) {
  1070. if (FD_ISSET(fds[i].fd, &readfds)) {
  1071. fds[i].revents |= POLLIN;
  1072. }
  1073. if (FD_ISSET(fds[i].fd, &writefds)) {
  1074. fds[i].revents |= POLLOUT;
  1075. }
  1076. if (FD_ISSET(fds[i].fd, &errorfds)) {
  1077. fds[i].revents |= POLLERR;
  1078. }
  1079. }
  1080. } else {
  1081. ret = select_ret;
  1082. // keeping the errno from select()
  1083. }
  1084. return ret;
  1085. }