vfs.c

// Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>
#include <sys/unistd.h>
#include <sys/lock.h>
#include <sys/param.h>
#include <dirent.h>
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "esp_vfs.h"
#include "sdkconfig.h"

#ifdef CONFIG_SUPPRESS_SELECT_DEBUG_OUTPUT
#define LOG_LOCAL_LEVEL ESP_LOG_NONE
#endif //CONFIG_SUPPRESS_SELECT_DEBUG_OUTPUT
#include "esp_log.h"

static const char *TAG = "vfs";

#define VFS_MAX_COUNT 8   /* max number of VFS entries (registered filesystems) */
#define LEN_PATH_PREFIX_IGNORED SIZE_MAX /* special length value for VFS which is never recognised by open() */
#define FD_TABLE_ENTRY_UNUSED   (fd_table_t) { .permanent = false, .vfs_index = -1, .local_fd = -1 }

typedef uint8_t local_fd_t;
_Static_assert((1 << (sizeof(local_fd_t)*8)) >= MAX_FDS, "file descriptor type too small");

typedef int8_t vfs_index_t;
_Static_assert((1 << (sizeof(vfs_index_t)*8)) >= VFS_MAX_COUNT, "VFS index type too small");
_Static_assert(((vfs_index_t) -1) < 0, "vfs_index_t must be a signed type");

typedef struct {
    bool permanent;
    vfs_index_t vfs_index;
    local_fd_t local_fd;
} fd_table_t;

typedef struct vfs_entry_ {
    esp_vfs_t vfs;          // contains pointers to VFS functions
    char path_prefix[ESP_VFS_PATH_MAX]; // path prefix mapped to this VFS
    size_t path_prefix_len; // micro-optimization to avoid doing extra strlen
    void* ctx;              // optional pointer which can be passed to VFS
    int offset;             // index of this structure in s_vfs array
} vfs_entry_t;

typedef struct {
    bool isset; // none or at least one bit is set in the following 3 fd sets
    fd_set readfds;
    fd_set writefds;
    fd_set errorfds;
} fds_triple_t;

static vfs_entry_t* s_vfs[VFS_MAX_COUNT] = { 0 };
static size_t s_vfs_count = 0;

static fd_table_t s_fd_table[MAX_FDS] = { [0 ... MAX_FDS-1] = FD_TABLE_ENTRY_UNUSED };
static _lock_t s_fd_table_lock;
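
/*
 * Illustrative sketch only (the contents below are hypothetical, not created
 * by this file): with one path-based VFS stored at s_vfs[0] and a second VFS
 * that reserved a permanent FD range via esp_vfs_register_fd_range() stored
 * at s_vfs[1], the lookup table could hold entries such as
 *
 *   s_fd_table[3]  = { .permanent = false, .vfs_index = 0, .local_fd = 1  };
 *   s_fd_table[54] = { .permanent = true,  .vfs_index = 1, .local_fd = 54 };
 *
 * i.e. a global FD is resolved to (VFS entry, driver-local FD) with two
 * array reads and no search.
 */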
static esp_err_t esp_vfs_register_common(const char* base_path, size_t len, const esp_vfs_t* vfs, void* ctx, int *vfs_index)
{
    if (len != LEN_PATH_PREFIX_IGNORED) {
        if ((len != 0 && len < 2) || (len > ESP_VFS_PATH_MAX)) {
            return ESP_ERR_INVALID_ARG;
        }
        if ((len > 0 && base_path[0] != '/') || base_path[len - 1] == '/') {
            return ESP_ERR_INVALID_ARG;
        }
    }
    vfs_entry_t *entry = (vfs_entry_t*) malloc(sizeof(vfs_entry_t));
    if (entry == NULL) {
        return ESP_ERR_NO_MEM;
    }
    size_t index;
    for (index = 0; index < s_vfs_count; ++index) {
        if (s_vfs[index] == NULL) {
            break;
        }
    }
    if (index == s_vfs_count) {
        if (s_vfs_count >= VFS_MAX_COUNT) {
            free(entry);
            return ESP_ERR_NO_MEM;
        }
        ++s_vfs_count;
    }
    s_vfs[index] = entry;
    if (len != LEN_PATH_PREFIX_IGNORED) {
        strcpy(entry->path_prefix, base_path); // we have already verified argument length
    } else {
        bzero(entry->path_prefix, sizeof(entry->path_prefix));
    }
    memcpy(&entry->vfs, vfs, sizeof(esp_vfs_t));
    entry->path_prefix_len = len;
    entry->ctx = ctx;
    entry->offset = index;
    if (vfs_index) {
        *vfs_index = index;
    }
    return ESP_OK;
}

esp_err_t esp_vfs_register(const char* base_path, const esp_vfs_t* vfs, void* ctx)
{
    return esp_vfs_register_common(base_path, strlen(base_path), vfs, ctx, NULL);
}
esp_err_t esp_vfs_register_fd_range(const esp_vfs_t *vfs, void *ctx, int min_fd, int max_fd)
{
    if (min_fd < 0 || max_fd < 0 || min_fd > MAX_FDS || max_fd > MAX_FDS || min_fd > max_fd) {
        ESP_LOGD(TAG, "Invalid arguments: esp_vfs_register_fd_range(0x%x, 0x%x, %d, %d)", (int) vfs, (int) ctx, min_fd, max_fd);
        return ESP_ERR_INVALID_ARG;
    }
    int index = -1;
    esp_err_t ret = esp_vfs_register_common("", LEN_PATH_PREFIX_IGNORED, vfs, ctx, &index);
    if (ret == ESP_OK) {
        _lock_acquire(&s_fd_table_lock);
        for (int i = min_fd; i < max_fd; ++i) {
            if (s_fd_table[i].vfs_index != -1) {
                // FD already belongs to another VFS: undo the registration and
                // release any table entries assigned so far
                free(s_vfs[index]);
                s_vfs[index] = NULL;
                for (int j = min_fd; j < i; ++j) {
                    if (s_fd_table[j].vfs_index == index) {
                        s_fd_table[j] = FD_TABLE_ENTRY_UNUSED;
                    }
                }
                _lock_release(&s_fd_table_lock);
                ESP_LOGD(TAG, "esp_vfs_register_fd_range cannot set fd %d (used by other VFS)", i);
                return ESP_ERR_INVALID_ARG;
            }
            s_fd_table[i].permanent = true;
            s_fd_table[i].vfs_index = index;
            s_fd_table[i].local_fd = i;
        }
        _lock_release(&s_fd_table_lock);
    }
    ESP_LOGD(TAG, "esp_vfs_register_fd_range is successful for range <%d; %d) and VFS ID %d", min_fd, max_fd, index);
    return ret;
}
esp_err_t esp_vfs_register_with_id(const esp_vfs_t *vfs, void *ctx, esp_vfs_id_t *vfs_id)
{
    if (vfs_id == NULL) {
        return ESP_ERR_INVALID_ARG;
    }
    *vfs_id = -1;
    return esp_vfs_register_common("", LEN_PATH_PREFIX_IGNORED, vfs, ctx, vfs_id);
}
esp_err_t esp_vfs_unregister(const char* base_path)
{
    const size_t base_path_len = strlen(base_path);
    for (size_t i = 0; i < s_vfs_count; ++i) {
        vfs_entry_t* vfs = s_vfs[i];
        if (vfs == NULL) {
            continue;
        }
        if (base_path_len == vfs->path_prefix_len &&
                memcmp(base_path, vfs->path_prefix, vfs->path_prefix_len) == 0) {
            free(vfs);
            s_vfs[i] = NULL;

            _lock_acquire(&s_fd_table_lock);
            // Delete all references from the FD lookup-table
            for (int j = 0; j < MAX_FDS; ++j) {
                if (s_fd_table[j].vfs_index == i) {
                    s_fd_table[j] = FD_TABLE_ENTRY_UNUSED;
                }
            }
            _lock_release(&s_fd_table_lock);

            return ESP_OK;
        }
    }
    return ESP_ERR_INVALID_STATE;
}

esp_err_t esp_vfs_register_fd(esp_vfs_id_t vfs_id, int *fd)
{
    if (vfs_id < 0 || vfs_id >= s_vfs_count || fd == NULL) {
        ESP_LOGD(TAG, "Invalid arguments for esp_vfs_register_fd(%d, 0x%x)", vfs_id, (int) fd);
        return ESP_ERR_INVALID_ARG;
    }
    esp_err_t ret = ESP_ERR_NO_MEM;
    _lock_acquire(&s_fd_table_lock);
    for (int i = 0; i < MAX_FDS; ++i) {
        if (s_fd_table[i].vfs_index == -1) {
            s_fd_table[i].permanent = true;
            s_fd_table[i].vfs_index = vfs_id;
            s_fd_table[i].local_fd = i;
            *fd = i;
            ret = ESP_OK;
            break;
        }
    }
    _lock_release(&s_fd_table_lock);
    ESP_LOGD(TAG, "esp_vfs_register_fd(%d, 0x%x) finished with %s", vfs_id, (int) fd, esp_err_to_name(ret));
    return ret;
}

esp_err_t esp_vfs_unregister_fd(esp_vfs_id_t vfs_id, int fd)
{
    esp_err_t ret = ESP_ERR_INVALID_ARG;
    if (vfs_id < 0 || vfs_id >= s_vfs_count || fd < 0 || fd >= MAX_FDS) {
        ESP_LOGD(TAG, "Invalid arguments for esp_vfs_unregister_fd(%d, %d)", vfs_id, fd);
        return ret;
    }
    _lock_acquire(&s_fd_table_lock);
    fd_table_t *item = s_fd_table + fd;
    if (item->permanent == true && item->vfs_index == vfs_id && item->local_fd == fd) {
        *item = FD_TABLE_ENTRY_UNUSED;
        ret = ESP_OK;
    }
    _lock_release(&s_fd_table_lock);
    ESP_LOGD(TAG, "esp_vfs_unregister_fd(%d, %d) finished with %s", vfs_id, fd, esp_err_to_name(ret));
    return ret;
}

static inline const vfs_entry_t *get_vfs_for_index(int index)
{
    if (index < 0 || index >= s_vfs_count) {
        return NULL;
    } else {
        return s_vfs[index];
    }
}

static inline bool fd_valid(int fd)
{
    return (fd < MAX_FDS) && (fd >= 0);
}

static const vfs_entry_t *get_vfs_for_fd(int fd)
{
    const vfs_entry_t *vfs = NULL;
    if (fd_valid(fd)) {
        const int index = s_fd_table[fd].vfs_index; // single read -> no locking is required
        vfs = get_vfs_for_index(index);
    }
    return vfs;
}

static inline int get_local_fd(const vfs_entry_t *vfs, int fd)
{
    int local_fd = -1;
    if (vfs && fd_valid(fd)) {
        local_fd = s_fd_table[fd].local_fd; // single read -> no locking is required
    }
    return local_fd;
}

static const char* translate_path(const vfs_entry_t* vfs, const char* src_path)
{
    assert(strncmp(src_path, vfs->path_prefix, vfs->path_prefix_len) == 0);
    if (strlen(src_path) == vfs->path_prefix_len) {
        // special case when src_path matches the path prefix exactly
        return "/";
    }
    return src_path + vfs->path_prefix_len;
}
static const vfs_entry_t* get_vfs_for_path(const char* path)
{
    const vfs_entry_t* best_match = NULL;
    ssize_t best_match_prefix_len = -1;
    size_t len = strlen(path);
    for (size_t i = 0; i < s_vfs_count; ++i) {
        const vfs_entry_t* vfs = s_vfs[i];
        if (!vfs || vfs->path_prefix_len == LEN_PATH_PREFIX_IGNORED) {
            continue;
        }
        // match path prefix
        if (len < vfs->path_prefix_len ||
                memcmp(path, vfs->path_prefix, vfs->path_prefix_len) != 0) {
            continue;
        }
        // this is the default VFS and we don't have a better match yet
        if (vfs->path_prefix_len == 0 && !best_match) {
            best_match = vfs;
            continue;
        }
        // if the path is not equal to the prefix, expect to see a path separator,
        // i.e. don't match the "/data" prefix for a "/data1/foo.txt" path
        if (len > vfs->path_prefix_len &&
                path[vfs->path_prefix_len] != '/') {
            continue;
        }
        // Out of all matching path prefixes, select the longest one;
        // i.e. if both "/dev" and "/dev/uart" match the "/dev/uart/1" path,
        // choose "/dev/uart".
        // This causes all s_vfs_count VFS entries to be scanned when opening
        // a file by name. This can be optimized by introducing a table for
        // FS search order, sorted so that longer prefixes are checked first.
        if (best_match_prefix_len < (ssize_t) vfs->path_prefix_len) {
            best_match_prefix_len = (ssize_t) vfs->path_prefix_len;
            best_match = vfs;
        }
    }
    return best_match;
}
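
/*
 * Illustrative examples only (the mount points below are hypothetical and not
 * registered by this file): with VFS instances registered at "" (default),
 * "/dev" and "/dev/uart", the helpers above behave as follows:
 *
 *   get_vfs_for_path("/dev/uart/1") -> the "/dev/uart" entry (longest matching prefix)
 *   get_vfs_for_path("/dev2/x")     -> the default "" entry ("/dev" is rejected
 *                                      because the next character is not '/')
 *   translate_path(<"/dev/uart" entry>, "/dev/uart")   -> "/"
 *   translate_path(<"/dev/uart" entry>, "/dev/uart/1") -> "/1"
 */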
/*
 * Using huge multi-line macros is never nice, but in this case
 * the only alternative is to repeat this chunk of code (with different function names)
 * for each syscall being implemented. Given that this define is contained within a single
 * file, this looks like a good tradeoff.
 *
 * First we check if syscall is implemented by VFS (corresponding member is not NULL),
 * then call the right flavor of the method (e.g. open or open_p) depending on
 * ESP_VFS_FLAG_CONTEXT_PTR flag. If ESP_VFS_FLAG_CONTEXT_PTR is set, context is passed
 * in as first argument and _p variant is used for the call.
 * It is enough to check just one of them for NULL, as both variants are part of a union.
 */
#define CHECK_AND_CALL(ret, r, pvfs, func, ...) \
    if (pvfs->vfs.func == NULL) { \
        __errno_r(r) = ENOSYS; \
        return -1; \
    } \
    if (pvfs->vfs.flags & ESP_VFS_FLAG_CONTEXT_PTR) { \
        ret = (*pvfs->vfs.func ## _p)(pvfs->ctx, __VA_ARGS__); \
    } else { \
        ret = (*pvfs->vfs.func)(__VA_ARGS__); \
    }

#define CHECK_AND_CALLV(r, pvfs, func, ...) \
    if (pvfs->vfs.func == NULL) { \
        __errno_r(r) = ENOSYS; \
        return; \
    } \
    if (pvfs->vfs.flags & ESP_VFS_FLAG_CONTEXT_PTR) { \
        (*pvfs->vfs.func ## _p)(pvfs->ctx, __VA_ARGS__); \
    } else { \
        (*pvfs->vfs.func)(__VA_ARGS__); \
    }

#define CHECK_AND_CALLP(ret, r, pvfs, func, ...) \
    if (pvfs->vfs.func == NULL) { \
        __errno_r(r) = ENOSYS; \
        return NULL; \
    } \
    if (pvfs->vfs.flags & ESP_VFS_FLAG_CONTEXT_PTR) { \
        ret = (*pvfs->vfs.func ## _p)(pvfs->ctx, __VA_ARGS__); \
    } else { \
        ret = (*pvfs->vfs.func)(__VA_ARGS__); \
    }
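
/*
 * For illustration only: a call such as
 *
 *   CHECK_AND_CALL(ret, r, vfs, write, local_fd, data, size);
 *
 * expands roughly to (simplified, not actual preprocessor output):
 *
 *   if (vfs->vfs.write == NULL) {
 *       __errno_r(r) = ENOSYS;
 *       return -1;
 *   }
 *   if (vfs->vfs.flags & ESP_VFS_FLAG_CONTEXT_PTR) {
 *       ret = (*vfs->vfs.write_p)(vfs->ctx, local_fd, data, size);
 *   } else {
 *       ret = (*vfs->vfs.write)(local_fd, data, size);
 *   }
 */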
int esp_vfs_open(struct _reent *r, const char * path, int flags, int mode)
{
    const vfs_entry_t *vfs = get_vfs_for_path(path);
    if (vfs == NULL) {
        __errno_r(r) = ENOENT;
        return -1;
    }
    const char *path_within_vfs = translate_path(vfs, path);
    int fd_within_vfs;
    CHECK_AND_CALL(fd_within_vfs, r, vfs, open, path_within_vfs, flags, mode);
    if (fd_within_vfs >= 0) {
        _lock_acquire(&s_fd_table_lock);
        for (int i = 0; i < MAX_FDS; ++i) {
            if (s_fd_table[i].vfs_index == -1) {
                s_fd_table[i].permanent = false;
                s_fd_table[i].vfs_index = vfs->offset;
                s_fd_table[i].local_fd = fd_within_vfs;
                _lock_release(&s_fd_table_lock);
                return i;
            }
        }
        _lock_release(&s_fd_table_lock);
        int ret;
        CHECK_AND_CALL(ret, r, vfs, close, fd_within_vfs);
        (void) ret; // remove "set but not used" warning
        __errno_r(r) = ENOMEM;
        return -1;
    }
    __errno_r(r) = ENOENT;
    return -1;
}
ssize_t esp_vfs_write(struct _reent *r, int fd, const void * data, size_t size)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    ssize_t ret;
    CHECK_AND_CALL(ret, r, vfs, write, local_fd, data, size);
    return ret;
}

off_t esp_vfs_lseek(struct _reent *r, int fd, off_t size, int mode)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    off_t ret;
    CHECK_AND_CALL(ret, r, vfs, lseek, local_fd, size, mode);
    return ret;
}

ssize_t esp_vfs_read(struct _reent *r, int fd, void * dst, size_t size)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    ssize_t ret;
    CHECK_AND_CALL(ret, r, vfs, read, local_fd, dst, size);
    return ret;
}

int esp_vfs_close(struct _reent *r, int fd)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    int ret;
    CHECK_AND_CALL(ret, r, vfs, close, local_fd);
    _lock_acquire(&s_fd_table_lock);
    if (!s_fd_table[fd].permanent) {
        s_fd_table[fd] = FD_TABLE_ENTRY_UNUSED;
    }
    _lock_release(&s_fd_table_lock);
    return ret;
}

int esp_vfs_fstat(struct _reent *r, int fd, struct stat * st)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    int ret;
    CHECK_AND_CALL(ret, r, vfs, fstat, local_fd, st);
    return ret;
}

int esp_vfs_stat(struct _reent *r, const char * path, struct stat * st)
{
    const vfs_entry_t* vfs = get_vfs_for_path(path);
    if (vfs == NULL) {
        __errno_r(r) = ENOENT;
        return -1;
    }
    const char* path_within_vfs = translate_path(vfs, path);
    int ret;
    CHECK_AND_CALL(ret, r, vfs, stat, path_within_vfs, st);
    return ret;
}

int esp_vfs_link(struct _reent *r, const char* n1, const char* n2)
{
    const vfs_entry_t* vfs = get_vfs_for_path(n1);
    if (vfs == NULL) {
        __errno_r(r) = ENOENT;
        return -1;
    }
    const vfs_entry_t* vfs2 = get_vfs_for_path(n2);
    if (vfs != vfs2) {
        __errno_r(r) = EXDEV;
        return -1;
    }
    const char* path1_within_vfs = translate_path(vfs, n1);
    const char* path2_within_vfs = translate_path(vfs, n2);
    int ret;
    CHECK_AND_CALL(ret, r, vfs, link, path1_within_vfs, path2_within_vfs);
    return ret;
}

int esp_vfs_unlink(struct _reent *r, const char *path)
{
    const vfs_entry_t* vfs = get_vfs_for_path(path);
    if (vfs == NULL) {
        __errno_r(r) = ENOENT;
        return -1;
    }
    const char* path_within_vfs = translate_path(vfs, path);
    int ret;
    CHECK_AND_CALL(ret, r, vfs, unlink, path_within_vfs);
    return ret;
}

int esp_vfs_rename(struct _reent *r, const char *src, const char *dst)
{
    const vfs_entry_t* vfs = get_vfs_for_path(src);
    if (vfs == NULL) {
        __errno_r(r) = ENOENT;
        return -1;
    }
    const vfs_entry_t* vfs_dst = get_vfs_for_path(dst);
    if (vfs != vfs_dst) {
        __errno_r(r) = EXDEV;
        return -1;
    }
    const char* src_within_vfs = translate_path(vfs, src);
    const char* dst_within_vfs = translate_path(vfs, dst);
    int ret;
    CHECK_AND_CALL(ret, r, vfs, rename, src_within_vfs, dst_within_vfs);
    return ret;
}

DIR* opendir(const char* name)
{
    const vfs_entry_t* vfs = get_vfs_for_path(name);
    struct _reent* r = __getreent();
    if (vfs == NULL) {
        __errno_r(r) = ENOENT;
        return NULL;
    }
    const char* path_within_vfs = translate_path(vfs, name);
    DIR* ret;
    CHECK_AND_CALLP(ret, r, vfs, opendir, path_within_vfs);
    if (ret != NULL) {
        ret->dd_vfs_idx = vfs->offset;
    }
    return ret;
}

struct dirent* readdir(DIR* pdir)
{
    const vfs_entry_t* vfs = get_vfs_for_index(pdir->dd_vfs_idx);
    struct _reent* r = __getreent();
    if (vfs == NULL) {
        __errno_r(r) = EBADF;
        return NULL;
    }
    struct dirent* ret;
    CHECK_AND_CALLP(ret, r, vfs, readdir, pdir);
    return ret;
}

int readdir_r(DIR* pdir, struct dirent* entry, struct dirent** out_dirent)
{
    const vfs_entry_t* vfs = get_vfs_for_index(pdir->dd_vfs_idx);
    struct _reent* r = __getreent();
    if (vfs == NULL) {
        errno = EBADF;
        return -1;
    }
    int ret;
    CHECK_AND_CALL(ret, r, vfs, readdir_r, pdir, entry, out_dirent);
    return ret;
}

long telldir(DIR* pdir)
{
    const vfs_entry_t* vfs = get_vfs_for_index(pdir->dd_vfs_idx);
    struct _reent* r = __getreent();
    if (vfs == NULL) {
        errno = EBADF;
        return -1;
    }
    long ret;
    CHECK_AND_CALL(ret, r, vfs, telldir, pdir);
    return ret;
}

void seekdir(DIR* pdir, long loc)
{
    const vfs_entry_t* vfs = get_vfs_for_index(pdir->dd_vfs_idx);
    struct _reent* r = __getreent();
    if (vfs == NULL) {
        errno = EBADF;
        return;
    }
    CHECK_AND_CALLV(r, vfs, seekdir, pdir, loc);
}

void rewinddir(DIR* pdir)
{
    seekdir(pdir, 0);
}

int closedir(DIR* pdir)
{
    const vfs_entry_t* vfs = get_vfs_for_index(pdir->dd_vfs_idx);
    struct _reent* r = __getreent();
    if (vfs == NULL) {
        errno = EBADF;
        return -1;
    }
    int ret;
    CHECK_AND_CALL(ret, r, vfs, closedir, pdir);
    return ret;
}

int mkdir(const char* name, mode_t mode)
{
    const vfs_entry_t* vfs = get_vfs_for_path(name);
    struct _reent* r = __getreent();
    if (vfs == NULL) {
        __errno_r(r) = ENOENT;
        return -1;
    }
    const char* path_within_vfs = translate_path(vfs, name);
    int ret;
    CHECK_AND_CALL(ret, r, vfs, mkdir, path_within_vfs, mode);
    return ret;
}

int rmdir(const char* name)
{
    const vfs_entry_t* vfs = get_vfs_for_path(name);
    struct _reent* r = __getreent();
    if (vfs == NULL) {
        __errno_r(r) = ENOENT;
        return -1;
    }
    const char* path_within_vfs = translate_path(vfs, name);
    int ret;
    CHECK_AND_CALL(ret, r, vfs, rmdir, path_within_vfs);
    return ret;
}

int fcntl(int fd, int cmd, ...)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    struct _reent* r = __getreent();
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    int ret;
    va_list args;
    va_start(args, cmd);
    CHECK_AND_CALL(ret, r, vfs, fcntl, local_fd, cmd, args);
    va_end(args);
    return ret;
}

int ioctl(int fd, int cmd, ...)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    struct _reent* r = __getreent();
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    int ret;
    va_list args;
    va_start(args, cmd);
    CHECK_AND_CALL(ret, r, vfs, ioctl, local_fd, cmd, args);
    va_end(args);
    return ret;
}

int fsync(int fd)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    struct _reent* r = __getreent();
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    int ret;
    CHECK_AND_CALL(ret, r, vfs, fsync, local_fd);
    return ret;
}

int access(const char *path, int amode)
{
    int ret;
    const vfs_entry_t* vfs = get_vfs_for_path(path);
    struct _reent* r = __getreent();
    if (vfs == NULL) {
        __errno_r(r) = ENOENT;
        return -1;
    }
    const char* path_within_vfs = translate_path(vfs, path);
    CHECK_AND_CALL(ret, r, vfs, access, path_within_vfs, amode);
    return ret;
}

int truncate(const char *path, off_t length)
{
    int ret;
    const vfs_entry_t* vfs = get_vfs_for_path(path);
    struct _reent* r = __getreent();
    if (vfs == NULL) {
        __errno_r(r) = ENOENT;
        return -1;
    }
    const char* path_within_vfs = translate_path(vfs, path);
    CHECK_AND_CALL(ret, r, vfs, truncate, path_within_vfs, length);
    return ret;
}
static void call_end_selects(int end_index, const fds_triple_t *vfs_fds_triple)
{
    for (int i = 0; i < end_index; ++i) {
        const vfs_entry_t *vfs = get_vfs_for_index(i);
        const fds_triple_t *item = &vfs_fds_triple[i];
        if (vfs && vfs->vfs.end_select && item->isset) {
            vfs->vfs.end_select();
        }
    }
}

static inline bool esp_vfs_safe_fd_isset(int fd, const fd_set *fds)
{
    return fds && FD_ISSET(fd, fds);
}

static int set_global_fd_sets(const fds_triple_t *vfs_fds_triple, int size, fd_set *readfds, fd_set *writefds, fd_set *errorfds)
{
    int ret = 0;
    for (int i = 0; i < size; ++i) {
        const fds_triple_t *item = &vfs_fds_triple[i];
        if (item->isset) {
            for (int fd = 0; fd < MAX_FDS; ++fd) {
                const int local_fd = s_fd_table[fd].local_fd; // single read -> no locking is required
                if (readfds && esp_vfs_safe_fd_isset(local_fd, &item->readfds)) {
                    ESP_LOGD(TAG, "FD %d in readfds was set from VFS ID %d", fd, i);
                    FD_SET(fd, readfds);
                    ++ret;
                }
                if (writefds && esp_vfs_safe_fd_isset(local_fd, &item->writefds)) {
                    ESP_LOGD(TAG, "FD %d in writefds was set from VFS ID %d", fd, i);
                    FD_SET(fd, writefds);
                    ++ret;
                }
                if (errorfds && esp_vfs_safe_fd_isset(local_fd, &item->errorfds)) {
                    ESP_LOGD(TAG, "FD %d in errorfds was set from VFS ID %d", fd, i);
                    FD_SET(fd, errorfds);
                    ++ret;
                }
            }
        }
    }
    return ret;
}

static void esp_vfs_log_fd_set(const char *fds_name, const fd_set *fds)
{
    if (fds_name && fds) {
        ESP_LOGD(TAG, "FDs in %s =", fds_name);
        for (int i = 0; i < MAX_FDS; ++i) {
            if (esp_vfs_safe_fd_isset(i, fds)) {
                ESP_LOGD(TAG, "%d", i);
            }
        }
    }
}
int esp_vfs_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *errorfds, struct timeval *timeout)
{
    int ret = 0;
    struct _reent* r = __getreent();
    ESP_LOGD(TAG, "esp_vfs_select starts with nfds = %d", nfds);
    if (timeout) {
        ESP_LOGD(TAG, "timeout is %lds + %ldus", timeout->tv_sec, timeout->tv_usec);
    }
    esp_vfs_log_fd_set("readfds", readfds);
    esp_vfs_log_fd_set("writefds", writefds);
    esp_vfs_log_fd_set("errorfds", errorfds);

    if (nfds > MAX_FDS || nfds < 0) {
        ESP_LOGD(TAG, "incorrect nfds");
        __errno_r(r) = EINVAL;
        return -1;
    }

    fds_triple_t *vfs_fds_triple;
    if ((vfs_fds_triple = calloc(s_vfs_count, sizeof(fds_triple_t))) == NULL) {
        __errno_r(r) = ENOMEM;
        ESP_LOGD(TAG, "calloc is unsuccessful");
        return -1;
    }

    int (*socket_select)(int, fd_set *, fd_set *, fd_set *, struct timeval *) = NULL;
    for (int fd = 0; fd < nfds; ++fd) {
        _lock_acquire(&s_fd_table_lock);
        const bool is_socket_fd = s_fd_table[fd].permanent;
        const int vfs_index = s_fd_table[fd].vfs_index;
        const int local_fd = s_fd_table[fd].local_fd;
        _lock_release(&s_fd_table_lock);
        if (vfs_index < 0) {
            continue;
        }
        if (is_socket_fd) {
            if (!socket_select) {
                // no socket_select found yet so take a look
                if (esp_vfs_safe_fd_isset(fd, readfds) ||
                        esp_vfs_safe_fd_isset(fd, writefds) ||
                        esp_vfs_safe_fd_isset(fd, errorfds)) {
                    const vfs_entry_t *vfs = s_vfs[vfs_index];
                    socket_select = vfs->vfs.socket_select;
                }
            }
            continue;
        }
        fds_triple_t *item = &vfs_fds_triple[vfs_index]; // FD sets of the VFS to which fd belongs
        if (esp_vfs_safe_fd_isset(fd, readfds)) {
            item->isset = true;
            FD_SET(local_fd, &item->readfds);
            FD_CLR(fd, readfds);
            ESP_LOGD(TAG, "removing %d from readfds and adding as local FD %d to fd_set of VFS ID %d", fd, local_fd, vfs_index);
        }
        if (esp_vfs_safe_fd_isset(fd, writefds)) {
            item->isset = true;
            FD_SET(local_fd, &item->writefds);
            FD_CLR(fd, writefds);
            ESP_LOGD(TAG, "removing %d from writefds and adding as local FD %d to fd_set of VFS ID %d", fd, local_fd, vfs_index);
        }
        if (esp_vfs_safe_fd_isset(fd, errorfds)) {
            item->isset = true;
            FD_SET(local_fd, &item->errorfds);
            FD_CLR(fd, errorfds);
            ESP_LOGD(TAG, "removing %d from errorfds and adding as local FD %d to fd_set of VFS ID %d", fd, local_fd, vfs_index);
        }
    }
    // At this point all non-socket VFSs have their FD sets in vfs_fds_triple,
    // and the global readfds, writefds and errorfds contain only socket FDs
    // (if there are any).

    /* Semaphore used for waiting for select events from other VFS drivers when
     * socket select is not used (no socket VFS is registered, or no socket FDs
     * are observed by the given call of select)
     */
    SemaphoreHandle_t select_sem = NULL;
    if (!socket_select) {
        // There is no socket VFS registered or select() wasn't called for
        // any socket. Therefore, we will use our own signaling mechanism.
        if ((select_sem = xSemaphoreCreateBinary()) == NULL) {
            free(vfs_fds_triple);
            __errno_r(r) = ENOMEM;
            ESP_LOGD(TAG, "cannot create select_sem");
            return -1;
        }
    }

    for (int i = 0; i < s_vfs_count; ++i) {
        const vfs_entry_t *vfs = get_vfs_for_index(i);
        fds_triple_t *item = &vfs_fds_triple[i];
        if (vfs && vfs->vfs.start_select && item->isset) {
            // call start_select for all non-socket VFSs that have at least one FD set in readfds, writefds, or errorfds
            // note: it can point to socket VFS but item->isset will be false for that
            ESP_LOGD(TAG, "calling start_select for VFS ID %d with the following local FDs", i);
            esp_vfs_log_fd_set("readfds", &item->readfds);
            esp_vfs_log_fd_set("writefds", &item->writefds);
            esp_vfs_log_fd_set("errorfds", &item->errorfds);
            esp_err_t err = vfs->vfs.start_select(nfds, &item->readfds, &item->writefds, &item->errorfds, &select_sem);
            if (err != ESP_OK) {
                call_end_selects(i, vfs_fds_triple);
                (void) set_global_fd_sets(vfs_fds_triple, s_vfs_count, readfds, writefds, errorfds);
                if (select_sem) {
                    vSemaphoreDelete(select_sem);
                    select_sem = NULL;
                }
                free(vfs_fds_triple);
                __errno_r(r) = EINTR;
                ESP_LOGD(TAG, "start_select failed");
                return -1;
            }
        }
    }

    if (socket_select) {
        ESP_LOGD(TAG, "calling socket_select with the following FDs");
        esp_vfs_log_fd_set("readfds", readfds);
        esp_vfs_log_fd_set("writefds", writefds);
        esp_vfs_log_fd_set("errorfds", errorfds);
        ret = socket_select(nfds, readfds, writefds, errorfds, timeout);
        ESP_LOGD(TAG, "socket_select returned %d and the FDs are the following", ret);
        esp_vfs_log_fd_set("readfds", readfds);
        esp_vfs_log_fd_set("writefds", writefds);
        esp_vfs_log_fd_set("errorfds", errorfds);
    } else {
        if (readfds) {
            FD_ZERO(readfds);
        }
        if (writefds) {
            FD_ZERO(writefds);
        }
        if (errorfds) {
            FD_ZERO(errorfds);
        }
        TickType_t ticks_to_wait = portMAX_DELAY;
        if (timeout) {
            uint32_t timeout_ms = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
            ticks_to_wait = timeout_ms / portTICK_PERIOD_MS;
            ESP_LOGD(TAG, "timeout is %dms", timeout_ms);
        }
        ESP_LOGD(TAG, "waiting without calling socket_select");
        xSemaphoreTake(select_sem, ticks_to_wait);
    }

    call_end_selects(s_vfs_count, vfs_fds_triple); // for VFSs for which start_select was called above
    if (ret >= 0) {
        ret += set_global_fd_sets(vfs_fds_triple, s_vfs_count, readfds, writefds, errorfds);
    }
    if (select_sem) {
        vSemaphoreDelete(select_sem);
        select_sem = NULL;
    }
    free(vfs_fds_triple);
    ESP_LOGD(TAG, "esp_vfs_select returns %d", ret);
    esp_vfs_log_fd_set("readfds", readfds);
    esp_vfs_log_fd_set("writefds", writefds);
    esp_vfs_log_fd_set("errorfds", errorfds);
    return ret;
}
void esp_vfs_select_triggered(SemaphoreHandle_t *signal_sem)
{
    if (signal_sem && (*signal_sem)) {
        xSemaphoreGive(*signal_sem);
    } else {
        // Another way would be to go through s_fd_table and find the VFS
        // which has a permanent FD. But in order to avoid locking
        // s_fd_table_lock we go through the VFS table.
        for (int i = 0; i < s_vfs_count; ++i) {
            const vfs_entry_t *vfs = s_vfs[i];
            if (vfs != NULL && vfs->vfs.stop_socket_select != NULL) {
                vfs->vfs.stop_socket_select();
                break;
            }
        }
    }
}

void esp_vfs_select_triggered_isr(SemaphoreHandle_t *signal_sem, BaseType_t *woken)
{
    if (signal_sem && (*signal_sem)) {
        xSemaphoreGiveFromISR(*signal_sem, woken);
    } else {
        // Another way would be to go through s_fd_table and find the VFS
        // which has a permanent FD. But in order to avoid locking
        // s_fd_table_lock we go through the VFS table.
        for (int i = 0; i < s_vfs_count; ++i) {
            const vfs_entry_t *vfs = s_vfs[i];
            if (vfs != NULL && vfs->vfs.stop_socket_select_isr != NULL) {
                vfs->vfs.stop_socket_select_isr(woken);
                break;
            }
        }
    }
}
#ifdef CONFIG_SUPPORT_TERMIOS
int tcgetattr(int fd, struct termios *p)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    struct _reent* r = __getreent();
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    int ret;
    CHECK_AND_CALL(ret, r, vfs, tcgetattr, local_fd, p);
    return ret;
}

int tcsetattr(int fd, int optional_actions, const struct termios *p)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    struct _reent* r = __getreent();
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    int ret;
    CHECK_AND_CALL(ret, r, vfs, tcsetattr, local_fd, optional_actions, p);
    return ret;
}

int tcdrain(int fd)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    struct _reent* r = __getreent();
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    int ret;
    CHECK_AND_CALL(ret, r, vfs, tcdrain, local_fd);
    return ret;
}

int tcflush(int fd, int select)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    struct _reent* r = __getreent();
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    int ret;
    CHECK_AND_CALL(ret, r, vfs, tcflush, local_fd, select);
    return ret;
}

int tcflow(int fd, int action)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    struct _reent* r = __getreent();
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    int ret;
    CHECK_AND_CALL(ret, r, vfs, tcflow, local_fd, action);
    return ret;
}

pid_t tcgetsid(int fd)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    struct _reent* r = __getreent();
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    int ret;
    CHECK_AND_CALL(ret, r, vfs, tcgetsid, local_fd);
    return ret;
}

int tcsendbreak(int fd, int duration)
{
    const vfs_entry_t* vfs = get_vfs_for_fd(fd);
    const int local_fd = get_local_fd(vfs, fd);
    struct _reent* r = __getreent();
    if (vfs == NULL || local_fd < 0) {
        __errno_r(r) = EBADF;
        return -1;
    }
    int ret;
    CHECK_AND_CALL(ret, r, vfs, tcsendbreak, local_fd, duration);
    return ret;
}
#endif // CONFIG_SUPPORT_TERMIOS

int esp_vfs_utime(const char *path, const struct utimbuf *times)
{
    int ret;
    const vfs_entry_t* vfs = get_vfs_for_path(path);
    struct _reent* r = __getreent();
    if (vfs == NULL) {
        __errno_r(r) = ENOENT;
        return -1;
    }
    const char* path_within_vfs = translate_path(vfs, path);
    CHECK_AND_CALL(ret, r, vfs, utime, path_within_vfs, times);
    return ret;
}