pthread.c

// Copyright 2017 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This module implements the pthread API on top of FreeRTOS. The API is implemented only to the
// level required for the libstdcxx threading framework to operate correctly, so not all standard
// pthread routines are supported. Moreover, some implemented functions do not provide full
// functionality, e.g. pthread_create does not support customization of thread attributes
// (priority, stack size, and so on). If the default behavior is not sufficient, use the native
// FreeRTOS API instead.
//
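// A minimal usage sketch (illustrative only, not part of the original file): creating a thread
// with default attributes and joining it. The attr argument must be NULL, since thread
// attributes are not supported by this implementation.
//
//     #include <pthread.h>
//     #include <stdio.h>
//
//     static void *worker(void *arg)
//     {
//         printf("hello from pthread, arg=%d\n", *(int *)arg);
//         return NULL;
//     }
//
//     static void example(void)
//     {
//         pthread_t thread;
//         int arg = 42;
//         if (pthread_create(&thread, NULL, worker, &arg) == 0) {
//             pthread_join(thread, NULL); // retval is always set to 0 here
//         }
//     }
//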
#include <errno.h>
#include <pthread.h>
#include <string.h>
#include "esp_err.h"
#include "esp_attr.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/list.h"

#define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
#include "esp_log.h"
const static char *TAG = "esp_pthread";

/** task state */
enum esp_pthread_task_state {
    PTHREAD_TASK_STATE_RUN,
    PTHREAD_TASK_STATE_EXIT
};

/** pthread thread FreeRTOS wrapper */
typedef struct {
    ListItem_t list_item;               ///< Tasks list node struct. FreeRTOS task handle is kept as list_item.xItemValue
    TaskHandle_t join_task;             ///< Handle of the task waiting to join
    enum esp_pthread_task_state state;  ///< pthread task state
    bool detached;                      ///< True if pthread is detached
} esp_pthread_t;

/** pthread wrapper task arg */
typedef struct {
    void *(*func)(void *);  ///< user task entry
    void *arg;              ///< user task argument
} esp_pthread_task_arg_t;

/** pthread mutex FreeRTOS wrapper */
typedef struct {
    ListItem_t list_item;   ///< mutexes list node struct
    SemaphoreHandle_t sem;  ///< Handle of the underlying FreeRTOS semaphore
    int type;               ///< Mutex type. Currently PTHREAD_MUTEX_NORMAL and PTHREAD_MUTEX_RECURSIVE are supported
} esp_pthread_mutex_t;

static SemaphoreHandle_t s_once_mux = NULL;
static SemaphoreHandle_t s_threads_mux = NULL;
static portMUX_TYPE s_mutex_init_lock = portMUX_INITIALIZER_UNLOCKED;
static List_t s_threads_list;
static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo);

esp_err_t esp_pthread_init(void)
{
    vListInitialise((List_t *)&s_threads_list);
    s_once_mux = xSemaphoreCreateRecursiveMutex();
    if (s_once_mux == NULL) {
        return ESP_ERR_NO_MEM;
    }
    s_threads_mux = xSemaphoreCreateMutex();
    if (s_threads_mux == NULL) {
        vSemaphoreDelete(s_once_mux);
        return ESP_ERR_NO_MEM;
    }
    return ESP_OK;
}
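
// Generic search over s_threads_list: item_check is called for every node, and the first
// non-NULL value it returns is the search result. Callers must hold s_threads_mux.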
static void *pthread_find_list_item(void *(*item_check)(ListItem_t *, void *arg), void *check_arg)
{
    ListItem_t const *list_end = listGET_END_MARKER(&s_threads_list);
    ListItem_t *list_item = listGET_HEAD_ENTRY(&s_threads_list);
    while (list_item != list_end) {
        void *val = item_check(list_item, check_arg);
        if (val) {
            return val;
        }
        list_item = listGET_NEXT(list_item);
    }
    return NULL;
}

static void *pthread_get_handle_by_desc(ListItem_t *item, void *arg)
{
    esp_pthread_t *pthread = listGET_LIST_ITEM_OWNER(item);
    if (pthread == arg) {
        return (void *)listGET_LIST_ITEM_VALUE(item);
    }
    return NULL;
}

static inline TaskHandle_t pthread_find_handle(pthread_t thread)
{
    return pthread_find_list_item(pthread_get_handle_by_desc, (void *)thread);
}

static void *pthread_get_desc_by_handle(ListItem_t *item, void *arg)
{
    TaskHandle_t task_handle = arg;
    TaskHandle_t cur_handle = (TaskHandle_t)listGET_LIST_ITEM_VALUE(item);
    if (task_handle == cur_handle) {
        return (esp_pthread_t *)listGET_LIST_ITEM_OWNER(item);
    }
    return NULL;
}

static esp_pthread_t *pthread_find(TaskHandle_t task_handle)
{
    return pthread_find_list_item(pthread_get_desc_by_handle, task_handle);
}

static void pthread_delete(esp_pthread_t *pthread)
{
    uxListRemove(&pthread->list_item);
    free(pthread);
}
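
// FreeRTOS entry point shared by all pthreads: blocks until pthread_create sends the start
// notification, runs the user routine, then either frees the descriptor (detached thread),
// notifies a task already blocked in pthread_join, or marks the descriptor as exited so a
// later join can reap it. Finally the wrapping FreeRTOS task deletes itself.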
static void pthread_task_func(void *arg)
{
    esp_pthread_task_arg_t *task_arg = (esp_pthread_task_arg_t *)arg;

    ESP_LOGV(TAG, "%s ENTER %p", __FUNCTION__, task_arg->func);
    // wait for start
    xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
    ESP_LOGV(TAG, "%s START %p", __FUNCTION__, task_arg->func);

    task_arg->func(task_arg->arg);

    ESP_LOGV(TAG, "%s END %p", __FUNCTION__, task_arg->func);
    free(task_arg);

    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
    if (!pthread) {
        assert(false && "Failed to find pthread for current task!");
    }
    if (pthread->detached) {
        // auto-free for detached threads
        pthread_delete(pthread);
    } else {
        // Keep the descriptor in the list; a joiner (now or later) will remove it
        if (pthread->join_task) {
            // notify join
            xTaskNotify(pthread->join_task, 0, eNoAction);
        } else {
            pthread->state = PTHREAD_TASK_STATE_EXIT;
        }
    }
    xSemaphoreGive(s_threads_mux);

    vTaskDelete(NULL);

    ESP_LOGV(TAG, "%s EXIT", __FUNCTION__);
}
int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
                   void *(*start_routine) (void *), void *arg)
{
    TaskHandle_t xHandle = NULL;

    ESP_LOGV(TAG, "%s", __FUNCTION__);
    if (attr) {
        ESP_LOGE(TAG, "%s: attrs not supported!", __FUNCTION__);
        return ENOSYS;
    }
    esp_pthread_task_arg_t *task_arg = malloc(sizeof(esp_pthread_task_arg_t));
    if (task_arg == NULL) {
        ESP_LOGE(TAG, "Failed to allocate task args!");
        return ENOMEM;
    }
    memset(task_arg, 0, sizeof(esp_pthread_task_arg_t));
    esp_pthread_t *pthread = malloc(sizeof(esp_pthread_t));
    if (pthread == NULL) {
        ESP_LOGE(TAG, "Failed to allocate pthread data!");
        free(task_arg);
        return ENOMEM;
    }
    memset(pthread, 0, sizeof(esp_pthread_t));
    task_arg->func = start_routine;
    task_arg->arg = arg;
    BaseType_t res = xTaskCreate(&pthread_task_func, "pthread", CONFIG_ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT,
                                 task_arg, CONFIG_ESP32_PTHREAD_TASK_PRIO_DEFAULT, &xHandle);
    if (res != pdPASS) {
        ESP_LOGE(TAG, "Failed to create task!");
        free(pthread);
        free(task_arg);
        if (res == errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY) {
            return ENOMEM;
        } else {
            return EAGAIN;
        }
    }
    vListInitialiseItem((ListItem_t *)&pthread->list_item);
    listSET_LIST_ITEM_OWNER((ListItem_t *)&pthread->list_item, pthread);
    listSET_LIST_ITEM_VALUE((ListItem_t *)&pthread->list_item, (TickType_t)xHandle);

    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    vListInsertEnd((List_t *)&s_threads_list, (ListItem_t *)&pthread->list_item);
    xSemaphoreGive(s_threads_mux);

    // start task
    xTaskNotify(xHandle, 0, eNoAction);

    *thread = (pthread_t)pthread; // pointer value fits into pthread_t (uint32_t)

    ESP_LOGV(TAG, "Created task %x", (uint32_t)xHandle);
    return 0;
}
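
// Joins the given thread. Returns ESRCH if the thread is unknown, EINVAL if another task is
// already joining it, and EDEADLK on self-join or mutual join. If the target is still running,
// the caller blocks until the exit notification from pthread_task_func arrives. retval is
// always set to 0 since FreeRTOS tasks have no exit code.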
int pthread_join(pthread_t thread, void **retval)
{
    esp_pthread_t *pthread = (esp_pthread_t *)thread;
    int ret = 0;
    bool wait = false;

    ESP_LOGV(TAG, "%s %p", __FUNCTION__, pthread);
    // find task
    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    TaskHandle_t handle = pthread_find_handle(thread);
    if (!handle) {
        // not found
        ret = ESRCH;
    } else if (pthread->join_task) {
        // another task is already waiting to join
        ret = EINVAL;
    } else if (handle == xTaskGetCurrentTaskHandle()) {
        // join to self is not allowed
        ret = EDEADLK;
    } else {
        esp_pthread_t *cur_pthread = pthread_find(xTaskGetCurrentTaskHandle());
        if (cur_pthread && cur_pthread->join_task == handle) {
            // mutual join is not allowed
            ret = EDEADLK;
        } else {
            if (pthread->state == PTHREAD_TASK_STATE_RUN) {
                // target is still running, wait for its exit notification
                pthread->join_task = xTaskGetCurrentTaskHandle();
                wait = true;
            } else {
                // target has already exited, just reap its descriptor
                pthread_delete(pthread);
            }
        }
    }
    xSemaphoreGive(s_threads_mux);

    if (ret == 0 && wait) {
        xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
        if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
            assert(false && "Failed to lock threads list!");
        }
        pthread_delete(pthread);
        xSemaphoreGive(s_threads_mux);
    }
    if (retval) {
        *retval = 0; // no exit code in FreeRTOS
    }

    ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
    return ret;
}
int pthread_detach(pthread_t thread)
{
    esp_pthread_t *pthread = (esp_pthread_t *)thread;
    int ret = 0;

    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    TaskHandle_t handle = pthread_find_handle(thread);
    if (!handle) {
        ret = ESRCH;
    } else {
        pthread->detached = true;
    }
    xSemaphoreGive(s_threads_mux);
    ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
    return ret;
}

int pthread_cancel(pthread_t thread)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return ENOSYS;
}

int sched_yield(void)
{
    vTaskDelay(0);
    return 0;
}

pthread_t pthread_self(void)
{
    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
    if (!pthread) {
        assert(false && "Failed to find current thread ID!");
    }
    xSemaphoreGive(s_threads_mux);
    return (pthread_t)pthread;
}

int pthread_equal(pthread_t t1, pthread_t t2)
{
    return t1 == t2 ? 1 : 0;
}
/***************** KEY ******************/
int pthread_key_create(pthread_key_t *key, void (*destructor)(void*))
{
    static int s_created;

    //TODO: Key destructors are not supported!
    if (s_created) {
        // The key API supports only the single key required by the libstdcxx threading implementation
        ESP_LOGE(TAG, "%s: multiple keys not supported!", __FUNCTION__);
        return ENOSYS;
    }
    *key = 1;
    s_created = 1;
    return 0;
}

int pthread_key_delete(pthread_key_t key)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return ENOSYS;
}

void *pthread_getspecific(pthread_key_t key)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return NULL;
}

int pthread_setspecific(pthread_key_t key, const void *value)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return ENOSYS;
}
/***************** ONCE ******************/
int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
    if (once_control == NULL || init_routine == NULL || !once_control->is_initialized) {
        ESP_LOGE(TAG, "%s: Invalid args!", __FUNCTION__);
        return EINVAL;
    }

    TaskHandle_t cur_task = xTaskGetCurrentTaskHandle();
    // do not take the mutex if the OS is not running yet
    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED ||
            // init_routine can call pthread_once for other objects, so use a recursive mutex
            // FIXME: behaviour is undefined if init_routine calls pthread_once for the same object in the current context
            !cur_task || xSemaphoreTakeRecursive(s_once_mux, portMAX_DELAY) == pdTRUE)
    {
        if (!once_control->init_executed) {
            ESP_LOGV(TAG, "%s: call init_routine %p", __FUNCTION__, once_control);
            init_routine();
            once_control->init_executed = 1;
        }
        if (cur_task) {
            xSemaphoreGiveRecursive(s_once_mux);
        }
    }
    else
    {
        ESP_LOGE(TAG, "%s: Failed to lock!", __FUNCTION__);
        return EBUSY;
    }
    return 0;
}
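
// A minimal usage sketch (illustrative only, not part of the original file): one-time
// initialization shared between several threads. PTHREAD_ONCE_INIT sets is_initialized,
// so the argument check in pthread_once above accepts the object.
//
//     #include <pthread.h>
//
//     static pthread_once_t s_init_once = PTHREAD_ONCE_INIT;
//
//     static void init_subsystem(void)
//     {
//         // runs exactly once, no matter how many threads call example_init()
//     }
//
//     static void example_init(void)
//     {
//         pthread_once(&s_init_once, init_subsystem);
//     }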
/***************** MUTEX ******************/
static int mutexattr_check(const pthread_mutexattr_t *attr)
{
    if (attr->type < PTHREAD_MUTEX_NORMAL || attr->type > PTHREAD_MUTEX_RECURSIVE) {
        return EINVAL;
    }
    return 0;
}

int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
    int type = PTHREAD_MUTEX_NORMAL;

    if (!mutex) {
        return EINVAL;
    }
    if (attr) {
        if (!attr->is_initialized) {
            return EINVAL;
        }
        int res = mutexattr_check(attr);
        if (res) {
            return res;
        }
        type = attr->type;
    }
    esp_pthread_mutex_t *mux = (esp_pthread_mutex_t *)malloc(sizeof(esp_pthread_mutex_t));
    if (!mux) {
        return ENOMEM;
    }
    mux->type = type;
    if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
        mux->sem = xSemaphoreCreateRecursiveMutex();
    } else {
        mux->sem = xSemaphoreCreateMutex();
    }
    if (!mux->sem) {
        free(mux);
        return EAGAIN;
    }
    *mutex = (pthread_mutex_t)mux; // pointer value fits into pthread_mutex_t (uint32_t)
    return 0;
}
int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
    esp_pthread_mutex_t *mux;

    ESP_LOGV(TAG, "%s %p", __FUNCTION__, mutex);
    if (!mutex) {
        return EINVAL;
    }
    mux = (esp_pthread_mutex_t *)*mutex;
    // check if the mux is busy
    int res = pthread_mutex_lock_internal(mux, 0);
    if (res == EBUSY) {
        return EBUSY;
    }
    vSemaphoreDelete(mux->sem);
    free(mux);
    return 0;
}

static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo)
{
    if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
        if (xSemaphoreTakeRecursive(mux->sem, tmo) != pdTRUE) {
            return EBUSY;
        }
    } else {
        if (xSemaphoreTake(mux->sem, tmo) != pdTRUE) {
            return EBUSY;
        }
    }
    return 0;
}
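
// Lazily replaces a statically initialized mutex (PTHREAD_MUTEX_INITIALIZER) with a real
// FreeRTOS mutex on first lock. The value is re-checked inside a critical section so that
// two tasks racing on first use cannot both initialize the same mutex.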
static int pthread_mutex_init_if_static(pthread_mutex_t *mutex)
{
    int res = 0;
    if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
        portENTER_CRITICAL(&s_mutex_init_lock);
        if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
            res = pthread_mutex_init(mutex, NULL);
        }
        portEXIT_CRITICAL(&s_mutex_init_lock);
    }
    return res;
}

int IRAM_ATTR pthread_mutex_lock(pthread_mutex_t *mutex)
{
    if (!mutex) {
        return EINVAL;
    }
    int res = pthread_mutex_init_if_static(mutex);
    if (res != 0) {
        return res;
    }
    return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, portMAX_DELAY);
}

int IRAM_ATTR pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    if (!mutex) {
        return EINVAL;
    }
    int res = pthread_mutex_init_if_static(mutex);
    if (res != 0) {
        return res;
    }
    return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, 0);
}

int IRAM_ATTR pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    esp_pthread_mutex_t *mux;

    if (!mutex) {
        return EINVAL;
    }
    mux = (esp_pthread_mutex_t *)*mutex;
    if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
        xSemaphoreGiveRecursive(mux->sem);
    } else {
        xSemaphoreGive(mux->sem);
    }
    return 0;
}
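
// A minimal usage sketch (illustrative only, not part of the original file): a recursive
// mutex created through the attribute API, so the same task may lock it more than once.
//
//     #include <pthread.h>
//
//     static void example_recursive_mutex(void)
//     {
//         pthread_mutexattr_t attr;
//         pthread_mutex_t mutex;
//
//         pthread_mutexattr_init(&attr);
//         pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
//         pthread_mutex_init(&mutex, &attr);
//         pthread_mutexattr_destroy(&attr);
//
//         pthread_mutex_lock(&mutex);
//         pthread_mutex_lock(&mutex);   // OK: recursive type allows nested locking
//         pthread_mutex_unlock(&mutex);
//         pthread_mutex_unlock(&mutex);
//
//         pthread_mutex_destroy(&mutex);
//     }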
int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    if (!attr) {
        return EINVAL;
    }
    attr->type = PTHREAD_MUTEX_NORMAL;
    attr->is_initialized = 1;
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    if (!attr) {
        return EINVAL;
    }
    attr->is_initialized = 0;
    return 0;
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return ENOSYS;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (!attr) {
        return EINVAL;
    }
    pthread_mutexattr_t tmp_attr = {.type = type};
    int res = mutexattr_check(&tmp_attr);
    if (!res) {
        attr->type = type;
    }
    return res;
}