// pthread.c — pthread API implemented on top of FreeRTOS (ESP-IDF)
  1. // Copyright 2017 Espressif Systems (Shanghai) PTE LTD
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. // http://www.apache.org/licenses/LICENSE-2.0
  7. //
  8. // Unless required by applicable law or agreed to in writing, software
  9. // distributed under the License is distributed on an "AS IS" BASIS,
  10. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. // See the License for the specific language governing permissions and
  12. // limitations under the License.
  13. //
  14. // This module implements pthread API on top of FreeRTOS. API is implemented to the level allowing
  15. // libstdcxx threading framework to operate correctly. So not all original pthread routines are supported.
  16. // Moreover some implemened functions do not provide full functionality, e.g. pthread_create does not support
  17. // thread's attributes customization (prio, stack size and so on). So if you are not satisfied with default
  18. // behavior use native FreeRTOS API.
  19. //
  20. #include <errno.h>
  21. #include <pthread.h>
  22. #include <string.h>
  23. #include "esp_err.h"
  24. #include "esp_attr.h"
  25. #include "freertos/FreeRTOS.h"
  26. #include "freertos/task.h"
  27. #include "freertos/semphr.h"
  28. #include "freertos/list.h"
  29. #define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
  30. #include "esp_log.h"
const static char *TAG = "esp_pthread";

/** Lifecycle state of a pthread-wrapped task */
enum esp_pthread_task_state {
    PTHREAD_TASK_STATE_RUN,   ///< task is running
    PTHREAD_TASK_STATE_EXIT   ///< task has exited but descriptor is kept for a later join
};

/** pthread thread FreeRTOS wrapper */
typedef struct {
    ListItem_t list_item;              ///< Tasks list node struct. FreeRTOS task handle is kept as list_item.xItemValue
    TaskHandle_t join_task;            ///< Handle of the task waiting to join
    enum esp_pthread_task_state state; ///< pthread task state
    bool detached;                     ///< True if pthread is detached
} esp_pthread_t;

/** pthread wrapper task arg */
typedef struct {
    void *(*func)(void *); ///< user task entry
    void *arg;             ///< user task argument
} esp_pthread_task_arg_t;

/** pthread mutex FreeRTOS wrapper */
typedef struct {
    ListItem_t list_item;  ///< mutexes list node struct
    SemaphoreHandle_t sem; ///< FreeRTOS semaphore backing this mutex
    int type;              ///< Mutex type. Currently supported PTHREAD_MUTEX_NORMAL and PTHREAD_MUTEX_RECURSIVE
} esp_pthread_mutex_t;

// Serializes pthread_once() init routines
static SemaphoreHandle_t s_once_mux = NULL;
// Protects s_threads_list
static SemaphoreHandle_t s_threads_mux = NULL;
// Spinlock guarding lazy init of statically-initialized mutexes
static portMUX_TYPE s_mutex_init_lock = portMUX_INITIALIZER_UNLOCKED;
// List of live pthread descriptors (esp_pthread_t), keyed by task handle
static List_t s_threads_list;

static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo);
  60. esp_err_t esp_pthread_init(void)
  61. {
  62. vListInitialise((List_t *)&s_threads_list);
  63. s_once_mux = xSemaphoreCreateMutex();
  64. if (s_once_mux == NULL) {
  65. return ESP_ERR_NO_MEM;
  66. }
  67. s_threads_mux = xSemaphoreCreateMutex();
  68. if (s_threads_mux == NULL) {
  69. vSemaphoreDelete(s_once_mux);
  70. return ESP_ERR_NO_MEM;
  71. }
  72. return ESP_OK;
  73. }
  74. static void *pthread_find_list_item(void *(*item_check)(ListItem_t *, void *arg), void *check_arg)
  75. {
  76. ListItem_t const *list_end = listGET_END_MARKER(&s_threads_list);
  77. ListItem_t *list_item = listGET_HEAD_ENTRY(&s_threads_list);
  78. while (list_item != list_end) {
  79. void *val = item_check(list_item, check_arg);
  80. if (val) {
  81. return val;
  82. }
  83. list_item = listGET_NEXT(list_item);
  84. }
  85. return NULL;
  86. }
  87. static void *pthread_get_handle_by_desc(ListItem_t *item, void *arg)
  88. {
  89. esp_pthread_t *pthread = listGET_LIST_ITEM_OWNER(item);
  90. if (pthread == arg) {
  91. return (void *)listGET_LIST_ITEM_VALUE(item);
  92. }
  93. return NULL;
  94. }
/** Map a pthread descriptor to its FreeRTOS task handle, NULL if not listed. */
static inline TaskHandle_t pthread_find_handle(pthread_t thread)
{
    return pthread_find_list_item(pthread_get_handle_by_desc, (void *)thread);
}
  99. static void *pthread_get_desc_by_handle(ListItem_t *item, void *arg)
  100. {
  101. TaskHandle_t task_handle = arg;
  102. TaskHandle_t cur_handle = (TaskHandle_t)listGET_LIST_ITEM_VALUE(item);
  103. if (task_handle == cur_handle) {
  104. return (esp_pthread_t *)listGET_LIST_ITEM_OWNER(item);
  105. }
  106. return NULL;
  107. }
/** Map a FreeRTOS task handle to its pthread descriptor, NULL if not listed. */
static esp_pthread_t *pthread_find(TaskHandle_t task_handle)
{
    return pthread_find_list_item(pthread_get_desc_by_handle, task_handle);
}
/**
 * Remove a thread descriptor from the global list and free it.
 * Caller must hold s_threads_mux.
 */
static void pthread_delete(esp_pthread_t *pthread)
{
    uxListRemove(&pthread->list_item);
    free(pthread);
}
  117. static void pthread_task_func(void *arg)
  118. {
  119. esp_pthread_task_arg_t *task_arg = (esp_pthread_task_arg_t *)arg;
  120. ESP_LOGV(TAG, "%s ENTER %p", __FUNCTION__, task_arg->func);
  121. // wait for start
  122. xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
  123. ESP_LOGV(TAG, "%s START %p", __FUNCTION__, task_arg->func);
  124. task_arg->func(task_arg->arg);
  125. ESP_LOGV(TAG, "%s END %p", __FUNCTION__, task_arg->func);
  126. free(task_arg);
  127. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  128. assert(false && "Failed to lock threads list!");
  129. }
  130. esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
  131. if (!pthread) {
  132. assert(false && "Failed to find pthread for current task!");
  133. }
  134. if (pthread->detached) {
  135. // auto-free for detached threads
  136. pthread_delete(pthread);
  137. } else {
  138. // Remove from list, it indicates that task has exited
  139. if (pthread->join_task) {
  140. // notify join
  141. xTaskNotify(pthread->join_task, 0, eNoAction);
  142. } else {
  143. pthread->state = PTHREAD_TASK_STATE_EXIT;
  144. }
  145. }
  146. xSemaphoreGive(s_threads_mux);
  147. vTaskDelete(NULL);
  148. ESP_LOGV(TAG, "%s EXIT", __FUNCTION__);
  149. }
  150. int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
  151. void *(*start_routine) (void *), void *arg)
  152. {
  153. TaskHandle_t xHandle = NULL;
  154. ESP_LOGV(TAG, "%s", __FUNCTION__);
  155. if (attr) {
  156. ESP_LOGE(TAG, "%s: attrs not supported!", __FUNCTION__);
  157. return ENOSYS;
  158. }
  159. esp_pthread_task_arg_t *task_arg = malloc(sizeof(esp_pthread_task_arg_t));
  160. if (task_arg == NULL) {
  161. ESP_LOGE(TAG, "Failed to allocate task args!");
  162. return ENOMEM;
  163. }
  164. memset(task_arg, 0, sizeof(esp_pthread_task_arg_t));
  165. esp_pthread_t *pthread = malloc(sizeof(esp_pthread_t));
  166. if (pthread == NULL) {
  167. ESP_LOGE(TAG, "Failed to allocate pthread data!");
  168. free(task_arg);
  169. return ENOMEM;
  170. }
  171. memset(pthread, 0, sizeof(esp_pthread_t));
  172. task_arg->func = start_routine;
  173. task_arg->arg = arg;
  174. BaseType_t res = xTaskCreate(&pthread_task_func, "pthread", CONFIG_ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT,
  175. task_arg, CONFIG_ESP32_PTHREAD_TASK_PRIO_DEFAULT, &xHandle);
  176. if(res != pdPASS) {
  177. ESP_LOGE(TAG, "Failed to create task!");
  178. free(pthread);
  179. free(task_arg);
  180. if (res == errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY) {
  181. return ENOMEM;
  182. } else {
  183. return EAGAIN;
  184. }
  185. }
  186. vListInitialiseItem((ListItem_t *)&pthread->list_item);
  187. listSET_LIST_ITEM_OWNER((ListItem_t *)&pthread->list_item, pthread);
  188. listSET_LIST_ITEM_VALUE((ListItem_t *)&pthread->list_item, (TickType_t)xHandle);
  189. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  190. assert(false && "Failed to lock threads list!");
  191. }
  192. vListInsertEnd((List_t *)&s_threads_list, (ListItem_t *)&pthread->list_item);
  193. xSemaphoreGive(s_threads_mux);
  194. // start task
  195. xTaskNotify(xHandle, 0, eNoAction);
  196. *thread = (pthread_t)pthread; // pointer value fit into pthread_t (uint32_t)
  197. ESP_LOGV(TAG, "Created task %x", (uint32_t)xHandle);
  198. return 0;
  199. }
  200. int pthread_join(pthread_t thread, void **retval)
  201. {
  202. esp_pthread_t *pthread = (esp_pthread_t *)thread;
  203. int ret = 0;
  204. ESP_LOGV(TAG, "%s %p", __FUNCTION__, pthread);
  205. // find task
  206. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  207. assert(false && "Failed to lock threads list!");
  208. }
  209. TaskHandle_t handle = pthread_find_handle(thread);
  210. if (!handle) {
  211. // not found
  212. ret = ESRCH;
  213. } else if (pthread->join_task) {
  214. // already have waiting task to join
  215. ret = EINVAL;
  216. } else if (handle == xTaskGetCurrentTaskHandle()) {
  217. // join to self not allowed
  218. ret = EDEADLK;
  219. } else {
  220. esp_pthread_t *cur_pthread = pthread_find(xTaskGetCurrentTaskHandle());
  221. if (cur_pthread && cur_pthread->join_task == handle) {
  222. // join to each other not allowed
  223. ret = EDEADLK;
  224. } else {
  225. if (pthread->state == PTHREAD_TASK_STATE_RUN) {
  226. pthread->join_task = xTaskGetCurrentTaskHandle();
  227. } else {
  228. pthread_delete(pthread);
  229. }
  230. }
  231. }
  232. xSemaphoreGive(s_threads_mux);
  233. if (ret == 0 && pthread->join_task) {
  234. xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
  235. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  236. assert(false && "Failed to lock threads list!");
  237. }
  238. pthread_delete(pthread);
  239. xSemaphoreGive(s_threads_mux);
  240. }
  241. if (retval) {
  242. *retval = 0; // no exit code in FreeRTOS
  243. }
  244. ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
  245. return ret;
  246. }
  247. int pthread_detach(pthread_t thread)
  248. {
  249. esp_pthread_t *pthread = (esp_pthread_t *)thread;
  250. int ret = 0;
  251. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  252. assert(false && "Failed to lock threads list!");
  253. }
  254. TaskHandle_t handle = pthread_find_handle(thread);
  255. if (!handle) {
  256. ret = ESRCH;
  257. } else {
  258. pthread->detached = true;
  259. }
  260. xSemaphoreGive(s_threads_mux);
  261. ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
  262. return ret;
  263. }
/** Thread cancellation is not implemented on FreeRTOS; always fails with ENOSYS. */
int pthread_cancel(pthread_t thread)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return ENOSYS;
}
/** Yield the CPU: a zero-tick delay lets equal-priority ready tasks run. */
int sched_yield( void )
{
    vTaskDelay(0);
    return 0;
}
/**
 * Return the calling thread's pthread handle (its descriptor pointer).
 * Asserts if the current FreeRTOS task was not created via pthread_create(),
 * since such a task has no entry in the threads list.
 */
pthread_t pthread_self(void)
{
    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
    if (!pthread) {
        assert(false && "Failed to find current thread ID!");
    }
    xSemaphoreGive(s_threads_mux);
    return (pthread_t)pthread;
}
  286. int pthread_equal(pthread_t t1, pthread_t t2)
  287. {
  288. return t1 == t2 ? 1 : 0;
  289. }
  290. /***************** KEY ******************/
/**
 * Create a thread-specific-data key.
 * Only a single key is supported (sufficient for libstdc++'s threading glue),
 * and the destructor argument is ignored.
 *
 * @return 0 for the first key, ENOSYS for any further key.
 */
int pthread_key_create(pthread_key_t *key, void (*destructor)(void*))
{
    static int s_created;
    //TODO: Key destructors not supported! 'destructor' is ignored.
    if (s_created) {
        // key API supports just one key necessary by libstdcxx threading implementation
        ESP_LOGE(TAG, "%s: multiple keys not supported!", __FUNCTION__);
        return ENOSYS;
    }
    *key = 1;
    s_created = 1;
    return 0;
}
/** Key deletion is not implemented; always fails with ENOSYS. */
int pthread_key_delete(pthread_key_t key)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return ENOSYS;
}
/** Thread-specific data retrieval is not implemented; always returns NULL. */
void *pthread_getspecific(pthread_key_t key)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return NULL;
}
/** Thread-specific data storage is not implemented; always fails with ENOSYS. */
int pthread_setspecific(pthread_key_t key, const void *value)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return ENOSYS;
}
  319. /***************** ONCE ******************/
  320. int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
  321. {
  322. if (once_control == NULL || init_routine == NULL || !once_control->is_initialized) {
  323. ESP_LOGE(TAG, "%s: Invalid args!", __FUNCTION__);
  324. return EINVAL;
  325. }
  326. TaskHandle_t cur_task = xTaskGetCurrentTaskHandle();
  327. // do not take mutex if OS is not running yet
  328. if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED ||
  329. !cur_task || xSemaphoreTake(s_once_mux, portMAX_DELAY) == pdTRUE)
  330. {
  331. if (!once_control->init_executed) {
  332. ESP_LOGV(TAG, "%s: call init_routine %p", __FUNCTION__, once_control);
  333. init_routine();
  334. once_control->init_executed = 1;
  335. }
  336. if (cur_task) {
  337. xSemaphoreGive(s_once_mux);
  338. }
  339. }
  340. else
  341. {
  342. ESP_LOGE(TAG, "%s: Failed to lock!", __FUNCTION__);
  343. return EBUSY;
  344. }
  345. return 0;
  346. }
  347. /***************** MUTEX ******************/
  348. static int mutexattr_check(const pthread_mutexattr_t *attr)
  349. {
  350. if (attr->type < PTHREAD_MUTEX_NORMAL || attr->type > PTHREAD_MUTEX_RECURSIVE) {
  351. return EINVAL;
  352. }
  353. return 0;
  354. }
  355. int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
  356. {
  357. int type = PTHREAD_MUTEX_NORMAL;
  358. if (!mutex) {
  359. return EINVAL;
  360. }
  361. if (attr) {
  362. if (!attr->is_initialized) {
  363. return EINVAL;
  364. }
  365. int res = mutexattr_check(attr);
  366. if (res) {
  367. return res;
  368. }
  369. type = attr->type;
  370. }
  371. esp_pthread_mutex_t *mux = (esp_pthread_mutex_t *)malloc(sizeof(esp_pthread_mutex_t));
  372. if (!mux) {
  373. return ENOMEM;
  374. }
  375. mux->type = type;
  376. if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
  377. mux->sem = xSemaphoreCreateRecursiveMutex();
  378. } else {
  379. mux->sem = xSemaphoreCreateMutex();
  380. }
  381. if (!mux->sem) {
  382. free(mux);
  383. return EAGAIN;
  384. }
  385. *mutex = (pthread_mutex_t)mux; // pointer value fit into pthread_mutex_t (uint32_t)
  386. return 0;
  387. }
  388. int pthread_mutex_destroy(pthread_mutex_t *mutex)
  389. {
  390. esp_pthread_mutex_t *mux;
  391. ESP_LOGV(TAG, "%s %p", __FUNCTION__, mutex);
  392. if (!mutex) {
  393. return EINVAL;
  394. }
  395. mux = (esp_pthread_mutex_t *)*mutex;
  396. // check if mux is busy
  397. int res = pthread_mutex_lock_internal(mux, 0);
  398. if (res == EBUSY) {
  399. return EBUSY;
  400. }
  401. vSemaphoreDelete(mux->sem);
  402. free(mux);
  403. return 0;
  404. }
  405. static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo)
  406. {
  407. if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
  408. if (xSemaphoreTakeRecursive(mux->sem, tmo) != pdTRUE) {
  409. return EBUSY;
  410. }
  411. } else {
  412. if (xSemaphoreTake(mux->sem, tmo) != pdTRUE) {
  413. return EBUSY;
  414. }
  415. }
  416. return 0;
  417. }
/**
 * Lazily allocate state for a mutex declared with PTHREAD_MUTEX_INITIALIZER.
 * Double-checked under a spinlock so concurrent first-lockers initialize
 * exactly once. Returns 0 on success or if already initialized.
 */
static int pthread_mutex_init_if_static(pthread_mutex_t *mutex) {
    int res = 0;
    if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
        portENTER_CRITICAL(&s_mutex_init_lock);
        // re-check: another task may have initialized it while we spun
        if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
            res = pthread_mutex_init(mutex, NULL);
        }
        portEXIT_CRITICAL(&s_mutex_init_lock);
    }
    return res;
}
  429. int IRAM_ATTR pthread_mutex_lock(pthread_mutex_t *mutex)
  430. {
  431. if (!mutex) {
  432. return EINVAL;
  433. }
  434. int res = pthread_mutex_init_if_static(mutex);
  435. if (res != 0) {
  436. return res;
  437. }
  438. return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, portMAX_DELAY);
  439. }
  440. int IRAM_ATTR pthread_mutex_trylock(pthread_mutex_t *mutex)
  441. {
  442. if (!mutex) {
  443. return EINVAL;
  444. }
  445. int res = pthread_mutex_init_if_static(mutex);
  446. if (res != 0) {
  447. return res;
  448. }
  449. return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, 0);
  450. }
  451. int IRAM_ATTR pthread_mutex_unlock(pthread_mutex_t *mutex)
  452. {
  453. esp_pthread_mutex_t *mux;
  454. if (!mutex) {
  455. return EINVAL;
  456. }
  457. mux = (esp_pthread_mutex_t *)*mutex;
  458. if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
  459. xSemaphoreGiveRecursive(mux->sem);
  460. } else {
  461. xSemaphoreGive(mux->sem);
  462. }
  463. return 0;
  464. }
  465. int pthread_mutexattr_init(pthread_mutexattr_t *attr)
  466. {
  467. if (!attr) {
  468. return EINVAL;
  469. }
  470. attr->type = PTHREAD_MUTEX_NORMAL;
  471. attr->is_initialized = 1;
  472. return 0;
  473. }
  474. int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
  475. {
  476. if (!attr) {
  477. return EINVAL;
  478. }
  479. attr->is_initialized = 0;
  480. return 0;
  481. }
/** Reading the mutex type back is not implemented; always fails with ENOSYS. */
int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return ENOSYS;
}
  487. int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
  488. {
  489. if (!attr) {
  490. return EINVAL;
  491. }
  492. pthread_mutexattr_t tmp_attr = {.type = type};
  493. int res = mutexattr_check(&tmp_attr);
  494. if (!res) {
  495. attr->type = type;
  496. }
  497. return res;
  498. }