// pthread.c
  1. // Copyright 2017 Espressif Systems (Shanghai) PTE LTD
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. // http://www.apache.org/licenses/LICENSE-2.0
  7. //
  8. // Unless required by applicable law or agreed to in writing, software
  9. // distributed under the License is distributed on an "AS IS" BASIS,
  10. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. // See the License for the specific language governing permissions and
  12. // limitations under the License.
  13. //
  14. // This module implements pthread API on top of FreeRTOS. API is implemented to the level allowing
  15. // libstdcxx threading framework to operate correctly. So not all original pthread routines are supported.
  16. // Moreover some implemened functions do not provide full functionality, e.g. pthread_create does not support
  17. // thread's attributes customization (prio, stack size and so on). So if you are not satisfied with default
  18. // behavior use native FreeRTOS API.
  19. //
  20. #include <errno.h>
  21. #include <pthread.h>
  22. #include <string.h>
  23. #include "esp_err.h"
  24. #include "esp_attr.h"
  25. #include "freertos/FreeRTOS.h"
  26. #include "freertos/task.h"
  27. #include "freertos/semphr.h"
  28. #include "freertos/list.h"
  29. #include "pthread_internal.h"
  30. #define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
  31. #include "esp_log.h"
// Log tag used by ESP_LOGx calls in this module.
const static char *TAG = "esp_pthread";

/** task state */
enum esp_pthread_task_state {
    PTHREAD_TASK_STATE_RUN,   ///< thread is running (has not exited yet)
    PTHREAD_TASK_STATE_EXIT   ///< thread has exited but was not joined yet
};

/** pthread thread FreeRTOS wrapper */
typedef struct {
    ListItem_t list_item;              ///< Tasks list node struct. FreeRTOS task handle is kept as list_item.xItemValue
    TaskHandle_t join_task;            ///< Handle of the task waiting to join
    enum esp_pthread_task_state state; ///< pthread task state
    bool detached;                     ///< True if pthread is detached
} esp_pthread_t;

/** pthread wrapper task arg */
typedef struct {
    void *(*func)(void *); ///< user task entry
    void *arg;             ///< user task argument
} esp_pthread_task_arg_t;

/** pthread mutex FreeRTOS wrapper */
typedef struct {
    ListItem_t list_item;  ///< mutexes list node struct
    SemaphoreHandle_t sem; ///< Handle of the underlying FreeRTOS (recursive) mutex
    int type;              ///< Mutex type. Currently supported PTHREAD_MUTEX_NORMAL and PTHREAD_MUTEX_RECURSIVE
} esp_pthread_mutex_t;

// Recursive mutex serializing pthread_once() init routines.
static SemaphoreHandle_t s_once_mux = NULL;
// Mutex guarding all accesses to s_threads_list.
static SemaphoreHandle_t s_threads_mux = NULL;
// Spinlock guarding lazy initialization of statically initialized pthread mutexes.
static portMUX_TYPE s_mutex_init_lock = portMUX_INITIALIZER_UNLOCKED;
// Registry of live thread descriptors (linked through esp_pthread_t.list_item).
static List_t s_threads_list;

// Forward declaration: shared take-with-timeout helper used by lock/trylock/destroy.
static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo);
  61. esp_err_t esp_pthread_init(void)
  62. {
  63. vListInitialise((List_t *)&s_threads_list);
  64. s_once_mux = xSemaphoreCreateRecursiveMutex();
  65. if (s_once_mux == NULL) {
  66. return ESP_ERR_NO_MEM;
  67. }
  68. s_threads_mux = xSemaphoreCreateMutex();
  69. if (s_threads_mux == NULL) {
  70. vSemaphoreDelete(s_once_mux);
  71. return ESP_ERR_NO_MEM;
  72. }
  73. return ESP_OK;
  74. }
  75. static void *pthread_find_list_item(void *(*item_check)(ListItem_t *, void *arg), void *check_arg)
  76. {
  77. ListItem_t const *list_end = listGET_END_MARKER(&s_threads_list);
  78. ListItem_t *list_item = listGET_HEAD_ENTRY(&s_threads_list);
  79. while (list_item != list_end) {
  80. void *val = item_check(list_item, check_arg);
  81. if (val) {
  82. return val;
  83. }
  84. list_item = listGET_NEXT(list_item);
  85. }
  86. return NULL;
  87. }
  88. static void *pthread_get_handle_by_desc(ListItem_t *item, void *arg)
  89. {
  90. esp_pthread_t *pthread = listGET_LIST_ITEM_OWNER(item);
  91. if (pthread == arg) {
  92. return (void *)listGET_LIST_ITEM_VALUE(item);
  93. }
  94. return NULL;
  95. }
  96. static inline TaskHandle_t pthread_find_handle(pthread_t thread)
  97. {
  98. return pthread_find_list_item(pthread_get_handle_by_desc, (void *)thread);
  99. }
  100. static void *pthread_get_desc_by_handle(ListItem_t *item, void *arg)
  101. {
  102. TaskHandle_t task_handle = arg;
  103. TaskHandle_t cur_handle = (TaskHandle_t)listGET_LIST_ITEM_VALUE(item);
  104. if (task_handle == cur_handle) {
  105. return (esp_pthread_t *)listGET_LIST_ITEM_OWNER(item);
  106. }
  107. return NULL;
  108. }
  109. static esp_pthread_t *pthread_find(TaskHandle_t task_handle)
  110. {
  111. return pthread_find_list_item(pthread_get_desc_by_handle, task_handle);
  112. }
/**
 * Unlink a thread descriptor from s_threads_list and free it.
 * Caller must hold s_threads_mux (all call sites in this file do).
 */
static void pthread_delete(esp_pthread_t *pthread)
{
    uxListRemove(&pthread->list_item);
    free(pthread);
}
/**
 * FreeRTOS task body wrapping the user's start routine.
 * arg is a heap-allocated esp_pthread_task_arg_t owned (and freed) here.
 */
static void pthread_task_func(void *arg)
{
    esp_pthread_task_arg_t *task_arg = (esp_pthread_task_arg_t *)arg;
    ESP_LOGV(TAG, "%s ENTER %p", __FUNCTION__, task_arg->func);
    // wait for start: pthread_create() notifies us only after the descriptor
    // has been inserted into s_threads_list
    xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
    ESP_LOGV(TAG, "%s START %p", __FUNCTION__, task_arg->func);
    task_arg->func(task_arg->arg);
    ESP_LOGV(TAG, "%s END %p", __FUNCTION__, task_arg->func);
    free(task_arg);
    /* preemptively clean up thread local storage, rather than
    waiting for the idle task to clean up the thread */
    pthread_internal_local_storage_destructor_callback();
    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
    if (!pthread) {
        assert(false && "Failed to find pthread for current task!");
    }
    if (pthread->detached) {
        // auto-free for detached threads: nobody will join us
        pthread_delete(pthread);
    } else {
        // Joinable thread: the descriptor must stay in the list so a
        // (future) pthread_join() can find it
        if (pthread->join_task) {
            // notify join: a task is already blocked in pthread_join();
            // it will free the descriptor after waking up
            xTaskNotify(pthread->join_task, 0, eNoAction);
        } else {
            // Mark as exited; a later pthread_join() will free the
            // descriptor immediately without waiting
            pthread->state = PTHREAD_TASK_STATE_EXIT;
        }
    }
    xSemaphoreGive(s_threads_mux);
    vTaskDelete(NULL);
    // NOTE(review): vTaskDelete(NULL) deletes the calling task and does not
    // return, so the trace line below is never reached.
    ESP_LOGV(TAG, "%s EXIT", __FUNCTION__);
}
  154. int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
  155. void *(*start_routine) (void *), void *arg)
  156. {
  157. TaskHandle_t xHandle = NULL;
  158. ESP_LOGV(TAG, "%s", __FUNCTION__);
  159. if (attr) {
  160. ESP_LOGE(TAG, "%s: attrs not supported!", __FUNCTION__);
  161. return ENOSYS;
  162. }
  163. esp_pthread_task_arg_t *task_arg = malloc(sizeof(esp_pthread_task_arg_t));
  164. if (task_arg == NULL) {
  165. ESP_LOGE(TAG, "Failed to allocate task args!");
  166. return ENOMEM;
  167. }
  168. memset(task_arg, 0, sizeof(esp_pthread_task_arg_t));
  169. esp_pthread_t *pthread = malloc(sizeof(esp_pthread_t));
  170. if (pthread == NULL) {
  171. ESP_LOGE(TAG, "Failed to allocate pthread data!");
  172. free(task_arg);
  173. return ENOMEM;
  174. }
  175. memset(pthread, 0, sizeof(esp_pthread_t));
  176. task_arg->func = start_routine;
  177. task_arg->arg = arg;
  178. BaseType_t res = xTaskCreate(&pthread_task_func, "pthread", CONFIG_ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT,
  179. task_arg, CONFIG_ESP32_PTHREAD_TASK_PRIO_DEFAULT, &xHandle);
  180. if(res != pdPASS) {
  181. ESP_LOGE(TAG, "Failed to create task!");
  182. free(pthread);
  183. free(task_arg);
  184. if (res == errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY) {
  185. return ENOMEM;
  186. } else {
  187. return EAGAIN;
  188. }
  189. }
  190. vListInitialiseItem((ListItem_t *)&pthread->list_item);
  191. listSET_LIST_ITEM_OWNER((ListItem_t *)&pthread->list_item, pthread);
  192. listSET_LIST_ITEM_VALUE((ListItem_t *)&pthread->list_item, (TickType_t)xHandle);
  193. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  194. assert(false && "Failed to lock threads list!");
  195. }
  196. vListInsertEnd((List_t *)&s_threads_list, (ListItem_t *)&pthread->list_item);
  197. xSemaphoreGive(s_threads_mux);
  198. // start task
  199. xTaskNotify(xHandle, 0, eNoAction);
  200. *thread = (pthread_t)pthread; // pointer value fit into pthread_t (uint32_t)
  201. ESP_LOGV(TAG, "Created task %x", (uint32_t)xHandle);
  202. return 0;
  203. }
/**
 * Wait for a thread to terminate and reclaim its descriptor.
 *
 * @param thread thread ID returned by pthread_create()
 * @param retval if non-NULL, set to 0 (FreeRTOS tasks have no exit code)
 * @return 0, or ESRCH/EINVAL/EDEADLK on error
 */
int pthread_join(pthread_t thread, void **retval)
{
    esp_pthread_t *pthread = (esp_pthread_t *)thread;
    int ret = 0;
    bool wait = false;
    ESP_LOGV(TAG, "%s %p", __FUNCTION__, pthread);
    // find task
    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    TaskHandle_t handle = pthread_find_handle(thread);
    if (!handle) {
        // not found
        ret = ESRCH;
    } else if (pthread->join_task) {
        // already have waiting task to join
        ret = EINVAL;
    } else if (handle == xTaskGetCurrentTaskHandle()) {
        // join to self not allowed
        ret = EDEADLK;
    } else {
        esp_pthread_t *cur_pthread = pthread_find(xTaskGetCurrentTaskHandle());
        if (cur_pthread && cur_pthread->join_task == handle) {
            // join to each other not allowed (target is already joining us)
            ret = EDEADLK;
        } else {
            if (pthread->state == PTHREAD_TASK_STATE_RUN) {
                // Target still running: register ourselves as the joiner
                // and block (below, outside the lock) until
                // pthread_task_func() notifies us on exit.
                pthread->join_task = xTaskGetCurrentTaskHandle();
                wait = true;
            } else {
                // Target already exited: reclaim its descriptor right away.
                pthread_delete(pthread);
            }
        }
    }
    xSemaphoreGive(s_threads_mux);
    if (ret == 0 && wait) {
        // Block until the exit notification, then free the descriptor.
        xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
        if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
            assert(false && "Failed to lock threads list!");
        }
        pthread_delete(pthread);
        xSemaphoreGive(s_threads_mux);
    }
    if (retval) {
        *retval = 0; // no exit code in FreeRTOS
    }
    ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
    return ret;
}
  253. int pthread_detach(pthread_t thread)
  254. {
  255. esp_pthread_t *pthread = (esp_pthread_t *)thread;
  256. int ret = 0;
  257. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  258. assert(false && "Failed to lock threads list!");
  259. }
  260. TaskHandle_t handle = pthread_find_handle(thread);
  261. if (!handle) {
  262. ret = ESRCH;
  263. } else {
  264. pthread->detached = true;
  265. }
  266. xSemaphoreGive(s_threads_mux);
  267. ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
  268. return ret;
  269. }
/**
 * Thread cancellation is not implemented on top of FreeRTOS.
 * Always returns ENOSYS.
 */
int pthread_cancel(pthread_t thread)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return ENOSYS;
}
/**
 * Yield the processor to another ready task of equal priority.
 * A zero-tick delay is the FreeRTOS idiom for a voluntary yield.
 */
int sched_yield( void )
{
    vTaskDelay(0);
    return 0;
}
  280. pthread_t pthread_self(void)
  281. {
  282. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  283. assert(false && "Failed to lock threads list!");
  284. }
  285. esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
  286. if (!pthread) {
  287. assert(false && "Failed to find current thread ID!");
  288. }
  289. xSemaphoreGive(s_threads_mux);
  290. return (pthread_t)pthread;
  291. }
  292. int pthread_equal(pthread_t t1, pthread_t t2)
  293. {
  294. return t1 == t2 ? 1 : 0;
  295. }
  296. /***************** ONCE ******************/
/**
 * Execute init_routine exactly once for the given once_control object.
 *
 * @param once_control must have been set up with PTHREAD_ONCE_INIT
 *                     (is_initialized set); otherwise EINVAL
 * @param init_routine function to run once; NULL returns EINVAL
 * @return 0 on success, EINVAL on bad args, EBUSY if the lock cannot be taken
 */
int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
    if (once_control == NULL || init_routine == NULL || !once_control->is_initialized) {
        ESP_LOGE(TAG, "%s: Invalid args!", __FUNCTION__);
        return EINVAL;
    }
    TaskHandle_t cur_task = xTaskGetCurrentTaskHandle();
    // do not take mutex if OS is not running yet
    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED ||
        // init_routine can call pthread_once for another objects, so use recursive mutex
        // FIXME: behaviour is undefined if init_routine calls pthread_once for the same object in the current context
        !cur_task || xSemaphoreTakeRecursive(s_once_mux, portMAX_DELAY) == pdTRUE)
    {
        if (!once_control->init_executed) {
            ESP_LOGV(TAG, "%s: call init_routine %p", __FUNCTION__, once_control);
            init_routine();
            // Record completion so later calls skip init_routine.
            once_control->init_executed = 1;
        }
        // Release the lock taken above.
        // NOTE(review): if the scheduler is NOT_STARTED but cur_task is
        // non-NULL, this gives a mutex that was never taken — presumably
        // unreachable because no task runs before the scheduler starts;
        // confirm against the FreeRTOS port behavior.
        if (cur_task) {
            xSemaphoreGiveRecursive(s_once_mux);
        }
    }
    else
    {
        ESP_LOGE(TAG, "%s: Failed to lock!", __FUNCTION__);
        return EBUSY;
    }
    return 0;
}
  326. /***************** MUTEX ******************/
  327. static int mutexattr_check(const pthread_mutexattr_t *attr)
  328. {
  329. if (attr->type < PTHREAD_MUTEX_NORMAL || attr->type > PTHREAD_MUTEX_RECURSIVE) {
  330. return EINVAL;
  331. }
  332. return 0;
  333. }
  334. int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
  335. {
  336. int type = PTHREAD_MUTEX_NORMAL;
  337. if (!mutex) {
  338. return EINVAL;
  339. }
  340. if (attr) {
  341. if (!attr->is_initialized) {
  342. return EINVAL;
  343. }
  344. int res = mutexattr_check(attr);
  345. if (res) {
  346. return res;
  347. }
  348. type = attr->type;
  349. }
  350. esp_pthread_mutex_t *mux = (esp_pthread_mutex_t *)malloc(sizeof(esp_pthread_mutex_t));
  351. if (!mux) {
  352. return ENOMEM;
  353. }
  354. mux->type = type;
  355. if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
  356. mux->sem = xSemaphoreCreateRecursiveMutex();
  357. } else {
  358. mux->sem = xSemaphoreCreateMutex();
  359. }
  360. if (!mux->sem) {
  361. free(mux);
  362. return EAGAIN;
  363. }
  364. *mutex = (pthread_mutex_t)mux; // pointer value fit into pthread_mutex_t (uint32_t)
  365. return 0;
  366. }
  367. int pthread_mutex_destroy(pthread_mutex_t *mutex)
  368. {
  369. esp_pthread_mutex_t *mux;
  370. ESP_LOGV(TAG, "%s %p", __FUNCTION__, mutex);
  371. if (!mutex) {
  372. return EINVAL;
  373. }
  374. mux = (esp_pthread_mutex_t *)*mutex;
  375. // check if mux is busy
  376. int res = pthread_mutex_lock_internal(mux, 0);
  377. if (res == EBUSY) {
  378. return EBUSY;
  379. }
  380. vSemaphoreDelete(mux->sem);
  381. free(mux);
  382. return 0;
  383. }
  384. static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo)
  385. {
  386. if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
  387. if (xSemaphoreTakeRecursive(mux->sem, tmo) != pdTRUE) {
  388. return EBUSY;
  389. }
  390. } else {
  391. if (xSemaphoreTake(mux->sem, tmo) != pdTRUE) {
  392. return EBUSY;
  393. }
  394. }
  395. return 0;
  396. }
/**
 * Lazily create the backing object for a statically initialized mutex.
 * Double-checked pattern: the cheap unsynchronized test first, then the
 * check is repeated under the spinlock so only one CPU/task performs the
 * initialization.
 * @return 0 if already initialized or on success, else pthread_mutex_init()'s error
 */
static int pthread_mutex_init_if_static(pthread_mutex_t *mutex) {
    int res = 0;
    if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
        portENTER_CRITICAL(&s_mutex_init_lock);
        if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
            res = pthread_mutex_init(mutex, NULL);
        }
        portEXIT_CRITICAL(&s_mutex_init_lock);
    }
    return res;
}
  408. int IRAM_ATTR pthread_mutex_lock(pthread_mutex_t *mutex)
  409. {
  410. if (!mutex) {
  411. return EINVAL;
  412. }
  413. int res = pthread_mutex_init_if_static(mutex);
  414. if (res != 0) {
  415. return res;
  416. }
  417. return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, portMAX_DELAY);
  418. }
  419. int IRAM_ATTR pthread_mutex_trylock(pthread_mutex_t *mutex)
  420. {
  421. if (!mutex) {
  422. return EINVAL;
  423. }
  424. int res = pthread_mutex_init_if_static(mutex);
  425. if (res != 0) {
  426. return res;
  427. }
  428. return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, 0);
  429. }
  430. int IRAM_ATTR pthread_mutex_unlock(pthread_mutex_t *mutex)
  431. {
  432. esp_pthread_mutex_t *mux;
  433. if (!mutex) {
  434. return EINVAL;
  435. }
  436. mux = (esp_pthread_mutex_t *)*mutex;
  437. if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
  438. xSemaphoreGiveRecursive(mux->sem);
  439. } else {
  440. xSemaphoreGive(mux->sem);
  441. }
  442. return 0;
  443. }
  444. int pthread_mutexattr_init(pthread_mutexattr_t *attr)
  445. {
  446. if (!attr) {
  447. return EINVAL;
  448. }
  449. attr->type = PTHREAD_MUTEX_NORMAL;
  450. attr->is_initialized = 1;
  451. return 0;
  452. }
  453. int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
  454. {
  455. if (!attr) {
  456. return EINVAL;
  457. }
  458. attr->is_initialized = 0;
  459. return 0;
  460. }
  461. int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
  462. {
  463. ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
  464. return ENOSYS;
  465. }
  466. int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
  467. {
  468. if (!attr) {
  469. return EINVAL;
  470. }
  471. pthread_mutexattr_t tmp_attr = {.type = type};
  472. int res = mutexattr_check(&tmp_attr);
  473. if (!res) {
  474. attr->type = type;
  475. }
  476. return res;
  477. }