/* pthread.c (16 KB) */
  1. // Copyright 2017 Espressif Systems (Shanghai) PTE LTD
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. // http://www.apache.org/licenses/LICENSE-2.0
  7. //
  8. // Unless required by applicable law or agreed to in writing, software
  9. // distributed under the License is distributed on an "AS IS" BASIS,
  10. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. // See the License for the specific language governing permissions and
  12. // limitations under the License.
  13. //
  14. // This module implements pthread API on top of FreeRTOS. API is implemented to the level allowing
  15. // libstdcxx threading framework to operate correctly. So not all original pthread routines are supported.
  16. // Moreover some implemened functions do not provide full functionality, e.g. pthread_create does not support
  17. // thread's attributes customization (prio, stack size and so on). So if you are not satisfied with default
  18. // behavior use native FreeRTOS API.
  19. //
  20. #include <errno.h>
  21. #include <pthread.h>
  22. #include <string.h>
  23. #include "esp_err.h"
  24. #include "esp_attr.h"
  25. #include "rom/queue.h"
  26. #include "freertos/FreeRTOS.h"
  27. #include "freertos/task.h"
  28. #include "freertos/semphr.h"
  29. #include "pthread_internal.h"
  30. #include "esp_pthread.h"
  31. #define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
  32. #include "esp_log.h"
// Log tag used by ESP_LOGx calls in this module
const static char *TAG = "pthread";

/** task state */
enum esp_pthread_task_state {
    PTHREAD_TASK_STATE_RUN,   ///< task is running user code
    PTHREAD_TASK_STATE_EXIT   ///< user routine returned; descriptor awaits join
};

/** pthread thread FreeRTOS wrapper */
typedef struct esp_pthread_entry {
    SLIST_ENTRY(esp_pthread_entry) list_node;  ///< Tasks list node struct.
    TaskHandle_t handle;                       ///< FreeRTOS task handle
    TaskHandle_t join_task;                    ///< Handle of the task waiting to join
    enum esp_pthread_task_state state;         ///< pthread task state
    bool detached;                             ///< True if pthread is detached
} esp_pthread_t;

/** pthread wrapper task arg */
typedef struct {
    void *(*func)(void *);  ///< user task entry
    void *arg;              ///< user task argument
    esp_pthread_cfg_t cfg;  ///< pthread configuration
} esp_pthread_task_arg_t;

/** pthread mutex FreeRTOS wrapper */
typedef struct {
    SemaphoreHandle_t sem;  ///< FreeRTOS semaphore backing the mutex
    int type;               ///< Mutex type. Currently supported PTHREAD_MUTEX_NORMAL and PTHREAD_MUTEX_RECURSIVE
} esp_pthread_mutex_t;

// Guards s_threads_list; created in esp_pthread_init()
static SemaphoreHandle_t s_threads_mux = NULL;
// Spinlock used only for lazy init of statically-initialized mutexes
static portMUX_TYPE s_mutex_init_lock = portMUX_INITIALIZER_UNLOCKED;
// Global list of live pthread descriptors
static SLIST_HEAD(esp_thread_list_head, esp_pthread_entry) s_threads_list
    = SLIST_HEAD_INITIALIZER(s_threads_list);
// TLS key holding a per-thread esp_pthread_cfg_t copy
static pthread_key_t s_pthread_cfg_key;

static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo);
  64. static void esp_pthread_cfg_key_destructor(void *value)
  65. {
  66. free(value);
  67. }
  68. esp_err_t esp_pthread_init(void)
  69. {
  70. if (pthread_key_create(&s_pthread_cfg_key, esp_pthread_cfg_key_destructor) != 0) {
  71. return ESP_ERR_NO_MEM;
  72. }
  73. s_threads_mux = xSemaphoreCreateMutex();
  74. if (s_threads_mux == NULL) {
  75. pthread_key_delete(s_pthread_cfg_key);
  76. return ESP_ERR_NO_MEM;
  77. }
  78. return ESP_OK;
  79. }
  80. static void *pthread_list_find_item(void *(*item_check)(esp_pthread_t *, void *arg), void *check_arg)
  81. {
  82. esp_pthread_t *it;
  83. SLIST_FOREACH(it, &s_threads_list, list_node) {
  84. void *val = item_check(it, check_arg);
  85. if (val) {
  86. return val;
  87. }
  88. }
  89. return NULL;
  90. }
  91. static void *pthread_get_handle_by_desc(esp_pthread_t *item, void *desc)
  92. {
  93. if (item == desc) {
  94. return item->handle;
  95. }
  96. return NULL;
  97. }
  98. static void *pthread_get_desc_by_handle(esp_pthread_t *item, void *hnd)
  99. {
  100. if (hnd == item->handle) {
  101. return item;
  102. }
  103. return NULL;
  104. }
  105. static inline TaskHandle_t pthread_find_handle(pthread_t thread)
  106. {
  107. return pthread_list_find_item(pthread_get_handle_by_desc, (void *)thread);
  108. }
  109. static esp_pthread_t *pthread_find(TaskHandle_t task_handle)
  110. {
  111. return pthread_list_find_item(pthread_get_desc_by_handle, task_handle);
  112. }
/**
 * Unlink a thread descriptor from the global list and free it.
 * Caller must hold s_threads_mux.
 */
static void pthread_delete(esp_pthread_t *pthread)
{
    SLIST_REMOVE(&s_threads_list, pthread, esp_pthread_entry, list_node);
    free(pthread);
}
  118. /* Call this function to configure pthread stacks in Pthreads */
  119. esp_err_t esp_pthread_set_cfg(const esp_pthread_cfg_t *cfg)
  120. {
  121. /* If a value is already set, update that value */
  122. esp_pthread_cfg_t *p = pthread_getspecific(s_pthread_cfg_key);
  123. if (!p) {
  124. p = malloc(sizeof(esp_pthread_cfg_t));
  125. if (!p) {
  126. return ESP_ERR_NO_MEM;
  127. }
  128. }
  129. *p = *cfg;
  130. pthread_setspecific(s_pthread_cfg_key, p);
  131. return 0;
  132. }
  133. esp_err_t esp_pthread_get_cfg(esp_pthread_cfg_t *p)
  134. {
  135. esp_pthread_cfg_t *cfg = pthread_getspecific(s_pthread_cfg_key);
  136. if (cfg) {
  137. *p = *cfg;
  138. return ESP_OK;
  139. }
  140. memset(p, 0, sizeof(*p));
  141. return ESP_ERR_NOT_FOUND;
  142. }
/**
 * FreeRTOS task body wrapping a pthread entry point.
 *
 * Blocks until pthread_create() sends the start notification (which
 * happens only after the descriptor is published in s_threads_list),
 * optionally re-registers the inherited configuration, runs the user
 * routine, then performs exit bookkeeping under s_threads_mux and
 * deletes the FreeRTOS task.
 *
 * @param arg heap-allocated esp_pthread_task_arg_t; freed here.
 */
static void pthread_task_func(void *arg)
{
    esp_pthread_task_arg_t *task_arg = (esp_pthread_task_arg_t *)arg;
    ESP_LOGV(TAG, "%s ENTER %p", __FUNCTION__, task_arg->func);
    // wait for start notification from pthread_create()
    xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
    if (task_arg->cfg.inherit_cfg) {
        /* If inherit option is set, then do a set_cfg() ourselves for future forks */
        esp_pthread_set_cfg(&task_arg->cfg);
    }
    ESP_LOGV(TAG, "%s START %p", __FUNCTION__, task_arg->func);
    task_arg->func(task_arg->arg);
    ESP_LOGV(TAG, "%s END %p", __FUNCTION__, task_arg->func);
    free(task_arg);
    /* preemptively clean up thread local storage, rather than
       waiting for the idle task to clean up the thread */
    pthread_internal_local_storage_destructor_callback();
    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
    if (!pthread) {
        assert(false && "Failed to find pthread for current task!");
    }
    if (pthread->detached) {
        // auto-free for detached threads: nobody will join this descriptor
        pthread_delete(pthread);
    } else {
        // joinable: either wake the waiting joiner (which reaps the
        // descriptor) or mark EXIT so a later pthread_join can reap it
        if (pthread->join_task) {
            // notify join
            xTaskNotify(pthread->join_task, 0, eNoAction);
        } else {
            pthread->state = PTHREAD_TASK_STATE_EXIT;
        }
    }
    xSemaphoreGive(s_threads_mux);
    ESP_LOGD(TAG, "Task stk_wm = %d", uxTaskGetStackHighWaterMark(NULL));
    vTaskDelete(NULL);
    // NOTE(review): unreachable — vTaskDelete(NULL) does not return
    ESP_LOGV(TAG, "%s EXIT", __FUNCTION__);
}
/**
 * Create a thread backed by a FreeRTOS task.
 *
 * Thread attributes are not supported (returns ENOSYS when attr given).
 * Stack size and priority come from Kconfig defaults, overridden by any
 * esp_pthread_set_cfg() made by the creating thread. The new task blocks
 * in pthread_task_func() until the descriptor is inserted into the global
 * list, then is released with a notification — so it cannot observe a
 * half-registered state.
 *
 * @param thread        out: opaque thread id (descriptor pointer).
 * @param attr          must be NULL (attributes unsupported).
 * @param start_routine user entry point.
 * @param arg           argument passed to start_routine.
 * @return 0 on success, ENOSYS/ENOMEM/EAGAIN on failure.
 */
int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
                   void *(*start_routine) (void *), void *arg)
{
    TaskHandle_t xHandle = NULL;
    ESP_LOGV(TAG, "%s", __FUNCTION__);
    if (attr) {
        ESP_LOGE(TAG, "%s: attrs not supported!", __FUNCTION__);
        return ENOSYS;
    }
    esp_pthread_task_arg_t *task_arg = malloc(sizeof(esp_pthread_task_arg_t));
    if (task_arg == NULL) {
        ESP_LOGE(TAG, "Failed to allocate task args!");
        return ENOMEM;
    }
    memset(task_arg, 0, sizeof(esp_pthread_task_arg_t));
    esp_pthread_t *pthread = malloc(sizeof(esp_pthread_t));
    if (pthread == NULL) {
        ESP_LOGE(TAG, "Failed to allocate pthread data!");
        free(task_arg);
        return ENOMEM;
    }
    // defaults, possibly overridden by the creator's TLS config below
    uint32_t stack_size = CONFIG_ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT;
    BaseType_t prio = CONFIG_ESP32_PTHREAD_TASK_PRIO_DEFAULT;
    esp_pthread_cfg_t *pthread_cfg = pthread_getspecific(s_pthread_cfg_key);
    if (pthread_cfg) {
        if (pthread_cfg->stack_size) {
            stack_size = pthread_cfg->stack_size;
        }
        if (pthread_cfg->prio && pthread_cfg->prio < configMAX_PRIORITIES) {
            prio = pthread_cfg->prio;
        }
        // copy so pthread_task_func can honor inherit_cfg
        task_arg->cfg = *pthread_cfg;
    }
    memset(pthread, 0, sizeof(esp_pthread_t));
    task_arg->func = start_routine;
    task_arg->arg = arg;
    BaseType_t res = xTaskCreate(&pthread_task_func, "pthread", stack_size,
                                 task_arg, prio, &xHandle);
    if(res != pdPASS) {
        ESP_LOGE(TAG, "Failed to create task!");
        free(pthread);
        free(task_arg);
        if (res == errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY) {
            return ENOMEM;
        } else {
            return EAGAIN;
        }
    }
    pthread->handle = xHandle;
    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    SLIST_INSERT_HEAD(&s_threads_list, pthread, list_node);
    xSemaphoreGive(s_threads_mux);
    // start task: release pthread_task_func's initial xTaskNotifyWait
    xTaskNotify(xHandle, 0, eNoAction);
    *thread = (pthread_t)pthread; // pointer value fit into pthread_t (uint32_t)
    ESP_LOGV(TAG, "Created task %x", (uint32_t)xHandle);
    return 0;
}
/**
 * Wait for a thread to terminate and reap its descriptor.
 *
 * Error cases (all checked under s_threads_mux): ESRCH if the thread is
 * unknown, EINVAL if another task already joins it, EDEADLK on self-join
 * or mutual join. If the target is still running, the caller registers
 * itself as joiner and blocks on a task notification sent from
 * pthread_task_func(); if the target already exited, the descriptor is
 * reaped immediately.
 *
 * @param retval if non-NULL, set to 0 — FreeRTOS tasks have no exit code.
 * @return 0 on success or an errno value.
 */
int pthread_join(pthread_t thread, void **retval)
{
    esp_pthread_t *pthread = (esp_pthread_t *)thread;
    int ret = 0;
    bool wait = false;
    ESP_LOGV(TAG, "%s %p", __FUNCTION__, pthread);
    // find task
    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    TaskHandle_t handle = pthread_find_handle(thread);
    if (!handle) {
        // not found
        ret = ESRCH;
    } else if (pthread->join_task) {
        // already have waiting task to join
        ret = EINVAL;
    } else if (handle == xTaskGetCurrentTaskHandle()) {
        // join to self not allowed
        ret = EDEADLK;
    } else {
        esp_pthread_t *cur_pthread = pthread_find(xTaskGetCurrentTaskHandle());
        if (cur_pthread && cur_pthread->join_task == handle) {
            // join to each other not allowed
            ret = EDEADLK;
        } else {
            if (pthread->state == PTHREAD_TASK_STATE_RUN) {
                // target still running: register as joiner, block below
                pthread->join_task = xTaskGetCurrentTaskHandle();
                wait = true;
            } else {
                // target already exited: reap immediately
                pthread_delete(pthread);
            }
        }
    }
    xSemaphoreGive(s_threads_mux);
    if (ret == 0 && wait) {
        // block until pthread_task_func() notifies us, then reap
        xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
        if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
            assert(false && "Failed to lock threads list!");
        }
        pthread_delete(pthread);
        xSemaphoreGive(s_threads_mux);
    }
    if (retval) {
        *retval = 0; // no exit code in FreeRTOS
    }
    ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
    return ret;
}
  293. int pthread_detach(pthread_t thread)
  294. {
  295. esp_pthread_t *pthread = (esp_pthread_t *)thread;
  296. int ret = 0;
  297. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  298. assert(false && "Failed to lock threads list!");
  299. }
  300. TaskHandle_t handle = pthread_find_handle(thread);
  301. if (!handle) {
  302. ret = ESRCH;
  303. } else {
  304. pthread->detached = true;
  305. }
  306. xSemaphoreGive(s_threads_mux);
  307. ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
  308. return ret;
  309. }
/**
 * Thread cancellation is not implemented on top of FreeRTOS.
 * @return ENOSYS always.
 */
int pthread_cancel(pthread_t thread)
{
    ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
    return ENOSYS;
}
/**
 * Yield the processor to other ready tasks.
 * vTaskDelay(0) requests a reschedule without actually blocking.
 * @return 0 always.
 */
int sched_yield( void )
{
    vTaskDelay(0);
    return 0;
}
  320. pthread_t pthread_self(void)
  321. {
  322. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  323. assert(false && "Failed to lock threads list!");
  324. }
  325. esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
  326. if (!pthread) {
  327. assert(false && "Failed to find current thread ID!");
  328. }
  329. xSemaphoreGive(s_threads_mux);
  330. return (pthread_t)pthread;
  331. }
  332. int pthread_equal(pthread_t t1, pthread_t t2)
  333. {
  334. return t1 == t2 ? 1 : 0;
  335. }
/***************** ONCE ******************/
/**
 * Run init_routine exactly once across all threads racing on once_control.
 *
 * Uses an atomic compare-and-set on once_control->init_executed: the
 * thread that swaps 0 -> 1 runs init_routine; everyone else sees a
 * non-zero previous value and skips it. A separate CAS primitive is used
 * when once_control lives in external (SPI) RAM.
 *
 * NOTE(review): unlike POSIX, callers that lose the race do NOT wait for
 * init_routine to finish — they return immediately.
 *
 * @return 0 on success, EINVAL for NULL args or an uninitialized control.
 */
int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
    if (once_control == NULL || init_routine == NULL || !once_control->is_initialized) {
        ESP_LOGE(TAG, "%s: Invalid args!", __FUNCTION__);
        return EINVAL;
    }
    uint32_t res = 1;
#if defined(CONFIG_SPIRAM_SUPPORT)
    // external RAM requires a dedicated compare-and-set primitive
    if (esp_ptr_external_ram(once_control)) {
        uxPortCompareSetExtram((uint32_t *) &once_control->init_executed, 0, &res);
    } else {
#endif
        uxPortCompareSet((uint32_t *) &once_control->init_executed, 0, &res);
#if defined(CONFIG_SPIRAM_SUPPORT)
    }
#endif
    // Check if compare and set was successful: res now holds the previous
    // value of init_executed; 0 means this thread won and must run init
    if (res == 0) {
        ESP_LOGV(TAG, "%s: call init_routine %p", __FUNCTION__, once_control);
        init_routine();
    }
    return 0;
}
  360. /***************** MUTEX ******************/
  361. static int mutexattr_check(const pthread_mutexattr_t *attr)
  362. {
  363. if (attr->type < PTHREAD_MUTEX_NORMAL || attr->type > PTHREAD_MUTEX_RECURSIVE) {
  364. return EINVAL;
  365. }
  366. return 0;
  367. }
  368. int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
  369. {
  370. int type = PTHREAD_MUTEX_NORMAL;
  371. if (!mutex) {
  372. return EINVAL;
  373. }
  374. if (attr) {
  375. if (!attr->is_initialized) {
  376. return EINVAL;
  377. }
  378. int res = mutexattr_check(attr);
  379. if (res) {
  380. return res;
  381. }
  382. type = attr->type;
  383. }
  384. esp_pthread_mutex_t *mux = (esp_pthread_mutex_t *)malloc(sizeof(esp_pthread_mutex_t));
  385. if (!mux) {
  386. return ENOMEM;
  387. }
  388. mux->type = type;
  389. if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
  390. mux->sem = xSemaphoreCreateRecursiveMutex();
  391. } else {
  392. mux->sem = xSemaphoreCreateMutex();
  393. }
  394. if (!mux->sem) {
  395. free(mux);
  396. return EAGAIN;
  397. }
  398. *mutex = (pthread_mutex_t)mux; // pointer value fit into pthread_mutex_t (uint32_t)
  399. return 0;
  400. }
  401. int pthread_mutex_destroy(pthread_mutex_t *mutex)
  402. {
  403. esp_pthread_mutex_t *mux;
  404. ESP_LOGV(TAG, "%s %p", __FUNCTION__, mutex);
  405. if (!mutex) {
  406. return EINVAL;
  407. }
  408. mux = (esp_pthread_mutex_t *)*mutex;
  409. // check if mux is busy
  410. int res = pthread_mutex_lock_internal(mux, 0);
  411. if (res == EBUSY) {
  412. return EBUSY;
  413. }
  414. vSemaphoreDelete(mux->sem);
  415. free(mux);
  416. return 0;
  417. }
  418. static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo)
  419. {
  420. if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
  421. if (xSemaphoreTakeRecursive(mux->sem, tmo) != pdTRUE) {
  422. return EBUSY;
  423. }
  424. } else {
  425. if (xSemaphoreTake(mux->sem, tmo) != pdTRUE) {
  426. return EBUSY;
  427. }
  428. }
  429. return 0;
  430. }
/**
 * Lazily initialize a mutex whose value is still the
 * PTHREAD_MUTEX_INITIALIZER sentinel (static initialization).
 * The sentinel is re-checked inside the spinlock so concurrent callers
 * cannot both allocate (double-checked initialization).
 *
 * @return 0 if already initialized or init succeeded, else the
 *         pthread_mutex_init() error code.
 */
static int pthread_mutex_init_if_static(pthread_mutex_t *mutex) {
    int res = 0;
    if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
        portENTER_CRITICAL(&s_mutex_init_lock);
        if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
            res = pthread_mutex_init(mutex, NULL);
        }
        portEXIT_CRITICAL(&s_mutex_init_lock);
    }
    return res;
}
  442. int IRAM_ATTR pthread_mutex_lock(pthread_mutex_t *mutex)
  443. {
  444. if (!mutex) {
  445. return EINVAL;
  446. }
  447. int res = pthread_mutex_init_if_static(mutex);
  448. if (res != 0) {
  449. return res;
  450. }
  451. return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, portMAX_DELAY);
  452. }
  453. int IRAM_ATTR pthread_mutex_trylock(pthread_mutex_t *mutex)
  454. {
  455. if (!mutex) {
  456. return EINVAL;
  457. }
  458. int res = pthread_mutex_init_if_static(mutex);
  459. if (res != 0) {
  460. return res;
  461. }
  462. return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, 0);
  463. }
  464. int IRAM_ATTR pthread_mutex_unlock(pthread_mutex_t *mutex)
  465. {
  466. esp_pthread_mutex_t *mux;
  467. if (!mutex) {
  468. return EINVAL;
  469. }
  470. mux = (esp_pthread_mutex_t *)*mutex;
  471. if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
  472. xSemaphoreGiveRecursive(mux->sem);
  473. } else {
  474. xSemaphoreGive(mux->sem);
  475. }
  476. return 0;
  477. }
  478. int pthread_mutexattr_init(pthread_mutexattr_t *attr)
  479. {
  480. if (!attr) {
  481. return EINVAL;
  482. }
  483. attr->type = PTHREAD_MUTEX_NORMAL;
  484. attr->is_initialized = 1;
  485. return 0;
  486. }
  487. int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
  488. {
  489. if (!attr) {
  490. return EINVAL;
  491. }
  492. attr->is_initialized = 0;
  493. return 0;
  494. }
  495. int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
  496. {
  497. ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
  498. return ENOSYS;
  499. }
  500. int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
  501. {
  502. if (!attr) {
  503. return EINVAL;
  504. }
  505. pthread_mutexattr_t tmp_attr = {.type = type};
  506. int res = mutexattr_check(&tmp_attr);
  507. if (!res) {
  508. attr->type = type;
  509. }
  510. return res;
  511. }