pthread.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536
  1. // Copyright 2017 Espressif Systems (Shanghai) PTE LTD
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. // http://www.apache.org/licenses/LICENSE-2.0
  7. //
  8. // Unless required by applicable law or agreed to in writing, software
  9. // distributed under the License is distributed on an "AS IS" BASIS,
  10. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11. // See the License for the specific language governing permissions and
  12. // limitations under the License.
  13. //
  14. // This module implements pthread API on top of FreeRTOS. API is implemented to the level allowing
  15. // libstdcxx threading framework to operate correctly. So not all original pthread routines are supported.
  16. // Moreover some implemened functions do not provide full functionality, e.g. pthread_create does not support
  17. // thread's attributes customization (prio, stack size and so on). So if you are not satisfied with default
  18. // behavior use native FreeRTOS API.
  19. //
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "esp_err.h"
#include "esp_attr.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/list.h"
#include "pthread_internal.h"

#define LOG_LOCAL_LEVEL CONFIG_LOG_DEFAULT_LEVEL
#include "esp_log.h"
const static char *TAG = "esp_pthread";

/** task state */
enum esp_pthread_task_state {
    PTHREAD_TASK_STATE_RUN,     ///< task is running
    PTHREAD_TASK_STATE_EXIT     ///< task has exited but has not been joined/reaped yet
};

/** pthread thread FreeRTOS wrapper */
typedef struct {
    ListItem_t list_item;               ///< Tasks list node struct. FreeRTOS task handle is kept as list_item.xItemValue
    TaskHandle_t join_task;             ///< Handle of the task waiting to join
    enum esp_pthread_task_state state;  ///< pthread task state
    bool detached;                      ///< True if pthread is detached
} esp_pthread_t;

/** pthread wrapper task arg */
typedef struct {
    void *(*func)(void *);  ///< user task entry
    void *arg;              ///< user task argument
} esp_pthread_task_arg_t;

/** pthread mutex FreeRTOS wrapper */
typedef struct {
    ListItem_t list_item;   ///< mutexes list node struct
    SemaphoreHandle_t sem;  ///< Handle of the underlying FreeRTOS (recursive) mutex
    int type;               ///< Mutex type. Currently supported PTHREAD_MUTEX_NORMAL and PTHREAD_MUTEX_RECURSIVE
} esp_pthread_mutex_t;

/// Mutex guarding s_threads_list; created in esp_pthread_init()
static SemaphoreHandle_t s_threads_mux = NULL;
/// Spinlock serializing lazy initialization of statically-initialized pthread mutexes
static portMUX_TYPE s_mutex_init_lock = portMUX_INITIALIZER_UNLOCKED;
/// List of all live pthreads created via pthread_create()
static List_t s_threads_list;

/// Forward declaration: takes the mutex's semaphore with timeout `tmo` ticks
static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo);
  60. esp_err_t esp_pthread_init(void)
  61. {
  62. vListInitialise((List_t *)&s_threads_list);
  63. s_threads_mux = xSemaphoreCreateMutex();
  64. if (s_threads_mux == NULL) {
  65. return ESP_ERR_NO_MEM;
  66. }
  67. return ESP_OK;
  68. }
  69. static void *pthread_find_list_item(void *(*item_check)(ListItem_t *, void *arg), void *check_arg)
  70. {
  71. ListItem_t const *list_end = listGET_END_MARKER(&s_threads_list);
  72. ListItem_t *list_item = listGET_HEAD_ENTRY(&s_threads_list);
  73. while (list_item != list_end) {
  74. void *val = item_check(list_item, check_arg);
  75. if (val) {
  76. return val;
  77. }
  78. list_item = listGET_NEXT(list_item);
  79. }
  80. return NULL;
  81. }
  82. static void *pthread_get_handle_by_desc(ListItem_t *item, void *arg)
  83. {
  84. esp_pthread_t *pthread = listGET_LIST_ITEM_OWNER(item);
  85. if (pthread == arg) {
  86. return (void *)listGET_LIST_ITEM_VALUE(item);
  87. }
  88. return NULL;
  89. }
  90. static inline TaskHandle_t pthread_find_handle(pthread_t thread)
  91. {
  92. return pthread_find_list_item(pthread_get_handle_by_desc, (void *)thread);
  93. }
  94. static void *pthread_get_desc_by_handle(ListItem_t *item, void *arg)
  95. {
  96. TaskHandle_t task_handle = arg;
  97. TaskHandle_t cur_handle = (TaskHandle_t)listGET_LIST_ITEM_VALUE(item);
  98. if (task_handle == cur_handle) {
  99. return (esp_pthread_t *)listGET_LIST_ITEM_OWNER(item);
  100. }
  101. return NULL;
  102. }
  103. static esp_pthread_t *pthread_find(TaskHandle_t task_handle)
  104. {
  105. return pthread_find_list_item(pthread_get_desc_by_handle, task_handle);
  106. }
/**
 * Unlink a pthread descriptor from the global threads list and free it.
 * Caller must hold s_threads_mux.
 */
static void pthread_delete(esp_pthread_t *pthread)
{
    uxListRemove(&pthread->list_item);
    free(pthread);
}
  112. static void pthread_task_func(void *arg)
  113. {
  114. esp_pthread_task_arg_t *task_arg = (esp_pthread_task_arg_t *)arg;
  115. ESP_LOGV(TAG, "%s ENTER %p", __FUNCTION__, task_arg->func);
  116. // wait for start
  117. xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
  118. ESP_LOGV(TAG, "%s START %p", __FUNCTION__, task_arg->func);
  119. task_arg->func(task_arg->arg);
  120. ESP_LOGV(TAG, "%s END %p", __FUNCTION__, task_arg->func);
  121. free(task_arg);
  122. /* preemptively clean up thread local storage, rather than
  123. waiting for the idle task to clean up the thread */
  124. pthread_internal_local_storage_destructor_callback();
  125. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  126. assert(false && "Failed to lock threads list!");
  127. }
  128. esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
  129. if (!pthread) {
  130. assert(false && "Failed to find pthread for current task!");
  131. }
  132. if (pthread->detached) {
  133. // auto-free for detached threads
  134. pthread_delete(pthread);
  135. } else {
  136. // Remove from list, it indicates that task has exited
  137. if (pthread->join_task) {
  138. // notify join
  139. xTaskNotify(pthread->join_task, 0, eNoAction);
  140. } else {
  141. pthread->state = PTHREAD_TASK_STATE_EXIT;
  142. }
  143. }
  144. xSemaphoreGive(s_threads_mux);
  145. vTaskDelete(NULL);
  146. ESP_LOGV(TAG, "%s EXIT", __FUNCTION__);
  147. }
  148. int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
  149. void *(*start_routine) (void *), void *arg)
  150. {
  151. TaskHandle_t xHandle = NULL;
  152. ESP_LOGV(TAG, "%s", __FUNCTION__);
  153. if (attr) {
  154. ESP_LOGE(TAG, "%s: attrs not supported!", __FUNCTION__);
  155. return ENOSYS;
  156. }
  157. esp_pthread_task_arg_t *task_arg = malloc(sizeof(esp_pthread_task_arg_t));
  158. if (task_arg == NULL) {
  159. ESP_LOGE(TAG, "Failed to allocate task args!");
  160. return ENOMEM;
  161. }
  162. memset(task_arg, 0, sizeof(esp_pthread_task_arg_t));
  163. esp_pthread_t *pthread = malloc(sizeof(esp_pthread_t));
  164. if (pthread == NULL) {
  165. ESP_LOGE(TAG, "Failed to allocate pthread data!");
  166. free(task_arg);
  167. return ENOMEM;
  168. }
  169. memset(pthread, 0, sizeof(esp_pthread_t));
  170. task_arg->func = start_routine;
  171. task_arg->arg = arg;
  172. BaseType_t res = xTaskCreate(&pthread_task_func, "pthread", CONFIG_ESP32_PTHREAD_TASK_STACK_SIZE_DEFAULT,
  173. task_arg, CONFIG_ESP32_PTHREAD_TASK_PRIO_DEFAULT, &xHandle);
  174. if(res != pdPASS) {
  175. ESP_LOGE(TAG, "Failed to create task!");
  176. free(pthread);
  177. free(task_arg);
  178. if (res == errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY) {
  179. return ENOMEM;
  180. } else {
  181. return EAGAIN;
  182. }
  183. }
  184. vListInitialiseItem((ListItem_t *)&pthread->list_item);
  185. listSET_LIST_ITEM_OWNER((ListItem_t *)&pthread->list_item, pthread);
  186. listSET_LIST_ITEM_VALUE((ListItem_t *)&pthread->list_item, (TickType_t)xHandle);
  187. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  188. assert(false && "Failed to lock threads list!");
  189. }
  190. vListInsertEnd((List_t *)&s_threads_list, (ListItem_t *)&pthread->list_item);
  191. xSemaphoreGive(s_threads_mux);
  192. // start task
  193. xTaskNotify(xHandle, 0, eNoAction);
  194. *thread = (pthread_t)pthread; // pointer value fit into pthread_t (uint32_t)
  195. ESP_LOGV(TAG, "Created task %x", (uint32_t)xHandle);
  196. return 0;
  197. }
/**
 * Wait for `thread` to terminate and reap its descriptor.
 *
 * @param thread thread ID returned by pthread_create()
 * @param retval [out, optional] always set to 0 — FreeRTOS tasks have no exit code
 * @return 0 on success; ESRCH (unknown thread), EINVAL (already has a joiner),
 *         EDEADLK (self-join or mutual join)
 */
int pthread_join(pthread_t thread, void **retval)
{
    esp_pthread_t *pthread = (esp_pthread_t *)thread;
    int ret = 0;
    bool wait = false;
    ESP_LOGV(TAG, "%s %p", __FUNCTION__, pthread);
    // find task
    if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
        assert(false && "Failed to lock threads list!");
    }
    TaskHandle_t handle = pthread_find_handle(thread);
    if (!handle) {
        // not found
        ret = ESRCH;
    } else if (pthread->join_task) {
        // already have waiting task to join
        ret = EINVAL;
    } else if (handle == xTaskGetCurrentTaskHandle()) {
        // join to self not allowed
        ret = EDEADLK;
    } else {
        esp_pthread_t *cur_pthread = pthread_find(xTaskGetCurrentTaskHandle());
        if (cur_pthread && cur_pthread->join_task == handle) {
            // join to each other not allowed
            ret = EDEADLK;
        } else {
            if (pthread->state == PTHREAD_TASK_STATE_RUN) {
                // Target still running: register as its joiner and block below
                // until pthread_task_func() notifies us on exit.
                pthread->join_task = xTaskGetCurrentTaskHandle();
                wait = true;
            } else {
                // Target already exited: reap its descriptor immediately.
                pthread_delete(pthread);
            }
        }
    }
    xSemaphoreGive(s_threads_mux);
    if (ret == 0 && wait) {
        // Sleep until the target thread's exit notification, then free its
        // descriptor under the list lock.
        xTaskNotifyWait(0, 0, NULL, portMAX_DELAY);
        if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
            assert(false && "Failed to lock threads list!");
        }
        pthread_delete(pthread);
        xSemaphoreGive(s_threads_mux);
    }
    if (retval) {
        *retval = 0; // no exit code in FreeRTOS
    }
    ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
    return ret;
}
  247. int pthread_detach(pthread_t thread)
  248. {
  249. esp_pthread_t *pthread = (esp_pthread_t *)thread;
  250. int ret = 0;
  251. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  252. assert(false && "Failed to lock threads list!");
  253. }
  254. TaskHandle_t handle = pthread_find_handle(thread);
  255. if (!handle) {
  256. ret = ESRCH;
  257. } else {
  258. pthread->detached = true;
  259. }
  260. xSemaphoreGive(s_threads_mux);
  261. ESP_LOGV(TAG, "%s %p EXIT %d", __FUNCTION__, pthread, ret);
  262. return ret;
  263. }
  264. int pthread_cancel(pthread_t thread)
  265. {
  266. ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
  267. return ENOSYS;
  268. }
/**
 * Yield the processor to another ready task of equal priority.
 * vTaskDelay(0) performs a round-robin yield without blocking the caller.
 * @return 0 always.
 */
int sched_yield( void )
{
    vTaskDelay(0);
    return 0;
}
  274. pthread_t pthread_self(void)
  275. {
  276. if (xSemaphoreTake(s_threads_mux, portMAX_DELAY) != pdTRUE) {
  277. assert(false && "Failed to lock threads list!");
  278. }
  279. esp_pthread_t *pthread = pthread_find(xTaskGetCurrentTaskHandle());
  280. if (!pthread) {
  281. assert(false && "Failed to find current thread ID!");
  282. }
  283. xSemaphoreGive(s_threads_mux);
  284. return (pthread_t)pthread;
  285. }
  286. int pthread_equal(pthread_t t1, pthread_t t2)
  287. {
  288. return t1 == t2 ? 1 : 0;
  289. }
  290. /***************** ONCE ******************/
/**
 * Execute init_routine at most once for a given once_control, across all
 * threads and both cores (uses the port's atomic compare-and-set).
 *
 * @return 0 on success, EINVAL for NULL/uninitialized arguments or a
 *         once_control outside internal DRAM.
 *
 * NOTE(review): a second caller whose CAS loses returns immediately, possibly
 * before the winner has finished init_routine(); POSIX requires it to wait —
 * confirm whether callers rely on that guarantee.
 */
int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
    if (once_control == NULL || init_routine == NULL || !once_control->is_initialized) {
        ESP_LOGE(TAG, "%s: Invalid args!", __FUNCTION__);
        return EINVAL;
    }
    // Check if once_control belongs to internal DRAM for uxPortCompare to succeed
    if (!esp_ptr_internal(once_control)) {
        ESP_LOGE(TAG, "%s: once_control should belong to internal DRAM region!", __FUNCTION__);
        return EINVAL;
    }
    uint32_t res = 1;
    // Atomically swing init_executed 0 -> 1; res receives the previous value.
    uxPortCompareSet((uint32_t *) &once_control->init_executed, 0, &res);
    // Check if compare and set was successful (we observed 0, i.e. first caller)
    if (res == 0) {
        ESP_LOGV(TAG, "%s: call init_routine %p", __FUNCTION__, once_control);
        init_routine();
    }
    return 0;
}
  311. /***************** MUTEX ******************/
  312. static int mutexattr_check(const pthread_mutexattr_t *attr)
  313. {
  314. if (attr->type < PTHREAD_MUTEX_NORMAL || attr->type > PTHREAD_MUTEX_RECURSIVE) {
  315. return EINVAL;
  316. }
  317. return 0;
  318. }
  319. int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
  320. {
  321. int type = PTHREAD_MUTEX_NORMAL;
  322. if (!mutex) {
  323. return EINVAL;
  324. }
  325. if (attr) {
  326. if (!attr->is_initialized) {
  327. return EINVAL;
  328. }
  329. int res = mutexattr_check(attr);
  330. if (res) {
  331. return res;
  332. }
  333. type = attr->type;
  334. }
  335. esp_pthread_mutex_t *mux = (esp_pthread_mutex_t *)malloc(sizeof(esp_pthread_mutex_t));
  336. if (!mux) {
  337. return ENOMEM;
  338. }
  339. mux->type = type;
  340. if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
  341. mux->sem = xSemaphoreCreateRecursiveMutex();
  342. } else {
  343. mux->sem = xSemaphoreCreateMutex();
  344. }
  345. if (!mux->sem) {
  346. free(mux);
  347. return EAGAIN;
  348. }
  349. *mutex = (pthread_mutex_t)mux; // pointer value fit into pthread_mutex_t (uint32_t)
  350. return 0;
  351. }
  352. int pthread_mutex_destroy(pthread_mutex_t *mutex)
  353. {
  354. esp_pthread_mutex_t *mux;
  355. ESP_LOGV(TAG, "%s %p", __FUNCTION__, mutex);
  356. if (!mutex) {
  357. return EINVAL;
  358. }
  359. mux = (esp_pthread_mutex_t *)*mutex;
  360. // check if mux is busy
  361. int res = pthread_mutex_lock_internal(mux, 0);
  362. if (res == EBUSY) {
  363. return EBUSY;
  364. }
  365. vSemaphoreDelete(mux->sem);
  366. free(mux);
  367. return 0;
  368. }
  369. static int IRAM_ATTR pthread_mutex_lock_internal(esp_pthread_mutex_t *mux, TickType_t tmo)
  370. {
  371. if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
  372. if (xSemaphoreTakeRecursive(mux->sem, tmo) != pdTRUE) {
  373. return EBUSY;
  374. }
  375. } else {
  376. if (xSemaphoreTake(mux->sem, tmo) != pdTRUE) {
  377. return EBUSY;
  378. }
  379. }
  380. return 0;
  381. }
/**
 * Lazily allocate the wrapper for a mutex declared with
 * PTHREAD_MUTEX_INITIALIZER. Classic double-checked pattern: the sentinel is
 * re-tested under the spinlock in case another task/core won the race.
 *
 * @return 0 on success (or nothing to do), else pthread_mutex_init()'s error.
 *
 * NOTE(review): pthread_mutex_init() allocates memory and creates a semaphore
 * while inside portENTER_CRITICAL — verify this is acceptable on this port
 * (heap/scheduler calls with interrupts masked).
 */
static int pthread_mutex_init_if_static(pthread_mutex_t *mutex) {
    int res = 0;
    if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
        portENTER_CRITICAL(&s_mutex_init_lock);
        if ((intptr_t) *mutex == PTHREAD_MUTEX_INITIALIZER) {
            res = pthread_mutex_init(mutex, NULL);
        }
        portEXIT_CRITICAL(&s_mutex_init_lock);
    }
    return res;
}
  393. int IRAM_ATTR pthread_mutex_lock(pthread_mutex_t *mutex)
  394. {
  395. if (!mutex) {
  396. return EINVAL;
  397. }
  398. int res = pthread_mutex_init_if_static(mutex);
  399. if (res != 0) {
  400. return res;
  401. }
  402. return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, portMAX_DELAY);
  403. }
  404. int IRAM_ATTR pthread_mutex_trylock(pthread_mutex_t *mutex)
  405. {
  406. if (!mutex) {
  407. return EINVAL;
  408. }
  409. int res = pthread_mutex_init_if_static(mutex);
  410. if (res != 0) {
  411. return res;
  412. }
  413. return pthread_mutex_lock_internal((esp_pthread_mutex_t *)*mutex, 0);
  414. }
  415. int IRAM_ATTR pthread_mutex_unlock(pthread_mutex_t *mutex)
  416. {
  417. esp_pthread_mutex_t *mux;
  418. if (!mutex) {
  419. return EINVAL;
  420. }
  421. mux = (esp_pthread_mutex_t *)*mutex;
  422. if (mux->type == PTHREAD_MUTEX_RECURSIVE) {
  423. xSemaphoreGiveRecursive(mux->sem);
  424. } else {
  425. xSemaphoreGive(mux->sem);
  426. }
  427. return 0;
  428. }
  429. int pthread_mutexattr_init(pthread_mutexattr_t *attr)
  430. {
  431. if (!attr) {
  432. return EINVAL;
  433. }
  434. attr->type = PTHREAD_MUTEX_NORMAL;
  435. attr->is_initialized = 1;
  436. return 0;
  437. }
  438. int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
  439. {
  440. if (!attr) {
  441. return EINVAL;
  442. }
  443. attr->is_initialized = 0;
  444. return 0;
  445. }
  446. int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
  447. {
  448. ESP_LOGE(TAG, "%s: not supported!", __FUNCTION__);
  449. return ENOSYS;
  450. }
  451. int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
  452. {
  453. if (!attr) {
  454. return EINVAL;
  455. }
  456. pthread_mutexattr_t tmp_attr = {.type = type};
  457. int res = mutexattr_check(&tmp_attr);
  458. if (!res) {
  459. attr->type = type;
  460. }
  461. return res;
  462. }