/* pthread_cond_var.c */
/*
 * SPDX-FileCopyrightText: 2017-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <sys/time.h>
#include "esp_err.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/list.h"
#include "pthread_internal.h"
  19. const static char *TAG = "esp_pthread";
/**
 * One task blocked on a condition variable.
 *
 * Each waiter lives on the waiting task's own stack (see
 * pthread_cond_timedwait) and is linked into the condition variable's
 * waiter list only for the duration of the wait; signal/broadcast wake
 * the task by giving wait_sem.
 */
typedef struct esp_pthread_cond_waiter {
    SemaphoreHandle_t wait_sem;                ///< task specific semaphore to wait on
    TAILQ_ENTRY(esp_pthread_cond_waiter) link; ///< stash on the list of semaphores to be notified
} esp_pthread_cond_waiter_t;
/**
 * Internal representation of a pthread condition variable.
 *
 * A pthread_cond_t stores a pointer to one of these (allocated in
 * pthread_cond_init): a recursive lock paired with the list of
 * currently blocked waiters that the lock protects.
 */
typedef struct esp_pthread_cond {
    _lock_t lock;                                      ///< lock that protects the list of semaphores
    TAILQ_HEAD(, esp_pthread_cond_waiter) waiter_list; ///< head of the list of semaphores
} esp_pthread_cond_t;
/**
 * Validate 'cv' and lazily initialize it if it still holds the static
 * initializer value.
 *
 * @param cv  Condition variable handle supplied by the caller.
 * @return 0 on success (cv now refers to an initialized cond var),
 *         EINVAL if cv is NULL or holds 0, or any error returned by
 *         pthread_cond_init().
 *
 * Double-checked locking: the first PTHREAD_COND_INITIALIZER test runs
 * outside the critical section as a fast path; the test is repeated
 * under pthread_lazy_init_lock so that exactly one task performs the
 * initialization when several race here.
 */
static int s_check_and_init_if_static(pthread_cond_t *cv)
{
    int res = 0;
    if (cv == NULL || *cv == (pthread_cond_t) 0) {
        return EINVAL;
    }
    if (*cv == PTHREAD_COND_INITIALIZER) {
        portENTER_CRITICAL(&pthread_lazy_init_lock);
        if (*cv == PTHREAD_COND_INITIALIZER) {
            res = pthread_cond_init(cv, NULL);
        }
        portEXIT_CRITICAL(&pthread_lazy_init_lock);
    }
    return res;
}
  43. int pthread_cond_signal(pthread_cond_t *cv)
  44. {
  45. int res = s_check_and_init_if_static(cv);
  46. if (res) {
  47. return res;
  48. }
  49. esp_pthread_cond_t *cond = (esp_pthread_cond_t *) *cv;
  50. _lock_acquire_recursive(&cond->lock);
  51. esp_pthread_cond_waiter_t *entry;
  52. entry = TAILQ_FIRST(&cond->waiter_list);
  53. if (entry) {
  54. xSemaphoreGive(entry->wait_sem);
  55. }
  56. _lock_release_recursive(&cond->lock);
  57. return 0;
  58. }
  59. int pthread_cond_broadcast(pthread_cond_t *cv)
  60. {
  61. int res = s_check_and_init_if_static(cv);
  62. if (res) {
  63. return res;
  64. }
  65. esp_pthread_cond_t *cond = (esp_pthread_cond_t *) *cv;
  66. _lock_acquire_recursive(&cond->lock);
  67. esp_pthread_cond_waiter_t *entry;
  68. TAILQ_FOREACH(entry, &cond->waiter_list, link) {
  69. xSemaphoreGive(entry->wait_sem);
  70. }
  71. _lock_release_recursive(&cond->lock);
  72. return 0;
  73. }
  74. int pthread_cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut)
  75. {
  76. return pthread_cond_timedwait(cv, mut, NULL);
  77. }
/**
 * Wait on 'cv' until signaled/broadcast or until the absolute time 'to'.
 *
 * The caller must hold 'mut'. The mutex is released while blocking and
 * re-acquired before returning, whatever the outcome.
 *
 * @param cv  Condition variable (lazily initialized if static).
 * @param mut Mutex protecting the caller's predicate; held on entry.
 * @param to  Absolute deadline, compared against gettimeofday(), or NULL
 *            to wait forever.
 *
 * @return 0 when woken, ETIMEDOUT when the deadline passed, or an error
 *         from the lazy initialization.
 */
int pthread_cond_timedwait(pthread_cond_t *cv, pthread_mutex_t *mut, const struct timespec *to)
{
    int ret;
    TickType_t timeout_ticks;
    int res = s_check_and_init_if_static(cv);
    if (res) {
        return res;
    }
    esp_pthread_cond_t *cond = (esp_pthread_cond_t *) *cv;
    if (to == NULL) {
        // No deadline: block on the semaphore indefinitely.
        timeout_ticks = portMAX_DELAY;
    } else {
        // Convert the absolute timespec deadline into a relative tick count.
        struct timeval abs_time, cur_time, diff_time;
        long timeout_msec;
        gettimeofday(&cur_time, NULL);
        abs_time.tv_sec = to->tv_sec;
        // Round up nanoseconds to the next microsecond
        abs_time.tv_usec = (to->tv_nsec + 1000 - 1) / 1000;
        if (timercmp(&abs_time, &cur_time, <)) {
            /* As per the pthread spec, if the time has already
             * passed, no sleep is required.
             */
            timeout_msec = 0;
        } else {
            timersub(&abs_time, &cur_time, &diff_time);
            // Round up timeout microseconds to the next millisecond
            timeout_msec = (diff_time.tv_sec * 1000) +
                           ((diff_time.tv_usec + 1000 - 1) / 1000);
        }
        if (timeout_msec <= 0) {
            // Deadline already reached: report timeout without blocking.
            // The mutex was never released, so it is still held here as
            // POSIX requires on ETIMEDOUT.
            return ETIMEDOUT;
        }
        // Round up milliseconds to the next tick
        timeout_ticks = (timeout_msec + portTICK_PERIOD_MS - 1) / portTICK_PERIOD_MS;
        /* We have to add 1 more tick of delay
           The reason for this is that vTaskDelay(1) will sleep until the start of the next tick,
           which can be any amount of time up to one tick period. So if we don't add one more tick,
           we're likely to timeout a small time (< 1 tick period) before the requested timeout.
           If we add 1 tick then we will timeout a small time (< 1 tick period) after the
           requested timeout.
         */
        timeout_ticks += 1;
    }
    // Waiter record lives on this task's stack; it is linked into the
    // cond var's list only for the duration of this call.
    esp_pthread_cond_waiter_t w;
    // Around 80 bytes
    StaticSemaphore_t sem_buffer;
    // Create semaphore: first take will block
    w.wait_sem = xSemaphoreCreateCountingStatic(1, 0, &sem_buffer);
    _lock_acquire_recursive(&cond->lock);
    TAILQ_INSERT_TAIL(&cond->waiter_list, &w, link);
    _lock_release_recursive(&cond->lock);
    // Release the caller's mutex only after the waiter is enqueued: a signal
    // arriving between this unlock and the take below gives the semaphore,
    // which latches the wakeup instead of losing it.
    pthread_mutex_unlock(mut);
    if (xSemaphoreTake(w.wait_sem, timeout_ticks) == pdTRUE) {
        ret = 0;
    } else {
        ret = ETIMEDOUT;
    }
    // Unlink under the lock before deleting the semaphore so that a
    // concurrent signal/broadcast can never give a deleted handle.
    _lock_acquire_recursive(&cond->lock);
    TAILQ_REMOVE(&cond->waiter_list, &w, link);
    _lock_release_recursive(&cond->lock);
    vSemaphoreDelete(w.wait_sem);
    // Re-acquire the caller's mutex before returning, as POSIX requires.
    pthread_mutex_lock(mut);
    return ret;
}
// The following pthread_condattr_* function definitions are placed here to enable builds of code
// that references these functions but does not actively use them.
  144. int pthread_condattr_init(pthread_condattr_t *attr)
  145. {
  146. ESP_LOGW(TAG, "%s not yet implemented (%p)", __FUNCTION__, attr);
  147. return ENOSYS;
  148. }
  149. int pthread_condattr_destroy(pthread_condattr_t *attr)
  150. {
  151. ESP_LOGW(TAG, "%s not yet implemented (%p)", __FUNCTION__, attr);
  152. return ENOSYS;
  153. }
  154. int pthread_condattr_getpshared(const pthread_condattr_t *restrict attr, int *restrict pshared)
  155. {
  156. ESP_LOGW(TAG, "%s not yet implemented (%p)", __FUNCTION__, attr);
  157. return ENOSYS;
  158. }
  159. int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
  160. {
  161. ESP_LOGW(TAG, "%s not yet implemented (%p)", __FUNCTION__, attr);
  162. return ENOSYS;
  163. }
  164. int pthread_condattr_getclock(const pthread_condattr_t *restrict attr, clockid_t *restrict clock_id)
  165. {
  166. ESP_LOGW(TAG, "%s not yet implemented (%p)", __FUNCTION__, attr);
  167. return ENOSYS;
  168. }
  169. int pthread_condattr_setclock(pthread_condattr_t *attr, clockid_t clock_id)
  170. {
  171. ESP_LOGW(TAG, "%s: not yet supported!", __func__);
  172. return 0; // moved here from newlib, where it was 0 instead of ENOSYS
  173. }
  174. int pthread_cond_init(pthread_cond_t *cv, const pthread_condattr_t *att)
  175. {
  176. (void) att; /* Unused argument as of now */
  177. if (cv == NULL) {
  178. return EINVAL;
  179. }
  180. esp_pthread_cond_t *cond = (esp_pthread_cond_t *) calloc(1, sizeof(esp_pthread_cond_t));
  181. if (cond == NULL) {
  182. return ENOMEM;
  183. }
  184. _lock_init_recursive(&cond->lock);
  185. TAILQ_INIT(&cond->waiter_list);
  186. *cv = (pthread_cond_t) cond;
  187. return 0;
  188. }
  189. int pthread_cond_destroy(pthread_cond_t *cv)
  190. {
  191. int ret = 0;
  192. if (cv == NULL || *cv == (pthread_cond_t) 0) {
  193. return EINVAL;
  194. }
  195. if (*cv == PTHREAD_COND_INITIALIZER) {
  196. return 0; // never initialized
  197. }
  198. esp_pthread_cond_t *cond = (esp_pthread_cond_t *) *cv;
  199. if (!cond) {
  200. return EINVAL;
  201. }
  202. _lock_acquire_recursive(&cond->lock);
  203. if (!TAILQ_EMPTY(&cond->waiter_list)) {
  204. ret = EBUSY;
  205. }
  206. _lock_release_recursive(&cond->lock);
  207. if (ret == 0) {
  208. *cv = (pthread_cond_t) 0;
  209. _lock_close_recursive(&cond->lock);
  210. free(cond);
  211. }
  212. return ret;
  213. }
  214. /* Hook function to force linking this file */
  215. void pthread_include_pthread_cond_var_impl(void)
  216. {
  217. }