zephyr_thread.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "platform_api_vmcore.h"
  6. #include "platform_api_extension.h"
/* Minimal assert for platforms where abort() may be unavailable: print the
 * failed expression and its location, then force a fatal fault by dividing
 * by zero (printf(" ") returns 1, so _count - 1 is 0), and spin forever. */
#define bh_assert(v) do { \
    if (!(v)) { \
        int _count; \
        printf("\nASSERTION FAILED: %s, at %s, line %d\n", \
               #v, __FILE__, __LINE__); \
        _count = printf(" "); \
        /* divided by 0 to make it abort */ \
        printf("%d\n", _count / (_count - 1)); \
        while (1); \
    } \
} while (0)
/* One waiter on a thread's join list or a condition variable's wait list:
   the waiter blocks on `sem` until the terminating/signaling side gives it. */
typedef struct os_thread_wait_node {
    struct k_sem sem;         /* taken by the waiter, given by the signaler */
    os_thread_wait_list next; /* singly-linked list of waiters */
} os_thread_wait_node;
/* Per-thread bookkeeping record.  The thread's stack is carried inline at
   the end of the structure: creators allocate
   offsetof(os_thread_data, stack) + stack_size bytes. */
typedef struct os_thread_data {
    /* Next thread data */
    struct os_thread_data *next;
    /* Zephyr thread handle */
    korp_tid tid;
    /* Jeff thread local root */
    void *tlr;
    /* Lock for waiting list */
    struct k_mutex wait_list_lock;
    /* Waiting list of other threads who are joining this thread */
    os_thread_wait_list thread_wait_list;
    /* Thread stack size */
    unsigned stack_size;
    /* Thread stack (pre-C99 trailing-array idiom; actual size is
       stack_size, see the allocation in os_thread_create_with_prio) */
    char stack[1];
} os_thread_data;
/* Wrapper around the Zephyr thread control block so terminated threads can
   be tracked and their memory reclaimed lazily. */
typedef struct os_thread_obj {
    struct k_thread thread; /* must stay first: korp_tid handles are cast
                               directly to os_thread_obj* and passed to
                               k_thread_create() as struct k_thread* */
    /* Whether the thread is terminated and this thread object is to
       be freed in the future. */
    bool to_be_freed;
    struct os_thread_obj *next;
} os_thread_obj;
/* Set once by os_thread_sys_init(); guards against re-initialization. */
static bool is_thread_sys_inited = false;
/* Thread data of supervisor thread (the thread that called
   os_thread_sys_init; statically allocated, never freed) */
static os_thread_data supervisor_thread_data;
/* Lock for thread data list */
static struct k_mutex thread_data_lock;
/* Thread data list */
static os_thread_data *thread_data_list = NULL;
/* Lock for thread object list */
static struct k_mutex thread_obj_lock;
/* Thread object list */
static os_thread_obj *thread_obj_list = NULL;
  56. static void thread_data_list_add(os_thread_data *thread_data)
  57. {
  58. k_mutex_lock(&thread_data_lock, K_FOREVER);
  59. if (!thread_data_list)
  60. thread_data_list = thread_data;
  61. else {
  62. /* If already in list, just return */
  63. os_thread_data *p = thread_data_list;
  64. while (p) {
  65. if (p == thread_data) {
  66. k_mutex_unlock(&thread_data_lock);
  67. return;
  68. }
  69. p = p->next;
  70. }
  71. /* Set as head of list */
  72. thread_data->next = thread_data_list;
  73. thread_data_list = thread_data;
  74. }
  75. k_mutex_unlock(&thread_data_lock);
  76. }
  77. static void thread_data_list_remove(os_thread_data *thread_data)
  78. {
  79. k_mutex_lock(&thread_data_lock, K_FOREVER);
  80. if (thread_data_list) {
  81. if (thread_data_list == thread_data)
  82. thread_data_list = thread_data_list->next;
  83. else {
  84. /* Search and remove it from list */
  85. os_thread_data *p = thread_data_list;
  86. while (p && p->next != thread_data)
  87. p = p->next;
  88. if (p && p->next == thread_data)
  89. p->next = p->next->next;
  90. }
  91. }
  92. k_mutex_unlock(&thread_data_lock);
  93. }
  94. static os_thread_data *
  95. thread_data_list_lookup(k_tid_t tid)
  96. {
  97. k_mutex_lock(&thread_data_lock, K_FOREVER);
  98. if (thread_data_list) {
  99. os_thread_data *p = thread_data_list;
  100. while (p) {
  101. if (p->tid == tid) {
  102. /* Found */
  103. k_mutex_unlock(&thread_data_lock);
  104. return p;
  105. }
  106. p = p->next;
  107. }
  108. }
  109. k_mutex_unlock(&thread_data_lock);
  110. return NULL;
  111. }
  112. static void thread_obj_list_add(os_thread_obj *thread_obj)
  113. {
  114. k_mutex_lock(&thread_obj_lock, K_FOREVER);
  115. if (!thread_obj_list)
  116. thread_obj_list = thread_obj;
  117. else {
  118. /* Set as head of list */
  119. thread_obj->next = thread_obj_list;
  120. thread_obj_list = thread_obj;
  121. }
  122. k_mutex_unlock(&thread_obj_lock);
  123. }
  124. static void thread_obj_list_reclaim()
  125. {
  126. os_thread_obj *p, *p_prev;
  127. k_mutex_lock(&thread_obj_lock, K_FOREVER);
  128. p_prev = NULL;
  129. p = thread_obj_list;
  130. while (p) {
  131. if (p->to_be_freed) {
  132. if (p_prev == NULL) { /* p is the head of list */
  133. thread_obj_list = p->next;
  134. BH_FREE(p);
  135. p = thread_obj_list;
  136. } else { /* p is not the head of list */
  137. p_prev->next = p->next;
  138. BH_FREE(p);
  139. p = p_prev->next;
  140. }
  141. } else {
  142. p_prev = p;
  143. p = p->next;
  144. }
  145. }
  146. k_mutex_unlock(&thread_obj_lock);
  147. }
  148. int os_thread_sys_init()
  149. {
  150. if (is_thread_sys_inited)
  151. return BHT_OK;
  152. k_mutex_init(&thread_data_lock);
  153. k_mutex_init(&thread_obj_lock);
  154. /* Initialize supervisor thread data */
  155. memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
  156. supervisor_thread_data.tid = k_current_get();
  157. /* Set as head of thread data list */
  158. thread_data_list = &supervisor_thread_data;
  159. is_thread_sys_inited = true;
  160. return BHT_OK;
  161. }
  162. void os_thread_sys_destroy(void)
  163. {
  164. if (is_thread_sys_inited) {
  165. is_thread_sys_inited = false;
  166. }
  167. }
  168. static os_thread_data *
  169. thread_data_current()
  170. {
  171. k_tid_t tid = k_current_get();
  172. return thread_data_list_lookup(tid);
  173. }
/* Runs on the terminating thread itself, after the user routine returns:
   wake all joiners, unregister the thread data, and mark the thread object
   for lazy reclamation by the next thread creation.  Note the ordering:
   `to_be_freed` is set through thread_data->tid immediately before
   thread_data itself is freed. */
static void os_thread_cleanup(void)
{
    os_thread_data *thread_data = thread_data_current();

    bh_assert(thread_data != NULL);
    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (thread_data->thread_wait_list) {
        /* Signal each joining thread */
        os_thread_wait_list head = thread_data->thread_wait_list;
        while (head) {
            os_thread_wait_list next = head->next;
            k_sem_give(&head->sem);
            /* head will be freed by joining thread */
            head = next;
        }
        thread_data->thread_wait_list = NULL;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);
    thread_data_list_remove(thread_data);
    /* Set flag to true for the next thread creating to
       free the thread object */
    ((os_thread_obj *) thread_data->tid)->to_be_freed = true;
    BH_FREE(thread_data);
}
  197. static void os_thread_wrapper(void *start, void *arg, void *thread_data)
  198. {
  199. /* Set thread custom data */
  200. ((os_thread_data*) thread_data)->tid = k_current_get();
  201. thread_data_list_add(thread_data);
  202. ((thread_start_routine_t) start)(arg);
  203. os_thread_cleanup();
  204. }
  205. int os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
  206. unsigned int stack_size)
  207. {
  208. return os_thread_create_with_prio(p_tid, start, arg, stack_size,
  209. BH_THREAD_DEFAULT_PRIORITY);
  210. }
  211. int os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
  212. void *arg, unsigned int stack_size, int prio)
  213. {
  214. korp_tid tid;
  215. os_thread_data *thread_data;
  216. unsigned thread_data_size;
  217. if (!p_tid || !stack_size)
  218. return BHT_ERROR;
  219. /* Free the thread objects of terminated threads */
  220. thread_obj_list_reclaim();
  221. /* Create and initialize thread object */
  222. if (!(tid = BH_MALLOC(sizeof(os_thread_obj))))
  223. return BHT_ERROR;
  224. memset(tid, 0, sizeof(os_thread_obj));
  225. /* Create and initialize thread data */
  226. thread_data_size = offsetof(os_thread_data, stack) + stack_size;
  227. if (!(thread_data = BH_MALLOC(thread_data_size))) {
  228. BH_FREE(tid);
  229. return BHT_ERROR;
  230. }
  231. memset(thread_data, 0, thread_data_size);
  232. k_mutex_init(&thread_data->wait_list_lock);
  233. thread_data->stack_size = stack_size;
  234. thread_data->tid = tid;
  235. /* Create the thread */
  236. if (!((tid = k_thread_create(tid, (k_thread_stack_t *) thread_data->stack,
  237. stack_size, os_thread_wrapper, start, arg, thread_data, prio, 0,
  238. K_NO_WAIT)))) {
  239. BH_FREE(tid);
  240. BH_FREE(thread_data);
  241. return BHT_ERROR;
  242. }
  243. bh_assert(tid == thread_data->tid);
  244. /* Set thread custom data */
  245. thread_data_list_add(thread_data);
  246. thread_obj_list_add((os_thread_obj*) tid);
  247. *p_tid = tid;
  248. return BHT_OK;
  249. }
  250. korp_tid os_self_thread()
  251. {
  252. return (korp_tid) k_current_get();
  253. }
  254. int os_thread_join(korp_tid thread, void **value_ptr)
  255. {
  256. (void) value_ptr;
  257. os_thread_data *thread_data;
  258. os_thread_wait_node *node;
  259. /* Create wait node and append it to wait list */
  260. if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
  261. return BHT_ERROR;
  262. k_sem_init(&node->sem, 0, 1);
  263. node->next = NULL;
  264. /* Get thread data */
  265. thread_data = thread_data_list_lookup(thread);
  266. bh_assert(thread_data != NULL);
  267. k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
  268. if (!thread_data->thread_wait_list)
  269. thread_data->thread_wait_list = node;
  270. else {
  271. /* Add to end of waiting list */
  272. os_thread_wait_node *p = thread_data->thread_wait_list;
  273. while (p->next)
  274. p = p->next;
  275. p->next = node;
  276. }
  277. k_mutex_unlock(&thread_data->wait_list_lock);
  278. /* Wait the sem */
  279. k_sem_take(&node->sem, K_FOREVER);
  280. /* Wait some time for the thread to be actually terminated */
  281. k_sleep(Z_TIMEOUT_MS(100));
  282. /* Destroy resource */
  283. BH_FREE(node);
  284. return BHT_OK;
  285. }
  286. int os_mutex_init(korp_mutex *mutex)
  287. {
  288. k_mutex_init(mutex);
  289. return BHT_OK;
  290. }
  291. int os_recursive_mutex_init(korp_mutex *mutex)
  292. {
  293. k_mutex_init(mutex);
  294. return BHT_OK;
  295. }
  296. int os_mutex_destroy(korp_mutex *mutex)
  297. {
  298. (void) mutex;
  299. return BHT_OK;
  300. }
  301. void os_mutex_lock(korp_mutex *mutex)
  302. {
  303. k_mutex_lock(mutex, K_FOREVER);
  304. }
  305. void os_mutex_unlock(korp_mutex *mutex)
  306. {
  307. k_mutex_unlock(mutex);
  308. }
  309. int os_cond_init(korp_cond *cond)
  310. {
  311. k_mutex_init(&cond->wait_list_lock);
  312. cond->thread_wait_list = NULL;
  313. return BHT_OK;
  314. }
  315. int os_cond_destroy(korp_cond *cond)
  316. {
  317. (void) cond;
  318. return BHT_OK;
  319. }
/* Shared implementation for os_cond_wait / os_cond_reltimedwait: enqueue a
   wait node on the condition's list, release `mutex` while blocking on the
   node's semaphore (bounded by `mills` milliseconds when `timed`), then
   re-acquire `mutex` and dequeue the node.  Returns BHT_OK (also after a
   timeout) or BHT_ERROR on allocation failure. */
static int os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex,
                                 bool timed, int mills)
{
    os_thread_wait_node *node;

    /* Create wait node and append it to wait list */
    if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
        return BHT_ERROR;
    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (!cond->thread_wait_list)
        cond->thread_wait_list = node;
    else {
        /* Add to end of wait list */
        os_thread_wait_node *p = cond->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&cond->wait_list_lock);

    /* Unlock mutex, wait sem and lock mutex again */
    k_mutex_unlock(mutex);
    k_sem_take(&node->sem, timed ? Z_TIMEOUT_MS(mills) : K_FOREVER);
    k_mutex_lock(mutex, K_FOREVER);

    /* Remove wait node from wait list.  The node is always still linked:
       only this function unlinks nodes; signalers merely give the sem. */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list == node)
        cond->thread_wait_list = node->next;
    else {
        /* Remove from the wait list */
        os_thread_wait_node *p = cond->thread_wait_list;
        while (p->next != node)
            p = p->next;
        p->next = node->next;
    }
    BH_FREE(node);
    k_mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}
  359. int os_cond_wait(korp_cond *cond, korp_mutex *mutex)
  360. {
  361. return os_cond_wait_internal(cond, mutex, false, 0);
  362. }
  363. int os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, int useconds)
  364. {
  365. if (useconds == BHT_WAIT_FOREVER)
  366. return os_cond_wait_internal(cond, mutex, false, 0);
  367. else
  368. return os_cond_wait_internal(cond, mutex, true, useconds / 1000);
  369. }
  370. int os_cond_signal(korp_cond *cond)
  371. {
  372. /* Signal the head wait node of wait list */
  373. k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
  374. if (cond->thread_wait_list)
  375. k_sem_give(&cond->thread_wait_list->sem);
  376. k_mutex_unlock(&cond->wait_list_lock);
  377. return BHT_OK;
  378. }