/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "bh_thread.h"
#include "bh_assert.h"
#include "bh_log.h"
#include "bh_memory.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memset() */
typedef struct bh_thread_wait_node {
    struct k_sem sem;
    bh_thread_wait_list next;
} bh_thread_wait_node;

typedef struct bh_thread_data {
    /* Next thread data */
    struct bh_thread_data *next;
    /* Zephyr thread handle */
    korp_tid tid;
    /* Jeff thread local root */
    void *tlr;
    /* Lock for waiting list */
    struct k_mutex wait_list_lock;
    /* Waiting list of other threads who are joining this thread */
    bh_thread_wait_list thread_wait_list;
    /* Thread stack size */
    unsigned stack_size;
    /* Thread stack */
    char stack[1];
} bh_thread_data;

typedef struct bh_thread_obj {
    struct k_thread thread;
    /* Whether the thread is terminated and this thread object is to
       be freed in the future. */
    bool to_be_freed;
    struct bh_thread_obj *next;
} bh_thread_obj;
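/*
 * The trailing `stack[1]` member makes bh_thread_data a variable-length
 * structure: the header and the thread stack share one allocation.  A
 * minimal sketch of the pattern (illustrative only; it mirrors what
 * _vm_thread_create_with_prio() does below):
 *
 *     unsigned size = offsetof(bh_thread_data, stack) + stack_size;
 *     bh_thread_data *d = bh_malloc(size);
 *     // d->stack .. d->stack + stack_size becomes the Zephyr thread stack
 */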
static bool is_thread_sys_inited = false;

/* Thread data of supervisor thread */
static bh_thread_data supervisor_thread_data;

/* Lock for thread data list */
static struct k_mutex thread_data_lock;

/* Thread data list */
static bh_thread_data *thread_data_list = NULL;

/* Lock for thread object list */
static struct k_mutex thread_obj_lock;

/* Thread object list */
static bh_thread_obj *thread_obj_list = NULL;
static void thread_data_list_add(bh_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (!thread_data_list)
        thread_data_list = thread_data;
    else {
        /* If already in list, just return */
        bh_thread_data *p = thread_data_list;
        while (p) {
            if (p == thread_data) {
                k_mutex_unlock(&thread_data_lock);
                return;
            }
            p = p->next;
        }

        /* Set as head of list */
        thread_data->next = thread_data_list;
        thread_data_list = thread_data;
    }
    k_mutex_unlock(&thread_data_lock);
}
static void thread_data_list_remove(bh_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        if (thread_data_list == thread_data)
            thread_data_list = thread_data_list->next;
        else {
            /* Search and remove it from list */
            bh_thread_data *p = thread_data_list;
            while (p && p->next != thread_data)
                p = p->next;
            if (p && p->next == thread_data)
                p->next = p->next->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
}
static bh_thread_data *
thread_data_list_lookup(k_tid_t tid)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        bh_thread_data *p = thread_data_list;
        while (p) {
            if (p->tid == tid) {
                /* Found */
                k_mutex_unlock(&thread_data_lock);
                return p;
            }
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
    return NULL;
}
static void thread_obj_list_add(bh_thread_obj *thread_obj)
{
    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    if (!thread_obj_list)
        thread_obj_list = thread_obj;
    else {
        /* Set as head of list */
        thread_obj->next = thread_obj_list;
        thread_obj_list = thread_obj;
    }
    k_mutex_unlock(&thread_obj_lock);
}
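/* A Zephyr thread cannot safely free its own struct k_thread while it is
   still running, so vm_thread_cleanup() only marks the thread object
   `to_be_freed`; the next call to _vm_thread_create_with_prio() sweeps the
   list here and releases the marked objects. */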
static void thread_obj_list_reclaim(void)
{
    bh_thread_obj *p, *p_prev;

    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    p_prev = NULL;
    p = thread_obj_list;
    while (p) {
        if (p->to_be_freed) {
            if (p_prev == NULL) { /* p is the head of list */
                thread_obj_list = p->next;
                bh_free(p);
                p = thread_obj_list;
            } else { /* p is not the head of list */
                p_prev->next = p->next;
                bh_free(p);
                p = p_prev->next;
            }
        } else {
            p_prev = p;
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_obj_lock);
}
int _vm_thread_sys_init(void)
{
    if (is_thread_sys_inited)
        return BHT_OK;

    k_mutex_init(&thread_data_lock);
    k_mutex_init(&thread_obj_lock);

    /* Initialize supervisor thread data */
    memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
    supervisor_thread_data.tid = k_current_get();
    /* Set as head of thread data list */
    thread_data_list = &supervisor_thread_data;

    is_thread_sys_inited = true;
    return BHT_OK;
}

void vm_thread_sys_destroy(void)
{
    if (is_thread_sys_inited) {
        is_thread_sys_inited = false;
    }
}
static bh_thread_data *
thread_data_current(void)
{
    k_tid_t tid = k_current_get();
    return thread_data_list_lookup(tid);
}
static void vm_thread_cleanup(void)
{
    bh_thread_data *thread_data = thread_data_current();

    bh_assert(thread_data != NULL);

    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (thread_data->thread_wait_list) {
        /* Signal each joining thread */
        bh_thread_wait_list head = thread_data->thread_wait_list;
        while (head) {
            bh_thread_wait_list next = head->next;
            k_sem_give(&head->sem);
            bh_free(head);
            head = next;
        }
        thread_data->thread_wait_list = NULL;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    thread_data_list_remove(thread_data);
    /* Mark the thread object so that the next thread creation
       frees it (a thread cannot free its own k_thread) */
    ((bh_thread_obj *)thread_data->tid)->to_be_freed = true;
    bh_free(thread_data);
}
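/* Thread entry trampoline: records the real Zephyr tid in the per-thread
   data, publishes it on the thread data list, runs the user routine, then
   tears the bookkeeping down again via vm_thread_cleanup(). */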
static void vm_thread_wrapper(void *start, void *arg, void *thread_data)
{
    /* Set thread custom data */
    ((bh_thread_data *)thread_data)->tid = k_current_get();
    thread_data_list_add(thread_data);

    ((thread_start_routine_t)start)(arg);

    vm_thread_cleanup();
}
int _vm_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
                      unsigned int stack_size)
{
    return _vm_thread_create_with_prio(p_tid, start, arg, stack_size,
                                       BH_THREAD_DEFAULT_PRIORITY);
}
int _vm_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
                                void *arg, unsigned int stack_size, int prio)
{
    korp_tid tid;
    bh_thread_data *thread_data;
    unsigned thread_data_size;

    if (!p_tid || !stack_size)
        return BHT_ERROR;

    /* Free the thread objects of terminated threads */
    thread_obj_list_reclaim();

    /* Create and initialize thread object */
    if (!(tid = bh_malloc(sizeof(bh_thread_obj))))
        return BHT_ERROR;

    memset(tid, 0, sizeof(bh_thread_obj));

    /* Create and initialize thread data: header and stack live
       in one allocation (see the `stack[1]` member) */
    thread_data_size = offsetof(bh_thread_data, stack) + stack_size;
    if (!(thread_data = bh_malloc(thread_data_size))) {
        bh_free(tid);
        return BHT_ERROR;
    }

    memset(thread_data, 0, thread_data_size);
    k_mutex_init(&thread_data->wait_list_lock);
    thread_data->stack_size = stack_size;
    thread_data->tid = tid;

    /* Create the thread */
    if (!((tid = k_thread_create(tid, (k_thread_stack_t *)thread_data->stack,
                                 stack_size, vm_thread_wrapper, start, arg,
                                 thread_data, prio, 0, K_NO_WAIT)))) {
        /* `tid` was overwritten by the failed return value, so free
           the thread object through the pointer saved above */
        bh_free(thread_data->tid);
        bh_free(thread_data);
        return BHT_ERROR;
    }

    bh_assert(tid == thread_data->tid);

    /* Set thread custom data */
    thread_data_list_add(thread_data);
    thread_obj_list_add((bh_thread_obj *)tid);
    *p_tid = tid;

    return BHT_OK;
}
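/*
 * Usage sketch for the creation/join API.  Illustrative only: `my_entry`,
 * the 4096-byte stack and the 5000 ms timeout are assumptions, and
 * thread_start_routine_t is assumed to take and return void * (as the
 * cast in vm_thread_wrapper() suggests):
 *
 *     static void *my_entry(void *arg)
 *     {
 *         ...                                // the thread's work
 *         return NULL;
 *     }
 *
 *     korp_tid tid;
 *     if (_vm_thread_create(&tid, my_entry, NULL, 4096) != BHT_OK)
 *         return BHT_ERROR;
 *     _vm_thread_join(tid, NULL, 5000);      // wait up to 5000 ms
 */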
korp_tid _vm_self_thread(void)
{
    return (korp_tid)k_current_get();
}

void vm_thread_exit(void *code)
{
    korp_tid self = vm_self_thread();

    (void)code;
    vm_thread_cleanup();
    k_thread_abort((k_tid_t)self);
}

int _vm_thread_cancel(korp_tid thread)
{
    k_thread_abort((k_tid_t)thread);
    return 0;
}
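/* Zephyr has no native thread join, so it is emulated here: the joiner
   enqueues a semaphore node on the target's wait list and blocks on it;
   the exiting target signals (and frees) every node in
   vm_thread_cleanup(). */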
int _vm_thread_join(korp_tid thread, void **value_ptr, int mills)
{
    bh_thread_data *thread_data;
    bh_thread_wait_node *node;

    (void)value_ptr;

    /* Create wait node and append it to wait list */
    if (!(node = bh_malloc(sizeof(bh_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    /* Get thread data */
    thread_data = thread_data_list_lookup(thread);
    bh_assert(thread_data != NULL);
    if (!thread_data) {
        /* Thread has already terminated and been removed from the list */
        bh_free(node);
        return BHT_OK;
    }

    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (!thread_data->thread_wait_list)
        thread_data->thread_wait_list = node;
    else {
        /* Add to end of waiting list */
        bh_thread_wait_node *p = thread_data->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    /* Wait on the sem; the node is freed by the exiting thread
       in vm_thread_cleanup() */
    k_sem_take(&node->sem, mills);

    /* Wait some time for the thread to be actually terminated */
    k_sleep(100);

    return BHT_OK;
}
int _vm_thread_detach(korp_tid thread)
{
    (void)thread;
    return BHT_OK;
}

void *_vm_tls_get(unsigned idx)
{
    bh_thread_data *thread_data;

    (void)idx;
    bh_assert(idx == 0);
    thread_data = thread_data_current();
    return thread_data ? thread_data->tlr : NULL;
}

int _vm_tls_put(unsigned idx, void *tls)
{
    bh_thread_data *thread_data;

    (void)idx;
    bh_assert(idx == 0);
    thread_data = thread_data_current();
    bh_assert(thread_data != NULL);
    thread_data->tlr = tls;
    return BHT_OK;
}
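/*
 * The TLS wrappers support a single slot (idx 0), backed by the `tlr`
 * field of the current thread's bh_thread_data.  Illustrative use
 * (`ctx` and its type are assumptions):
 *
 *     _vm_tls_put(0, ctx);
 *     struct my_ctx *c = _vm_tls_get(0);
 */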
int _vm_mutex_init(korp_mutex *mutex)
{
    k_mutex_init(mutex);
    return BHT_OK;
}

int _vm_recursive_mutex_init(korp_mutex *mutex)
{
    /* Zephyr mutexes already allow recursive locking by the owner */
    k_mutex_init(mutex);
    return BHT_OK;
}

int _vm_mutex_destroy(korp_mutex *mutex)
{
    (void)mutex;
    return BHT_OK;
}

void vm_mutex_lock(korp_mutex *mutex)
{
    k_mutex_lock(mutex, K_FOREVER);
}

int vm_mutex_trylock(korp_mutex *mutex)
{
    return k_mutex_lock(mutex, K_NO_WAIT);
}

void vm_mutex_unlock(korp_mutex *mutex)
{
    k_mutex_unlock(mutex);
}
int _vm_sem_init(korp_sem *sem, unsigned int c)
{
    /* The semaphore starts at count 0; `c` is used as the maximum count */
    k_sem_init(sem, 0, c);
    return BHT_OK;
}

int _vm_sem_destroy(korp_sem *sem)
{
    (void)sem;
    return BHT_OK;
}

int _vm_sem_wait(korp_sem *sem)
{
    return k_sem_take(sem, K_FOREVER);
}

int _vm_sem_reltimedwait(korp_sem *sem, int mills)
{
    return k_sem_take(sem, mills);
}

int _vm_sem_post(korp_sem *sem)
{
    k_sem_give(sem);
    return BHT_OK;
}
int _vm_cond_init(korp_cond *cond)
{
    k_mutex_init(&cond->wait_list_lock);
    cond->thread_wait_list = NULL;
    return BHT_OK;
}

int _vm_cond_destroy(korp_cond *cond)
{
    (void)cond;
    return BHT_OK;
}
static int _vm_cond_wait_internal(korp_cond *cond, korp_mutex *mutex,
                                  bool timed, int mills)
{
    bh_thread_wait_node *node;

    /* Create wait node and append it to wait list */
    if (!(node = bh_malloc(sizeof(bh_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (!cond->thread_wait_list)
        cond->thread_wait_list = node;
    else {
        /* Add to end of wait list */
        bh_thread_wait_node *p = cond->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&cond->wait_list_lock);

    /* Unlock mutex, wait sem and lock mutex again */
    k_mutex_unlock(mutex);
    k_sem_take(&node->sem, timed ? mills : K_FOREVER);
    k_mutex_lock(mutex, K_FOREVER);

    /* Remove wait node from wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list == node)
        cond->thread_wait_list = node->next;
    else {
        /* Remove from the wait list */
        bh_thread_wait_node *p = cond->thread_wait_list;
        while (p->next != node)
            p = p->next;
        p->next = node->next;
    }
    bh_free(node);
    k_mutex_unlock(&cond->wait_list_lock);

    return BHT_OK;
}
int _vm_cond_wait(korp_cond *cond, korp_mutex *mutex)
{
    return _vm_cond_wait_internal(cond, mutex, false, 0);
}

int _vm_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, int mills)
{
    return _vm_cond_wait_internal(cond, mutex, true, mills);
}
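/*
 * Usage sketch of the condition-variable emulation (the predicate `ready`,
 * `lock` and `cond` are assumptions for illustration):
 *
 *     vm_mutex_lock(&lock);
 *     while (!ready)                     // re-check: a wakeup may be stale
 *         _vm_cond_wait(&cond, &lock);   // unlocks, waits, relocks
 *     vm_mutex_unlock(&lock);
 *
 * As with POSIX condition variables, waiters should re-test the predicate
 * in a loop after every wakeup.
 */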
int _vm_cond_signal(korp_cond *cond)
{
    /* Signal the head wait node of wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list)
        k_sem_give(&cond->thread_wait_list->sem);
    k_mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}
int _vm_cond_broadcast(korp_cond *cond)
{
    /* Signal each wait node of wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list) {
        bh_thread_wait_node *p = cond->thread_wait_list;
        while (p) {
            k_sem_give(&p->sem);
            p = p->next;
        }
    }
    k_mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}