bh_thread.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "bh_thread.h"
#include "bh_assert.h"
#include "bh_log.h"
#include "bh_memory.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memset */
#include <stddef.h> /* offsetof */

typedef struct bh_thread_wait_node {
    struct k_sem sem;
    bh_thread_wait_list next;
} bh_thread_wait_node;

typedef struct bh_thread_data {
    /* Next thread data */
    struct bh_thread_data *next;
    /* Zephyr thread handle */
    korp_tid tid;
    /* Jeff thread local root */
    void *tlr;
    /* Lock for waiting list */
    struct k_mutex wait_list_lock;
    /* Waiting list of other threads who are joining this thread */
    bh_thread_wait_list thread_wait_list;
    /* Thread stack size */
    unsigned stack_size;
    /* Thread stack */
    char stack[1];
} bh_thread_data;

typedef struct bh_thread_obj {
    struct k_thread thread;
    /* Whether the thread is terminated and this thread object is to
       be freed in the future. */
    bool to_be_freed;
    struct bh_thread_obj *next;
} bh_thread_obj;
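
/*
 * Layout note: a bh_thread_data is allocated as a single block of
 * offsetof(bh_thread_data, stack) + stack_size bytes, so the trailing
 * one-element `stack` array acts as a C89-style flexible array that
 * holds the thread's stack right after the header fields. Likewise,
 * because `struct k_thread thread` is the first member of
 * bh_thread_obj, a korp_tid handle can be cast back to a
 * bh_thread_obj * (see vm_thread_cleanup() below).
 */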

static bool is_thread_sys_inited = false;

/* Thread data of supervisor thread */
static bh_thread_data supervisor_thread_data;

/* Lock for thread data list */
static struct k_mutex thread_data_lock;

/* Thread data list */
static bh_thread_data *thread_data_list = NULL;

/* Lock for thread object list */
static struct k_mutex thread_obj_lock;

/* Thread object list */
static bh_thread_obj *thread_obj_list = NULL;

static void thread_data_list_add(bh_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (!thread_data_list)
        thread_data_list = thread_data;
    else {
        /* If already in list, just return */
        bh_thread_data *p = thread_data_list;
        while (p) {
            if (p == thread_data) {
                k_mutex_unlock(&thread_data_lock);
                return;
            }
            p = p->next;
        }

        /* Set as head of list */
        thread_data->next = thread_data_list;
        thread_data_list = thread_data;
    }
    k_mutex_unlock(&thread_data_lock);
}

static void thread_data_list_remove(bh_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        if (thread_data_list == thread_data)
            thread_data_list = thread_data_list->next;
        else {
            /* Search and remove it from list */
            bh_thread_data *p = thread_data_list;
            while (p && p->next != thread_data)
                p = p->next;
            if (p && p->next == thread_data)
                p->next = p->next->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
}

static bh_thread_data *
thread_data_list_lookup(k_tid_t tid)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        bh_thread_data *p = thread_data_list;
        while (p) {
            if (p->tid == tid) {
                /* Found */
                k_mutex_unlock(&thread_data_lock);
                return p;
            }
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
    return NULL;
}

static void thread_obj_list_add(bh_thread_obj *thread_obj)
{
    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    if (!thread_obj_list)
        thread_obj_list = thread_obj;
    else {
        /* Set as head of list */
        thread_obj->next = thread_obj_list;
        thread_obj_list = thread_obj;
    }
    k_mutex_unlock(&thread_obj_lock);
}
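
/*
 * Reclaim the thread objects of threads that have already terminated.
 * A thread cannot free its own k_thread while it is still running, so
 * vm_thread_cleanup() only marks the object with to_be_freed; the
 * actual bh_free() is deferred to the next _vm_thread_create() call,
 * which invokes this function first.
 */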
static void thread_obj_list_reclaim(void)
{
    bh_thread_obj *p, *p_prev;

    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    p_prev = NULL;
    p = thread_obj_list;
    while (p) {
        if (p->to_be_freed) {
            if (p_prev == NULL) { /* p is the head of list */
                thread_obj_list = p->next;
                bh_free(p);
                p = thread_obj_list;
            } else { /* p is not the head of list */
                p_prev->next = p->next;
                bh_free(p);
                p = p_prev->next;
            }
        } else {
            p_prev = p;
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_obj_lock);
}
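
/*
 * Initialize the thread subsystem. This should be called from the
 * supervisor (main) thread: the current thread is recorded as the
 * supervisor and becomes the initial head of the thread data list.
 */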
int _vm_thread_sys_init(void)
{
    if (is_thread_sys_inited)
        return BHT_OK;

    k_mutex_init(&thread_data_lock);
    k_mutex_init(&thread_obj_lock);

    /* Initialize supervisor thread data */
    memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
    supervisor_thread_data.tid = k_current_get();
    /* Set as head of thread data list */
    thread_data_list = &supervisor_thread_data;

    is_thread_sys_inited = true;
    return BHT_OK;
}

void vm_thread_sys_destroy(void)
{
    if (is_thread_sys_inited) {
        is_thread_sys_inited = false;
    }
}

static bh_thread_data *
thread_data_current(void)
{
    k_tid_t tid = k_current_get();
    return thread_data_list_lookup(tid);
}
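
/*
 * Tear-down run by a terminating thread: wake every thread blocked in
 * _vm_thread_join() on us, unlink our thread data, mark our thread
 * object so the next _vm_thread_create() call frees it, then release
 * the thread data itself.
 */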
static void vm_thread_cleanup(void)
{
    bh_thread_data *thread_data = thread_data_current();

    bh_assert(thread_data != NULL);

    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (thread_data->thread_wait_list) {
        /* Signal each joining thread */
        bh_thread_wait_list head = thread_data->thread_wait_list;
        while (head) {
            bh_thread_wait_list next = head->next;
            k_sem_give(&head->sem);
            /* head will be freed by joining thread */
            head = next;
        }
        thread_data->thread_wait_list = NULL;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    thread_data_list_remove(thread_data);
    /* Set the flag so that the next thread creation frees
       the thread object */
    ((bh_thread_obj *)thread_data->tid)->to_be_freed = true;
    bh_free(thread_data);
}
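
/*
 * Entry shim passed to k_thread_create(): records the real Zephyr tid,
 * registers the thread data, runs the user's start routine, and then
 * performs cleanup when the routine returns.
 */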
static void vm_thread_wrapper(void *start, void *arg, void *thread_data)
{
    /* Set thread custom data */
    ((bh_thread_data *)thread_data)->tid = k_current_get();
    thread_data_list_add(thread_data);

    ((thread_start_routine_t)start)(arg);
    vm_thread_cleanup();
}

int _vm_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
                      unsigned int stack_size)
{
    return _vm_thread_create_with_prio(p_tid, start, arg, stack_size,
                                       BH_THREAD_DEFAULT_PRIORITY);
}

int _vm_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
                                void *arg, unsigned int stack_size, int prio)
{
    korp_tid tid;
    k_tid_t ktid;
    bh_thread_data *thread_data;
    unsigned thread_data_size;

    if (!p_tid || !stack_size)
        return BHT_ERROR;

    /* Free the thread objects of terminated threads */
    thread_obj_list_reclaim();

    /* Create and initialize thread object */
    if (!(tid = bh_malloc(sizeof(bh_thread_obj))))
        return BHT_ERROR;

    memset(tid, 0, sizeof(bh_thread_obj));

    /* Create and initialize thread data: the thread's stack is carved
       out of the same allocation, after the header fields */
    thread_data_size = offsetof(bh_thread_data, stack) + stack_size;
    if (!(thread_data = bh_malloc(thread_data_size))) {
        bh_free(tid);
        return BHT_ERROR;
    }

    memset(thread_data, 0, thread_data_size);
    k_mutex_init(&thread_data->wait_list_lock);
    thread_data->stack_size = stack_size;
    thread_data->tid = tid;

    /* Create the thread; keep the original tid so that both
       allocations can still be freed if creation fails */
    ktid = k_thread_create(tid, (k_thread_stack_t *)thread_data->stack,
                           stack_size, vm_thread_wrapper, start, arg,
                           thread_data, prio, 0, K_NO_WAIT);
    if (!ktid) {
        bh_free(tid);
        bh_free(thread_data);
        return BHT_ERROR;
    }

    bh_assert(ktid == thread_data->tid);

    /* Set thread custom data */
    thread_data_list_add(thread_data);
    thread_obj_list_add((bh_thread_obj *)tid);
    *p_tid = tid;
    return BHT_OK;
}
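
/*
 * Note (an assumption about the Zephyr version targeted here): newer
 * Zephyr releases expect thread stacks to be declared with
 * K_THREAD_STACK_DEFINE so that alignment and MPU guard requirements
 * are met; casting a heap buffer to k_thread_stack_t * as above only
 * works on ports/configurations without such requirements.
 */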

korp_tid _vm_self_thread(void)
{
    return (korp_tid)k_current_get();
}

void vm_thread_exit(void *code)
{
    (void)code;
    korp_tid self = vm_self_thread();

    vm_thread_cleanup();
    k_thread_abort((k_tid_t)self);
}

int _vm_thread_cancel(korp_tid thread)
{
    k_thread_abort((k_tid_t)thread);
    return 0;
}
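
/*
 * Join: enqueue a semaphore wait node on the target thread's wait
 * list, then block on that semaphore until the target's
 * vm_thread_cleanup() signals it (or the timeout expires).
 */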
int _vm_thread_join(korp_tid thread, void **value_ptr, int mills)
{
    bh_thread_data *thread_data;
    bh_thread_wait_node *node;

    (void)value_ptr;

    /* Create wait node and append it to wait list */
    if (!(node = bh_malloc(sizeof(bh_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    /* Get thread data */
    thread_data = thread_data_list_lookup(thread);
    bh_assert(thread_data != NULL);

    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (!thread_data->thread_wait_list)
        thread_data->thread_wait_list = node;
    else {
        /* Add to end of waiting list */
        bh_thread_wait_node *p = thread_data->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    /* Wait for the sem */
    k_sem_take(&node->sem, mills);
    /* Wait some time for the thread to be actually terminated */
    k_sleep(100);

    /* Destroy resource */
    bh_free(node);
    return BHT_OK;
}
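
#if 0
/* Illustrative sketch, not part of the original file: typical
   create/join usage of this API. Assumes bh_thread.h maps the
   vm_thread_* names onto the underscore-prefixed functions defined
   here; `worker`, the 4096-byte stack size and the 5000 ms timeout
   are placeholders. */
static void *worker(void *arg)
{
    (void)arg;
    /* ... thread body ... */
    return NULL;
}

static void join_example(void)
{
    korp_tid tid;

    if (vm_thread_create(&tid, worker, NULL, 4096) == BHT_OK)
        vm_thread_join(tid, NULL, 5000); /* wait up to 5000 ms */
}
#endif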

int _vm_thread_detach(korp_tid thread)
{
    (void)thread;
    return BHT_OK;
}

void *_vm_tls_get(unsigned idx)
{
    bh_thread_data *thread_data;

    (void)idx;
    bh_assert(idx == 0);

    thread_data = thread_data_current();
    return thread_data ? thread_data->tlr : NULL;
}

int _vm_tls_put(unsigned idx, void *tls)
{
    bh_thread_data *thread_data;

    (void)idx;
    bh_assert(idx == 0);

    thread_data = thread_data_current();
    bh_assert(thread_data != NULL);
    thread_data->tlr = tls;
    return BHT_OK;
}

int _vm_mutex_init(korp_mutex *mutex)
{
    k_mutex_init(mutex);
    return BHT_OK;
}

int _vm_recursive_mutex_init(korp_mutex *mutex)
{
    /* Zephyr's k_mutex already allows recursive locking by the owning
       thread, so this is the same as _vm_mutex_init() */
    k_mutex_init(mutex);
    return BHT_OK;
}

int _vm_mutex_destroy(korp_mutex *mutex)
{
    /* Zephyr mutexes need no explicit destruction */
    (void)mutex;
    return BHT_OK;
}

void vm_mutex_lock(korp_mutex *mutex)
{
    k_mutex_lock(mutex, K_FOREVER);
}

int vm_mutex_trylock(korp_mutex *mutex)
{
    return k_mutex_lock(mutex, K_NO_WAIT);
}

void vm_mutex_unlock(korp_mutex *mutex)
{
    k_mutex_unlock(mutex);
}

int _vm_sem_init(korp_sem *sem, unsigned int c)
{
    /* Initial count 0, maximum count c */
    k_sem_init(sem, 0, c);
    return BHT_OK;
}

int _vm_sem_destroy(korp_sem *sem)
{
    (void)sem;
    return BHT_OK;
}

int _vm_sem_wait(korp_sem *sem)
{
    return k_sem_take(sem, K_FOREVER);
}

int _vm_sem_reltimedwait(korp_sem *sem, int mills)
{
    return k_sem_take(sem, mills);
}

int _vm_sem_post(korp_sem *sem)
{
    k_sem_give(sem);
    return BHT_OK;
}
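
/*
 * Condition variables are emulated with per-waiter semaphores: each
 * waiter queues a wait node on cond->thread_wait_list and blocks on
 * the node's semaphore; _vm_cond_signal() wakes the head waiter and
 * _vm_cond_broadcast() wakes them all.
 */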
int _vm_cond_init(korp_cond *cond)
{
    k_mutex_init(&cond->wait_list_lock);
    cond->thread_wait_list = NULL;
    return BHT_OK;
}

int _vm_cond_destroy(korp_cond *cond)
{
    (void)cond;
    return BHT_OK;
}

static int _vm_cond_wait_internal(korp_cond *cond, korp_mutex *mutex,
                                  bool timed, int mills)
{
    bh_thread_wait_node *node;

    /* Create wait node and append it to wait list */
    if (!(node = bh_malloc(sizeof(bh_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (!cond->thread_wait_list)
        cond->thread_wait_list = node;
    else {
        /* Add to end of wait list */
        bh_thread_wait_node *p = cond->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&cond->wait_list_lock);

    /* Unlock mutex, wait sem and lock mutex again */
    k_mutex_unlock(mutex);
    k_sem_take(&node->sem, timed ? mills : K_FOREVER);
    k_mutex_lock(mutex, K_FOREVER);

    /* Remove wait node from wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list == node)
        cond->thread_wait_list = node->next;
    else {
        /* Remove from the wait list */
        bh_thread_wait_node *p = cond->thread_wait_list;
        while (p->next != node)
            p = p->next;
        p->next = node->next;
    }
    bh_free(node);
    k_mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}

int _vm_cond_wait(korp_cond *cond, korp_mutex *mutex)
{
    return _vm_cond_wait_internal(cond, mutex, false, 0);
}

int _vm_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, int mills)
{
    return _vm_cond_wait_internal(cond, mutex, true, mills);
}

int _vm_cond_signal(korp_cond *cond)
{
    /* Signal the head wait node of wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list)
        k_sem_give(&cond->thread_wait_list->sem);
    k_mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}

int _vm_cond_broadcast(korp_cond *cond)
{
    /* Signal each wait node of wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list) {
        bh_thread_wait_node *p = cond->thread_wait_list;
        while (p) {
            k_sem_give(&p->sem);
            p = p->next;
        }
    }
    k_mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}
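
#if 0
/* Illustrative sketch, not part of the original file: the standard
   wait-in-a-loop pattern on top of this condition-variable emulation.
   `pending`, `q_lock`, `q_cond`, `producer` and `consumer` are
   hypothetical, and the vm_cond_* names are assumed to be mapped onto
   the underscore-prefixed functions by bh_thread.h. */
static korp_mutex q_lock;
static korp_cond q_cond;
static int pending;

static void consumer(void)
{
    vm_mutex_lock(&q_lock);
    while (pending == 0)
        /* Releases q_lock while blocked, re-acquires before return */
        vm_cond_wait(&q_cond, &q_lock);
    pending--;
    vm_mutex_unlock(&q_lock);
}

static void producer(void)
{
    vm_mutex_lock(&q_lock);
    pending++;
    vm_cond_signal(&q_cond);
    vm_mutex_unlock(&q_lock);
}
#endif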