/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "bh_thread.h"
#include "bh_assert.h"
#include "bh_log.h"

typedef struct bh_thread_wait_node {
    struct k_sem sem;
    bh_thread_wait_list next;
} bh_thread_wait_node;

typedef struct bh_thread_data {
    /* Next thread data */
    struct bh_thread_data *next;
    /* Zephyr thread handle */
    korp_tid tid;
    /* Jeff thread local root */
    void *tlr;
    /* Lock for waiting list */
    struct k_mutex wait_list_lock;
    /* Waiting list of other threads who are joining this thread */
    bh_thread_wait_list thread_wait_list;
    /* Thread stack size */
    unsigned stack_size;
    /* Thread stack */
    char stack[1];
} bh_thread_data;

typedef struct bh_thread_obj {
    struct k_thread thread;
    /* Whether the thread is terminated and this thread object is to
       be freed in the future. */
    bool to_be_freed;
    struct bh_thread_obj *next;
} bh_thread_obj;
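
/*
 * Design notes: bh_thread_data carries the Zephyr thread stack inline
 * through the trailing `char stack[1]` member (the classic C "struct
 * hack"); the real allocation size is
 * offsetof(bh_thread_data, stack) + stack_size, see
 * _vm_thread_create_with_prio(). Joining is emulated in software: each
 * joiner queues a bh_thread_wait_node holding a private semaphore on the
 * target's thread_wait_list and blocks on that semaphore until
 * vm_thread_cleanup() signals it when the thread exits.
 */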
static bool is_thread_sys_inited = false;

/* Thread data of supervisor thread */
static bh_thread_data supervisor_thread_data;

/* Lock for thread data list */
static struct k_mutex thread_data_lock;

/* Thread data list */
static bh_thread_data *thread_data_list = NULL;

/* Lock for thread object list */
static struct k_mutex thread_obj_lock;

/* Thread object list */
static bh_thread_obj *thread_obj_list = NULL;

static void thread_data_list_add(bh_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (!thread_data_list)
        thread_data_list = thread_data;
    else {
        /* If already in list, just return */
        bh_thread_data *p = thread_data_list;
        while (p) {
            if (p == thread_data) {
                k_mutex_unlock(&thread_data_lock);
                return;
            }
            p = p->next;
        }

        /* Set as head of list */
        thread_data->next = thread_data_list;
        thread_data_list = thread_data;
    }
    k_mutex_unlock(&thread_data_lock);
}

static void thread_data_list_remove(bh_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        if (thread_data_list == thread_data)
            thread_data_list = thread_data_list->next;
        else {
            /* Search and remove it from list */
            bh_thread_data *p = thread_data_list;
            while (p && p->next != thread_data)
                p = p->next;
            if (p && p->next == thread_data)
                p->next = p->next->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
}

static bh_thread_data *
thread_data_list_lookup(k_tid_t tid)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        bh_thread_data *p = thread_data_list;
        while (p) {
            if (p->tid == tid) {
                /* Found */
                k_mutex_unlock(&thread_data_lock);
                return p;
            }
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
    return NULL;
}

static void thread_obj_list_add(bh_thread_obj *thread_obj)
{
    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    if (!thread_obj_list)
        thread_obj_list = thread_obj;
    else {
        /* Set as head of list */
        thread_obj->next = thread_obj_list;
        thread_obj_list = thread_obj;
    }
    k_mutex_unlock(&thread_obj_lock);
}

static void thread_obj_list_reclaim(void)
{
    bh_thread_obj *p, *p_prev;

    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    p_prev = NULL;
    p = thread_obj_list;
    while (p) {
        if (p->to_be_freed) {
            if (p_prev == NULL) { /* p is the head of list */
                thread_obj_list = p->next;
                BH_FREE(p);
                p = thread_obj_list;
            } else { /* p is not the head of list */
                p_prev->next = p->next;
                BH_FREE(p);
                p = p_prev->next;
            }
        } else {
            p_prev = p;
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_obj_lock);
}
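
/*
 * Design note: a thread never frees its own bh_thread_obj, presumably
 * because the embedded struct k_thread is still in use by the Zephyr
 * scheduler while the thread is exiting. vm_thread_cleanup() therefore
 * only marks the object with to_be_freed, and the memory is actually
 * released by thread_obj_list_reclaim() on the next thread creation.
 */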
int _vm_thread_sys_init(void)
{
    if (is_thread_sys_inited)
        return BHT_OK;

    k_mutex_init(&thread_data_lock);
    k_mutex_init(&thread_obj_lock);

    /* Initialize supervisor thread data */
    memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
    supervisor_thread_data.tid = k_current_get();
    /* Set as head of thread data list */
    thread_data_list = &supervisor_thread_data;

    is_thread_sys_inited = true;
    return BHT_OK;
}

void vm_thread_sys_destroy(void)
{
    if (is_thread_sys_inited) {
        is_thread_sys_inited = false;
    }
}

static bh_thread_data *
thread_data_current(void)
{
    k_tid_t tid = k_current_get();
    return thread_data_list_lookup(tid);
}

static void vm_thread_cleanup(void)
{
    bh_thread_data *thread_data = thread_data_current();

    bh_assert(thread_data != NULL);
    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (thread_data->thread_wait_list) {
        /* Signal each joining thread */
        bh_thread_wait_list head = thread_data->thread_wait_list;
        while (head) {
            bh_thread_wait_list next = head->next;
            k_sem_give(&head->sem);
            /* head will be freed by joining thread */
            head = next;
        }
        thread_data->thread_wait_list = NULL;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    thread_data_list_remove(thread_data);
    /* Set the to_be_freed flag so that the next thread creation
       can free this thread object */
    ((bh_thread_obj *) thread_data->tid)->to_be_freed = true;
    BH_FREE(thread_data);
}

static void vm_thread_wrapper(void *start, void *arg, void *thread_data)
{
    /* Set thread custom data */
    ((bh_thread_data *) thread_data)->tid = k_current_get();
    thread_data_list_add(thread_data);

    ((thread_start_routine_t) start)(arg);
    vm_thread_cleanup();
}
int _vm_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
                      unsigned int stack_size)
{
    return _vm_thread_create_with_prio(p_tid, start, arg, stack_size,
                                       BH_THREAD_DEFAULT_PRIORITY);
}

int _vm_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
                                void *arg, unsigned int stack_size, int prio)
{
    korp_tid tid;
    bh_thread_data *thread_data;
    unsigned thread_data_size;

    if (!p_tid || !stack_size)
        return BHT_ERROR;

    /* Free the thread objects of terminated threads */
    thread_obj_list_reclaim();

    /* Create and initialize thread object */
    if (!(tid = BH_MALLOC(sizeof(bh_thread_obj))))
        return BHT_ERROR;

    memset(tid, 0, sizeof(bh_thread_obj));

    /* Create and initialize thread data */
    thread_data_size = offsetof(bh_thread_data, stack) + stack_size;
    if (!(thread_data = BH_MALLOC(thread_data_size))) {
        BH_FREE(tid);
        return BHT_ERROR;
    }

    memset(thread_data, 0, thread_data_size);
    k_mutex_init(&thread_data->wait_list_lock);
    thread_data->stack_size = stack_size;
    thread_data->tid = tid;

    /* Create the thread */
    if (!(tid = k_thread_create(tid, (k_thread_stack_t *) thread_data->stack,
                                stack_size, vm_thread_wrapper, start, arg,
                                thread_data, prio, 0, K_NO_WAIT))) {
        /* Free the original thread object, still reachable via
           thread_data->tid; tid itself is NULL on this path */
        BH_FREE(thread_data->tid);
        BH_FREE(thread_data);
        return BHT_ERROR;
    }

    bh_assert(tid == thread_data->tid);

    /* Set thread custom data */
    thread_data_list_add(thread_data);
    thread_obj_list_add((bh_thread_obj *) tid);
    *p_tid = tid;

    return BHT_OK;
}
korp_tid _vm_self_thread(void)
{
    return (korp_tid) k_current_get();
}

void vm_thread_exit(void *code)
{
    korp_tid self = vm_self_thread();

    (void) code;
    vm_thread_cleanup();
    k_thread_abort((k_tid_t) self);
}

int _vm_thread_cancel(korp_tid thread)
{
    k_thread_abort((k_tid_t) thread);
    return 0;
}

int _vm_thread_join(korp_tid thread, void **value_ptr, int mills)
{
    bh_thread_data *thread_data;
    bh_thread_wait_node *node;

    (void) value_ptr;

    /* Create wait node and append it to wait list */
    if (!(node = BH_MALLOC(sizeof(bh_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    /* Get thread data */
    thread_data = thread_data_list_lookup(thread);
    bh_assert(thread_data != NULL);

    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (!thread_data->thread_wait_list)
        thread_data->thread_wait_list = node;
    else {
        /* Add to end of waiting list */
        bh_thread_wait_node *p = thread_data->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    /* Wait on the semaphore */
    k_sem_take(&node->sem, mills);
    /* Wait some time for the thread to be actually terminated */
    k_sleep(100);

    /* Destroy resource */
    BH_FREE(node);
    return BHT_OK;
}
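
/*
 * Illustrative usage sketch (hypothetical, kept out of the build with
 * `#if 0`): creating a worker thread and joining it with a 5000 ms
 * timeout. The names `worker` and `demo` and the 4096-byte stack are
 * examples only; `worker` assumes thread_start_routine_t is
 * `void *(*)(void *)`, consistent with the cast in vm_thread_wrapper().
 */
#if 0
static void *
worker(void *arg)
{
    /* ... do work with arg ... */
    return NULL; /* the return value is discarded, see _vm_thread_join() */
}

static void
demo(void)
{
    korp_tid tid;

    if (_vm_thread_create(&tid, worker, NULL, 4096) != BHT_OK)
        return;

    /* Blocks on the wait-node semaphore until worker exits
       (or the 5000 ms timeout elapses) */
    _vm_thread_join(tid, NULL, 5000);
}
#endif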
int _vm_thread_detach(korp_tid thread)
{
    (void) thread;
    return BHT_OK;
}

void *_vm_tls_get(unsigned idx)
{
    bh_thread_data *thread_data;

    (void) idx;
    bh_assert(idx == 0);
    thread_data = thread_data_current();
    return thread_data ? thread_data->tlr : NULL;
}

int _vm_tls_put(unsigned idx, void *tls)
{
    bh_thread_data *thread_data;

    (void) idx;
    bh_assert(idx == 0);
    thread_data = thread_data_current();
    bh_assert(thread_data != NULL);
    thread_data->tlr = tls;
    return BHT_OK;
}
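
/*
 * Illustrative sketch (hypothetical, excluded from the build): this port
 * supports a single TLS slot (idx 0), backed by the per-thread `tlr`
 * field, so put/get pairs look like this. `tls_demo` is an example only.
 */
#if 0
static void
tls_demo(void *instance_data)
{
    void *p;

    _vm_tls_put(0, instance_data);  /* store in the current thread's tlr */
    p = _vm_tls_get(0);             /* read it back: p == instance_data */
    (void) p;
}
#endif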
int _vm_mutex_init(korp_mutex *mutex)
{
    k_mutex_init(mutex);
    return BHT_OK;
}

int _vm_recursive_mutex_init(korp_mutex *mutex)
{
    /* Zephyr's k_mutex already supports recursive locking by the
       owning thread, so this is identical to _vm_mutex_init() */
    k_mutex_init(mutex);
    return BHT_OK;
}

int _vm_mutex_destroy(korp_mutex *mutex)
{
    (void) mutex;
    return BHT_OK;
}

void vm_mutex_lock(korp_mutex *mutex)
{
    k_mutex_lock(mutex, K_FOREVER);
}

int vm_mutex_trylock(korp_mutex *mutex)
{
    /* Returns k_mutex_lock()'s result directly: 0 on success,
       a negative value if the mutex is not available */
    return k_mutex_lock(mutex, K_NO_WAIT);
}

void vm_mutex_unlock(korp_mutex *mutex)
{
    k_mutex_unlock(mutex);
}
int _vm_sem_init(korp_sem *sem, unsigned int c)
{
    int ret = k_sem_init(sem, 0, c);
    return ret == 0 ? BHT_OK : BHT_ERROR;
}

int _vm_sem_destroy(korp_sem *sem)
{
    (void) sem;
    return BHT_OK;
}

int _vm_sem_wait(korp_sem *sem)
{
    return k_sem_take(sem, K_FOREVER);
}

int _vm_sem_reltimedwait(korp_sem *sem, int mills)
{
    return k_sem_take(sem, mills);
}

int _vm_sem_post(korp_sem *sem)
{
    k_sem_give(sem);
    return BHT_OK;
}
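
/*
 * Illustrative sketch (hypothetical, excluded from the build): the
 * semaphore wrappers initialize the count to 0 with a caller-chosen
 * limit, giving a simple signal/wait handshake. `g_sem` and
 * `handshake_demo` are example names only.
 */
#if 0
static korp_sem g_sem;

static void
handshake_demo(void)
{
    _vm_sem_init(&g_sem, 1);    /* initial count 0, limit 1 */
    _vm_sem_post(&g_sem);       /* give: count becomes 1 */
    _vm_sem_wait(&g_sem);       /* take: returns immediately, count 0 */
    _vm_sem_destroy(&g_sem);    /* no-op in this port */
}
#endif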
int _vm_cond_init(korp_cond *cond)
{
    k_mutex_init(&cond->wait_list_lock);
    cond->thread_wait_list = NULL;
    return BHT_OK;
}

int _vm_cond_destroy(korp_cond *cond)
{
    (void) cond;
    return BHT_OK;
}

static int _vm_cond_wait_internal(korp_cond *cond, korp_mutex *mutex,
                                  bool timed, int mills)
{
    bh_thread_wait_node *node;

    /* Create wait node and append it to wait list */
    if (!(node = BH_MALLOC(sizeof(bh_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (!cond->thread_wait_list)
        cond->thread_wait_list = node;
    else {
        /* Add to end of wait list */
        bh_thread_wait_node *p = cond->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&cond->wait_list_lock);

    /* Unlock mutex, wait sem and lock mutex again */
    k_mutex_unlock(mutex);
    k_sem_take(&node->sem, timed ? mills : K_FOREVER);
    k_mutex_lock(mutex, K_FOREVER);

    /* Remove wait node from wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list == node)
        cond->thread_wait_list = node->next;
    else {
        /* Remove from the wait list */
        bh_thread_wait_node *p = cond->thread_wait_list;
        while (p->next != node)
            p = p->next;
        p->next = node->next;
    }
    BH_FREE(node);
    k_mutex_unlock(&cond->wait_list_lock);

    return BHT_OK;
}

int _vm_cond_wait(korp_cond *cond, korp_mutex *mutex)
{
    return _vm_cond_wait_internal(cond, mutex, false, 0);
}

int _vm_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, int mills)
{
    return _vm_cond_wait_internal(cond, mutex, true, mills);
}

int _vm_cond_signal(korp_cond *cond)
{
    /* Signal the head wait node of wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list)
        k_sem_give(&cond->thread_wait_list->sem);
    k_mutex_unlock(&cond->wait_list_lock);

    return BHT_OK;
}

int _vm_cond_broadcast(korp_cond *cond)
{
    /* Signal each wait node of wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list) {
        bh_thread_wait_node *p = cond->thread_wait_list;
        while (p) {
            k_sem_give(&p->sem);
            p = p->next;
        }
    }
    k_mutex_unlock(&cond->wait_list_lock);

    return BHT_OK;
}
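
/*
 * Illustrative sketch (hypothetical, excluded from the build): since this
 * condition variable is built from a wait list of per-waiter semaphores,
 * waiters should treat wakeups as advisory and use the usual
 * lock / while-predicate / wait pattern. `g_lock`, `g_cond`, `g_ready`,
 * `consumer` and `producer` are example names only.
 */
#if 0
static korp_mutex g_lock;
static korp_cond g_cond;
static bool g_ready = false;

static void
consumer(void)
{
    vm_mutex_lock(&g_lock);
    while (!g_ready)                /* re-check the predicate after wakeup */
        _vm_cond_wait(&g_cond, &g_lock);
    vm_mutex_unlock(&g_lock);
}

static void
producer(void)
{
    vm_mutex_lock(&g_lock);
    g_ready = true;
    vm_mutex_unlock(&g_lock);
    _vm_cond_signal(&g_cond);       /* wake the head waiter, if any */
}
#endif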