/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bh_thread.h"
#include "bh_assert.h"
#include "bh_log.h"
#include "bh_memory.h"
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h> /* offsetof() */
#include <string.h> /* memset() */

typedef struct bh_thread_wait_node {
    struct k_sem sem;
    bh_thread_wait_list next;
} bh_thread_wait_node;

typedef struct bh_thread_data {
    /* Next thread data */
    struct bh_thread_data *next;
    /* Zephyr thread handle */
    korp_tid tid;
    /* Jeff thread local root */
    void *tlr;
    /* Lock for waiting list */
    struct k_mutex wait_list_lock;
    /* Waiting list of other threads who are joining this thread */
    bh_thread_wait_list thread_wait_list;
    /* Thread stack size */
    unsigned stack_size;
    /* Thread stack */
    char stack[1];
} bh_thread_data;

typedef struct bh_thread_obj {
    struct k_thread thread;
    /* Whether the thread is terminated and this thread object is to
       be freed in the future. */
    bool to_be_freed;
    struct bh_thread_obj *next;
} bh_thread_obj;

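/* A bh_thread_obj begins with its struct k_thread member, so a Zephyr
   thread ID (k_tid_t, a pointer to that k_thread) for a thread created
   by this module can be cast back to the enclosing bh_thread_obj, as
   vm_thread_cleanup() does below. This holds only for threads created
   via _vm_thread_create_with_prio(), not for the supervisor thread. */
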
static bool is_thread_sys_inited = false;

/* Thread data of supervisor thread */
static bh_thread_data supervisor_thread_data;

/* Lock for thread data list */
static struct k_mutex thread_data_lock;

/* Thread data list */
static bh_thread_data *thread_data_list = NULL;

/* Lock for thread object list */
static struct k_mutex thread_obj_lock;

/* Thread object list */
static bh_thread_obj *thread_obj_list = NULL;

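/* The thread data list maps a live Zephyr thread ID back to its
   bh_thread_data; the thread object list tracks every k_thread object
   this module allocated, so terminated ones can be reclaimed later. */
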
static void thread_data_list_add(bh_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (!thread_data_list)
        thread_data_list = thread_data;
    else {
        /* If already in list, just return */
        bh_thread_data *p = thread_data_list;
        while (p) {
            if (p == thread_data) {
                k_mutex_unlock(&thread_data_lock);
                return;
            }
            p = p->next;
        }

        /* Set as head of list */
        thread_data->next = thread_data_list;
        thread_data_list = thread_data;
    }
    k_mutex_unlock(&thread_data_lock);
}

static void thread_data_list_remove(bh_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        if (thread_data_list == thread_data)
            thread_data_list = thread_data_list->next;
        else {
            /* Search and remove it from list */
            bh_thread_data *p = thread_data_list;
            while (p && p->next != thread_data)
                p = p->next;
            if (p && p->next == thread_data)
                p->next = p->next->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
}

static bh_thread_data *
thread_data_list_lookup(k_tid_t tid)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        bh_thread_data *p = thread_data_list;
        while (p) {
            if (p->tid == tid) {
                /* Found */
                k_mutex_unlock(&thread_data_lock);
                return p;
            }
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
    return NULL;
}

static void thread_obj_list_add(bh_thread_obj *thread_obj)
{
    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    if (!thread_obj_list)
        thread_obj_list = thread_obj;
    else {
        /* Set as head of list */
        thread_obj->next = thread_obj_list;
        thread_obj_list = thread_obj;
    }
    k_mutex_unlock(&thread_obj_lock);
}

static void thread_obj_list_reclaim(void)
{
    bh_thread_obj *p, *p_prev;

    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    p_prev = NULL;
    p = thread_obj_list;
    while (p) {
        if (p->to_be_freed) {
            if (p_prev == NULL) { /* p is the head of list */
                thread_obj_list = p->next;
                bh_free(p);
                p = thread_obj_list;
            } else { /* p is not the head of list */
                p_prev->next = p->next;
                bh_free(p);
                p = p_prev->next;
            }
        } else {
            p_prev = p;
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_obj_lock);
}

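/* Reclamation is deferred: a thread cannot free its own k_thread object
   while it is still running, so vm_thread_cleanup() only marks the object
   to_be_freed and a later _vm_thread_create() call sweeps it here. */
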
int _vm_thread_sys_init(void)
{
    if (is_thread_sys_inited)
        return BHT_OK;

    k_mutex_init(&thread_data_lock);
    k_mutex_init(&thread_obj_lock);

    /* Initialize supervisor thread data */
    memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
    supervisor_thread_data.tid = k_current_get();
    /* Set as head of thread data list */
    thread_data_list = &supervisor_thread_data;

    is_thread_sys_inited = true;
    return BHT_OK;
}

void vm_thread_sys_destroy(void)
{
    if (is_thread_sys_inited) {
        is_thread_sys_inited = false;
    }
}

static bh_thread_data *
thread_data_current(void)
{
    k_tid_t tid = k_current_get();
    return thread_data_list_lookup(tid);
}

static void vm_thread_cleanup(void)
{
    bh_thread_data *thread_data = thread_data_current();

    bh_assert(thread_data != NULL);
    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (thread_data->thread_wait_list) {
        /* Signal each joining thread */
        bh_thread_wait_list head = thread_data->thread_wait_list;
        while (head) {
            bh_thread_wait_list next = head->next;
            k_sem_give(&head->sem);
            bh_free(head);
            head = next;
        }
        thread_data->thread_wait_list = NULL;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    thread_data_list_remove(thread_data);
    /* Mark the thread object as terminated so that a later
       _vm_thread_create() call can free it */
    ((bh_thread_obj *)thread_data->tid)->to_be_freed = true;
    bh_free(thread_data);
}

static void vm_thread_wrapper(void *start, void *arg, void *thread_data)
{
    /* Record the actual Zephyr thread ID and register the thread data */
    ((bh_thread_data *)thread_data)->tid = k_current_get();
    thread_data_list_add(thread_data);

    ((thread_start_routine_t)start)(arg);
    vm_thread_cleanup();
}

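/* Zephyr thread entry functions receive exactly three void * parameters;
   k_thread_create() below maps them to (start, arg, thread_data). */
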
int _vm_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
                      unsigned int stack_size)
{
    return _vm_thread_create_with_prio(p_tid, start, arg, stack_size,
                                       BH_THREAD_DEFAULT_PRIORITY);
}

int _vm_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
                                void *arg, unsigned int stack_size, int prio)
{
    korp_tid tid;
    bh_thread_data *thread_data;
    unsigned thread_data_size;

    if (!p_tid || !stack_size)
        return BHT_ERROR;

    /* Free the thread objects of terminated threads */
    thread_obj_list_reclaim();

    /* Create and initialize thread object */
    if (!(tid = bh_malloc(sizeof(bh_thread_obj))))
        return BHT_ERROR;

    memset(tid, 0, sizeof(bh_thread_obj));

    /* Create and initialize thread data; its trailing stack area
       serves as the new thread's stack */
    thread_data_size = offsetof(bh_thread_data, stack) + stack_size;
    if (!(thread_data = bh_malloc(thread_data_size))) {
        bh_free(tid);
        return BHT_ERROR;
    }

    memset(thread_data, 0, thread_data_size);
    k_mutex_init(&thread_data->wait_list_lock);
    thread_data->stack_size = stack_size;
    thread_data->tid = tid;

    /* Create the thread */
    if (!(tid = k_thread_create(tid, (k_thread_stack_t *)thread_data->stack,
                                stack_size, vm_thread_wrapper, start, arg,
                                thread_data, prio, 0, K_NO_WAIT))) {
        /* tid was overwritten with NULL on failure, so free the original
           thread object still recorded in thread_data->tid */
        bh_free(thread_data->tid);
        bh_free(thread_data);
        return BHT_ERROR;
    }

    bh_assert(tid == thread_data->tid);
    /* Register the thread data and thread object */
    thread_data_list_add(thread_data);
    thread_obj_list_add((bh_thread_obj *)tid);
    *p_tid = tid;
    return BHT_OK;
}

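/* A minimal usage sketch (the worker function and the 16 KB stack size
 * are hypothetical; any thread_start_routine_t and nonzero size work):
 *
 *     static void *worker(void *arg)
 *     {
 *         // ... do work ...
 *         return NULL;
 *     }
 *
 *     korp_tid tid;
 *     if (_vm_thread_create(&tid, worker, NULL, 16 * 1024) == BHT_OK)
 *         _vm_thread_join(tid, NULL, 5000);
 */
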
korp_tid _vm_self_thread(void)
{
    return (korp_tid)k_current_get();
}

void vm_thread_exit(void *code)
{
    korp_tid self = vm_self_thread();

    (void)code;
    vm_thread_cleanup();
    k_thread_abort((k_tid_t)self);
}

int _vm_thread_cancel(korp_tid thread)
{
    k_thread_abort((k_tid_t)thread);
    return 0;
}

int _vm_thread_join(korp_tid thread, void **value_ptr, int mills)
{
    bh_thread_data *thread_data;
    bh_thread_wait_node *node;

    (void)value_ptr;

    /* Create wait node and append it to the target thread's wait list */
    if (!(node = bh_malloc(sizeof(bh_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    /* Get thread data */
    thread_data = thread_data_list_lookup(thread);
    bh_assert(thread_data != NULL);

    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (!thread_data->thread_wait_list)
        thread_data->thread_wait_list = node;
    else {
        /* Add to end of waiting list */
        bh_thread_wait_node *p = thread_data->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    /* Wait on the semaphore, which the exiting thread gives in
       vm_thread_cleanup() */
    k_sem_take(&node->sem, mills);
    /* Wait some time for the thread to be actually terminated */
    k_sleep(100);
    return BHT_OK;
}

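/* Note the joiner never frees its wait node: vm_thread_cleanup() frees
   every node after signaling it. If k_sem_take() times out before the
   thread exits, the node simply stays on the list until then. */
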
int _vm_thread_detach(korp_tid thread)
{
    (void)thread;
    return BHT_OK;
}

void *_vm_tls_get(unsigned idx)
{
    bh_thread_data *thread_data;

    (void)idx;
    bh_assert(idx == 0);
    thread_data = thread_data_current();
    return thread_data ? thread_data->tlr : NULL;
}

int _vm_tls_put(unsigned idx, void *tls)
{
    bh_thread_data *thread_data;

    (void)idx;
    bh_assert(idx == 0);
    thread_data = thread_data_current();
    bh_assert(thread_data != NULL);
    thread_data->tlr = tls;
    return BHT_OK;
}

int _vm_mutex_init(korp_mutex *mutex)
{
    k_mutex_init(mutex);
    return BHT_OK;
}

int _vm_recursive_mutex_init(korp_mutex *mutex)
{
    k_mutex_init(mutex);
    return BHT_OK;
}

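/* Note: Zephyr mutexes allow the owning thread to lock recursively by
   default, which is why _vm_recursive_mutex_init() above is identical
   to _vm_mutex_init(). */
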
int _vm_mutex_destroy(korp_mutex *mutex)
{
    (void)mutex;
    return BHT_OK;
}

void vm_mutex_lock(korp_mutex *mutex)
{
    k_mutex_lock(mutex, K_FOREVER);
}

int vm_mutex_trylock(korp_mutex *mutex)
{
    return k_mutex_lock(mutex, K_NO_WAIT);
}

void vm_mutex_unlock(korp_mutex *mutex)
{
    k_mutex_unlock(mutex);
}

int _vm_sem_init(korp_sem *sem, unsigned int c)
{
    /* The semaphore starts at count 0 with a maximum count of c */
    k_sem_init(sem, 0, c);
    return BHT_OK;
}

int _vm_sem_destroy(korp_sem *sem)
{
    (void)sem;
    return BHT_OK;
}

int _vm_sem_wait(korp_sem *sem)
{
    return k_sem_take(sem, K_FOREVER);
}

int _vm_sem_reltimedwait(korp_sem *sem, int mills)
{
    return k_sem_take(sem, mills);
}

int _vm_sem_post(korp_sem *sem)
{
    k_sem_give(sem);
    return BHT_OK;
}

int _vm_cond_init(korp_cond *cond)
{
    k_mutex_init(&cond->wait_list_lock);
    cond->thread_wait_list = NULL;
    return BHT_OK;
}

int _vm_cond_destroy(korp_cond *cond)
{
    (void)cond;
    return BHT_OK;
}

static int _vm_cond_wait_internal(korp_cond *cond, korp_mutex *mutex,
                                  bool timed, int mills)
{
    bh_thread_wait_node *node;

    /* Create wait node and append it to wait list */
    if (!(node = bh_malloc(sizeof(bh_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (!cond->thread_wait_list)
        cond->thread_wait_list = node;
    else {
        /* Add to end of wait list */
        bh_thread_wait_node *p = cond->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&cond->wait_list_lock);

    /* Unlock the mutex, wait on the semaphore, then relock the mutex */
    k_mutex_unlock(mutex);
    k_sem_take(&node->sem, timed ? mills : K_FOREVER);
    k_mutex_lock(mutex, K_FOREVER);

    /* Remove wait node from wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list == node)
        cond->thread_wait_list = node->next;
    else {
        /* Remove from the wait list */
        bh_thread_wait_node *p = cond->thread_wait_list;
        while (p->next != node)
            p = p->next;
        p->next = node->next;
    }
    bh_free(node);
    k_mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}

int _vm_cond_wait(korp_cond *cond, korp_mutex *mutex)
{
    return _vm_cond_wait_internal(cond, mutex, false, 0);
}

int _vm_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, int mills)
{
    return _vm_cond_wait_internal(cond, mutex, true, mills);
}

int _vm_cond_signal(korp_cond *cond)
{
    /* Signal the head wait node of wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list)
        k_sem_give(&cond->thread_wait_list->sem);
    k_mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}

int _vm_cond_broadcast(korp_cond *cond)
{
    /* Signal each wait node of wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list) {
        bh_thread_wait_node *p = cond->thread_wait_list;
        while (p) {
            k_sem_give(&p->sem);
            p = p->next;
        }
    }
    k_mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}

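/* A minimal condition-variable usage sketch (the lock, cond, and ready
 * variables are hypothetical; the predicate loop guards against wakeups
 * that arrive before the waiter has taken the lock):
 *
 *     static korp_mutex lock;
 *     static korp_cond cond;
 *     static bool ready = false;
 *
 *     // Waiter
 *     vm_mutex_lock(&lock);
 *     while (!ready)
 *         _vm_cond_wait(&cond, &lock);
 *     vm_mutex_unlock(&lock);
 *
 *     // Signaler
 *     vm_mutex_lock(&lock);
 *     ready = true;
 *     _vm_cond_signal(&cond);
 *     vm_mutex_unlock(&lock);
 */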