zephyr_thread.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "platform_api_vmcore.h"
#include "platform_api_extension.h"

#define bh_assert(v) do {                                  \
    if (!(v)) {                                            \
        printf("\nASSERTION FAILED: %s, at %s, line %d\n", \
               #v, __FILE__, __LINE__);                    \
        abort();                                           \
    }                                                      \
} while (0)

typedef struct os_thread_wait_node {
    struct k_sem sem;
    os_thread_wait_list next;
} os_thread_wait_node;

typedef struct os_thread_data {
    /* Next thread data */
    struct os_thread_data *next;
    /* Zephyr thread handle */
    korp_tid tid;
    /* Jeff thread local root */
    void *tlr;
    /* Lock for waiting list */
    struct k_mutex wait_list_lock;
    /* Waiting list of other threads who are joining this thread */
    os_thread_wait_list thread_wait_list;
    /* Thread stack size */
    unsigned stack_size;
    /* Thread stack */
    char stack[1];
} os_thread_data;

typedef struct os_thread_obj {
    struct k_thread thread;
    /* Whether the thread is terminated and this thread object is to
       be freed in the future. */
    bool to_be_freed;
    struct os_thread_obj *next;
} os_thread_obj;

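/* Note: struct k_thread must remain the first member of os_thread_obj,
   since the Zephyr thread id (k_tid_t) returned by k_thread_create()
   points at that embedded struct k_thread. os_thread_cleanup() relies
   on this layout when it casts a korp_tid back to an os_thread_obj *
   to set the to_be_freed flag. */
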
static bool is_thread_sys_inited = false;

/* Thread data of supervisor thread */
static os_thread_data supervisor_thread_data;

/* Lock for thread data list */
static struct k_mutex thread_data_lock;

/* Thread data list */
static os_thread_data *thread_data_list = NULL;

/* Lock for thread object list */
static struct k_mutex thread_obj_lock;

/* Thread object list */
static os_thread_obj *thread_obj_list = NULL;

static void thread_data_list_add(os_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (!thread_data_list)
        thread_data_list = thread_data;
    else {
        /* If already in list, just return */
        os_thread_data *p = thread_data_list;
        while (p) {
            if (p == thread_data) {
                k_mutex_unlock(&thread_data_lock);
                return;
            }
            p = p->next;
        }

        /* Set as head of list */
        thread_data->next = thread_data_list;
        thread_data_list = thread_data;
    }
    k_mutex_unlock(&thread_data_lock);
}

static void thread_data_list_remove(os_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        if (thread_data_list == thread_data)
            thread_data_list = thread_data_list->next;
        else {
            /* Search and remove it from list */
            os_thread_data *p = thread_data_list;
            while (p && p->next != thread_data)
                p = p->next;
            if (p && p->next == thread_data)
                p->next = p->next->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
}

static os_thread_data *
thread_data_list_lookup(k_tid_t tid)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        os_thread_data *p = thread_data_list;
        while (p) {
            if (p->tid == tid) {
                /* Found */
                k_mutex_unlock(&thread_data_lock);
                return p;
            }
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
    return NULL;
}

static void thread_obj_list_add(os_thread_obj *thread_obj)
{
    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    if (!thread_obj_list)
        thread_obj_list = thread_obj;
    else {
        /* Set as head of list */
        thread_obj->next = thread_obj_list;
        thread_obj_list = thread_obj;
    }
    k_mutex_unlock(&thread_obj_lock);
}

static void thread_obj_list_reclaim(void)
{
    os_thread_obj *p, *p_prev;

    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    p_prev = NULL;
    p = thread_obj_list;
    while (p) {
        if (p->to_be_freed) {
            if (p_prev == NULL) { /* p is the head of list */
                thread_obj_list = p->next;
                BH_FREE(p);
                p = thread_obj_list;
            } else { /* p is not the head of list */
                p_prev->next = p->next;
                BH_FREE(p);
                p = p_prev->next;
            }
        } else {
            p_prev = p;
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_obj_lock);
}

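/* Design note: a Zephyr thread cannot free the struct k_thread it is
   still running on, so os_thread_cleanup() only marks the thread object
   with to_be_freed rather than freeing it. The object is actually
   released here, on the next call into os_thread_create_with_prio(),
   once the owning thread has terminated. */
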
int os_thread_sys_init(void)
{
    if (is_thread_sys_inited)
        return BHT_OK;

    k_mutex_init(&thread_data_lock);
    k_mutex_init(&thread_obj_lock);

    /* Initialize supervisor thread data */
    memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
    supervisor_thread_data.tid = k_current_get();
    /* Initialize the wait-list lock so that other threads may safely
       join the supervisor thread */
    k_mutex_init(&supervisor_thread_data.wait_list_lock);
    /* Set as head of thread data list */
    thread_data_list = &supervisor_thread_data;

    is_thread_sys_inited = true;
    return BHT_OK;
}

void os_thread_sys_destroy(void)
{
    if (is_thread_sys_inited) {
        is_thread_sys_inited = false;
    }
}

static os_thread_data *
thread_data_current(void)
{
    k_tid_t tid = k_current_get();
    return thread_data_list_lookup(tid);
}

static void os_thread_cleanup(void)
{
    os_thread_data *thread_data = thread_data_current();

    bh_assert(thread_data != NULL);
    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (thread_data->thread_wait_list) {
        /* Signal each joining thread */
        os_thread_wait_list head = thread_data->thread_wait_list;
        while (head) {
            os_thread_wait_list next = head->next;
            k_sem_give(&head->sem);
            /* head will be freed by joining thread */
            head = next;
        }
        thread_data->thread_wait_list = NULL;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    thread_data_list_remove(thread_data);
    /* Set the flag so that the next thread creation can free
       this thread object */
    ((os_thread_obj *)thread_data->tid)->to_be_freed = true;
    BH_FREE(thread_data);
}

static void os_thread_wrapper(void *start, void *arg, void *thread_data)
{
    /* Set thread custom data */
    ((os_thread_data *)thread_data)->tid = k_current_get();
    thread_data_list_add(thread_data);

    ((thread_start_routine_t)start)(arg);
    os_thread_cleanup();
}

int os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
                     unsigned int stack_size)
{
    return os_thread_create_with_prio(p_tid, start, arg, stack_size,
                                      BH_THREAD_DEFAULT_PRIORITY);
}

int os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
                               void *arg, unsigned int stack_size, int prio)
{
    korp_tid tid;
    os_thread_data *thread_data;
    unsigned thread_data_size;

    if (!p_tid || !stack_size)
        return BHT_ERROR;

    /* Free the thread objects of terminated threads */
    thread_obj_list_reclaim();

    /* Create and initialize thread object */
    if (!(tid = BH_MALLOC(sizeof(os_thread_obj))))
        return BHT_ERROR;

    memset(tid, 0, sizeof(os_thread_obj));

    /* Create and initialize thread data */
    thread_data_size = offsetof(os_thread_data, stack) + stack_size;
    if (!(thread_data = BH_MALLOC(thread_data_size))) {
        BH_FREE(tid);
        return BHT_ERROR;
    }

    memset(thread_data, 0, thread_data_size);
    k_mutex_init(&thread_data->wait_list_lock);
    thread_data->stack_size = stack_size;
    thread_data->tid = tid;

    /* Create the thread */
    if (!((tid = k_thread_create(tid, (k_thread_stack_t *)thread_data->stack,
                                 stack_size, os_thread_wrapper, start, arg,
                                 thread_data, prio, 0, K_NO_WAIT)))) {
        /* tid has been overwritten by the (NULL) return value, so free
           the thread object through the copy kept in thread_data->tid */
        BH_FREE(thread_data->tid);
        BH_FREE(thread_data);
        return BHT_ERROR;
    }

    bh_assert(tid == thread_data->tid);

    /* Set thread custom data */
    thread_data_list_add(thread_data);
    thread_obj_list_add((os_thread_obj *)tid);
    *p_tid = tid;
    return BHT_OK;
}

korp_tid os_self_thread(void)
{
    return (korp_tid)k_current_get();
}

int os_thread_join(korp_tid thread, void **value_ptr)
{
    os_thread_data *thread_data;
    os_thread_wait_node *node;

    (void)value_ptr;

    /* Create wait node and append it to wait list */
    if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    /* Get thread data */
    thread_data = thread_data_list_lookup(thread);
    bh_assert(thread_data != NULL);

    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (!thread_data->thread_wait_list)
        thread_data->thread_wait_list = node;
    else {
        /* Add to end of waiting list */
        os_thread_wait_node *p = thread_data->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    /* Wait on the semaphore */
    k_sem_take(&node->sem, K_FOREVER);
    /* Wait some time for the thread to be actually terminated */
    k_sleep(Z_TIMEOUT_MS(100));

    /* Destroy resource */
    BH_FREE(node);
    return BHT_OK;
}

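/* Join protocol: each joiner allocates its own wait node with a binary
   semaphore and links it onto the target's wait list; os_thread_cleanup()
   gives every queued semaphore when the target's start routine returns.
   Because the semaphore is given from inside the still-running target
   thread, the 100 ms sleep above grants it time to actually terminate
   before the joiner proceeds. */
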
int os_mutex_init(korp_mutex *mutex)
{
    k_mutex_init(mutex);
    return BHT_OK;
}

int os_recursive_mutex_init(korp_mutex *mutex)
{
    /* A Zephyr k_mutex already permits recursive locking by its owner,
       so the recursive variant needs no extra setup */
    k_mutex_init(mutex);
    return BHT_OK;
}

int os_mutex_destroy(korp_mutex *mutex)
{
    (void)mutex;
    return BHT_OK;
}

int os_mutex_lock(korp_mutex *mutex)
{
    return k_mutex_lock(mutex, K_FOREVER);
}

int os_mutex_unlock(korp_mutex *mutex)
{
#if KERNEL_VERSION_NUMBER >= 0x020200 /* version 2.2.0 */
    return k_mutex_unlock(mutex);
#else
    k_mutex_unlock(mutex);
    return 0;
#endif
}

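/* k_mutex_unlock() gained an int return value in Zephyr 2.2.0; on older
   kernels it returns void, so the #else branch fabricates a success code. */
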
int os_cond_init(korp_cond *cond)
{
    k_mutex_init(&cond->wait_list_lock);
    cond->thread_wait_list = NULL;
    return BHT_OK;
}

int os_cond_destroy(korp_cond *cond)
{
    (void)cond;
    return BHT_OK;
}

static int os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex,
                                 bool timed, int mills)
{
    os_thread_wait_node *node;

    /* Create wait node and append it to wait list */
    if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (!cond->thread_wait_list)
        cond->thread_wait_list = node;
    else {
        /* Add to end of wait list */
        os_thread_wait_node *p = cond->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&cond->wait_list_lock);

    /* Unlock mutex, wait sem and lock mutex again */
    k_mutex_unlock(mutex);
    k_sem_take(&node->sem, timed ? Z_TIMEOUT_MS(mills) : K_FOREVER);
    k_mutex_lock(mutex, K_FOREVER);

    /* Remove wait node from wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list == node)
        cond->thread_wait_list = node->next;
    else {
        /* Remove from the wait list */
        os_thread_wait_node *p = cond->thread_wait_list;
        while (p->next != node)
            p = p->next;
        p->next = node->next;
    }
    BH_FREE(node);
    k_mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}

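/* Condition-variable emulation: waiters queue per-thread binary semaphores
   in FIFO order and block on their own semaphore with the user mutex
   released; os_cond_signal() wakes only the head waiter (this file provides
   no broadcast). A timed-out waiter simply unlinks its node and still
   returns BHT_OK, so callers must re-check their predicate after waking,
   as with any condition variable. */
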
int os_cond_wait(korp_cond *cond, korp_mutex *mutex)
{
    return os_cond_wait_internal(cond, mutex, false, 0);
}

int os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
{
    if (useconds == BHT_WAIT_FOREVER) {
        return os_cond_wait_internal(cond, mutex, false, 0);
    }
    else {
        uint64 mills_64 = useconds / 1000;
        int32 mills;

        if (mills_64 < (uint64)INT32_MAX) {
            mills = (int32)mills_64;
        }
        else {
            mills = INT32_MAX;
            os_printf("Warning: os_cond_reltimedwait exceeds limit, "
                      "set to max timeout instead\n");
        }
        return os_cond_wait_internal(cond, mutex, true, mills);
    }
}

int os_cond_signal(korp_cond *cond)
{
    /* Signal the head wait node of wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list)
        k_sem_give(&cond->thread_wait_list->sem);
    k_mutex_unlock(&cond->wait_list_lock);

    return BHT_OK;
}

uint8 *os_thread_get_stack_boundary(void)
{
#if defined(CONFIG_THREAD_STACK_INFO)
    korp_tid thread = k_current_get();
    return (uint8 *)thread->stack_info.start;
#else
    return NULL;
#endif
}
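
/* Usage sketch (not compiled): a minimal example of how an embedder might
   drive the thread API above. The `worker` routine, `example` wrapper and
   the 4096-byte stack size are assumptions for illustration, not part of
   this platform layer. */
#if 0
static void *worker(void *arg)
{
    /* Runs inside a thread spawned by os_thread_create() */
    os_printf("worker started, arg=%p, tid=%p\n",
              arg, (void *)os_self_thread());
    return NULL;
}

static void example(void)
{
    korp_tid tid;

    os_thread_sys_init();
    if (os_thread_create(&tid, worker, NULL, 4096) == BHT_OK) {
        /* Blocks until worker returns and os_thread_cleanup() signals us */
        os_thread_join(tid, NULL);
    }
    os_thread_sys_destroy();
}
#endif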