zephyr_thread.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "platform_api_vmcore.h"
#include "platform_api_extension.h"

#define bh_assert(v)                                           \
    do {                                                       \
        if (!(v)) {                                            \
            printf("\nASSERTION FAILED: %s, at %s, line %d\n", \
                   #v, __FILE__, __LINE__);                    \
            abort();                                           \
        }                                                      \
    } while (0)

typedef struct os_thread_wait_node {
    struct k_sem sem;
    os_thread_wait_list next;
} os_thread_wait_node;

typedef struct os_thread_data {
    /* Next thread data */
    struct os_thread_data *next;
    /* Zephyr thread handle */
    korp_tid tid;
    /* Jeff thread local root */
    void *tlr;
    /* Lock for waiting list */
    struct k_mutex wait_list_lock;
    /* Waiting list of other threads who are joining this thread */
    os_thread_wait_list thread_wait_list;
    /* Thread stack size */
    unsigned stack_size;
    /* Thread stack, allocated inline after this header */
    char stack[1];
} os_thread_data;

typedef struct os_thread_obj {
    struct k_thread thread;
    /* Whether the thread has terminated and this thread object is
       to be freed in the future */
    bool to_be_freed;
    struct os_thread_obj *next;
} os_thread_obj;

static bool is_thread_sys_inited = false;

/* Thread data of supervisor thread */
static os_thread_data supervisor_thread_data;

/* Lock for thread data list */
static struct k_mutex thread_data_lock;

/* Thread data list */
static os_thread_data *thread_data_list = NULL;

/* Lock for thread object list */
static struct k_mutex thread_obj_lock;

/* Thread object list */
static os_thread_obj *thread_obj_list = NULL;
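
/*
 * Overview of the bookkeeping: thread_data_list maps a Zephyr thread id
 * to its os_thread_data (inline stack, join wait list, thread-local
 * root), while thread_obj_list owns the struct k_thread allocations.
 * A thread cannot free the k_thread it is still running on, so exiting
 * threads only mark their object to_be_freed; the memory is reclaimed
 * lazily by thread_obj_list_reclaim() on the next thread creation.
 */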

static void
thread_data_list_add(os_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (!thread_data_list)
        thread_data_list = thread_data;
    else {
        /* If already in list, just return */
        os_thread_data *p = thread_data_list;
        while (p) {
            if (p == thread_data) {
                k_mutex_unlock(&thread_data_lock);
                return;
            }
            p = p->next;
        }

        /* Set as head of list */
        thread_data->next = thread_data_list;
        thread_data_list = thread_data;
    }
    k_mutex_unlock(&thread_data_lock);
}

static void
thread_data_list_remove(os_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        if (thread_data_list == thread_data)
            thread_data_list = thread_data_list->next;
        else {
            /* Search and remove it from list */
            os_thread_data *p = thread_data_list;
            while (p && p->next != thread_data)
                p = p->next;
            if (p && p->next == thread_data)
                p->next = p->next->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
}

static os_thread_data *
thread_data_list_lookup(k_tid_t tid)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        os_thread_data *p = thread_data_list;
        while (p) {
            if (p->tid == tid) {
                /* Found */
                k_mutex_unlock(&thread_data_lock);
                return p;
            }
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
    return NULL;
}

static void
thread_obj_list_add(os_thread_obj *thread_obj)
{
    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    if (!thread_obj_list)
        thread_obj_list = thread_obj;
    else {
        /* Set as head of list */
        thread_obj->next = thread_obj_list;
        thread_obj_list = thread_obj;
    }
    k_mutex_unlock(&thread_obj_lock);
}

static void
thread_obj_list_reclaim(void)
{
    os_thread_obj *p, *p_prev;

    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    p_prev = NULL;
    p = thread_obj_list;
    while (p) {
        if (p->to_be_freed) {
            if (p_prev == NULL) { /* p is the head of list */
                thread_obj_list = p->next;
                BH_FREE(p);
                p = thread_obj_list;
            }
            else { /* p is not the head of list */
                p_prev->next = p->next;
                BH_FREE(p);
                p = p_prev->next;
            }
        }
        else {
            p_prev = p;
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_obj_lock);
}
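
/*
 * Note: reclamation runs only from os_thread_create_with_prio(), so the
 * k_thread object of the most recently exited thread stays allocated
 * until the next thread is created (or indefinitely, if none is).
 */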

int
os_thread_sys_init(void)
{
    if (is_thread_sys_inited)
        return BHT_OK;

    k_mutex_init(&thread_data_lock);
    k_mutex_init(&thread_obj_lock);

    /* Initialize supervisor thread data */
    memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
    supervisor_thread_data.tid = k_current_get();
    /* Init the wait-list lock so joining the supervisor thread is safe */
    k_mutex_init(&supervisor_thread_data.wait_list_lock);
    /* Set as head of thread data list */
    thread_data_list = &supervisor_thread_data;

    is_thread_sys_inited = true;
    return BHT_OK;
}
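
/*
 * Usage sketch (illustrative, not from the original file): the embedder
 * must call os_thread_sys_init() from its main/supervisor thread before
 * any other primitive here, since it registers the caller as the
 * supervisor thread and initializes the global list locks. For example:
 *
 *     void main(void)
 *     {
 *         if (os_thread_sys_init() != BHT_OK)
 *             return;
 *         ... create threads, use mutexes/conds ...
 *         os_thread_sys_destroy();
 *     }
 */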

void
os_thread_sys_destroy(void)
{
    if (is_thread_sys_inited) {
        is_thread_sys_inited = false;
    }
}

static os_thread_data *
thread_data_current(void)
{
    k_tid_t tid = k_current_get();
    return thread_data_list_lookup(tid);
}

static void
os_thread_cleanup(void)
{
    os_thread_data *thread_data = thread_data_current();

    bh_assert(thread_data != NULL);

    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (thread_data->thread_wait_list) {
        /* Signal each joining thread */
        os_thread_wait_list head = thread_data->thread_wait_list;
        while (head) {
            os_thread_wait_list next = head->next;
            k_sem_give(&head->sem);
            /* head will be freed by the joining thread */
            head = next;
        }
        thread_data->thread_wait_list = NULL;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    thread_data_list_remove(thread_data);
    /* Mark the thread object as terminated so that a later call to
       os_thread_create() can free it: a thread cannot free the
       k_thread struct it is still running on */
    ((os_thread_obj *)thread_data->tid)->to_be_freed = true;
    BH_FREE(thread_data);
}

static void
os_thread_wrapper(void *start, void *arg, void *thread_data)
{
    /* Set thread custom data */
    ((os_thread_data *)thread_data)->tid = k_current_get();
    thread_data_list_add(thread_data);

    ((thread_start_routine_t)start)(arg);
    os_thread_cleanup();
}

int
os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
                 unsigned int stack_size)
{
    return os_thread_create_with_prio(p_tid, start, arg, stack_size,
                                      BH_THREAD_DEFAULT_PRIORITY);
}

int
os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
                           void *arg, unsigned int stack_size, int prio)
{
    korp_tid tid, new_tid;
    os_thread_data *thread_data;
    unsigned thread_data_size;

    if (!p_tid || !stack_size)
        return BHT_ERROR;

    /* Free the thread objects of terminated threads */
    thread_obj_list_reclaim();

    /* Create and initialize thread object */
    if (!(tid = BH_MALLOC(sizeof(os_thread_obj))))
        return BHT_ERROR;

    memset(tid, 0, sizeof(os_thread_obj));

    if (stack_size < APP_THREAD_STACK_SIZE_MIN)
        stack_size = APP_THREAD_STACK_SIZE_MIN;

    /* Create and initialize thread data */
    thread_data_size = offsetof(os_thread_data, stack) + stack_size;
    if (!(thread_data = BH_MALLOC(thread_data_size))) {
        BH_FREE(tid);
        return BHT_ERROR;
    }

    memset(thread_data, 0, thread_data_size);
    k_mutex_init(&thread_data->wait_list_lock);
    thread_data->stack_size = stack_size;
    thread_data->tid = tid;

    /* Create the thread; k_thread_create() returns the id of the new
       thread, i.e. the k_thread object passed as its first argument.
       Keep tid untouched so the failure path frees the thread object
       itself rather than a NULL pointer. */
    new_tid = k_thread_create(tid, (k_thread_stack_t *)thread_data->stack,
                              stack_size, os_thread_wrapper, start, arg,
                              thread_data, prio, 0, K_NO_WAIT);
    if (!new_tid) {
        BH_FREE(tid);
        BH_FREE(thread_data);
        return BHT_ERROR;
    }

    bh_assert(new_tid == thread_data->tid);

    /* Set thread custom data */
    thread_data_list_add(thread_data);
    thread_obj_list_add((os_thread_obj *)tid);
    *p_tid = tid;
    return BHT_OK;
}
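
/*
 * Illustrative usage sketch (not part of the original file; guarded out
 * so it never affects the build). example_entry and example_spawn are
 * hypothetical names showing the create/join contract of this adapter.
 */
#if 0
static void *
example_entry(void *arg)
{
    os_printf("worker running, arg=%p\n", arg);
    return NULL; /* return value is ignored; os_thread_join discards it */
}

static int
example_spawn(void)
{
    korp_tid tid;

    if (os_thread_create(&tid, example_entry, NULL,
                         APP_THREAD_STACK_SIZE_MIN)
        != BHT_OK)
        return BHT_ERROR;

    /* Blocks until example_entry returns and the exit is signaled */
    return os_thread_join(tid, NULL);
}
#endif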

korp_tid
os_self_thread(void)
{
    return (korp_tid)k_current_get();
}

int
os_thread_join(korp_tid thread, void **value_ptr)
{
    os_thread_data *thread_data;
    os_thread_wait_node *node;

    (void)value_ptr;

    /* Create wait node and append it to wait list */
    if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    /* Get thread data */
    thread_data = thread_data_list_lookup(thread);
    bh_assert(thread_data != NULL);

    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (!thread_data->thread_wait_list)
        thread_data->thread_wait_list = node;
    else {
        /* Add to end of waiting list */
        os_thread_wait_node *p = thread_data->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    /* Wait on the semaphore until the thread signals its exit */
    k_sem_take(&node->sem, K_FOREVER);
    /* Wait some time for the thread to actually terminate */
    k_sleep(Z_TIMEOUT_MS(100));

    /* Free the wait node */
    BH_FREE(node);
    return BHT_OK;
}

int
os_mutex_init(korp_mutex *mutex)
{
    k_mutex_init(mutex);
    return BHT_OK;
}

int
os_recursive_mutex_init(korp_mutex *mutex)
{
    /* Zephyr's k_mutex already supports recursive locking */
    k_mutex_init(mutex);
    return BHT_OK;
}

int
os_mutex_destroy(korp_mutex *mutex)
{
    (void)mutex;
    return BHT_OK;
}

int
os_mutex_lock(korp_mutex *mutex)
{
    return k_mutex_lock(mutex, K_FOREVER);
}

int
os_mutex_unlock(korp_mutex *mutex)
{
#if KERNEL_VERSION_NUMBER >= 0x020200 /* version 2.2.0 */
    return k_mutex_unlock(mutex);
#else
    k_mutex_unlock(mutex);
    return 0;
#endif
}
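
/*
 * Note on the version guard above: k_mutex_unlock() returned void before
 * Zephyr 2.2.0 and an int afterwards, so the pre-2.2 branch fabricates a
 * success code to keep os_mutex_unlock()'s contract uniform.
 */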

int
os_cond_init(korp_cond *cond)
{
    k_mutex_init(&cond->wait_list_lock);
    cond->thread_wait_list = NULL;
    return BHT_OK;
}

int
os_cond_destroy(korp_cond *cond)
{
    (void)cond;
    return BHT_OK;
}

static int
os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed,
                      int mills)
{
    os_thread_wait_node *node;

    /* Create wait node and append it to wait list */
    if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (!cond->thread_wait_list)
        cond->thread_wait_list = node;
    else {
        /* Add to end of wait list */
        os_thread_wait_node *p = cond->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&cond->wait_list_lock);

    /* Unlock mutex, wait on the semaphore, then lock the mutex again */
    k_mutex_unlock(mutex);
    k_sem_take(&node->sem, timed ? Z_TIMEOUT_MS(mills) : K_FOREVER);
    k_mutex_lock(mutex, K_FOREVER);

    /* Remove wait node from wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list == node)
        cond->thread_wait_list = node->next;
    else {
        /* Remove from the wait list */
        os_thread_wait_node *p = cond->thread_wait_list;
        while (p->next != node)
            p = p->next;
        p->next = node->next;
    }
    BH_FREE(node);
    k_mutex_unlock(&cond->wait_list_lock);

    return BHT_OK;
}
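
/*
 * Note: os_cond_wait_internal() returns BHT_OK whether the node's
 * semaphore was signaled or a timed wait simply expired, so callers
 * cannot distinguish wakeup from timeout here. As with POSIX condition
 * variables, the caller must re-check its predicate in a loop (see the
 * sketch after os_cond_signal() below).
 */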

int
os_cond_wait(korp_cond *cond, korp_mutex *mutex)
{
    return os_cond_wait_internal(cond, mutex, false, 0);
}

int
os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
{
    if (useconds == BHT_WAIT_FOREVER) {
        return os_cond_wait_internal(cond, mutex, false, 0);
    }
    else {
        uint64 mills_64 = useconds / 1000;
        int32 mills;

        if (mills_64 < (uint64)INT32_MAX) {
            mills = (int32)mills_64;
        }
        else {
            mills = INT32_MAX;
            os_printf("Warning: os_cond_reltimedwait exceeds limit, "
                      "set to max timeout instead\n");
        }
        return os_cond_wait_internal(cond, mutex, true, mills);
    }
}

int
os_cond_signal(korp_cond *cond)
{
    /* Signal the head wait node of wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list)
        k_sem_give(&cond->thread_wait_list->sem);
    k_mutex_unlock(&cond->wait_list_lock);

    return BHT_OK;
}
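
/*
 * Illustrative usage sketch (not part of the original file; guarded out
 * so it never affects the build). It shows the guarded-wait pattern the
 * primitives above support; names prefixed example_ are hypothetical,
 * and example_lock/example_cond are assumed to have been set up with
 * os_mutex_init()/os_cond_init() during startup.
 */
#if 0
static korp_mutex example_lock;
static korp_cond example_cond;
static bool example_ready = false;

static void
example_consumer(void)
{
    os_mutex_lock(&example_lock);
    /* Loop: a wakeup only means "re-check", never "condition holds" */
    while (!example_ready)
        os_cond_wait(&example_cond, &example_lock);
    os_mutex_unlock(&example_lock);
}

static void
example_producer(void)
{
    os_mutex_lock(&example_lock);
    example_ready = true;
    /* Wakes the head waiter; it re-checks example_ready under the lock */
    os_cond_signal(&example_cond);
    os_mutex_unlock(&example_lock);
}
#endif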

uint8 *
os_thread_get_stack_boundary(void)
{
#if defined(CONFIG_THREAD_STACK_INFO)
    korp_tid thread = k_current_get();
    return (uint8 *)thread->stack_info.start;
#else
    return NULL;
#endif
}