/* zephyr_thread.c */

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "platform_api_vmcore.h"
#include "platform_api_extension.h"

/* clang-format off */
#define bh_assert(v) do {                                  \
    if (!(v)) {                                            \
        printf("\nASSERTION FAILED: %s, at %s, line %d\n", \
               #v, __FILE__, __LINE__);                    \
        abort();                                           \
    }                                                      \
} while (0)
/* clang-format on */

#if defined(CONFIG_ARM_MPU) || defined(CONFIG_ARC_MPU) \
    || KERNEL_VERSION_NUMBER > 0x020300 /* version 2.3.0 */
#define BH_ENABLE_ZEPHYR_MPU_STACK 1
#elif !defined(BH_ENABLE_ZEPHYR_MPU_STACK)
#define BH_ENABLE_ZEPHYR_MPU_STACK 0
#endif

#if !defined(BH_ZEPHYR_MPU_STACK_SIZE)
#define BH_ZEPHYR_MPU_STACK_SIZE APP_THREAD_STACK_SIZE_MIN
#endif

#if !defined(BH_ZEPHYR_MPU_STACK_COUNT)
#define BH_ZEPHYR_MPU_STACK_COUNT 4
#endif

#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
static K_THREAD_STACK_ARRAY_DEFINE(mpu_stacks, BH_ZEPHYR_MPU_STACK_COUNT,
                                   BH_ZEPHYR_MPU_STACK_SIZE);
static bool mpu_stack_allocated[BH_ZEPHYR_MPU_STACK_COUNT];
static struct k_mutex mpu_stack_lock;

static char *
mpu_stack_alloc()
{
    int i;

    k_mutex_lock(&mpu_stack_lock, K_FOREVER);
    for (i = 0; i < BH_ZEPHYR_MPU_STACK_COUNT; i++) {
        if (!mpu_stack_allocated[i]) {
            mpu_stack_allocated[i] = true;
            k_mutex_unlock(&mpu_stack_lock);
            return (char *)mpu_stacks[i];
        }
    }
    k_mutex_unlock(&mpu_stack_lock);
    return NULL;
}

static void
mpu_stack_free(char *stack)
{
    int i;

    k_mutex_lock(&mpu_stack_lock, K_FOREVER);
    for (i = 0; i < BH_ZEPHYR_MPU_STACK_COUNT; i++) {
        if ((char *)mpu_stacks[i] == stack)
            mpu_stack_allocated[i] = false;
    }
    k_mutex_unlock(&mpu_stack_lock);
}
#endif
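
/*
 * Usage note (illustrative sketch, not part of the original file): the pool
 * above hands out a fixed number of MPU-compatible stacks, so each successful
 * mpu_stack_alloc() is expected to be balanced later by mpu_stack_free() with
 * the same pointer, roughly:
 *
 *     char *stack = mpu_stack_alloc();
 *     if (stack) {
 *         ... pass the stack to k_thread_create() ...
 *         mpu_stack_free(stack);
 *     }
 */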

typedef struct os_thread_wait_node {
    struct k_sem sem;
    os_thread_wait_list next;
} os_thread_wait_node;

typedef struct os_thread_data {
    /* Next thread data */
    struct os_thread_data *next;
    /* Zephyr thread handle */
    korp_tid tid;
    /* Jeff thread local root */
    void *tlr;
    /* Lock for waiting list */
    struct k_mutex wait_list_lock;
    /* Waiting list of other threads who are joining this thread */
    os_thread_wait_list thread_wait_list;
    /* Thread stack size */
    unsigned stack_size;
#if BH_ENABLE_ZEPHYR_MPU_STACK == 0
    /* Thread stack */
    char stack[1];
#else
    char *stack;
#endif
} os_thread_data;

typedef struct os_thread_obj {
    struct k_thread thread;
    /* Whether the thread is terminated and this thread object is to
       be freed in the future. */
    bool to_be_freed;
    struct os_thread_obj *next;
} os_thread_obj;

static bool is_thread_sys_inited = false;

/* Thread data of supervisor thread */
static os_thread_data supervisor_thread_data;

/* Lock for thread data list */
static struct k_mutex thread_data_lock;

/* Thread data list */
static os_thread_data *thread_data_list = NULL;

/* Lock for thread object list */
static struct k_mutex thread_obj_lock;

/* Thread object list */
static os_thread_obj *thread_obj_list = NULL;

static void
thread_data_list_add(os_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (!thread_data_list)
        thread_data_list = thread_data;
    else {
        /* If already in list, just return */
        os_thread_data *p = thread_data_list;
        while (p) {
            if (p == thread_data) {
                k_mutex_unlock(&thread_data_lock);
                return;
            }
            p = p->next;
        }

        /* Set as head of list */
        thread_data->next = thread_data_list;
        thread_data_list = thread_data;
    }
    k_mutex_unlock(&thread_data_lock);
}

static void
thread_data_list_remove(os_thread_data *thread_data)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        if (thread_data_list == thread_data)
            thread_data_list = thread_data_list->next;
        else {
            /* Search and remove it from list */
            os_thread_data *p = thread_data_list;
            while (p && p->next != thread_data)
                p = p->next;
            if (p && p->next == thread_data)
                p->next = p->next->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
}

static os_thread_data *
thread_data_list_lookup(k_tid_t tid)
{
    k_mutex_lock(&thread_data_lock, K_FOREVER);
    if (thread_data_list) {
        os_thread_data *p = thread_data_list;
        while (p) {
            if (p->tid == tid) {
                /* Found */
                k_mutex_unlock(&thread_data_lock);
                return p;
            }
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_data_lock);
    return NULL;
}

static void
thread_obj_list_add(os_thread_obj *thread_obj)
{
    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    if (!thread_obj_list)
        thread_obj_list = thread_obj;
    else {
        /* Set as head of list */
        thread_obj->next = thread_obj_list;
        thread_obj_list = thread_obj;
    }
    k_mutex_unlock(&thread_obj_lock);
}

static void
thread_obj_list_reclaim()
{
    os_thread_obj *p, *p_prev;

    k_mutex_lock(&thread_obj_lock, K_FOREVER);
    p_prev = NULL;
    p = thread_obj_list;
    while (p) {
        if (p->to_be_freed) {
            if (p_prev == NULL) { /* p is the head of list */
                thread_obj_list = p->next;
                BH_FREE(p);
                p = thread_obj_list;
            }
            else { /* p is not the head of list */
                p_prev->next = p->next;
                BH_FREE(p);
                p = p_prev->next;
            }
        }
        else {
            p_prev = p;
            p = p->next;
        }
    }
    k_mutex_unlock(&thread_obj_lock);
}

int
os_thread_sys_init()
{
    if (is_thread_sys_inited)
        return BHT_OK;

#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
    k_mutex_init(&mpu_stack_lock);
#endif
    k_mutex_init(&thread_data_lock);
    k_mutex_init(&thread_obj_lock);

    /* Initialize supervisor thread data */
    memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
    supervisor_thread_data.tid = k_current_get();
    /* Set as head of thread data list */
    thread_data_list = &supervisor_thread_data;

    is_thread_sys_inited = true;
    return BHT_OK;
}
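
/*
 * Note (illustrative, not part of the original file): the list locks and the
 * supervisor thread data are only set up in os_thread_sys_init(), so callers
 * are expected to run it once before any other os_thread_* API, e.g.:
 *
 *     os_thread_sys_init();
 *     os_thread_create(&tid, worker, NULL, APP_THREAD_STACK_SIZE_MIN);
 *
 * where "worker" is a hypothetical thread_start_routine_t.
 */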

void
os_thread_sys_destroy(void)
{
    if (is_thread_sys_inited) {
        is_thread_sys_inited = false;
    }
}

static os_thread_data *
thread_data_current()
{
    k_tid_t tid = k_current_get();
    return thread_data_list_lookup(tid);
}

static void
os_thread_cleanup(void)
{
    os_thread_data *thread_data = thread_data_current();

    bh_assert(thread_data != NULL);

    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (thread_data->thread_wait_list) {
        /* Signal each joining thread */
        os_thread_wait_list head = thread_data->thread_wait_list;
        while (head) {
            os_thread_wait_list next = head->next;
            k_sem_give(&head->sem);
            /* head will be freed by joining thread */
            head = next;
        }
        thread_data->thread_wait_list = NULL;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    thread_data_list_remove(thread_data);
    /* Set flag to true for the next thread creating to
       free the thread object */
    ((os_thread_obj *)thread_data->tid)->to_be_freed = true;
#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
    mpu_stack_free(thread_data->stack);
#endif
    BH_FREE(thread_data);
}

static void
os_thread_wrapper(void *start, void *arg, void *thread_data)
{
    /* Set thread custom data */
    ((os_thread_data *)thread_data)->tid = k_current_get();
    thread_data_list_add(thread_data);

    ((thread_start_routine_t)start)(arg);
    os_thread_cleanup();
}

int
os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
                 unsigned int stack_size)
{
    return os_thread_create_with_prio(p_tid, start, arg, stack_size,
                                      BH_THREAD_DEFAULT_PRIORITY);
}

int
os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
                           void *arg, unsigned int stack_size, int prio)
{
    korp_tid tid;
    os_thread_data *thread_data;
    unsigned thread_data_size;

    if (!p_tid || !stack_size)
        return BHT_ERROR;

    /* Free the thread objects of terminated threads */
    thread_obj_list_reclaim();

    /* Create and initialize thread object */
    if (!(tid = BH_MALLOC(sizeof(os_thread_obj))))
        return BHT_ERROR;

    memset(tid, 0, sizeof(os_thread_obj));

    /* Create and initialize thread data */
#if BH_ENABLE_ZEPHYR_MPU_STACK == 0
    if (stack_size < APP_THREAD_STACK_SIZE_MIN)
        stack_size = APP_THREAD_STACK_SIZE_MIN;
    thread_data_size = offsetof(os_thread_data, stack) + stack_size;
#else
    stack_size = BH_ZEPHYR_MPU_STACK_SIZE;
    thread_data_size = sizeof(os_thread_data);
#endif
    if (!(thread_data = BH_MALLOC(thread_data_size))) {
        goto fail1;
    }

    memset(thread_data, 0, thread_data_size);
    k_mutex_init(&thread_data->wait_list_lock);
    thread_data->stack_size = stack_size;
    thread_data->tid = tid;

#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
    if (!(thread_data->stack = mpu_stack_alloc())) {
        goto fail2;
    }
#endif

    /* Create the thread */
    if (!((tid = k_thread_create(tid, (k_thread_stack_t *)thread_data->stack,
                                 stack_size, os_thread_wrapper, start, arg,
                                 thread_data, prio, 0, K_NO_WAIT)))) {
        goto fail3;
    }

    bh_assert(tid == thread_data->tid);

    /* Set thread custom data */
    thread_data_list_add(thread_data);
    thread_obj_list_add((os_thread_obj *)tid);
    *p_tid = tid;
    return BHT_OK;

fail3:
#if BH_ENABLE_ZEPHYR_MPU_STACK != 0
    mpu_stack_free(thread_data->stack);
fail2:
#endif
    BH_FREE(thread_data);
fail1:
    BH_FREE(tid);

    return BHT_ERROR;
}
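
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * normally pairs os_thread_create() with os_thread_join(); "worker" is a
 * hypothetical thread_start_routine_t:
 *
 *     korp_tid tid;
 *     if (os_thread_create(&tid, worker, NULL,
 *                          APP_THREAD_STACK_SIZE_MIN) == BHT_OK) {
 *         os_thread_join(tid, NULL);
 *     }
 */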

korp_tid
os_self_thread()
{
    return (korp_tid)k_current_get();
}

int
os_thread_join(korp_tid thread, void **value_ptr)
{
    (void)value_ptr;
    os_thread_data *thread_data;
    os_thread_wait_node *node;

    /* Create wait node and append it to wait list */
    if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    /* Get thread data */
    thread_data = thread_data_list_lookup(thread);
    bh_assert(thread_data != NULL);

    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
    if (!thread_data->thread_wait_list)
        thread_data->thread_wait_list = node;
    else {
        /* Add to end of waiting list */
        os_thread_wait_node *p = thread_data->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&thread_data->wait_list_lock);

    /* Wait the sem */
    k_sem_take(&node->sem, K_FOREVER);

    /* Wait some time for the thread to be actually terminated */
    k_sleep(Z_TIMEOUT_MS(100));

    /* Destroy resource */
    BH_FREE(node);
    return BHT_OK;
}

int
os_mutex_init(korp_mutex *mutex)
{
    k_mutex_init(mutex);
    return BHT_OK;
}

int
os_recursive_mutex_init(korp_mutex *mutex)
{
    k_mutex_init(mutex);
    return BHT_OK;
}

int
os_mutex_destroy(korp_mutex *mutex)
{
    (void)mutex;
    return BHT_OK;
}

int
os_mutex_lock(korp_mutex *mutex)
{
    return k_mutex_lock(mutex, K_FOREVER);
}

int
os_mutex_unlock(korp_mutex *mutex)
{
#if KERNEL_VERSION_NUMBER >= 0x020200 /* version 2.2.0 */
    return k_mutex_unlock(mutex);
#else
    k_mutex_unlock(mutex);
    return 0;
#endif
}
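
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * wrappers above map korp_mutex directly onto Zephyr's k_mutex, so the usual
 * lock/unlock pairing applies:
 *
 *     korp_mutex lock;
 *     os_mutex_init(&lock);
 *     os_mutex_lock(&lock);
 *     ... critical section ...
 *     os_mutex_unlock(&lock);
 *     os_mutex_destroy(&lock);
 */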

int
os_cond_init(korp_cond *cond)
{
    k_mutex_init(&cond->wait_list_lock);
    cond->thread_wait_list = NULL;
    return BHT_OK;
}

int
os_cond_destroy(korp_cond *cond)
{
    (void)cond;
    return BHT_OK;
}

static int
os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed, int mills)
{
    os_thread_wait_node *node;

    /* Create wait node and append it to wait list */
    if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
        return BHT_ERROR;

    k_sem_init(&node->sem, 0, 1);
    node->next = NULL;

    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (!cond->thread_wait_list)
        cond->thread_wait_list = node;
    else {
        /* Add to end of wait list */
        os_thread_wait_node *p = cond->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    k_mutex_unlock(&cond->wait_list_lock);

    /* Unlock mutex, wait sem and lock mutex again */
    k_mutex_unlock(mutex);
    k_sem_take(&node->sem, timed ? Z_TIMEOUT_MS(mills) : K_FOREVER);
    k_mutex_lock(mutex, K_FOREVER);

    /* Remove wait node from wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list == node)
        cond->thread_wait_list = node->next;
    else {
        /* Remove from the wait list */
        os_thread_wait_node *p = cond->thread_wait_list;
        while (p->next != node)
            p = p->next;
        p->next = node->next;
    }
    BH_FREE(node);
    k_mutex_unlock(&cond->wait_list_lock);

    return BHT_OK;
}

int
os_cond_wait(korp_cond *cond, korp_mutex *mutex)
{
    return os_cond_wait_internal(cond, mutex, false, 0);
}

int
os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
{
    if (useconds == BHT_WAIT_FOREVER) {
        return os_cond_wait_internal(cond, mutex, false, 0);
    }
    else {
        uint64 mills_64 = useconds / 1000;
        int32 mills;

        if (mills_64 < (uint64)INT32_MAX) {
            mills = (int32)mills_64;
        }
        else {
            mills = INT32_MAX;
            os_printf("Warning: os_cond_reltimedwait exceeds limit, "
                      "set to max timeout instead\n");
        }
        return os_cond_wait_internal(cond, mutex, true, mills);
    }
}

int
os_cond_signal(korp_cond *cond)
{
    /* Signal the head wait node of wait list */
    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
    if (cond->thread_wait_list)
        k_sem_give(&cond->thread_wait_list->sem);
    k_mutex_unlock(&cond->wait_list_lock);

    return BHT_OK;
}
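
/*
 * Usage sketch (illustrative only, not part of the original file): as with a
 * POSIX condition variable, the conventional pattern is to hold the mutex and
 * re-check the predicate in a loop around os_cond_wait(); "ready" is a
 * hypothetical flag protected by "lock":
 *
 *     // waiting side
 *     os_mutex_lock(&lock);
 *     while (!ready)
 *         os_cond_wait(&cond, &lock);
 *     os_mutex_unlock(&lock);
 *
 *     // signaling side
 *     os_mutex_lock(&lock);
 *     ready = true;
 *     os_cond_signal(&cond);
 *     os_mutex_unlock(&lock);
 */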

uint8 *
os_thread_get_stack_boundary()
{
#if defined(CONFIG_THREAD_STACK_INFO)
    korp_tid thread = k_current_get();
    return (uint8 *)thread->stack_info.start;
#else
    return NULL;
#endif
}