riot_thread.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * Copyright (C) 2020 TU Bergakademie Freiberg Karl Fessel
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "platform_api_vmcore.h"
#include "platform_api_extension.h"

#include <panic.h>

/* clang-format off */
#define bh_assert(v) do {                                  \
    if (!(v)) {                                            \
        printf("\nASSERTION FAILED: %s, at %s, line %d\n", \
               #v, __FILE__, __LINE__);                    \
        core_panic(0, 0 /* expr_string */);                \
        while (1);                                         \
    }                                                      \
} while (0)
/* clang-format on */
struct os_thread_data;

typedef struct os_thread_wait_node {
    sema_t sem;
    void *ret;
    os_thread_wait_list next;
} os_thread_wait_node;

/* All information a thread needs to clean up after itself */
typedef struct os_thread_data {
    /* Next thread data */
    struct os_thread_data *next;
    /* Thread handle */
    kernel_pid_t tid;
    /* Thread start routine */
    thread_start_routine_t start_routine;
    /* Thread start routine argument */
    void *arg;
    /* Thread local root */
    void *tlr;
    /* Lock for waiting list */
    mutex_t wait_list_lock;
    /* Waiting list of other threads that are joining this thread */
    os_thread_wait_list thread_wait_list;
    /* Thread stack size */
    unsigned stack_size;
    /* Thread stack */
    char stack[1];
} os_thread_data;

typedef struct os_thread_obj {
    korp_tid thread;
    /* Whether the thread is terminated and this thread object is to
       be freed in the future. */
    bool to_be_freed;
    struct os_thread_obj *next;
} os_thread_obj;

static bool is_thread_sys_inited = false;

/* Lock for thread data list */
static mutex_t thread_data_lock;

/* Thread data list */
static os_thread_data *thread_data_list = NULL;
static void
thread_data_list_add(os_thread_data *thread_data)
{
    mutex_lock(&thread_data_lock);
    if (!thread_data_list)
        thread_data_list = thread_data;
    else {
        /* If already in list, just return */
        os_thread_data *p = thread_data_list;
        while (p) {
            if (p == thread_data) {
                mutex_unlock(&thread_data_lock);
                return;
            }
            p = p->next;
        }

        /* Set as head of list */
        thread_data->next = thread_data_list;
        thread_data_list = thread_data;
    }
    mutex_unlock(&thread_data_lock);
}

static void
thread_data_list_remove(os_thread_data *thread_data)
{
    mutex_lock(&thread_data_lock);
    if (thread_data_list) {
        if (thread_data_list == thread_data)
            thread_data_list = thread_data_list->next;
        else {
            /* Search and remove it from list */
            os_thread_data *p = thread_data_list;
            while (p && p->next != thread_data)
                p = p->next;
            if (p && p->next == thread_data)
                p->next = p->next->next;
        }
    }
    mutex_unlock(&thread_data_lock);
}

static os_thread_data *
thread_data_list_lookup(korp_tid tid)
{
    mutex_lock(&thread_data_lock);
    if (thread_data_list) {
        os_thread_data *p = thread_data_list;
        while (p) {
            if (p->tid == tid) {
                /* Found */
                mutex_unlock(&thread_data_lock);
                return p;
            }
            p = p->next;
        }
    }
    mutex_unlock(&thread_data_lock);
    return NULL;
}
int
os_thread_sys_init()
{
    if (is_thread_sys_inited)
        return BHT_OK;

    mutex_init(&thread_data_lock);
    is_thread_sys_inited = true;
    return BHT_OK;
}

void
os_thread_sys_destroy()
{
    if (is_thread_sys_inited) {
        is_thread_sys_inited = false;
    }
}
static os_thread_data *
thread_data_current()
{
    kernel_pid_t tid = thread_getpid();
    return thread_data_list_lookup(tid);
}

static void
os_thread_cleanup(void)
{
    /* TODO: check this (join sema trigger, cleanup of thread_data) */
    os_thread_data *thread_data = thread_data_current();

    bh_assert(thread_data != NULL);

    mutex_lock(&thread_data->wait_list_lock);
    if (thread_data->thread_wait_list) {
        /* Signal each joining thread */
        os_thread_wait_list head = thread_data->thread_wait_list;
        while (head) {
            os_thread_wait_list next = head->next;
            head->ret = thread_data->arg;
            sema_post(&head->sem);
            head = next;
        }
        thread_data->thread_wait_list = NULL;
    }
    mutex_unlock(&thread_data->wait_list_lock);

    thread_data_list_remove(thread_data);
}
static void *
os_thread_wrapper(void *thread_data)
{
    /* Set thread custom data */
    os_thread_data *t = (os_thread_data *)thread_data;
    t->tid = thread_getpid();
    thread_data_list_add(t);

    /* Save the return value in arg, since arg is no longer needed
       after the call */
    t->arg = (t->start_routine)(t->arg);

    os_thread_cleanup(); /* internal structures and joiners */
    BH_FREE(thread_data);
    sched_task_exit(); /* stop the thread */
    return NULL;       /* never reached */
}
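
/*
 * Summary of the thread lifecycle implemented above (descriptive, not
 * normative): os_thread_wrapper() registers the new thread, runs its start
 * routine and stores the routine's return value in thread_data->arg.
 * os_thread_cleanup() hands that value to every joiner on thread_wait_list
 * by posting each wait node's semaphore and removes the thread from the
 * global list; the wrapper then frees thread_data and exits via
 * sched_task_exit().
 */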
int
os_thread_create(korp_tid *p_tid, thread_start_routine_t start, void *arg,
                 unsigned int stack_size)
{
    return os_thread_create_with_prio(p_tid, start, arg, stack_size,
                                      BH_THREAD_DEFAULT_PRIORITY);
}

int
os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
                           void *arg, unsigned int stack_size, int prio)
{
    kernel_pid_t tid;
    os_thread_data *thread_data;
    unsigned thread_data_size;

    if (!p_tid || !stack_size)
        return BHT_ERROR;

    /* Create and initialize thread data */
    thread_data_size = offsetof(os_thread_data, stack) + stack_size;
    if (!(thread_data = BH_MALLOC(thread_data_size))) {
        return BHT_ERROR;
    }

    memset(thread_data, 0, thread_data_size);
    mutex_init(&thread_data->wait_list_lock);
    thread_data->stack_size = stack_size;
    thread_data->start_routine = start;
    thread_data->arg = arg;

    /* Create the thread; thread_create() returns a negative value on error */
    if ((tid = thread_create(thread_data->stack, stack_size, prio, 0,
                             os_thread_wrapper, thread_data, "WASM"))
        <= KERNEL_PID_UNDEF) {
        BH_FREE(thread_data);
        return BHT_ERROR;
    }

    thread_data->tid = tid;

    /* Set thread custom data */
    thread_data_list_add(thread_data);
    *p_tid = tid;
    return BHT_OK;
}
korp_tid
os_self_thread()
{
    return (korp_tid)thread_getpid();
}

int
os_thread_join(korp_tid thread, void **value_ptr)
{
    /* Test whether the target thread is still running and wait for it
       if it is */
    os_thread_data *thread_data;
    os_thread_wait_node node;

    sema_create(&node.sem, 0);
    node.next = NULL;

    /* Get thread data */
    thread_data = thread_data_list_lookup(thread);
    if (thread_data == NULL) {
        /* Thread not found */
        sema_destroy(&node.sem);
        return BHT_ERROR;
    }
    bh_assert(thread_data != NULL);

    mutex_lock(&thread_data->wait_list_lock);
    if (!thread_data->thread_wait_list)
        thread_data->thread_wait_list = &node;
    else {
        /* Add to end of waiting list */
        os_thread_wait_node *p = thread_data->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = &node;
    }
    mutex_unlock(&thread_data->wait_list_lock);

    sema_wait(&node.sem);

    /* Copy out the return value; the pointed-to thread data may no longer
       be available after this function returns */
    if (value_ptr)
        (*value_ptr) = node.ret;

    /* Wait some time for the thread to be actually terminated */
    /* TODO: k_sleep(100); */
    /* TODO: bump target prio to make it finish and free its resources */
    thread_yield();

    /* The wait node has done its job */
    sema_destroy(&node.sem);
    return BHT_OK;
}
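
/*
 * Illustrative usage sketch (not part of this port): creating a thread with
 * the functions above and joining it. The start routine `worker`, its
 * argument and the 8 KiB stack size are made-up values for the example.
 *
 *     static void *worker(void *arg) { return arg; }
 *
 *     korp_tid tid;
 *     void *result = NULL;
 *     os_thread_sys_init();
 *     if (os_thread_create(&tid, worker, (void *)0x2a, 8192) == BHT_OK) {
 *         os_thread_join(tid, &result);    // result == (void *)0x2a
 *     }
 */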
// int vm_mutex_trylock(korp_mutex *mutex)
// {
//     return mutex_trylock(mutex);
// }

int
os_mutex_init(korp_mutex *mutex)
{
    mutex_init(mutex);
    return BHT_OK;
}

int
os_mutex_destroy(korp_mutex *mutex)
{
    (void)mutex;
    return BHT_OK;
}

int
os_mutex_lock(korp_mutex *mutex)
{
    mutex_lock(mutex);
    return 0; /* RIOT's mutex_lock() does not return until it succeeds */
}

int
os_mutex_unlock(korp_mutex *mutex)
{
    mutex_unlock(mutex);
    return 0; /* RIOT's mutex_unlock() does not return until it succeeds */
}

int
os_cond_init(korp_cond *cond)
{
    mutex_init(&cond->wait_list_lock);
    cond->thread_wait_list = NULL;
    return BHT_OK;
}

int
os_cond_destroy(korp_cond *cond)
{
    (void)cond;
    return BHT_OK;
}
static int
os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed,
                      uint64 useconds)
{
    os_thread_wait_node *node;

    /* Create wait node and append it to wait list */
    if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
        return BHT_ERROR;

    sema_create(&node->sem, 0);
    node->next = NULL;

    mutex_lock(&cond->wait_list_lock);
    if (!cond->thread_wait_list)
        cond->thread_wait_list = node;
    else {
        /* Add to end of wait list */
        os_thread_wait_node *p = cond->thread_wait_list;
        while (p->next)
            p = p->next;
        p->next = node;
    }
    mutex_unlock(&cond->wait_list_lock);

    /* Unlock mutex, wait on sem and lock mutex again */
    mutex_unlock(mutex);
    if (!timed)
        sema_wait(&node->sem);
    else
        sema_wait_timed(&node->sem, useconds);
    mutex_lock(mutex);

    /* Remove wait node from wait list */
    mutex_lock(&cond->wait_list_lock);
    if (cond->thread_wait_list == node)
        cond->thread_wait_list = node->next;
    else {
        /* Remove from the wait list */
        os_thread_wait_node *p = cond->thread_wait_list;
        while (p->next != node)
            p = p->next;
        p->next = node->next;
    }
    BH_FREE(node);
    mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}
int
os_cond_wait(korp_cond *cond, korp_mutex *mutex)
{
    return os_cond_wait_internal(cond, mutex, false, 0);
}

int
os_cond_reltimedwait(korp_cond *cond, korp_mutex *mutex, uint64 useconds)
{
    return os_cond_wait_internal(cond, mutex, (useconds != BHT_WAIT_FOREVER),
                                 useconds);
}

int
os_cond_signal(korp_cond *cond)
{
    /* Signal the head wait node of the wait list */
    mutex_lock(&cond->wait_list_lock);
    if (cond->thread_wait_list)
        sema_post(&cond->thread_wait_list->sem);
    mutex_unlock(&cond->wait_list_lock);
    return BHT_OK;
}
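
/*
 * Illustrative usage sketch (not part of this port): the usual
 * mutex/condition pattern built on the primitives above. The names
 * `queue_lock`, `queue_cond` and `queue_empty()` are hypothetical.
 *
 *     // consumer side
 *     os_mutex_lock(&queue_lock);
 *     while (queue_empty())
 *         os_cond_wait(&queue_cond, &queue_lock); // releases and re-acquires queue_lock
 *     // ... consume an item ...
 *     os_mutex_unlock(&queue_lock);
 *
 *     // producer side
 *     os_mutex_lock(&queue_lock);
 *     // ... enqueue an item ...
 *     os_cond_signal(&queue_cond);                // wakes the head waiter, if any
 *     os_mutex_unlock(&queue_lock);
 */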
uint8 *
os_thread_get_stack_boundary()
{
#if defined(DEVELHELP) || defined(SCHED_TEST_STACK) \
    || defined(MODULE_MPU_STACK_GUARD)
    return (uint8 *)thread_get_active()->stack_start;
#else
    return NULL;
#endif
}