thread_manager.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "thread_manager.h"
/* Node of destroy_callback_list: holds one callback that is invoked
   when a cluster is destroyed (see wasm_cluster_destroy) */
typedef struct {
    bh_list_link l; /* embedded list link used by destroy_callback_list */
    void (*destroy_cb)(WASMCluster *);
} DestroyCallBackNode;
/* Callbacks to run when a cluster is destroyed,
   registered via wasm_cluster_register_destroy_callback() */
static bh_list destroy_callback_list_head;
static bh_list *const destroy_callback_list = &destroy_callback_list_head;

/* Registry of all live clusters, guarded by cluster_list_lock */
static bh_list cluster_list_head;
static bh_list *const cluster_list = &cluster_list_head;
static korp_mutex cluster_list_lock;

/* Visitor signature used by traverse_list(): (element, user_data) */
typedef void (*list_visitor)(void *, void *);

/* Per-cluster thread limit; adjustable via
   wasm_cluster_set_max_thread_num() */
static uint32 cluster_max_thread_num = CLUSTER_MAX_THREAD_NUM;
/* Set the maximum thread number, if this function is not called,
   the max thread num is defined by CLUSTER_MAX_THREAD_NUM */
void
wasm_cluster_set_max_thread_num(uint32 num)
{
    /* NOTE(review): unsynchronized write; assumed to be called before
       any cluster is created — confirm with callers */
    cluster_max_thread_num = num;
}
  24. bool
  25. thread_manager_init()
  26. {
  27. if (bh_list_init(cluster_list) != 0)
  28. return false;
  29. if (os_mutex_init(&cluster_list_lock) != 0)
  30. return false;
  31. return true;
  32. }
  33. void
  34. thread_manager_destroy()
  35. {
  36. WASMCluster *cluster = bh_list_first_elem(cluster_list);
  37. WASMCluster *next;
  38. while (cluster) {
  39. next = bh_list_elem_next(cluster);
  40. wasm_cluster_destroy(cluster);
  41. cluster = next;
  42. }
  43. wasm_cluster_cancel_all_callbacks();
  44. os_mutex_destroy(&cluster_list_lock);
  45. }
  46. static void
  47. traverse_list(bh_list *l, list_visitor visitor, void *user_data)
  48. {
  49. void *next, *node = bh_list_first_elem(l);
  50. while (node) {
  51. next = bh_list_elem_next(node);
  52. visitor(node, user_data);
  53. node = next;
  54. }
  55. }
  56. static bool
  57. allocate_aux_stack(WASMCluster *cluster, uint32 *start, uint32 *size)
  58. {
  59. uint32 i;
  60. /* If the module doesn't have aux stack info,
  61. it can't create any threads */
  62. if (!cluster->stack_segment_occupied)
  63. return false;
  64. os_mutex_lock(&cluster->lock);
  65. for (i = 0; i < cluster_max_thread_num; i++) {
  66. if (!cluster->stack_segment_occupied[i]) {
  67. if (start)
  68. *start = cluster->stack_tops[i];
  69. if (size)
  70. *size = cluster->stack_size;
  71. cluster->stack_segment_occupied[i] = true;
  72. os_mutex_unlock(&cluster->lock);
  73. return true;
  74. }
  75. }
  76. os_mutex_unlock(&cluster->lock);
  77. return false;
  78. }
  79. static bool
  80. free_aux_stack(WASMCluster *cluster, uint32 start)
  81. {
  82. uint32 i;
  83. for (i = 0; i < cluster_max_thread_num; i++) {
  84. if (start == cluster->stack_tops[i]) {
  85. os_mutex_lock(&cluster->lock);
  86. cluster->stack_segment_occupied[i] = false;
  87. os_mutex_unlock(&cluster->lock);
  88. return true;
  89. }
  90. }
  91. return false;
  92. }
/* Create a cluster around the given (main) exec_env, carve the
   module's aux stack area into per-thread segments, and register the
   cluster in the global cluster list.
   Returns the new cluster, or NULL on failure. */
WASMCluster *
wasm_cluster_create(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;
    uint64 total_size;
    uint32 aux_stack_start, aux_stack_size, i;

    /* The exec_env must not already belong to a cluster */
    bh_assert(exec_env->cluster == NULL);

    if (!(cluster = wasm_runtime_malloc(sizeof(WASMCluster)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return NULL;
    }
    memset(cluster, 0, sizeof(WASMCluster));

    /* The creating exec_env becomes the first member of the cluster */
    exec_env->cluster = cluster;
    bh_list_init(&cluster->exec_env_list);
    bh_list_insert(&cluster->exec_env_list, exec_env);

    if (os_mutex_init(&cluster->lock) != 0) {
        wasm_runtime_free(cluster);
        LOG_ERROR("thread manager error: failed to init mutex");
        return NULL;
    }

    /* Prepare the aux stack top and size for every thread */
    if (!wasm_exec_env_get_aux_stack(exec_env,
                                     &aux_stack_start,
                                     &aux_stack_size)) {
        LOG_VERBOSE("No aux stack info for this module, can't create thread");
        /* If the module don't have aux stack info, don't throw error here,
           but remain stack_tops and stack_segment_occupied as NULL */
        os_mutex_lock(&cluster_list_lock);
        if (bh_list_insert(cluster_list, cluster) != 0) {
            os_mutex_unlock(&cluster_list_lock);
            goto fail;
        }
        os_mutex_unlock(&cluster_list_lock);
        return cluster;
    }

    /* One segment for the main instance plus one per possible child
       thread; zero means the aux stack area is too small to split */
    cluster->stack_size = aux_stack_size / (cluster_max_thread_num + 1);
    if (cluster->stack_size == 0) {
        goto fail;
    }

    /* Set initial aux stack top to the instance and
       aux stack boundary to the main exec_env */
    if (!wasm_exec_env_set_aux_stack(exec_env, aux_stack_start,
                                     cluster->stack_size))
        goto fail;

    if (cluster_max_thread_num != 0) {
        total_size = cluster_max_thread_num * sizeof(uint32);
        if (total_size >= UINT32_MAX
            || !(cluster->stack_tops =
                     wasm_runtime_malloc((uint32)total_size))) {
            goto fail;
        }
        memset(cluster->stack_tops, 0, (uint32)total_size);

        if (!(cluster->stack_segment_occupied =
                  wasm_runtime_malloc(cluster_max_thread_num * sizeof(bool)))) {
            goto fail;
        }
        memset(cluster->stack_segment_occupied, 0,
               cluster_max_thread_num * sizeof(bool));

        /* Reserve space for main instance */
        aux_stack_start -= cluster->stack_size;
        /* Segment tops descend by stack_size per thread slot */
        for (i = 0; i < cluster_max_thread_num; i++) {
            cluster->stack_tops[i] = aux_stack_start - cluster->stack_size * i;
        }
    }

    os_mutex_lock(&cluster_list_lock);
    if (bh_list_insert(cluster_list, cluster) != 0) {
        os_mutex_unlock(&cluster_list_lock);
        goto fail;
    }
    os_mutex_unlock(&cluster_list_lock);
    return cluster;

fail:
    /* wasm_cluster_destroy frees stack_tops / stack_segment_occupied
       and the cluster itself */
    if (cluster)
        wasm_cluster_destroy(cluster);
    return NULL;
}
  169. static void
  170. destroy_cluster_visitor(void *node, void *user_data)
  171. {
  172. DestroyCallBackNode *destroy_node = (DestroyCallBackNode *)node;
  173. WASMCluster *cluster = (WASMCluster *)user_data;
  174. destroy_node->destroy_cb(cluster);
  175. }
/* Destroy a cluster: notify registered observers, unlink it from the
   global cluster list, and free all memory it owns. The caller must
   ensure no exec_env still uses the cluster. */
void
wasm_cluster_destroy(WASMCluster *cluster)
{
    /* Run registered destroy callbacks before tearing anything down */
    traverse_list(destroy_callback_list,
                  destroy_cluster_visitor, (void *)cluster);

    /* Remove the cluster from the cluster list */
    os_mutex_lock(&cluster_list_lock);
    bh_list_remove(cluster_list, cluster);
    os_mutex_unlock(&cluster_list_lock);

    os_mutex_destroy(&cluster->lock);

    /* stack_tops / stack_segment_occupied are NULL when the module
       has no aux stack info (see wasm_cluster_create) */
    if (cluster->stack_tops)
        wasm_runtime_free(cluster->stack_tops);
    if (cluster->stack_segment_occupied)
        wasm_runtime_free(cluster->stack_segment_occupied);
    wasm_runtime_free(cluster);
}
  192. static void
  193. free_node_visitor(void *node, void *user_data)
  194. {
  195. wasm_runtime_free(node);
  196. }
  197. void
  198. wasm_cluster_cancel_all_callbacks()
  199. {
  200. traverse_list(destroy_callback_list, free_node_visitor, NULL);
  201. }
/* Return the cluster the exec_env belongs to, or NULL if it has not
   been attached to one yet. */
WASMCluster *
wasm_exec_env_get_cluster(WASMExecEnv *exec_env)
{
    return exec_env->cluster;
}
  207. bool
  208. wasm_cluster_add_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
  209. {
  210. bool ret = true;
  211. exec_env->cluster = cluster;
  212. os_mutex_lock(&cluster->lock);
  213. if (bh_list_insert(&cluster->exec_env_list, exec_env) != 0)
  214. ret = false;
  215. os_mutex_unlock(&cluster->lock);
  216. return ret;
  217. }
  218. bool
  219. wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
  220. {
  221. bool ret = true;
  222. bh_assert(exec_env->cluster == cluster);
  223. os_mutex_lock(&cluster->lock);
  224. if (bh_list_remove(&cluster->exec_env_list, exec_env) != 0)
  225. ret = false;
  226. os_mutex_unlock(&cluster->lock);
  227. if (cluster->exec_env_list.len == 0) {
  228. /* exec_env_list empty, destroy the cluster */
  229. wasm_cluster_destroy(cluster);
  230. }
  231. return ret;
  232. }
/* start routine of thread manager: wraps the wasm thread body set up
   by wasm_cluster_create_thread() and frees all per-thread resources
   when it returns. Runs on the new native thread. */
static void*
thread_manager_start_routine(void *arg)
{
    void *ret;
    WASMExecEnv *exec_env = (WASMExecEnv *)arg;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);

    bh_assert(cluster != NULL);

    exec_env->handle = os_self_thread();
    ret = exec_env->thread_start_routine(exec_env);

#ifdef OS_ENABLE_HW_BOUND_CHECK
    /* Flag 0x08 is set by wasm_cluster_exit_thread() when the thread
       unwound via longjmp; the actual return value was stashed in
       thread_ret_value */
    if (exec_env->suspend_flags.flags & 0x08)
        ret = exec_env->thread_ret_value;
#endif

    /* Routine exit */
    /* Free aux stack space: aux_stack_boundary + stack_size is assumed
       to recover the segment top handed out by allocate_aux_stack() —
       TODO confirm against wasm_exec_env_set_aux_stack() */
    free_aux_stack(cluster,
                   exec_env->aux_stack_boundary + cluster->stack_size);
    /* Detach the native thread here to ensure the resources are freed */
    wasm_cluster_detach_thread(exec_env);
    /* Remove and destroy exec_env */
    wasm_cluster_del_exec_env(cluster, exec_env);
    wasm_exec_env_destroy_internal(exec_env);

    os_thread_exit(ret);
    return ret;
}
/* Spawn a new wasm thread inside the creator's cluster.
   A fresh exec_env is created for module_inst, given one of the
   cluster's aux stack segments, and run via
   thread_manager_start_routine() (which also tears it down on exit).
   Returns 0 on success, -1 on failure. */
int32
wasm_cluster_create_thread(WASMExecEnv *exec_env,
                           wasm_module_inst_t module_inst,
                           void* (*thread_routine)(void *),
                           void *arg)
{
    WASMCluster *cluster;
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start, aux_stack_size;
    korp_tid tid;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    new_exec_env = wasm_exec_env_create_internal(
        module_inst, exec_env->wasm_stack_size);
    if (!new_exec_env)
        return -1;

    /* Reserve one of the cluster's pre-carved aux stack segments */
    if (!allocate_aux_stack(cluster, &aux_stack_start, &aux_stack_size)) {
        LOG_ERROR("thread manager error: "
                  "failed to allocate aux stack space for new thread");
        goto fail1;
    }

    /* Set aux stack for current thread */
    if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                     aux_stack_size)) {
        goto fail2;
    }

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail2;

    new_exec_env->thread_start_routine = thread_routine;
    new_exec_env->thread_arg = arg;

    /* From here on the new thread owns new_exec_env and frees it in
       thread_manager_start_routine() */
    if (0 != os_thread_create(&tid, thread_manager_start_routine,
                              (void *)new_exec_env,
                              APP_THREAD_STACK_SIZE_DEFAULT)) {
        goto fail3;
    }

    return 0;

fail3:
    wasm_cluster_del_exec_env(cluster, new_exec_env);
fail2:
    /* free the allocated aux stack space */
    free_aux_stack(cluster, aux_stack_start);
fail1:
    wasm_exec_env_destroy(new_exec_env);

    return -1;
}
/* Wait for the native thread bound to exec_env to exit
   (delegates to os_thread_join). */
int32
wasm_cluster_join_thread(WASMExecEnv *exec_env, void **ret_val)
{
    return os_thread_join(exec_env->handle, ret_val);
}
/* Detach the native thread bound to exec_env so its resources are
   reclaimed automatically on exit (delegates to os_thread_detach). */
int32
wasm_cluster_detach_thread(WASMExecEnv *exec_env)
{
    return os_thread_detach(exec_env->handle);
}
/* Terminate the calling wasm thread, freeing its aux stack segment
   and exec_env. Does not return to the caller. */
void
wasm_cluster_exit_thread(WASMExecEnv *exec_env, void *retval)
{
    WASMCluster *cluster;

#ifdef OS_ENABLE_HW_BOUND_CHECK
    if (exec_env->jmpbuf_stack_top) {
        WASMJmpBuf *jmpbuf_node;

        /* Store the return value in exec_env */
        exec_env->thread_ret_value = retval;
        /* Flag 0x08 tells thread_manager_start_routine() to take the
           return value from thread_ret_value */
        exec_env->suspend_flags.flags |= 0x08;

        /* Free all jmpbuf_node except the last one */
        while (exec_env->jmpbuf_stack_top->prev) {
            jmpbuf_node = wasm_exec_env_pop_jmpbuf(exec_env);
            wasm_runtime_free(jmpbuf_node);
        }

        /* longjmp to the remaining (first-pushed) jmpbuf; cleanup then
           happens back in thread_manager_start_routine() */
        jmpbuf_node = exec_env->jmpbuf_stack_top;
        os_longjmp(jmpbuf_node->jmpbuf, 1);
        return;
    }
#endif

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    /* App exit the thread, free the resources before exit native thread */
    /* Free aux stack space; aux_stack_boundary + stack_size is assumed
       to recover the segment top — TODO confirm against
       wasm_exec_env_set_aux_stack() */
    free_aux_stack(cluster,
                   exec_env->aux_stack_boundary + cluster->stack_size);
    /* Detach the native thread here to ensure the resources are freed */
    wasm_cluster_detach_thread(exec_env);
    /* Remove and destroy exec_env */
    wasm_cluster_del_exec_env(cluster, exec_env);
    wasm_exec_env_destroy_internal(exec_env);

    os_thread_exit(retval);
}
/* Request asynchronous termination of the thread bound to exec_env.
   Only sets flag 0x01; nothing here blocks — the target presumably
   polls the flag (confirm with the interpreter/runtime loop). */
int32
wasm_cluster_cancel_thread(WASMExecEnv *exec_env)
{
    /* Set the termination flag */
    exec_env->suspend_flags.flags |= 0x01;
    return 0;
}
  354. static void
  355. terminate_thread_visitor(void *node, void *user_data)
  356. {
  357. WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
  358. WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
  359. if (curr_exec_env == exec_env)
  360. return;
  361. wasm_cluster_cancel_thread(curr_exec_env);
  362. wasm_cluster_join_thread(curr_exec_env, NULL);
  363. }
  364. void
  365. wasm_cluster_terminate_all(WASMCluster *cluster)
  366. {
  367. traverse_list(&cluster->exec_env_list,
  368. terminate_thread_visitor, NULL);
  369. }
  370. void
  371. wasm_cluster_terminate_all_except_self(WASMCluster *cluster,
  372. WASMExecEnv *exec_env)
  373. {
  374. traverse_list(&cluster->exec_env_list,
  375. terminate_thread_visitor, (void *)exec_env);
  376. }
  377. bool
  378. wasm_cluster_register_destroy_callback(void (*callback)(WASMCluster *))
  379. {
  380. DestroyCallBackNode *node;
  381. if (!(node = wasm_runtime_malloc(sizeof(DestroyCallBackNode)))) {
  382. LOG_ERROR("thread manager error: failed to allocate memory");
  383. return false;
  384. }
  385. node->destroy_cb = callback;
  386. bh_list_insert(destroy_callback_list, node);
  387. return true;
  388. }
/* Ask the thread bound to exec_env to pause. Only sets flag 0x02;
   nothing here waits for the target to actually stop — presumably the
   target polls the flag (confirm with the interpreter loop). */
void
wasm_cluster_suspend_thread(WASMExecEnv *exec_env)
{
    /* Set the suspend flag */
    exec_env->suspend_flags.flags |= 0x02;
}
  395. static void
  396. suspend_thread_visitor(void *node, void *user_data)
  397. {
  398. WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
  399. WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
  400. if (curr_exec_env == exec_env)
  401. return;
  402. wasm_cluster_suspend_thread(curr_exec_env);
  403. }
  404. void
  405. wasm_cluster_suspend_all(WASMCluster *cluster)
  406. {
  407. traverse_list(&cluster->exec_env_list,
  408. suspend_thread_visitor, NULL);
  409. }
  410. void
  411. wasm_cluster_suspend_all_except_self(WASMCluster *cluster,
  412. WASMExecEnv *exec_env)
  413. {
  414. traverse_list(&cluster->exec_env_list,
  415. suspend_thread_visitor, (void *)exec_env);
  416. }
/* Clear the suspend flag (0x02) set by wasm_cluster_suspend_thread() */
void
wasm_cluster_resume_thread(WASMExecEnv *exec_env)
{
    exec_env->suspend_flags.flags &= ~0x02;
}
  422. static void
  423. resume_thread_visitor(void *node, void *user_data)
  424. {
  425. WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
  426. wasm_cluster_resume_thread(curr_exec_env);
  427. }
/* Clear the suspend flag on every exec_env in the cluster */
void
wasm_cluster_resume_all(WASMCluster *cluster)
{
    traverse_list(&cluster->exec_env_list, resume_thread_visitor, NULL);
}
  433. static void
  434. set_exception_visitor(void *node, void *user_data)
  435. {
  436. WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
  437. WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
  438. WASMModuleInstanceCommon *module_inst = get_module_inst(exec_env);
  439. WASMModuleInstanceCommon *curr_module_inst =
  440. get_module_inst(curr_exec_env);
  441. const char *exception = wasm_runtime_get_exception(module_inst);
  442. /* skip "Exception: " */
  443. exception += 11;
  444. if (curr_exec_env != exec_env) {
  445. curr_module_inst = get_module_inst(curr_exec_env);
  446. wasm_runtime_set_exception(curr_module_inst, exception);
  447. }
  448. }
  449. void
  450. wasm_cluster_spread_exception(WASMExecEnv *exec_env)
  451. {
  452. WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
  453. traverse_list(&cluster->exec_env_list, set_exception_visitor, exec_env);
  454. }