thread_manager.c
/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "thread_manager.h"
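
/* Node of the destroy-callback list: holds one callback that is invoked
   whenever a cluster is destroyed */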
typedef struct {
    bh_list_link l;
    void (*destroy_cb)(WASMCluster *);
} DestroyCallBackNode;
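
/* Global list of registered destroy callbacks, global list of live
   clusters, and the lock protecting the cluster list */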
static bh_list destroy_callback_list_head;
static bh_list *const destroy_callback_list = &destroy_callback_list_head;

static bh_list cluster_list_head;
static bh_list *const cluster_list = &cluster_list_head;
static korp_mutex cluster_list_lock;

typedef void (*list_visitor)(void *, void *);

static uint32 cluster_max_thread_num = CLUSTER_MAX_THREAD_NUM;

/* Set the maximum thread number. If this function is not called,
   the max thread num is defined by CLUSTER_MAX_THREAD_NUM */
void
wasm_cluster_set_max_thread_num(uint32 num)
{
    if (num > 0)
        cluster_max_thread_num = num;
}
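
/* Initialize the global cluster list and the lock that protects it;
   must be called once before any cluster is created */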
bool
thread_manager_init()
{
    if (bh_list_init(cluster_list) != 0)
        return false;
    if (os_mutex_init(&cluster_list_lock) != 0)
        return false;
    return true;
}
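
/* Destroy every remaining cluster, free the registered destroy callbacks
   and release the cluster list lock */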
void
thread_manager_destroy()
{
    WASMCluster *cluster = bh_list_first_elem(cluster_list);
    WASMCluster *next;

    while (cluster) {
        next = bh_list_elem_next(cluster);
        wasm_cluster_destroy(cluster);
        cluster = next;
    }
    wasm_cluster_cancel_all_callbacks();
    os_mutex_destroy(&cluster_list_lock);
}
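
/* Apply `visitor` to every node of the list; the next pointer is read
   before the visit so the visitor is free to remove or free the node */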
static void
traverse_list(bh_list *l, list_visitor visitor, void *user_data)
{
    void *next, *node = bh_list_first_elem(l);

    while (node) {
        next = bh_list_elem_next(node);
        visitor(node, user_data);
        node = next;
    }
}
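
/* Grab a free aux-stack segment for a new thread and return its top
   address and size; fails when all segments are occupied */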
static bool
allocate_aux_stack(WASMCluster *cluster, uint32 *start, uint32 *size)
{
    uint32 i;

    /* If the module doesn't have aux stack info,
       it can't create any threads */
    if (!cluster->stack_segment_occupied)
        return false;

    os_mutex_lock(&cluster->lock);
    for (i = 0; i < cluster_max_thread_num; i++) {
        if (!cluster->stack_segment_occupied[i]) {
            if (start)
                *start = cluster->stack_tops[i];
            if (size)
                *size = cluster->stack_size;
            cluster->stack_segment_occupied[i] = true;
            os_mutex_unlock(&cluster->lock);
            return true;
        }
    }
    os_mutex_unlock(&cluster->lock);
    return false;
}
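
/* Mark the segment whose top address equals `start` as free again */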
static bool
free_aux_stack(WASMCluster *cluster, uint32 start)
{
    uint32 i;

    for (i = 0; i < cluster_max_thread_num; i++) {
        if (start == cluster->stack_tops[i]) {
            os_mutex_lock(&cluster->lock);
            cluster->stack_segment_occupied[i] = false;
            os_mutex_unlock(&cluster->lock);
            return true;
        }
    }
    return false;
}
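
/* Create a cluster for the main exec_env. The module's aux stack area is
   split into (cluster_max_thread_num + 1) segments of equal size: the first
   segment stays with the main instance, the tops of the remaining segments
   are recorded in stack_tops[] and handed out by allocate_aux_stack() */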
WASMCluster *
wasm_cluster_create(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;
    uint64 total_size;
    uint32 aux_stack_start, aux_stack_size, i;

    bh_assert(exec_env->cluster == NULL);
    if (!(cluster = wasm_runtime_malloc(sizeof(WASMCluster)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return NULL;
    }
    memset(cluster, 0, sizeof(WASMCluster));

    exec_env->cluster = cluster;

    bh_list_init(&cluster->exec_env_list);
    bh_list_insert(&cluster->exec_env_list, exec_env);
    if (os_mutex_init(&cluster->lock) != 0) {
        wasm_runtime_free(cluster);
        LOG_ERROR("thread manager error: failed to init mutex");
        return NULL;
    }

    /* Prepare the aux stack top and size for every thread */
    if (!wasm_exec_env_get_aux_stack(exec_env, &aux_stack_start,
                                     &aux_stack_size)) {
        LOG_VERBOSE("No aux stack info for this module, can't create thread");
        /* If the module doesn't have aux stack info, don't report an error
           here; just leave stack_tops and stack_segment_occupied as NULL */
        os_mutex_lock(&cluster_list_lock);
        if (bh_list_insert(cluster_list, cluster) != 0) {
            os_mutex_unlock(&cluster_list_lock);
            goto fail;
        }
        os_mutex_unlock(&cluster_list_lock);

        return cluster;
    }

    cluster->stack_size = aux_stack_size / (cluster_max_thread_num + 1);
    if (cluster->stack_size < WASM_THREAD_AUX_STACK_SIZE_MIN) {
        goto fail;
    }
    /* Make stack size 16-byte aligned */
    cluster->stack_size = cluster->stack_size & (~15);

    /* Set initial aux stack top to the instance and
       aux stack boundary to the main exec_env */
    if (!wasm_exec_env_set_aux_stack(exec_env, aux_stack_start,
                                     cluster->stack_size))
        goto fail;

    if (cluster_max_thread_num != 0) {
        total_size = cluster_max_thread_num * sizeof(uint32);
        if (total_size >= UINT32_MAX
            || !(cluster->stack_tops =
                     wasm_runtime_malloc((uint32)total_size))) {
            goto fail;
        }
        memset(cluster->stack_tops, 0, (uint32)total_size);

        if (!(cluster->stack_segment_occupied = wasm_runtime_malloc(
                  cluster_max_thread_num * sizeof(bool)))) {
            goto fail;
        }
        memset(cluster->stack_segment_occupied, 0,
               cluster_max_thread_num * sizeof(bool));

        /* Reserve space for main instance */
        aux_stack_start -= cluster->stack_size;

        for (i = 0; i < cluster_max_thread_num; i++) {
            cluster->stack_tops[i] = aux_stack_start - cluster->stack_size * i;
        }
    }

    os_mutex_lock(&cluster_list_lock);
    if (bh_list_insert(cluster_list, cluster) != 0) {
        os_mutex_unlock(&cluster_list_lock);
        goto fail;
    }
    os_mutex_unlock(&cluster_list_lock);

    return cluster;

fail:
    if (cluster)
        wasm_cluster_destroy(cluster);
    return NULL;
}
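
/* Destroying a cluster: first run every registered destroy callback on it,
   then unlink it from the global list and free its resources */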
static void
destroy_cluster_visitor(void *node, void *user_data)
{
    DestroyCallBackNode *destroy_node = (DestroyCallBackNode *)node;
    WASMCluster *cluster = (WASMCluster *)user_data;

    destroy_node->destroy_cb(cluster);
}

void
wasm_cluster_destroy(WASMCluster *cluster)
{
    traverse_list(destroy_callback_list, destroy_cluster_visitor,
                  (void *)cluster);

    /* Remove the cluster from the cluster list */
    os_mutex_lock(&cluster_list_lock);
    bh_list_remove(cluster_list, cluster);
    os_mutex_unlock(&cluster_list_lock);

    os_mutex_destroy(&cluster->lock);

    if (cluster->stack_tops)
        wasm_runtime_free(cluster->stack_tops);
    if (cluster->stack_segment_occupied)
        wasm_runtime_free(cluster->stack_segment_occupied);
    wasm_runtime_free(cluster);
}

static void
free_node_visitor(void *node, void *user_data)
{
    wasm_runtime_free(node);
}

void
wasm_cluster_cancel_all_callbacks()
{
    traverse_list(destroy_callback_list, free_node_visitor, NULL);
}

WASMCluster *
wasm_exec_env_get_cluster(WASMExecEnv *exec_env)
{
    return exec_env->cluster;
}
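
/* Add/remove an exec_env to/from the cluster's list; when the last
   exec_env is removed the cluster destroys itself */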
bool
wasm_cluster_add_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    bool ret = true;

    exec_env->cluster = cluster;

    os_mutex_lock(&cluster->lock);
    if (bh_list_insert(&cluster->exec_env_list, exec_env) != 0)
        ret = false;
    os_mutex_unlock(&cluster->lock);
    return ret;
}

bool
wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    bool ret = true;

    bh_assert(exec_env->cluster == cluster);

    os_mutex_lock(&cluster->lock);
    if (bh_list_remove(&cluster->exec_env_list, exec_env) != 0)
        ret = false;
    os_mutex_unlock(&cluster->lock);

    if (cluster->exec_env_list.len == 0) {
        /* exec_env_list empty, destroy the cluster */
        wasm_cluster_destroy(cluster);
    }
    return ret;
}

static WASMExecEnv *
wasm_cluster_search_exec_env(WASMCluster *cluster,
                             WASMModuleInstanceCommon *module_inst)
{
    WASMExecEnv *node = NULL;

    os_mutex_lock(&cluster->lock);
    node = bh_list_first_elem(&cluster->exec_env_list);
    while (node) {
        if (node->module_inst == module_inst) {
            os_mutex_unlock(&cluster->lock);
            return node;
        }
        node = bh_list_elem_next(node);
    }
    os_mutex_unlock(&cluster->lock);
    return NULL;
}

/* Search the global cluster list to find whether the given
   module instance has a corresponding exec_env */
WASMExecEnv *
wasm_clusters_search_exec_env(WASMModuleInstanceCommon *module_inst)
{
    WASMCluster *cluster = NULL;
    WASMExecEnv *exec_env = NULL;

    os_mutex_lock(&cluster_list_lock);
    cluster = bh_list_first_elem(cluster_list);
    while (cluster) {
        exec_env = wasm_cluster_search_exec_env(cluster, module_inst);
        if (exec_env) {
            os_mutex_unlock(&cluster_list_lock);
            return exec_env;
        }
        cluster = bh_list_elem_next(cluster);
    }
    os_mutex_unlock(&cluster_list_lock);
    return NULL;
}
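
/* Spawn a new exec_env inside the caller's cluster: instantiate a new
   instance of the same module, create an exec_env for it, and assign it
   one of the cluster's aux stack segments */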
WASMExecEnv *
wasm_cluster_spawn_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = get_module_inst(exec_env);
    wasm_module_t module = wasm_exec_env_get_module(exec_env);
    wasm_module_inst_t new_module_inst;
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start, aux_stack_size;

    if (!module) {
        return NULL;
    }

    if (!(new_module_inst = wasm_runtime_instantiate_internal(
              module, true, 8192, 0, NULL, 0))) {
        return NULL;
    }

    if (module_inst) {
        /* Set custom_data to new module instance */
        wasm_runtime_set_custom_data_internal(
            new_module_inst, wasm_runtime_get_custom_data(module_inst));
    }

    new_exec_env = wasm_exec_env_create_internal(new_module_inst,
                                                 exec_env->wasm_stack_size);
    if (!new_exec_env)
        goto fail1;

    if (!allocate_aux_stack(cluster, &aux_stack_start, &aux_stack_size)) {
        LOG_ERROR("thread manager error: "
                  "failed to allocate aux stack space for new thread");
        goto fail2;
    }

    /* Set aux stack range for the new exec_env */
    if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                     aux_stack_size)) {
        goto fail3;
    }

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail3;

    return new_exec_env;

fail3:
    /* free the allocated aux stack space */
    free_aux_stack(cluster, aux_stack_start);
fail2:
    wasm_exec_env_destroy(new_exec_env);
fail1:
    wasm_runtime_deinstantiate_internal(new_module_inst, true);

    return NULL;
}
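
/* Tear down an exec_env created by wasm_cluster_spawn_exec_env(), together
   with its aux stack segment and its module instance */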
void
wasm_cluster_destroy_spawned_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = wasm_runtime_get_module_inst(exec_env);
    bh_assert(cluster != NULL);

    /* Free aux stack space */
    free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom);
    wasm_cluster_del_exec_env(cluster, exec_env);
    wasm_exec_env_destroy_internal(exec_env);

    wasm_runtime_deinstantiate_internal(module_inst, true);
}

/* Start routine wrapper executed by every native thread created through
   wasm_cluster_create_thread(): it runs the user routine, then frees the
   thread's aux stack segment and exec_env before the native thread exits */
static void *
thread_manager_start_routine(void *arg)
{
    void *ret;
    WASMExecEnv *exec_env = (WASMExecEnv *)arg;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster != NULL);

    exec_env->handle = os_self_thread();
    ret = exec_env->thread_start_routine(exec_env);

#ifdef OS_ENABLE_HW_BOUND_CHECK
    /* The routine exited via wasm_cluster_exit_thread(): pick up the
       return value it stored in the exec_env */
    if (exec_env->suspend_flags.flags & 0x08)
        ret = exec_env->thread_ret_value;
#endif

    /* Routine exit */
    /* Free aux stack space */
    free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom);
    /* Detach the native thread here to ensure the resources are freed */
    wasm_cluster_detach_thread(exec_env);
    /* Remove and destroy exec_env */
    wasm_cluster_del_exec_env(cluster, exec_env);
    wasm_exec_env_destroy_internal(exec_env);

    os_thread_exit(ret);
    return ret;
}
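
/* Create a new thread in the cluster: build an exec_env for module_inst,
   give it an aux stack segment, and start a native thread that runs
   thread_routine through thread_manager_start_routine() */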
int32
wasm_cluster_create_thread(WASMExecEnv *exec_env,
                           wasm_module_inst_t module_inst,
                           void *(*thread_routine)(void *), void *arg)
{
    WASMCluster *cluster;
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start, aux_stack_size;
    korp_tid tid;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    new_exec_env =
        wasm_exec_env_create_internal(module_inst, exec_env->wasm_stack_size);
    if (!new_exec_env)
        return -1;

    if (!allocate_aux_stack(cluster, &aux_stack_start, &aux_stack_size)) {
        LOG_ERROR("thread manager error: "
                  "failed to allocate aux stack space for new thread");
        goto fail1;
    }

    /* Set aux stack range for the new exec_env */
    if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                     aux_stack_size)) {
        goto fail2;
    }

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail2;

    new_exec_env->thread_start_routine = thread_routine;
    new_exec_env->thread_arg = arg;

    if (0
        != os_thread_create(&tid, thread_manager_start_routine,
                            (void *)new_exec_env,
                            APP_THREAD_STACK_SIZE_DEFAULT)) {
        goto fail3;
    }

    return 0;

fail3:
    wasm_cluster_del_exec_env(cluster, new_exec_env);
fail2:
    /* free the allocated aux stack space */
    free_aux_stack(cluster, aux_stack_start);
fail1:
    wasm_exec_env_destroy(new_exec_env);
    return -1;
}
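
/* Debugger support (debug interpreter builds only): every exec_env owns a
   WASMCurrentEnvStatus tracking its running state (running / step / stop /
   exit) plus a pending signal, with a mutex and condition variable used to
   hand control back and forth between the debugger and the target thread */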
#if WASM_ENABLE_DEBUG_INTERP != 0
WASMCurrentEnvStatus *
wasm_cluster_create_exenv_status()
{
    WASMCurrentEnvStatus *status;

    if (!(status = wasm_runtime_malloc(sizeof(WASMCurrentEnvStatus)))) {
        goto fail;
    }

    if (os_mutex_init(&status->wait_lock) != 0)
        goto fail1;

    if (os_cond_init(&status->wait_cond) != 0)
        goto fail2;

    status->step_count = 0;
    status->signal_flag = 0;
    status->running_status = 0;
    return status;

fail2:
    os_mutex_destroy(&status->wait_lock);
fail1:
    wasm_runtime_free(status);
fail:
    return NULL;
}

void
wasm_cluster_destroy_exenv_status(WASMCurrentEnvStatus *status)
{
    os_mutex_destroy(&status->wait_lock);
    os_cond_destroy(&status->wait_cond);
    wasm_runtime_free(status);
}

inline static bool
wasm_cluster_thread_is_running(WASMExecEnv *exec_env)
{
    return exec_env->current_status->running_status == STATUS_RUNNING
           || exec_env->current_status->running_status == STATUS_STEP;
}

void
wasm_cluster_clear_thread_signal(WASMExecEnv *exec_env)
{
    exec_env->current_status->signal_flag = 0;
}

void
wasm_cluster_wait_thread_status(WASMExecEnv *exec_env, uint32 *status)
{
    os_mutex_lock(&exec_env->current_status->wait_lock);
    while (wasm_cluster_thread_is_running(exec_env)) {
        os_cond_wait(&exec_env->current_status->wait_cond,
                     &exec_env->current_status->wait_lock);
    }
    *status = exec_env->current_status->signal_flag;
    os_mutex_unlock(&exec_env->current_status->wait_lock);
}

void
wasm_cluster_thread_send_signal(WASMExecEnv *exec_env, uint32 signo)
{
    exec_env->current_status->signal_flag = signo;
}

void
wasm_cluster_thread_stopped(WASMExecEnv *exec_env)
{
    exec_env->current_status->running_status = STATUS_STOP;
    os_cond_signal(&exec_env->current_status->wait_cond);
}

void
wasm_cluster_thread_waiting_run(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    while (!wasm_cluster_thread_is_running(exec_env)) {
        os_cond_wait(&exec_env->wait_cond, &exec_env->wait_lock);
    }
    os_mutex_unlock(&exec_env->wait_lock);
}

void
wasm_cluster_send_signal_all(WASMCluster *cluster, uint32 signo)
{
    WASMExecEnv *exec_env = bh_list_first_elem(&cluster->exec_env_list);
    while (exec_env) {
        wasm_cluster_thread_send_signal(exec_env, signo);
        exec_env = bh_list_elem_next(exec_env);
    }
}

void
wasm_cluster_thread_exited(WASMExecEnv *exec_env)
{
    exec_env->current_status->running_status = STATUS_EXIT;
    os_cond_signal(&exec_env->current_status->wait_cond);
}

void
wasm_cluster_thread_continue(WASMExecEnv *exec_env)
{
    wasm_cluster_clear_thread_signal(exec_env);
    exec_env->current_status->running_status = STATUS_RUNNING;
    os_cond_signal(&exec_env->wait_cond);
}

void
wasm_cluster_thread_step(WASMExecEnv *exec_env)
{
    exec_env->current_status->running_status = STATUS_STEP;
    os_cond_signal(&exec_env->wait_cond);
}
#endif

int32
wasm_cluster_join_thread(WASMExecEnv *exec_env, void **ret_val)
{
    return os_thread_join(exec_env->handle, ret_val);
}

int32
wasm_cluster_detach_thread(WASMExecEnv *exec_env)
{
    return os_thread_detach(exec_env->handle);
}
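
/* Called when the app explicitly exits the current thread. Under hardware
   bound check the exit is performed by longjmp'ing back to the outermost
   jmpbuf; bit 0x08 in suspend_flags tells thread_manager_start_routine()
   to use the return value stored in thread_ret_value */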
void
wasm_cluster_exit_thread(WASMExecEnv *exec_env, void *retval)
{
    WASMCluster *cluster;

#ifdef OS_ENABLE_HW_BOUND_CHECK
    if (exec_env->jmpbuf_stack_top) {
        /* Store the return value in exec_env */
        exec_env->thread_ret_value = retval;
        exec_env->suspend_flags.flags |= 0x08;

#ifndef BH_PLATFORM_WINDOWS
        /* Pop all jmpbuf_node except the last one */
        while (exec_env->jmpbuf_stack_top->prev) {
            wasm_exec_env_pop_jmpbuf(exec_env);
        }
        os_longjmp(exec_env->jmpbuf_stack_top->jmpbuf, 1);
        return;
#endif
    }
#endif

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_clear_thread_signal(exec_env);
    wasm_cluster_thread_exited(exec_env);
#endif
    /* The app exits the thread: free the resources before exiting
       the native thread */
    /* Free aux stack space */
    free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom);
    /* Detach the native thread here to ensure the resources are freed */
    wasm_cluster_detach_thread(exec_env);
    /* Remove and destroy exec_env */
    wasm_cluster_del_exec_env(cluster, exec_env);
    wasm_exec_env_destroy_internal(exec_env);

    os_thread_exit(retval);
}
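
/* Ask a thread to terminate: debug builds deliver WAMR_SIG_TERM through the
   status object, other builds set the termination bit (0x01) in
   suspend_flags for the target thread to observe */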
int32
wasm_cluster_cancel_thread(WASMExecEnv *exec_env)
{
    /* Set the termination flag */
#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_thread_send_signal(exec_env, WAMR_SIG_TERM);
    wasm_cluster_thread_exited(exec_env);
#else
    exec_env->suspend_flags.flags |= 0x01;
#endif
    return 0;
}

static void
terminate_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_cancel_thread(curr_exec_env);
    wasm_cluster_join_thread(curr_exec_env, NULL);
}

void
wasm_cluster_terminate_all(WASMCluster *cluster)
{
    traverse_list(&cluster->exec_env_list, terminate_thread_visitor, NULL);
}

void
wasm_cluster_terminate_all_except_self(WASMCluster *cluster,
                                       WASMExecEnv *exec_env)
{
    traverse_list(&cluster->exec_env_list, terminate_thread_visitor,
                  (void *)exec_env);
}
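
/* Register a callback to be invoked whenever a cluster is destroyed; the
   node lives until wasm_cluster_cancel_all_callbacks() frees it */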
bool
wasm_cluster_register_destroy_callback(void (*callback)(WASMCluster *))
{
    DestroyCallBackNode *node;

    if (!(node = wasm_runtime_malloc(sizeof(DestroyCallBackNode)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return false;
    }
    node->destroy_cb = callback;
    bh_list_insert(destroy_callback_list, node);
    return true;
}
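
/* Suspend/resume: bit 0x02 in suspend_flags marks a thread as suspended;
   resuming clears the bit and signals the exec_env's wait_cond */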
void
wasm_cluster_suspend_thread(WASMExecEnv *exec_env)
{
    /* Set the suspend flag */
    exec_env->suspend_flags.flags |= 0x02;
}

static void
suspend_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_suspend_thread(curr_exec_env);
}

void
wasm_cluster_suspend_all(WASMCluster *cluster)
{
    traverse_list(&cluster->exec_env_list, suspend_thread_visitor, NULL);
}

void
wasm_cluster_suspend_all_except_self(WASMCluster *cluster,
                                     WASMExecEnv *exec_env)
{
    traverse_list(&cluster->exec_env_list, suspend_thread_visitor,
                  (void *)exec_env);
}

void
wasm_cluster_resume_thread(WASMExecEnv *exec_env)
{
    exec_env->suspend_flags.flags &= ~0x02;
    os_cond_signal(&exec_env->wait_cond);
}

static void
resume_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;

    wasm_cluster_resume_thread(curr_exec_env);
}

void
wasm_cluster_resume_all(WASMCluster *cluster)
{
    traverse_list(&cluster->exec_env_list, resume_thread_visitor, NULL);
}
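
/* Spread an exception raised in one instance to every other instance in
   the same cluster, so that all threads stop with the same error */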
static void
set_exception_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
    WASMModuleInstanceCommon *module_inst = get_module_inst(exec_env);
    WASMModuleInstanceCommon *curr_module_inst = get_module_inst(curr_exec_env);
    const char *exception = wasm_runtime_get_exception(module_inst);

    /* skip "Exception: " */
    exception += 11;

    if (curr_exec_env != exec_env) {
        curr_module_inst = get_module_inst(curr_exec_env);
        wasm_runtime_set_exception(curr_module_inst, exception);
    }
}

void
wasm_cluster_spread_exception(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    traverse_list(&cluster->exec_env_list, set_exception_visitor, exec_env);
}
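
/* Propagate a custom_data pointer to every module instance that belongs to
   the cluster containing module_inst */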
static void
set_custom_data_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMModuleInstanceCommon *module_inst = get_module_inst(curr_exec_env);

    wasm_runtime_set_custom_data_internal(module_inst, user_data);
}

void
wasm_cluster_spread_custom_data(WASMModuleInstanceCommon *module_inst,
                                void *custom_data)
{
    WASMExecEnv *exec_env = wasm_clusters_search_exec_env(module_inst);
    WASMCluster *cluster = NULL;

    bh_assert(exec_env);
    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    traverse_list(&cluster->exec_env_list, set_custom_data_visitor,
                  custom_data);
}
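
/*
 * Illustrative usage sketch (not part of this file), assuming an embedder
 * with a main exec_env `main_exec_env`, a module instance `child_inst` for
 * the new thread, and a start routine `child_routine` of type
 * void *(*)(void *) that receives the new WASMExecEnv as its argument:
 *
 *   thread_manager_init();
 *   wasm_cluster_create(main_exec_env);
 *
 *   if (wasm_cluster_create_thread(main_exec_env, child_inst,
 *                                  child_routine, arg) != 0) {
 *       // handle error: aux stack exhausted or OS thread creation failed
 *   }
 *
 *   // ... run the main instance; spawned threads clean up after
 *   // themselves in thread_manager_start_routine() ...
 *
 *   thread_manager_destroy();  // destroys any remaining clusters
 */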