thread_manager.c
/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "thread_manager.h"
typedef struct {
    bh_list_link l;
    void (*destroy_cb)(WASMCluster *);
} DestroyCallBackNode;

static bh_list destroy_callback_list_head;
static bh_list *const destroy_callback_list = &destroy_callback_list_head;

static bh_list cluster_list_head;
static bh_list *const cluster_list = &cluster_list_head;
static korp_mutex cluster_list_lock;

typedef void (*list_visitor)(void *, void *);

static uint32 cluster_max_thread_num = CLUSTER_MAX_THREAD_NUM;
/* Set the maximum thread number; if this function is not called,
   the max thread num is defined by CLUSTER_MAX_THREAD_NUM */
void
wasm_cluster_set_max_thread_num(uint32 num)
{
    if (num > 0)
        cluster_max_thread_num = num;
}
bool
thread_manager_init()
{
    if (bh_list_init(cluster_list) != 0)
        return false;
    if (os_mutex_init(&cluster_list_lock) != 0)
        return false;
    return true;
}
void
thread_manager_destroy()
{
    WASMCluster *cluster = bh_list_first_elem(cluster_list);
    WASMCluster *next;

    while (cluster) {
        next = bh_list_elem_next(cluster);
        wasm_cluster_destroy(cluster);
        cluster = next;
    }
    wasm_cluster_cancel_all_callbacks();
    os_mutex_destroy(&cluster_list_lock);
}
static void
traverse_list(bh_list *l, list_visitor visitor, void *user_data)
{
    void *next, *node = bh_list_first_elem(l);

    while (node) {
        next = bh_list_elem_next(node);
        visitor(node, user_data);
        node = next;
    }
}
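
/*
 * Aux stack bookkeeping: allocate_aux_stack() hands out the first free aux
 * stack segment recorded in cluster->stack_tops[] and marks it in
 * stack_segment_occupied[]; free_aux_stack() releases the segment whose top
 * address matches `start`. Both arrays hold cluster_max_thread_num entries
 * and are created in wasm_cluster_create() below.
 */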
static bool
allocate_aux_stack(WASMCluster *cluster, uint32 *start, uint32 *size)
{
    uint32 i;

    /* If the module doesn't have aux stack info,
       it can't create any threads */
    if (!cluster->stack_segment_occupied)
        return false;

    os_mutex_lock(&cluster->lock);
    for (i = 0; i < cluster_max_thread_num; i++) {
        if (!cluster->stack_segment_occupied[i]) {
            if (start)
                *start = cluster->stack_tops[i];
            if (size)
                *size = cluster->stack_size;
            cluster->stack_segment_occupied[i] = true;
            os_mutex_unlock(&cluster->lock);
            return true;
        }
    }
    os_mutex_unlock(&cluster->lock);
    return false;
}
static bool
free_aux_stack(WASMCluster *cluster, uint32 start)
{
    uint32 i;

    for (i = 0; i < cluster_max_thread_num; i++) {
        if (start == cluster->stack_tops[i]) {
            os_mutex_lock(&cluster->lock);
            cluster->stack_segment_occupied[i] = false;
            os_mutex_unlock(&cluster->lock);
            return true;
        }
    }
    return false;
}
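
/*
 * wasm_cluster_create() splits the main instance's aux stack region into
 * (cluster_max_thread_num + 1) roughly equal segments: the main exec_env
 * keeps the top segment, and each remaining segment becomes a candidate aux
 * stack for a spawned thread (wasm stacks grow downwards, so stack_tops[]
 * stores the high address of each segment).
 *
 * Illustrative numbers (an example, not values from the code): with
 * aux_stack_size = 65536 and cluster_max_thread_num = 4, stack_size becomes
 * 65536 / 5 = 13107, aligned down to 13104; thread i's stack_tops[i] then
 * ends up at the original aux_stack_start minus 13104 * (i + 1).
 */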
WASMCluster *
wasm_cluster_create(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;
    uint64 total_size;
    uint32 aux_stack_start, aux_stack_size, i;

    bh_assert(exec_env->cluster == NULL);
    if (!(cluster = wasm_runtime_malloc(sizeof(WASMCluster)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return NULL;
    }
    memset(cluster, 0, sizeof(WASMCluster));

    exec_env->cluster = cluster;

    bh_list_init(&cluster->exec_env_list);
    bh_list_insert(&cluster->exec_env_list, exec_env);
    if (os_mutex_init(&cluster->lock) != 0) {
        wasm_runtime_free(cluster);
        LOG_ERROR("thread manager error: failed to init mutex");
        return NULL;
    }

    /* Prepare the aux stack top and size for every thread */
    if (!wasm_exec_env_get_aux_stack(exec_env, &aux_stack_start,
                                     &aux_stack_size)) {
        LOG_VERBOSE("No aux stack info for this module, can't create thread");

        /* If the module doesn't have aux stack info, don't report an error
           here; just leave stack_tops and stack_segment_occupied as NULL */
        os_mutex_lock(&cluster_list_lock);
        if (bh_list_insert(cluster_list, cluster) != 0) {
            os_mutex_unlock(&cluster_list_lock);
            goto fail;
        }
        os_mutex_unlock(&cluster_list_lock);

        return cluster;
    }

    cluster->stack_size = aux_stack_size / (cluster_max_thread_num + 1);
    if (cluster->stack_size < WASM_THREAD_AUX_STACK_SIZE_MIN) {
        goto fail;
    }
    /* Make stack size 16-byte aligned */
    cluster->stack_size = cluster->stack_size & (~15);

    /* Set initial aux stack top to the instance and
       aux stack boundary to the main exec_env */
    if (!wasm_exec_env_set_aux_stack(exec_env, aux_stack_start,
                                     cluster->stack_size))
        goto fail;

    if (cluster_max_thread_num != 0) {
        total_size = cluster_max_thread_num * sizeof(uint32);
        if (total_size >= UINT32_MAX
            || !(cluster->stack_tops =
                     wasm_runtime_malloc((uint32)total_size))) {
            goto fail;
        }
        memset(cluster->stack_tops, 0, (uint32)total_size);

        if (!(cluster->stack_segment_occupied = wasm_runtime_malloc(
                  cluster_max_thread_num * sizeof(bool)))) {
            goto fail;
        }
        memset(cluster->stack_segment_occupied, 0,
               cluster_max_thread_num * sizeof(bool));

        /* Reserve space for the main instance */
        aux_stack_start -= cluster->stack_size;
        for (i = 0; i < cluster_max_thread_num; i++) {
            cluster->stack_tops[i] = aux_stack_start - cluster->stack_size * i;
        }
    }

    os_mutex_lock(&cluster_list_lock);
    if (bh_list_insert(cluster_list, cluster) != 0) {
        os_mutex_unlock(&cluster_list_lock);
        goto fail;
    }
    os_mutex_unlock(&cluster_list_lock);

    return cluster;

fail:
    if (cluster)
        wasm_cluster_destroy(cluster);

    return NULL;
}
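
/*
 * Destroying a cluster first runs every callback registered through
 * wasm_cluster_register_destroy_callback(), then removes the cluster from
 * the global cluster list and frees its lock, its aux stack bookkeeping
 * arrays and the cluster structure itself.
 */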
static void
destroy_cluster_visitor(void *node, void *user_data)
{
    DestroyCallBackNode *destroy_node = (DestroyCallBackNode *)node;
    WASMCluster *cluster = (WASMCluster *)user_data;

    destroy_node->destroy_cb(cluster);
}

void
wasm_cluster_destroy(WASMCluster *cluster)
{
    traverse_list(destroy_callback_list, destroy_cluster_visitor,
                  (void *)cluster);

    /* Remove the cluster from the cluster list */
    os_mutex_lock(&cluster_list_lock);
    bh_list_remove(cluster_list, cluster);
    os_mutex_unlock(&cluster_list_lock);

    os_mutex_destroy(&cluster->lock);

    if (cluster->stack_tops)
        wasm_runtime_free(cluster->stack_tops);
    if (cluster->stack_segment_occupied)
        wasm_runtime_free(cluster->stack_segment_occupied);
    wasm_runtime_free(cluster);
}
static void
free_node_visitor(void *node, void *user_data)
{
    wasm_runtime_free(node);
}

void
wasm_cluster_cancel_all_callbacks()
{
    traverse_list(destroy_callback_list, free_node_visitor, NULL);
    bh_list_init(destroy_callback_list);
}

WASMCluster *
wasm_exec_env_get_cluster(WASMExecEnv *exec_env)
{
    return exec_env->cluster;
}
bool
wasm_cluster_add_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    bool ret = true;

    exec_env->cluster = cluster;

    os_mutex_lock(&cluster->lock);
    if (bh_list_insert(&cluster->exec_env_list, exec_env) != 0)
        ret = false;
    os_mutex_unlock(&cluster->lock);
    return ret;
}

bool
wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    bool ret = true;

    bh_assert(exec_env->cluster == cluster);
    os_mutex_lock(&cluster->lock);
    if (bh_list_remove(&cluster->exec_env_list, exec_env) != 0)
        ret = false;
    os_mutex_unlock(&cluster->lock);

    if (cluster->exec_env_list.len == 0) {
        /* exec_env_list empty, destroy the cluster */
        wasm_cluster_destroy(cluster);
    }
    return ret;
}
static WASMExecEnv *
wasm_cluster_search_exec_env(WASMCluster *cluster,
                             WASMModuleInstanceCommon *module_inst)
{
    WASMExecEnv *node = NULL;

    os_mutex_lock(&cluster->lock);
    node = bh_list_first_elem(&cluster->exec_env_list);
    while (node) {
        if (node->module_inst == module_inst) {
            os_mutex_unlock(&cluster->lock);
            return node;
        }
        node = bh_list_elem_next(node);
    }

    os_mutex_unlock(&cluster->lock);
    return NULL;
}
/* Search the global cluster list to find whether the given
   module instance has a corresponding exec_env */
WASMExecEnv *
wasm_clusters_search_exec_env(WASMModuleInstanceCommon *module_inst)
{
    WASMCluster *cluster = NULL;
    WASMExecEnv *exec_env = NULL;

    os_mutex_lock(&cluster_list_lock);
    cluster = bh_list_first_elem(cluster_list);
    while (cluster) {
        exec_env = wasm_cluster_search_exec_env(cluster, module_inst);
        if (exec_env) {
            os_mutex_unlock(&cluster_list_lock);
            return exec_env;
        }
        cluster = bh_list_elem_next(cluster);
    }

    os_mutex_unlock(&cluster_list_lock);
    return NULL;
}
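
/*
 * wasm_cluster_spawn_exec_env() instantiates the same module a second time,
 * wraps the new instance in a new exec_env, gives it its own aux stack
 * segment and registers it in the caller's cluster. A rough embedder-side
 * usage sketch (hypothetical code, not part of this file):
 *
 *   WASMExecEnv *child = wasm_cluster_spawn_exec_env(parent_exec_env);
 *   if (child) {
 *       // ... call into the module with `child` on another native thread ...
 *       wasm_cluster_destroy_spawned_exec_env(child);
 *   }
 */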
WASMExecEnv *
wasm_cluster_spawn_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = get_module_inst(exec_env);
    wasm_module_t module = wasm_exec_env_get_module(exec_env);
    wasm_module_inst_t new_module_inst;
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start, aux_stack_size;

    if (!module) {
        return NULL;
    }

    if (!(new_module_inst = wasm_runtime_instantiate_internal(
              module, true, 8192, 0, NULL, 0))) {
        return NULL;
    }

    if (module_inst) {
        /* Copy custom_data to the new module instance */
        wasm_runtime_set_custom_data_internal(
            new_module_inst, wasm_runtime_get_custom_data(module_inst));
    }

    new_exec_env = wasm_exec_env_create_internal(new_module_inst,
                                                 exec_env->wasm_stack_size);
    if (!new_exec_env)
        goto fail1;

    if (!allocate_aux_stack(cluster, &aux_stack_start, &aux_stack_size)) {
        LOG_ERROR("thread manager error: "
                  "failed to allocate aux stack space for new thread");
        goto fail2;
    }

    /* Set aux stack for the new exec_env */
    if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                     aux_stack_size)) {
        goto fail3;
    }

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail3;

    return new_exec_env;

fail3:
    /* Free the allocated aux stack space */
    free_aux_stack(cluster, aux_stack_start);
fail2:
    wasm_exec_env_destroy(new_exec_env);
fail1:
    wasm_runtime_deinstantiate_internal(new_module_inst, true);

    return NULL;
}
void
wasm_cluster_destroy_spawned_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = wasm_runtime_get_module_inst(exec_env);
    bh_assert(cluster != NULL);

    /* Free aux stack space */
    free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom);
    wasm_cluster_del_exec_env(cluster, exec_env);
    wasm_exec_env_destroy_internal(exec_env);

    wasm_runtime_deinstantiate_internal(module_inst, true);
}
/* Start routine of the thread manager */
static void *
thread_manager_start_routine(void *arg)
{
    void *ret;
    WASMExecEnv *exec_env = (WASMExecEnv *)arg;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster != NULL);

    exec_env->handle = os_self_thread();
    ret = exec_env->thread_start_routine(exec_env);

#ifdef OS_ENABLE_HW_BOUND_CHECK
    if (exec_env->suspend_flags.flags & 0x08)
        ret = exec_env->thread_ret_value;
#endif

    /* Routine exit */
    /* Free aux stack space */
    free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom);
    /* Detach the native thread here to ensure the resources are freed */
    wasm_cluster_detach_thread(exec_env);
    /* Remove and destroy exec_env */
    wasm_cluster_del_exec_env(cluster, exec_env);
    wasm_exec_env_destroy_internal(exec_env);

    os_thread_exit(ret);
    return ret;
}
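
/*
 * wasm_cluster_create_thread() spins up a native thread that runs
 * thread_routine(new_exec_env) through thread_manager_start_routine() above;
 * the user argument is stored in new_exec_env->thread_arg. A rough
 * caller-side sketch (hypothetical, e.g. from a pthread-style host library):
 *
 *   static void *
 *   my_start(void *arg)
 *   {
 *       WASMExecEnv *env = (WASMExecEnv *)arg; // env->thread_arg holds
 *                                              // the user argument
 *       // ... call into the wasm module with `env` ...
 *       return NULL;
 *   }
 *
 *   wasm_cluster_create_thread(parent_exec_env, new_module_inst,
 *                              my_start, user_arg);
 */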
int32
wasm_cluster_create_thread(WASMExecEnv *exec_env,
                           wasm_module_inst_t module_inst,
                           void *(*thread_routine)(void *), void *arg)
{
    WASMCluster *cluster;
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start, aux_stack_size;
    korp_tid tid;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    new_exec_env =
        wasm_exec_env_create_internal(module_inst, exec_env->wasm_stack_size);
    if (!new_exec_env)
        return -1;

    if (!allocate_aux_stack(cluster, &aux_stack_start, &aux_stack_size)) {
        LOG_ERROR("thread manager error: "
                  "failed to allocate aux stack space for new thread");
        goto fail1;
    }

    /* Set aux stack for the new exec_env */
    if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                     aux_stack_size)) {
        goto fail2;
    }

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail2;

    new_exec_env->thread_start_routine = thread_routine;
    new_exec_env->thread_arg = arg;

    if (0
        != os_thread_create(&tid, thread_manager_start_routine,
                            (void *)new_exec_env,
                            APP_THREAD_STACK_SIZE_DEFAULT)) {
        goto fail3;
    }

    return 0;

fail3:
    wasm_cluster_del_exec_env(cluster, new_exec_env);
fail2:
    /* Free the allocated aux stack space */
    free_aux_stack(cluster, aux_stack_start);
fail1:
    wasm_exec_env_destroy(new_exec_env);

    return -1;
}
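
/*
 * The block below is only compiled for the debug interpreter. Each exec_env
 * carries a WASMCurrentEnvStatus with a running_status (STATUS_RUNNING /
 * STATUS_STEP / STATUS_STOP / STATUS_EXIT), a signal_flag and a
 * wait_lock/wait_cond pair; wasm_cluster_wait_thread_status() blocks on the
 * condition variable until the target thread is no longer running, then
 * reports the pending signal.
 */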
#if WASM_ENABLE_DEBUG_INTERP != 0
WASMCurrentEnvStatus *
wasm_cluster_create_exenv_status()
{
    WASMCurrentEnvStatus *status;

    if (!(status = wasm_runtime_malloc(sizeof(WASMCurrentEnvStatus)))) {
        goto fail;
    }

    if (os_mutex_init(&status->wait_lock) != 0)
        goto fail1;

    if (os_cond_init(&status->wait_cond) != 0)
        goto fail2;

    status->step_count = 0;
    status->signal_flag = 0;
    status->running_status = 0;
    return status;

fail2:
    os_mutex_destroy(&status->wait_lock);
fail1:
    wasm_runtime_free(status);
fail:
    return NULL;
}
void
wasm_cluster_destroy_exenv_status(WASMCurrentEnvStatus *status)
{
    os_mutex_destroy(&status->wait_lock);
    os_cond_destroy(&status->wait_cond);
    wasm_runtime_free(status);
}

inline static bool
wasm_cluster_thread_is_running(WASMExecEnv *exec_env)
{
    return exec_env->current_status->running_status == STATUS_RUNNING
           || exec_env->current_status->running_status == STATUS_STEP;
}

void
wasm_cluster_clear_thread_signal(WASMExecEnv *exec_env)
{
    exec_env->current_status->signal_flag = 0;
}

void
wasm_cluster_wait_thread_status(WASMExecEnv *exec_env, uint32 *status)
{
    os_mutex_lock(&exec_env->current_status->wait_lock);
    while (wasm_cluster_thread_is_running(exec_env)) {
        os_cond_wait(&exec_env->current_status->wait_cond,
                     &exec_env->current_status->wait_lock);
    }
    *status = exec_env->current_status->signal_flag;
    os_mutex_unlock(&exec_env->current_status->wait_lock);
}

void
wasm_cluster_thread_send_signal(WASMExecEnv *exec_env, uint32 signo)
{
    exec_env->current_status->signal_flag = signo;
}

void
wasm_cluster_thread_stopped(WASMExecEnv *exec_env)
{
    exec_env->current_status->running_status = STATUS_STOP;
    os_cond_signal(&exec_env->current_status->wait_cond);
}

void
wasm_cluster_thread_waiting_run(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    while (!wasm_cluster_thread_is_running(exec_env)) {
        os_cond_wait(&exec_env->wait_cond, &exec_env->wait_lock);
    }
    os_mutex_unlock(&exec_env->wait_lock);
}

void
wasm_cluster_send_signal_all(WASMCluster *cluster, uint32 signo)
{
    WASMExecEnv *exec_env = bh_list_first_elem(&cluster->exec_env_list);
    while (exec_env) {
        wasm_cluster_thread_send_signal(exec_env, signo);
        exec_env = bh_list_elem_next(exec_env);
    }
}

void
wasm_cluster_thread_exited(WASMExecEnv *exec_env)
{
    exec_env->current_status->running_status = STATUS_EXIT;
    os_cond_signal(&exec_env->current_status->wait_cond);
}

void
wasm_cluster_thread_continue(WASMExecEnv *exec_env)
{
    wasm_cluster_clear_thread_signal(exec_env);
    exec_env->current_status->running_status = STATUS_RUNNING;
    os_cond_signal(&exec_env->wait_cond);
}

void
wasm_cluster_thread_step(WASMExecEnv *exec_env)
{
    exec_env->current_status->running_status = STATUS_STEP;
    os_cond_signal(&exec_env->wait_cond);
}
#endif
int32
wasm_cluster_join_thread(WASMExecEnv *exec_env, void **ret_val)
{
    return os_thread_join(exec_env->handle, ret_val);
}

int32
wasm_cluster_detach_thread(WASMExecEnv *exec_env)
{
    return os_thread_detach(exec_env->handle);
}
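
/*
 * wasm_cluster_exit_thread() is the path taken when the wasm app itself ends
 * the thread. Under OS_ENABLE_HW_BOUND_CHECK (non-Windows) it stores the
 * return value, sets the 0x08 suspend flag and longjmps back to the
 * outermost jmpbuf, so control unwinds to the caller and
 * thread_manager_start_routine() picks up thread_ret_value and performs the
 * cleanup; otherwise the cleanup (free aux stack, detach, remove/destroy
 * exec_env, os_thread_exit) happens right here.
 */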
void
wasm_cluster_exit_thread(WASMExecEnv *exec_env, void *retval)
{
    WASMCluster *cluster;

#ifdef OS_ENABLE_HW_BOUND_CHECK
    if (exec_env->jmpbuf_stack_top) {
        /* Store the return value in exec_env */
        exec_env->thread_ret_value = retval;
        exec_env->suspend_flags.flags |= 0x08;

#ifndef BH_PLATFORM_WINDOWS
        /* Pop all jmpbuf_node except the last one */
        while (exec_env->jmpbuf_stack_top->prev) {
            wasm_exec_env_pop_jmpbuf(exec_env);
        }
        os_longjmp(exec_env->jmpbuf_stack_top->jmpbuf, 1);
        return;
#endif
    }
#endif

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_clear_thread_signal(exec_env);
    wasm_cluster_thread_exited(exec_env);
#endif

    /* The app has exited the thread; free the resources
       before exiting the native thread */
    /* Free aux stack space */
    free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom);
    /* Detach the native thread here to ensure the resources are freed */
    wasm_cluster_detach_thread(exec_env);
    /* Remove and destroy exec_env */
    wasm_cluster_del_exec_env(cluster, exec_env);
    wasm_exec_env_destroy_internal(exec_env);

    os_thread_exit(retval);
}
int32
wasm_cluster_cancel_thread(WASMExecEnv *exec_env)
{
    /* Set the termination flag */
#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_thread_send_signal(exec_env, WAMR_SIG_TERM);
    wasm_cluster_thread_exited(exec_env);
#else
    exec_env->suspend_flags.flags |= 0x01;
#endif
    return 0;
}
static void
terminate_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_cancel_thread(curr_exec_env);
    wasm_cluster_join_thread(curr_exec_env, NULL);
}

void
wasm_cluster_terminate_all(WASMCluster *cluster)
{
    traverse_list(&cluster->exec_env_list, terminate_thread_visitor, NULL);
}

void
wasm_cluster_terminate_all_except_self(WASMCluster *cluster,
                                       WASMExecEnv *exec_env)
{
    traverse_list(&cluster->exec_env_list, terminate_thread_visitor,
                  (void *)exec_env);
}
bool
wasm_cluster_register_destroy_callback(void (*callback)(WASMCluster *))
{
    DestroyCallBackNode *node;

    if (!(node = wasm_runtime_malloc(sizeof(DestroyCallBackNode)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return false;
    }
    node->destroy_cb = callback;
    bh_list_insert(destroy_callback_list, node);
    return true;
}
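
/*
 * suspend_flags bits as used in this file: 0x01 is the termination flag set
 * by wasm_cluster_cancel_thread(), 0x02 is the suspend flag set/cleared by
 * wasm_cluster_suspend_thread() and wasm_cluster_resume_thread(), and 0x08
 * marks "exited via wasm_cluster_exit_thread()" when hardware bound check
 * is enabled. The flags are only set here; they are consumed elsewhere in
 * the runtime.
 */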
void
wasm_cluster_suspend_thread(WASMExecEnv *exec_env)
{
    /* Set the suspend flag */
    exec_env->suspend_flags.flags |= 0x02;
}

static void
suspend_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_suspend_thread(curr_exec_env);
}

void
wasm_cluster_suspend_all(WASMCluster *cluster)
{
    traverse_list(&cluster->exec_env_list, suspend_thread_visitor, NULL);
}

void
wasm_cluster_suspend_all_except_self(WASMCluster *cluster,
                                     WASMExecEnv *exec_env)
{
    traverse_list(&cluster->exec_env_list, suspend_thread_visitor,
                  (void *)exec_env);
}
void
wasm_cluster_resume_thread(WASMExecEnv *exec_env)
{
    exec_env->suspend_flags.flags &= ~0x02;
    os_cond_signal(&exec_env->wait_cond);
}

static void
resume_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;

    wasm_cluster_resume_thread(curr_exec_env);
}

void
wasm_cluster_resume_all(WASMCluster *cluster)
{
    traverse_list(&cluster->exec_env_list, resume_thread_visitor, NULL);
}
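
/*
 * wasm_cluster_spread_exception() copies the exception of the faulting
 * exec_env's module instance to every other instance in the cluster so that
 * all threads observe the failure. The "+ 11" below skips the "Exception: "
 * prefix (11 characters) that wasm_runtime_get_exception() prepends to the
 * message.
 */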
static void
set_exception_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
    WASMModuleInstanceCommon *module_inst = get_module_inst(exec_env);
    WASMModuleInstanceCommon *curr_module_inst = get_module_inst(curr_exec_env);
    const char *exception = wasm_runtime_get_exception(module_inst);
    /* Skip "Exception: " */
    exception += 11;

    if (curr_exec_env != exec_env) {
        curr_module_inst = get_module_inst(curr_exec_env);
        wasm_runtime_set_exception(curr_module_inst, exception);
    }
}

void
wasm_cluster_spread_exception(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    traverse_list(&cluster->exec_env_list, set_exception_visitor, exec_env);
}
static void
set_custom_data_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMModuleInstanceCommon *module_inst = get_module_inst(curr_exec_env);

    wasm_runtime_set_custom_data_internal(module_inst, user_data);
}

void
wasm_cluster_spread_custom_data(WASMModuleInstanceCommon *module_inst,
                                void *custom_data)
{
    WASMExecEnv *exec_env = wasm_clusters_search_exec_env(module_inst);
    WASMCluster *cluster = NULL;

    bh_assert(exec_env);
    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    traverse_list(&cluster->exec_env_list, set_custom_data_visitor,
                  custom_data);
}