thread_manager.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "thread_manager.h"

#include "../common/wasm_c_api_internal.h"

#if WASM_ENABLE_INTERP != 0
#include "../interpreter/wasm_runtime.h"
#endif
#if WASM_ENABLE_AOT != 0
#include "../aot/aot_runtime.h"
#endif

#if WASM_ENABLE_DEBUG_INTERP != 0
#include "debug_engine.h"
#endif

typedef struct {
    bh_list_link l;
    void (*destroy_cb)(WASMCluster *);
} DestroyCallBackNode;

static bh_list destroy_callback_list_head;
static bh_list *const destroy_callback_list = &destroy_callback_list_head;

static bh_list cluster_list_head;
static bh_list *const cluster_list = &cluster_list_head;
static korp_mutex cluster_list_lock;

static korp_mutex _exception_lock;

typedef void (*list_visitor)(void *, void *);

static uint32 cluster_max_thread_num = CLUSTER_MAX_THREAD_NUM;

/* Set the maximum thread number, if this function is not called,
   the max thread num is defined by CLUSTER_MAX_THREAD_NUM */
void
wasm_cluster_set_max_thread_num(uint32 num)
{
    if (num > 0)
        cluster_max_thread_num = num;
}

bool
thread_manager_init()
{
    if (bh_list_init(cluster_list) != 0)
        return false;
    if (os_mutex_init(&cluster_list_lock) != 0)
        return false;
    if (os_mutex_init(&_exception_lock) != 0) {
        os_mutex_destroy(&cluster_list_lock);
        return false;
    }
    return true;
}

void
thread_manager_destroy()
{
    WASMCluster *cluster = bh_list_first_elem(cluster_list);
    WASMCluster *next;

    while (cluster) {
        next = bh_list_elem_next(cluster);
        wasm_cluster_destroy(cluster);
        cluster = next;
    }
    wasm_cluster_cancel_all_callbacks();

    os_mutex_destroy(&_exception_lock);
    os_mutex_destroy(&cluster_list_lock);
}

static void
traverse_list(bh_list *l, list_visitor visitor, void *user_data)
{
    void *next, *node = bh_list_first_elem(l);

    while (node) {
        next = bh_list_elem_next(node);
        visitor(node, user_data);
        node = next;
    }
}
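
/* Like traverse_list(), but temporarily releases cluster->lock around each
   visitor call, since visitors such as join/terminate may block or need to
   re-acquire the lock. Visited nodes are remembered in a vector and the
   traversal restarts from the list head after every visit, because the
   exec_env list may have changed while the lock was dropped. */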

/* Assumes cluster->lock is locked */
static bool
safe_traverse_exec_env_list(WASMCluster *cluster, list_visitor visitor,
                            void *user_data)
{
    Vector proc_nodes;
    void *node;
    bool ret = true;

    if (!bh_vector_init(&proc_nodes, cluster->exec_env_list.len, sizeof(void *),
                        false)) {
        ret = false;
        goto final;
    }

    node = bh_list_first_elem(&cluster->exec_env_list);
    while (node) {
        bool already_processed = false;
        void *proc_node;
        uint32 i;
        for (i = 0; i < (uint32)bh_vector_size(&proc_nodes); i++) {
            if (!bh_vector_get(&proc_nodes, i, &proc_node)) {
                ret = false;
                goto final;
            }
            if (proc_node == node) {
                already_processed = true;
                break;
            }
        }
        if (already_processed) {
            node = bh_list_elem_next(node);
            continue;
        }

        os_mutex_unlock(&cluster->lock);
        visitor(node, user_data);
        os_mutex_lock(&cluster->lock);

        if (!bh_vector_append(&proc_nodes, &node)) {
            ret = false;
            goto final;
        }

        node = bh_list_first_elem(&cluster->exec_env_list);
    }

final:
    bh_vector_destroy(&proc_nodes);
    return ret;
}
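
/* Aux stack management: when WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION is
   enabled, each thread's aux stack is allocated from the module instance's
   heap via wasm_runtime_module_malloc_internal(); otherwise the aux stack
   area reserved by the module is pre-divided into cluster_max_thread_num
   fixed-size segments whose tops are recorded in cluster->stack_tops and
   whose usage is tracked in cluster->stack_segment_occupied. */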

/* The caller must lock cluster->lock */
static bool
allocate_aux_stack(WASMExecEnv *exec_env, uint32 *start, uint32 *size)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION != 0
    WASMModuleInstanceCommon *module_inst =
        wasm_exec_env_get_module_inst(exec_env);
    uint32 stack_end;

    stack_end = wasm_runtime_module_malloc_internal(module_inst, exec_env,
                                                    cluster->stack_size, NULL);
    *start = stack_end + cluster->stack_size;
    *size = cluster->stack_size;

    return stack_end != 0;
#else
    uint32 i;

    /* If the module doesn't have aux stack info,
       it can't create any threads */
    if (!cluster->stack_segment_occupied)
        return false;

    for (i = 0; i < cluster_max_thread_num; i++) {
        if (!cluster->stack_segment_occupied[i]) {
            if (start)
                *start = cluster->stack_tops[i];
            if (size)
                *size = cluster->stack_size;
            cluster->stack_segment_occupied[i] = true;
            return true;
        }
    }

    return false;
#endif
}

/* The caller must lock cluster->lock */
static bool
free_aux_stack(WASMExecEnv *exec_env, uint32 start)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION != 0
    WASMModuleInstanceCommon *module_inst =
        wasm_exec_env_get_module_inst(exec_env);

    if (!wasm_exec_env_is_aux_stack_managed_by_runtime(exec_env)) {
        return true;
    }

    bh_assert(start >= cluster->stack_size);

    wasm_runtime_module_free_internal(module_inst, exec_env,
                                      start - cluster->stack_size);
    return true;
#else
    uint32 i;

    for (i = 0; i < cluster_max_thread_num; i++) {
        if (start == cluster->stack_tops[i]) {
            cluster->stack_segment_occupied[i] = false;
            return true;
        }
    }
    return false;
#endif
}

bool
wasm_cluster_allocate_aux_stack(WASMExecEnv *exec_env, uint32 *p_start,
                                uint32 *p_size)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    bool ret;

    os_mutex_lock(&cluster->lock);
    ret = allocate_aux_stack(exec_env, p_start, p_size);
    os_mutex_unlock(&cluster->lock);

    return ret;
}

bool
wasm_cluster_free_aux_stack(WASMExecEnv *exec_env, uint32 start)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    bool ret;

    os_mutex_lock(&cluster->lock);
    ret = free_aux_stack(exec_env, start);
    os_mutex_unlock(&cluster->lock);

    return ret;
}
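
/* Create a cluster for the main exec_env: the module's aux stack space is
   recorded (and, without heap-based allocation, split into per-thread
   segments), the main exec_env is added as the first member, and the cluster
   is registered in the global cluster list. */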

WASMCluster *
wasm_cluster_create(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;
    uint32 aux_stack_start, aux_stack_size;

    bh_assert(exec_env->cluster == NULL);
    if (!(cluster = wasm_runtime_malloc(sizeof(WASMCluster)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return NULL;
    }
    memset(cluster, 0, sizeof(WASMCluster));

    exec_env->cluster = cluster;

    bh_list_init(&cluster->exec_env_list);
    bh_list_insert(&cluster->exec_env_list, exec_env);
    if (os_mutex_init(&cluster->lock) != 0) {
        wasm_runtime_free(cluster);
        LOG_ERROR("thread manager error: failed to init mutex");
        return NULL;
    }

    /* Prepare the aux stack top and size for every thread */
    if (!wasm_exec_env_get_aux_stack(exec_env, &aux_stack_start,
                                     &aux_stack_size)) {
#if WASM_ENABLE_LIB_WASI_THREADS == 0
        LOG_VERBOSE("No aux stack info for this module, can't create thread");
#endif
        /* If the module doesn't have aux stack info, don't throw an error
           here; just leave stack_tops and stack_segment_occupied as NULL */
        os_mutex_lock(&cluster_list_lock);
        if (bh_list_insert(cluster_list, cluster) != 0) {
            os_mutex_unlock(&cluster_list_lock);
            goto fail;
        }
        os_mutex_unlock(&cluster_list_lock);

        return cluster;
    }

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION != 0
    cluster->stack_size = aux_stack_size;
#else
    cluster->stack_size = aux_stack_size / (cluster_max_thread_num + 1);
    if (cluster->stack_size < WASM_THREAD_AUX_STACK_SIZE_MIN) {
        goto fail;
    }
    /* Make stack size 16-byte aligned */
    cluster->stack_size = cluster->stack_size & (~15);
#endif

    /* Set initial aux stack top to the instance and
       aux stack boundary to the main exec_env */
    if (!wasm_exec_env_set_aux_stack(exec_env, aux_stack_start,
                                     cluster->stack_size))
        goto fail;

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION == 0
    if (cluster_max_thread_num != 0) {
        uint64 total_size = cluster_max_thread_num * sizeof(uint32);
        uint32 i;
        if (total_size >= UINT32_MAX
            || !(cluster->stack_tops =
                     wasm_runtime_malloc((uint32)total_size))) {
            goto fail;
        }
        memset(cluster->stack_tops, 0, (uint32)total_size);

        if (!(cluster->stack_segment_occupied =
                  wasm_runtime_malloc(cluster_max_thread_num * sizeof(bool)))) {
            goto fail;
        }
        memset(cluster->stack_segment_occupied, 0,
               cluster_max_thread_num * sizeof(bool));

        /* Reserve space for main instance */
        aux_stack_start -= cluster->stack_size;
        for (i = 0; i < cluster_max_thread_num; i++) {
            cluster->stack_tops[i] = aux_stack_start - cluster->stack_size * i;
        }
    }
#endif

    os_mutex_lock(&cluster_list_lock);
    if (bh_list_insert(cluster_list, cluster) != 0) {
        os_mutex_unlock(&cluster_list_lock);
        goto fail;
    }
    os_mutex_unlock(&cluster_list_lock);

    return cluster;

fail:
    if (cluster)
        wasm_cluster_destroy(cluster);
    return NULL;
}

static void
destroy_cluster_visitor(void *node, void *user_data)
{
    DestroyCallBackNode *destroy_node = (DestroyCallBackNode *)node;
    WASMCluster *cluster = (WASMCluster *)user_data;

    destroy_node->destroy_cb(cluster);
}

void
wasm_cluster_destroy(WASMCluster *cluster)
{
    traverse_list(destroy_callback_list, destroy_cluster_visitor,
                  (void *)cluster);

    /* Remove the cluster from the cluster list */
    os_mutex_lock(&cluster_list_lock);
    bh_list_remove(cluster_list, cluster);
    os_mutex_unlock(&cluster_list_lock);

    os_mutex_destroy(&cluster->lock);

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION == 0
    if (cluster->stack_tops)
        wasm_runtime_free(cluster->stack_tops);
    if (cluster->stack_segment_occupied)
        wasm_runtime_free(cluster->stack_segment_occupied);
#endif

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_debug_instance_destroy(cluster);
#endif

#if WASM_ENABLE_DUMP_CALL_STACK != 0
    bh_vector_destroy(&cluster->exception_frames);
#endif

    wasm_runtime_free(cluster);
}

static void
free_node_visitor(void *node, void *user_data)
{
    wasm_runtime_free(node);
}

void
wasm_cluster_cancel_all_callbacks()
{
    traverse_list(destroy_callback_list, free_node_visitor, NULL);
    bh_list_init(destroy_callback_list);
}

WASMCluster *
wasm_exec_env_get_cluster(WASMExecEnv *exec_env)
{
    return exec_env->cluster;
}

/* The caller must lock cluster->lock */
static bool
wasm_cluster_add_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    bool ret = true;

    exec_env->cluster = cluster;

    if (cluster->exec_env_list.len == cluster_max_thread_num + 1) {
        LOG_ERROR("thread manager error: "
                  "maximum number of threads exceeded");
        ret = false;
    }

    if (ret && bh_list_insert(&cluster->exec_env_list, exec_env) != 0)
        ret = false;

    return ret;
}

static bool
wasm_cluster_del_exec_env_internal(WASMCluster *cluster, WASMExecEnv *exec_env,
                                   bool can_destroy_cluster)
{
    bool ret = true;

    bh_assert(exec_env->cluster == cluster);

#if WASM_ENABLE_DEBUG_INTERP != 0
    /* Wait for debugger control thread to process the
       stop event of this thread */
    if (cluster->debug_inst) {
        /* lock the debug_inst->wait_lock so
           other threads can't fire stop events */
        os_mutex_lock(&cluster->debug_inst->wait_lock);
        while (cluster->debug_inst->stopped_thread == exec_env) {
            /* either wakes up by signal or by 1-second timeout */
            os_cond_reltimedwait(&cluster->debug_inst->wait_cond,
                                 &cluster->debug_inst->wait_lock, 1000000);
        }
        os_mutex_unlock(&cluster->debug_inst->wait_lock);
    }
#endif

    if (bh_list_remove(&cluster->exec_env_list, exec_env) != 0)
        ret = false;

    if (can_destroy_cluster) {
        if (cluster->exec_env_list.len == 0) {
            /* exec_env_list empty, destroy the cluster */
            wasm_cluster_destroy(cluster);
        }
    }
    else {
        /* Don't destroy cluster as cluster->lock is being used */
    }

    return ret;
}

/* The caller should lock cluster->lock for thread safety */
bool
wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    return wasm_cluster_del_exec_env_internal(cluster, exec_env, true);
}

static WASMExecEnv *
wasm_cluster_search_exec_env(WASMCluster *cluster,
                             WASMModuleInstanceCommon *module_inst)
{
    WASMExecEnv *node = NULL;

    os_mutex_lock(&cluster->lock);
    node = bh_list_first_elem(&cluster->exec_env_list);
    while (node) {
        if (node->module_inst == module_inst) {
            os_mutex_unlock(&cluster->lock);
            return node;
        }
        node = bh_list_elem_next(node);
    }
    os_mutex_unlock(&cluster->lock);

    return NULL;
}

/* Search the global cluster list to find whether the given
   module instance has a corresponding exec_env */
WASMExecEnv *
wasm_clusters_search_exec_env(WASMModuleInstanceCommon *module_inst)
{
    WASMCluster *cluster = NULL;
    WASMExecEnv *exec_env = NULL;

    os_mutex_lock(&cluster_list_lock);
    cluster = bh_list_first_elem(cluster_list);
    while (cluster) {
        exec_env = wasm_cluster_search_exec_env(cluster, module_inst);
        if (exec_env) {
            os_mutex_unlock(&cluster_list_lock);
            return exec_env;
        }
        cluster = bh_list_elem_next(cluster);
    }
    os_mutex_unlock(&cluster_list_lock);

    return NULL;
}
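
/* Spawn a new exec_env that runs on a separate instance of the same module:
   a fresh module instance is created, custom data, native contexts and
   C API imports are copied from the parent instance, an aux stack segment is
   allocated, and the new exec_env joins the parent's cluster. On any failure
   everything allocated so far is rolled back. */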

WASMExecEnv *
wasm_cluster_spawn_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = get_module_inst(exec_env);
    wasm_module_t module;
    wasm_module_inst_t new_module_inst;
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start, aux_stack_size;
    uint32 stack_size = 8192;

    if (!module_inst || !(module = wasm_exec_env_get_module(exec_env))) {
        return NULL;
    }

    if (!(new_module_inst = wasm_runtime_instantiate_internal(
              module, module_inst, exec_env, stack_size, 0, NULL, 0))) {
        return NULL;
    }

    /* Set custom_data to new module instance */
    wasm_runtime_set_custom_data_internal(
        new_module_inst, wasm_runtime_get_custom_data(module_inst));

    wasm_native_inherit_contexts(new_module_inst, module_inst);

    if (!(wasm_cluster_dup_c_api_imports(new_module_inst, module_inst))) {
        goto fail1;
    }

    os_mutex_lock(&cluster->lock);

    if (cluster->has_exception || cluster->processing) {
        goto fail2;
    }

#if WASM_ENABLE_INTERP != 0
    if (module_inst->module_type == Wasm_Module_Bytecode) {
        stack_size =
            ((WASMModuleInstance *)module_inst)->default_wasm_stack_size;
    }
#endif

#if WASM_ENABLE_AOT != 0
    if (module_inst->module_type == Wasm_Module_AoT) {
        stack_size =
            ((AOTModuleInstance *)module_inst)->default_wasm_stack_size;
    }
#endif

    new_exec_env = wasm_exec_env_create_internal(new_module_inst,
                                                 exec_env->wasm_stack_size);
    if (!new_exec_env) {
        goto fail2;
    }

    if (!allocate_aux_stack(exec_env, &aux_stack_start, &aux_stack_size)) {
        LOG_ERROR("thread manager error: "
                  "failed to allocate aux stack space for new thread");
        goto fail3;
    }

    /* Set aux stack for current thread */
    if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                     aux_stack_size)) {
        goto fail4;
    }

    /* Inherit suspend_flags of parent thread */
    new_exec_env->suspend_flags.flags = exec_env->suspend_flags.flags;

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env)) {
        goto fail4;
    }

    os_mutex_unlock(&cluster->lock);

    return new_exec_env;

fail4:
    /* free the allocated aux stack space */
    free_aux_stack(exec_env, aux_stack_start);
fail3:
    wasm_exec_env_destroy_internal(new_exec_env);
fail2:
    os_mutex_unlock(&cluster->lock);
fail1:
    wasm_runtime_deinstantiate_internal(new_module_inst, true);

    return NULL;
}

void
wasm_cluster_destroy_spawned_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = wasm_runtime_get_module_inst(exec_env);
    bh_assert(cluster != NULL);

    os_mutex_lock(&cluster->lock);

    /* Free aux stack space */
    free_aux_stack(exec_env, exec_env->aux_stack_bottom.bottom);

    /* Remove exec_env */
    wasm_cluster_del_exec_env_internal(cluster, exec_env, false);

    /* Destroy exec_env */
    wasm_exec_env_destroy_internal(exec_env);

    /* Routine exit, destroy instance */
    wasm_runtime_deinstantiate_internal(module_inst, true);

    os_mutex_unlock(&cluster->lock);
}
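
/* Wrapper start routine used by every thread created through
   wasm_cluster_create_thread(): it publishes the native thread handle to the
   parent, runs the user-provided thread_start_routine, and on return releases
   the thread's aux stack, exec_env and module instance before the native
   thread exits. */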

/* start routine of thread manager */
static void *
thread_manager_start_routine(void *arg)
{
    void *ret;
    WASMExecEnv *exec_env = (WASMExecEnv *)arg;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    WASMModuleInstanceCommon *module_inst =
        wasm_exec_env_get_module_inst(exec_env);
    bh_assert(cluster != NULL);
    bh_assert(module_inst != NULL);

    os_mutex_lock(&exec_env->wait_lock);
    exec_env->handle = os_self_thread();
    /* Notify the parent thread to continue running */
    os_cond_signal(&exec_env->wait_cond);
    os_mutex_unlock(&exec_env->wait_lock);

    ret = exec_env->thread_start_routine(exec_env);

#ifdef OS_ENABLE_HW_BOUND_CHECK
    os_mutex_lock(&exec_env->wait_lock);
    if (WASM_SUSPEND_FLAGS_GET(exec_env->suspend_flags)
        & WASM_SUSPEND_FLAG_EXIT)
        ret = exec_env->thread_ret_value;
    os_mutex_unlock(&exec_env->wait_lock);
#endif

    /* Routine exit */

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_thread_exited(exec_env);
#endif

    os_mutex_lock(&cluster_list_lock);
    os_mutex_lock(&cluster->lock);

    /* Detach the native thread here to ensure the resources are freed */
    if (exec_env->wait_count == 0 && !exec_env->thread_is_detached) {
        /* Only detach current thread when there is no other thread
           joining it, otherwise let the system resources for the
           thread be released after joining */
        os_thread_detach(exec_env->handle);
        /* No need to set exec_env->thread_is_detached to true here
           since we will exit soon */
    }

    /* Free aux stack space */
    free_aux_stack(exec_env, exec_env->aux_stack_bottom.bottom);

    /* Remove exec_env */
    wasm_cluster_del_exec_env_internal(cluster, exec_env, false);

    /* Destroy exec_env */
    wasm_exec_env_destroy_internal(exec_env);

    /* Routine exit, destroy instance */
    wasm_runtime_deinstantiate_internal(module_inst, true);

    os_mutex_unlock(&cluster->lock);
    os_mutex_unlock(&cluster_list_lock);

    os_thread_exit(ret);
    return ret;
}
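
/* Create a new thread in the given cluster. The caller provides the module
   instance the thread should run on and, optionally, a pre-allocated aux
   stack segment; when is_aux_stack_allocated is false the new exec_env runs
   with the aux stack disabled. Returns 0 on success, -1 on failure. */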

int32
wasm_cluster_create_thread(WASMExecEnv *exec_env,
                           wasm_module_inst_t module_inst,
                           bool is_aux_stack_allocated, uint32 aux_stack_start,
                           uint32 aux_stack_size,
                           void *(*thread_routine)(void *), void *arg)
{
    WASMCluster *cluster;
    WASMExecEnv *new_exec_env;
    korp_tid tid;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    os_mutex_lock(&cluster->lock);

    if (cluster->has_exception || cluster->processing) {
        goto fail1;
    }

    new_exec_env =
        wasm_exec_env_create_internal(module_inst, exec_env->wasm_stack_size);
    if (!new_exec_env)
        goto fail1;

    if (is_aux_stack_allocated) {
        /* Set aux stack for current thread */
        if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                         aux_stack_size)) {
            goto fail2;
        }
    }
    else {
        /* Disable aux stack */
        new_exec_env->aux_stack_boundary.boundary = 0;
        new_exec_env->aux_stack_bottom.bottom = UINT32_MAX;
    }

    /* Inherit suspend_flags of parent thread */
    new_exec_env->suspend_flags.flags = exec_env->suspend_flags.flags;

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail2;

    new_exec_env->thread_start_routine = thread_routine;
    new_exec_env->thread_arg = arg;

    os_mutex_lock(&new_exec_env->wait_lock);

    if (0
        != os_thread_create(&tid, thread_manager_start_routine,
                            (void *)new_exec_env,
                            APP_THREAD_STACK_SIZE_DEFAULT)) {
        os_mutex_unlock(&new_exec_env->wait_lock);
        goto fail3;
    }

    /* Wait until new_exec_env->handle is set, so it can't be
       accessed illegally after cluster->lock is unlocked */
    os_cond_wait(&new_exec_env->wait_cond, &new_exec_env->wait_lock);
    os_mutex_unlock(&new_exec_env->wait_lock);

    os_mutex_unlock(&cluster->lock);

    return 0;

fail3:
    wasm_cluster_del_exec_env_internal(cluster, new_exec_env, false);
fail2:
    wasm_exec_env_destroy_internal(new_exec_env);
fail1:
    os_mutex_unlock(&cluster->lock);

    return -1;
}

bool
wasm_cluster_dup_c_api_imports(WASMModuleInstanceCommon *module_inst_dst,
                               const WASMModuleInstanceCommon *module_inst_src)
{
    /* workaround for passing instantiation/linking information */
    CApiFuncImport **new_c_api_func_imports = NULL;
    CApiFuncImport *c_api_func_imports;
    uint32 import_func_count = 0;
    uint32 size_in_bytes = 0;

#if WASM_ENABLE_INTERP != 0
    if (module_inst_src->module_type == Wasm_Module_Bytecode) {
        new_c_api_func_imports = &(((WASMModuleInstance *)module_inst_dst)
                                       ->e->common.c_api_func_imports);
        c_api_func_imports = ((const WASMModuleInstance *)module_inst_src)
                                 ->e->common.c_api_func_imports;
        import_func_count =
            ((WASMModule *)(((const WASMModuleInstance *)module_inst_src)
                                ->module))
                ->import_function_count;
    }
#endif
#if WASM_ENABLE_AOT != 0
    if (module_inst_src->module_type == Wasm_Module_AoT) {
        AOTModuleInstanceExtra *e =
            (AOTModuleInstanceExtra *)((AOTModuleInstance *)module_inst_dst)->e;
        new_c_api_func_imports = &(e->common.c_api_func_imports);

        e = (AOTModuleInstanceExtra *)((AOTModuleInstance *)module_inst_src)->e;
        c_api_func_imports = e->common.c_api_func_imports;

        import_func_count =
            ((AOTModule *)(((AOTModuleInstance *)module_inst_src)->module))
                ->import_func_count;
    }
#endif

    if (import_func_count != 0 && c_api_func_imports) {
        size_in_bytes = sizeof(CApiFuncImport) * import_func_count;
        *new_c_api_func_imports = wasm_runtime_malloc(size_in_bytes);
        if (!(*new_c_api_func_imports))
            return false;

        bh_memcpy_s(*new_c_api_func_imports, size_in_bytes, c_api_func_imports,
                    size_in_bytes);
    }

    return true;
}
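
/* The helpers below are only compiled with the debug interpreter: they track
   each thread's run status (STATUS_RUNNING / STATUS_STEP / STATUS_STOP /
   STATUS_EXIT) and per-thread signals, and forward stop and exit events to
   the cluster's debug instance. */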

#if WASM_ENABLE_DEBUG_INTERP != 0
WASMCurrentEnvStatus *
wasm_cluster_create_exenv_status()
{
    WASMCurrentEnvStatus *status;

    if (!(status = wasm_runtime_malloc(sizeof(WASMCurrentEnvStatus)))) {
        return NULL;
    }

    status->step_count = 0;
    status->signal_flag = 0;
    status->running_status = 0;
    return status;
}

void
wasm_cluster_destroy_exenv_status(WASMCurrentEnvStatus *status)
{
    wasm_runtime_free(status);
}

inline static bool
wasm_cluster_thread_is_running(WASMExecEnv *exec_env)
{
    return exec_env->current_status->running_status == STATUS_RUNNING
           || exec_env->current_status->running_status == STATUS_STEP;
}

void
wasm_cluster_clear_thread_signal(WASMExecEnv *exec_env)
{
    exec_env->current_status->signal_flag = 0;
}

void
wasm_cluster_thread_send_signal(WASMExecEnv *exec_env, uint32 signo)
{
    exec_env->current_status->signal_flag = signo;
}

static void
notify_debug_instance(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    if (!cluster->debug_inst) {
        return;
    }

    on_thread_stop_event(cluster->debug_inst, exec_env);
}

static void
notify_debug_instance_exit(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    if (!cluster->debug_inst) {
        return;
    }

    on_thread_exit_event(cluster->debug_inst, exec_env);
}

void
wasm_cluster_thread_waiting_run(WASMExecEnv *exec_env)
{
    exec_env->current_status->running_status = STATUS_STOP;
    notify_debug_instance(exec_env);

    while (!wasm_cluster_thread_is_running(exec_env)) {
        os_cond_wait(&exec_env->wait_cond, &exec_env->wait_lock);
    }
}

void
wasm_cluster_send_signal_all(WASMCluster *cluster, uint32 signo)
{
    WASMExecEnv *exec_env = bh_list_first_elem(&cluster->exec_env_list);

    while (exec_env) {
        wasm_cluster_thread_send_signal(exec_env, signo);
        exec_env = bh_list_elem_next(exec_env);
    }
}

void
wasm_cluster_thread_exited(WASMExecEnv *exec_env)
{
    exec_env->current_status->running_status = STATUS_EXIT;
    notify_debug_instance_exit(exec_env);
}

void
wasm_cluster_thread_continue(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    wasm_cluster_clear_thread_signal(exec_env);
    exec_env->current_status->running_status = STATUS_RUNNING;
    os_cond_signal(&exec_env->wait_cond);
    os_mutex_unlock(&exec_env->wait_lock);
}

void
wasm_cluster_thread_step(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    exec_env->current_status->running_status = STATUS_STEP;
    os_cond_signal(&exec_env->wait_cond);
    os_mutex_unlock(&exec_env->wait_lock);
}

void
wasm_cluster_set_debug_inst(WASMCluster *cluster, WASMDebugInstance *inst)
{
    cluster->debug_inst = inst;
}

#endif /* end of WASM_ENABLE_DEBUG_INTERP */

/* Check whether the exec_env belongs to one of the clusters; the caller
   should lock the cluster list before calling this function */
static bool
clusters_have_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = bh_list_first_elem(cluster_list);
    WASMExecEnv *node;

    while (cluster) {
        os_mutex_lock(&cluster->lock);
        node = bh_list_first_elem(&cluster->exec_env_list);

        while (node) {
            if (node == exec_env) {
                bh_assert(exec_env->cluster == cluster);
                os_mutex_unlock(&cluster->lock);
                return true;
            }
            node = bh_list_elem_next(node);
        }
        os_mutex_unlock(&cluster->lock);

        cluster = bh_list_elem_next(cluster);
    }

    return false;
}

int32
wasm_cluster_join_thread(WASMExecEnv *exec_env, void **ret_val)
{
    korp_tid handle;

    os_mutex_lock(&cluster_list_lock);

    if (!clusters_have_exec_env(exec_env) || exec_env->thread_is_detached) {
        /* Invalid thread, thread has exited or thread has been detached */
        if (ret_val)
            *ret_val = NULL;
        os_mutex_unlock(&cluster_list_lock);
        return 0;
    }

    os_mutex_lock(&exec_env->wait_lock);
    exec_env->wait_count++;
    handle = exec_env->handle;
    os_mutex_unlock(&exec_env->wait_lock);

    os_mutex_unlock(&cluster_list_lock);

    return os_thread_join(handle, ret_val);
}

int32
wasm_cluster_detach_thread(WASMExecEnv *exec_env)
{
    int32 ret = 0;

    os_mutex_lock(&cluster_list_lock);

    if (!clusters_have_exec_env(exec_env)) {
        /* Invalid thread or the thread has exited */
        os_mutex_unlock(&cluster_list_lock);
        return 0;
    }

    if (exec_env->wait_count == 0 && !exec_env->thread_is_detached) {
        /* Only detach current thread when there is no other thread
           joining it, otherwise let the system resources for the
           thread be released after joining */
        ret = os_thread_detach(exec_env->handle);
        exec_env->thread_is_detached = true;
    }

    os_mutex_unlock(&cluster_list_lock);

    return ret;
}
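
/* Called when the app explicitly exits the current thread. With hardware
   bound check enabled and a jmpbuf stack present, the return value is
   stashed in the exec_env, WASM_SUSPEND_FLAG_EXIT is set and (except on
   Windows) control longjmps back to the outermost saved jmpbuf instead of
   exiting here; otherwise the thread's resources are released and the
   native thread exits directly. */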

void
wasm_cluster_exit_thread(WASMExecEnv *exec_env, void *retval)
{
    WASMCluster *cluster;
    WASMModuleInstanceCommon *module_inst;

#ifdef OS_ENABLE_HW_BOUND_CHECK
    if (exec_env->jmpbuf_stack_top) {
        /* Store the return value in exec_env */
        exec_env->thread_ret_value = retval;
        WASM_SUSPEND_FLAGS_FETCH_OR(exec_env->suspend_flags,
                                    WASM_SUSPEND_FLAG_EXIT);

#ifndef BH_PLATFORM_WINDOWS
        /* Pop all jmpbuf_node except the last one */
        while (exec_env->jmpbuf_stack_top->prev) {
            wasm_exec_env_pop_jmpbuf(exec_env);
        }
        os_longjmp(exec_env->jmpbuf_stack_top->jmpbuf, 1);
        return;
#endif
    }
#endif

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_clear_thread_signal(exec_env);
    wasm_cluster_thread_exited(exec_env);
#endif

    /* The app exits the thread; free the resources before exiting
       the native thread */
    os_mutex_lock(&cluster_list_lock);
    os_mutex_lock(&cluster->lock);

    /* Detach the native thread here to ensure the resources are freed */
    if (exec_env->wait_count == 0 && !exec_env->thread_is_detached) {
        /* Only detach current thread when there is no other thread
           joining it, otherwise let the system resources for the
           thread be released after joining */
        os_thread_detach(exec_env->handle);
        /* No need to set exec_env->thread_is_detached to true here
           since we will exit soon */
    }

    module_inst = exec_env->module_inst;

    /* Free aux stack space */
    free_aux_stack(exec_env, exec_env->aux_stack_bottom.bottom);

    /* Remove exec_env */
    wasm_cluster_del_exec_env_internal(cluster, exec_env, false);

    /* Destroy exec_env */
    wasm_exec_env_destroy_internal(exec_env);

    /* Routine exit, destroy instance */
    wasm_runtime_deinstantiate_internal(module_inst, true);

    os_mutex_unlock(&cluster->lock);
    os_mutex_unlock(&cluster_list_lock);

    os_thread_exit(retval);
}
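
/* Request termination of a thread: set WASM_SUSPEND_FLAG_TERMINATE (and, with
   the debug interpreter, send WAMR_SIG_TERM) so the target thread stops at
   its next check of the suspend flags; when blocking-op wakeup is enabled,
   also interrupt any blocking operation the thread is performing. */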

static void
set_thread_cancel_flags(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_thread_send_signal(exec_env, WAMR_SIG_TERM);
#endif
    WASM_SUSPEND_FLAGS_FETCH_OR(exec_env->suspend_flags,
                                WASM_SUSPEND_FLAG_TERMINATE);

    os_mutex_unlock(&exec_env->wait_lock);

#ifdef OS_ENABLE_WAKEUP_BLOCKING_OP
    wasm_runtime_interrupt_blocking_op(exec_env);
#endif
}

static void
clear_thread_cancel_flags(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    WASM_SUSPEND_FLAGS_FETCH_AND(exec_env->suspend_flags,
                                 ~WASM_SUSPEND_FLAG_TERMINATE);
    os_mutex_unlock(&exec_env->wait_lock);
}

int32
wasm_cluster_cancel_thread(WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster_list_lock);

    if (!exec_env->cluster) {
        os_mutex_unlock(&cluster_list_lock);
        return 0;
    }

    if (!clusters_have_exec_env(exec_env)) {
        /* Invalid thread or the thread has exited */
        goto final;
    }

    set_thread_cancel_flags(exec_env);

final:
    os_mutex_unlock(&cluster_list_lock);
    return 0;
}

static void
terminate_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_cancel_thread(curr_exec_env);
    wasm_cluster_join_thread(curr_exec_env, NULL);
}

void
wasm_cluster_terminate_all(WASMCluster *cluster)
{
    os_mutex_lock(&cluster->lock);
    cluster->processing = true;

    safe_traverse_exec_env_list(cluster, terminate_thread_visitor, NULL);

    cluster->processing = false;
    os_mutex_unlock(&cluster->lock);
}

void
wasm_cluster_terminate_all_except_self(WASMCluster *cluster,
                                       WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster->lock);
    cluster->processing = true;

    safe_traverse_exec_env_list(cluster, terminate_thread_visitor,
                                (void *)exec_env);

    cluster->processing = false;
    os_mutex_unlock(&cluster->lock);
}

static void
wait_for_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_join_thread(curr_exec_env, NULL);
}

void
wams_cluster_wait_for_all(WASMCluster *cluster)
{
    os_mutex_lock(&cluster->lock);
    cluster->processing = true;

    safe_traverse_exec_env_list(cluster, wait_for_thread_visitor, NULL);

    cluster->processing = false;
    os_mutex_unlock(&cluster->lock);
}

void
wasm_cluster_wait_for_all_except_self(WASMCluster *cluster,
                                      WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster->lock);
    cluster->processing = true;

    safe_traverse_exec_env_list(cluster, wait_for_thread_visitor,
                                (void *)exec_env);

    cluster->processing = false;
    os_mutex_unlock(&cluster->lock);
}

bool
wasm_cluster_register_destroy_callback(void (*callback)(WASMCluster *))
{
    DestroyCallBackNode *node;

    if (!(node = wasm_runtime_malloc(sizeof(DestroyCallBackNode)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return false;
    }
    node->destroy_cb = callback;
    bh_list_insert(destroy_callback_list, node);
    return true;
}

void
wasm_cluster_suspend_thread(WASMExecEnv *exec_env)
{
    /* Set the suspend flag */
    WASM_SUSPEND_FLAGS_FETCH_OR(exec_env->suspend_flags,
                                WASM_SUSPEND_FLAG_SUSPEND);
}

static void
suspend_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_suspend_thread(curr_exec_env);
}

void
wasm_cluster_suspend_all(WASMCluster *cluster)
{
    os_mutex_lock(&cluster->lock);
    traverse_list(&cluster->exec_env_list, suspend_thread_visitor, NULL);
    os_mutex_unlock(&cluster->lock);
}

void
wasm_cluster_suspend_all_except_self(WASMCluster *cluster,
                                     WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster->lock);
    traverse_list(&cluster->exec_env_list, suspend_thread_visitor,
                  (void *)exec_env);
    os_mutex_unlock(&cluster->lock);
}

void
wasm_cluster_resume_thread(WASMExecEnv *exec_env)
{
    WASM_SUSPEND_FLAGS_FETCH_AND(exec_env->suspend_flags,
                                 ~WASM_SUSPEND_FLAG_SUSPEND);
    os_cond_signal(&exec_env->wait_cond);
}

static void
resume_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;

    wasm_cluster_resume_thread(curr_exec_env);
}

void
wasm_cluster_resume_all(WASMCluster *cluster)
{
    os_mutex_lock(&cluster->lock);
    traverse_list(&cluster->exec_env_list, resume_thread_visitor, NULL);
    os_mutex_unlock(&cluster->lock);
}
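
/* Exception spreading: when one thread traps, the exception string is copied
   into every other module instance in the cluster and each of those threads
   has its terminate flag set so it can leave any loop it is running; passing
   a NULL exception clears the exception and the terminate flag instead. */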

struct spread_exception_data {
    WASMExecEnv *skip;
    const char *exception;
};

static void
set_exception_visitor(void *node, void *user_data)
{
    const struct spread_exception_data *data = user_data;
    WASMExecEnv *exec_env = (WASMExecEnv *)node;

    if (exec_env != data->skip) {
        WASMModuleInstance *wasm_inst =
            (WASMModuleInstance *)get_module_inst(exec_env);

        exception_lock(wasm_inst);
        if (data->exception != NULL) {
            snprintf(wasm_inst->cur_exception, sizeof(wasm_inst->cur_exception),
                     "Exception: %s", data->exception);
        }
        else {
            wasm_inst->cur_exception[0] = '\0';
        }
        exception_unlock(wasm_inst);

        /* Terminate the thread so it can exit from infinite loops */
        if (data->exception != NULL) {
            set_thread_cancel_flags(exec_env);
        }
        else {
            clear_thread_cancel_flags(exec_env);
        }
    }
}

void
wasm_cluster_set_exception(WASMExecEnv *exec_env, const char *exception)
{
    const bool has_exception = exception != NULL;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    struct spread_exception_data data;
    data.skip = NULL;
    data.exception = exception;

    os_mutex_lock(&cluster->lock);

#if WASM_ENABLE_DUMP_CALL_STACK != 0
    if (has_exception) {
        /* Save the stack frames of the crashed thread into the cluster */
        WASMModuleInstance *module_inst =
            (WASMModuleInstance *)get_module_inst(exec_env);
#if WASM_ENABLE_INTERP != 0
        if (module_inst->module_type == Wasm_Module_Bytecode
            && wasm_interp_create_call_stack(exec_env)) {
            wasm_frame_vec_clone_internal(module_inst->frames,
                                          &cluster->exception_frames);
        }
#endif
#if WASM_ENABLE_AOT != 0
        if (module_inst->module_type == Wasm_Module_AoT
            && aot_create_call_stack(exec_env)) {
            wasm_frame_vec_clone_internal(module_inst->frames,
                                          &cluster->exception_frames);
        }
#endif
    }
#endif /* WASM_ENABLE_DUMP_CALL_STACK != 0 */

    cluster->has_exception = has_exception;
    traverse_list(&cluster->exec_env_list, set_exception_visitor, &data);
    os_mutex_unlock(&cluster->lock);
}

static void
set_custom_data_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMModuleInstanceCommon *module_inst = get_module_inst(curr_exec_env);

    wasm_runtime_set_custom_data_internal(module_inst, user_data);
}

void
wasm_cluster_spread_custom_data(WASMModuleInstanceCommon *module_inst,
                                void *custom_data)
{
    WASMExecEnv *exec_env = wasm_clusters_search_exec_env(module_inst);

    if (exec_env == NULL) {
        /* Maybe threads have not been started yet. */
        wasm_runtime_set_custom_data_internal(module_inst, custom_data);
    }
    else {
        WASMCluster *cluster;

        cluster = wasm_exec_env_get_cluster(exec_env);
        bh_assert(cluster);

        os_mutex_lock(&cluster->lock);
        traverse_list(&cluster->exec_env_list, set_custom_data_visitor,
                      custom_data);
        os_mutex_unlock(&cluster->lock);
    }
}

#if WASM_ENABLE_MODULE_INST_CONTEXT != 0
struct inst_set_context_data {
    void *key;
    void *ctx;
};

static void
set_context_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMModuleInstanceCommon *module_inst = get_module_inst(curr_exec_env);
    const struct inst_set_context_data *data = user_data;

    wasm_runtime_set_context(module_inst, data->key, data->ctx);
}

void
wasm_cluster_set_context(WASMModuleInstanceCommon *module_inst, void *key,
                         void *ctx)
{
    WASMExecEnv *exec_env = wasm_clusters_search_exec_env(module_inst);

    if (exec_env == NULL) {
        /* Maybe threads have not been started yet. */
        wasm_runtime_set_context(module_inst, key, ctx);
    }
    else {
        WASMCluster *cluster;
        struct inst_set_context_data data;
        data.key = key;
        data.ctx = ctx;

        cluster = wasm_exec_env_get_cluster(exec_env);
        bh_assert(cluster);

        os_mutex_lock(&cluster->lock);
        traverse_list(&cluster->exec_env_list, set_context_visitor, &data);
        os_mutex_unlock(&cluster->lock);
    }
}
#endif /* WASM_ENABLE_MODULE_INST_CONTEXT != 0 */

bool
wasm_cluster_is_thread_terminated(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    bool is_thread_terminated = (WASM_SUSPEND_FLAGS_GET(exec_env->suspend_flags)
                                 & WASM_SUSPEND_FLAG_TERMINATE)
                                    ? true
                                    : false;
    os_mutex_unlock(&exec_env->wait_lock);

    return is_thread_terminated;
}

void
exception_lock(WASMModuleInstance *module_inst)
{
    /*
     * Note: this lock could be per module instance if desirable.
     * We can revisit on AOT version bump.
     * It probably doesn't matter though because the exception handling
     * logic should not be executed too frequently anyway.
     */
    os_mutex_lock(&_exception_lock);
}

void
exception_unlock(WASMModuleInstance *module_inst)
{
    os_mutex_unlock(&_exception_lock);
}

void
wasm_cluster_traverse_lock(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);

    bh_assert(cluster);
    os_mutex_lock(&cluster->lock);
}

void
wasm_cluster_traverse_unlock(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);

    bh_assert(cluster);
    os_mutex_unlock(&cluster->lock);
}