thread_manager.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "thread_manager.h"
  6. #if WASM_ENABLE_INTERP != 0
  7. #include "../interpreter/wasm_runtime.h"
  8. #endif
  9. #if WASM_ENABLE_AOT != 0
  10. #include "../aot/aot_runtime.h"
  11. #endif
  12. #if WASM_ENABLE_DEBUG_INTERP != 0
  13. #include "debug_engine.h"
  14. #endif
  15. #if WASM_ENABLE_SHARED_MEMORY != 0
  16. #include "wasm_shared_memory.h"
  17. #endif
  18. typedef struct {
  19. bh_list_link l;
  20. void (*destroy_cb)(WASMCluster *);
  21. } DestroyCallBackNode;
  22. static bh_list destroy_callback_list_head;
  23. static bh_list *const destroy_callback_list = &destroy_callback_list_head;
  24. static bh_list cluster_list_head;
  25. static bh_list *const cluster_list = &cluster_list_head;
  26. static korp_mutex cluster_list_lock;
  27. typedef void (*list_visitor)(void *, void *);
  28. static uint32 cluster_max_thread_num = CLUSTER_MAX_THREAD_NUM;
  29. /* Set the maximum thread number, if this function is not called,
  30. the max thread num is defined by CLUSTER_MAX_THREAD_NUM */
  31. void
  32. wasm_cluster_set_max_thread_num(uint32 num)
  33. {
  34. if (num > 0)
  35. cluster_max_thread_num = num;
  36. }
  37. bool
  38. thread_manager_init()
  39. {
  40. if (bh_list_init(cluster_list) != 0)
  41. return false;
  42. if (os_mutex_init(&cluster_list_lock) != 0)
  43. return false;
  44. return true;
  45. }
  46. void
  47. thread_manager_destroy()
  48. {
  49. WASMCluster *cluster = bh_list_first_elem(cluster_list);
  50. WASMCluster *next;
  51. while (cluster) {
  52. next = bh_list_elem_next(cluster);
  53. wasm_cluster_destroy(cluster);
  54. cluster = next;
  55. }
  56. wasm_cluster_cancel_all_callbacks();
  57. os_mutex_destroy(&cluster_list_lock);
  58. }
  59. static void
  60. traverse_list(bh_list *l, list_visitor visitor, void *user_data)
  61. {
  62. void *next, *node = bh_list_first_elem(l);
  63. while (node) {
  64. next = bh_list_elem_next(node);
  65. visitor(node, user_data);
  66. node = next;
  67. }
  68. }
/* Assumes cluster->lock is locked */
/* Visit every exec_env in cluster->exec_env_list exactly once even though
 * the visitor runs with cluster->lock temporarily released and may mutate
 * the list. Visited nodes are remembered in a local vector and the scan
 * restarts from the list head after each visit.
 * Returns false on vector allocation/access failure, true otherwise;
 * cluster->lock is held again when the function returns. */
static bool
safe_traverse_exec_env_list(WASMCluster *cluster, list_visitor visitor,
                            void *user_data)
{
    Vector proc_nodes; /* set of nodes already visited */
    void *node;
    bool ret = true;

    if (!bh_vector_init(&proc_nodes, cluster->exec_env_list.len, sizeof(void *),
                        false)) {
        ret = false;
        goto final;
    }

    node = bh_list_first_elem(&cluster->exec_env_list);
    while (node) {
        bool already_processed = false;
        void *proc_node;
        uint32 i;

        /* linear membership test against the processed set */
        for (i = 0; i < (uint32)bh_vector_size(&proc_nodes); i++) {
            if (!bh_vector_get(&proc_nodes, i, &proc_node)) {
                ret = false;
                goto final;
            }
            if (proc_node == node) {
                already_processed = true;
                break;
            }
        }
        if (already_processed) {
            node = bh_list_elem_next(node);
            continue;
        }

        /* drop the lock while calling out, so the visitor may itself take
           cluster->lock or modify the exec_env list */
        os_mutex_unlock(&cluster->lock);
        visitor(node, user_data);
        os_mutex_lock(&cluster->lock);
        if (!bh_vector_append(&proc_nodes, &node)) {
            ret = false;
            goto final;
        }
        /* the list may have changed while unlocked: restart from the head */
        node = bh_list_first_elem(&cluster->exec_env_list);
    }
final:
    bh_vector_destroy(&proc_nodes);
    return ret;
}
/* The caller must lock cluster->lock */
/* Reserve an aux-stack region for a new thread.
 * Heap mode: malloc the stack from the module instance's app heap; *start
 * receives the stack top (end of the block — the wasm aux stack grows
 * downwards from there).
 * Pre-partitioned mode: hand out the first free slot of stack_tops[] and
 * mark it occupied.
 * Returns false when no stack can be provided. */
static bool
allocate_aux_stack(WASMExecEnv *exec_env, uint32 *start, uint32 *size)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION != 0
    WASMModuleInstanceCommon *module_inst =
        wasm_exec_env_get_module_inst(exec_env);
    uint32 stack_end;

    stack_end =
        wasm_runtime_module_malloc(module_inst, cluster->stack_size, NULL);
    /* usable stack top is the end of the allocated block */
    *start = stack_end + cluster->stack_size;
    *size = cluster->stack_size;
    /* a zero app offset means the module malloc failed */
    return stack_end != 0;
#else
    uint32 i;

    /* If the module doesn't have aux stack info,
       it can't create any threads */
    if (!cluster->stack_segment_occupied)
        return false;

    for (i = 0; i < cluster_max_thread_num; i++) {
        if (!cluster->stack_segment_occupied[i]) {
            if (start)
                *start = cluster->stack_tops[i];
            if (size)
                *size = cluster->stack_size;
            cluster->stack_segment_occupied[i] = true;
            return true;
        }
    }
    /* every slot is taken */
    return false;
#endif
}
/* The caller must lock cluster->lock */
/* Release a thread's aux-stack region. `start` is the stack top previously
 * returned by allocate_aux_stack.
 * Heap mode: free the module-heap block unless the stack is not managed by
 * the runtime. Pre-partitioned mode: clear the matching slot; returns false
 * when `start` matches no slot. */
static bool
free_aux_stack(WASMExecEnv *exec_env, uint32 start)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION != 0
    WASMModuleInstanceCommon *module_inst =
        wasm_exec_env_get_module_inst(exec_env);

    if (!wasm_exec_env_is_aux_stack_managed_by_runtime(exec_env)) {
        /* externally-provided stack: nothing for us to free */
        return true;
    }
    bh_assert(start >= cluster->stack_size);
    /* the allocation started stack_size bytes below the top */
    wasm_runtime_module_free(module_inst, start - cluster->stack_size);
    return true;
#else
    uint32 i;

    for (i = 0; i < cluster_max_thread_num; i++) {
        if (start == cluster->stack_tops[i]) {
            cluster->stack_segment_occupied[i] = false;
            return true;
        }
    }
    return false;
#endif
}
/* Create a cluster around the main `exec_env` and register it in the global
 * cluster list.
 * Partitions the module's aux stack area into per-thread stack slots (or,
 * in heap mode, just records the per-thread stack size), binds the first
 * region to the main exec_env, and returns the cluster, or NULL on failure.
 */
WASMCluster *
wasm_cluster_create(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;
    uint32 aux_stack_start, aux_stack_size;

    bh_assert(exec_env->cluster == NULL);
    if (!(cluster = wasm_runtime_malloc(sizeof(WASMCluster)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return NULL;
    }
    memset(cluster, 0, sizeof(WASMCluster));

    exec_env->cluster = cluster;

    bh_list_init(&cluster->exec_env_list);
    bh_list_insert(&cluster->exec_env_list, exec_env);
    if (os_mutex_init(&cluster->lock) != 0) {
        wasm_runtime_free(cluster);
        LOG_ERROR("thread manager error: failed to init mutex");
        return NULL;
    }

    /* Prepare the aux stack top and size for every thread */
    if (!wasm_exec_env_get_aux_stack(exec_env, &aux_stack_start,
                                     &aux_stack_size)) {
#if WASM_ENABLE_LIB_WASI_THREADS == 0
        LOG_VERBOSE("No aux stack info for this module, can't create thread");
#endif
        /* If the module don't have aux stack info, don't throw error here,
           but remain stack_tops and stack_segment_occupied as NULL */
        os_mutex_lock(&cluster_list_lock);
        if (bh_list_insert(cluster_list, cluster) != 0) {
            os_mutex_unlock(&cluster_list_lock);
            goto fail;
        }
        os_mutex_unlock(&cluster_list_lock);

        return cluster;
    }

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION != 0
    /* heap mode: each thread mallocs its own stack of this size */
    cluster->stack_size = aux_stack_size;
#else
    /* split the aux stack area evenly between the main exec_env and up to
       cluster_max_thread_num spawned threads */
    cluster->stack_size = aux_stack_size / (cluster_max_thread_num + 1);
    if (cluster->stack_size < WASM_THREAD_AUX_STACK_SIZE_MIN) {
        goto fail;
    }
    /* Make stack size 16-byte aligned */
    cluster->stack_size = cluster->stack_size & (~15);
#endif

    /* Set initial aux stack top to the instance and
       aux stack boundary to the main exec_env */
    if (!wasm_exec_env_set_aux_stack(exec_env, aux_stack_start,
                                     cluster->stack_size))
        goto fail;

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION == 0
    if (cluster_max_thread_num != 0) {
        uint64 total_size = cluster_max_thread_num * sizeof(uint32);
        uint32 i;
        if (total_size >= UINT32_MAX
            || !(cluster->stack_tops =
                     wasm_runtime_malloc((uint32)total_size))) {
            goto fail;
        }
        memset(cluster->stack_tops, 0, (uint32)total_size);

        /* NOTE(review): unlike stack_tops above, this size product has no
           overflow guard — harmless for sane limits, but worth confirming */
        if (!(cluster->stack_segment_occupied =
                  wasm_runtime_malloc(cluster_max_thread_num * sizeof(bool)))) {
            goto fail;
        }
        memset(cluster->stack_segment_occupied, 0,
               cluster_max_thread_num * sizeof(bool));

        /* Reserve space for main instance */
        aux_stack_start -= cluster->stack_size;
        /* stacks grow down: each slot's top sits stack_size bytes lower */
        for (i = 0; i < cluster_max_thread_num; i++) {
            cluster->stack_tops[i] = aux_stack_start - cluster->stack_size * i;
        }
    }
#endif

    os_mutex_lock(&cluster_list_lock);
    if (bh_list_insert(cluster_list, cluster) != 0) {
        os_mutex_unlock(&cluster_list_lock);
        goto fail;
    }
    os_mutex_unlock(&cluster_list_lock);

    return cluster;

fail:
    if (cluster)
        wasm_cluster_destroy(cluster);

    return NULL;
}
  257. static void
  258. destroy_cluster_visitor(void *node, void *user_data)
  259. {
  260. DestroyCallBackNode *destroy_node = (DestroyCallBackNode *)node;
  261. WASMCluster *cluster = (WASMCluster *)user_data;
  262. destroy_node->destroy_cb(cluster);
  263. }
/* Destroy a cluster: fire all registered destroy callbacks, unlink it from
 * the global cluster list, free the pre-partitioned stack bookkeeping (and
 * the debug instance when built with the debug interpreter), then free the
 * cluster itself. */
void
wasm_cluster_destroy(WASMCluster *cluster)
{
    /* let observers react before any teardown happens */
    traverse_list(destroy_callback_list, destroy_cluster_visitor,
                  (void *)cluster);

    /* Remove the cluster from the cluster list */
    os_mutex_lock(&cluster_list_lock);
    bh_list_remove(cluster_list, cluster);
    os_mutex_unlock(&cluster_list_lock);

    os_mutex_destroy(&cluster->lock);

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION == 0
    if (cluster->stack_tops)
        wasm_runtime_free(cluster->stack_tops);
    if (cluster->stack_segment_occupied)
        wasm_runtime_free(cluster->stack_segment_occupied);
#endif

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_debug_instance_destroy(cluster);
#endif

    wasm_runtime_free(cluster);
}
  285. static void
  286. free_node_visitor(void *node, void *user_data)
  287. {
  288. wasm_runtime_free(node);
  289. }
  290. void
  291. wasm_cluster_cancel_all_callbacks()
  292. {
  293. traverse_list(destroy_callback_list, free_node_visitor, NULL);
  294. bh_list_init(destroy_callback_list);
  295. }
/* Return the cluster this exec_env belongs to (NULL if unattached). */
WASMCluster *
wasm_exec_env_get_cluster(WASMExecEnv *exec_env)
{
    return exec_env->cluster;
}
  301. /* The caller must lock cluster->lock */
  302. static bool
  303. wasm_cluster_add_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
  304. {
  305. bool ret = true;
  306. exec_env->cluster = cluster;
  307. if (cluster->exec_env_list.len == cluster_max_thread_num + 1) {
  308. LOG_ERROR("thread manager error: "
  309. "maximum number of threads exceeded");
  310. ret = false;
  311. }
  312. if (ret && bh_list_insert(&cluster->exec_env_list, exec_env) != 0)
  313. ret = false;
  314. return ret;
  315. }
/* The caller should lock cluster->lock for thread safety */
/* Unlink exec_env from its cluster's list. When the list becomes empty the
 * cluster itself is destroyed. With the debug interpreter, first wait until
 * the debugger control thread has consumed any pending stop event for this
 * thread. Returns false when the list removal fails. */
bool
wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    bool ret = true;

    bh_assert(exec_env->cluster == cluster);

#if WASM_ENABLE_DEBUG_INTERP != 0
    /* Wait for debugger control thread to process the
       stop event of this thread */
    if (cluster->debug_inst) {
        /* lock the debug_inst->wait_lock so
           other threads can't fire stop events */
        os_mutex_lock(&cluster->debug_inst->wait_lock);
        while (cluster->debug_inst->stopped_thread == exec_env) {
            /* either wakes up by signal or by 1-second timeout */
            os_cond_reltimedwait(&cluster->debug_inst->wait_cond,
                                 &cluster->debug_inst->wait_lock, 1000000);
        }
        os_mutex_unlock(&cluster->debug_inst->wait_lock);
    }
#endif

    if (bh_list_remove(&cluster->exec_env_list, exec_env) != 0)
        ret = false;

    if (cluster->exec_env_list.len == 0) {
        /* exec_env_list empty, destroy the cluster */
        /* NOTE(review): this frees `cluster`; callers that still hold
           cluster->lock must not touch the cluster afterwards — verify every
           call site tolerates this */
        wasm_cluster_destroy(cluster);
    }
    return ret;
}
  345. static WASMExecEnv *
  346. wasm_cluster_search_exec_env(WASMCluster *cluster,
  347. WASMModuleInstanceCommon *module_inst)
  348. {
  349. WASMExecEnv *node = NULL;
  350. os_mutex_lock(&cluster->lock);
  351. node = bh_list_first_elem(&cluster->exec_env_list);
  352. while (node) {
  353. if (node->module_inst == module_inst) {
  354. os_mutex_unlock(&cluster->lock);
  355. return node;
  356. }
  357. node = bh_list_elem_next(node);
  358. }
  359. os_mutex_unlock(&cluster->lock);
  360. return NULL;
  361. }
  362. /* search the global cluster list to find if the given
  363. module instance have a corresponding exec_env */
  364. WASMExecEnv *
  365. wasm_clusters_search_exec_env(WASMModuleInstanceCommon *module_inst)
  366. {
  367. WASMCluster *cluster = NULL;
  368. WASMExecEnv *exec_env = NULL;
  369. os_mutex_lock(&cluster_list_lock);
  370. cluster = bh_list_first_elem(cluster_list);
  371. while (cluster) {
  372. exec_env = wasm_cluster_search_exec_env(cluster, module_inst);
  373. if (exec_env) {
  374. os_mutex_unlock(&cluster_list_lock);
  375. return exec_env;
  376. }
  377. cluster = bh_list_elem_next(cluster);
  378. }
  379. os_mutex_unlock(&cluster_list_lock);
  380. return NULL;
  381. }
/* Spawn a sibling exec_env in the caller's cluster: instantiate a fresh
 * module instance that shares the parent's custom data (and WASI context),
 * create an exec_env for it, give it its own aux stack region and register
 * it in the cluster. Returns the new exec_env, or NULL on failure; release
 * it with wasm_cluster_destroy_spawned_exec_env(). */
WASMExecEnv *
wasm_cluster_spawn_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = get_module_inst(exec_env);
    wasm_module_t module;
    wasm_module_inst_t new_module_inst;
#if WASM_ENABLE_LIBC_WASI != 0
    WASIContext *wasi_ctx;
#endif
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start, aux_stack_size;
    uint32 stack_size = 8192; /* fallback wasm stack size */

    if (!module_inst || !(module = wasm_exec_env_get_module(exec_env))) {
        return NULL;
    }

    os_mutex_lock(&cluster->lock);

    /* refuse to spawn while the cluster has an exception pending or is
       being processed */
    if (cluster->has_exception || cluster->processing) {
        goto fail1;
    }

#if WASM_ENABLE_INTERP != 0
    if (module_inst->module_type == Wasm_Module_Bytecode) {
        stack_size =
            ((WASMModuleInstance *)module_inst)->default_wasm_stack_size;
    }
#endif

#if WASM_ENABLE_AOT != 0
    if (module_inst->module_type == Wasm_Module_AoT) {
        stack_size =
            ((AOTModuleInstance *)module_inst)->default_wasm_stack_size;
    }
#endif

    if (!(new_module_inst = wasm_runtime_instantiate_internal(
              module, true, stack_size, 0, NULL, 0))) {
        goto fail1;
    }

    /* Set custom_data to new module instance */
    wasm_runtime_set_custom_data_internal(
        new_module_inst, wasm_runtime_get_custom_data(module_inst));

#if WASM_ENABLE_LIBC_WASI != 0
    /* share the parent's WASI context with the new instance */
    wasi_ctx = wasm_runtime_get_wasi_ctx(module_inst);
    wasm_runtime_set_wasi_ctx(new_module_inst, wasi_ctx);
#endif

    new_exec_env = wasm_exec_env_create_internal(new_module_inst,
                                                 exec_env->wasm_stack_size);
    if (!new_exec_env)
        goto fail2;

    if (!allocate_aux_stack(exec_env, &aux_stack_start, &aux_stack_size)) {
        LOG_ERROR("thread manager error: "
                  "failed to allocate aux stack space for new thread");
        goto fail3;
    }

    /* Set aux stack for current thread */
    if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                     aux_stack_size)) {
        goto fail4;
    }

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail4;

    os_mutex_unlock(&cluster->lock);

    return new_exec_env;

/* unwind in reverse acquisition order */
fail4:
    /* free the allocated aux stack space */
    free_aux_stack(exec_env, aux_stack_start);
fail3:
    wasm_exec_env_destroy_internal(new_exec_env);
fail2:
    wasm_runtime_deinstantiate_internal(new_module_inst, true);
fail1:
    os_mutex_unlock(&cluster->lock);

    return NULL;
}
/* Tear down an exec_env created by wasm_cluster_spawn_exec_env: release its
 * aux stack, unlink it from the cluster, destroy the exec_env and
 * deinstantiate its private module instance. */
void
wasm_cluster_destroy_spawned_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = wasm_runtime_get_module_inst(exec_env);
    bh_assert(cluster != NULL);

    os_mutex_lock(&cluster->lock);

    /* Free aux stack space */
    free_aux_stack(exec_env, exec_env->aux_stack_bottom.bottom);

    /* Remove exec_env */
    /* NOTE(review): if this was the last exec_env, del_exec_env destroys the
       cluster, making the unlock below act on freed memory — confirm callers
       guarantee another exec_env (e.g. the spawner's) is still registered */
    wasm_cluster_del_exec_env(cluster, exec_env);

    /* Destroy exec_env */
    wasm_exec_env_destroy_internal(exec_env);

    /* Routine exit, destroy instance */
    wasm_runtime_deinstantiate_internal(module_inst, true);

    os_mutex_unlock(&cluster->lock);
}
/* start routine of thread manager */
/* Native entry point for threads made by wasm_cluster_create_thread:
 * publish the thread handle to the waiting parent, run the app routine,
 * then release every per-thread resource (aux stack, exec_env, module
 * instance) before exiting the native thread. */
static void *
thread_manager_start_routine(void *arg)
{
    void *ret;
    WASMExecEnv *exec_env = (WASMExecEnv *)arg;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    WASMModuleInstanceCommon *module_inst =
        wasm_exec_env_get_module_inst(exec_env);
    bh_assert(cluster != NULL);
    bh_assert(module_inst != NULL);

    os_mutex_lock(&exec_env->wait_lock);
    exec_env->handle = os_self_thread();
    /* Notify the parent thread to continue running */
    os_cond_signal(&exec_env->wait_cond);
    os_mutex_unlock(&exec_env->wait_lock);

    ret = exec_env->thread_start_routine(exec_env);

#ifdef OS_ENABLE_HW_BOUND_CHECK
    os_mutex_lock(&exec_env->wait_lock);
    /* 0x08 is set by wasm_cluster_exit_thread() before longjmp'ing out of
       the wasm frames; the real return value was stored in thread_ret_value */
    if (exec_env->suspend_flags.flags & 0x08)
        ret = exec_env->thread_ret_value;
    os_mutex_unlock(&exec_env->wait_lock);
#endif

    /* Routine exit */

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_thread_exited(exec_env);
#endif

    /* lock order: cluster_list_lock, then cluster->lock */
    os_mutex_lock(&cluster_list_lock);
    os_mutex_lock(&cluster->lock);

    /* Detach the native thread here to ensure the resources are freed */
    if (exec_env->wait_count == 0 && !exec_env->thread_is_detached) {
        /* Only detach current thread when there is no other thread
           joining it, otherwise let the system resources for the
           thread be released after joining */
        os_thread_detach(exec_env->handle);
        /* No need to set exec_env->thread_is_detached to true here
           since we will exit soon */
    }

    /* Free aux stack space */
    free_aux_stack(exec_env, exec_env->aux_stack_bottom.bottom);
    /* Remove exec_env */
    wasm_cluster_del_exec_env(cluster, exec_env);
    /* Destroy exec_env */
    wasm_exec_env_destroy_internal(exec_env);
    /* Routine exit, destroy instance */
    wasm_runtime_deinstantiate_internal(module_inst, true);

    os_mutex_unlock(&cluster->lock);
    os_mutex_unlock(&cluster_list_lock);

    os_thread_exit(ret);
    return ret;
}
/* Create a new thread in the caller's cluster that runs
 * `thread_routine(arg)` on a fresh exec_env bound to `module_inst`.
 * When alloc_aux_stack is true a per-thread wasm aux stack is reserved;
 * otherwise aux-stack bound checking is disabled for the thread.
 * Blocks until the child has published its thread handle.
 * Returns 0 on success, -1 on failure (partially-acquired resources are
 * released via the fail labels, in reverse acquisition order). */
int32
wasm_cluster_create_thread(WASMExecEnv *exec_env,
                           wasm_module_inst_t module_inst, bool alloc_aux_stack,
                           void *(*thread_routine)(void *), void *arg)
{
    WASMCluster *cluster;
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start = 0, aux_stack_size;
    korp_tid tid;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    os_mutex_lock(&cluster->lock);

    /* refuse while the cluster is failing or being processed */
    if (cluster->has_exception || cluster->processing) {
        goto fail1;
    }

    new_exec_env =
        wasm_exec_env_create_internal(module_inst, exec_env->wasm_stack_size);
    if (!new_exec_env)
        goto fail1;

    if (alloc_aux_stack) {
        if (!allocate_aux_stack(exec_env, &aux_stack_start, &aux_stack_size)) {
            LOG_ERROR("thread manager error: "
                      "failed to allocate aux stack space for new thread");
            goto fail2;
        }

        /* Set aux stack for current thread */
        if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                         aux_stack_size)) {
            goto fail3;
        }
    }
    else {
        /* Disable aux stack */
        new_exec_env->aux_stack_boundary.boundary = 0;
        new_exec_env->aux_stack_bottom.bottom = UINT32_MAX;
    }

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail3;

    new_exec_env->thread_start_routine = thread_routine;
    new_exec_env->thread_arg = arg;

    os_mutex_lock(&new_exec_env->wait_lock);

    if (0
        != os_thread_create(&tid, thread_manager_start_routine,
                            (void *)new_exec_env,
                            APP_THREAD_STACK_SIZE_DEFAULT)) {
        os_mutex_unlock(&new_exec_env->wait_lock);
        goto fail4;
    }

    /* Wait until the new_exec_env->handle is set to avoid it is
       illegally accessed after unlocking cluster->lock */
    os_cond_wait(&new_exec_env->wait_cond, &new_exec_env->wait_lock);
    os_mutex_unlock(&new_exec_env->wait_lock);

    os_mutex_unlock(&cluster->lock);
    return 0;

/* error unwinding, reverse order of acquisition */
fail4:
    wasm_cluster_del_exec_env(cluster, new_exec_env);
fail3:
    /* free the allocated aux stack space */
    if (alloc_aux_stack)
        free_aux_stack(exec_env, aux_stack_start);
fail2:
    wasm_exec_env_destroy_internal(new_exec_env);
fail1:
    os_mutex_unlock(&cluster->lock);

    return -1;
}
  588. #if WASM_ENABLE_DEBUG_INTERP != 0
  589. WASMCurrentEnvStatus *
  590. wasm_cluster_create_exenv_status()
  591. {
  592. WASMCurrentEnvStatus *status;
  593. if (!(status = wasm_runtime_malloc(sizeof(WASMCurrentEnvStatus)))) {
  594. return NULL;
  595. }
  596. status->step_count = 0;
  597. status->signal_flag = 0;
  598. status->running_status = 0;
  599. return status;
  600. }
/* Free a status record created by wasm_cluster_create_exenv_status. */
void
wasm_cluster_destroy_exenv_status(WASMCurrentEnvStatus *status)
{
    wasm_runtime_free(status);
}
  606. inline static bool
  607. wasm_cluster_thread_is_running(WASMExecEnv *exec_env)
  608. {
  609. return exec_env->current_status->running_status == STATUS_RUNNING
  610. || exec_env->current_status->running_status == STATUS_STEP;
  611. }
/* Clear any pending debug signal on the thread. */
void
wasm_cluster_clear_thread_signal(WASMExecEnv *exec_env)
{
    exec_env->current_status->signal_flag = 0;
}
/* Post a debug signal (e.g. WAMR_SIG_TERM) to the thread; overwrites any
 * previously pending signal. */
void
wasm_cluster_thread_send_signal(WASMExecEnv *exec_env, uint32 signo)
{
    exec_env->current_status->signal_flag = signo;
}
  622. static void
  623. notify_debug_instance(WASMExecEnv *exec_env)
  624. {
  625. WASMCluster *cluster;
  626. cluster = wasm_exec_env_get_cluster(exec_env);
  627. bh_assert(cluster);
  628. if (!cluster->debug_inst) {
  629. return;
  630. }
  631. on_thread_stop_event(cluster->debug_inst, exec_env);
  632. }
  633. static void
  634. notify_debug_instance_exit(WASMExecEnv *exec_env)
  635. {
  636. WASMCluster *cluster;
  637. cluster = wasm_exec_env_get_cluster(exec_env);
  638. bh_assert(cluster);
  639. if (!cluster->debug_inst) {
  640. return;
  641. }
  642. on_thread_exit_event(cluster->debug_inst, exec_env);
  643. }
/* Park the current thread until the debugger resumes it (continue/step).
 * The thread is marked STATUS_STOP and the debug instance notified only
 * after wait_lock is held, so a resume signal cannot slip in unseen. */
void
wasm_cluster_thread_waiting_run(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    /* Wake up debugger thread after we get the lock, otherwise we may miss the
     * signal from debugger thread, see
     * https://github.com/bytecodealliance/wasm-micro-runtime/issues/1860 */
    exec_env->current_status->running_status = STATUS_STOP;
    notify_debug_instance(exec_env);

    /* the loop guards against spurious wakeups */
    while (!wasm_cluster_thread_is_running(exec_env)) {
        os_cond_wait(&exec_env->wait_cond, &exec_env->wait_lock);
    }
    os_mutex_unlock(&exec_env->wait_lock);
}
  658. void
  659. wasm_cluster_send_signal_all(WASMCluster *cluster, uint32 signo)
  660. {
  661. WASMExecEnv *exec_env = bh_list_first_elem(&cluster->exec_env_list);
  662. while (exec_env) {
  663. wasm_cluster_thread_send_signal(exec_env, signo);
  664. exec_env = bh_list_elem_next(exec_env);
  665. }
  666. }
/* Mark the thread as exited and tell the debug instance about it. */
void
wasm_cluster_thread_exited(WASMExecEnv *exec_env)
{
    exec_env->current_status->running_status = STATUS_EXIT;
    notify_debug_instance_exit(exec_env);
}
/* Resume a stopped thread: clear its pending signal, flip its status to
 * STATUS_RUNNING and wake it from wasm_cluster_thread_waiting_run. */
void
wasm_cluster_thread_continue(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    wasm_cluster_clear_thread_signal(exec_env);
    exec_env->current_status->running_status = STATUS_RUNNING;
    os_cond_signal(&exec_env->wait_cond);
    os_mutex_unlock(&exec_env->wait_lock);
}
/* Resume a stopped thread in single-step mode (STATUS_STEP) and wake it
 * from wasm_cluster_thread_waiting_run. */
void
wasm_cluster_thread_step(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    exec_env->current_status->running_status = STATUS_STEP;
    os_cond_signal(&exec_env->wait_cond);
    os_mutex_unlock(&exec_env->wait_lock);
}
/* Bind a debug instance to the cluster (pass NULL to detach). */
void
wasm_cluster_set_debug_inst(WASMCluster *cluster, WASMDebugInstance *inst)
{
    cluster->debug_inst = inst;
}
  695. #endif /* end of WASM_ENABLE_DEBUG_INTERP */
  696. /* Check whether the exec_env is in one of all clusters, the caller
  697. should add lock to the cluster list before calling us */
  698. static bool
  699. clusters_have_exec_env(WASMExecEnv *exec_env)
  700. {
  701. WASMCluster *cluster = bh_list_first_elem(cluster_list);
  702. WASMExecEnv *node;
  703. while (cluster) {
  704. os_mutex_lock(&cluster->lock);
  705. node = bh_list_first_elem(&cluster->exec_env_list);
  706. while (node) {
  707. if (node == exec_env) {
  708. bh_assert(exec_env->cluster == cluster);
  709. os_mutex_unlock(&cluster->lock);
  710. return true;
  711. }
  712. node = bh_list_elem_next(node);
  713. }
  714. os_mutex_unlock(&cluster->lock);
  715. cluster = bh_list_elem_next(cluster);
  716. }
  717. return false;
  718. }
/* Join the native thread behind exec_env.
 * Returns 0 immediately when the exec_env is no longer registered in any
 * cluster (already exited) or was detached. Otherwise bumps wait_count so
 * the exiting thread won't self-detach, then joins with both locks
 * released. */
int32
wasm_cluster_join_thread(WASMExecEnv *exec_env, void **ret_val)
{
    korp_tid handle;

    os_mutex_lock(&cluster_list_lock);

    if (!clusters_have_exec_env(exec_env) || exec_env->thread_is_detached) {
        /* Invalid thread, thread has exited or thread has been detached */
        if (ret_val)
            *ret_val = NULL;
        os_mutex_unlock(&cluster_list_lock);
        return 0;
    }

    os_mutex_lock(&exec_env->wait_lock);
    exec_env->wait_count++;
    /* copy the handle out: exec_env may be freed once the locks drop */
    handle = exec_env->handle;
    os_mutex_unlock(&exec_env->wait_lock);

    os_mutex_unlock(&cluster_list_lock);

    return os_thread_join(handle, ret_val);
}
/* Detach the native thread behind exec_env; a no-op (returning 0) when the
 * exec_env is no longer registered in any cluster.
 * NOTE(review): wait_count is read here under cluster_list_lock only, while
 * wasm_cluster_join_thread updates it under exec_env->wait_lock — confirm
 * this ordering is sufficient at the call sites. */
int32
wasm_cluster_detach_thread(WASMExecEnv *exec_env)
{
    int32 ret = 0;

    os_mutex_lock(&cluster_list_lock);
    if (!clusters_have_exec_env(exec_env)) {
        /* Invalid thread or the thread has exited */
        os_mutex_unlock(&cluster_list_lock);
        return 0;
    }
    if (exec_env->wait_count == 0 && !exec_env->thread_is_detached) {
        /* Only detach current thread when there is no other thread
           joining it, otherwise let the system resources for the
           thread be released after joining */
        ret = os_thread_detach(exec_env->handle);
        exec_env->thread_is_detached = true;
    }
    os_mutex_unlock(&cluster_list_lock);
    return ret;
}
/* Exit the current wasm thread from application code, releasing its aux
 * stack, exec_env and module instance before terminating the native thread.
 * Under hardware bound check, while wasm frames are still on the native
 * stack, the return value is stashed and we longjmp back to the outermost
 * guard frame instead of unwinding normally; thread_manager_start_routine
 * then performs the cleanup. */
void
wasm_cluster_exit_thread(WASMExecEnv *exec_env, void *retval)
{
    WASMCluster *cluster;
    WASMModuleInstanceCommon *module_inst;

#ifdef OS_ENABLE_HW_BOUND_CHECK
    if (exec_env->jmpbuf_stack_top) {
        /* Store the return value in exec_env */
        exec_env->thread_ret_value = retval;
        /* 0x08 tells thread_manager_start_routine to pick the value up */
        exec_env->suspend_flags.flags |= 0x08;

#ifndef BH_PLATFORM_WINDOWS
        /* Pop all jmpbuf_node except the last one */
        while (exec_env->jmpbuf_stack_top->prev) {
            wasm_exec_env_pop_jmpbuf(exec_env);
        }
        os_longjmp(exec_env->jmpbuf_stack_top->jmpbuf, 1);
        return;
#endif
    }
#endif

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_clear_thread_signal(exec_env);
    wasm_cluster_thread_exited(exec_env);
#endif

    /* App exit the thread, free the resources before exit native thread */

    /* lock order: cluster_list_lock, then cluster->lock */
    os_mutex_lock(&cluster_list_lock);
    os_mutex_lock(&cluster->lock);

    /* Detach the native thread here to ensure the resources are freed */
    if (exec_env->wait_count == 0 && !exec_env->thread_is_detached) {
        /* Only detach current thread when there is no other thread
           joining it, otherwise let the system resources for the
           thread be released after joining */
        os_thread_detach(exec_env->handle);
        /* No need to set exec_env->thread_is_detached to true here
           since we will exit soon */
    }

    module_inst = exec_env->module_inst;

    /* Free aux stack space */
    free_aux_stack(exec_env, exec_env->aux_stack_bottom.bottom);
    /* Remove exec_env */
    wasm_cluster_del_exec_env(cluster, exec_env);
    /* Destroy exec_env */
    wasm_exec_env_destroy_internal(exec_env);
    /* Routine exit, destroy instance */
    wasm_runtime_deinstantiate_internal(module_inst, true);

    os_mutex_unlock(&cluster->lock);
    os_mutex_unlock(&cluster_list_lock);

    os_thread_exit(retval);
}
/* Mark the thread bound to exec_env for termination.  With the debug
   interpreter enabled this goes through the debugger signal path;
   otherwise bit 0x01 of suspend_flags is set (the bit checked by
   wasm_cluster_is_thread_terminated()).  The update is done under
   exec_env->wait_lock, which guards suspend_flags in this file. */
static void
set_thread_cancel_flags(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    /* Set the termination flag */
#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_thread_send_signal(exec_env, WAMR_SIG_TERM);
#else
    exec_env->suspend_flags.flags |= 0x01;
#endif
    os_mutex_unlock(&exec_env->wait_lock);
}
  821. int32
  822. wasm_cluster_cancel_thread(WASMExecEnv *exec_env)
  823. {
  824. os_mutex_lock(&cluster_list_lock);
  825. if (!exec_env->cluster) {
  826. os_mutex_unlock(&cluster_list_lock);
  827. return 0;
  828. }
  829. if (!clusters_have_exec_env(exec_env)) {
  830. /* Invalid thread or the thread has exited */
  831. goto final;
  832. }
  833. set_thread_cancel_flags(exec_env);
  834. final:
  835. os_mutex_unlock(&cluster_list_lock);
  836. return 0;
  837. }
  838. static void
  839. terminate_thread_visitor(void *node, void *user_data)
  840. {
  841. WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
  842. WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
  843. if (curr_exec_env == exec_env)
  844. return;
  845. wasm_cluster_cancel_thread(curr_exec_env);
  846. wasm_cluster_join_thread(curr_exec_env, NULL);
  847. }
  848. void
  849. wasm_cluster_terminate_all(WASMCluster *cluster)
  850. {
  851. os_mutex_lock(&cluster->lock);
  852. cluster->processing = true;
  853. safe_traverse_exec_env_list(cluster, terminate_thread_visitor, NULL);
  854. cluster->processing = false;
  855. os_mutex_unlock(&cluster->lock);
  856. }
  857. void
  858. wasm_cluster_terminate_all_except_self(WASMCluster *cluster,
  859. WASMExecEnv *exec_env)
  860. {
  861. os_mutex_lock(&cluster->lock);
  862. cluster->processing = true;
  863. safe_traverse_exec_env_list(cluster, terminate_thread_visitor,
  864. (void *)exec_env);
  865. cluster->processing = false;
  866. os_mutex_unlock(&cluster->lock);
  867. }
  868. static void
  869. wait_for_thread_visitor(void *node, void *user_data)
  870. {
  871. WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
  872. WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
  873. if (curr_exec_env == exec_env)
  874. return;
  875. wasm_cluster_join_thread(curr_exec_env, NULL);
  876. }
  877. void
  878. wams_cluster_wait_for_all(WASMCluster *cluster)
  879. {
  880. os_mutex_lock(&cluster->lock);
  881. cluster->processing = true;
  882. safe_traverse_exec_env_list(cluster, wait_for_thread_visitor, NULL);
  883. cluster->processing = false;
  884. os_mutex_unlock(&cluster->lock);
  885. }
  886. void
  887. wasm_cluster_wait_for_all_except_self(WASMCluster *cluster,
  888. WASMExecEnv *exec_env)
  889. {
  890. os_mutex_lock(&cluster->lock);
  891. cluster->processing = true;
  892. safe_traverse_exec_env_list(cluster, wait_for_thread_visitor,
  893. (void *)exec_env);
  894. cluster->processing = false;
  895. os_mutex_unlock(&cluster->lock);
  896. }
  897. bool
  898. wasm_cluster_register_destroy_callback(void (*callback)(WASMCluster *))
  899. {
  900. DestroyCallBackNode *node;
  901. if (!(node = wasm_runtime_malloc(sizeof(DestroyCallBackNode)))) {
  902. LOG_ERROR("thread manager error: failed to allocate memory");
  903. return false;
  904. }
  905. node->destroy_cb = callback;
  906. bh_list_insert(destroy_callback_list, node);
  907. return true;
  908. }
  909. void
  910. wasm_cluster_suspend_thread(WASMExecEnv *exec_env)
  911. {
  912. /* Set the suspend flag */
  913. exec_env->suspend_flags.flags |= 0x02;
  914. }
  915. static void
  916. suspend_thread_visitor(void *node, void *user_data)
  917. {
  918. WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
  919. WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
  920. if (curr_exec_env == exec_env)
  921. return;
  922. wasm_cluster_suspend_thread(curr_exec_env);
  923. }
  924. void
  925. wasm_cluster_suspend_all(WASMCluster *cluster)
  926. {
  927. os_mutex_lock(&cluster->lock);
  928. traverse_list(&cluster->exec_env_list, suspend_thread_visitor, NULL);
  929. os_mutex_unlock(&cluster->lock);
  930. }
  931. void
  932. wasm_cluster_suspend_all_except_self(WASMCluster *cluster,
  933. WASMExecEnv *exec_env)
  934. {
  935. os_mutex_lock(&cluster->lock);
  936. traverse_list(&cluster->exec_env_list, suspend_thread_visitor,
  937. (void *)exec_env);
  938. os_mutex_unlock(&cluster->lock);
  939. }
  940. void
  941. wasm_cluster_resume_thread(WASMExecEnv *exec_env)
  942. {
  943. exec_env->suspend_flags.flags &= ~0x02;
  944. os_cond_signal(&exec_env->wait_cond);
  945. }
  946. static void
  947. resume_thread_visitor(void *node, void *user_data)
  948. {
  949. WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
  950. wasm_cluster_resume_thread(curr_exec_env);
  951. }
  952. void
  953. wasm_cluster_resume_all(WASMCluster *cluster)
  954. {
  955. os_mutex_lock(&cluster->lock);
  956. traverse_list(&cluster->exec_env_list, resume_thread_visitor, NULL);
  957. os_mutex_unlock(&cluster->lock);
  958. }
/* List visitor: copy the pending exception of the exec_env passed as
   user_data into every other module instance of the cluster, then flag
   that instance's thread for termination so it can leave dead loops.
   "wasi proc exit" pseudo-exceptions are intentionally not spread. */
static void
set_exception_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
    /* Source of the exception being spread */
    WASMModuleInstanceCommon *module_inst = get_module_inst(exec_env);
    WASMModuleInstance *wasm_inst = (WASMModuleInstance *)module_inst;

    if (curr_exec_env != exec_env) {
        WASMModuleInstance *curr_wasm_inst =
            (WASMModuleInstance *)get_module_inst(curr_exec_env);

        /* Only spread non "wasi proc exit" exception */
#if WASM_ENABLE_SHARED_MEMORY != 0
        /* Serialize access to the exception buffer with other threads
           attached to the same shared memory */
        WASMSharedMemNode *shared_mem_node = wasm_module_get_shared_memory(
            (WASMModuleCommon *)curr_wasm_inst->module);
        if (shared_mem_node)
            os_mutex_lock(&shared_mem_node->shared_mem_lock);
#endif
        if (!strstr(wasm_inst->cur_exception, "wasi proc exit")) {
            bh_memcpy_s(curr_wasm_inst->cur_exception,
                        sizeof(curr_wasm_inst->cur_exception),
                        wasm_inst->cur_exception,
                        sizeof(wasm_inst->cur_exception));
        }
#if WASM_ENABLE_SHARED_MEMORY != 0
        if (shared_mem_node)
            os_mutex_unlock(&shared_mem_node->shared_mem_lock);
#endif
        /* Terminate the thread so it can exit from dead loops */
        set_thread_cancel_flags(curr_exec_env);
    }
}
/* List visitor: clear the pending exception of every module instance
   in the cluster except the one bound to user_data (the exec_env that
   initiated the clear). */
static void
clear_exception_visitor(void *node, void *user_data)
{
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;

    if (curr_exec_env != exec_env) {
        WASMModuleInstance *curr_wasm_inst =
            (WASMModuleInstance *)get_module_inst(curr_exec_env);

#if WASM_ENABLE_SHARED_MEMORY != 0
        /* Serialize access to the exception buffer with other threads
           attached to the same shared memory */
        WASMSharedMemNode *shared_mem_node = wasm_module_get_shared_memory(
            (WASMModuleCommon *)curr_wasm_inst->module);
        if (shared_mem_node)
            os_mutex_lock(&shared_mem_node->shared_mem_lock);
#endif
        /* An empty string means "no exception pending" */
        curr_wasm_inst->cur_exception[0] = '\0';
#if WASM_ENABLE_SHARED_MEMORY != 0
        if (shared_mem_node)
            os_mutex_unlock(&shared_mem_node->shared_mem_lock);
#endif
    }
}
  1011. void
  1012. wasm_cluster_spread_exception(WASMExecEnv *exec_env, bool clear)
  1013. {
  1014. WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
  1015. bh_assert(cluster);
  1016. os_mutex_lock(&cluster->lock);
  1017. cluster->has_exception = !clear;
  1018. traverse_list(&cluster->exec_env_list,
  1019. clear ? clear_exception_visitor : set_exception_visitor,
  1020. exec_env);
  1021. os_mutex_unlock(&cluster->lock);
  1022. }
  1023. static void
  1024. set_custom_data_visitor(void *node, void *user_data)
  1025. {
  1026. WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
  1027. WASMModuleInstanceCommon *module_inst = get_module_inst(curr_exec_env);
  1028. wasm_runtime_set_custom_data_internal(module_inst, user_data);
  1029. }
  1030. void
  1031. wasm_cluster_spread_custom_data(WASMModuleInstanceCommon *module_inst,
  1032. void *custom_data)
  1033. {
  1034. WASMExecEnv *exec_env = wasm_clusters_search_exec_env(module_inst);
  1035. if (exec_env == NULL) {
  1036. /* Maybe threads have not been started yet. */
  1037. wasm_runtime_set_custom_data_internal(module_inst, custom_data);
  1038. }
  1039. else {
  1040. WASMCluster *cluster;
  1041. cluster = wasm_exec_env_get_cluster(exec_env);
  1042. bh_assert(cluster);
  1043. os_mutex_lock(&cluster->lock);
  1044. traverse_list(&cluster->exec_env_list, set_custom_data_visitor,
  1045. custom_data);
  1046. os_mutex_unlock(&cluster->lock);
  1047. }
  1048. }
  1049. bool
  1050. wasm_cluster_is_thread_terminated(WASMExecEnv *exec_env)
  1051. {
  1052. os_mutex_lock(&exec_env->wait_lock);
  1053. bool is_thread_terminated =
  1054. (exec_env->suspend_flags.flags & 0x01) ? true : false;
  1055. os_mutex_unlock(&exec_env->wait_lock);
  1056. return is_thread_terminated;
  1057. }