thread_manager.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "thread_manager.h"

#if WASM_ENABLE_INTERP != 0
#include "../interpreter/wasm_runtime.h"
#endif
#if WASM_ENABLE_AOT != 0
#include "../aot/aot_runtime.h"
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0
#include "debug_engine.h"
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
#include "wasm_shared_memory.h"
#endif

typedef struct {
    bh_list_link l;
    void (*destroy_cb)(WASMCluster *);
} DestroyCallBackNode;

static bh_list destroy_callback_list_head;
static bh_list *const destroy_callback_list = &destroy_callback_list_head;

static bh_list cluster_list_head;
static bh_list *const cluster_list = &cluster_list_head;
static korp_mutex cluster_list_lock;

typedef void (*list_visitor)(void *, void *);

static uint32 cluster_max_thread_num = CLUSTER_MAX_THREAD_NUM;

/* Set the maximum thread number; if this function is not called,
   the maximum thread number defaults to CLUSTER_MAX_THREAD_NUM */
void
wasm_cluster_set_max_thread_num(uint32 num)
{
    if (num > 0)
        cluster_max_thread_num = num;
}

bool
thread_manager_init()
{
    if (bh_list_init(cluster_list) != 0)
        return false;
    if (os_mutex_init(&cluster_list_lock) != 0)
        return false;
    return true;
}

void
thread_manager_destroy()
{
    WASMCluster *cluster = bh_list_first_elem(cluster_list);
    WASMCluster *next;
    while (cluster) {
        next = bh_list_elem_next(cluster);
        wasm_cluster_destroy(cluster);
        cluster = next;
    }
    wasm_cluster_cancel_all_callbacks();
    os_mutex_destroy(&cluster_list_lock);
}
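
/*
 * Illustrative lifecycle sketch (hypothetical caller code, not part of this
 * file; it only clarifies the expected call order):
 *
 *   if (!thread_manager_init())
 *       return -1;
 *   ... create clusters / threads via the wasm_cluster_* APIs below ...
 *   thread_manager_destroy();
 *
 * thread_manager_destroy() tears down every remaining cluster and then the
 * global cluster list lock, so it must be the last thread-manager call.
 */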

static void
traverse_list(bh_list *l, list_visitor visitor, void *user_data)
{
    void *next, *node = bh_list_first_elem(l);
    while (node) {
        next = bh_list_elem_next(node);
        visitor(node, user_data);
        node = next;
    }
}

/* Assumes cluster->lock is locked */
static bool
safe_traverse_exec_env_list(WASMCluster *cluster, list_visitor visitor,
                            void *user_data)
{
    Vector proc_nodes;
    void *node;
    bool ret = true;

    if (!bh_vector_init(&proc_nodes, cluster->exec_env_list.len, sizeof(void *),
                        false)) {
        ret = false;
        goto final;
    }

    node = bh_list_first_elem(&cluster->exec_env_list);
    while (node) {
        bool already_processed = false;
        void *proc_node;
        uint32 i;

        /* Skip nodes that have already been visited */
        for (i = 0; i < (uint32)bh_vector_size(&proc_nodes); i++) {
            if (!bh_vector_get(&proc_nodes, i, &proc_node)) {
                ret = false;
                goto final;
            }
            if (proc_node == node) {
                already_processed = true;
                break;
            }
        }
        if (already_processed) {
            node = bh_list_elem_next(node);
            continue;
        }

        os_mutex_unlock(&cluster->lock);
        visitor(node, user_data);
        os_mutex_lock(&cluster->lock);

        if (!bh_vector_append(&proc_nodes, &node)) {
            ret = false;
            goto final;
        }

        /* The list may have changed while cluster->lock was released for the
           visitor call, so restart the traversal from the head */
        node = bh_list_first_elem(&cluster->exec_env_list);
    }

final:
    bh_vector_destroy(&proc_nodes);
    return ret;
}

/* The caller must lock cluster->lock */
static bool
allocate_aux_stack(WASMExecEnv *exec_env, uint32 *start, uint32 *size)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION != 0
    WASMModuleInstanceCommon *module_inst =
        wasm_exec_env_get_module_inst(exec_env);
    uint32 stack_end;

    stack_end = wasm_runtime_module_malloc_internal(module_inst, exec_env,
                                                    cluster->stack_size, NULL);
    *start = stack_end + cluster->stack_size;
    *size = cluster->stack_size;

    return stack_end != 0;
#else
    uint32 i;

    /* If the module doesn't have aux stack info,
       it can't create any threads */
    if (!cluster->stack_segment_occupied)
        return false;

    for (i = 0; i < cluster_max_thread_num; i++) {
        if (!cluster->stack_segment_occupied[i]) {
            if (start)
                *start = cluster->stack_tops[i];
            if (size)
                *size = cluster->stack_size;
            cluster->stack_segment_occupied[i] = true;
            return true;
        }
    }
    return false;
#endif
}

/* The caller must lock cluster->lock */
static bool
free_aux_stack(WASMExecEnv *exec_env, uint32 start)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION != 0
    WASMModuleInstanceCommon *module_inst =
        wasm_exec_env_get_module_inst(exec_env);

    if (!wasm_exec_env_is_aux_stack_managed_by_runtime(exec_env)) {
        return true;
    }

    bh_assert(start >= cluster->stack_size);

    wasm_runtime_module_free_internal(module_inst, exec_env,
                                      start - cluster->stack_size);
    return true;
#else
    uint32 i;

    for (i = 0; i < cluster_max_thread_num; i++) {
        if (start == cluster->stack_tops[i]) {
            cluster->stack_segment_occupied[i] = false;
            return true;
        }
    }
    return false;
#endif
}
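
/*
 * Note on the two aux-stack strategies above (a summary of the code, kept
 * here for readability):
 *
 * - With WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION, each thread's aux stack is
 *   allocated from the module instance's heap on demand.
 *
 * - Otherwise, the module's single aux stack region is pre-partitioned at
 *   cluster creation time into cluster_max_thread_num fixed-size segments
 *   (see wasm_cluster_create below); allocate_aux_stack() hands out the
 *   first unoccupied segment and free_aux_stack() marks it free again.
 */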

WASMCluster *
wasm_cluster_create(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;
    uint32 aux_stack_start, aux_stack_size;

    bh_assert(exec_env->cluster == NULL);
    if (!(cluster = wasm_runtime_malloc(sizeof(WASMCluster)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return NULL;
    }
    memset(cluster, 0, sizeof(WASMCluster));

    exec_env->cluster = cluster;

    bh_list_init(&cluster->exec_env_list);
    bh_list_insert(&cluster->exec_env_list, exec_env);
    if (os_mutex_init(&cluster->lock) != 0) {
        wasm_runtime_free(cluster);
        LOG_ERROR("thread manager error: failed to init mutex");
        return NULL;
    }

    /* Prepare the aux stack top and size for every thread */
    if (!wasm_exec_env_get_aux_stack(exec_env, &aux_stack_start,
                                     &aux_stack_size)) {
#if WASM_ENABLE_LIB_WASI_THREADS == 0
        LOG_VERBOSE("No aux stack info for this module, can't create thread");
#endif
        /* If the module doesn't have aux stack info, don't report an error
           here; just leave stack_tops and stack_segment_occupied as NULL */
        os_mutex_lock(&cluster_list_lock);
        if (bh_list_insert(cluster_list, cluster) != 0) {
            os_mutex_unlock(&cluster_list_lock);
            goto fail;
        }
        os_mutex_unlock(&cluster_list_lock);

        return cluster;
    }

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION != 0
    cluster->stack_size = aux_stack_size;
#else
    cluster->stack_size = aux_stack_size / (cluster_max_thread_num + 1);
    if (cluster->stack_size < WASM_THREAD_AUX_STACK_SIZE_MIN) {
        goto fail;
    }
    /* Make stack size 16-byte aligned */
    cluster->stack_size = cluster->stack_size & (~15);
#endif

    /* Set initial aux stack top to the instance and
       aux stack boundary to the main exec_env */
    if (!wasm_exec_env_set_aux_stack(exec_env, aux_stack_start,
                                     cluster->stack_size))
        goto fail;

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION == 0
    if (cluster_max_thread_num != 0) {
        uint64 total_size = cluster_max_thread_num * sizeof(uint32);
        uint32 i;
        if (total_size >= UINT32_MAX
            || !(cluster->stack_tops =
                     wasm_runtime_malloc((uint32)total_size))) {
            goto fail;
        }
        memset(cluster->stack_tops, 0, (uint32)total_size);

        if (!(cluster->stack_segment_occupied =
                  wasm_runtime_malloc(cluster_max_thread_num * sizeof(bool)))) {
            goto fail;
        }
        memset(cluster->stack_segment_occupied, 0,
               cluster_max_thread_num * sizeof(bool));

        /* Reserve space for main instance */
        aux_stack_start -= cluster->stack_size;
        for (i = 0; i < cluster_max_thread_num; i++) {
            cluster->stack_tops[i] = aux_stack_start - cluster->stack_size * i;
        }
    }
#endif

    os_mutex_lock(&cluster_list_lock);
    if (bh_list_insert(cluster_list, cluster) != 0) {
        os_mutex_unlock(&cluster_list_lock);
        goto fail;
    }
    os_mutex_unlock(&cluster_list_lock);

    return cluster;

fail:
    if (cluster)
        wasm_cluster_destroy(cluster);

    return NULL;
}
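
/*
 * Illustrative usage sketch (hypothetical caller code; "module_inst" and the
 * stack size are assumptions, shown only to clarify the expected call order):
 *
 *   WASMExecEnv *main_env =
 *       wasm_exec_env_create_internal(module_inst, 64 * 1024);
 *   WASMCluster *cluster = wasm_cluster_create(main_env);
 *   if (!cluster) {
 *       // allocation failed or the module's aux stack layout is unusable
 *   }
 *
 * The exec_env passed in becomes the cluster's main thread; its aux stack
 * boundary is shrunk to the per-thread stack_size computed above, and the
 * remaining segments are handed out by allocate_aux_stack().
 */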

static void
destroy_cluster_visitor(void *node, void *user_data)
{
    DestroyCallBackNode *destroy_node = (DestroyCallBackNode *)node;
    WASMCluster *cluster = (WASMCluster *)user_data;

    destroy_node->destroy_cb(cluster);
}

void
wasm_cluster_destroy(WASMCluster *cluster)
{
    traverse_list(destroy_callback_list, destroy_cluster_visitor,
                  (void *)cluster);

    /* Remove the cluster from the cluster list */
    os_mutex_lock(&cluster_list_lock);
    bh_list_remove(cluster_list, cluster);
    os_mutex_unlock(&cluster_list_lock);

    os_mutex_destroy(&cluster->lock);

#if WASM_ENABLE_HEAP_AUX_STACK_ALLOCATION == 0
    if (cluster->stack_tops)
        wasm_runtime_free(cluster->stack_tops);
    if (cluster->stack_segment_occupied)
        wasm_runtime_free(cluster->stack_segment_occupied);
#endif

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_debug_instance_destroy(cluster);
#endif

    wasm_runtime_free(cluster);
}

static void
free_node_visitor(void *node, void *user_data)
{
    wasm_runtime_free(node);
}

void
wasm_cluster_cancel_all_callbacks()
{
    traverse_list(destroy_callback_list, free_node_visitor, NULL);
    bh_list_init(destroy_callback_list);
}

WASMCluster *
wasm_exec_env_get_cluster(WASMExecEnv *exec_env)
{
    return exec_env->cluster;
}

/* The caller must lock cluster->lock */
static bool
wasm_cluster_add_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    bool ret = true;

    exec_env->cluster = cluster;

    if (cluster->exec_env_list.len == cluster_max_thread_num + 1) {
        LOG_ERROR("thread manager error: "
                  "maximum number of threads exceeded");
        ret = false;
    }

    if (ret && bh_list_insert(&cluster->exec_env_list, exec_env) != 0)
        ret = false;

    return ret;
}

static bool
wasm_cluster_del_exec_env_internal(WASMCluster *cluster, WASMExecEnv *exec_env,
                                   bool can_destroy_cluster)
{
    bool ret = true;
    bh_assert(exec_env->cluster == cluster);

#if WASM_ENABLE_DEBUG_INTERP != 0
    /* Wait for debugger control thread to process the
       stop event of this thread */
    if (cluster->debug_inst) {
        /* lock the debug_inst->wait_lock so
           other threads can't fire stop events */
        os_mutex_lock(&cluster->debug_inst->wait_lock);
        while (cluster->debug_inst->stopped_thread == exec_env) {
            /* either wakes up by signal or by 1-second timeout */
            os_cond_reltimedwait(&cluster->debug_inst->wait_cond,
                                 &cluster->debug_inst->wait_lock, 1000000);
        }
        os_mutex_unlock(&cluster->debug_inst->wait_lock);
    }
#endif

    if (bh_list_remove(&cluster->exec_env_list, exec_env) != 0)
        ret = false;

    if (can_destroy_cluster) {
        if (cluster->exec_env_list.len == 0) {
            /* exec_env_list empty, destroy the cluster */
            wasm_cluster_destroy(cluster);
        }
    }
    else {
        /* Don't destroy cluster as cluster->lock is being used */
    }
    return ret;
}

/* The caller should lock cluster->lock for thread safety */
bool
wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    return wasm_cluster_del_exec_env_internal(cluster, exec_env, true);
}

static WASMExecEnv *
wasm_cluster_search_exec_env(WASMCluster *cluster,
                             WASMModuleInstanceCommon *module_inst)
{
    WASMExecEnv *node = NULL;

    os_mutex_lock(&cluster->lock);
    node = bh_list_first_elem(&cluster->exec_env_list);
    while (node) {
        if (node->module_inst == module_inst) {
            os_mutex_unlock(&cluster->lock);
            return node;
        }
        node = bh_list_elem_next(node);
    }

    os_mutex_unlock(&cluster->lock);
    return NULL;
}

/* Search the global cluster list to find whether the given
   module instance has a corresponding exec_env */
WASMExecEnv *
wasm_clusters_search_exec_env(WASMModuleInstanceCommon *module_inst)
{
    WASMCluster *cluster = NULL;
    WASMExecEnv *exec_env = NULL;

    os_mutex_lock(&cluster_list_lock);
    cluster = bh_list_first_elem(cluster_list);
    while (cluster) {
        exec_env = wasm_cluster_search_exec_env(cluster, module_inst);
        if (exec_env) {
            os_mutex_unlock(&cluster_list_lock);
            return exec_env;
        }
        cluster = bh_list_elem_next(cluster);
    }

    os_mutex_unlock(&cluster_list_lock);
    return NULL;
}

WASMExecEnv *
wasm_cluster_spawn_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = get_module_inst(exec_env);
    wasm_module_t module;
    wasm_module_inst_t new_module_inst;
#if WASM_ENABLE_LIBC_WASI != 0
    WASIContext *wasi_ctx;
#endif
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start, aux_stack_size;
    uint32 stack_size = 8192;

    if (!module_inst || !(module = wasm_exec_env_get_module(exec_env))) {
        return NULL;
    }

    os_mutex_lock(&cluster->lock);

    if (cluster->has_exception || cluster->processing) {
        goto fail1;
    }

#if WASM_ENABLE_INTERP != 0
    if (module_inst->module_type == Wasm_Module_Bytecode) {
        stack_size =
            ((WASMModuleInstance *)module_inst)->default_wasm_stack_size;
    }
#endif

#if WASM_ENABLE_AOT != 0
    if (module_inst->module_type == Wasm_Module_AoT) {
        stack_size =
            ((AOTModuleInstance *)module_inst)->default_wasm_stack_size;
    }
#endif

    if (!(new_module_inst = wasm_runtime_instantiate_internal(
              module, true, exec_env, stack_size, 0, NULL, 0))) {
        goto fail1;
    }

    /* Set custom_data to new module instance */
    wasm_runtime_set_custom_data_internal(
        new_module_inst, wasm_runtime_get_custom_data(module_inst));

#if WASM_ENABLE_LIBC_WASI != 0
    wasi_ctx = wasm_runtime_get_wasi_ctx(module_inst);
    wasm_runtime_set_wasi_ctx(new_module_inst, wasi_ctx);
#endif

    new_exec_env = wasm_exec_env_create_internal(new_module_inst,
                                                 exec_env->wasm_stack_size);
    if (!new_exec_env)
        goto fail2;

    if (!allocate_aux_stack(exec_env, &aux_stack_start, &aux_stack_size)) {
        LOG_ERROR("thread manager error: "
                  "failed to allocate aux stack space for new thread");
        goto fail3;
    }

    /* Set aux stack for the spawned exec_env */
    if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                     aux_stack_size)) {
        goto fail4;
    }

    /* Inherit suspend_flags of parent thread */
    new_exec_env->suspend_flags.flags = exec_env->suspend_flags.flags;

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail4;

    os_mutex_unlock(&cluster->lock);

    return new_exec_env;

fail4:
    /* free the allocated aux stack space */
    free_aux_stack(exec_env, aux_stack_start);
fail3:
    wasm_exec_env_destroy_internal(new_exec_env);
fail2:
    wasm_runtime_deinstantiate_internal(new_module_inst, true);
fail1:
    os_mutex_unlock(&cluster->lock);

    return NULL;
}

void
wasm_cluster_destroy_spawned_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = wasm_runtime_get_module_inst(exec_env);
    bh_assert(cluster != NULL);

    os_mutex_lock(&cluster->lock);

    /* Free aux stack space */
    free_aux_stack(exec_env, exec_env->aux_stack_bottom.bottom);
    /* Remove exec_env */
    wasm_cluster_del_exec_env_internal(cluster, exec_env, false);
    /* Destroy exec_env */
    wasm_exec_env_destroy_internal(exec_env);
    /* Routine exit, destroy instance */
    wasm_runtime_deinstantiate_internal(module_inst, true);

    os_mutex_unlock(&cluster->lock);
}
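
/*
 * Illustrative pairing of the two functions above (hypothetical caller code;
 * "parent_env" is an assumption for an exec_env already attached to a
 * cluster):
 *
 *   WASMExecEnv *child_env = wasm_cluster_spawn_exec_env(parent_env);
 *   if (child_env) {
 *       // run code on child_env from a host-created thread ...
 *       wasm_cluster_destroy_spawned_exec_env(child_env);
 *   }
 *
 * A spawned exec_env owns its own module instance, so it must be released
 * with wasm_cluster_destroy_spawned_exec_env() rather than by destroying the
 * exec_env alone.
 */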

/* start routine of thread manager */
static void *
thread_manager_start_routine(void *arg)
{
    void *ret;
    WASMExecEnv *exec_env = (WASMExecEnv *)arg;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    WASMModuleInstanceCommon *module_inst =
        wasm_exec_env_get_module_inst(exec_env);
    bh_assert(cluster != NULL);
    bh_assert(module_inst != NULL);

    os_mutex_lock(&exec_env->wait_lock);
    exec_env->handle = os_self_thread();
    /* Notify the parent thread to continue running */
    os_cond_signal(&exec_env->wait_cond);
    os_mutex_unlock(&exec_env->wait_lock);

    ret = exec_env->thread_start_routine(exec_env);

#ifdef OS_ENABLE_HW_BOUND_CHECK
    os_mutex_lock(&exec_env->wait_lock);
    /* If flag 0x08 is set, the thread exited via wasm_cluster_exit_thread()
       and its return value was stored in thread_ret_value */
    if (exec_env->suspend_flags.flags & 0x08)
        ret = exec_env->thread_ret_value;
    os_mutex_unlock(&exec_env->wait_lock);
#endif

    /* Routine exit */

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_thread_exited(exec_env);
#endif

    os_mutex_lock(&cluster_list_lock);
    os_mutex_lock(&cluster->lock);

    /* Detach the native thread here to ensure the resources are freed */
    if (exec_env->wait_count == 0 && !exec_env->thread_is_detached) {
        /* Only detach current thread when there is no other thread
           joining it, otherwise let the system resources for the
           thread be released after joining */
        os_thread_detach(exec_env->handle);
        /* No need to set exec_env->thread_is_detached to true here
           since we will exit soon */
    }

    /* Free aux stack space */
    free_aux_stack(exec_env, exec_env->aux_stack_bottom.bottom);
    /* Remove exec_env */
    wasm_cluster_del_exec_env_internal(cluster, exec_env, false);
    /* Destroy exec_env */
    wasm_exec_env_destroy_internal(exec_env);
    /* Routine exit, destroy instance */
    wasm_runtime_deinstantiate_internal(module_inst, true);

    os_mutex_unlock(&cluster->lock);
    os_mutex_unlock(&cluster_list_lock);

    os_thread_exit(ret);
    return ret;
}

int32
wasm_cluster_create_thread(WASMExecEnv *exec_env,
                           wasm_module_inst_t module_inst, bool alloc_aux_stack,
                           void *(*thread_routine)(void *), void *arg)
{
    WASMCluster *cluster;
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start = 0, aux_stack_size;
    korp_tid tid;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    os_mutex_lock(&cluster->lock);

    if (cluster->has_exception || cluster->processing) {
        goto fail1;
    }

    new_exec_env =
        wasm_exec_env_create_internal(module_inst, exec_env->wasm_stack_size);
    if (!new_exec_env)
        goto fail1;

    if (alloc_aux_stack) {
        if (!allocate_aux_stack(exec_env, &aux_stack_start, &aux_stack_size)) {
            LOG_ERROR("thread manager error: "
                      "failed to allocate aux stack space for new thread");
            goto fail2;
        }

        /* Set aux stack for the new thread */
        if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                         aux_stack_size)) {
            goto fail3;
        }
    }
    else {
        /* Disable aux stack */
        new_exec_env->aux_stack_boundary.boundary = 0;
        new_exec_env->aux_stack_bottom.bottom = UINT32_MAX;
    }

    /* Inherit suspend_flags of parent thread */
    new_exec_env->suspend_flags.flags = exec_env->suspend_flags.flags;

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail3;

    new_exec_env->thread_start_routine = thread_routine;
    new_exec_env->thread_arg = arg;

    os_mutex_lock(&new_exec_env->wait_lock);

    if (0
        != os_thread_create(&tid, thread_manager_start_routine,
                            (void *)new_exec_env,
                            APP_THREAD_STACK_SIZE_DEFAULT)) {
        os_mutex_unlock(&new_exec_env->wait_lock);
        goto fail4;
    }

    /* Wait until new_exec_env->handle is set to avoid it being
       accessed illegally after cluster->lock is unlocked */
    os_cond_wait(&new_exec_env->wait_cond, &new_exec_env->wait_lock);
    os_mutex_unlock(&new_exec_env->wait_lock);

    os_mutex_unlock(&cluster->lock);
    return 0;

fail4:
    wasm_cluster_del_exec_env_internal(cluster, new_exec_env, false);
fail3:
    /* free the allocated aux stack space */
    if (alloc_aux_stack)
        free_aux_stack(exec_env, aux_stack_start);
fail2:
    wasm_exec_env_destroy_internal(new_exec_env);
fail1:
    os_mutex_unlock(&cluster->lock);
    return -1;
}
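
/*
 * Illustrative usage sketch (hypothetical caller code; "my_routine",
 * "child_module_inst" and "my_arg" are assumptions):
 *
 *   static void *
 *   my_routine(void *arg)
 *   {
 *       WASMExecEnv *env = (WASMExecEnv *)arg;
 *       void *my_arg = env->thread_arg;
 *       // call wasm functions on env ...
 *       return NULL;
 *   }
 *
 *   if (wasm_cluster_create_thread(parent_env, child_module_inst,
 *                                  true, my_routine, my_arg) != 0) {
 *       // creation failed: thread limit reached, aux stack exhausted, ...
 *   }
 *
 * The routine receives the new exec_env as its argument; the thread manager
 * frees that exec_env, its aux stack segment and the module instance when
 * the routine returns (see thread_manager_start_routine above).
 */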

#if WASM_ENABLE_DEBUG_INTERP != 0
WASMCurrentEnvStatus *
wasm_cluster_create_exenv_status()
{
    WASMCurrentEnvStatus *status;

    if (!(status = wasm_runtime_malloc(sizeof(WASMCurrentEnvStatus)))) {
        return NULL;
    }

    status->step_count = 0;
    status->signal_flag = 0;
    status->running_status = 0;
    return status;
}

void
wasm_cluster_destroy_exenv_status(WASMCurrentEnvStatus *status)
{
    wasm_runtime_free(status);
}

inline static bool
wasm_cluster_thread_is_running(WASMExecEnv *exec_env)
{
    return exec_env->current_status->running_status == STATUS_RUNNING
           || exec_env->current_status->running_status == STATUS_STEP;
}

void
wasm_cluster_clear_thread_signal(WASMExecEnv *exec_env)
{
    exec_env->current_status->signal_flag = 0;
}

void
wasm_cluster_thread_send_signal(WASMExecEnv *exec_env, uint32 signo)
{
    exec_env->current_status->signal_flag = signo;
}

static void
notify_debug_instance(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    if (!cluster->debug_inst) {
        return;
    }

    on_thread_stop_event(cluster->debug_inst, exec_env);
}

static void
notify_debug_instance_exit(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    if (!cluster->debug_inst) {
        return;
    }

    on_thread_exit_event(cluster->debug_inst, exec_env);
}

void
wasm_cluster_thread_waiting_run(WASMExecEnv *exec_env)
{
    exec_env->current_status->running_status = STATUS_STOP;
    notify_debug_instance(exec_env);

    while (!wasm_cluster_thread_is_running(exec_env)) {
        os_cond_wait(&exec_env->wait_cond, &exec_env->wait_lock);
    }
}

void
wasm_cluster_send_signal_all(WASMCluster *cluster, uint32 signo)
{
    WASMExecEnv *exec_env = bh_list_first_elem(&cluster->exec_env_list);
    while (exec_env) {
        wasm_cluster_thread_send_signal(exec_env, signo);
        exec_env = bh_list_elem_next(exec_env);
    }
}

void
wasm_cluster_thread_exited(WASMExecEnv *exec_env)
{
    exec_env->current_status->running_status = STATUS_EXIT;
    notify_debug_instance_exit(exec_env);
}

void
wasm_cluster_thread_continue(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    wasm_cluster_clear_thread_signal(exec_env);
    exec_env->current_status->running_status = STATUS_RUNNING;
    os_cond_signal(&exec_env->wait_cond);
    os_mutex_unlock(&exec_env->wait_lock);
}

void
wasm_cluster_thread_step(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    exec_env->current_status->running_status = STATUS_STEP;
    os_cond_signal(&exec_env->wait_cond);
    os_mutex_unlock(&exec_env->wait_lock);
}

void
wasm_cluster_set_debug_inst(WASMCluster *cluster, WASMDebugInstance *inst)
{
    cluster->debug_inst = inst;
}

#endif /* end of WASM_ENABLE_DEBUG_INTERP */

/* Check whether the exec_env belongs to one of the clusters; the caller
   should lock the cluster list before calling this function */
static bool
clusters_have_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = bh_list_first_elem(cluster_list);
    WASMExecEnv *node;

    while (cluster) {
        os_mutex_lock(&cluster->lock);
        node = bh_list_first_elem(&cluster->exec_env_list);

        while (node) {
            if (node == exec_env) {
                bh_assert(exec_env->cluster == cluster);
                os_mutex_unlock(&cluster->lock);
                return true;
            }
            node = bh_list_elem_next(node);
        }
        os_mutex_unlock(&cluster->lock);

        cluster = bh_list_elem_next(cluster);
    }

    return false;
}

int32
wasm_cluster_join_thread(WASMExecEnv *exec_env, void **ret_val)
{
    korp_tid handle;

    os_mutex_lock(&cluster_list_lock);

    if (!clusters_have_exec_env(exec_env) || exec_env->thread_is_detached) {
        /* Invalid thread, thread has exited or thread has been detached */
        if (ret_val)
            *ret_val = NULL;
        os_mutex_unlock(&cluster_list_lock);
        return 0;
    }

    os_mutex_lock(&exec_env->wait_lock);
    exec_env->wait_count++;
    handle = exec_env->handle;
    os_mutex_unlock(&exec_env->wait_lock);

    os_mutex_unlock(&cluster_list_lock);

    return os_thread_join(handle, ret_val);
}

int32
wasm_cluster_detach_thread(WASMExecEnv *exec_env)
{
    int32 ret = 0;

    os_mutex_lock(&cluster_list_lock);

    if (!clusters_have_exec_env(exec_env)) {
        /* Invalid thread or the thread has exited */
        os_mutex_unlock(&cluster_list_lock);
        return 0;
    }

    if (exec_env->wait_count == 0 && !exec_env->thread_is_detached) {
        /* Only detach current thread when there is no other thread
           joining it, otherwise let the system resources for the
           thread be released after joining */
        ret = os_thread_detach(exec_env->handle);
        exec_env->thread_is_detached = true;
    }

    os_mutex_unlock(&cluster_list_lock);

    return ret;
}
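
/*
 * Join/detach semantics, summarized for callers (illustrative sketch;
 * "child_env" is an assumption):
 *
 *   void *thread_ret = NULL;
 *   if (wasm_cluster_join_thread(child_env, &thread_ret) == 0) {
 *       // either the thread was joined, or it had already exited /
 *       // been detached, in which case thread_ret stays NULL
 *   }
 *
 * A thread that someone is waiting on (wait_count > 0) is never detached, so
 * its system resources are released by the corresponding join instead.
 */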

void
wasm_cluster_exit_thread(WASMExecEnv *exec_env, void *retval)
{
    WASMCluster *cluster;
    WASMModuleInstanceCommon *module_inst;

#ifdef OS_ENABLE_HW_BOUND_CHECK
    if (exec_env->jmpbuf_stack_top) {
        /* Store the return value in exec_env */
        exec_env->thread_ret_value = retval;
        exec_env->suspend_flags.flags |= 0x08;

#ifndef BH_PLATFORM_WINDOWS
        /* Pop all jmpbuf_node except the last one */
        while (exec_env->jmpbuf_stack_top->prev) {
            wasm_exec_env_pop_jmpbuf(exec_env);
        }
        os_longjmp(exec_env->jmpbuf_stack_top->jmpbuf, 1);
        return;
#endif
    }
#endif

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_clear_thread_signal(exec_env);
    wasm_cluster_thread_exited(exec_env);
#endif

    /* The app exits the thread: free the resources before exiting
       the native thread */
    os_mutex_lock(&cluster_list_lock);
    os_mutex_lock(&cluster->lock);

    /* Detach the native thread here to ensure the resources are freed */
    if (exec_env->wait_count == 0 && !exec_env->thread_is_detached) {
        /* Only detach current thread when there is no other thread
           joining it, otherwise let the system resources for the
           thread be released after joining */
        os_thread_detach(exec_env->handle);
        /* No need to set exec_env->thread_is_detached to true here
           since we will exit soon */
    }

    module_inst = exec_env->module_inst;

    /* Free aux stack space */
    free_aux_stack(exec_env, exec_env->aux_stack_bottom.bottom);
    /* Remove exec_env */
    wasm_cluster_del_exec_env_internal(cluster, exec_env, false);
    /* Destroy exec_env */
    wasm_exec_env_destroy_internal(exec_env);
    /* Routine exit, destroy instance */
    wasm_runtime_deinstantiate_internal(module_inst, true);

    os_mutex_unlock(&cluster->lock);
    os_mutex_unlock(&cluster_list_lock);

    os_thread_exit(retval);
}

static void
set_thread_cancel_flags(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_thread_send_signal(exec_env, WAMR_SIG_TERM);
#endif
    exec_env->suspend_flags.flags |= 0x01;

    os_mutex_unlock(&exec_env->wait_lock);
}

int32
wasm_cluster_cancel_thread(WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster_list_lock);

    if (!exec_env->cluster) {
        os_mutex_unlock(&cluster_list_lock);
        return 0;
    }

    if (!clusters_have_exec_env(exec_env)) {
        /* Invalid thread or the thread has exited */
        goto final;
    }

    set_thread_cancel_flags(exec_env);

final:
    os_mutex_unlock(&cluster_list_lock);
    return 0;
}

static void
terminate_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_cancel_thread(curr_exec_env);
    wasm_cluster_join_thread(curr_exec_env, NULL);
}

void
wasm_cluster_terminate_all(WASMCluster *cluster)
{
    os_mutex_lock(&cluster->lock);
    cluster->processing = true;

    safe_traverse_exec_env_list(cluster, terminate_thread_visitor, NULL);

    cluster->processing = false;
    os_mutex_unlock(&cluster->lock);
}

void
wasm_cluster_terminate_all_except_self(WASMCluster *cluster,
                                       WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster->lock);
    cluster->processing = true;

    safe_traverse_exec_env_list(cluster, terminate_thread_visitor,
                                (void *)exec_env);

    cluster->processing = false;
    os_mutex_unlock(&cluster->lock);
}

static void
wait_for_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_join_thread(curr_exec_env, NULL);
}

void
wams_cluster_wait_for_all(WASMCluster *cluster)
{
    os_mutex_lock(&cluster->lock);
    cluster->processing = true;

    safe_traverse_exec_env_list(cluster, wait_for_thread_visitor, NULL);

    cluster->processing = false;
    os_mutex_unlock(&cluster->lock);
}

void
wasm_cluster_wait_for_all_except_self(WASMCluster *cluster,
                                      WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster->lock);
    cluster->processing = true;

    safe_traverse_exec_env_list(cluster, wait_for_thread_visitor,
                                (void *)exec_env);

    cluster->processing = false;
    os_mutex_unlock(&cluster->lock);
}

bool
wasm_cluster_register_destroy_callback(void (*callback)(WASMCluster *))
{
    DestroyCallBackNode *node;

    if (!(node = wasm_runtime_malloc(sizeof(DestroyCallBackNode)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return false;
    }
    node->destroy_cb = callback;
    bh_list_insert(destroy_callback_list, node);
    return true;
}
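
/*
 * Illustrative usage sketch (hypothetical callback; libraries built on the
 * thread manager, such as lib-pthread, register callbacks like this to
 * release their per-cluster state):
 *
 *   static void
 *   my_cluster_destroy_cb(WASMCluster *cluster)
 *   {
 *       // release any data this library associated with the cluster
 *   }
 *
 *   wasm_cluster_register_destroy_callback(my_cluster_destroy_cb);
 *
 * Registered callbacks run at the start of wasm_cluster_destroy(), before
 * the cluster's own resources are freed.
 */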

void
wasm_cluster_suspend_thread(WASMExecEnv *exec_env)
{
    /* Set the suspend flag */
    exec_env->suspend_flags.flags |= 0x02;
}

static void
suspend_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_suspend_thread(curr_exec_env);
}

void
wasm_cluster_suspend_all(WASMCluster *cluster)
{
    os_mutex_lock(&cluster->lock);
    traverse_list(&cluster->exec_env_list, suspend_thread_visitor, NULL);
    os_mutex_unlock(&cluster->lock);
}

void
wasm_cluster_suspend_all_except_self(WASMCluster *cluster,
                                     WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster->lock);
    traverse_list(&cluster->exec_env_list, suspend_thread_visitor,
                  (void *)exec_env);
    os_mutex_unlock(&cluster->lock);
}

void
wasm_cluster_resume_thread(WASMExecEnv *exec_env)
{
    exec_env->suspend_flags.flags &= ~0x02;
    os_cond_signal(&exec_env->wait_cond);
}

static void
resume_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;

    wasm_cluster_resume_thread(curr_exec_env);
}

void
wasm_cluster_resume_all(WASMCluster *cluster)
{
    os_mutex_lock(&cluster->lock);
    traverse_list(&cluster->exec_env_list, resume_thread_visitor, NULL);
    os_mutex_unlock(&cluster->lock);
}
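
/*
 * Illustrative suspend/resume sketch (hypothetical caller code; "self_env"
 * is an assumption for the exec_env of the calling thread):
 *
 *   WASMCluster *cluster = wasm_exec_env_get_cluster(self_env);
 *   wasm_cluster_suspend_all_except_self(cluster, self_env);
 *   // inspect or mutate shared state while the other threads are parked ...
 *   wasm_cluster_resume_all(cluster);
 *
 * Suspension is cooperative: it only sets the 0x02 suspend flag, and each
 * thread parks itself the next time it checks its suspend_flags.
 */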

static void
set_exception_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
    WASMModuleInstanceCommon *module_inst = get_module_inst(exec_env);
    WASMModuleInstance *wasm_inst = (WASMModuleInstance *)module_inst;

    if (curr_exec_env != exec_env) {
        WASMModuleInstance *curr_wasm_inst =
            (WASMModuleInstance *)get_module_inst(curr_exec_env);

        /* Only spread non "wasi proc exit" exception */
#if WASM_ENABLE_SHARED_MEMORY != 0
        WASMSharedMemNode *shared_mem_node = wasm_module_get_shared_memory(
            (WASMModuleCommon *)curr_wasm_inst->module);
        if (shared_mem_node)
            os_mutex_lock(&shared_mem_node->shared_mem_lock);
#endif
        if (!strstr(wasm_inst->cur_exception, "wasi proc exit")) {
            bh_memcpy_s(curr_wasm_inst->cur_exception,
                        sizeof(curr_wasm_inst->cur_exception),
                        wasm_inst->cur_exception,
                        sizeof(wasm_inst->cur_exception));
        }
#if WASM_ENABLE_SHARED_MEMORY != 0
        if (shared_mem_node)
            os_mutex_unlock(&shared_mem_node->shared_mem_lock);
#endif

        /* Terminate the thread so it can exit from dead loops */
        set_thread_cancel_flags(curr_exec_env);
    }
}

static void
clear_exception_visitor(void *node, void *user_data)
{
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;

    if (curr_exec_env != exec_env) {
        WASMModuleInstance *curr_wasm_inst =
            (WASMModuleInstance *)get_module_inst(curr_exec_env);

#if WASM_ENABLE_SHARED_MEMORY != 0
        WASMSharedMemNode *shared_mem_node = wasm_module_get_shared_memory(
            (WASMModuleCommon *)curr_wasm_inst->module);
        if (shared_mem_node)
            os_mutex_lock(&shared_mem_node->shared_mem_lock);
#endif
        curr_wasm_inst->cur_exception[0] = '\0';
#if WASM_ENABLE_SHARED_MEMORY != 0
        if (shared_mem_node)
            os_mutex_unlock(&shared_mem_node->shared_mem_lock);
#endif
    }
}

void
wasm_cluster_spread_exception(WASMExecEnv *exec_env, bool clear)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    os_mutex_lock(&cluster->lock);
    cluster->has_exception = !clear;
    traverse_list(&cluster->exec_env_list,
                  clear ? clear_exception_visitor : set_exception_visitor,
                  exec_env);
    os_mutex_unlock(&cluster->lock);
}

static void
set_custom_data_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMModuleInstanceCommon *module_inst = get_module_inst(curr_exec_env);

    wasm_runtime_set_custom_data_internal(module_inst, user_data);
}

void
wasm_cluster_spread_custom_data(WASMModuleInstanceCommon *module_inst,
                                void *custom_data)
{
    WASMExecEnv *exec_env = wasm_clusters_search_exec_env(module_inst);

    if (exec_env == NULL) {
        /* Maybe threads have not been started yet. */
        wasm_runtime_set_custom_data_internal(module_inst, custom_data);
    }
    else {
        WASMCluster *cluster;

        cluster = wasm_exec_env_get_cluster(exec_env);
        bh_assert(cluster);

        os_mutex_lock(&cluster->lock);
        traverse_list(&cluster->exec_env_list, set_custom_data_visitor,
                      custom_data);
        os_mutex_unlock(&cluster->lock);
    }
}

bool
wasm_cluster_is_thread_terminated(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    bool is_thread_terminated =
        (exec_env->suspend_flags.flags & 0x01) ? true : false;
    os_mutex_unlock(&exec_env->wait_lock);

    return is_thread_terminated;
}