/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "thread_manager.h"

#if WASM_ENABLE_INTERP != 0
#include "../interpreter/wasm_runtime.h"
#endif

#if WASM_ENABLE_AOT != 0
#include "../aot/aot_runtime.h"
#endif

#if WASM_ENABLE_DEBUG_INTERP != 0
#include "debug_engine.h"
#endif

typedef struct {
    bh_list_link l;
    void (*destroy_cb)(WASMCluster *);
} DestroyCallBackNode;

static bh_list destroy_callback_list_head;
static bh_list *const destroy_callback_list = &destroy_callback_list_head;

static bh_list cluster_list_head;
static bh_list *const cluster_list = &cluster_list_head;
static korp_mutex cluster_list_lock;

typedef void (*list_visitor)(void *, void *);

static uint32 cluster_max_thread_num = CLUSTER_MAX_THREAD_NUM;

/* Set the maximum thread number. If this function is not called,
   the maximum thread number defaults to CLUSTER_MAX_THREAD_NUM. */
void
wasm_cluster_set_max_thread_num(uint32 num)
{
    if (num > 0)
        cluster_max_thread_num = num;
}

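/*
 * Usage sketch (illustrative, not part of this file's logic): an embedder
 * that wants more than the default number of threads per cluster would
 * typically raise the limit once, before any cluster is created, e.g.:
 *
 *     wasm_cluster_set_max_thread_num(8);
 *
 * Raising the limit later does not re-partition the aux stack space of
 * clusters that already exist.
 */
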
bool
thread_manager_init()
{
    if (bh_list_init(cluster_list) != 0)
        return false;
    if (os_mutex_init(&cluster_list_lock) != 0)
        return false;
    return true;
}

void
thread_manager_destroy()
{
    WASMCluster *cluster = bh_list_first_elem(cluster_list);
    WASMCluster *next;

    while (cluster) {
        next = bh_list_elem_next(cluster);
        wasm_cluster_destroy(cluster);
        cluster = next;
    }
    wasm_cluster_cancel_all_callbacks();
    os_mutex_destroy(&cluster_list_lock);
}

static void
traverse_list(bh_list *l, list_visitor visitor, void *user_data)
{
    void *next, *node = bh_list_first_elem(l);

    while (node) {
        next = bh_list_elem_next(node);
        visitor(node, user_data);
        node = next;
    }
}

/* Assumes cluster->lock is locked */
static bool
safe_traverse_exec_env_list(WASMCluster *cluster, list_visitor visitor,
                            void *user_data)
{
    Vector proc_nodes;
    void *node;
    bool ret = true;

    if (!bh_vector_init(&proc_nodes, cluster->exec_env_list.len, sizeof(void *),
                        false)) {
        ret = false;
        goto final;
    }

    node = bh_list_first_elem(&cluster->exec_env_list);
    while (node) {
        bool already_processed = false;
        void *proc_node;

        /* Skip nodes that have already been visited */
        for (size_t i = 0; i < bh_vector_size(&proc_nodes); i++) {
            if (!bh_vector_get(&proc_nodes, i, &proc_node)) {
                ret = false;
                goto final;
            }
            if (proc_node == node) {
                already_processed = true;
                break;
            }
        }
        if (already_processed) {
            node = bh_list_elem_next(node);
            continue;
        }

        /* The visitor may block or modify the list, so release the lock
           while it runs, then re-acquire it and restart the traversal
           from the list head */
        os_mutex_unlock(&cluster->lock);
        visitor(node, user_data);
        os_mutex_lock(&cluster->lock);
        if (!bh_vector_append(&proc_nodes, &node)) {
            ret = false;
            goto final;
        }

        node = bh_list_first_elem(&cluster->exec_env_list);
    }

final:
    bh_vector_destroy(&proc_nodes);
    return ret;
}

/* The caller must lock cluster->lock */
static bool
allocate_aux_stack(WASMCluster *cluster, uint32 *start, uint32 *size)
{
    uint32 i;

    /* If the module doesn't have aux stack info,
       it can't create any threads */
    if (!cluster->stack_segment_occupied)
        return false;

    for (i = 0; i < cluster_max_thread_num; i++) {
        if (!cluster->stack_segment_occupied[i]) {
            if (start)
                *start = cluster->stack_tops[i];
            if (size)
                *size = cluster->stack_size;
            cluster->stack_segment_occupied[i] = true;
            return true;
        }
    }
    return false;
}

/* The caller must lock cluster->lock */
static bool
free_aux_stack(WASMCluster *cluster, uint32 start)
{
    uint32 i;

    for (i = 0; i < cluster_max_thread_num; i++) {
        if (start == cluster->stack_tops[i]) {
            cluster->stack_segment_occupied[i] = false;
            return true;
        }
    }
    return false;
}

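/*
 * Aux stack layout sketch (illustrative numbers, assumed for this example):
 * with aux_stack_size = 81920 and cluster_max_thread_num = 4,
 * wasm_cluster_create() below computes
 *
 *     stack_size = 81920 / (4 + 1) = 16384   (then aligned down to 16 bytes)
 *
 * The main exec_env keeps the topmost segment, and, relative to the original
 * aux_stack_start, thread i gets the segment whose top is
 *
 *     stack_tops[i] = aux_stack_start - stack_size - stack_size * i
 *
 * allocate_aux_stack() hands out the first free segment and
 * free_aux_stack() returns it by matching the recorded stack top.
 */
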
WASMCluster *
wasm_cluster_create(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;
    uint64 total_size;
    uint32 aux_stack_start, aux_stack_size, i;

    bh_assert(exec_env->cluster == NULL);
    if (!(cluster = wasm_runtime_malloc(sizeof(WASMCluster)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return NULL;
    }
    memset(cluster, 0, sizeof(WASMCluster));

    exec_env->cluster = cluster;

    bh_list_init(&cluster->exec_env_list);
    bh_list_insert(&cluster->exec_env_list, exec_env);
    if (os_mutex_init(&cluster->lock) != 0) {
        wasm_runtime_free(cluster);
        LOG_ERROR("thread manager error: failed to init mutex");
        return NULL;
    }

    /* Prepare the aux stack top and size for every thread */
    if (!wasm_exec_env_get_aux_stack(exec_env, &aux_stack_start,
                                     &aux_stack_size)) {
        LOG_VERBOSE("No aux stack info for this module, can't create thread");
        /* If the module doesn't have aux stack info, don't report an error
           here; just leave stack_tops and stack_segment_occupied as NULL */
        os_mutex_lock(&cluster_list_lock);
        if (bh_list_insert(cluster_list, cluster) != 0) {
            os_mutex_unlock(&cluster_list_lock);
            goto fail;
        }
        os_mutex_unlock(&cluster_list_lock);

        return cluster;
    }

    cluster->stack_size = aux_stack_size / (cluster_max_thread_num + 1);
    if (cluster->stack_size < WASM_THREAD_AUX_STACK_SIZE_MIN) {
        goto fail;
    }
    /* Make stack size 16-byte aligned */
    cluster->stack_size = cluster->stack_size & (~15);

    /* Set initial aux stack top to the instance and
       aux stack boundary to the main exec_env */
    if (!wasm_exec_env_set_aux_stack(exec_env, aux_stack_start,
                                     cluster->stack_size))
        goto fail;

    if (cluster_max_thread_num != 0) {
        total_size = cluster_max_thread_num * sizeof(uint32);
        if (total_size >= UINT32_MAX
            || !(cluster->stack_tops =
                     wasm_runtime_malloc((uint32)total_size))) {
            goto fail;
        }
        memset(cluster->stack_tops, 0, (uint32)total_size);

        if (!(cluster->stack_segment_occupied =
                  wasm_runtime_malloc(cluster_max_thread_num * sizeof(bool)))) {
            goto fail;
        }
        memset(cluster->stack_segment_occupied, 0,
               cluster_max_thread_num * sizeof(bool));

        /* Reserve space for the main instance */
        aux_stack_start -= cluster->stack_size;
        for (i = 0; i < cluster_max_thread_num; i++) {
            cluster->stack_tops[i] = aux_stack_start - cluster->stack_size * i;
        }
    }

    os_mutex_lock(&cluster_list_lock);
    if (bh_list_insert(cluster_list, cluster) != 0) {
        os_mutex_unlock(&cluster_list_lock);
        goto fail;
    }
    os_mutex_unlock(&cluster_list_lock);

    return cluster;

fail:
    if (cluster)
        wasm_cluster_destroy(cluster);
    return NULL;
}

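/*
 * Note (illustrative, assuming the thread manager is enabled in the build):
 * a cluster is normally created implicitly when the runtime creates the
 * first exec_env of a module instance, so embedders rarely call
 * wasm_cluster_create() directly; they usually reach the cluster through an
 * existing exec_env, e.g.:
 *
 *     WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
 */
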
static void
destroy_cluster_visitor(void *node, void *user_data)
{
    DestroyCallBackNode *destroy_node = (DestroyCallBackNode *)node;
    WASMCluster *cluster = (WASMCluster *)user_data;

    destroy_node->destroy_cb(cluster);
}

void
wasm_cluster_destroy(WASMCluster *cluster)
{
    traverse_list(destroy_callback_list, destroy_cluster_visitor,
                  (void *)cluster);

    /* Remove the cluster from the cluster list */
    os_mutex_lock(&cluster_list_lock);
    bh_list_remove(cluster_list, cluster);
    os_mutex_unlock(&cluster_list_lock);

    os_mutex_destroy(&cluster->lock);

    if (cluster->stack_tops)
        wasm_runtime_free(cluster->stack_tops);
    if (cluster->stack_segment_occupied)
        wasm_runtime_free(cluster->stack_segment_occupied);

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_debug_instance_destroy(cluster);
#endif

    wasm_runtime_free(cluster);
}

static void
free_node_visitor(void *node, void *user_data)
{
    wasm_runtime_free(node);
}

void
wasm_cluster_cancel_all_callbacks()
{
    traverse_list(destroy_callback_list, free_node_visitor, NULL);
    bh_list_init(destroy_callback_list);
}

WASMCluster *
wasm_exec_env_get_cluster(WASMExecEnv *exec_env)
{
    return exec_env->cluster;
}

/* The caller must lock cluster->lock */
static bool
wasm_cluster_add_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    bool ret = true;

    exec_env->cluster = cluster;

    if (bh_list_insert(&cluster->exec_env_list, exec_env) != 0)
        ret = false;

    return ret;
}

/* The caller should lock cluster->lock for thread safety */
bool
wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    bool ret = true;
    bh_assert(exec_env->cluster == cluster);

#if WASM_ENABLE_DEBUG_INTERP != 0
    /* Wait for the debugger control thread to process the
       stop event of this thread */
    if (cluster->debug_inst) {
        /* Lock debug_inst->wait_lock so that
           other threads can't fire stop events */
        os_mutex_lock(&cluster->debug_inst->wait_lock);
        while (cluster->debug_inst->stopped_thread == exec_env) {
            /* Woken up either by a signal or by the 1-second timeout */
            os_cond_reltimedwait(&cluster->debug_inst->wait_cond,
                                 &cluster->debug_inst->wait_lock, 1000000);
        }
        os_mutex_unlock(&cluster->debug_inst->wait_lock);
    }
#endif

    if (bh_list_remove(&cluster->exec_env_list, exec_env) != 0)
        ret = false;

    if (cluster->exec_env_list.len == 0) {
        /* exec_env_list is empty, destroy the cluster */
        wasm_cluster_destroy(cluster);
    }

    return ret;
}

static WASMExecEnv *
wasm_cluster_search_exec_env(WASMCluster *cluster,
                             WASMModuleInstanceCommon *module_inst)
{
    WASMExecEnv *node = NULL;

    os_mutex_lock(&cluster->lock);
    node = bh_list_first_elem(&cluster->exec_env_list);
    while (node) {
        if (node->module_inst == module_inst) {
            os_mutex_unlock(&cluster->lock);
            return node;
        }
        node = bh_list_elem_next(node);
    }

    os_mutex_unlock(&cluster->lock);
    return NULL;
}

/* Search the global cluster list to find whether the given
   module instance has a corresponding exec_env */
WASMExecEnv *
wasm_clusters_search_exec_env(WASMModuleInstanceCommon *module_inst)
{
    WASMCluster *cluster = NULL;
    WASMExecEnv *exec_env = NULL;

    os_mutex_lock(&cluster_list_lock);
    cluster = bh_list_first_elem(cluster_list);
    while (cluster) {
        exec_env = wasm_cluster_search_exec_env(cluster, module_inst);
        if (exec_env) {
            os_mutex_unlock(&cluster_list_lock);
            return exec_env;
        }
        cluster = bh_list_elem_next(cluster);
    }

    os_mutex_unlock(&cluster_list_lock);
    return NULL;
}

WASMExecEnv *
wasm_cluster_spawn_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = get_module_inst(exec_env);
    wasm_module_t module;
    wasm_module_inst_t new_module_inst;
#if WASM_ENABLE_LIBC_WASI != 0
    WASIContext *wasi_ctx;
#endif
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start, aux_stack_size;
    uint32 stack_size = 8192;

    if (!module_inst || !(module = wasm_exec_env_get_module(exec_env))) {
        return NULL;
    }

    os_mutex_lock(&cluster->lock);

    if (cluster->has_exception || cluster->processing) {
        goto fail1;
    }

#if WASM_ENABLE_INTERP != 0
    if (module_inst->module_type == Wasm_Module_Bytecode) {
        stack_size =
            ((WASMModuleInstance *)module_inst)->default_wasm_stack_size;
    }
#endif

#if WASM_ENABLE_AOT != 0
    if (module_inst->module_type == Wasm_Module_AoT) {
        stack_size =
            ((AOTModuleInstance *)module_inst)->default_wasm_stack_size;
    }
#endif

    if (!(new_module_inst = wasm_runtime_instantiate_internal(
              module, true, stack_size, 0, NULL, 0))) {
        goto fail1;
    }

    /* Set custom_data to the new module instance */
    wasm_runtime_set_custom_data_internal(
        new_module_inst, wasm_runtime_get_custom_data(module_inst));

#if WASM_ENABLE_LIBC_WASI != 0
    wasi_ctx = wasm_runtime_get_wasi_ctx(module_inst);
    wasm_runtime_set_wasi_ctx(new_module_inst, wasi_ctx);
#endif

    new_exec_env = wasm_exec_env_create_internal(new_module_inst,
                                                 exec_env->wasm_stack_size);
    if (!new_exec_env)
        goto fail2;

    if (!allocate_aux_stack(cluster, &aux_stack_start, &aux_stack_size)) {
        LOG_ERROR("thread manager error: "
                  "failed to allocate aux stack space for new thread");
        goto fail3;
    }

    /* Set aux stack for the new exec_env */
    if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                     aux_stack_size)) {
        goto fail4;
    }

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail4;

    os_mutex_unlock(&cluster->lock);

    return new_exec_env;

fail4:
    /* Free the allocated aux stack space */
    free_aux_stack(cluster, aux_stack_start);
fail3:
    wasm_exec_env_destroy(new_exec_env);
fail2:
    wasm_runtime_deinstantiate_internal(new_module_inst, true);
fail1:
    os_mutex_unlock(&cluster->lock);

    return NULL;
}

void
wasm_cluster_destroy_spawned_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = wasm_runtime_get_module_inst(exec_env);
    bh_assert(cluster != NULL);

    /* Free the aux stack space */
    os_mutex_lock(&cluster->lock);
    free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom);
    wasm_cluster_del_exec_env(cluster, exec_env);
    os_mutex_unlock(&cluster->lock);

    wasm_exec_env_destroy_internal(exec_env);
    wasm_runtime_deinstantiate_internal(module_inst, true);
}

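/*
 * Usage sketch (illustrative, assuming a host-created "main" exec_env named
 * main_exec_env): wasm_cluster_spawn_exec_env() gives the embedder a second
 * exec_env that shares the cluster (and, when WASI is enabled, the WASI
 * context) of the original one, and it must be released with
 * wasm_cluster_destroy_spawned_exec_env():
 *
 *     WASMExecEnv *child = wasm_cluster_spawn_exec_env(main_exec_env);
 *     if (child) {
 *         // ... call wasm functions on `child` from another native thread ...
 *         wasm_cluster_destroy_spawned_exec_env(child);
 *     }
 */
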
/* Start routine of threads created by the thread manager */
static void *
thread_manager_start_routine(void *arg)
{
    void *ret;
    WASMExecEnv *exec_env = (WASMExecEnv *)arg;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster != NULL);

    exec_env->handle = os_self_thread();
    ret = exec_env->thread_start_routine(exec_env);

#if defined(OS_ENABLE_HW_BOUND_CHECK) || defined(OS_ENABLE_INTERRUPT_BLOCK_INSN)
    if (exec_env->suspend_flags.flags & 0x08)
        ret = exec_env->thread_ret_value;
#endif

    /* Routine exit */

    /* Detach the native thread here to ensure the resources are freed */
    wasm_cluster_detach_thread(exec_env);

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_thread_exited(exec_env);
#endif

    os_mutex_lock(&cluster->lock);

    /* Free the aux stack space */
    free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom);
    /* Remove the exec_env from the cluster */
    wasm_cluster_del_exec_env(cluster, exec_env);

    os_mutex_unlock(&cluster->lock);

    /* Destroy the exec_env */
    wasm_exec_env_destroy_internal(exec_env);

    os_thread_exit(ret);
    return ret;
}

int32
wasm_cluster_create_thread(WASMExecEnv *exec_env,
                           wasm_module_inst_t module_inst,
                           void *(*thread_routine)(void *), void *arg)
{
    WASMCluster *cluster;
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start, aux_stack_size;
    korp_tid tid;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    os_mutex_lock(&cluster->lock);

    if (cluster->has_exception || cluster->processing) {
        goto fail1;
    }

    new_exec_env =
        wasm_exec_env_create_internal(module_inst, exec_env->wasm_stack_size);
    if (!new_exec_env)
        goto fail1;

    if (!allocate_aux_stack(cluster, &aux_stack_start, &aux_stack_size)) {
        LOG_ERROR("thread manager error: "
                  "failed to allocate aux stack space for new thread");
        goto fail2;
    }

    /* Set aux stack for the new thread */
    if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                     aux_stack_size)) {
        goto fail3;
    }

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail3;

    new_exec_env->thread_start_routine = thread_routine;
    new_exec_env->thread_arg = arg;

    if (0
        != os_thread_create(&tid, thread_manager_start_routine,
                            (void *)new_exec_env,
                            APP_THREAD_STACK_SIZE_DEFAULT)) {
        goto fail4;
    }

    os_mutex_unlock(&cluster->lock);

    return 0;

fail4:
    wasm_cluster_del_exec_env(cluster, new_exec_env);
fail3:
    /* Free the allocated aux stack space */
    free_aux_stack(cluster, aux_stack_start);
fail2:
    wasm_exec_env_destroy(new_exec_env);
fail1:
    os_mutex_unlock(&cluster->lock);

    return -1;
}

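/*
 * Usage sketch (illustrative only): the routine passed to
 * wasm_cluster_create_thread() is invoked with the newly created exec_env,
 * not with `arg` directly; `arg` is available as exec_env->thread_arg:
 *
 *     static void *
 *     my_thread_routine(void *arg)
 *     {
 *         WASMExecEnv *env = (WASMExecEnv *)arg;
 *         void *user_arg = env->thread_arg;
 *         // ... execute wasm code with `env` ...
 *         return NULL;
 *     }
 *
 *     // module_inst here would normally be an instance created
 *     // specifically for the new thread
 *     wasm_cluster_create_thread(parent_exec_env, module_inst,
 *                                my_thread_routine, user_arg);
 */
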
#if WASM_ENABLE_DEBUG_INTERP != 0
WASMCurrentEnvStatus *
wasm_cluster_create_exenv_status()
{
    WASMCurrentEnvStatus *status;

    if (!(status = wasm_runtime_malloc(sizeof(WASMCurrentEnvStatus)))) {
        return NULL;
    }

    status->step_count = 0;
    status->signal_flag = 0;
    status->running_status = 0;
    return status;
}

void
wasm_cluster_destroy_exenv_status(WASMCurrentEnvStatus *status)
{
    wasm_runtime_free(status);
}

inline static bool
wasm_cluster_thread_is_running(WASMExecEnv *exec_env)
{
    return exec_env->current_status->running_status == STATUS_RUNNING
           || exec_env->current_status->running_status == STATUS_STEP;
}

void
wasm_cluster_clear_thread_signal(WASMExecEnv *exec_env)
{
    exec_env->current_status->signal_flag = 0;
}

void
wasm_cluster_thread_send_signal(WASMExecEnv *exec_env, uint32 signo)
{
    exec_env->current_status->signal_flag = signo;
}

static void
notify_debug_instance(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    if (!cluster->debug_inst) {
        return;
    }

    on_thread_stop_event(cluster->debug_inst, exec_env);
}

static void
notify_debug_instance_exit(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    if (!cluster->debug_inst) {
        return;
    }

    on_thread_exit_event(cluster->debug_inst, exec_env);
}

void
wasm_cluster_thread_waiting_run(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);

    /* Wake up debugger thread after we get the lock, otherwise we may miss
     * the signal from debugger thread, see
     * https://github.com/bytecodealliance/wasm-micro-runtime/issues/1860 */
    exec_env->current_status->running_status = STATUS_STOP;
    notify_debug_instance(exec_env);

    while (!wasm_cluster_thread_is_running(exec_env)) {
        os_cond_wait(&exec_env->wait_cond, &exec_env->wait_lock);
    }

    os_mutex_unlock(&exec_env->wait_lock);
}

void
wasm_cluster_send_signal_all(WASMCluster *cluster, uint32 signo)
{
    WASMExecEnv *exec_env = bh_list_first_elem(&cluster->exec_env_list);
    while (exec_env) {
        wasm_cluster_thread_send_signal(exec_env, signo);
        exec_env = bh_list_elem_next(exec_env);
    }
}

void
wasm_cluster_thread_exited(WASMExecEnv *exec_env)
{
    exec_env->current_status->running_status = STATUS_EXIT;
    notify_debug_instance_exit(exec_env);
}

void
wasm_cluster_thread_continue(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    wasm_cluster_clear_thread_signal(exec_env);
    exec_env->current_status->running_status = STATUS_RUNNING;
    os_cond_signal(&exec_env->wait_cond);
    os_mutex_unlock(&exec_env->wait_lock);
}

void
wasm_cluster_thread_step(WASMExecEnv *exec_env)
{
    os_mutex_lock(&exec_env->wait_lock);
    exec_env->current_status->running_status = STATUS_STEP;
    os_cond_signal(&exec_env->wait_cond);
    os_mutex_unlock(&exec_env->wait_lock);
}

void
wasm_cluster_set_debug_inst(WASMCluster *cluster, WASMDebugInstance *inst)
{
    cluster->debug_inst = inst;
}

#endif /* end of WASM_ENABLE_DEBUG_INTERP */

/* Check whether the exec_env is in one of the clusters; the caller
   should lock the cluster list before calling this function */
static bool
clusters_have_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = bh_list_first_elem(cluster_list);
    WASMExecEnv *node;

    while (cluster) {
        node = bh_list_first_elem(&cluster->exec_env_list);

        while (node) {
            if (node == exec_env) {
                bh_assert(exec_env->cluster == cluster);
                return true;
            }
            node = bh_list_elem_next(node);
        }

        cluster = bh_list_elem_next(cluster);
    }

    return false;
}

int32
wasm_cluster_join_thread(WASMExecEnv *exec_env, void **ret_val)
{
    korp_tid handle;

    os_mutex_lock(&cluster_list_lock);
    os_mutex_lock(&exec_env->cluster->lock);

    if (!clusters_have_exec_env(exec_env) || exec_env->thread_is_detached) {
        /* Invalid thread, the thread has exited, or the thread has been
           detached */
        if (ret_val)
            *ret_val = NULL;
        os_mutex_unlock(&exec_env->cluster->lock);
        os_mutex_unlock(&cluster_list_lock);
        return 0;
    }

    exec_env->wait_count++;
    handle = exec_env->handle;

    os_mutex_unlock(&exec_env->cluster->lock);
    os_mutex_unlock(&cluster_list_lock);

    return os_thread_join(handle, ret_val);
}

int32
wasm_cluster_detach_thread(WASMExecEnv *exec_env)
{
    int32 ret = 0;

    os_mutex_lock(&cluster_list_lock);

    if (!clusters_have_exec_env(exec_env)) {
        /* Invalid thread or the thread has exited */
        os_mutex_unlock(&cluster_list_lock);
        return 0;
    }

    if (exec_env->wait_count == 0 && !exec_env->thread_is_detached) {
        /* Only detach the thread when no other thread is joining it;
           otherwise the system resources for the thread are released
           when it is joined */
        ret = os_thread_detach(exec_env->handle);
        exec_env->thread_is_detached = true;
    }

    os_mutex_unlock(&cluster_list_lock);

    return ret;
}

void
wasm_cluster_exit_thread(WASMExecEnv *exec_env, void *retval)
{
    WASMCluster *cluster;

#if defined(OS_ENABLE_HW_BOUND_CHECK) || defined(OS_ENABLE_INTERRUPT_BLOCK_INSN)
    if (exec_env->jmpbuf_stack_top) {
        /* Store the return value in exec_env */
        exec_env->thread_ret_value = retval;
        exec_env->suspend_flags.flags |= 0x08;

#ifndef BH_PLATFORM_WINDOWS
        /* Pop all jmpbuf_node except the last one */
        while (exec_env->jmpbuf_stack_top->prev) {
            wasm_exec_env_pop_jmpbuf(exec_env);
        }
        os_longjmp(exec_env->jmpbuf_stack_top->jmpbuf, 1);
        return;
#endif
    }
#endif

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_clear_thread_signal(exec_env);
    wasm_cluster_thread_exited(exec_env);
#endif

    /* The app exits the thread, so free the resources before exiting
       the native thread */
    /* Detach the native thread here to ensure the resources are freed */
    wasm_cluster_detach_thread(exec_env);

    os_mutex_lock(&cluster->lock);

    /* Free the aux stack space */
    free_aux_stack(cluster, exec_env->aux_stack_bottom.bottom);
    /* Remove and destroy the exec_env */
    wasm_cluster_del_exec_env(cluster, exec_env);

    os_mutex_unlock(&cluster->lock);

    wasm_exec_env_destroy_internal(exec_env);

    os_thread_exit(retval);
}

static void
set_thread_cancel_flags(WASMExecEnv *exec_env)
{
    /* Set the termination flag */
#if WASM_ENABLE_DEBUG_INTERP != 0
    wasm_cluster_thread_send_signal(exec_env, WAMR_SIG_TERM);
#else
    exec_env->suspend_flags.flags |= 0x01;
#endif
}

int32
wasm_cluster_cancel_thread(WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster_list_lock);
    os_mutex_lock(&exec_env->cluster->lock);

    if (!exec_env->cluster) {
        goto final;
    }

    if (!clusters_have_exec_env(exec_env)) {
        /* Invalid thread or the thread has exited */
        goto final;
    }

    set_thread_cancel_flags(exec_env);

final:
    os_mutex_unlock(&exec_env->cluster->lock);
    os_mutex_unlock(&cluster_list_lock);

    return 0;
}

static void
terminate_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_cancel_thread(curr_exec_env);
    wasm_cluster_join_thread(curr_exec_env, NULL);
}

void
wasm_cluster_terminate_all(WASMCluster *cluster)
{
    os_mutex_lock(&cluster->lock);
    cluster->processing = true;

    safe_traverse_exec_env_list(cluster, terminate_thread_visitor, NULL);

    cluster->processing = false;
    os_mutex_unlock(&cluster->lock);
}

void
wasm_cluster_terminate_all_except_self(WASMCluster *cluster,
                                       WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster->lock);
    cluster->processing = true;

    safe_traverse_exec_env_list(cluster, terminate_thread_visitor,
                                (void *)exec_env);

    cluster->processing = false;
    os_mutex_unlock(&cluster->lock);
}

static void
wait_for_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_join_thread(curr_exec_env, NULL);
}

void
wams_cluster_wait_for_all(WASMCluster *cluster)
{
    os_mutex_lock(&cluster->lock);
    cluster->processing = true;

    safe_traverse_exec_env_list(cluster, wait_for_thread_visitor, NULL);

    cluster->processing = false;
    os_mutex_unlock(&cluster->lock);
}

void
wasm_cluster_wait_for_all_except_self(WASMCluster *cluster,
                                      WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster->lock);
    cluster->processing = true;

    safe_traverse_exec_env_list(cluster, wait_for_thread_visitor,
                                (void *)exec_env);

    cluster->processing = false;
    os_mutex_unlock(&cluster->lock);
}

#ifdef OS_ENABLE_INTERRUPT_BLOCK_INSN
static void
kill_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    bh_assert(curr_exec_env->handle);
    os_thread_kill(curr_exec_env->handle);
}

void
wasm_cluster_kill_all_except_self(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster->lock);
    traverse_list(&cluster->exec_env_list, kill_thread_visitor,
                  (void *)exec_env);
    os_mutex_unlock(&cluster->lock);
}
#endif

bool
wasm_cluster_register_destroy_callback(void (*callback)(WASMCluster *))
{
    DestroyCallBackNode *node;

    if (!(node = wasm_runtime_malloc(sizeof(DestroyCallBackNode)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return false;
    }
    node->destroy_cb = callback;
    bh_list_insert(destroy_callback_list, node);
    return true;
}

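/*
 * Usage sketch (illustrative; `my_library_cleanup` is a hypothetical
 * callback): a library that attaches per-cluster state can register a hook
 * that runs whenever a cluster is destroyed:
 *
 *     static void
 *     my_library_cleanup(WASMCluster *cluster)
 *     {
 *         // release any data this library associated with the cluster
 *     }
 *
 *     wasm_cluster_register_destroy_callback(my_library_cleanup);
 */
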
void
wasm_cluster_suspend_thread(WASMExecEnv *exec_env)
{
    /* Set the suspend flag */
    exec_env->suspend_flags.flags |= 0x02;
}

static void
suspend_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_suspend_thread(curr_exec_env);
}

void
wasm_cluster_suspend_all(WASMCluster *cluster)
{
    os_mutex_lock(&cluster->lock);
    traverse_list(&cluster->exec_env_list, suspend_thread_visitor, NULL);
    os_mutex_unlock(&cluster->lock);
}

void
wasm_cluster_suspend_all_except_self(WASMCluster *cluster,
                                     WASMExecEnv *exec_env)
{
    os_mutex_lock(&cluster->lock);
    traverse_list(&cluster->exec_env_list, suspend_thread_visitor,
                  (void *)exec_env);
    os_mutex_unlock(&cluster->lock);
}

void
wasm_cluster_resume_thread(WASMExecEnv *exec_env)
{
    exec_env->suspend_flags.flags &= ~0x02;
    os_cond_signal(&exec_env->wait_cond);
}

static void
resume_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;

    wasm_cluster_resume_thread(curr_exec_env);
}

void
wasm_cluster_resume_all(WASMCluster *cluster)
{
    os_mutex_lock(&cluster->lock);
    traverse_list(&cluster->exec_env_list, resume_thread_visitor, NULL);
    os_mutex_unlock(&cluster->lock);
}

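/*
 * Usage sketch (illustrative): a supervising thread can pause every other
 * wasm thread in a cluster, do some work, and then let them continue:
 *
 *     wasm_cluster_suspend_all_except_self(cluster, my_exec_env);
 *     // ... inspect or snapshot shared state ...
 *     wasm_cluster_resume_all(cluster);
 *
 * Suspension is cooperative: it only sets a flag in suspend_flags, so a
 * thread pauses the next time the running wasm code checks that flag.
 */
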
static void
set_exception_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
    WASMModuleInstanceCommon *module_inst = get_module_inst(exec_env);
    WASMModuleInstance *wasm_inst = (WASMModuleInstance *)module_inst;

    if (curr_exec_env != exec_env) {
        WASMModuleInstance *curr_wasm_inst =
            (WASMModuleInstance *)get_module_inst(curr_exec_env);

        bh_memcpy_s(curr_wasm_inst->cur_exception,
                    sizeof(curr_wasm_inst->cur_exception),
                    wasm_inst->cur_exception, sizeof(wasm_inst->cur_exception));

        /* Terminate the thread so it can exit from dead loops */
        set_thread_cancel_flags(curr_exec_env);
    }
}

static void
clear_exception_visitor(void *node, void *user_data)
{
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;

    if (curr_exec_env != exec_env) {
        WASMModuleInstance *curr_wasm_inst =
            (WASMModuleInstance *)get_module_inst(curr_exec_env);

        curr_wasm_inst->cur_exception[0] = '\0';
    }
}

void
wasm_cluster_spread_exception(WASMExecEnv *exec_env, bool clear)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    os_mutex_lock(&cluster->lock);
    cluster->has_exception = !clear;
    traverse_list(&cluster->exec_env_list,
                  clear ? clear_exception_visitor : set_exception_visitor,
                  exec_env);
    os_mutex_unlock(&cluster->lock);
}

static void
set_custom_data_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMModuleInstanceCommon *module_inst = get_module_inst(curr_exec_env);

    wasm_runtime_set_custom_data_internal(module_inst, user_data);
}

void
wasm_cluster_spread_custom_data(WASMModuleInstanceCommon *module_inst,
                                void *custom_data)
{
    WASMExecEnv *exec_env = wasm_clusters_search_exec_env(module_inst);

    if (exec_env == NULL) {
        /* Maybe threads have not been started yet. */
        wasm_runtime_set_custom_data_internal(module_inst, custom_data);
    }
    else {
        WASMCluster *cluster;

        cluster = wasm_exec_env_get_cluster(exec_env);
        bh_assert(cluster);

        os_mutex_lock(&cluster->lock);
        traverse_list(&cluster->exec_env_list, set_custom_data_visitor,
                      custom_data);
        os_mutex_unlock(&cluster->lock);
    }
}