thread_manager.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "thread_manager.h"

typedef struct {
    bh_list_link l;
    void (*destroy_cb)(WASMCluster *);
} DestroyCallBackNode;

static bh_list destroy_callback_list_head;
static bh_list *const destroy_callback_list = &destroy_callback_list_head;

static bh_list cluster_list_head;
static bh_list *const cluster_list = &cluster_list_head;
static korp_mutex cluster_list_lock;

typedef void (*list_visitor)(void *, void *);

static uint32 cluster_max_thread_num = CLUSTER_MAX_THREAD_NUM;

/* Set the maximum thread number; if this function is not called,
   the max thread num is CLUSTER_MAX_THREAD_NUM */
void
wasm_cluster_set_max_thread_num(uint32 num)
{
    if (num > 0)
        cluster_max_thread_num = num;
}
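
/*
 * Illustrative sketch (assumption: embedder code, not part of this file).
 * The limit is typically raised once, before any cluster is created,
 * because wasm_cluster_create() below partitions the module's aux stack
 * using the value in effect at creation time:
 *
 *     wasm_cluster_set_max_thread_num(8);  // allow up to 8 managed threads
 */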
bool
thread_manager_init()
{
    if (bh_list_init(cluster_list) != 0)
        return false;
    if (os_mutex_init(&cluster_list_lock) != 0)
        return false;
    return true;
}

void
thread_manager_destroy()
{
    WASMCluster *cluster = bh_list_first_elem(cluster_list);
    WASMCluster *next;

    while (cluster) {
        next = bh_list_elem_next(cluster);
        wasm_cluster_destroy(cluster);
        cluster = next;
    }
    wasm_cluster_cancel_all_callbacks();
    os_mutex_destroy(&cluster_list_lock);
}
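
/*
 * Minimal lifecycle sketch (assumption: embedder code). The thread manager
 * is initialized once, clusters live while it is alive, and
 * thread_manager_destroy() tears down any clusters still registered in
 * cluster_list:
 *
 *     if (!thread_manager_init())
 *         return -1;
 *     ...  // wasm_cluster_create() / wasm_cluster_create_thread() ...
 *     thread_manager_destroy();
 */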
static void
traverse_list(bh_list *l, list_visitor visitor, void *user_data)
{
    void *next, *node = bh_list_first_elem(l);

    while (node) {
        next = bh_list_elem_next(node);
        visitor(node, user_data);
        node = next;
    }
}

static bool
allocate_aux_stack(WASMCluster *cluster, uint32 *start, uint32 *size)
{
    uint32 i;

    /* If the module doesn't have aux stack info,
       it can't create any threads */
    if (!cluster->stack_segment_occupied)
        return false;

    os_mutex_lock(&cluster->lock);
    for (i = 0; i < cluster_max_thread_num; i++) {
        if (!cluster->stack_segment_occupied[i]) {
            if (start)
                *start = cluster->stack_tops[i];
            if (size)
                *size = cluster->stack_size;
            cluster->stack_segment_occupied[i] = true;
            os_mutex_unlock(&cluster->lock);
            return true;
        }
    }
    os_mutex_unlock(&cluster->lock);
    return false;
}

static bool
free_aux_stack(WASMCluster *cluster, uint32 start)
{
    uint32 i;

    for (i = 0; i < cluster_max_thread_num; i++) {
        if (start == cluster->stack_tops[i]) {
            os_mutex_lock(&cluster->lock);
            cluster->stack_segment_occupied[i] = false;
            os_mutex_unlock(&cluster->lock);
            return true;
        }
    }
    return false;
}
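
/*
 * Illustrative pairing sketch (hypothetical caller inside this file): every
 * successful allocate_aux_stack() is balanced by a free_aux_stack() with the
 * same start address, since slots are matched by that value:
 *
 *     uint32 start, size;
 *     if (allocate_aux_stack(cluster, &start, &size)) {
 *         ...  // run the thread on that aux stack segment
 *         free_aux_stack(cluster, start);
 *     }
 */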
WASMCluster *
wasm_cluster_create(WASMExecEnv *exec_env)
{
    WASMCluster *cluster;
    uint64 total_size;
    uint32 aux_stack_start, aux_stack_size, i;

    bh_assert(exec_env->cluster == NULL);
    if (!(cluster = wasm_runtime_malloc(sizeof(WASMCluster)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return NULL;
    }
    memset(cluster, 0, sizeof(WASMCluster));

    exec_env->cluster = cluster;

    bh_list_init(&cluster->exec_env_list);
    bh_list_insert(&cluster->exec_env_list, exec_env);

    if (os_mutex_init(&cluster->lock) != 0) {
        wasm_runtime_free(cluster);
        LOG_ERROR("thread manager error: failed to init mutex");
        return NULL;
    }

    /* Prepare the aux stack top and size for every thread */
    if (!wasm_exec_env_get_aux_stack(exec_env,
                                     &aux_stack_start,
                                     &aux_stack_size)) {
        LOG_VERBOSE("No aux stack info for this module, can't create thread");
        /* If the module doesn't have aux stack info, don't throw an error
           here; just leave stack_tops and stack_segment_occupied as NULL */
        os_mutex_lock(&cluster_list_lock);
        if (bh_list_insert(cluster_list, cluster) != 0) {
            os_mutex_unlock(&cluster_list_lock);
            goto fail;
        }
        os_mutex_unlock(&cluster_list_lock);

        return cluster;
    }

    cluster->stack_size = aux_stack_size / (cluster_max_thread_num + 1);
    if (cluster->stack_size == 0) {
        goto fail;
    }

    /* Set initial aux stack top to the instance and
       aux stack boundary to the main exec_env */
    if (!wasm_exec_env_set_aux_stack(exec_env, aux_stack_start,
                                     cluster->stack_size))
        goto fail;

    if (cluster_max_thread_num != 0) {
        total_size = cluster_max_thread_num * sizeof(uint32);
        if (total_size >= UINT32_MAX
            || !(cluster->stack_tops =
                     wasm_runtime_malloc((uint32)total_size))) {
            goto fail;
        }
        memset(cluster->stack_tops, 0, (uint32)total_size);

        if (!(cluster->stack_segment_occupied =
                  wasm_runtime_malloc(cluster_max_thread_num * sizeof(bool)))) {
            goto fail;
        }
        memset(cluster->stack_segment_occupied, 0,
               cluster_max_thread_num * sizeof(bool));

        /* Reserve space for the main instance */
        aux_stack_start -= cluster->stack_size;
        for (i = 0; i < cluster_max_thread_num; i++) {
            cluster->stack_tops[i] = aux_stack_start - cluster->stack_size * i;
        }
    }

    os_mutex_lock(&cluster_list_lock);
    if (bh_list_insert(cluster_list, cluster) != 0) {
        os_mutex_unlock(&cluster_list_lock);
        goto fail;
    }
    os_mutex_unlock(&cluster_list_lock);

    return cluster;

fail:
    if (cluster)
        wasm_cluster_destroy(cluster);

    return NULL;
}
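
/*
 * Worked example of the partitioning above (hypothetical numbers): with an
 * aux stack of 64 KB reported by the module and cluster_max_thread_num = 3,
 * stack_size = 64 KB / (3 + 1) = 16 KB. The first 16 KB segment stays with
 * the main exec_env; stack_tops[0..2] then point 16 KB, 32 KB and 48 KB
 * below the original aux_stack_start, one segment per spawnable thread.
 */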
static void
destroy_cluster_visitor(void *node, void *user_data)
{
    DestroyCallBackNode *destroy_node = (DestroyCallBackNode *)node;
    WASMCluster *cluster = (WASMCluster *)user_data;

    destroy_node->destroy_cb(cluster);
}

void
wasm_cluster_destroy(WASMCluster *cluster)
{
    traverse_list(destroy_callback_list,
                  destroy_cluster_visitor, (void *)cluster);

    /* Remove the cluster from the cluster list */
    os_mutex_lock(&cluster_list_lock);
    bh_list_remove(cluster_list, cluster);
    os_mutex_unlock(&cluster_list_lock);

    os_mutex_destroy(&cluster->lock);

    if (cluster->stack_tops)
        wasm_runtime_free(cluster->stack_tops);
    if (cluster->stack_segment_occupied)
        wasm_runtime_free(cluster->stack_segment_occupied);
    wasm_runtime_free(cluster);
}

static void
free_node_visitor(void *node, void *user_data)
{
    wasm_runtime_free(node);
}

void
wasm_cluster_cancel_all_callbacks()
{
    traverse_list(destroy_callback_list, free_node_visitor, NULL);
}

WASMCluster *
wasm_exec_env_get_cluster(WASMExecEnv *exec_env)
{
    return exec_env->cluster;
}

bool
wasm_cluster_add_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    bool ret = true;

    exec_env->cluster = cluster;

    os_mutex_lock(&cluster->lock);
    if (bh_list_insert(&cluster->exec_env_list, exec_env) != 0)
        ret = false;
    os_mutex_unlock(&cluster->lock);
    return ret;
}

bool
wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
    bool ret = true;

    bh_assert(exec_env->cluster == cluster);

    os_mutex_lock(&cluster->lock);
    if (bh_list_remove(&cluster->exec_env_list, exec_env) != 0)
        ret = false;
    os_mutex_unlock(&cluster->lock);

    if (cluster->exec_env_list.len == 0) {
        /* exec_env_list empty, destroy the cluster */
        wasm_cluster_destroy(cluster);
    }
    return ret;
}

WASMExecEnv *
wasm_cluster_spawn_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_t module = wasm_exec_env_get_module(exec_env);
    wasm_module_inst_t new_module_inst;
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start, aux_stack_size;

    if (!module) {
        return NULL;
    }

    if (!(new_module_inst =
              wasm_runtime_instantiate_internal(module, true, 8192,
                                                0, NULL, 0))) {
        return NULL;
    }

    new_exec_env = wasm_exec_env_create_internal(
        new_module_inst, exec_env->wasm_stack_size);
    if (!new_exec_env)
        goto fail1;

    if (!allocate_aux_stack(cluster, &aux_stack_start, &aux_stack_size)) {
        LOG_ERROR("thread manager error: "
                  "failed to allocate aux stack space for new thread");
        goto fail2;
    }

    /* Set aux stack for current thread */
    if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                     aux_stack_size)) {
        goto fail3;
    }

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail3;

    return new_exec_env;

fail3:
    /* free the allocated aux stack space */
    free_aux_stack(cluster, aux_stack_start);
fail2:
    wasm_exec_env_destroy(new_exec_env);
fail1:
    wasm_runtime_deinstantiate_internal(new_module_inst, true);

    return NULL;
}

void
wasm_cluster_destroy_spawned_exec_env(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    wasm_module_inst_t module_inst = wasm_runtime_get_module_inst(exec_env);

    bh_assert(cluster != NULL);

    /* Free aux stack space */
    free_aux_stack(cluster,
                   exec_env->aux_stack_boundary + cluster->stack_size);

    wasm_cluster_del_exec_env(cluster, exec_env);
    wasm_exec_env_destroy_internal(exec_env);

    wasm_runtime_deinstantiate_internal(module_inst, true);
}
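
/*
 * Usage sketch (assumption: embedder code running with an existing
 * exec_env). A spawned exec_env gets its own module instance and aux stack
 * segment and is released with wasm_cluster_destroy_spawned_exec_env()
 * once the embedder is done with it:
 *
 *     WASMExecEnv *child = wasm_cluster_spawn_exec_env(exec_env);
 *     if (child) {
 *         ...  // e.g. call wasm functions on `child` from another thread
 *         wasm_cluster_destroy_spawned_exec_env(child);
 *     }
 */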
/* Start routine of threads created by the thread manager */
static void *
thread_manager_start_routine(void *arg)
{
    void *ret;
    WASMExecEnv *exec_env = (WASMExecEnv *)arg;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);

    bh_assert(cluster != NULL);

    exec_env->handle = os_self_thread();
    ret = exec_env->thread_start_routine(exec_env);

#ifdef OS_ENABLE_HW_BOUND_CHECK
    if (exec_env->suspend_flags.flags & 0x08)
        ret = exec_env->thread_ret_value;
#endif

    /* Routine exit */
    /* Free aux stack space */
    free_aux_stack(cluster,
                   exec_env->aux_stack_boundary + cluster->stack_size);
    /* Detach the native thread here to ensure the resources are freed */
    wasm_cluster_detach_thread(exec_env);
    /* Remove and destroy exec_env */
    wasm_cluster_del_exec_env(cluster, exec_env);
    wasm_exec_env_destroy_internal(exec_env);

    os_thread_exit(ret);
    return ret;
}

int32
wasm_cluster_create_thread(WASMExecEnv *exec_env,
                           wasm_module_inst_t module_inst,
                           void *(*thread_routine)(void *),
                           void *arg)
{
    WASMCluster *cluster;
    WASMExecEnv *new_exec_env;
    uint32 aux_stack_start, aux_stack_size;
    korp_tid tid;

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    new_exec_env = wasm_exec_env_create_internal(
        module_inst, exec_env->wasm_stack_size);
    if (!new_exec_env)
        return -1;

    if (!allocate_aux_stack(cluster, &aux_stack_start, &aux_stack_size)) {
        LOG_ERROR("thread manager error: "
                  "failed to allocate aux stack space for new thread");
        goto fail1;
    }

    /* Set aux stack for current thread */
    if (!wasm_exec_env_set_aux_stack(new_exec_env, aux_stack_start,
                                     aux_stack_size)) {
        goto fail2;
    }

    if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
        goto fail2;

    new_exec_env->thread_start_routine = thread_routine;
    new_exec_env->thread_arg = arg;

    if (0 != os_thread_create(&tid, thread_manager_start_routine,
                              (void *)new_exec_env,
                              APP_THREAD_STACK_SIZE_DEFAULT)) {
        goto fail3;
    }

    return 0;

fail3:
    wasm_cluster_del_exec_env(cluster, new_exec_env);
fail2:
    /* free the allocated aux stack space */
    free_aux_stack(cluster, aux_stack_start);
fail1:
    wasm_exec_env_destroy(new_exec_env);

    return -1;
}
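
/*
 * Usage sketch (hypothetical thread routine; error handling elided). The
 * routine receives the new exec_env as its argument; when it returns, the
 * wrapper thread_manager_start_routine() above frees the aux stack segment
 * and destroys the exec_env:
 *
 *     static void *
 *     my_thread_routine(void *arg)
 *     {
 *         WASMExecEnv *env = (WASMExecEnv *)arg;
 *         ...  // call into the wasm instance via `env`
 *         return NULL;
 *     }
 *
 *     wasm_cluster_create_thread(exec_env, module_inst,
 *                                my_thread_routine, NULL);
 */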
int32
wasm_cluster_join_thread(WASMExecEnv *exec_env, void **ret_val)
{
    return os_thread_join(exec_env->handle, ret_val);
}

int32
wasm_cluster_detach_thread(WASMExecEnv *exec_env)
{
    return os_thread_detach(exec_env->handle);
}

void
wasm_cluster_exit_thread(WASMExecEnv *exec_env, void *retval)
{
    WASMCluster *cluster;

#ifdef OS_ENABLE_HW_BOUND_CHECK
    if (exec_env->jmpbuf_stack_top) {
        WASMJmpBuf *jmpbuf_node;

        /* Store the return value in exec_env */
        exec_env->thread_ret_value = retval;
        exec_env->suspend_flags.flags |= 0x08;

        /* Free all jmpbuf_node except the last one */
        while (exec_env->jmpbuf_stack_top->prev) {
            jmpbuf_node = wasm_exec_env_pop_jmpbuf(exec_env);
            wasm_runtime_free(jmpbuf_node);
        }

        jmpbuf_node = exec_env->jmpbuf_stack_top;
        os_longjmp(jmpbuf_node->jmpbuf, 1);
        return;
    }
#endif

    cluster = wasm_exec_env_get_cluster(exec_env);
    bh_assert(cluster);

    /* The app exits the thread: free the resources before
       exiting the native thread */
    /* Free aux stack space */
    free_aux_stack(cluster,
                   exec_env->aux_stack_boundary + cluster->stack_size);
    /* Detach the native thread here to ensure the resources are freed */
    wasm_cluster_detach_thread(exec_env);
    /* Remove and destroy exec_env */
    wasm_cluster_del_exec_env(cluster, exec_env);
    wasm_exec_env_destroy_internal(exec_env);

    os_thread_exit(retval);
}

int32
wasm_cluster_cancel_thread(WASMExecEnv *exec_env)
{
    /* Set the termination flag */
    exec_env->suspend_flags.flags |= 0x01;
    return 0;
}
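
/*
 * Note: cancellation is cooperative. Setting bit 0x01 only requests
 * termination; the running thread is expected to observe the flag and exit
 * on its own. A caller that needs to wait for that can pair the request
 * with a join, as terminate_thread_visitor() below does:
 *
 *     wasm_cluster_cancel_thread(target_env);
 *     wasm_cluster_join_thread(target_env, NULL);
 */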
static void
terminate_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_cancel_thread(curr_exec_env);
    wasm_cluster_join_thread(curr_exec_env, NULL);
}

void
wasm_cluster_terminate_all(WASMCluster *cluster)
{
    traverse_list(&cluster->exec_env_list,
                  terminate_thread_visitor, NULL);
}

void
wasm_cluster_terminate_all_except_self(WASMCluster *cluster,
                                       WASMExecEnv *exec_env)
{
    traverse_list(&cluster->exec_env_list,
                  terminate_thread_visitor, (void *)exec_env);
}

bool
wasm_cluster_register_destroy_callback(void (*callback)(WASMCluster *))
{
    DestroyCallBackNode *node;

    if (!(node = wasm_runtime_malloc(sizeof(DestroyCallBackNode)))) {
        LOG_ERROR("thread manager error: failed to allocate memory");
        return false;
    }
    node->destroy_cb = callback;
    bh_list_insert(destroy_callback_list, node);
    return true;
}
void
wasm_cluster_suspend_thread(WASMExecEnv *exec_env)
{
    /* Set the suspend flag */
    exec_env->suspend_flags.flags |= 0x02;
}

static void
suspend_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;

    if (curr_exec_env == exec_env)
        return;

    wasm_cluster_suspend_thread(curr_exec_env);
}

void
wasm_cluster_suspend_all(WASMCluster *cluster)
{
    traverse_list(&cluster->exec_env_list,
                  suspend_thread_visitor, NULL);
}

void
wasm_cluster_suspend_all_except_self(WASMCluster *cluster,
                                     WASMExecEnv *exec_env)
{
    traverse_list(&cluster->exec_env_list,
                  suspend_thread_visitor, (void *)exec_env);
}

void
wasm_cluster_resume_thread(WASMExecEnv *exec_env)
{
    exec_env->suspend_flags.flags &= ~0x02;
}

static void
resume_thread_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;

    wasm_cluster_resume_thread(curr_exec_env);
}

void
wasm_cluster_resume_all(WASMCluster *cluster)
{
    traverse_list(&cluster->exec_env_list, resume_thread_visitor, NULL);
}
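
/*
 * Usage sketch (assumption: called from a thread inside the cluster). Like
 * cancellation, suspension is signalled through a flag (0x02) that the
 * target threads observe cooperatively. A typical stop-the-world pattern
 * suspends every sibling, does the critical work, then resumes them:
 *
 *     wasm_cluster_suspend_all_except_self(cluster, exec_env);
 *     ...  // e.g. inspect or update shared state
 *     wasm_cluster_resume_all(cluster);
 */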
static void
set_exception_visitor(void *node, void *user_data)
{
    WASMExecEnv *curr_exec_env = (WASMExecEnv *)node;
    WASMExecEnv *exec_env = (WASMExecEnv *)user_data;
    WASMModuleInstanceCommon *module_inst = get_module_inst(exec_env);
    WASMModuleInstanceCommon *curr_module_inst =
        get_module_inst(curr_exec_env);
    const char *exception = wasm_runtime_get_exception(module_inst);

    /* skip "Exception: " */
    exception += 11;

    if (curr_exec_env != exec_env) {
        curr_module_inst = get_module_inst(curr_exec_env);
        wasm_runtime_set_exception(curr_module_inst, exception);
    }
}

void
wasm_cluster_spread_exception(WASMExecEnv *exec_env)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);

    traverse_list(&cluster->exec_env_list, set_exception_visitor, exec_env);
}
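
/*
 * Note on the visitor above: when one thread traps, its instance's
 * exception string (minus the leading "Exception: " prefix) is copied to
 * every other module instance in the cluster, so sibling threads can
 * observe the failure the next time they check for an exception.
 */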