lib_pthread_wrapper.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "bh_common.h"
#include "bh_log.h"
#include "wasm_export.h"
#include "../interpreter/wasm.h"
#include "../common/wasm_runtime_common.h"
#include "thread_manager.h"

#if WASM_ENABLE_INTERP != 0
#include "wasm_runtime.h"
#endif

#if WASM_ENABLE_AOT != 0
#include "aot_runtime.h"
#endif

#define WAMR_PTHREAD_KEYS_MAX 32

/* clang-format off */
#define get_module(exec_env) \
    wasm_exec_env_get_module(exec_env)

#define get_module_inst(exec_env) \
    wasm_runtime_get_module_inst(exec_env)

#define get_thread_arg(exec_env) \
    wasm_exec_env_get_thread_arg(exec_env)

#define get_wasi_ctx(module_inst) \
    wasm_runtime_get_wasi_ctx(module_inst)

#define validate_app_addr(offset, size) \
    wasm_runtime_validate_app_addr(module_inst, offset, size)

#define validate_native_addr(addr, size) \
    wasm_runtime_validate_native_addr(module_inst, addr, size)

#define addr_app_to_native(offset) \
    wasm_runtime_addr_app_to_native(module_inst, offset)

#define addr_native_to_app(ptr) \
    wasm_runtime_addr_native_to_app(module_inst, ptr)
/* clang-format on */

extern bool
wasm_runtime_call_indirect(wasm_exec_env_t exec_env, uint32 element_indices,
                           uint32 argc, uint32 argv[]);
enum {
    T_THREAD,
    T_MUTEX,
    T_COND,
    T_SEM,
};

enum thread_status_t {
    THREAD_INIT,
    THREAD_RUNNING,
    THREAD_CANCELLED,
    THREAD_EXIT,
};

enum mutex_status_t {
    MUTEX_CREATED,
    MUTEX_DESTROYED,
};

enum cond_status_t {
    COND_CREATED,
    COND_DESTROYED,
};

enum sem_status_t {
    SEM_CREATED,
    SEM_CLOSED,
    SEM_DESTROYED,
};

typedef struct ThreadKeyValueNode {
    bh_list_link l;
    wasm_exec_env_t exec_env;
    int32 thread_key_values[WAMR_PTHREAD_KEYS_MAX];
} ThreadKeyValueNode;

typedef struct KeyData {
    int32 destructor_func;
    bool is_created;
} KeyData;

typedef struct ClusterInfoNode {
    bh_list_link l;
    WASMCluster *cluster;
    HashMap *thread_info_map;
    /* Key data list */
    KeyData key_data_list[WAMR_PTHREAD_KEYS_MAX];
    korp_mutex key_data_list_lock;
    /* Every node contains the key value list for a thread */
    bh_list thread_list_head;
    bh_list *thread_list;
} ClusterInfoNode;

typedef struct ThreadInfoNode {
    wasm_exec_env_t parent_exec_env;
    wasm_exec_env_t exec_env;
    /* the id returned to app */
    uint32 handle;
    /* type can be [THREAD | MUTEX | CONDITION | SEM] */
    uint32 type;
    /* Thread status, this variable should be volatile
       as its value may be changed by different threads */
    volatile uint32 status;
    bool joinable;
    union {
        korp_tid thread;
        korp_mutex *mutex;
        korp_cond *cond;
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
        korp_sem *sem;
#endif
        /* A copy of the thread return value */
        void *ret;
    } u;
} ThreadInfoNode;

typedef struct {
    ThreadInfoNode *info_node;
    /* table elem index of the app's entry function */
    uint32 elem_index;
    /* arg of the app's entry function */
    uint32 arg;
    wasm_module_inst_t module_inst;
} ThreadRoutineArgs;

typedef struct {
    uint32 handle;
    ThreadInfoNode *node;
} SemCallbackArgs;

static bh_list cluster_info_list;
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
static HashMap *sem_info_map;
#endif
static korp_mutex thread_global_lock;
static uint32 handle_id = 1;
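
/*
 * Design note: every pthread object created through this wrapper (thread,
 * mutex, condition variable, semaphore) is exposed to the wasm app as a
 * plain uint32 handle allocated from handle_id. The handle is the key of a
 * per-cluster hash map (ClusterInfoNode.thread_info_map) whose values are
 * ThreadInfoNode structures holding the real korp_* objects, so the app
 * never sees host pointers. thread_global_lock protects handle allocation
 * and the cluster_info_list itself.
 */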
static void
lib_pthread_destroy_callback(WASMCluster *cluster);

static uint32
thread_handle_hash(void *handle)
{
    return (uint32)(uintptr_t)handle;
}

static bool
thread_handle_equal(void *h1, void *h2)
{
    return (uint32)(uintptr_t)h1 == (uint32)(uintptr_t)h2 ? true : false;
}

static void
thread_info_destroy(void *node)
{
    ThreadInfoNode *info_node = (ThreadInfoNode *)node;

    os_mutex_lock(&thread_global_lock);
    if (info_node->type == T_MUTEX) {
        if (info_node->status != MUTEX_DESTROYED)
            os_mutex_destroy(info_node->u.mutex);
        wasm_runtime_free(info_node->u.mutex);
    }
    else if (info_node->type == T_COND) {
        if (info_node->status != COND_DESTROYED)
            os_cond_destroy(info_node->u.cond);
        wasm_runtime_free(info_node->u.cond);
    }
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
    else if (info_node->type == T_SEM) {
        if (info_node->status != SEM_DESTROYED)
            os_sem_close(info_node->u.sem);
    }
#endif
    wasm_runtime_free(info_node);
    os_mutex_unlock(&thread_global_lock);
}
bool
lib_pthread_init()
{
    if (0 != os_mutex_init(&thread_global_lock))
        return false;
    bh_list_init(&cluster_info_list);
    if (!wasm_cluster_register_destroy_callback(lib_pthread_destroy_callback)) {
        os_mutex_destroy(&thread_global_lock);
        return false;
    }
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
    if (!(sem_info_map = bh_hash_map_create(
              32, true, (HashFunc)wasm_string_hash,
              (KeyEqualFunc)wasm_string_equal, NULL, thread_info_destroy))) {
        os_mutex_destroy(&thread_global_lock);
        return false;
    }
#endif
    return true;
}

void
lib_pthread_destroy()
{
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
    bh_hash_map_destroy(sem_info_map);
#endif
    os_mutex_destroy(&thread_global_lock);
}
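
/*
 * Note: lib_pthread_init()/lib_pthread_destroy() are expected to be invoked
 * exactly once by the runtime's global init/teardown path when the
 * lib-pthread feature is enabled; the cluster destroy callback registered
 * above is what eventually tears down the per-cluster bookkeeping below.
 */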
static ClusterInfoNode *
get_cluster_info(WASMCluster *cluster)
{
    ClusterInfoNode *node;

    os_mutex_lock(&thread_global_lock);
    node = bh_list_first_elem(&cluster_info_list);

    while (node) {
        if (cluster == node->cluster) {
            os_mutex_unlock(&thread_global_lock);
            return node;
        }
        node = bh_list_elem_next(node);
    }
    os_mutex_unlock(&thread_global_lock);

    return NULL;
}

static KeyData *
key_data_list_lookup(wasm_exec_env_t exec_env, int32 key)
{
    ClusterInfoNode *node;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);

    if ((node = get_cluster_info(cluster))) {
        return (key >= 0 && key < WAMR_PTHREAD_KEYS_MAX
                && node->key_data_list[key].is_created)
                   ? &(node->key_data_list[key])
                   : NULL;
    }
    return NULL;
}
/**
 * Look up the thread key value node for a thread, and create a new one if
 * none is found. This design reduces memory usage: a thread that doesn't use
 * thread-local storage doesn't occupy any extra space.
 */
static int32 *
key_value_list_lookup_or_create(wasm_exec_env_t exec_env, ClusterInfoNode *info,
                                int32 key)
{
    KeyData *key_node;
    ThreadKeyValueNode *data;

    /* Check if the key is valid */
    key_node = key_data_list_lookup(exec_env, key);
    if (!key_node) {
        return NULL;
    }

    /* Find key values node */
    data = bh_list_first_elem(info->thread_list);
    while (data) {
        if (data->exec_env == exec_env)
            return data->thread_key_values;
        data = bh_list_elem_next(data);
    }

    /* If not found, create a new node for this thread */
    if (!(data = wasm_runtime_malloc(sizeof(ThreadKeyValueNode))))
        return NULL;
    memset(data, 0, sizeof(ThreadKeyValueNode));
    data->exec_env = exec_env;

    if (bh_list_insert(info->thread_list, data) != 0) {
        wasm_runtime_free(data);
        return NULL;
    }

    return data->thread_key_values;
}
static void
call_key_destructor(wasm_exec_env_t exec_env)
{
    int32 i;
    uint32 destructor_index;
    KeyData *key_node;
    ThreadKeyValueNode *value_node;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    ClusterInfoNode *info = get_cluster_info(cluster);

    if (!info) {
        return;
    }

    value_node = bh_list_first_elem(info->thread_list);
    while (value_node) {
        if (value_node->exec_env == exec_env)
            break;
        value_node = bh_list_elem_next(value_node);
    }

    /* This thread hasn't created key value node */
    if (!value_node)
        return;

    /* Destroy key values */
    for (i = 0; i < WAMR_PTHREAD_KEYS_MAX; i++) {
        if (value_node->thread_key_values[i] != 0) {
            int32 value = value_node->thread_key_values[i];

            os_mutex_lock(&info->key_data_list_lock);

            if ((key_node = key_data_list_lookup(exec_env, i)))
                destructor_index = key_node->destructor_func;
            else
                destructor_index = 0;

            os_mutex_unlock(&info->key_data_list_lock);

            /* reset key value */
            value_node->thread_key_values[i] = 0;

            /* Call the destructor func provided by app */
            if (destructor_index) {
                uint32 argv[1];
                argv[0] = value;
                wasm_runtime_call_indirect(exec_env, destructor_index, 1, argv);
            }
        }
    }

    bh_list_remove(info->thread_list, value_node);
    wasm_runtime_free(value_node);
}
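
/*
 * Note: unlike POSIX, which re-runs destructors for values re-set during
 * destruction (up to PTHREAD_DESTRUCTOR_ITERATIONS rounds), the loop above
 * makes a single pass and clears each slot before invoking the app-provided
 * destructor through the wasm table.
 */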
static void
destroy_thread_key_value_list(bh_list *list)
{
    ThreadKeyValueNode *node, *next;

    /* There should be only one node, for the main thread */
    bh_assert(list->len <= 1);

    if (list->len) {
        node = bh_list_first_elem(list);
        while (node) {
            next = bh_list_elem_next(node);
            call_key_destructor(node->exec_env);
            node = next;
        }
    }
}
static ClusterInfoNode *
create_cluster_info(WASMCluster *cluster)
{
    ClusterInfoNode *node;
    bh_list_status ret;

    if (!(node = wasm_runtime_malloc(sizeof(ClusterInfoNode)))) {
        return NULL;
    }
    memset(node, 0, sizeof(ClusterInfoNode));

    node->thread_list = &node->thread_list_head;
    ret = bh_list_init(node->thread_list);
    bh_assert(ret == BH_LIST_SUCCESS);

    if (os_mutex_init(&node->key_data_list_lock) != 0) {
        wasm_runtime_free(node);
        return NULL;
    }

    node->cluster = cluster;
    if (!(node->thread_info_map = bh_hash_map_create(
              32, true, (HashFunc)thread_handle_hash,
              (KeyEqualFunc)thread_handle_equal, NULL, thread_info_destroy))) {
        os_mutex_destroy(&node->key_data_list_lock);
        wasm_runtime_free(node);
        return NULL;
    }
    os_mutex_lock(&thread_global_lock);
    ret = bh_list_insert(&cluster_info_list, node);
    bh_assert(ret == BH_LIST_SUCCESS);
    os_mutex_unlock(&thread_global_lock);

    (void)ret;
    return node;
}

static bool
destroy_cluster_info(WASMCluster *cluster)
{
    ClusterInfoNode *node = get_cluster_info(cluster);
    if (node) {
        bh_hash_map_destroy(node->thread_info_map);
        destroy_thread_key_value_list(node->thread_list);
        os_mutex_destroy(&node->key_data_list_lock);

        /* Remove from the cluster info list */
        os_mutex_lock(&thread_global_lock);
        bh_list_remove(&cluster_info_list, node);
        wasm_runtime_free(node);
        os_mutex_unlock(&thread_global_lock);

        return true;
    }
    return false;
}

static void
lib_pthread_destroy_callback(WASMCluster *cluster)
{
    destroy_cluster_info(cluster);
}
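
/*
 * The ClusterInfoNode for a cluster is created lazily by the first pthread
 * API call that needs it (see append_thread_info_node and
 * pthread_key_create_wrapper below) and is torn down by the cluster destroy
 * callback registered in lib_pthread_init.
 */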
static void
delete_thread_info_node(ThreadInfoNode *thread_info)
{
    ClusterInfoNode *node;
    bool ret;
    WASMCluster *cluster = wasm_exec_env_get_cluster(thread_info->exec_env);

    if ((node = get_cluster_info(cluster))) {
        ret = bh_hash_map_remove(node->thread_info_map,
                                 (void *)(uintptr_t)thread_info->handle, NULL,
                                 NULL);
        (void)ret;
    }

    thread_info_destroy(thread_info);
}

static bool
append_thread_info_node(ThreadInfoNode *thread_info)
{
    ClusterInfoNode *node;
    WASMCluster *cluster = wasm_exec_env_get_cluster(thread_info->exec_env);

    if (!(node = get_cluster_info(cluster))) {
        if (!(node = create_cluster_info(cluster))) {
            return false;
        }
    }

    if (!bh_hash_map_insert(node->thread_info_map,
                            (void *)(uintptr_t)thread_info->handle,
                            thread_info)) {
        return false;
    }

    return true;
}

static ThreadInfoNode *
get_thread_info(wasm_exec_env_t exec_env, uint32 handle)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    ClusterInfoNode *info = get_cluster_info(cluster);

    if (!info) {
        return NULL;
    }

    return bh_hash_map_find(info->thread_info_map, (void *)(uintptr_t)handle);
}

static uint32
allocate_handle()
{
    uint32 id;
    os_mutex_lock(&thread_global_lock);
    id = handle_id++;
    os_mutex_unlock(&thread_global_lock);
    return id;
}
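
/*
 * Handles are allocated from a single monotonically increasing counter under
 * thread_global_lock, so they are unique across all clusters in the process;
 * the counter would only wrap after 2^32 allocations.
 */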
static void *
pthread_start_routine(void *arg)
{
    wasm_exec_env_t exec_env = (wasm_exec_env_t)arg;
    wasm_exec_env_t parent_exec_env;
    wasm_module_inst_t module_inst = get_module_inst(exec_env);
    ThreadRoutineArgs *routine_args = exec_env->thread_arg;
    ThreadInfoNode *info_node = routine_args->info_node;
    uint32 argv[1];

    parent_exec_env = info_node->parent_exec_env;
    os_mutex_lock(&parent_exec_env->wait_lock);
    info_node->exec_env = exec_env;
    info_node->u.thread = exec_env->handle;
    if (!append_thread_info_node(info_node)) {
        delete_thread_info_node(info_node);
        os_cond_signal(&parent_exec_env->wait_cond);
        os_mutex_unlock(&parent_exec_env->wait_lock);
        return NULL;
    }
    info_node->status = THREAD_RUNNING;
    os_cond_signal(&parent_exec_env->wait_cond);
    os_mutex_unlock(&parent_exec_env->wait_lock);

    wasm_exec_env_set_thread_info(exec_env);
    argv[0] = routine_args->arg;
    if (!wasm_runtime_call_indirect(exec_env, routine_args->elem_index, 1,
                                    argv)) {
        if (wasm_runtime_get_exception(module_inst))
            wasm_cluster_spread_exception(exec_env);
    }

    /* destroy pthread key values */
    call_key_destructor(exec_env);

    wasm_runtime_free(routine_args);

    /* If the thread is joinable, store the result in its info node, so that
       when other threads join this thread after it has exited, we can still
       return the stored result */
    if (!info_node->joinable) {
        delete_thread_info_node(info_node);
    }
    else {
        info_node->u.ret = (void *)(uintptr_t)argv[0];
#ifdef OS_ENABLE_HW_BOUND_CHECK
        if (exec_env->suspend_flags.flags & 0x08)
            /* argv[0] isn't set after longjmp(1) to
               invoke_native_with_hw_bound_check */
            info_node->u.ret = exec_env->thread_ret_value;
#endif
        /* Update node status after the return value was set */
        info_node->status = THREAD_EXIT;
    }

    return (void *)(uintptr_t)argv[0];
}
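
/*
 * Creation handshake: pthread_create_wrapper below holds exec_env->wait_lock
 * and blocks on wait_cond until the routine above has published its exec_env
 * and handle into the ThreadInfoNode and registered it in the cluster map.
 * This guarantees that subsequent pthread calls made with the returned
 * handle can always resolve it to a live exec_env.
 */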
static int
pthread_create_wrapper(wasm_exec_env_t exec_env,
                       uint32 *thread,    /* thread_handle */
                       const void *attr,  /* not supported */
                       uint32 elem_index, /* entry function */
                       uint32 arg)        /* arguments buffer */
{
    wasm_module_t module = get_module(exec_env);
    wasm_module_inst_t module_inst = get_module_inst(exec_env);
    wasm_module_inst_t new_module_inst = NULL;
    ThreadInfoNode *info_node = NULL;
    ThreadRoutineArgs *routine_args = NULL;
    uint32 thread_handle;
    uint32 stack_size = 8192;
    int32 ret = -1;
#if WASM_ENABLE_LIBC_WASI != 0
    WASIContext *wasi_ctx;
#endif

    bh_assert(module);
    bh_assert(module_inst);

#if WASM_ENABLE_INTERP != 0
    if (module_inst->module_type == Wasm_Module_Bytecode) {
        stack_size =
            ((WASMModuleInstance *)module_inst)->default_wasm_stack_size;
    }
#endif

#if WASM_ENABLE_AOT != 0
    if (module_inst->module_type == Wasm_Module_AoT) {
        stack_size =
            ((AOTModuleInstance *)module_inst)->default_wasm_stack_size;
    }
#endif

    if (!(new_module_inst = wasm_runtime_instantiate_internal(
              module, true, stack_size, 0, NULL, 0)))
        return -1;

    /* Set custom_data to new module instance */
    wasm_runtime_set_custom_data_internal(
        new_module_inst, wasm_runtime_get_custom_data(module_inst));

#if WASM_ENABLE_LIBC_WASI != 0
    wasi_ctx = get_wasi_ctx(module_inst);
    if (wasi_ctx)
        wasm_runtime_set_wasi_ctx(new_module_inst, wasi_ctx);
#endif

    if (!(info_node = wasm_runtime_malloc(sizeof(ThreadInfoNode))))
        goto fail;
    memset(info_node, 0, sizeof(ThreadInfoNode));
    thread_handle = allocate_handle();
    info_node->parent_exec_env = exec_env;
    info_node->handle = thread_handle;
    info_node->type = T_THREAD;
    info_node->status = THREAD_INIT;
    info_node->joinable = true;

    if (!(routine_args = wasm_runtime_malloc(sizeof(ThreadRoutineArgs))))
        goto fail;

    routine_args->arg = arg;
    routine_args->elem_index = elem_index;
    routine_args->info_node = info_node;
    routine_args->module_inst = new_module_inst;

    os_mutex_lock(&exec_env->wait_lock);
    ret = wasm_cluster_create_thread(
        exec_env, new_module_inst, pthread_start_routine, (void *)routine_args);
    if (ret != 0) {
        os_mutex_unlock(&exec_env->wait_lock);
        goto fail;
    }

    /* Wait for the thread routine to assign the exec_env to
       thread_info_node, otherwise the exec_env in the thread
       info node may be NULL in the next pthread API call */
    os_cond_wait(&exec_env->wait_cond, &exec_env->wait_lock);
    os_mutex_unlock(&exec_env->wait_lock);

    if (thread)
        *thread = thread_handle;

    return 0;

fail:
    if (new_module_inst)
        wasm_runtime_deinstantiate_internal(new_module_inst, true);
    if (info_node)
        wasm_runtime_free(info_node);
    if (routine_args)
        wasm_runtime_free(routine_args);
    return ret;
}
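
/*
 * For reference, a minimal guest-side sketch (compiled to wasm with a
 * pthread-enabled toolchain) whose pthread_create/pthread_join calls resolve
 * to the wrappers in this file; function and variable names here are
 * illustrative only:
 *
 *     #include <pthread.h>
 *     #include <stdint.h>
 *
 *     static void *worker(void *arg)
 *     {
 *         return (void *)((uintptr_t)arg + 1);
 *     }
 *
 *     int run(void)
 *     {
 *         pthread_t tid;          // holds the uint32 handle returned here
 *         void *result = NULL;
 *         if (pthread_create(&tid, NULL, worker, (void *)41) != 0)
 *             return -1;
 *         pthread_join(tid, &result);
 *         return (int)(uintptr_t)result;  // 42
 *     }
 */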
static int32
pthread_join_wrapper(wasm_exec_env_t exec_env, uint32 thread,
                     int32 retval_offset) /* void **retval */
{
    uint32 *ret;
    int32 join_ret;
    void **retval;
    ThreadInfoNode *node;
    wasm_module_inst_t module_inst;
    wasm_exec_env_t target_exec_env;

    module_inst = get_module_inst(exec_env);

    /* validate addr, we can use the current thread's module
       instance here as the memory is shared */
    if (!validate_app_addr(retval_offset, sizeof(int32))) {
        /* Join failed, but we don't want to terminate all threads,
           so do not spread the exception here */
        wasm_runtime_set_exception(module_inst, NULL);
        return -1;
    }

    retval = (void **)addr_app_to_native(retval_offset);

    node = get_thread_info(exec_env, thread);
    if (!node) {
        /* The thread has exited and was not joinable, return 0 to the app */
        return 0;
    }

    target_exec_env = node->exec_env;
    bh_assert(target_exec_env);

    if (node->status != THREAD_EXIT) {
        /* if the thread is still running, call the platform's join API */
        join_ret = wasm_cluster_join_thread(target_exec_env, (void **)&ret);
    }
    else {
        /* if the thread has exited, return the stored result */
        /* this thread must be joinable, otherwise the
           info_node would have been destroyed on exit */
        bh_assert(node->joinable);
        join_ret = 0;
        ret = node->u.ret;
    }

    if (retval_offset != 0)
        *(uint32 *)retval = (uint32)(uintptr_t)ret;

    return join_ret;
}
static int32
pthread_detach_wrapper(wasm_exec_env_t exec_env, uint32 thread)
{
    ThreadInfoNode *node;
    wasm_exec_env_t target_exec_env;

    node = get_thread_info(exec_env, thread);
    if (!node)
        return 0;

    node->joinable = false;

    target_exec_env = node->exec_env;
    bh_assert(target_exec_env != NULL);

    return wasm_cluster_detach_thread(target_exec_env);
}

static int32
pthread_cancel_wrapper(wasm_exec_env_t exec_env, uint32 thread)
{
    ThreadInfoNode *node;
    wasm_exec_env_t target_exec_env;

    node = get_thread_info(exec_env, thread);
    if (!node)
        return 0;

    node->status = THREAD_CANCELLED;
    node->joinable = false;

    target_exec_env = node->exec_env;
    bh_assert(target_exec_env != NULL);

    return wasm_cluster_cancel_thread(target_exec_env);
}

static int32
pthread_self_wrapper(wasm_exec_env_t exec_env)
{
    ThreadRoutineArgs *args = get_thread_arg(exec_env);
    /* If thread_arg is NULL, it's the exec_env of the main thread,
       return id 0 to the app */
    if (!args)
        return 0;

    return args->info_node->handle;
}

/* emcc uses __pthread_self rather than pthread_self */
static int32
__pthread_self_wrapper(wasm_exec_env_t exec_env)
{
    return pthread_self_wrapper(exec_env);
}

static void
pthread_exit_wrapper(wasm_exec_env_t exec_env, int32 retval_offset)
{
    wasm_module_inst_t module_inst = get_module_inst(exec_env);
    ThreadRoutineArgs *args = get_thread_arg(exec_env);

    /* Exiting the main thread is currently not allowed */
    if (!args)
        return;

#if defined(OS_ENABLE_HW_BOUND_CHECK) && !defined(BH_PLATFORM_WINDOWS)
    /* If hardware bound check is enabled, don't deinstantiate the module inst
       and thread info node here for an AoT module, as they will be freed
       in pthread_start_routine */
    if (exec_env->jmpbuf_stack_top) {
        wasm_cluster_exit_thread(exec_env, (void *)(uintptr_t)retval_offset);
    }
#endif

    /* destroy pthread key values */
    call_key_destructor(exec_env);

    /* routine exit, destroy instance */
    wasm_runtime_deinstantiate_internal(module_inst, true);

    if (!args->info_node->joinable) {
        delete_thread_info_node(args->info_node);
    }
    else {
        args->info_node->u.ret = (void *)(uintptr_t)retval_offset;
        /* Update node status after the return value was set */
        args->info_node->status = THREAD_EXIT;
    }

    wasm_runtime_free(args);

    wasm_cluster_exit_thread(exec_env, (void *)(uintptr_t)retval_offset);
}
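
/*
 * Note: pthread_exit has two exit paths. With hardware bound checking
 * enabled (non-Windows), the early wasm_cluster_exit_thread call unwinds
 * the thread and, as the comment above notes, leaves deinstantiation and
 * thread-info cleanup to pthread_start_routine (which then reads the return
 * value from exec_env->thread_ret_value); otherwise the cleanup is done
 * inline here before the final wasm_cluster_exit_thread call.
 */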
static int32
pthread_mutex_init_wrapper(wasm_exec_env_t exec_env, uint32 *mutex, void *attr)
{
    korp_mutex *pmutex;
    ThreadInfoNode *info_node;

    if (!(pmutex = wasm_runtime_malloc(sizeof(korp_mutex)))) {
        return -1;
    }

    if (os_mutex_init(pmutex) != 0) {
        goto fail1;
    }

    if (!(info_node = wasm_runtime_malloc(sizeof(ThreadInfoNode))))
        goto fail2;
    memset(info_node, 0, sizeof(ThreadInfoNode));
    info_node->exec_env = exec_env;
    info_node->handle = allocate_handle();
    info_node->type = T_MUTEX;
    info_node->u.mutex = pmutex;
    info_node->status = MUTEX_CREATED;

    if (!append_thread_info_node(info_node))
        goto fail3;

    /* Return the mutex handle to app */
    if (mutex)
        *(uint32 *)mutex = info_node->handle;

    return 0;

fail3:
    delete_thread_info_node(info_node);
fail2:
    os_mutex_destroy(pmutex);
fail1:
    wasm_runtime_free(pmutex);

    return -1;
}

static int32
pthread_mutex_lock_wrapper(wasm_exec_env_t exec_env, uint32 *mutex)
{
    ThreadInfoNode *info_node = get_thread_info(exec_env, *mutex);
    if (!info_node || info_node->type != T_MUTEX)
        return -1;

    return os_mutex_lock(info_node->u.mutex);
}

static int32
pthread_mutex_unlock_wrapper(wasm_exec_env_t exec_env, uint32 *mutex)
{
    ThreadInfoNode *info_node = get_thread_info(exec_env, *mutex);
    if (!info_node || info_node->type != T_MUTEX)
        return -1;

    return os_mutex_unlock(info_node->u.mutex);
}

static int32
pthread_mutex_destroy_wrapper(wasm_exec_env_t exec_env, uint32 *mutex)
{
    int32 ret_val;
    ThreadInfoNode *info_node = get_thread_info(exec_env, *mutex);
    if (!info_node || info_node->type != T_MUTEX)
        return -1;

    ret_val = os_mutex_destroy(info_node->u.mutex);

    info_node->status = MUTEX_DESTROYED;
    delete_thread_info_node(info_node);

    return ret_val;
}
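
/*
 * Note: on the app side, pthread_mutex_t (and pthread_cond_t below) only
 * needs to hold the uint32 handle written by the *_init wrapper; every
 * lock/unlock call re-resolves that handle to the host korp_mutex through
 * the cluster's thread_info_map.
 */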
static int32
pthread_cond_init_wrapper(wasm_exec_env_t exec_env, uint32 *cond, void *attr)
{
    korp_cond *pcond;
    ThreadInfoNode *info_node;

    if (!(pcond = wasm_runtime_malloc(sizeof(korp_cond)))) {
        return -1;
    }

    if (os_cond_init(pcond) != 0) {
        goto fail1;
    }

    if (!(info_node = wasm_runtime_malloc(sizeof(ThreadInfoNode))))
        goto fail2;
    memset(info_node, 0, sizeof(ThreadInfoNode));
    info_node->exec_env = exec_env;
    info_node->handle = allocate_handle();
    info_node->type = T_COND;
    info_node->u.cond = pcond;
    info_node->status = COND_CREATED;

    if (!append_thread_info_node(info_node))
        goto fail3;

    /* Return the cond handle to app */
    if (cond)
        *(uint32 *)cond = info_node->handle;

    return 0;

fail3:
    delete_thread_info_node(info_node);
fail2:
    os_cond_destroy(pcond);
fail1:
    wasm_runtime_free(pcond);

    return -1;
}

static int32
pthread_cond_wait_wrapper(wasm_exec_env_t exec_env, uint32 *cond, uint32 *mutex)
{
    ThreadInfoNode *cond_info_node, *mutex_info_node;

    cond_info_node = get_thread_info(exec_env, *cond);
    if (!cond_info_node || cond_info_node->type != T_COND)
        return -1;

    mutex_info_node = get_thread_info(exec_env, *mutex);
    if (!mutex_info_node || mutex_info_node->type != T_MUTEX)
        return -1;

    return os_cond_wait(cond_info_node->u.cond, mutex_info_node->u.mutex);
}

/**
 * Currently we don't support struct timespec in the built-in libc,
 * so pthread_cond_timedwait takes a relative time in microseconds instead
 */
static int32
pthread_cond_timedwait_wrapper(wasm_exec_env_t exec_env, uint32 *cond,
                               uint32 *mutex, uint64 useconds)
{
    ThreadInfoNode *cond_info_node, *mutex_info_node;

    cond_info_node = get_thread_info(exec_env, *cond);
    if (!cond_info_node || cond_info_node->type != T_COND)
        return -1;

    mutex_info_node = get_thread_info(exec_env, *mutex);
    if (!mutex_info_node || mutex_info_node->type != T_MUTEX)
        return -1;

    return os_cond_reltimedwait(cond_info_node->u.cond,
                                mutex_info_node->u.mutex, useconds);
}

static int32
pthread_cond_signal_wrapper(wasm_exec_env_t exec_env, uint32 *cond)
{
    ThreadInfoNode *info_node = get_thread_info(exec_env, *cond);
    if (!info_node || info_node->type != T_COND)
        return -1;

    return os_cond_signal(info_node->u.cond);
}

static int32
pthread_cond_broadcast_wrapper(wasm_exec_env_t exec_env, uint32 *cond)
{
    ThreadInfoNode *info_node = get_thread_info(exec_env, *cond);
    if (!info_node || info_node->type != T_COND)
        return -1;

    return os_cond_broadcast(info_node->u.cond);
}

static int32
pthread_cond_destroy_wrapper(wasm_exec_env_t exec_env, uint32 *cond)
{
    int32 ret_val;
    ThreadInfoNode *info_node = get_thread_info(exec_env, *cond);
    if (!info_node || info_node->type != T_COND)
        return -1;

    ret_val = os_cond_destroy(info_node->u.cond);

    info_node->status = COND_DESTROYED;
    delete_thread_info_node(info_node);

    return ret_val;
}
static int32
pthread_key_create_wrapper(wasm_exec_env_t exec_env, int32 *key,
                           int32 destructor_elem_index)
{
    uint32 i;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    ClusterInfoNode *info = get_cluster_info(cluster);

    if (!info) {
        /* The user may call pthread_key_create in the main thread,
           in which case the cluster info hasn't been created yet */
        if (!(info = create_cluster_info(cluster))) {
            return -1;
        }
    }

    os_mutex_lock(&info->key_data_list_lock);
    for (i = 0; i < WAMR_PTHREAD_KEYS_MAX; i++) {
        if (!info->key_data_list[i].is_created) {
            break;
        }
    }

    if (i == WAMR_PTHREAD_KEYS_MAX) {
        os_mutex_unlock(&info->key_data_list_lock);
        return -1;
    }

    info->key_data_list[i].destructor_func = destructor_elem_index;
    info->key_data_list[i].is_created = true;
    *key = i;
    os_mutex_unlock(&info->key_data_list_lock);

    return 0;
}

static int32
pthread_setspecific_wrapper(wasm_exec_env_t exec_env, int32 key,
                            int32 value_offset)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    ClusterInfoNode *info = get_cluster_info(cluster);
    int32 *key_values;

    if (!info)
        return -1;

    os_mutex_lock(&info->key_data_list_lock);

    key_values = key_value_list_lookup_or_create(exec_env, info, key);
    if (!key_values) {
        os_mutex_unlock(&info->key_data_list_lock);
        return -1;
    }

    key_values[key] = value_offset;
    os_mutex_unlock(&info->key_data_list_lock);

    return 0;
}

static int32
pthread_getspecific_wrapper(wasm_exec_env_t exec_env, int32 key)
{
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    ClusterInfoNode *info = get_cluster_info(cluster);
    int32 ret, *key_values;

    if (!info)
        return 0;

    os_mutex_lock(&info->key_data_list_lock);

    key_values = key_value_list_lookup_or_create(exec_env, info, key);
    if (!key_values) {
        os_mutex_unlock(&info->key_data_list_lock);
        return 0;
    }

    ret = key_values[key];
    os_mutex_unlock(&info->key_data_list_lock);

    return ret;
}

static int32
pthread_key_delete_wrapper(wasm_exec_env_t exec_env, int32 key)
{
    KeyData *data;
    WASMCluster *cluster = wasm_exec_env_get_cluster(exec_env);
    ClusterInfoNode *info = get_cluster_info(cluster);

    if (!info)
        return -1;

    os_mutex_lock(&info->key_data_list_lock);
    data = key_data_list_lookup(exec_env, key);
    if (!data) {
        os_mutex_unlock(&info->key_data_list_lock);
        return -1;
    }

    memset(data, 0, sizeof(KeyData));
    os_mutex_unlock(&info->key_data_list_lock);

    return 0;
}
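
/*
 * For reference, the values stored through pthread_setspecific are app-space
 * offsets (int32), so guest code can use pthread keys as usual and the
 * per-key destructor runs on thread exit via call_key_destructor. A minimal
 * illustrative sketch (names are hypothetical):
 *
 *     static pthread_key_t buf_key;
 *
 *     static void free_buf(void *p) { free(p); }
 *
 *     void tls_demo(void)
 *     {
 *         pthread_key_create(&buf_key, free_buf);
 *         pthread_setspecific(buf_key, malloc(64));
 *         char *buf = pthread_getspecific(buf_key);
 *         (void)buf;   // free_buf runs when this thread exits
 *     }
 */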
/**
 * Currently the memory allocator doesn't support allocating memory with a
 * specific alignment, so we wrap posix_memalign as a simple malloc
 */
static int32
posix_memalign_wrapper(wasm_exec_env_t exec_env, void **memptr, int32 align,
                       int32 size)
{
    wasm_module_inst_t module_inst = get_module_inst(exec_env);
    void *p = NULL;

    *((int32 *)memptr) = module_malloc(size, (void **)&p);
    if (!p)
        return -1;

    return 0;
}
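
/*
 * Note: because the align argument is ignored above, callers that rely on
 * an alignment stricter than the app heap allocator's default won't get it;
 * the wrapper only reports failure when the allocation itself fails.
 */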
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0

static int32
sem_open_wrapper(wasm_exec_env_t exec_env, const char *name, int32 oflags,
                 int32 mode, int32 val)
{
    korp_sem *psem = NULL;
    ThreadInfoNode *info_node = NULL;

    /**
     * On an RTOS, the global semaphore map is safe for sharing the same
     * semaphore between tasks/pthreads.
     * On Unix-like systems, it is intended for sharing between multiple
     * processes.
     */
    if ((info_node = bh_hash_map_find(sem_info_map, (void *)name))) {
        return info_node->handle;
    }

    if (!(psem = os_sem_open(name, oflags, mode, val))) {
        goto fail1;
    }

    if (!(info_node = wasm_runtime_malloc(sizeof(ThreadInfoNode))))
        goto fail2;
    memset(info_node, 0, sizeof(ThreadInfoNode));
    info_node->exec_env = exec_env;
    info_node->handle = allocate_handle();
    info_node->type = T_SEM;
    info_node->u.sem = psem;
    info_node->status = SEM_CREATED;

    if (!bh_hash_map_insert(sem_info_map, (void *)name, info_node))
        goto fail3;

    return info_node->handle;

fail3:
    wasm_runtime_free(info_node);
fail2:
    os_sem_close(psem);
fail1:
    return -1;
}

void
sem_fetch_cb(void *key, void *value, void *user_data)
{
    (void)key;
    SemCallbackArgs *args = user_data;
    ThreadInfoNode *info_node = value;
    if (args->handle == info_node->handle && info_node->status == SEM_CREATED) {
        args->node = info_node;
    }
}

static int32
sem_close_wrapper(wasm_exec_env_t exec_env, uint32 sem)
{
    (void)exec_env;
    int ret = -1;
    SemCallbackArgs args = { sem, NULL };

    bh_hash_map_traverse(sem_info_map, sem_fetch_cb, &args);

    if (args.node) {
        ret = os_sem_close(args.node->u.sem);
        if (ret == 0) {
            args.node->status = SEM_CLOSED;
        }
    }

    return ret;
}

static int32
sem_wait_wrapper(wasm_exec_env_t exec_env, uint32 sem)
{
    (void)exec_env;
    SemCallbackArgs args = { sem, NULL };

    bh_hash_map_traverse(sem_info_map, sem_fetch_cb, &args);

    if (args.node) {
        return os_sem_wait(args.node->u.sem);
    }

    return -1;
}

static int32
sem_trywait_wrapper(wasm_exec_env_t exec_env, uint32 sem)
{
    (void)exec_env;
    SemCallbackArgs args = { sem, NULL };

    bh_hash_map_traverse(sem_info_map, sem_fetch_cb, &args);

    if (args.node) {
        return os_sem_trywait(args.node->u.sem);
    }

    return -1;
}

static int32
sem_post_wrapper(wasm_exec_env_t exec_env, uint32 sem)
{
    (void)exec_env;
    SemCallbackArgs args = { sem, NULL };

    bh_hash_map_traverse(sem_info_map, sem_fetch_cb, &args);

    if (args.node) {
        return os_sem_post(args.node->u.sem);
    }

    return -1;
}

static int32
sem_getvalue_wrapper(wasm_exec_env_t exec_env, uint32 sem, int32 *sval)
{
    int32 ret = -1;
    wasm_module_inst_t module_inst = get_module_inst(exec_env);
    (void)exec_env;
    SemCallbackArgs args = { sem, NULL };

    if (validate_native_addr(sval, sizeof(int32))) {
        bh_hash_map_traverse(sem_info_map, sem_fetch_cb, &args);

        if (args.node) {
            ret = os_sem_getvalue(args.node->u.sem, sval);
        }
    }
    return ret;
}

static int32
sem_unlink_wrapper(wasm_exec_env_t exec_env, const char *name)
{
    (void)exec_env;
    int32 ret_val;

    ThreadInfoNode *info_node = bh_hash_map_find(sem_info_map, (void *)name);
    if (!info_node || info_node->type != T_SEM)
        return -1;

    if (info_node->status != SEM_CLOSED) {
        ret_val = os_sem_close(info_node->u.sem);
        if (ret_val != 0) {
            return ret_val;
        }
    }

    ret_val = os_sem_unlink(name);

    if (ret_val == 0) {
        bh_hash_map_remove(sem_info_map, (void *)name, NULL, NULL);
        info_node->status = SEM_DESTROYED;
        thread_info_destroy(info_node);
    }
    return ret_val;
}

#endif
/* clang-format off */
#define REG_NATIVE_FUNC(func_name, signature) \
    { #func_name, func_name##_wrapper, signature, NULL }
/* clang-format on */

static NativeSymbol native_symbols_lib_pthread[] = {
    REG_NATIVE_FUNC(pthread_create, "(**ii)i"),
    REG_NATIVE_FUNC(pthread_join, "(ii)i"),
    REG_NATIVE_FUNC(pthread_detach, "(i)i"),
    REG_NATIVE_FUNC(pthread_cancel, "(i)i"),
    REG_NATIVE_FUNC(pthread_self, "()i"),
    REG_NATIVE_FUNC(__pthread_self, "()i"),
    REG_NATIVE_FUNC(pthread_exit, "(i)"),
    REG_NATIVE_FUNC(pthread_mutex_init, "(**)i"),
    REG_NATIVE_FUNC(pthread_mutex_lock, "(*)i"),
    REG_NATIVE_FUNC(pthread_mutex_unlock, "(*)i"),
    REG_NATIVE_FUNC(pthread_mutex_destroy, "(*)i"),
    REG_NATIVE_FUNC(pthread_cond_init, "(**)i"),
    REG_NATIVE_FUNC(pthread_cond_wait, "(**)i"),
    REG_NATIVE_FUNC(pthread_cond_timedwait, "(**I)i"),
    REG_NATIVE_FUNC(pthread_cond_signal, "(*)i"),
    REG_NATIVE_FUNC(pthread_cond_broadcast, "(*)i"),
    REG_NATIVE_FUNC(pthread_cond_destroy, "(*)i"),
    REG_NATIVE_FUNC(pthread_key_create, "(*i)i"),
    REG_NATIVE_FUNC(pthread_setspecific, "(ii)i"),
    REG_NATIVE_FUNC(pthread_getspecific, "(i)i"),
    REG_NATIVE_FUNC(pthread_key_delete, "(i)i"),
    REG_NATIVE_FUNC(posix_memalign, "(*ii)i"),
#if WASM_ENABLE_LIB_PTHREAD_SEMAPHORE != 0
    REG_NATIVE_FUNC(sem_open, "($iii)i"),
    REG_NATIVE_FUNC(sem_close, "(i)i"),
    REG_NATIVE_FUNC(sem_wait, "(i)i"),
    REG_NATIVE_FUNC(sem_trywait, "(i)i"),
    REG_NATIVE_FUNC(sem_post, "(i)i"),
    REG_NATIVE_FUNC(sem_getvalue, "(i*)i"),
    REG_NATIVE_FUNC(sem_unlink, "($)i"),
#endif
};
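
/*
 * In the signature strings above, the characters inside "()" describe the
 * wasm parameters and the trailing character the result: 'i' is i32, 'I' is
 * i64, '*' is an app-space address that the runtime validates and converts
 * to a native pointer, and '$' is an app-space string that is likewise
 * validated and converted (per WAMR's native API signature convention).
 */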
uint32
get_lib_pthread_export_apis(NativeSymbol **p_lib_pthread_apis)
{
    *p_lib_pthread_apis = native_symbols_lib_pthread;
    return sizeof(native_symbols_lib_pthread) / sizeof(NativeSymbol);
}
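
/*
 * A rough sketch of how the runtime's native-symbol bootstrap is expected to
 * consume this export and register the wrappers under the "env" module name
 * (the exact call site lives in the runtime's wasm_native initialization
 * code, so treat this snippet as illustrative only):
 *
 *     NativeSymbol *syms;
 *     uint32 n = get_lib_pthread_export_apis(&syms);
 *     if (!wasm_native_register_natives("env", syms, n))
 *         return false;
 */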