wasm_shared_memory.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "bh_log.h"
#include "wasm_shared_memory.h"
#if WASM_ENABLE_THREAD_MGR != 0
#include "../libraries/thread-mgr/thread_manager.h"
#endif
#if WASM_ENABLE_AOT != 0
#include "../aot/aot_runtime.h"
#endif

/*
 * Note: this lock could be per-memory.
 *
 * For now, just use a global because:
 * - it's a bit cumbersome to extend WASMMemoryInstance w/o breaking
 *   the AOT ABI.
 * - if you care about performance, it's better to make the interpreters
 *   use atomic ops.
 */
korp_mutex g_shared_memory_lock;

/* clang-format off */
enum {
    S_WAITING,
    S_NOTIFIED
};
/* clang-format on */

typedef struct AtomicWaitInfo {
    bh_list wait_list_head;
    bh_list *wait_list;
    /* WARNING: inserting into the list is allowed only in
       acquire_wait_info, otherwise there will be a data race
       as described in PR #2016 */
} AtomicWaitInfo;

typedef struct AtomicWaitNode {
    bh_list_link l;
    uint8 status;
    korp_cond wait_cond;
} AtomicWaitNode;

/* Atomic wait map */
static HashMap *wait_map;

static uint32
wait_address_hash(const void *address);

static bool
wait_address_equal(void *h1, void *h2);

static void
destroy_wait_info(void *wait_info);

bool
wasm_shared_memory_init()
{
    if (os_mutex_init(&g_shared_memory_lock) != 0)
        return false;
    /* the wait map doesn't exist yet, create a new one */
    if (!(wait_map = bh_hash_map_create(32, true, (HashFunc)wait_address_hash,
                                        (KeyEqualFunc)wait_address_equal, NULL,
                                        destroy_wait_info))) {
        os_mutex_destroy(&g_shared_memory_lock);
        return false;
    }
    return true;
}

void
wasm_shared_memory_destroy()
{
    bh_hash_map_destroy(wait_map);
    os_mutex_destroy(&g_shared_memory_lock);
}

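/*
 * Illustrative usage sketch (not part of this file): an embedder is
 * expected to pair the two calls above around the runtime's lifetime.
 *
 *   if (!wasm_shared_memory_init())
 *       return false;             // mutex init or wait map creation failed
 *   ... instantiate and run modules ...
 *   wasm_shared_memory_destroy(); // frees any remaining wait infos via
 *                                 // destroy_wait_info, then the lock
 */
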
uint16
shared_memory_inc_reference(WASMMemoryInstance *memory)
{
    bh_assert(shared_memory_is_shared(memory));
    uint16 old;
#if BH_ATOMIC_16_IS_ATOMIC == 0
    os_mutex_lock(&g_shared_memory_lock);
#endif
    old = BH_ATOMIC_16_FETCH_ADD(memory->ref_count, 1);
#if BH_ATOMIC_16_IS_ATOMIC == 0
    os_mutex_unlock(&g_shared_memory_lock);
#endif
    bh_assert(old >= 1);
    bh_assert(old < UINT16_MAX);
    return old + 1;
}

uint16
shared_memory_dec_reference(WASMMemoryInstance *memory)
{
    bh_assert(shared_memory_is_shared(memory));
    uint16 old;
#if BH_ATOMIC_16_IS_ATOMIC == 0
    os_mutex_lock(&g_shared_memory_lock);
#endif
    old = BH_ATOMIC_16_FETCH_SUB(memory->ref_count, 1);
#if BH_ATOMIC_16_IS_ATOMIC == 0
    os_mutex_unlock(&g_shared_memory_lock);
#endif
    bh_assert(old > 0);
    return old - 1;
}

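/*
 * Illustrative sketch (hypothetical caller, not defined in this file):
 * instantiation/deinstantiation paths are expected to use these counters
 * so that a shared memory is only torn down by its last owner:
 *
 *   shared_memory_inc_reference(memory);  // another instance imports it
 *   ...
 *   if (shared_memory_dec_reference(memory) == 0) {
 *       // last reference dropped; the caller may now release `memory`
 *   }
 */
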
static korp_mutex *
shared_memory_get_lock_pointer(WASMMemoryInstance *memory)
{
    bh_assert(memory != NULL);
    return &g_shared_memory_lock;
}

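/*
 * Note: every memory currently resolves to the same global lock (see the
 * comment at the top of this file); the `memory` parameter is kept so the
 * signature can stay stable if the lock ever becomes per-memory.
 */
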
/* Atomics wait && notify APIs */

static uint32
wait_address_hash(const void *address)
{
    return (uint32)(uintptr_t)address;
}

static bool
wait_address_equal(void *h1, void *h2)
{
    return h1 == h2;
}

static bool
is_wait_node_exists(bh_list *wait_list, AtomicWaitNode *node)
{
    AtomicWaitNode *curr;
    curr = bh_list_first_elem(wait_list);

    while (curr) {
        if (curr == node) {
            return true;
        }
        curr = bh_list_elem_next(curr);
    }

    return false;
}

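/* Wake up to `count` waiters on the list. Must be called with
   g_shared_memory_lock held: it walks the wait list and flips each node's
   status, which the waiters re-check under the same lock. */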
static uint32
notify_wait_list(bh_list *wait_list, uint32 count)
{
    AtomicWaitNode *node, *next;
    uint32 i, notify_count = count;

    if (count > wait_list->len)
        notify_count = wait_list->len;

    node = bh_list_first_elem(wait_list);
    if (!node)
        return 0;

    for (i = 0; i < notify_count; i++) {
        bh_assert(node);
        next = bh_list_elem_next(node);
        node->status = S_NOTIFIED;
        /* wakeup */
        os_cond_signal(&node->wait_cond);
        node = next;
    }

    return notify_count;
}

static AtomicWaitInfo *
acquire_wait_info(void *address, AtomicWaitNode *wait_node)
{
    AtomicWaitInfo *wait_info = NULL;
    bh_list_status ret;

    bh_assert(address != NULL);

    wait_info = (AtomicWaitInfo *)bh_hash_map_find(wait_map, address);

    if (!wait_node) {
        return wait_info;
    }

    /* no wait info on this address yet, create a new one */
    if (!wait_info) {
        if (!(wait_info = (AtomicWaitInfo *)wasm_runtime_malloc(
                  sizeof(AtomicWaitInfo)))) {
            return NULL;
        }
        memset(wait_info, 0, sizeof(AtomicWaitInfo));

        /* init the wait list */
        wait_info->wait_list = &wait_info->wait_list_head;
        ret = bh_list_init(wait_info->wait_list);
        bh_assert(ret == BH_LIST_SUCCESS);
        (void)ret;

        if (!bh_hash_map_insert(wait_map, address, (void *)wait_info)) {
            wasm_runtime_free(wait_info);
            return NULL;
        }
    }

    ret = bh_list_insert(wait_info->wait_list, wait_node);
    bh_assert(ret == BH_LIST_SUCCESS);
    (void)ret;

    return wait_info;
}

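/*
 * Note: acquire_wait_info() doubles as a lookup. A NULL wait_node (as
 * passed by wasm_runtime_atomic_notify) only queries the map, while a
 * non-NULL node registers a waiter, creating the per-address info on
 * demand. Either way the caller must hold g_shared_memory_lock, which is
 * why list insertion is confined to this function (see the WARNING in
 * AtomicWaitInfo).
 */
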
static void
destroy_wait_info(void *wait_info)
{
    AtomicWaitNode *node, *next;

    if (wait_info) {
        node = bh_list_first_elem(((AtomicWaitInfo *)wait_info)->wait_list);

        while (node) {
            next = bh_list_elem_next(node);
            os_cond_destroy(&node->wait_cond);
            wasm_runtime_free(node);
            node = next;
        }
        wasm_runtime_free(wait_info);
    }
}

static void
map_try_release_wait_info(HashMap *wait_hash_map, AtomicWaitInfo *wait_info,
                          void *address)
{
    if (wait_info->wait_list->len > 0) {
        return;
    }

    bh_hash_map_remove(wait_hash_map, address, NULL, NULL);
    destroy_wait_info(wait_info);
}

#if WASM_ENABLE_SHARED_HEAP != 0
static bool
is_native_addr_in_shared_heap(WASMModuleInstanceCommon *module_inst,
                              uint8 *addr, uint64 bytes)
{
    WASMSharedHeap *shared_heap = NULL;

    if (bytes > APP_HEAP_SIZE_MAX) {
        return false;
    }

#if WASM_ENABLE_INTERP != 0
    if (module_inst->module_type == Wasm_Module_Bytecode) {
        shared_heap = ((WASMModuleInstance *)module_inst)->e->shared_heap;
    }
#endif

#if WASM_ENABLE_AOT != 0
    if (module_inst->module_type == Wasm_Module_AoT) {
        AOTModuleInstanceExtra *e =
            (AOTModuleInstanceExtra *)((AOTModuleInstance *)module_inst)->e;
        shared_heap = e->shared_heap;
    }
#endif

    return shared_heap && addr >= shared_heap->base_addr
           && addr + bytes <= shared_heap->base_addr + shared_heap->size;
}
#endif

uint32
wasm_runtime_atomic_wait(WASMModuleInstanceCommon *module, void *address,
                         uint64 expect, int64 timeout, bool wait64)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    AtomicWaitInfo *wait_info;
    AtomicWaitNode *wait_node;
    korp_mutex *lock;
#if WASM_ENABLE_THREAD_MGR != 0
    WASMExecEnv *exec_env;
#endif
    uint64 timeout_left, timeout_wait, timeout_1sec;
    bool check_ret, is_timeout, no_wait;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    if (wasm_copy_exception(module_inst, NULL)) {
        return -1;
    }

    /* Currently we have only one memory instance */
    if (!shared_memory_is_shared(module_inst->memories[0])) {
        wasm_runtime_set_exception(module, "expected shared memory");
        return -1;
    }

    shared_memory_lock(module_inst->memories[0]);
    if (
#if WASM_ENABLE_SHARED_HEAP != 0
        /* not in the shared heap */
        !is_native_addr_in_shared_heap((WASMModuleInstanceCommon *)module_inst,
                                       address, wait64 ? 8 : 4)
        &&
#endif
        /* and not in linear memory */
        ((uint8 *)address < module_inst->memories[0]->memory_data
         || (uint8 *)address + (wait64 ? 8 : 4)
                > module_inst->memories[0]->memory_data_end)) {
        shared_memory_unlock(module_inst->memories[0]);
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }
    shared_memory_unlock(module_inst->memories[0]);

#if WASM_ENABLE_THREAD_MGR != 0
    exec_env =
        wasm_clusters_search_exec_env((WASMModuleInstanceCommon *)module_inst);
    bh_assert(exec_env);
#endif

    lock = shared_memory_get_lock_pointer(module_inst->memories[0]);

    /* Lock the shared_mem_lock for the whole atomic wait process,
       and use it for os_cond_reltimedwait */
    os_mutex_lock(lock);

    no_wait = (!wait64 && *(uint32 *)address != (uint32)expect)
              || (wait64 && *(uint64 *)address != expect);

    if (no_wait) {
        os_mutex_unlock(lock);
        return 1;
    }

    if (!(wait_node = wasm_runtime_malloc(sizeof(AtomicWaitNode)))) {
        os_mutex_unlock(lock);
        wasm_runtime_set_exception(module, "failed to create wait node");
        return -1;
    }
    memset(wait_node, 0, sizeof(AtomicWaitNode));

    if (0 != os_cond_init(&wait_node->wait_cond)) {
        os_mutex_unlock(lock);
        wasm_runtime_free(wait_node);
        wasm_runtime_set_exception(module, "failed to init wait cond");
        return -1;
    }
    wait_node->status = S_WAITING;

    /* Acquire the wait info, creating a new one if it doesn't exist */
    wait_info = acquire_wait_info(address, wait_node);
    if (!wait_info) {
        os_mutex_unlock(lock);
        os_cond_destroy(&wait_node->wait_cond);
        wasm_runtime_free(wait_node);
        wasm_runtime_set_exception(module, "failed to acquire wait_info");
        return -1;
    }

    /* the unit of timeout is nsec, convert it to usec */
    timeout_left = (uint64)timeout / 1000;
    timeout_1sec = (uint64)1e6;

    while (1) {
        if (timeout < 0) {
            /* wait forever until notified or terminated;
               here we keep waiting and re-checking every second */
            os_cond_reltimedwait(&wait_node->wait_cond, lock,
                                 (uint64)timeout_1sec);
            if (wait_node->status == S_NOTIFIED /* notified by atomic.notify */
#if WASM_ENABLE_THREAD_MGR != 0
                /* terminated by another thread */
                || wasm_cluster_is_thread_terminated(exec_env)
#endif
            ) {
                break;
            }
        }
        else {
            timeout_wait =
                timeout_left < timeout_1sec ? timeout_left : timeout_1sec;
            os_cond_reltimedwait(&wait_node->wait_cond, lock, timeout_wait);
            if (wait_node->status == S_NOTIFIED /* notified by atomic.notify */
                || timeout_left <= timeout_wait /* timed out */
#if WASM_ENABLE_THREAD_MGR != 0
                /* terminated by another thread */
                || wasm_cluster_is_thread_terminated(exec_env)
#endif
            ) {
                break;
            }
            timeout_left -= timeout_wait;
        }
    }

    is_timeout = wait_node->status == S_WAITING;

    check_ret = is_wait_node_exists(wait_info->wait_list, wait_node);
    bh_assert(check_ret);
    (void)check_ret;

    /* Remove the wait node from the wait list */
    bh_list_remove(wait_info->wait_list, wait_node);
    os_cond_destroy(&wait_node->wait_cond);
    wasm_runtime_free(wait_node);

    /* Release the wait info if no wait nodes are attached */
    map_try_release_wait_info(wait_map, wait_info, address);

    os_mutex_unlock(lock);

    return is_timeout ? 2 : 0;
}

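/*
 * Illustrative sketch (hypothetical interpreter code, not part of this
 * file): an opcode handler for memory.atomic.wait32 would resolve the app
 * offset to a native address and map the return codes back to the
 * wasm-level results:
 *
 *   uint32 ret = wasm_runtime_atomic_wait(module_inst, native_addr,
 *                                         (uint64)expected, timeout_ns,
 *                                         false);
 *   // 0: woken by notify ("ok"), 1: value mismatch ("not-equal"),
 *   // 2: timed out ("timed-out"), (uint32)-1: exception already set
 */
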
uint32
wasm_runtime_atomic_notify(WASMModuleInstanceCommon *module, void *address,
                           uint32 count)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    uint32 notify_result;
    AtomicWaitInfo *wait_info;
    korp_mutex *lock;
    bool out_of_bounds;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    shared_memory_lock(module_inst->memories[0]);
    out_of_bounds =
#if WASM_ENABLE_SHARED_HEAP != 0
        /* not in the shared heap */
        !is_native_addr_in_shared_heap(module, address, 4) &&
#endif
        /* and not in linear memory */
        ((uint8 *)address < module_inst->memories[0]->memory_data
         || (uint8 *)address + 4 > module_inst->memories[0]->memory_data_end);
    shared_memory_unlock(module_inst->memories[0]);

    if (out_of_bounds) {
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }

    /* Currently we have only one memory instance */
    if (!shared_memory_is_shared(module_inst->memories[0])) {
        /* Always return 0 for unshared linear memory since there is
           no way to create a waiter on it */
        return 0;
    }

    lock = shared_memory_get_lock_pointer(module_inst->memories[0]);

    /* Lock the shared_mem_lock for the whole atomic notify process,
       and use it for os_cond_signal */
    os_mutex_lock(lock);

    wait_info = acquire_wait_info(address, NULL);

    /* Nobody is waiting on this address */
    if (!wait_info) {
        os_mutex_unlock(lock);
        return 0;
    }

    /* Notify each wait node in the wait list */
    notify_result = notify_wait_list(wait_info->wait_list, count);

    os_mutex_unlock(lock);

    return notify_result;
}

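/*
 * Illustrative sketch (hypothetical caller): memory.atomic.notify maps
 * directly onto this function; the result is the number of waiters
 * actually woken, which may be less than `count`:
 *
 *   uint32 woken = wasm_runtime_atomic_notify(module_inst, native_addr,
 *                                             count);
 *   if (woken == (uint32)-1) {
 *       // out-of-bounds address, exception already set on the instance
 *   }
 */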