wasm_shared_memory.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "bh_log.h"
#include "wasm_shared_memory.h"
#if WASM_ENABLE_THREAD_MGR != 0
#include "../libraries/thread-mgr/thread_manager.h"
#endif
#if WASM_ENABLE_AOT != 0
#include "../aot/aot_runtime.h"
#endif

/*
 * Note: this lock could be made per-memory.
 *
 * For now, just use a global one because:
 * - it's a bit cumbersome to extend WASMMemoryInstance without breaking
 *   the AOT ABI.
 * - if you care about performance, it's better to make the interpreters
 *   use atomic ops anyway.
 */
korp_mutex g_shared_memory_lock;

/* clang-format off */
enum {
    S_WAITING,
    S_NOTIFIED
};
/* clang-format on */

typedef struct AtomicWaitInfo {
    bh_list wait_list_head;
    bh_list *wait_list;
    /* WARNING: inserting into the list is allowed only in
       acquire_wait_info, otherwise there will be a data race
       as described in PR #2016 */
} AtomicWaitInfo;

typedef struct AtomicWaitNode {
    bh_list_link l;
    uint8 status;
    korp_cond wait_cond;
} AtomicWaitNode;

/* Atomic wait map */
static HashMap *wait_map;

static uint32
wait_address_hash(const void *address);

static bool
wait_address_equal(void *h1, void *h2);

static void
destroy_wait_info(void *wait_info);

bool
wasm_shared_memory_init()
{
    if (os_mutex_init(&g_shared_memory_lock) != 0)
        return false;
    /* the wait map does not exist yet, create a new one */
    if (!(wait_map = bh_hash_map_create(32, true, (HashFunc)wait_address_hash,
                                        (KeyEqualFunc)wait_address_equal, NULL,
                                        destroy_wait_info))) {
        os_mutex_destroy(&g_shared_memory_lock);
        return false;
    }
    return true;
}

void
wasm_shared_memory_destroy()
{
    bh_hash_map_destroy(wait_map);
    os_mutex_destroy(&g_shared_memory_lock);
}
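/*
 * Usage sketch (illustrative, not part of this file): an embedder is
 * expected to call the init/destroy pair exactly once around the
 * runtime's lifetime; the surrounding call sites below are assumed:
 *
 *   if (!wasm_shared_memory_init())
 *       return false;       // out of resources, abort startup
 *   // ... instantiate and run modules ...
 *   wasm_shared_memory_destroy();
 */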
uint16
shared_memory_inc_reference(WASMMemoryInstance *memory)
{
    bh_assert(shared_memory_is_shared(memory));
    uint16 old;
#if BH_ATOMIC_16_IS_ATOMIC == 0
    os_mutex_lock(&g_shared_memory_lock);
#endif
    old = BH_ATOMIC_16_FETCH_ADD(memory->ref_count, 1);
#if BH_ATOMIC_16_IS_ATOMIC == 0
    os_mutex_unlock(&g_shared_memory_lock);
#endif
    bh_assert(old >= 1);
    bh_assert(old < UINT16_MAX);
    return old + 1;
}

uint16
shared_memory_dec_reference(WASMMemoryInstance *memory)
{
    bh_assert(shared_memory_is_shared(memory));
    uint16 old;
#if BH_ATOMIC_16_IS_ATOMIC == 0
    os_mutex_lock(&g_shared_memory_lock);
#endif
    old = BH_ATOMIC_16_FETCH_SUB(memory->ref_count, 1);
#if BH_ATOMIC_16_IS_ATOMIC == 0
    os_mutex_unlock(&g_shared_memory_lock);
#endif
    bh_assert(old > 0);
    return old - 1;
}
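/*
 * Reference-count usage sketch (illustrative; the caller shown here is
 * an assumption, not code from this file): each instance sharing the
 * memory holds one reference, and the memory is actually released only
 * by whoever drops the count to zero:
 *
 *   if (shared_memory_dec_reference(memory) == 0) {
 *       // last owner: release the underlying memory
 *   }
 */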
static korp_mutex *
shared_memory_get_lock_pointer(WASMMemoryInstance *memory)
{
    bh_assert(memory != NULL);
    return &g_shared_memory_lock;
}

/* Atomics wait && notify APIs */
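/*
 * The wait map is keyed directly by the native address being waited
 * on: the hash is just the pointer value truncated to 32 bits, and
 * two keys are equal only if they are the same pointer.
 */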
static uint32
wait_address_hash(const void *address)
{
    return (uint32)(uintptr_t)address;
}

static bool
wait_address_equal(void *h1, void *h2)
{
    return h1 == h2;
}

static bool
is_wait_node_exists(bh_list *wait_list, AtomicWaitNode *node)
{
    AtomicWaitNode *curr;
    curr = bh_list_first_elem(wait_list);

    while (curr) {
        if (curr == node) {
            return true;
        }
        curr = bh_list_elem_next(curr);
    }

    return false;
}

static uint32
notify_wait_list(bh_list *wait_list, uint32 count)
{
    AtomicWaitNode *node, *next;
    uint32 i, notify_count = count;

    if (count > wait_list->len)
        notify_count = wait_list->len;

    node = bh_list_first_elem(wait_list);
    if (!node)
        return 0;

    for (i = 0; i < notify_count; i++) {
        bh_assert(node);
        next = bh_list_elem_next(node);
        node->status = S_NOTIFIED;
        /* wake up the waiter */
        os_cond_signal(&node->wait_cond);
        node = next;
    }

    return notify_count;
}
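/*
 * Note that notify_wait_list only marks a node as S_NOTIFIED and
 * signals its condition variable; the node stays on the list. Each
 * waiter removes and frees its own node when it wakes up in
 * wasm_runtime_atomic_wait below, which keeps node ownership simple.
 */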
static AtomicWaitInfo *
acquire_wait_info(void *address, AtomicWaitNode *wait_node)
{
    AtomicWaitInfo *wait_info = NULL;
    bh_list_status ret;

    bh_assert(address != NULL);

    wait_info = (AtomicWaitInfo *)bh_hash_map_find(wait_map, address);

    if (!wait_node) {
        return wait_info;
    }

    /* No wait info for this address yet, create a new one */
    if (!wait_info) {
        if (!(wait_info = (AtomicWaitInfo *)wasm_runtime_malloc(
                  sizeof(AtomicWaitInfo)))) {
            return NULL;
        }
        memset(wait_info, 0, sizeof(AtomicWaitInfo));

        /* init the wait list */
        wait_info->wait_list = &wait_info->wait_list_head;
        ret = bh_list_init(wait_info->wait_list);
        bh_assert(ret == BH_LIST_SUCCESS);
        (void)ret;

        if (!bh_hash_map_insert(wait_map, address, (void *)wait_info)) {
            wasm_runtime_free(wait_info);
            return NULL;
        }
    }

    ret = bh_list_insert(wait_info->wait_list, wait_node);
    bh_assert(ret == BH_LIST_SUCCESS);
    (void)ret;

    return wait_info;
}

static void
destroy_wait_info(void *wait_info)
{
    AtomicWaitNode *node, *next;

    if (wait_info) {
        node = bh_list_first_elem(((AtomicWaitInfo *)wait_info)->wait_list);

        while (node) {
            next = bh_list_elem_next(node);
            os_cond_destroy(&node->wait_cond);
            wasm_runtime_free(node);
            node = next;
        }

        wasm_runtime_free(wait_info);
    }
}

static void
map_try_release_wait_info(HashMap *wait_hash_map, AtomicWaitInfo *wait_info,
                          void *address)
{
    if (wait_info->wait_list->len > 0) {
        return;
    }

    bh_hash_map_remove(wait_hash_map, address, NULL, NULL);
    destroy_wait_info(wait_info);
}

#if WASM_ENABLE_SHARED_HEAP != 0
static bool
is_native_addr_in_shared_heap(WASMModuleInstanceCommon *module_inst,
                              uint8 *addr, uint32 bytes)
{
    WASMSharedHeap *shared_heap = NULL;

#if WASM_ENABLE_INTERP != 0
    if (module_inst->module_type == Wasm_Module_Bytecode) {
        shared_heap = ((WASMModuleInstance *)module_inst)->e->shared_heap;
    }
#endif
#if WASM_ENABLE_AOT != 0
    if (module_inst->module_type == Wasm_Module_AoT) {
        AOTModuleInstanceExtra *e =
            (AOTModuleInstanceExtra *)((AOTModuleInstance *)module_inst)->e;
        shared_heap = e->shared_heap;
    }
#endif

    return shared_heap && addr >= shared_heap->base_addr
           && addr + bytes <= shared_heap->base_addr + shared_heap->size;
}
#endif
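/*
 * Implements the semantics of the threads proposal's
 * memory.atomic.wait32/wait64: returns 1 ("not-equal") if the value at
 * `address` does not match `expect`, 0 ("ok") if the thread was woken
 * by atomic.notify, 2 ("timed-out") if the timeout expired, and
 * (uint32)-1 with an exception set on a trap (unshared memory,
 * out-of-bounds address, or an allocation failure). `timeout` is in
 * nanoseconds; a negative value means wait forever.
 */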
uint32
wasm_runtime_atomic_wait(WASMModuleInstanceCommon *module, void *address,
                         uint64 expect, int64 timeout, bool wait64)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    AtomicWaitInfo *wait_info;
    AtomicWaitNode *wait_node;
    korp_mutex *lock;
#if WASM_ENABLE_THREAD_MGR != 0
    WASMExecEnv *exec_env;
#endif
    uint64 timeout_left, timeout_wait, timeout_1sec;
    bool check_ret, is_timeout, no_wait;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    if (wasm_copy_exception(module_inst, NULL)) {
        return -1;
    }

    /* Currently we have only one memory instance */
    if (!shared_memory_is_shared(module_inst->memories[0])) {
        wasm_runtime_set_exception(module, "expected shared memory");
        return -1;
    }

    shared_memory_lock(module_inst->memories[0]);
    if (
#if WASM_ENABLE_SHARED_HEAP != 0
        /* neither in the shared heap */
        !is_native_addr_in_shared_heap((WASMModuleInstanceCommon *)module_inst,
                                       address, wait64 ? 8 : 4)
        &&
#endif
        /* nor in linear memory */
        ((uint8 *)address < module_inst->memories[0]->memory_data
         || (uint8 *)address + (wait64 ? 8 : 4)
                > module_inst->memories[0]->memory_data_end)) {
        shared_memory_unlock(module_inst->memories[0]);
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }
    shared_memory_unlock(module_inst->memories[0]);

#if WASM_ENABLE_THREAD_MGR != 0
    exec_env =
        wasm_clusters_search_exec_env((WASMModuleInstanceCommon *)module_inst);
    bh_assert(exec_env);
#endif

    lock = shared_memory_get_lock_pointer(module_inst->memories[0]);

    /* Lock the shared_mem_lock for the whole atomic wait process,
       and use it for os_cond_reltimedwait */
    os_mutex_lock(lock);

    no_wait = (!wait64 && *(uint32 *)address != (uint32)expect)
              || (wait64 && *(uint64 *)address != expect);
    if (no_wait) {
        os_mutex_unlock(lock);
        return 1;
    }

    if (!(wait_node = wasm_runtime_malloc(sizeof(AtomicWaitNode)))) {
        os_mutex_unlock(lock);
        wasm_runtime_set_exception(module, "failed to create wait node");
        return -1;
    }
    memset(wait_node, 0, sizeof(AtomicWaitNode));

    if (0 != os_cond_init(&wait_node->wait_cond)) {
        os_mutex_unlock(lock);
        wasm_runtime_free(wait_node);
        wasm_runtime_set_exception(module, "failed to init wait cond");
        return -1;
    }
    wait_node->status = S_WAITING;

    /* Acquire the wait info, creating a new one if none exists */
    wait_info = acquire_wait_info(address, wait_node);
    if (!wait_info) {
        os_mutex_unlock(lock);
        os_cond_destroy(&wait_node->wait_cond);
        wasm_runtime_free(wait_node);
        wasm_runtime_set_exception(module, "failed to acquire wait_info");
        return -1;
    }

    /* the timeout argument is in nanoseconds,
       while os_cond_reltimedwait takes microseconds */
    timeout_left = (uint64)timeout / 1000;
    timeout_1sec = (uint64)1e6;

    while (1) {
        if (timeout < 0) {
            /* wait forever until it is notified or terminated;
               here we keep waiting and checking every second */
            os_cond_reltimedwait(&wait_node->wait_cond, lock,
                                 (uint64)timeout_1sec);
            if (wait_node->status == S_NOTIFIED /* notified by atomic.notify */
#if WASM_ENABLE_THREAD_MGR != 0
                /* terminated by another thread */
                || wasm_cluster_is_thread_terminated(exec_env)
#endif
            ) {
                break;
            }
        }
        else {
            timeout_wait =
                timeout_left < timeout_1sec ? timeout_left : timeout_1sec;
            os_cond_reltimedwait(&wait_node->wait_cond, lock, timeout_wait);
            if (wait_node->status == S_NOTIFIED /* notified by atomic.notify */
                || timeout_left <= timeout_wait /* timed out */
#if WASM_ENABLE_THREAD_MGR != 0
                /* terminated by another thread */
                || wasm_cluster_is_thread_terminated(exec_env)
#endif
            ) {
                break;
            }
            timeout_left -= timeout_wait;
        }
    }

    is_timeout = wait_node->status == S_WAITING;

    check_ret = is_wait_node_exists(wait_info->wait_list, wait_node);
    bh_assert(check_ret);
    (void)check_ret;

    /* Remove the wait node from the wait list */
    bh_list_remove(wait_info->wait_list, wait_node);
    os_cond_destroy(&wait_node->wait_cond);
    wasm_runtime_free(wait_node);

    /* Release the wait info if no wait nodes are attached to it */
    map_try_release_wait_info(wait_map, wait_info, address);

    os_mutex_unlock(lock);

    return is_timeout ? 2 : 0;
}
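/*
 * Implements the semantics of the threads proposal's
 * memory.atomic.notify: returns the number of waiters actually woken
 * (at most `count`), 0 if nobody is waiting on `address` or the memory
 * is unshared, and (uint32)-1 with an exception set if the address is
 * out of bounds.
 */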
uint32
wasm_runtime_atomic_notify(WASMModuleInstanceCommon *module, void *address,
                           uint32 count)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    uint32 notify_result;
    AtomicWaitInfo *wait_info;
    korp_mutex *lock;
    bool out_of_bounds;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    shared_memory_lock(module_inst->memories[0]);
    out_of_bounds =
#if WASM_ENABLE_SHARED_HEAP != 0
        /* neither in the shared heap */
        !is_native_addr_in_shared_heap(module, address, 4) &&
#endif
        /* nor in linear memory */
        ((uint8 *)address < module_inst->memories[0]->memory_data
         || (uint8 *)address + 4 > module_inst->memories[0]->memory_data_end);
    shared_memory_unlock(module_inst->memories[0]);

    if (out_of_bounds) {
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }

    /* Currently we have only one memory instance */
    if (!shared_memory_is_shared(module_inst->memories[0])) {
        /* Always return 0 for unshared linear memory, since there is
           no way to create a waiter on it */
        return 0;
    }

    lock = shared_memory_get_lock_pointer(module_inst->memories[0]);

    /* Lock the shared_mem_lock for the whole atomic notify process,
       and use it for os_cond_signal */
    os_mutex_lock(lock);

    wait_info = acquire_wait_info(address, NULL);

    /* Nobody is waiting on this address */
    if (!wait_info) {
        os_mutex_unlock(lock);
        return 0;
    }

    /* Notify each wait node in the wait list */
    notify_result = notify_wait_list(wait_info->wait_list, count);

    os_mutex_unlock(lock);

    return notify_result;
}
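/*
 * Guest-side usage sketch (illustrative assumption, not part of this
 * file): C code compiled with clang for wasm32 with -matomics and a
 * shared memory reaches wasm_runtime_atomic_wait()/
 * wasm_runtime_atomic_notify() through the memory.atomic.wait32 and
 * memory.atomic.notify opcodes, e.g. via clang's wasm builtins:
 *
 *   _Atomic int flag = 0;
 *
 *   void consumer(void)
 *   {
 *       while (flag == 0) {
 *           // returns 0 ("ok"), 1 ("not-equal"), or 2 ("timed-out");
 *           // -1 means wait with no timeout
 *           __builtin_wasm_memory_atomic_wait32((int *)&flag, 0, -1);
 *       }
 *   }
 *
 *   void producer(void)
 *   {
 *       flag = 1;
 *       __builtin_wasm_memory_atomic_notify((int *)&flag, 1);
 *   }
 */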