wasm_shared_memory.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "bh_log.h"
  6. #include "wasm_shared_memory.h"
  7. #if WASM_ENABLE_THREAD_MGR != 0
  8. #include "../libraries/thread-mgr/thread_manager.h"
  9. #endif
  10. /*
  11. * Note: this lock can be per memory.
  12. *
  13. * For now, just use a global because:
  14. * - it's a bit cumbersome to extend WASMMemoryInstance w/o breaking
  15. * the AOT ABI.
  16. * - If you care performance, it's better to make the interpreters
  17. * use atomic ops.
  18. */
/* Global lock protecting the wait map and wait lists, and — when 16-bit
   atomics are unavailable — the shared-memory reference counts. */
korp_mutex g_shared_memory_lock;

/* clang-format off */
/* Status of an AtomicWaitNode: a waiter starts in S_WAITING and is
   moved to S_NOTIFIED by wasm_runtime_atomic_notify. */
enum {
S_WAITING,
S_NOTIFIED
};
/* clang-format on */

/* Per-address record stored in the wait map: the list of threads
   currently blocked in atomic wait on that address. */
typedef struct AtomicWaitInfo {
bh_list wait_list_head;
bh_list *wait_list;
/* WARNING: insert to the list allowed only in acquire_wait_info
otherwise there will be data race as described in PR #2016 */
} AtomicWaitInfo;

/* One blocked waiter: list link, S_WAITING/S_NOTIFIED status, and the
   condition variable the thread sleeps on. */
typedef struct AtomicWaitNode {
bh_list_link l;
uint8 status;
korp_cond wait_cond;
} AtomicWaitNode;

/* Atomic wait map: wait address -> AtomicWaitInfo */
static HashMap *wait_map;

static uint32
wait_address_hash(const void *address);
static bool
wait_address_equal(void *h1, void *h2);
static void
destroy_wait_info(void *wait_info);
  45. bool
  46. wasm_shared_memory_init()
  47. {
  48. if (os_mutex_init(&g_shared_memory_lock) != 0)
  49. return false;
  50. /* wait map not exists, create new map */
  51. if (!(wait_map = bh_hash_map_create(32, true, (HashFunc)wait_address_hash,
  52. (KeyEqualFunc)wait_address_equal, NULL,
  53. destroy_wait_info))) {
  54. os_mutex_destroy(&g_shared_memory_lock);
  55. return false;
  56. }
  57. return true;
  58. }
  59. void
  60. wasm_shared_memory_destroy()
  61. {
  62. bh_hash_map_destroy(wait_map);
  63. os_mutex_destroy(&g_shared_memory_lock);
  64. }
  65. uint16
  66. shared_memory_inc_reference(WASMMemoryInstance *memory)
  67. {
  68. bh_assert(shared_memory_is_shared(memory));
  69. uint16 old;
  70. #if BH_ATOMIC_16_IS_ATOMIC == 0
  71. os_mutex_lock(&g_shared_memory_lock);
  72. #endif
  73. old = BH_ATOMIC_16_FETCH_ADD(memory->ref_count, 1);
  74. #if BH_ATOMIC_16_IS_ATOMIC == 0
  75. os_mutex_unlock(&g_shared_memory_lock);
  76. #endif
  77. bh_assert(old >= 1);
  78. bh_assert(old < UINT16_MAX);
  79. return old + 1;
  80. }
  81. uint16
  82. shared_memory_dec_reference(WASMMemoryInstance *memory)
  83. {
  84. bh_assert(shared_memory_is_shared(memory));
  85. uint16 old;
  86. #if BH_ATOMIC_16_IS_ATOMIC == 0
  87. os_mutex_lock(&g_shared_memory_lock);
  88. #endif
  89. old = BH_ATOMIC_16_FETCH_SUB(memory->ref_count, 1);
  90. #if BH_ATOMIC_16_IS_ATOMIC == 0
  91. os_mutex_unlock(&g_shared_memory_lock);
  92. #endif
  93. bh_assert(old > 0);
  94. return old - 1;
  95. }
  96. static korp_mutex *
  97. shared_memory_get_lock_pointer(WASMMemoryInstance *memory)
  98. {
  99. bh_assert(memory != NULL);
  100. return &g_shared_memory_lock;
  101. }
  102. /* Atomics wait && notify APIs */
  103. static uint32
  104. wait_address_hash(const void *address)
  105. {
  106. return (uint32)(uintptr_t)address;
  107. }
  108. static bool
  109. wait_address_equal(void *h1, void *h2)
  110. {
  111. return h1 == h2 ? true : false;
  112. }
  113. static bool
  114. is_wait_node_exists(bh_list *wait_list, AtomicWaitNode *node)
  115. {
  116. AtomicWaitNode *curr;
  117. curr = bh_list_first_elem(wait_list);
  118. while (curr) {
  119. if (curr == node) {
  120. return true;
  121. }
  122. curr = bh_list_elem_next(curr);
  123. }
  124. return false;
  125. }
  126. static uint32
  127. notify_wait_list(bh_list *wait_list, uint32 count)
  128. {
  129. AtomicWaitNode *node, *next;
  130. uint32 i, notify_count = count;
  131. if (count > wait_list->len)
  132. notify_count = wait_list->len;
  133. node = bh_list_first_elem(wait_list);
  134. if (!node)
  135. return 0;
  136. for (i = 0; i < notify_count; i++) {
  137. bh_assert(node);
  138. next = bh_list_elem_next(node);
  139. node->status = S_NOTIFIED;
  140. /* wakeup */
  141. os_cond_signal(&node->wait_cond);
  142. node = next;
  143. }
  144. return notify_count;
  145. }
/* Look up the AtomicWaitInfo for `address` in the wait map.
 * When `wait_node` is NULL this is a pure query and may return NULL.
 * Otherwise the info record is created on demand and `wait_node` is
 * appended to its wait list; inserting here (and only here, with the
 * caller holding the shared memory lock) avoids the data race described
 * in PR #2016.
 * Returns NULL on allocation or map-insert failure. */
static AtomicWaitInfo *
acquire_wait_info(void *address, AtomicWaitNode *wait_node)
{
    AtomicWaitInfo *wait_info = NULL;
    bh_list_status ret;

    bh_assert(address != NULL);

    wait_info = (AtomicWaitInfo *)bh_hash_map_find(wait_map, address);

    if (!wait_node) {
        /* Query-only mode: report whether anyone waits on this address. */
        return wait_info;
    }

    /* No wait info on this address, create new info */
    if (!wait_info) {
        if (!(wait_info = (AtomicWaitInfo *)wasm_runtime_malloc(
                  sizeof(AtomicWaitInfo)))) {
            return NULL;
        }
        memset(wait_info, 0, sizeof(AtomicWaitInfo));

        /* init wait list */
        wait_info->wait_list = &wait_info->wait_list_head;
        ret = bh_list_init(wait_info->wait_list);
        bh_assert(ret == BH_LIST_SUCCESS);
        (void)ret;

        if (!bh_hash_map_insert(wait_map, address, (void *)wait_info)) {
            /* Insert failed: free the record we just allocated. */
            wasm_runtime_free(wait_info);
            return NULL;
        }
    }

    ret = bh_list_insert(wait_info->wait_list, wait_node);
    bh_assert(ret == BH_LIST_SUCCESS);
    (void)ret;
    return wait_info;
}
  178. static void
  179. destroy_wait_info(void *wait_info)
  180. {
  181. AtomicWaitNode *node, *next;
  182. if (wait_info) {
  183. node = bh_list_first_elem(((AtomicWaitInfo *)wait_info)->wait_list);
  184. while (node) {
  185. next = bh_list_elem_next(node);
  186. os_cond_destroy(&node->wait_cond);
  187. wasm_runtime_free(node);
  188. node = next;
  189. }
  190. wasm_runtime_free(wait_info);
  191. }
  192. }
  193. static void
  194. map_try_release_wait_info(HashMap *wait_hash_map, AtomicWaitInfo *wait_info,
  195. void *address)
  196. {
  197. if (wait_info->wait_list->len > 0) {
  198. return;
  199. }
  200. bh_hash_map_remove(wait_hash_map, address, NULL, NULL);
  201. destroy_wait_info(wait_info);
  202. }
  203. #if WASM_ENABLE_SHARED_HEAP != 0
/* Check whether the range [addr, addr + bytes) lies entirely inside the
 * module instance's attached shared heap.
 * NOTE(review): only implemented for interpreter instances; for AOT
 * modules shared_heap stays NULL (see TODO below), so this returns
 * false — confirm that is the intended interim behavior. */
static bool
is_native_addr_in_shared_heap(WASMModuleInstanceCommon *module_inst,
                              uint8 *addr, uint32 bytes)
{
    WASMSharedHeap *shared_heap = NULL;

#if WASM_ENABLE_INTERP != 0
    if (module_inst->module_type == Wasm_Module_Bytecode) {
        shared_heap = ((WASMModuleInstance *)module_inst)->e->shared_heap;
    }
#endif
#if WASM_ENABLE_AOT != 0
    if (module_inst->module_type == Wasm_Module_AoT) {
        // TODO
    }
#endif

    return shared_heap && addr >= shared_heap->base_addr
           && addr + bytes <= shared_heap->base_addr + shared_heap->size;
}
  222. #endif
/* Block the calling thread until the value at `address` is notified,
 * implementing the wasm `memory.atomic.wait32/wait64` semantics.
 * `expect`  - value the memory location must still hold, else no wait.
 * `timeout` - nanoseconds; negative means wait forever.
 * `wait64`  - true for a 64-bit wait, false for 32-bit.
 * Returns 0 (woken by notify), 1 ("not-equal": value already differs),
 * 2 (timed out), or (uint32)-1 with an exception set on error. */
uint32
wasm_runtime_atomic_wait(WASMModuleInstanceCommon *module, void *address,
                         uint64 expect, int64 timeout, bool wait64)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    AtomicWaitInfo *wait_info;
    AtomicWaitNode *wait_node;
    korp_mutex *lock;
#if WASM_ENABLE_THREAD_MGR != 0
    WASMExecEnv *exec_env;
#endif
    uint64 timeout_left, timeout_wait, timeout_1sec;
    bool check_ret, is_timeout, no_wait;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    /* Don't start a wait if an exception is already pending. */
    if (wasm_copy_exception(module_inst, NULL)) {
        return -1;
    }

    /* Currently we have only one memory instance */
    if (!shared_memory_is_shared(module_inst->memories[0])) {
        wasm_runtime_set_exception(module, "expected shared memory");
        return -1;
    }

    /* Bounds-check the wait address against the shared heap (if any)
       and the linear memory, under the memory lock. */
    shared_memory_lock(module_inst->memories[0]);
    if (
#if WASM_ENABLE_SHARED_HEAP != 0
        /* not in shared heap */
        !is_native_addr_in_shared_heap((WASMModuleInstanceCommon *)module_inst,
                                       address, wait64 ? 8 : 4)
        &&
#endif
        /* and not in linear memory */
        ((uint8 *)address < module_inst->memories[0]->memory_data
         || (uint8 *)address + (wait64 ? 8 : 4)
                > module_inst->memories[0]->memory_data_end)) {
        shared_memory_unlock(module_inst->memories[0]);
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }
    shared_memory_unlock(module_inst->memories[0]);

#if WASM_ENABLE_THREAD_MGR != 0
    /* Needed below to detect termination requests from other threads. */
    exec_env =
        wasm_clusters_search_exec_env((WASMModuleInstanceCommon *)module_inst);
    bh_assert(exec_env);
#endif

    lock = shared_memory_get_lock_pointer(module_inst->memories[0]);

    /* Lock the shared_mem_lock for the whole atomic wait process,
       and use it to os_cond_reltimedwait */
    os_mutex_lock(lock);

    /* Compare under the lock: if the value already differs from
       `expect`, return "not-equal" without sleeping. */
    no_wait = (!wait64 && *(uint32 *)address != (uint32)expect)
              || (wait64 && *(uint64 *)address != expect);
    if (no_wait) {
        os_mutex_unlock(lock);
        return 1;
    }

    if (!(wait_node = wasm_runtime_malloc(sizeof(AtomicWaitNode)))) {
        os_mutex_unlock(lock);
        wasm_runtime_set_exception(module, "failed to create wait node");
        return -1;
    }
    memset(wait_node, 0, sizeof(AtomicWaitNode));

    if (0 != os_cond_init(&wait_node->wait_cond)) {
        os_mutex_unlock(lock);
        wasm_runtime_free(wait_node);
        wasm_runtime_set_exception(module, "failed to init wait cond");
        return -1;
    }
    wait_node->status = S_WAITING;

    /* Acquire the wait info, create new one if not exists */
    wait_info = acquire_wait_info(address, wait_node);
    if (!wait_info) {
        os_mutex_unlock(lock);
        os_cond_destroy(&wait_node->wait_cond);
        wasm_runtime_free(wait_node);
        wasm_runtime_set_exception(module, "failed to acquire wait_info");
        return -1;
    }

    /* unit of timeout is nsec, convert it to usec */
    timeout_left = (uint64)timeout / 1000;
    timeout_1sec = (uint64)1e6;

    /* Sleep in <= 1-second slices so the thread can periodically
       re-check for notification / termination. */
    while (1) {
        if (timeout < 0) {
            /* wait forever until it is notified or terminated
               here we keep waiting and checking every second */
            os_cond_reltimedwait(&wait_node->wait_cond, lock,
                                 (uint64)timeout_1sec);
            if (wait_node->status == S_NOTIFIED /* notified by atomic.notify */
#if WASM_ENABLE_THREAD_MGR != 0
                /* terminated by other thread */
                || wasm_cluster_is_thread_terminated(exec_env)
#endif
            ) {
                break;
            }
        }
        else {
            timeout_wait =
                timeout_left < timeout_1sec ? timeout_left : timeout_1sec;
            os_cond_reltimedwait(&wait_node->wait_cond, lock, timeout_wait);
            if (wait_node->status == S_NOTIFIED /* notified by atomic.notify */
                || timeout_left <= timeout_wait /* time out */
#if WASM_ENABLE_THREAD_MGR != 0
                /* terminated by other thread */
                || wasm_cluster_is_thread_terminated(exec_env)
#endif
            ) {
                break;
            }
            timeout_left -= timeout_wait;
        }
    }

    /* Still S_WAITING after the loop means we left on timeout (or
       termination), not via atomic.notify. */
    is_timeout = wait_node->status == S_WAITING ? true : false;

    check_ret = is_wait_node_exists(wait_info->wait_list, wait_node);
    bh_assert(check_ret);
    (void)check_ret;

    /* Remove wait node from wait list */
    bh_list_remove(wait_info->wait_list, wait_node);
    os_cond_destroy(&wait_node->wait_cond);
    wasm_runtime_free(wait_node);

    /* Release wait info if no wait nodes are attached */
    map_try_release_wait_info(wait_map, wait_info, address);

    os_mutex_unlock(lock);

    return is_timeout ? 2 : 0;
}
/* Wake up to `count` threads blocked in atomic wait on `address`,
 * implementing the wasm `memory.atomic.notify` semantics.
 * Returns the number of waiters woken, 0 when nobody waits (or the
 * linear memory is unshared), or (uint32)-1 with an exception set when
 * the address is out of bounds. */
uint32
wasm_runtime_atomic_notify(WASMModuleInstanceCommon *module, void *address,
                           uint32 count)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    uint32 notify_result;
    AtomicWaitInfo *wait_info;
    korp_mutex *lock;
    bool out_of_bounds;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    /* Bounds-check the notify address against the shared heap (if any)
       and the linear memory, under the memory lock. */
    shared_memory_lock(module_inst->memories[0]);
    out_of_bounds =
#if WASM_ENABLE_SHARED_HEAP != 0
        /* not in shared heap */
        !is_native_addr_in_shared_heap(module, address, 4) &&
#endif
        /* and not in linear memory */
        ((uint8 *)address < module_inst->memories[0]->memory_data
         || (uint8 *)address + 4 > module_inst->memories[0]->memory_data_end);
    shared_memory_unlock(module_inst->memories[0]);

    if (out_of_bounds) {
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }

    /* Currently we have only one memory instance */
    if (!shared_memory_is_shared(module_inst->memories[0])) {
        /* Always return 0 for unshared linear memory since there is
           no way to create a waiter on it */
        return 0;
    }

    lock = shared_memory_get_lock_pointer(module_inst->memories[0]);

    /* Lock the shared_mem_lock for the whole atomic notify process,
       and use it to os_cond_signal */
    os_mutex_lock(lock);

    /* Query-only lookup (NULL wait_node): don't create a record. */
    wait_info = acquire_wait_info(address, NULL);

    /* Nobody wait on this address */
    if (!wait_info) {
        os_mutex_unlock(lock);
        return 0;
    }

    /* Notify each wait node in the wait list */
    notify_result = notify_wait_list(wait_info->wait_list, count);

    os_mutex_unlock(lock);

    return notify_result;
}