wasm_shared_memory.c
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "bh_log.h"
  6. #include "wasm_shared_memory.h"
  7. #if WASM_ENABLE_THREAD_MGR != 0
  8. #include "../libraries/thread-mgr/thread_manager.h"
  9. #endif
  10. /*
  11. * Note: this lock can be per memory.
  12. *
  13. * For now, just use a global because:
  14. * - it's a bit cumbersome to extend WASMMemoryInstance w/o breaking
  15. * the AOT ABI.
  16. * - If you care performance, it's better to make the interpreters
  17. * use atomic ops.
  18. */
  19. static korp_mutex _shared_memory_lock;
/* clang-format off */
/* State of an AtomicWaitNode::status field */
enum {
    S_WAITING, /* waiter is parked on its condition variable */
    S_NOTIFIED /* waiter has been woken by notify_wait_list */
};
/* clang-format on */
/* Per-address bookkeeping for atomic waits: one AtomicWaitInfo is stored
   in wait_map, keyed by the waited-on linear-memory address, and holds the
   list of threads currently blocked on that address. */
typedef struct AtomicWaitInfo {
    bh_list wait_list_head; /* storage for the list header */
    bh_list *wait_list;     /* always points at wait_list_head */
    /* WARNING: insert to the list allowed only in acquire_wait_info
       otherwise there will be data race as described in PR #2016 */
} AtomicWaitInfo;
/* One blocked waiter; linked on an AtomicWaitInfo's wait list. */
typedef struct AtomicWaitNode {
    bh_list_link l;      /* intrusive list link */
    uint8 status;        /* S_WAITING or S_NOTIFIED */
    korp_cond wait_cond; /* signaled by notify_wait_list to wake this node */
} AtomicWaitNode;
  37. /* Atomic wait map */
  38. static HashMap *wait_map;
  39. static uint32
  40. wait_address_hash(const void *address);
  41. static bool
  42. wait_address_equal(void *h1, void *h2);
  43. static void
  44. destroy_wait_info(void *wait_info);
  45. bool
  46. wasm_shared_memory_init()
  47. {
  48. if (os_mutex_init(&_shared_memory_lock) != 0)
  49. return false;
  50. /* wait map not exists, create new map */
  51. if (!(wait_map = bh_hash_map_create(32, true, (HashFunc)wait_address_hash,
  52. (KeyEqualFunc)wait_address_equal, NULL,
  53. destroy_wait_info))) {
  54. os_mutex_destroy(&_shared_memory_lock);
  55. return false;
  56. }
  57. return true;
  58. }
/* Tear down the global shared-memory state. destroy_wait_info was
   registered as the wait_map value destructor at creation, so destroying
   the map also releases any remaining wait infos. */
void
wasm_shared_memory_destroy()
{
    bh_hash_map_destroy(wait_map);
    os_mutex_destroy(&_shared_memory_lock);
}
/* Increment the shared memory's reference count.
   Returns the new (post-increment) count. The memory must already be
   shared, i.e. have a count of at least 1. */
uint32
shared_memory_inc_reference(WASMMemoryInstance *memory)
{
    bh_assert(shared_memory_is_shared(memory));
    uint32 old;
#if BH_ATOMIC_32_IS_ATOMIC == 0
    /* no native 32-bit atomics on this platform: serialize with the
       global shared-memory lock instead */
    os_mutex_lock(&_shared_memory_lock);
#endif
    old = BH_ATOMIC_32_FETCH_ADD(memory->ref_count, 1);
#if BH_ATOMIC_32_IS_ATOMIC == 0
    os_mutex_unlock(&_shared_memory_lock);
#endif
    bh_assert(old >= 1);         /* must already be shared */
    bh_assert(old < UINT32_MAX); /* overflow guard */
    return old + 1;
}
/* Decrement the shared memory's reference count.
   Returns the new (post-decrement) count; 0 means no holders remain.
   The memory must be shared (count > 0) on entry. */
uint32
shared_memory_dec_reference(WASMMemoryInstance *memory)
{
    bh_assert(shared_memory_is_shared(memory));
    uint32 old;
#if BH_ATOMIC_32_IS_ATOMIC == 0
    /* no native 32-bit atomics on this platform: serialize with the
       global shared-memory lock instead */
    os_mutex_lock(&_shared_memory_lock);
#endif
    old = BH_ATOMIC_32_FETCH_SUB(memory->ref_count, 1);
#if BH_ATOMIC_32_IS_ATOMIC == 0
    os_mutex_unlock(&_shared_memory_lock);
#endif
    bh_assert(old > 0); /* underflow guard */
    return old - 1;
}
/* A memory instance is considered shared iff its reference count is
   non-zero (unshared memories keep ref_count at 0). */
bool
shared_memory_is_shared(WASMMemoryInstance *memory)
{
    uint32 old;
#if BH_ATOMIC_32_IS_ATOMIC == 0
    /* no native 32-bit atomics on this platform: serialize with the
       global shared-memory lock instead */
    os_mutex_lock(&_shared_memory_lock);
#endif
    old = BH_ATOMIC_32_LOAD(memory->ref_count);
#if BH_ATOMIC_32_IS_ATOMIC == 0
    os_mutex_unlock(&_shared_memory_lock);
#endif
    return old > 0;
}
  109. static korp_mutex *
  110. shared_memory_get_lock_pointer(WASMMemoryInstance *memory)
  111. {
  112. bh_assert(memory != NULL);
  113. return &_shared_memory_lock;
  114. }
/* Acquire the shared-memory lock on behalf of `memory`.
   The lock is currently global, so `memory` is only sanity-checked. */
void
shared_memory_lock(WASMMemoryInstance *memory)
{
    /*
     * Note: exception logic is currently abusing this lock.
     * cf. https://github.com/bytecodealliance/wasm-micro-runtime/issues/2407
     */
    bh_assert(memory != NULL);
    os_mutex_lock(&_shared_memory_lock);
}
/* Release the shared-memory lock taken by shared_memory_lock(). */
void
shared_memory_unlock(WASMMemoryInstance *memory)
{
    bh_assert(memory != NULL);
    os_mutex_unlock(&_shared_memory_lock);
}
  131. /* Atomics wait && notify APIs */
  132. static uint32
  133. wait_address_hash(const void *address)
  134. {
  135. return (uint32)(uintptr_t)address;
  136. }
  137. static bool
  138. wait_address_equal(void *h1, void *h2)
  139. {
  140. return h1 == h2 ? true : false;
  141. }
  142. static bool
  143. is_wait_node_exists(bh_list *wait_list, AtomicWaitNode *node)
  144. {
  145. AtomicWaitNode *curr;
  146. curr = bh_list_first_elem(wait_list);
  147. while (curr) {
  148. if (curr == node) {
  149. return true;
  150. }
  151. curr = bh_list_elem_next(curr);
  152. }
  153. return false;
  154. }
  155. static uint32
  156. notify_wait_list(bh_list *wait_list, uint32 count)
  157. {
  158. AtomicWaitNode *node, *next;
  159. uint32 i, notify_count = count;
  160. if (count > wait_list->len)
  161. notify_count = wait_list->len;
  162. node = bh_list_first_elem(wait_list);
  163. if (!node)
  164. return 0;
  165. for (i = 0; i < notify_count; i++) {
  166. bh_assert(node);
  167. next = bh_list_elem_next(node);
  168. node->status = S_NOTIFIED;
  169. /* wakeup */
  170. os_cond_signal(&node->wait_cond);
  171. node = next;
  172. }
  173. return notify_count;
  174. }
/* Look up (and optionally create) the AtomicWaitInfo for `address`.
 *
 * When wait_node is NULL this is a pure lookup: returns the existing
 * info or NULL when nobody waits on the address. When wait_node is
 * non-NULL, an info is created on demand and wait_node is appended to
 * its wait list; NULL is returned only on allocation/insertion failure.
 *
 * Must be called with the shared-memory lock held. Per the warning on
 * AtomicWaitInfo, this is the only function allowed to insert into the
 * wait list (avoids the data race described in PR #2016). */
static AtomicWaitInfo *
acquire_wait_info(void *address, AtomicWaitNode *wait_node)
{
    AtomicWaitInfo *wait_info = NULL;
    bh_list_status ret;

    if (address)
        wait_info = (AtomicWaitInfo *)bh_hash_map_find(wait_map, address);

    /* lookup-only mode: no node to register */
    if (!wait_node) {
        return wait_info;
    }

    /* No wait info on this address, create new info */
    if (!wait_info) {
        if (!(wait_info = (AtomicWaitInfo *)wasm_runtime_malloc(
                  sizeof(AtomicWaitInfo)))) {
            return NULL;
        }
        memset(wait_info, 0, sizeof(AtomicWaitInfo));

        /* init wait list */
        wait_info->wait_list = &wait_info->wait_list_head;
        ret = bh_list_init(wait_info->wait_list);
        bh_assert(ret == BH_LIST_SUCCESS);
        (void)ret;

        if (!bh_hash_map_insert(wait_map, address, (void *)wait_info)) {
            wasm_runtime_free(wait_info);
            return NULL;
        }
    }

    /* register the new waiter on this address */
    ret = bh_list_insert(wait_info->wait_list, wait_node);
    bh_assert(ret == BH_LIST_SUCCESS);
    (void)ret;

    return wait_info;
}
  207. static void
  208. destroy_wait_info(void *wait_info)
  209. {
  210. AtomicWaitNode *node, *next;
  211. if (wait_info) {
  212. node = bh_list_first_elem(((AtomicWaitInfo *)wait_info)->wait_list);
  213. while (node) {
  214. next = bh_list_elem_next(node);
  215. os_cond_destroy(&node->wait_cond);
  216. wasm_runtime_free(node);
  217. node = next;
  218. }
  219. wasm_runtime_free(wait_info);
  220. }
  221. }
  222. static void
  223. map_try_release_wait_info(HashMap *wait_map_, AtomicWaitInfo *wait_info,
  224. void *address)
  225. {
  226. if (wait_info->wait_list->len > 0) {
  227. return;
  228. }
  229. bh_hash_map_remove(wait_map_, address, NULL, NULL);
  230. destroy_wait_info(wait_info);
  231. }
/* Implementation of the wasm `memory.atomic.wait32`/`wait64` opcodes.
 *
 * address: native pointer into the instance's linear memory.
 * expect:  value the caller expects at `address`.
 * timeout: in nanoseconds; negative means wait indefinitely.
 * wait64:  true for a 64-bit wait, false for a 32-bit wait.
 *
 * Returns 1 when *address != expect (no wait performed),
 *         0 when woken by atomic.notify,
 *         2 on timeout,
 *        -1 on error (wraps to UINT32_MAX since the return type is
 *           uint32), with an exception set on the module instance.
 */
uint32
wasm_runtime_atomic_wait(WASMModuleInstanceCommon *module, void *address,
                         uint64 expect, int64 timeout, bool wait64)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    AtomicWaitInfo *wait_info;
    AtomicWaitNode *wait_node;
    korp_mutex *lock;
#if WASM_ENABLE_THREAD_MGR != 0
    WASMExecEnv *exec_env;
#endif
    uint64 timeout_left, timeout_wait, timeout_1sec;
    bool check_ret, is_timeout, no_wait;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    /* don't start a wait when an exception is already pending */
    if (wasm_copy_exception(module_inst, NULL)) {
        return -1;
    }

    /* Currently we have only one memory instance */
    if (!shared_memory_is_shared(module_inst->memories[0])) {
        wasm_runtime_set_exception(module, "expected shared memory");
        return -1;
    }

    /* bounds-check the waited-on cell: 8 bytes for wait64, else 4 */
    shared_memory_lock(module_inst->memories[0]);
    if ((uint8 *)address < module_inst->memories[0]->memory_data
        || (uint8 *)address + (wait64 ? 8 : 4)
               > module_inst->memories[0]->memory_data_end) {
        shared_memory_unlock(module_inst->memories[0]);
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }
    shared_memory_unlock(module_inst->memories[0]);

#if WASM_ENABLE_THREAD_MGR != 0
    exec_env =
        wasm_clusters_search_exec_env((WASMModuleInstanceCommon *)module_inst);
    bh_assert(exec_env);
#endif

    lock = shared_memory_get_lock_pointer(module_inst->memories[0]);

    /* Lock the shared_mem_lock for the whole atomic wait process,
       and use it to os_cond_reltimedwait */
    os_mutex_lock(lock);

    /* the value check happens under the same lock atomic.notify takes,
       so a notify can't slip in between the check and the wait */
    no_wait = (!wait64 && *(uint32 *)address != (uint32)expect)
              || (wait64 && *(uint64 *)address != expect);

    if (no_wait) {
        os_mutex_unlock(lock);
        return 1;
    }

    if (!(wait_node = wasm_runtime_malloc(sizeof(AtomicWaitNode)))) {
        os_mutex_unlock(lock);
        wasm_runtime_set_exception(module, "failed to create wait node");
        return -1;
    }
    memset(wait_node, 0, sizeof(AtomicWaitNode));

    if (0 != os_cond_init(&wait_node->wait_cond)) {
        os_mutex_unlock(lock);
        wasm_runtime_free(wait_node);
        wasm_runtime_set_exception(module, "failed to init wait cond");
        return -1;
    }
    wait_node->status = S_WAITING;

    /* Acquire the wait info, create new one if not exists;
       this also links wait_node onto the per-address wait list */
    wait_info = acquire_wait_info(address, wait_node);
    if (!wait_info) {
        os_mutex_unlock(lock);
        os_cond_destroy(&wait_node->wait_cond);
        wasm_runtime_free(wait_node);
        wasm_runtime_set_exception(module, "failed to acquire wait_info");
        return -1;
    }

    /* unit of timeout is nsec, convert it to usec */
    timeout_left = (uint64)timeout / 1000;
    timeout_1sec = (uint64)1e6;

    while (1) {
        if (timeout < 0) {
            /* wait forever until it is notified or terminated;
               here we keep waiting and checking every second */
            os_cond_reltimedwait(&wait_node->wait_cond, lock,
                                 (uint64)timeout_1sec);

            if (wait_node->status == S_NOTIFIED /* notified by atomic.notify */
#if WASM_ENABLE_THREAD_MGR != 0
                /* terminated by other thread */
                || wasm_cluster_is_thread_terminated(exec_env)
#endif
            ) {
                break;
            }
        }
        else {
            /* sleep in chunks of at most one second so termination
               requests are noticed promptly */
            timeout_wait =
                timeout_left < timeout_1sec ? timeout_left : timeout_1sec;

            os_cond_reltimedwait(&wait_node->wait_cond, lock, timeout_wait);

            if (wait_node->status == S_NOTIFIED /* notified by atomic.notify */
                || timeout_left <= timeout_wait /* time out */
#if WASM_ENABLE_THREAD_MGR != 0
                /* terminated by other thread */
                || wasm_cluster_is_thread_terminated(exec_env)
#endif
            ) {
                break;
            }
            timeout_left -= timeout_wait;
        }
    }

    /* still S_WAITING here means nobody notified us: report timeout */
    is_timeout = wait_node->status == S_WAITING ? true : false;

    check_ret = is_wait_node_exists(wait_info->wait_list, wait_node);
    bh_assert(check_ret);
    (void)check_ret;

    /* Remove wait node from wait list */
    bh_list_remove(wait_info->wait_list, wait_node);
    os_cond_destroy(&wait_node->wait_cond);
    wasm_runtime_free(wait_node);

    /* Release wait info if no wait nodes are attached */
    map_try_release_wait_info(wait_map, wait_info, address);

    os_mutex_unlock(lock);

    return is_timeout ? 2 : 0;
}
/* Implementation of the wasm `memory.atomic.notify` opcode.
 *
 * Wakes up to `count` threads blocked in wasm_runtime_atomic_wait on
 * `address`. Returns the number of waiters signaled, 0 when nobody
 * waits (or the memory is unshared), and -1 on out-of-bounds access
 * (wraps to UINT32_MAX since the return type is uint32), with an
 * exception set on the module instance. */
uint32
wasm_runtime_atomic_notify(WASMModuleInstanceCommon *module, void *address,
                           uint32 count)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    uint32 notify_result;
    AtomicWaitInfo *wait_info;
    korp_mutex *lock;
    bool out_of_bounds;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    /* bounds-check: notify addresses a 4-byte cell */
    shared_memory_lock(module_inst->memories[0]);
    out_of_bounds =
        ((uint8 *)address < module_inst->memories[0]->memory_data
         || (uint8 *)address + 4 > module_inst->memories[0]->memory_data_end);
    shared_memory_unlock(module_inst->memories[0]);

    if (out_of_bounds) {
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }

    /* Currently we have only one memory instance */
    if (!shared_memory_is_shared(module_inst->memories[0])) {
        /* Always return 0 for unshared linear memory since there is
           no way to create a waiter on it */
        return 0;
    }

    lock = shared_memory_get_lock_pointer(module_inst->memories[0]);

    /* Lock the shared_mem_lock for the whole atomic notify process,
       and use it to os_cond_signal */
    os_mutex_lock(lock);

    /* lookup-only: passing NULL as wait_node never creates an entry */
    wait_info = acquire_wait_info(address, NULL);

    /* Nobody waits on this address */
    if (!wait_info) {
        os_mutex_unlock(lock);
        return 0;
    }

    /* Notify each wait node in the wait list */
    notify_result = notify_wait_list(wait_info->wait_list, count);

    os_mutex_unlock(lock);
    return notify_result;
}