/* wasm_shared_memory.c */
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "bh_log.h"
  6. #include "wasm_shared_memory.h"
  7. #if WASM_ENABLE_THREAD_MGR != 0
  8. #include "../libraries/thread-mgr/thread_manager.h"
  9. #endif
/*
 * Note: this lock can be per memory.
 *
 * For now, just use a global because:
 * - it's a bit cumbersome to extend WASMMemoryInstance w/o breaking
 *   the AOT ABI.
 * - If you care performance, it's better to make the interpreters
 *   use atomic ops.
 */
korp_mutex g_shared_memory_lock;

/* clang-format off */
/* Status values for AtomicWaitNode::status */
enum {
    S_WAITING,  /* waiter is still blocked on its condition variable */
    S_NOTIFIED  /* waiter has been woken by atomic.notify */
};
/* clang-format on */

/* Per-address bookkeeping for atomic wait/notify: the list of threads
   currently blocked on one linear-memory address. Stored as the value
   in wait_map, keyed by the address. */
typedef struct AtomicWaitInfo {
    bh_list wait_list_head;
    bh_list *wait_list;
    /* WARNING: insert to the list allowed only in acquire_wait_info
       otherwise there will be data race as described in PR #2016 */
} AtomicWaitInfo;

/* One blocked thread waiting on an address. */
typedef struct AtomicWaitNode {
    bh_list_link l;      /* link in AtomicWaitInfo::wait_list */
    uint8 status;        /* S_WAITING or S_NOTIFIED */
    korp_cond wait_cond; /* signaled by notify_wait_list */
} AtomicWaitNode;

/* Atomic wait map: linear-memory address -> AtomicWaitInfo */
static HashMap *wait_map;

static uint32
wait_address_hash(const void *address);

static bool
wait_address_equal(void *h1, void *h2);

static void
destroy_wait_info(void *wait_info);
  45. bool
  46. wasm_shared_memory_init()
  47. {
  48. if (os_mutex_init(&g_shared_memory_lock) != 0)
  49. return false;
  50. /* wait map not exists, create new map */
  51. if (!(wait_map = bh_hash_map_create(32, true, (HashFunc)wait_address_hash,
  52. (KeyEqualFunc)wait_address_equal, NULL,
  53. destroy_wait_info))) {
  54. os_mutex_destroy(&g_shared_memory_lock);
  55. return false;
  56. }
  57. return true;
  58. }
/* Tear down the shared-memory subsystem. Destroying the map releases the
 * remaining AtomicWaitInfo values (it was created with destroy_wait_info
 * as its value destructor), then the global lock is destroyed. */
void
wasm_shared_memory_destroy()
{
    bh_hash_map_destroy(wait_map);
    os_mutex_destroy(&g_shared_memory_lock);
}
  65. uint16
  66. shared_memory_inc_reference(WASMMemoryInstance *memory)
  67. {
  68. bh_assert(shared_memory_is_shared(memory));
  69. uint16 old;
  70. #if BH_ATOMIC_16_IS_ATOMIC == 0
  71. os_mutex_lock(&g_shared_memory_lock);
  72. #endif
  73. old = BH_ATOMIC_16_FETCH_ADD(memory->ref_count, 1);
  74. #if BH_ATOMIC_16_IS_ATOMIC == 0
  75. os_mutex_unlock(&g_shared_memory_lock);
  76. #endif
  77. bh_assert(old >= 1);
  78. bh_assert(old < UINT16_MAX);
  79. return old + 1;
  80. }
  81. uint16
  82. shared_memory_dec_reference(WASMMemoryInstance *memory)
  83. {
  84. bh_assert(shared_memory_is_shared(memory));
  85. uint16 old;
  86. #if BH_ATOMIC_16_IS_ATOMIC == 0
  87. os_mutex_lock(&g_shared_memory_lock);
  88. #endif
  89. old = BH_ATOMIC_16_FETCH_SUB(memory->ref_count, 1);
  90. #if BH_ATOMIC_16_IS_ATOMIC == 0
  91. os_mutex_unlock(&g_shared_memory_lock);
  92. #endif
  93. bh_assert(old > 0);
  94. return old - 1;
  95. }
/* Return the lock guarding atomic wait/notify for `memory`.
 * Currently one process-wide lock serves every shared memory (see the
 * note at the top of this file); `memory` is only sanity-checked. */
static korp_mutex *
shared_memory_get_lock_pointer(WASMMemoryInstance *memory)
{
    bh_assert(memory != NULL);
    return &g_shared_memory_lock;
}
/* Atomics wait && notify APIs */

/* Hash function for wait_map: truncate the address to 32 bits. */
static uint32
wait_address_hash(const void *address)
{
    return (uint32)(uintptr_t)address;
}
  108. static bool
  109. wait_address_equal(void *h1, void *h2)
  110. {
  111. return h1 == h2 ? true : false;
  112. }
  113. static bool
  114. is_wait_node_exists(bh_list *wait_list, AtomicWaitNode *node)
  115. {
  116. AtomicWaitNode *curr;
  117. curr = bh_list_first_elem(wait_list);
  118. while (curr) {
  119. if (curr == node) {
  120. return true;
  121. }
  122. curr = bh_list_elem_next(curr);
  123. }
  124. return false;
  125. }
  126. static uint32
  127. notify_wait_list(bh_list *wait_list, uint32 count)
  128. {
  129. AtomicWaitNode *node, *next;
  130. uint32 i, notify_count = count;
  131. if (count > wait_list->len)
  132. notify_count = wait_list->len;
  133. node = bh_list_first_elem(wait_list);
  134. if (!node)
  135. return 0;
  136. for (i = 0; i < notify_count; i++) {
  137. bh_assert(node);
  138. next = bh_list_elem_next(node);
  139. node->status = S_NOTIFIED;
  140. /* wakeup */
  141. os_cond_signal(&node->wait_cond);
  142. node = next;
  143. }
  144. return notify_count;
  145. }
/* Look up the AtomicWaitInfo for `address`, creating it on demand.
 *
 * When `wait_node` is NULL this is a pure lookup (used by atomic.notify)
 * and may return NULL if nobody waits on the address. When `wait_node`
 * is non-NULL it is appended to the wait list, allocating and inserting
 * a fresh AtomicWaitInfo into wait_map first if needed.
 *
 * Must be called with g_shared_memory_lock held. The list insert must
 * happen inside this function (see the WARNING on AtomicWaitInfo) to
 * avoid the data race described in PR #2016.
 * Returns NULL on allocation or map-insertion failure. */
static AtomicWaitInfo *
acquire_wait_info(void *address, AtomicWaitNode *wait_node)
{
    AtomicWaitInfo *wait_info = NULL;
    bh_list_status ret;

    bh_assert(address != NULL);

    wait_info = (AtomicWaitInfo *)bh_hash_map_find(wait_map, address);

    /* Lookup-only mode: the caller just wants to know whether any
       waiters exist on this address. */
    if (!wait_node) {
        return wait_info;
    }

    /* No wait info on this address, create new info */
    if (!wait_info) {
        if (!(wait_info = (AtomicWaitInfo *)wasm_runtime_malloc(
                  sizeof(AtomicWaitInfo)))) {
            return NULL;
        }
        memset(wait_info, 0, sizeof(AtomicWaitInfo));

        /* init wait list */
        wait_info->wait_list = &wait_info->wait_list_head;
        ret = bh_list_init(wait_info->wait_list);
        bh_assert(ret == BH_LIST_SUCCESS);
        (void)ret;

        if (!bh_hash_map_insert(wait_map, address, (void *)wait_info)) {
            wasm_runtime_free(wait_info);
            return NULL;
        }
    }

    /* Register the waiter while still under the global lock. */
    ret = bh_list_insert(wait_info->wait_list, wait_node);
    bh_assert(ret == BH_LIST_SUCCESS);
    (void)ret;

    return wait_info;
}
  178. static void
  179. destroy_wait_info(void *wait_info)
  180. {
  181. AtomicWaitNode *node, *next;
  182. if (wait_info) {
  183. node = bh_list_first_elem(((AtomicWaitInfo *)wait_info)->wait_list);
  184. while (node) {
  185. next = bh_list_elem_next(node);
  186. os_cond_destroy(&node->wait_cond);
  187. wasm_runtime_free(node);
  188. node = next;
  189. }
  190. wasm_runtime_free(wait_info);
  191. }
  192. }
  193. static void
  194. map_try_release_wait_info(HashMap *wait_hash_map, AtomicWaitInfo *wait_info,
  195. void *address)
  196. {
  197. if (wait_info->wait_list->len > 0) {
  198. return;
  199. }
  200. bh_hash_map_remove(wait_hash_map, address, NULL, NULL);
  201. destroy_wait_info(wait_info);
  202. }
/* Implements the wasm memory.atomic.wait32/wait64 opcodes.
 *
 * Blocks the calling thread until a notifier wakes it or `timeout`
 * (nanoseconds; negative means wait forever) expires, provided the
 * value at `address` equals `expect` at the moment of the check.
 *
 * Returns 0 when woken by atomic.notify, 1 when *address != expect on
 * entry ("not-equal"), 2 on timeout, and (uint32)-1 with an exception
 * set on the instance on error. */
uint32
wasm_runtime_atomic_wait(WASMModuleInstanceCommon *module, void *address,
                         uint64 expect, int64 timeout, bool wait64)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    AtomicWaitInfo *wait_info;
    AtomicWaitNode *wait_node;
    korp_mutex *lock;
#if WASM_ENABLE_THREAD_MGR != 0
    WASMExecEnv *exec_env;
#endif
    uint64 timeout_left, timeout_wait, timeout_1sec;
    bool check_ret, is_timeout, no_wait;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    /* Don't start waiting if the instance already has a pending
       exception. */
    if (wasm_copy_exception(module_inst, NULL)) {
        return -1;
    }

    /* Currently we have only one memory instance */
    if (!shared_memory_is_shared(module_inst->memories[0])) {
        wasm_runtime_set_exception(module, "expected shared memory");
        return -1;
    }

    /* Bounds-check the access: 8 bytes for wait64, else 4. */
    shared_memory_lock(module_inst->memories[0]);
    if ((uint8 *)address < module_inst->memories[0]->memory_data
        || (uint8 *)address + (wait64 ? 8 : 4)
               > module_inst->memories[0]->memory_data_end) {
        shared_memory_unlock(module_inst->memories[0]);
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }
    shared_memory_unlock(module_inst->memories[0]);

#if WASM_ENABLE_THREAD_MGR != 0
    exec_env =
        wasm_clusters_search_exec_env((WASMModuleInstanceCommon *)module_inst);
    bh_assert(exec_env);
#endif

    lock = shared_memory_get_lock_pointer(module_inst->memories[0]);

    /* Lock the shared_mem_lock for the whole atomic wait process,
       and use it to os_cond_reltimedwait */
    os_mutex_lock(lock);

    /* Load and compare under the lock, so a concurrent notifier cannot
       slip in between this check and the cond wait below. */
    no_wait = (!wait64 && *(uint32 *)address != (uint32)expect)
              || (wait64 && *(uint64 *)address != expect);

    if (no_wait) {
        os_mutex_unlock(lock);
        return 1; /* "not-equal" result */
    }

    if (!(wait_node = wasm_runtime_malloc(sizeof(AtomicWaitNode)))) {
        os_mutex_unlock(lock);
        wasm_runtime_set_exception(module, "failed to create wait node");
        return -1;
    }
    memset(wait_node, 0, sizeof(AtomicWaitNode));

    if (0 != os_cond_init(&wait_node->wait_cond)) {
        os_mutex_unlock(lock);
        wasm_runtime_free(wait_node);
        wasm_runtime_set_exception(module, "failed to init wait cond");
        return -1;
    }

    wait_node->status = S_WAITING;

    /* Acquire the wait info, create new one if not exists */
    wait_info = acquire_wait_info(address, wait_node);
    if (!wait_info) {
        os_mutex_unlock(lock);
        os_cond_destroy(&wait_node->wait_cond);
        wasm_runtime_free(wait_node);
        wasm_runtime_set_exception(module, "failed to acquire wait_info");
        return -1;
    }

    /* unit of timeout is nsec, convert it to usec */
    timeout_left = (uint64)timeout / 1000;
    timeout_1sec = (uint64)1e6;

    while (1) {
        if (timeout < 0) {
            /* wait forever until it is notified or terminated
               here we keep waiting and checking every second */
            os_cond_reltimedwait(&wait_node->wait_cond, lock,
                                 (uint64)timeout_1sec);
            if (wait_node->status == S_NOTIFIED /* notified by atomic.notify */
#if WASM_ENABLE_THREAD_MGR != 0
                /* terminated by other thread */
                || wasm_cluster_is_thread_terminated(exec_env)
#endif
            ) {
                break;
            }
        }
        else {
            /* Sleep at most one second per iteration so thread
               termination is noticed reasonably promptly. */
            timeout_wait =
                timeout_left < timeout_1sec ? timeout_left : timeout_1sec;
            os_cond_reltimedwait(&wait_node->wait_cond, lock, timeout_wait);
            if (wait_node->status == S_NOTIFIED /* notified by atomic.notify */
                || timeout_left <= timeout_wait /* time out */
#if WASM_ENABLE_THREAD_MGR != 0
                /* terminated by other thread */
                || wasm_cluster_is_thread_terminated(exec_env)
#endif
            ) {
                break;
            }
            timeout_left -= timeout_wait;
        }
    }

    /* Status still S_WAITING means no notifier marked us: we timed out
       (or were terminated). */
    is_timeout = wait_node->status == S_WAITING ? true : false;

    check_ret = is_wait_node_exists(wait_info->wait_list, wait_node);
    bh_assert(check_ret);
    (void)check_ret;

    /* Remove wait node from wait list */
    bh_list_remove(wait_info->wait_list, wait_node);
    os_cond_destroy(&wait_node->wait_cond);
    wasm_runtime_free(wait_node);

    /* Release wait info if no wait nodes are attached */
    map_try_release_wait_info(wait_map, wait_info, address);

    os_mutex_unlock(lock);

    return is_timeout ? 2 : 0;
}
/* Implements the wasm memory.atomic.notify opcode.
 *
 * Wakes up to `count` threads blocked in wasm_runtime_atomic_wait on
 * `address`. Returns the number of waiters actually woken, 0 when there
 * are none (or the memory is not shared), or (uint32)-1 with an
 * exception set when `address` is out of bounds. */
uint32
wasm_runtime_atomic_notify(WASMModuleInstanceCommon *module, void *address,
                           uint32 count)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    uint32 notify_result;
    AtomicWaitInfo *wait_info;
    korp_mutex *lock;
    bool out_of_bounds;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    /* Bounds check: notify always validates a 4-byte access. */
    shared_memory_lock(module_inst->memories[0]);
    out_of_bounds =
        ((uint8 *)address < module_inst->memories[0]->memory_data
         || (uint8 *)address + 4 > module_inst->memories[0]->memory_data_end);
    shared_memory_unlock(module_inst->memories[0]);

    if (out_of_bounds) {
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }

    /* Currently we have only one memory instance */
    if (!shared_memory_is_shared(module_inst->memories[0])) {
        /* Always return 0 for unshared linear memory since there is
           no way to create a waiter on it */
        return 0;
    }

    lock = shared_memory_get_lock_pointer(module_inst->memories[0]);

    /* Lock the shared_mem_lock for the whole atomic notify process,
       and use it to os_cond_signal */
    os_mutex_lock(lock);

    /* NULL wait_node -> lookup only, no waiter is inserted. */
    wait_info = acquire_wait_info(address, NULL);

    /* Nobody waits on this address */
    if (!wait_info) {
        os_mutex_unlock(lock);
        return 0;
    }

    /* Notify each wait node in the wait list */
    notify_result = notify_wait_list(wait_info->wait_list, count);

    os_mutex_unlock(lock);

    return notify_result;
}