wasm_shared_memory.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "bh_log.h"
#include "wasm_shared_memory.h"

static bh_list shared_memory_list_head;
static bh_list *const shared_memory_list = &shared_memory_list_head;
static korp_mutex shared_memory_list_lock;

/* clang-format off */
enum {
    S_WAITING,
    S_NOTIFIED
};
/* clang-format on */

typedef struct AtomicWaitInfo {
    korp_mutex wait_list_lock;
    bh_list wait_list_head;
    bh_list *wait_list;
} AtomicWaitInfo;

typedef struct AtomicWaitNode {
    bh_list_link l;
    uint8 status;
    korp_mutex wait_lock;
    korp_cond wait_cond;
} AtomicWaitNode;

/* Atomic wait map */
static HashMap *wait_map;

static uint32
wait_address_hash(void *address);

static bool
wait_address_equal(void *h1, void *h2);

static void
destroy_wait_info(void *wait_info);

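/*
 * Overview of the atomic wait/notify bookkeeping in this file:
 * - wait_map maps a linear-memory address to an AtomicWaitInfo.
 * - Each AtomicWaitInfo owns a list of AtomicWaitNode, one per thread
 *   currently blocked in wasm_runtime_atomic_wait on that address.
 * - wait_map itself is guarded by shared_memory_list_lock, while each
 *   wait list is guarded by its own wait_list_lock.
 */
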
bool
wasm_shared_memory_init()
{
    if (os_mutex_init(&shared_memory_list_lock) != 0)
        return false;

    /* the wait map does not exist yet, create a new one */
    if (!(wait_map = bh_hash_map_create(32, true, (HashFunc)wait_address_hash,
                                        (KeyEqualFunc)wait_address_equal, NULL,
                                        destroy_wait_info))) {
        os_mutex_destroy(&shared_memory_list_lock);
        return false;
    }
    return true;
}

void
wasm_shared_memory_destroy()
{
    os_mutex_destroy(&shared_memory_list_lock);
    if (wait_map) {
        bh_hash_map_destroy(wait_map);
    }
}

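/*
 * Every shared memory is registered in shared_memory_list as a
 * WASMSharedMemNode that pairs the defining module with its memory
 * instance and a reference count; the helpers below look nodes up,
 * adjust the count, and free a node once the count drops to zero.
 */
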
static WASMSharedMemNode *
search_module(WASMModuleCommon *module)
{
    WASMSharedMemNode *node;

    os_mutex_lock(&shared_memory_list_lock);
    node = bh_list_first_elem(shared_memory_list);

    while (node) {
        if (module == node->module) {
            os_mutex_unlock(&shared_memory_list_lock);
            return node;
        }
        node = bh_list_elem_next(node);
    }

    os_mutex_unlock(&shared_memory_list_lock);
    return NULL;
}

WASMSharedMemNode *
wasm_module_get_shared_memory(WASMModuleCommon *module)
{
    return search_module(module);
}

int32
shared_memory_inc_reference(WASMModuleCommon *module)
{
    WASMSharedMemNode *node = search_module(module);
    uint32 ref_count;

    if (node) {
        os_mutex_lock(&node->lock);
        /* read the new count while still holding the lock to avoid a
           racy re-read after unlock */
        ref_count = ++node->ref_count;
        os_mutex_unlock(&node->lock);
        return ref_count;
    }
    return -1;
}

int32
shared_memory_dec_reference(WASMModuleCommon *module)
{
    WASMSharedMemNode *node = search_module(module);
    uint32 ref_count = 0;

    if (node) {
        os_mutex_lock(&node->lock);
        ref_count = --node->ref_count;
        os_mutex_unlock(&node->lock);

        if (ref_count == 0) {
            /* the last reference is gone: unlink the node and free it */
            os_mutex_lock(&shared_memory_list_lock);
            bh_list_remove(shared_memory_list, node);
            os_mutex_unlock(&shared_memory_list_lock);

            os_mutex_destroy(&node->lock);
            wasm_runtime_free(node);
        }
        return ref_count;
    }
    return -1;
}

WASMMemoryInstanceCommon *
shared_memory_get_memory_inst(WASMSharedMemNode *node)
{
    return node->memory_inst;
}

WASMSharedMemNode *
shared_memory_set_memory_inst(WASMModuleCommon *module,
                              WASMMemoryInstanceCommon *memory)
{
    WASMSharedMemNode *node;
    bh_list_status ret;

    if (!(node = wasm_runtime_malloc(sizeof(WASMSharedMemNode))))
        return NULL;

    node->module = module;
    node->memory_inst = memory;
    /* the creator holds the first reference */
    node->ref_count = 1;

    if (os_mutex_init(&node->lock) != 0) {
        wasm_runtime_free(node);
        return NULL;
    }

    os_mutex_lock(&shared_memory_list_lock);
    ret = bh_list_insert(shared_memory_list, node);
    bh_assert(ret == BH_LIST_SUCCESS);
    os_mutex_unlock(&shared_memory_list_lock);

    (void)ret;
    return node;
}

/* Atomics wait && notify APIs */

static uint32
wait_address_hash(void *address)
{
    return (uint32)(uintptr_t)address;
}

static bool
wait_address_equal(void *h1, void *h2)
{
    return h1 == h2;
}

static bool
is_wait_node_exists(bh_list *wait_list, AtomicWaitNode *node)
{
    AtomicWaitNode *curr;
    curr = bh_list_first_elem(wait_list);

    while (curr) {
        if (curr == node) {
            return true;
        }
        curr = bh_list_elem_next(curr);
    }

    return false;
}

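/*
 * Wake up to `count` waiters on the list and return the number actually
 * notified; UINT32_MAX (or any count larger than the list length) wakes
 * every waiter. The caller must hold the owning wait_list_lock.
 */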
static uint32
notify_wait_list(bh_list *wait_list, uint32 count)
{
    AtomicWaitNode *node, *next;
    uint32 i, notify_count = count;

    if ((count == UINT32_MAX) || (count > wait_list->len))
        notify_count = wait_list->len;

    node = bh_list_first_elem(wait_list);
    if (!node)
        return 0;

    for (i = 0; i < notify_count; i++) {
        bh_assert(node);
        next = bh_list_elem_next(node);

        node->status = S_NOTIFIED;
        /* wakeup */
        os_cond_signal(&node->wait_cond);

        node = next;
    }

    return notify_count;
}

static AtomicWaitInfo *
acquire_wait_info(void *address, bool create)
{
    AtomicWaitInfo *wait_info = NULL;
    bh_list_status ret;

    os_mutex_lock(&shared_memory_list_lock);

    if (address)
        wait_info = (AtomicWaitInfo *)bh_hash_map_find(wait_map, address);

    if (!create) {
        os_mutex_unlock(&shared_memory_list_lock);
        return wait_info;
    }

    /* No wait info on this address, create new info */
    if (!wait_info) {
        if (!(wait_info = (AtomicWaitInfo *)wasm_runtime_malloc(
                  sizeof(AtomicWaitInfo)))) {
            goto fail1;
        }
        memset(wait_info, 0, sizeof(AtomicWaitInfo));

        /* init wait list */
        wait_info->wait_list = &wait_info->wait_list_head;
        ret = bh_list_init(wait_info->wait_list);
        bh_assert(ret == BH_LIST_SUCCESS);

        /* init wait list lock */
        if (0 != os_mutex_init(&wait_info->wait_list_lock)) {
            goto fail2;
        }

        if (!bh_hash_map_insert(wait_map, address, (void *)wait_info)) {
            goto fail3;
        }
    }

    os_mutex_unlock(&shared_memory_list_lock);

    bh_assert(wait_info);
    (void)ret;
    return wait_info;

fail3:
    os_mutex_destroy(&wait_info->wait_list_lock);
fail2:
    wasm_runtime_free(wait_info);
fail1:
    os_mutex_unlock(&shared_memory_list_lock);
    return NULL;
}

static void
destroy_wait_info(void *wait_info)
{
    AtomicWaitNode *node, *next;

    if (wait_info) {
        node = bh_list_first_elem(((AtomicWaitInfo *)wait_info)->wait_list);

        while (node) {
            next = bh_list_elem_next(node);
            os_mutex_destroy(&node->wait_lock);
            os_cond_destroy(&node->wait_cond);
            wasm_runtime_free(node);
            node = next;
        }

        os_mutex_destroy(&((AtomicWaitInfo *)wait_info)->wait_list_lock);
        wasm_runtime_free(wait_info);
    }
}

static void
release_wait_info(HashMap *wait_map_, AtomicWaitInfo *wait_info, void *address)
{
    os_mutex_lock(&shared_memory_list_lock);

    if (wait_info->wait_list->len == 0) {
        bh_hash_map_remove(wait_map_, address, NULL, NULL);
        destroy_wait_info(wait_info);
    }

    os_mutex_unlock(&shared_memory_list_lock);
}

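/*
 * Implements memory.atomic.wait32/wait64. Return values follow the
 * threads proposal: 0 ("ok", woken by a notify), 1 ("not-equal", the
 * value at `address` did not match `expect`), 2 ("timed-out"), and
 * (uint32)-1 with an exception set on the instance for traps such as
 * a non-shared memory or an out-of-bounds address. `timeout` is in
 * nanoseconds; a negative value waits forever.
 */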
uint32
wasm_runtime_atomic_wait(WASMModuleInstanceCommon *module, void *address,
                         uint64 expect, int64 timeout, bool wait64)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    AtomicWaitInfo *wait_info;
    AtomicWaitNode *wait_node;
    bool check_ret, is_timeout;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    /* Currently we have only one memory instance */
    if (!module_inst->memories[0]->is_shared) {
        wasm_runtime_set_exception(module, "expected shared memory");
        return -1;
    }
    if ((uint8 *)address < module_inst->memories[0]->memory_data
        || (uint8 *)address + (wait64 ? 8 : 4)
               > module_inst->memories[0]->memory_data_end) {
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }

    /* acquire the wait info, creating a new one if none exists */
    wait_info = acquire_wait_info(address, true);

    if (!wait_info) {
        wasm_runtime_set_exception(module, "failed to acquire wait_info");
        return -1;
    }

    os_mutex_lock(&wait_info->wait_list_lock);

    /* if the value doesn't match the expected one, return "not-equal" */
    if ((!wait64 && *(uint32 *)address != (uint32)expect)
        || (wait64 && *(uint64 *)address != expect)) {
        os_mutex_unlock(&wait_info->wait_list_lock);
        return 1;
    }
    else {
        bh_list_status ret;

        if (!(wait_node = wasm_runtime_malloc(sizeof(AtomicWaitNode)))) {
            wasm_runtime_set_exception(module, "failed to create wait node");
            os_mutex_unlock(&wait_info->wait_list_lock);
            return -1;
        }
        memset(wait_node, 0, sizeof(AtomicWaitNode));

        if (0 != os_mutex_init(&wait_node->wait_lock)) {
            wasm_runtime_free(wait_node);
            os_mutex_unlock(&wait_info->wait_list_lock);
            return -1;
        }

        if (0 != os_cond_init(&wait_node->wait_cond)) {
            os_mutex_destroy(&wait_node->wait_lock);
            wasm_runtime_free(wait_node);
            os_mutex_unlock(&wait_info->wait_list_lock);
            return -1;
        }

        wait_node->status = S_WAITING;

        ret = bh_list_insert(wait_info->wait_list, wait_node);
        bh_assert(ret == BH_LIST_SUCCESS);
        (void)ret;
    }

    os_mutex_unlock(&wait_info->wait_list_lock);

    /* condition wait start */
    os_mutex_lock(&wait_node->wait_lock);

    os_cond_reltimedwait(&wait_node->wait_cond, &wait_node->wait_lock,
                         timeout < 0 ? BHT_WAIT_FOREVER
                                     : (uint64)timeout / 1000);

    os_mutex_unlock(&wait_node->wait_lock);

    /* Check the wait node status */
    os_mutex_lock(&wait_info->wait_list_lock);
    check_ret = is_wait_node_exists(wait_info->wait_list, wait_node);
    bh_assert(check_ret);

    /* still S_WAITING means nobody notified us: the wait timed out */
    is_timeout = wait_node->status == S_WAITING;

    bh_list_remove(wait_info->wait_list, wait_node);
    os_mutex_destroy(&wait_node->wait_lock);
    os_cond_destroy(&wait_node->wait_cond);
    wasm_runtime_free(wait_node);
    os_mutex_unlock(&wait_info->wait_list_lock);

    release_wait_info(wait_map, wait_info, address);

    (void)check_ret;
    return is_timeout ? 2 : 0;
}

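/*
 * Implements memory.atomic.notify: wakes up to `count` threads waiting on
 * `address` and returns how many were actually woken (0 when nobody is
 * waiting). Returns (uint32)-1 with an exception set if the address is
 * out of bounds.
 */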
uint32
wasm_runtime_atomic_notify(WASMModuleInstanceCommon *module, void *address,
                           uint32 count)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    uint32 notify_result;
    AtomicWaitInfo *wait_info;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    if ((uint8 *)address < module_inst->memories[0]->memory_data
        || (uint8 *)address + 4 > module_inst->memories[0]->memory_data_end) {
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }

    wait_info = acquire_wait_info(address, false);

    /* Nobody is waiting on this address */
    if (!wait_info)
        return 0;

    os_mutex_lock(&wait_info->wait_list_lock);
    notify_result = notify_wait_list(wait_info->wait_list, count);
    os_mutex_unlock(&wait_info->wait_list_lock);

    return notify_result;
}