/* wasm_shared_memory.c */
/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */
#include "bh_log.h"
#include "wasm_shared_memory.h"

/* List of shared-memory nodes, one per module; guarded by
   shared_memory_list_lock. */
static bh_list shared_memory_list_head;
static bh_list *const shared_memory_list = &shared_memory_list_head;
static korp_mutex shared_memory_list_lock;

/* clang-format off */
/* Status of an AtomicWaitNode: still waiting, or already notified. */
enum {
    S_WAITING,
    S_NOTIFIED
};
/* clang-format on */

/* Per-address bookkeeping: the list of threads currently blocked in
   atomic.wait on one linear-memory address. */
typedef struct AtomicWaitInfo {
    korp_mutex wait_list_lock; /* protects wait_list */
    bh_list wait_list_head;
    bh_list *wait_list;
} AtomicWaitInfo;

/* One blocked waiter: list link, wait status, and the condvar/mutex
   pair it sleeps on. */
typedef struct AtomicWaitNode {
    bh_list_link l;
    uint8 status; /* S_WAITING or S_NOTIFIED */
    korp_mutex wait_lock;
    korp_cond wait_cond;
} AtomicWaitNode;

/* Accumulator used when flattening the wait map's keys into an array. */
typedef struct AtomicWaitAddressArgs {
    uint32 index;
    void **addr;
} AtomicWaitAddressArgs;

/* Atomic wait map: address -> AtomicWaitInfo; guarded by wait_map_lock. */
static HashMap *wait_map;
static korp_mutex wait_map_lock;

static uint32
wait_address_hash(void *address);
static bool
wait_address_equal(void *h1, void *h2);
static void
destroy_wait_info(void *wait_info);
  40. bool
  41. wasm_shared_memory_init()
  42. {
  43. if (os_mutex_init(&shared_memory_list_lock) != 0)
  44. return false;
  45. if (os_mutex_init(&wait_map_lock) != 0) {
  46. os_mutex_destroy(&shared_memory_list_lock);
  47. return false;
  48. }
  49. /* wait map not exists, create new map */
  50. if (!(wait_map = bh_hash_map_create(32, true, (HashFunc)wait_address_hash,
  51. (KeyEqualFunc)wait_address_equal, NULL,
  52. destroy_wait_info))) {
  53. os_mutex_destroy(&shared_memory_list_lock);
  54. os_mutex_destroy(&wait_map_lock);
  55. return false;
  56. }
  57. return true;
  58. }
  59. void
  60. wasm_shared_memory_destroy()
  61. {
  62. os_mutex_destroy(&shared_memory_list_lock);
  63. os_mutex_destroy(&wait_map_lock);
  64. if (wait_map) {
  65. bh_hash_map_destroy(wait_map);
  66. }
  67. }
  68. static WASMSharedMemNode *
  69. search_module(WASMModuleCommon *module)
  70. {
  71. WASMSharedMemNode *node;
  72. os_mutex_lock(&shared_memory_list_lock);
  73. node = bh_list_first_elem(shared_memory_list);
  74. while (node) {
  75. if (module == node->module) {
  76. os_mutex_unlock(&shared_memory_list_lock);
  77. return node;
  78. }
  79. node = bh_list_elem_next(node);
  80. }
  81. os_mutex_unlock(&shared_memory_list_lock);
  82. return NULL;
  83. }
  84. static void
  85. wait_map_address_count_callback(void *key, void *value,
  86. void *p_total_elem_count)
  87. {
  88. *(uint32 *)p_total_elem_count = *(uint32 *)p_total_elem_count + 1;
  89. }
  90. static void
  91. create_list_of_waiter_addresses(void *key, void *value, void *user_data)
  92. {
  93. AtomicWaitAddressArgs *data = (AtomicWaitAddressArgs *)user_data;
  94. data->addr[data->index++] = key;
  95. }
/* Wake every thread blocked in atomic.wait so none is left sleeping when
   an instance hits an exception. Snapshots all wait addresses under
   wait_map_lock, then notifies each with count UINT32_MAX.
   NOTE(review): this notifies ALL addresses in the global wait map, not
   only those belonging to module_inst -- confirm that is the intent. */
void
notify_stale_threads_on_exception(WASMModuleInstanceCommon *module_inst)
{
    AtomicWaitAddressArgs args = { 0 };
    uint32 i = 0, total_elem_count = 0;
    uint64 total_elem_count_size = 0;

    os_mutex_lock(&wait_map_lock); /* Make the two traversals atomic */

    /* count number of addresses in wait_map */
    bh_hash_map_traverse(wait_map, wait_map_address_count_callback,
                         (void *)&total_elem_count);
    if (!total_elem_count) {
        os_mutex_unlock(&wait_map_lock);
        return;
    }

    /* allocate memory; computed in 64-bit to catch overflow before the
       32-bit malloc argument */
    total_elem_count_size = (uint64)sizeof(void *) * total_elem_count;
    if (total_elem_count_size >= UINT32_MAX
        || !(args.addr = wasm_runtime_malloc((uint32)total_elem_count_size))) {
        LOG_ERROR(
            "failed to allocate memory for list of atomic wait addresses");
        os_mutex_unlock(&wait_map_lock);
        return;
    }

    /* set values in list of addresses */
    bh_hash_map_traverse(wait_map, create_list_of_waiter_addresses, &args);
    os_mutex_unlock(&wait_map_lock);

    /* notify -- done after dropping wait_map_lock, since atomic_notify
       takes locks of its own */
    for (i = 0; i < args.index; i++) {
        wasm_runtime_atomic_notify(module_inst, args.addr[i], UINT32_MAX);
    }

    /* free memory allocated to args data */
    wasm_runtime_free(args.addr);
}
  129. WASMSharedMemNode *
  130. wasm_module_get_shared_memory(WASMModuleCommon *module)
  131. {
  132. return search_module(module);
  133. }
  134. int32
  135. shared_memory_inc_reference(WASMModuleCommon *module)
  136. {
  137. WASMSharedMemNode *node = search_module(module);
  138. uint32 ref_count = -1;
  139. if (node) {
  140. os_mutex_lock(&node->lock);
  141. ref_count = ++node->ref_count;
  142. os_mutex_unlock(&node->lock);
  143. }
  144. return ref_count;
  145. }
  146. int32
  147. shared_memory_dec_reference(WASMModuleCommon *module)
  148. {
  149. WASMSharedMemNode *node = search_module(module);
  150. uint32 ref_count = 0;
  151. if (node) {
  152. os_mutex_lock(&node->lock);
  153. ref_count = --node->ref_count;
  154. os_mutex_unlock(&node->lock);
  155. if (ref_count == 0) {
  156. os_mutex_lock(&shared_memory_list_lock);
  157. bh_list_remove(shared_memory_list, node);
  158. os_mutex_unlock(&shared_memory_list_lock);
  159. os_mutex_destroy(&node->shared_mem_lock);
  160. os_mutex_destroy(&node->lock);
  161. wasm_runtime_free(node);
  162. }
  163. return ref_count;
  164. }
  165. return -1;
  166. }
  167. WASMMemoryInstanceCommon *
  168. shared_memory_get_memory_inst(WASMSharedMemNode *node)
  169. {
  170. return node->memory_inst;
  171. }
  172. WASMSharedMemNode *
  173. shared_memory_set_memory_inst(WASMModuleCommon *module,
  174. WASMMemoryInstanceCommon *memory)
  175. {
  176. WASMSharedMemNode *node;
  177. bh_list_status ret;
  178. if (!(node = wasm_runtime_malloc(sizeof(WASMSharedMemNode))))
  179. return NULL;
  180. node->module = module;
  181. node->memory_inst = memory;
  182. node->ref_count = 1;
  183. if (os_mutex_init(&node->shared_mem_lock) != 0) {
  184. wasm_runtime_free(node);
  185. return NULL;
  186. }
  187. if (os_mutex_init(&node->lock) != 0) {
  188. os_mutex_destroy(&node->shared_mem_lock);
  189. wasm_runtime_free(node);
  190. return NULL;
  191. }
  192. os_mutex_lock(&shared_memory_list_lock);
  193. ret = bh_list_insert(shared_memory_list, node);
  194. bh_assert(ret == BH_LIST_SUCCESS);
  195. os_mutex_unlock(&shared_memory_list_lock);
  196. (void)ret;
  197. return node;
  198. }
  199. /* Atomics wait && notify APIs */
  200. static uint32
  201. wait_address_hash(void *address)
  202. {
  203. return (uint32)(uintptr_t)address;
  204. }
  205. static bool
  206. wait_address_equal(void *h1, void *h2)
  207. {
  208. return h1 == h2 ? true : false;
  209. }
  210. static bool
  211. is_wait_node_exists(bh_list *wait_list, AtomicWaitNode *node)
  212. {
  213. AtomicWaitNode *curr;
  214. curr = bh_list_first_elem(wait_list);
  215. while (curr) {
  216. if (curr == node) {
  217. return true;
  218. }
  219. curr = bh_list_elem_next(curr);
  220. }
  221. return false;
  222. }
  223. static uint32
  224. notify_wait_list(bh_list *wait_list, uint32 count)
  225. {
  226. AtomicWaitNode *node, *next;
  227. uint32 i, notify_count = count;
  228. if ((count == UINT32_MAX) || (count > wait_list->len))
  229. notify_count = wait_list->len;
  230. node = bh_list_first_elem(wait_list);
  231. if (!node)
  232. return 0;
  233. for (i = 0; i < notify_count; i++) {
  234. bh_assert(node);
  235. next = bh_list_elem_next(node);
  236. os_mutex_lock(&node->wait_lock);
  237. node->status = S_NOTIFIED;
  238. /* wakeup */
  239. os_cond_signal(&node->wait_cond);
  240. os_mutex_unlock(&node->wait_lock);
  241. node = next;
  242. }
  243. return notify_count;
  244. }
/* Look up the AtomicWaitInfo for `address` in the wait map; when `create`
   is true and none exists, allocate, initialize, and register a new one.
   Returns NULL on a lookup miss (create == false) or on any failure.
   Holding wait_map_lock across find + insert keeps the pair atomic. */
static AtomicWaitInfo *
acquire_wait_info(void *address, bool create)
{
    AtomicWaitInfo *wait_info = NULL;
    bh_list_status ret;

    os_mutex_lock(&wait_map_lock); /* Make find + insert atomic */

    if (address)
        wait_info = (AtomicWaitInfo *)bh_hash_map_find(wait_map, address);

    if (!create) {
        os_mutex_unlock(&wait_map_lock);
        return wait_info;
    }

    /* No wait info on this address, create new info */
    if (!wait_info) {
        if (!(wait_info = (AtomicWaitInfo *)wasm_runtime_malloc(
                  sizeof(AtomicWaitInfo)))) {
            goto fail1;
        }
        memset(wait_info, 0, sizeof(AtomicWaitInfo));

        /* init wait list */
        wait_info->wait_list = &wait_info->wait_list_head;
        ret = bh_list_init(wait_info->wait_list);
        bh_assert(ret == BH_LIST_SUCCESS);

        /* init wait list lock */
        if (0 != os_mutex_init(&wait_info->wait_list_lock)) {
            goto fail2;
        }

        if (!bh_hash_map_insert(wait_map, address, (void *)wait_info)) {
            goto fail3;
        }
    }

    os_mutex_unlock(&wait_map_lock);

    bh_assert(wait_info);
    (void)ret;
    return wait_info;

/* unwind in reverse order of construction */
fail3:
    os_mutex_destroy(&wait_info->wait_list_lock);
fail2:
    wasm_runtime_free(wait_info);
fail1:
    os_mutex_unlock(&wait_map_lock);
    return NULL;
}
  288. static void
  289. destroy_wait_info(void *wait_info)
  290. {
  291. AtomicWaitNode *node, *next;
  292. if (wait_info) {
  293. node = bh_list_first_elem(((AtomicWaitInfo *)wait_info)->wait_list);
  294. while (node) {
  295. next = bh_list_elem_next(node);
  296. os_mutex_destroy(&node->wait_lock);
  297. os_cond_destroy(&node->wait_cond);
  298. wasm_runtime_free(node);
  299. node = next;
  300. }
  301. os_mutex_destroy(&((AtomicWaitInfo *)wait_info)->wait_list_lock);
  302. wasm_runtime_free(wait_info);
  303. }
  304. }
  305. static bool
  306. map_remove_wait_info(HashMap *wait_map_, AtomicWaitInfo *wait_info,
  307. void *address)
  308. {
  309. if (wait_info->wait_list->len > 0) {
  310. return false;
  311. }
  312. bh_hash_map_remove(wait_map_, address, NULL, NULL);
  313. return true;
  314. }
/* Implement memory.atomic.wait32/wait64: block the calling thread until
   another thread notifies `address`, or `timeout` elapses.
   Returns 0 (woken by notify), 1 ("not-equal": *address != expect),
   2 (timed out), or (uint32)-1 on error with an exception set. */
uint32
wasm_runtime_atomic_wait(WASMModuleInstanceCommon *module, void *address,
                         uint64 expect, int64 timeout, bool wait64)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    AtomicWaitInfo *wait_info;
    AtomicWaitNode *wait_node;
    WASMSharedMemNode *node;
    bool check_ret, is_timeout, no_wait, removed_from_map;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    /* an already-pending exception aborts the wait immediately */
    if (wasm_copy_exception(module_inst, NULL)) {
        return -1;
    }

    /* Currently we have only one memory instance */
    if (!module_inst->memories[0]->is_shared) {
        wasm_runtime_set_exception(module, "expected shared memory");
        return -1;
    }

    /* bounds check: the 4- or 8-byte cell must lie inside linear memory */
    if ((uint8 *)address < module_inst->memories[0]->memory_data
        || (uint8 *)address + (wait64 ? 8 : 4)
               > module_inst->memories[0]->memory_data_end) {
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }

    /* acquire the wait info, create new one if not exists */
    wait_info = acquire_wait_info(address, true);
    if (!wait_info) {
        wasm_runtime_set_exception(module, "failed to acquire wait_info");
        return -1;
    }

    /* NOTE(review): node is dereferenced below without a NULL check,
       unlike wasm_runtime_atomic_notify which guards it; presumably a
       shared memory always has a registered node -- confirm. */
    node = search_module((WASMModuleCommon *)module_inst->module);

    /* compare under the shared-memory lock so the check cannot
       interleave with a concurrent store + notify */
    os_mutex_lock(&node->shared_mem_lock);
    no_wait = (!wait64 && *(uint32 *)address != (uint32)expect)
              || (wait64 && *(uint64 *)address != expect);
    os_mutex_unlock(&node->shared_mem_lock);

    if (no_wait) {
        /* "not-equal" result, per the wasm threads proposal */
        return 1;
    }
    else {
        bh_list_status ret;

        if (!(wait_node = wasm_runtime_malloc(sizeof(AtomicWaitNode)))) {
            wasm_runtime_set_exception(module, "failed to create wait node");
            return -1;
        }
        memset(wait_node, 0, sizeof(AtomicWaitNode));

        if (0 != os_mutex_init(&wait_node->wait_lock)) {
            wasm_runtime_free(wait_node);
            return -1;
        }

        if (0 != os_cond_init(&wait_node->wait_cond)) {
            os_mutex_destroy(&wait_node->wait_lock);
            wasm_runtime_free(wait_node);
            return -1;
        }

        wait_node->status = S_WAITING;

        /* enqueue ourselves on this address's wait list */
        os_mutex_lock(&wait_info->wait_list_lock);
        ret = bh_list_insert(wait_info->wait_list, wait_node);
        os_mutex_unlock(&wait_info->wait_list_lock);
        bh_assert(ret == BH_LIST_SUCCESS);
        (void)ret;
    }

    /* condition wait start */
    os_mutex_lock(&wait_node->wait_lock);

    /* NOTE(review): timeout appears to be nanoseconds (wasm spec) divided
       by 1000 into the unit os_cond_reltimedwait expects -- confirm that
       unit is microseconds on all supported platforms. */
    os_cond_reltimedwait(&wait_node->wait_cond, &wait_node->wait_lock,
                         timeout < 0 ? BHT_WAIT_FOREVER
                                     : (uint64)timeout / 1000);

    /* still S_WAITING after waking means the condvar timed out */
    is_timeout = wait_node->status == S_WAITING ? true : false;
    os_mutex_unlock(&wait_node->wait_lock);

    /* lock order: shared_mem_lock, then wait_list_lock (matches notify) */
    os_mutex_lock(&node->shared_mem_lock);
    os_mutex_lock(&wait_info->wait_list_lock);

    check_ret = is_wait_node_exists(wait_info->wait_list, wait_node);
    bh_assert(check_ret);

    /* Remove wait node */
    bh_list_remove(wait_info->wait_list, wait_node);
    os_mutex_destroy(&wait_node->wait_lock);
    os_cond_destroy(&wait_node->wait_cond);
    wasm_runtime_free(wait_node);

    /* Release wait info if no wait nodes attached */
    removed_from_map = map_remove_wait_info(wait_map, wait_info, address);
    os_mutex_unlock(&wait_info->wait_list_lock);
    if (removed_from_map)
        destroy_wait_info(wait_info);

    os_mutex_unlock(&node->shared_mem_lock);

    (void)check_ret;
    return is_timeout ? 2 : 0;
}
/* Implement memory.atomic.notify: wake up to `count` threads blocked in
   atomic.wait on `address`. Returns the number of waiters woken, 0 if
   nobody is waiting, or (uint32)-1 with an exception set when the
   address is out of bounds. */
uint32
wasm_runtime_atomic_notify(WASMModuleInstanceCommon *module, void *address,
                           uint32 count)
{
    WASMModuleInstance *module_inst = (WASMModuleInstance *)module;
    uint32 notify_result;
    AtomicWaitInfo *wait_info;
    WASMSharedMemNode *node;
    bool out_of_bounds;

    bh_assert(module->module_type == Wasm_Module_Bytecode
              || module->module_type == Wasm_Module_AoT);

    /* node may be NULL here; every use below is guarded */
    node = search_module((WASMModuleCommon *)module_inst->module);
    if (node)
        os_mutex_lock(&node->shared_mem_lock);

    /* NOTE(review): the bounds check uses a fixed 4-byte width even
       though 64-bit waiters exist (atomic_wait checks 8 for wait64) --
       confirm against the spec and callers. */
    out_of_bounds =
        ((uint8 *)address < module_inst->memories[0]->memory_data
         || (uint8 *)address + 4 > module_inst->memories[0]->memory_data_end);

    if (out_of_bounds) {
        if (node)
            os_mutex_unlock(&node->shared_mem_lock);
        wasm_runtime_set_exception(module, "out of bounds memory access");
        return -1;
    }

    /* lookup only; do not create wait info on the notify path */
    wait_info = acquire_wait_info(address, false);

    /* Nobody wait on this address */
    if (!wait_info) {
        if (node)
            os_mutex_unlock(&node->shared_mem_lock);
        return 0;
    }

    /* wake the waiters under the per-address list lock */
    os_mutex_lock(&wait_info->wait_list_lock);
    notify_result = notify_wait_list(wait_info->wait_list, count);
    os_mutex_unlock(&wait_info->wait_list_lock);

    if (node)
        os_mutex_unlock(&node->shared_mem_lock);

    return notify_result;
}