aot_emit_memory.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "aot_emit_memory.h"
#include "aot_emit_exception.h"
#include "../aot/aot_runtime.h"

#define BUILD_ICMP(op, left, right, res, name) do {             \
    if (!(res = LLVMBuildICmp(comp_ctx->builder, op,            \
                              left, right, name))) {            \
        aot_set_last_error("llvm build icmp failed.");          \
        goto fail;                                              \
    }                                                           \
} while (0)

#define BUILD_OP(Op, left, right, res, name) do {               \
    if (!(res = LLVMBuild##Op(comp_ctx->builder,                \
                              left, right, name))) {            \
        aot_set_last_error("llvm build " #Op " failed.");       \
        goto fail;                                              \
    }                                                           \
} while (0)

#define ADD_BASIC_BLOCK(block, name) do {                            \
    if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,   \
                                                func_ctx->func,      \
                                                name))) {            \
        aot_set_last_error("llvm add basic block failed.");          \
        goto fail;                                                   \
    }                                                                \
} while (0)

#define SET_BUILD_POS(block) \
    LLVMPositionBuilderAtEnd(comp_ctx->builder, block)
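
/*
 * Note: the BUILD_* helpers above assume that `comp_ctx` (and, for
 * ADD_BASIC_BLOCK, `func_ctx`) is in scope and that the enclosing
 * function defines a `fail:` label: on any LLVM builder error they
 * record a message via aot_set_last_error() and jump to that label.
 */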

static LLVMValueRef
get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       uint32 bytes)
{
    LLVMValueRef mem_check_bound = NULL;

    switch (bytes) {
        case 1:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_1byte;
            break;
        case 2:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_2bytes;
            break;
        case 4:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_4bytes;
            break;
        case 8:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
            break;
        case 16:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_16bytes;
            break;
        default:
            bh_assert(0);
            return NULL;
    }

    if (func_ctx->mem_space_unchanged)
        return mem_check_bound;

    if (!(mem_check_bound = LLVMBuildLoad(comp_ctx->builder,
                                          mem_check_bound,
                                          "mem_check_bound"))) {
        aot_set_last_error("llvm build load failed.");
        return NULL;
    }
    return mem_check_bound;
}

static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
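
/*
 * Emit the bounds check for a memory access of `bytes` bytes at
 * `addr + offset`, where `addr` is popped from the value stack, and
 * return the native address to access (NULL on error). Fast paths:
 * a constant address that provably falls inside the initial memory
 * needs no runtime check, and an access through a local that was
 * already checked for the same offset/size reuses the earlier check
 * (see aot_checked_addr_list_find/add). On 32-bit targets the
 * addition can wrap, so an extra unsigned-overflow compare is
 * emitted before the bound compare.
 */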

LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint32 offset, uint32 bytes)
{
    LLVMValueRef offset_const = I32_CONST(offset);
    LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
    LLVMValueRef mem_base_addr, mem_check_bound;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    AOTValue *aot_value;
    uint32 local_idx_of_aot_value = 0;
    bool is_target_64bit, is_local_of_aot_value = false;
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].memory_flags & 0x02;
#endif

    is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64))
                      ? true : false;

    CHECK_LLVM_CONST(offset_const);

    /* Get memory base address and memory data size */
    if (func_ctx->mem_space_unchanged
#if WASM_ENABLE_SHARED_MEMORY != 0
        || is_shared_memory
#endif
       ) {
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr =
                LLVMBuildLoad(comp_ctx->builder,
                              func_ctx->mem_info[0].mem_base_addr,
                              "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    aot_value =
        func_ctx->block_stack.block_list_end->value_stack.value_list_end;
    if (aot_value) {
        /* aot_value is freed in the following POP_I32(addr),
           so save its fields here for further use */
        is_local_of_aot_value = aot_value->is_local;
        local_idx_of_aot_value = aot_value->local_idx;
    }

    POP_I32(addr);

    /* Return the address directly if the offset is constant
       and the access provably stays inside the memory space */
    if (LLVMIsConstant(addr)) {
        uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(addr)
                            + (uint64)offset;
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        /* multiply as uint64 to avoid 32-bit overflow */
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_offset + bytes <= mem_data_size) {
            /* inside memory space */
            offset1 = I32_CONST((uint32)mem_offset);
            CHECK_LLVM_CONST(offset1);
            if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder,
                                               mem_base_addr,
                                               &offset1, 1, "maddr"))) {
                aot_set_last_error("llvm build inbounds gep failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (is_target_64bit) {
        if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
                                           I64_TYPE, "offset_i64"))
            || !(addr = LLVMBuildZExt(comp_ctx->builder, addr,
                                      I64_TYPE, "addr_i64"))) {
            aot_set_last_error("llvm build zero extend failed.");
            goto fail;
        }
    }

    /* offset1 = offset + addr; */
    BUILD_OP(Add, offset_const, addr, offset1, "offset1");

    if (comp_ctx->enable_bound_check
        && !(is_local_of_aot_value
             && aot_checked_addr_list_find(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))) {
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;

        if (init_page_count == 0) {
            LLVMValueRef mem_size;

            if (!(mem_size = get_memory_curr_page_count(comp_ctx,
                                                        func_ctx))) {
                goto fail;
            }
            BUILD_ICMP(LLVMIntEQ, mem_size, I32_ZERO, cmp, "is_zero");
            ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
            LLVMMoveBasicBlockAfter(check_succ, block_curr);
            if (!aot_emit_exception(comp_ctx, func_ctx,
                                    EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                                    true, cmp, check_succ)) {
                goto fail;
            }
            SET_BUILD_POS(check_succ);
            block_curr = check_succ;
        }

        if (!(mem_check_bound =
                get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
            goto fail;
        }

        if (is_target_64bit) {
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
        }
        else {
            /* Check integer overflow */
            BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp2, "cmp2");
            BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
        }

        /* Add basic blocks */
        ADD_BASIC_BLOCK(check_succ, "check_succ");
        LLVMMoveBasicBlockAfter(check_succ, block_curr);
        if (!aot_emit_exception(comp_ctx, func_ctx,
                                EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                                true, cmp, check_succ)) {
            goto fail;
        }
        SET_BUILD_POS(check_succ);

        if (is_local_of_aot_value) {
            if (!aot_checked_addr_list_add(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))
                goto fail;
        }
    }

    /* maddr = mem_base_addr + offset1 */
    if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder, mem_base_addr,
                                       &offset1, 1, "maddr"))) {
        aot_set_last_error("llvm build inbounds gep failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}

#define BUILD_PTR_CAST(ptr_type) do {                           \
    if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr,    \
                                   ptr_type, "data_ptr"))) {    \
        aot_set_last_error("llvm build bit cast failed.");      \
        goto fail;                                              \
    }                                                           \
} while (0)

#define BUILD_LOAD() do {                                       \
    if (!(value = LLVMBuildLoad(comp_ctx->builder, maddr,       \
                                "data"))) {                     \
        aot_set_last_error("llvm build load failed.");          \
        goto fail;                                              \
    }                                                           \
    LLVMSetAlignment(value, 1);                                 \
} while (0)

#define BUILD_TRUNC(value, data_type) do {                      \
    if (!(value = LLVMBuildTrunc(comp_ctx->builder, value,      \
                                 data_type, "val_trunc"))) {    \
        aot_set_last_error("llvm build trunc failed.");         \
        goto fail;                                              \
    }                                                           \
} while (0)

#define BUILD_STORE() do {                                      \
    LLVMValueRef res;                                           \
    if (!(res = LLVMBuildStore(comp_ctx->builder, value,        \
                               maddr))) {                       \
        aot_set_last_error("llvm build store failed.");         \
        goto fail;                                              \
    }                                                           \
    LLVMSetAlignment(res, 1);                                   \
} while (0)

#define BUILD_SIGN_EXT(dst_type) do {                           \
    if (!(value = LLVMBuildSExt(comp_ctx->builder, value,       \
                                dst_type, "data_s_ext"))) {     \
        aot_set_last_error("llvm build sign ext failed.");      \
        goto fail;                                              \
    }                                                           \
} while (0)

#define BUILD_ZERO_EXT(dst_type) do {                           \
    if (!(value = LLVMBuildZExt(comp_ctx->builder, value,       \
                                dst_type, "data_z_ext"))) {     \
        aot_set_last_error("llvm build zero ext failed.");      \
        goto fail;                                              \
    }                                                           \
} while (0)
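
/*
 * Note: BUILD_LOAD/BUILD_STORE set the LLVM alignment to 1 because a
 * Wasm linear-memory access may be unaligned no matter what alignment
 * the opcode hints; the atomic variants below instead require, and
 * check at run time, the natural (1 << align) alignment.
 */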

#if WASM_ENABLE_SHARED_MEMORY != 0
bool
check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       LLVMValueRef addr, uint32 align)
{
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_align_succ;
    LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
    LLVMValueRef res;

    CHECK_LLVM_CONST(align_mask);

    /* Convert pointer to int */
    if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr,
                                   I32_TYPE, "address"))) {
        aot_set_last_error("llvm build ptr to int failed.");
        goto fail;
    }

    /* The memory address should be aligned */
    BUILD_OP(And, addr, align_mask, res, "and");
    BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");

    /* Add basic blocks */
    ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
    LLVMMoveBasicBlockAfter(check_align_succ, block_curr);

    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_UNALIGNED_ATOMIC,
                            true, res, check_align_succ)) {
        goto fail;
    }

    SET_BUILD_POS(check_align_succ);
    return true;
fail:
    return false;
}

#define BUILD_ATOMIC_LOAD(align) do {                                   \
    if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) {  \
        goto fail;                                                      \
    }                                                                   \
    if (!(value = LLVMBuildLoad(comp_ctx->builder, maddr,               \
                                "data"))) {                             \
        aot_set_last_error("llvm build load failed.");                  \
        goto fail;                                                      \
    }                                                                   \
    LLVMSetAlignment(value, 1 << align);                                \
    LLVMSetVolatile(value, true);                                       \
    LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent);   \
} while (0)

#define BUILD_ATOMIC_STORE(align) do {                                  \
    LLVMValueRef res;                                                   \
    if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) {  \
        goto fail;                                                      \
    }                                                                   \
    if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {     \
        aot_set_last_error("llvm build store failed.");                 \
        goto fail;                                                      \
    }                                                                   \
    LLVMSetAlignment(res, 1 << align);                                  \
    LLVMSetVolatile(res, true);                                         \
    LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent);     \
} while (0)
#endif
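
/*
 * All atomic accesses in this file are emitted with sequentially
 * consistent ordering, which is the ordering the Wasm threads
 * proposal specifies for atomic loads, stores and RMW operations.
 */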

bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    switch (bytes) {
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align);
            else
#endif
                BUILD_LOAD();
            break;
        case 2:
        case 1:
            if (bytes == 2)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align);
                BUILD_ZERO_EXT(I32_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD();
                if (sign)
                    BUILD_SIGN_EXT(I32_TYPE);
                else
                    BUILD_ZERO_EXT(I32_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I32(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align);
            else
#endif
                BUILD_LOAD();
            break;
        case 4:
        case 2:
        case 1:
            if (bytes == 4)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else if (bytes == 2)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align);
                BUILD_ZERO_EXT(I64_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD();
                if (sign)
                    BUILD_SIGN_EXT(I64_TYPE);
                else
                    BUILD_ZERO_EXT(I64_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I64(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
        return false;

    BUILD_PTR_CAST(F32_PTR_TYPE);
    BUILD_LOAD();
    PUSH_F32(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
        return false;

    BUILD_PTR_CAST(F64_PTR_TYPE);
    BUILD_LOAD();
    PUSH_F64(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;

    POP_I32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    switch (bytes) {
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;

    POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    POP_F32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
        return false;

    BUILD_PTR_CAST(F32_PTR_TYPE);
    BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    POP_F64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
        return false;

    BUILD_PTR_CAST(F64_PTR_TYPE);
    BUILD_STORE();
    return true;
fail:
    return false;
}

static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size;

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_cur_page_count_addr;
    }
    else {
        if (!(mem_size =
                LLVMBuildLoad(comp_ctx->builder,
                              func_ctx->mem_info[0].mem_cur_page_count_addr,
                              "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }
    return mem_size;
fail:
    return NULL;
}

bool
aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);

    if (mem_size)
        PUSH_I32(mem_size);
    return mem_size ? true : false;
fail:
    return false;
}
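
/*
 * memory.grow: call aot_enlarge_memory() in the runtime and push the
 * previous page count on success or -1 on failure, as the Wasm spec
 * requires. In JIT mode the callee address is baked in as a pointer
 * constant; in AOT mode the function is declared and resolved at
 * link/load time.
 */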

bool
aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
    LLVMValueRef delta, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;

    if (!mem_size)
        return false;

    POP_I32(delta);

    /* Function type of aot_enlarge_memory() */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
        aot_set_last_error("llvm add function type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        /* JIT mode: call the function directly */
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("llvm add pointer type failed.");
            return false;
        }
        if (!(value = I64_CONST((uint64)(uintptr_t)aot_enlarge_memory))
            || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else {
        char *func_name = "aot_enlarge_memory";
        /* AOT mode: declare the function */
        if (!(func = LLVMGetNamedFunction(comp_ctx->module, func_name))
            && !(func = LLVMAddFunction(comp_ctx->module,
                                        func_name, func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    /* Call function aot_enlarge_memory() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = delta;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");

    /* ret_value = ret_value == true ? pre_page_count : -1 */
    if (!(ret_value = LLVMBuildSelect(comp_ctx->builder, ret_value,
                                      mem_size, I32_NEG_ONE,
                                      "mem_grow_ret"))) {
        aot_set_last_error("llvm build select failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}
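
/*
 * Bulk memory operations (memory.init/copy/fill, data.drop) take a
 * run-time length operand, so their bounds are checked against the
 * current memory data size loaded at run time rather than against
 * the per-access-size bounds used by the plain load/store path.
 */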

#if WASM_ENABLE_BULK_MEMORY != 0
static LLVMValueRef
check_bulk_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           LLVMValueRef offset, LLVMValueRef bytes)
{
    LLVMValueRef maddr, max_addr, cmp;
    LLVMValueRef mem_base_addr;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    LLVMValueRef mem_size;

    /* Get memory base address and memory data size */
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].memory_flags & 0x02;

    if (func_ctx->mem_space_unchanged || is_shared_memory) {
#else
    if (func_ctx->mem_space_unchanged) {
#endif
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr =
                LLVMBuildLoad(comp_ctx->builder,
                              func_ctx->mem_info[0].mem_base_addr,
                              "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    /* Return the address directly if both operands are constant
       and the access provably stays inside the memory space */
    if (LLVMIsConstant(offset) && LLVMIsConstant(bytes)) {
        uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(offset);
        uint64 mem_len = (uint64)LLVMConstIntGetZExtValue(bytes);
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        /* multiply as uint64 to avoid 32-bit overflow */
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_data_size > 0
            && mem_offset + mem_len <= mem_data_size) {
            /* inside memory space */
            /* maddr = mem_base_addr + mem_offset */
            if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder,
                                               mem_base_addr,
                                               &offset, 1, "maddr"))) {
                aot_set_last_error("llvm build inbounds gep failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_data_size_addr;
    }
    else {
        if (!(mem_size =
                LLVMBuildLoad(comp_ctx->builder,
                              func_ctx->mem_info[0].mem_data_size_addr,
                              "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    ADD_BASIC_BLOCK(check_succ, "check_succ");
    LLVMMoveBasicBlockAfter(check_succ, block_curr);

    if (!(offset = LLVMBuildZExt(comp_ctx->builder, offset,
                                 I64_TYPE, "extend_offset"))
        || !(bytes = LLVMBuildZExt(comp_ctx->builder, bytes,
                                   I64_TYPE, "extend_len"))
        || !(mem_size = LLVMBuildZExt(comp_ctx->builder, mem_size,
                                      I64_TYPE, "extend_size"))) {
        aot_set_last_error("llvm build zero extend failed.");
        goto fail;
    }

    BUILD_OP(Add, offset, bytes, max_addr, "max_addr");
    BUILD_ICMP(LLVMIntUGT, max_addr, mem_size, cmp, "cmp_max_mem_addr");

    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                            true, cmp, check_succ)) {
        goto fail;
    }

    /* maddr = mem_base_addr + offset */
    if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder, mem_base_addr,
                                       &offset, 1, "maddr"))) {
        aot_set_last_error("llvm build inbounds gep failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}
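
/*
 * memory.init and data.drop call into the runtime
 * (aot_memory_init()/aot_data_drop()). GET_AOT_FUNCTION is assumed to
 * resolve the callee either as a direct pointer (JIT mode) or as a
 * declared external symbol (AOT mode), in the same way as the
 * hand-written lookup in aot_compile_op_memory_grow() above.
 */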

bool
aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint32 seg_index)
{
    LLVMValueRef seg, offset, dst, len, param_values[5], ret_value, func,
                 value;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef mem_init_fail, init_success;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    POP_I32(len);
    POP_I32(offset);
    POP_I32(dst);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = I32_TYPE;
    param_types[3] = I32_TYPE;
    param_types[4] = I32_TYPE;
    ret_type = INT8_TYPE;

    GET_AOT_FUNCTION(aot_memory_init, 5);

    /* Call function aot_memory_init() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    param_values[2] = offset;
    param_values[3] = len;
    param_values[4] = dst;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_init_ret");

    ADD_BASIC_BLOCK(mem_init_fail, "mem_init_fail");
    ADD_BASIC_BLOCK(init_success, "init_success");
    LLVMMoveBasicBlockAfter(mem_init_fail, block_curr);
    LLVMMoveBasicBlockAfter(init_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, ret_value,
                         init_success, mem_init_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If memory.init failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
    if (!aot_build_zero_function_ret(comp_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
    return true;
fail:
    return false;
}

bool
aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 seg_index)
{
    LLVMValueRef seg, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    GET_AOT_FUNCTION(aot_data_drop, 2);

    /* Call function aot_data_drop() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }
    return true;
fail:
    return false;
}
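
/*
 * memory.copy and memory.fill lower directly to LLVM's memmove/memset
 * intrinsics with alignment 1: both regions may be unaligned, and the
 * source and destination of memory.copy may overlap, which is why
 * memmove rather than memcpy is emitted.
 */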

bool
aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef src, dst, src_addr, dst_addr, len, res;

    POP_I32(len);
    POP_I32(src);
    POP_I32(dst);

    if (!(src_addr =
            check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
        return false;

    if (!(dst_addr =
            check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1,
                                 src_addr, 1, len))) {
        aot_set_last_error("llvm build memmove failed.");
        return false;
    }
    return true;
fail:
    return false;
}

bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef val, dst, dst_addr, len, res;

    POP_I32(len);
    POP_I32(val);
    POP_I32(dst);

    if (!(dst_addr =
            check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    if (!(val = LLVMBuildIntCast2(comp_ctx->builder, val, INT8_TYPE,
                                  true, "mem_set_value"))) {
        aot_set_last_error("llvm build int cast failed.");
        return false;
    }

    if (!(res = LLVMBuildMemSet(comp_ctx->builder, dst_addr,
                                val, len, 1))) {
        aot_set_last_error("llvm build memset failed.");
        return false;
    }
    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_BULK_MEMORY */

#if WASM_ENABLE_SHARED_MEMORY != 0
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx,
                          AOTFuncContext *func_ctx,
                          uint8 atomic_op, uint8 op_type,
                          uint32 align, uint32 offset,
                          uint32 bytes)
{
    LLVMValueRef maddr, value, result;

    if (op_type == VALUE_TYPE_I32)
        POP_I32(value);
    else
        POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            if (op_type == VALUE_TYPE_I64)
                BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result =
            LLVMBuildAtomicRMW(comp_ctx->builder,
                               atomic_op, maddr, value,
                               LLVMAtomicOrderingSequentiallyConsistent,
                               false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I32_TYPE, "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I64_TYPE, "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}

bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              uint8 op_type, uint32 align,
                              uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, expect, result;

    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
        POP_I32(expect);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
    }

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            if (op_type == VALUE_TYPE_I64) {
                BUILD_TRUNC(value, I32_TYPE);
                BUILD_TRUNC(expect, I32_TYPE);
            }
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            BUILD_TRUNC(expect, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            BUILD_TRUNC(expect, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result =
            LLVMBuildAtomicCmpXchg(comp_ctx->builder, maddr, expect, value,
                                   LLVMAtomicOrderingSequentiallyConsistent,
                                   LLVMAtomicOrderingSequentiallyConsistent,
                                   false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    /* CmpXchg returns an {i<N>, i1} structure;
       extract the previous value from it */
    if (!(result =
            LLVMBuildExtractValue(comp_ctx->builder,
                                  result, 0, "previous_value"))) {
        goto fail;
    }

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I32_TYPE, "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I64_TYPE, "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}
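
/*
 * atomic.wait is routed through wasm_runtime_atomic_wait(); the
 * generated code takes the success path only for a positive return
 * value, and otherwise returns a zero value from the current function
 * so the runtime can surface the pending exception.
 */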

bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint8 op_type, uint32 align,
                           uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, timeout, expect, cmp;
    LLVMValueRef param_values[5], ret_value, func, is_wait64;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    LLVMBasicBlockRef wait_fail, wait_success;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;

    POP_I64(timeout);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(expect);
        is_wait64 = I8_CONST(false);
        if (!(expect =
                LLVMBuildZExt(comp_ctx->builder, expect,
                              I64_TYPE, "expect_i64"))) {
            goto fail;
        }
    }
    else {
        POP_I64(expect);
        is_wait64 = I8_CONST(true);
    }
    CHECK_LLVM_CONST(is_wait64);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I64_TYPE;
    param_types[3] = I64_TYPE;
    param_types[4] = INT8_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);

    /* Call function wasm_runtime_atomic_wait() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = expect;
    param_values[3] = timeout;
    param_values[4] = is_wait64;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntSGT, ret_value, I32_ZERO, cmp, "atomic_wait_ret");

    ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
    ADD_BASIC_BLOCK(wait_success, "wait_success");
    LLVMMoveBasicBlockAfter(wait_fail, block_curr);
    LLVMMoveBasicBlockAfter(wait_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, cmp,
                         wait_success, wait_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If atomic wait failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
    if (!aot_build_zero_function_ret(comp_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}
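
/*
 * atomic.notify forwards to wasm_runtime_atomic_notify() and pushes
 * its i32 result, which is expected to be the number of waiters that
 * were woken up.
 */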

bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              uint32 align, uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, count;
    LLVMValueRef param_values[3], ret_value, func;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;

    POP_I32(count);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);

    /* Call function wasm_runtime_atomic_notify() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = count;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 3, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */