aot_emit_memory.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "aot_emit_memory.h"
#include "aot_emit_exception.h"
#include "../aot/aot_runtime.h"

#define BUILD_ICMP(op, left, right, res, name)                      \
    do {                                                            \
        if (!(res = LLVMBuildICmp(comp_ctx->builder, op,            \
                                  left, right, name))) {            \
            aot_set_last_error("llvm build icmp failed.");          \
            goto fail;                                              \
        }                                                           \
    } while (0)

#define BUILD_OP(Op, left, right, res, name)                        \
    do {                                                            \
        if (!(res = LLVMBuild##Op(comp_ctx->builder,                \
                                  left, right, name))) {            \
            aot_set_last_error("llvm build " #Op " failed.");       \
            goto fail;                                              \
        }                                                           \
    } while (0)

#define ADD_BASIC_BLOCK(block, name)                                      \
    do {                                                                  \
        if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,    \
                                                    func_ctx->func,       \
                                                    name))) {             \
            aot_set_last_error("llvm add basic block failed.");           \
            goto fail;                                                    \
        }                                                                 \
    } while (0)

#define SET_BUILD_POS(block) \
    LLVMPositionBuilderAtEnd(comp_ctx->builder, block)

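/* Select the bound against which a linear-memory access of `bytes`
   bytes is checked: when the memory space can never change at run time
   the precomputed LLVM value is returned directly, otherwise the
   current bound is loaded from the function's mem_info slot. */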
static LLVMValueRef
get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       uint32 bytes)
{
    LLVMValueRef mem_check_bound = NULL;

    switch (bytes) {
        case 1:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_1byte;
            break;
        case 2:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_2bytes;
            break;
        case 4:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_4bytes;
            break;
        case 8:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
            break;
        case 16:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_16bytes;
            break;
        default:
            bh_assert(0);
            return NULL;
    }

    if (func_ctx->mem_space_unchanged)
        return mem_check_bound;

    if (!(mem_check_bound = LLVMBuildLoad(comp_ctx->builder,
                                          mem_check_bound,
                                          "mem_check_bound"))) {
        aot_set_last_error("llvm build load failed.");
        return NULL;
    }
    return mem_check_bound;
}

static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);

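/*
 * Pop an i32 address from the value stack, emit a bounds check for an
 * access of `bytes` bytes at address + `offset`, and return the native
 * address (mem_base_addr + addr + offset). Constant addresses that
 * provably stay inside the initial memory space are returned without a
 * check, and local-backed addresses already checked for the same
 * (local, offset, bytes) triple are skipped via the checked-addr list.
 * On 64-bit targets the emitted check is roughly:
 *
 *     offset1 = zext(offset) + zext(addr)
 *     if (offset1 > mem_check_bound) trap out-of-bounds
 *     maddr   = mem_base_addr + offset1
 *
 * On 32-bit targets an extra unsigned-overflow test on the addition is
 * OR'ed into the comparison.
 */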
LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint32 offset, uint32 bytes)
{
    LLVMValueRef offset_const = I32_CONST(offset);
    LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
    LLVMValueRef mem_base_addr, mem_check_bound;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    AOTValue *aot_value;
    uint32 local_idx_of_aot_value = 0;
    bool is_target_64bit, is_local_of_aot_value = false;
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].memory_flags & 0x02;
#endif

    is_target_64bit =
        (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;

    CHECK_LLVM_CONST(offset_const);

    /* Get memory base address and memory data size */
    if (func_ctx->mem_space_unchanged
#if WASM_ENABLE_SHARED_MEMORY != 0
        || is_shared_memory
#endif
       ) {
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr =
                LLVMBuildLoad(comp_ctx->builder,
                              func_ctx->mem_info[0].mem_base_addr,
                              "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    aot_value =
        func_ctx->block_stack.block_list_end->value_stack.value_list_end;
    if (aot_value) {
        /* aot_value is freed in the following POP_I32(addr),
           so save its fields here for further use */
        is_local_of_aot_value = aot_value->is_local;
        local_idx_of_aot_value = aot_value->local_idx;
    }

    POP_I32(addr);

    /*
     * Note: we don't throw the integer-overflow exception here since it
     * must already have been thrown when converting a float to an
     * integer earlier
     */
    /* Return the address directly if the offset is constant and the
       access stays inside the initial memory space */
    if (LLVMIsConstant(addr) && !LLVMIsUndef(addr)
#if LLVM_VERSION_NUMBER >= 12
        && !LLVMIsPoison(addr)
#endif
       ) {
        uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(addr)
                            + (uint64)offset;
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        /* Multiply in 64 bits to avoid a uint32 overflow */
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_offset + bytes <= mem_data_size) {
            /* inside memory space */
            offset1 = I32_CONST((uint32)mem_offset);
            CHECK_LLVM_CONST(offset1);
            if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder,
                                               mem_base_addr,
                                               &offset1, 1, "maddr"))) {
                aot_set_last_error("llvm build inbounds gep failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (is_target_64bit) {
        if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
                                           I64_TYPE, "offset_i64"))
            || !(addr = LLVMBuildZExt(comp_ctx->builder, addr,
                                      I64_TYPE, "addr_i64"))) {
            aot_set_last_error("llvm build zero extend failed.");
            goto fail;
        }
    }

    /* offset1 = offset + addr; */
    BUILD_OP(Add, offset_const, addr, offset1, "offset1");

    if (comp_ctx->enable_bound_check
        && !(is_local_of_aot_value
             && aot_checked_addr_list_find(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))) {
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        if (init_page_count == 0) {
            LLVMValueRef mem_size;

            if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) {
                goto fail;
            }
            BUILD_ICMP(LLVMIntEQ, mem_size, I32_ZERO, cmp, "is_zero");
            ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
            LLVMMoveBasicBlockAfter(check_succ, block_curr);
            if (!aot_emit_exception(comp_ctx, func_ctx,
                                    EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                                    true, cmp, check_succ)) {
                goto fail;
            }
            SET_BUILD_POS(check_succ);
            block_curr = check_succ;
        }

        if (!(mem_check_bound =
                get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
            goto fail;
        }

        if (is_target_64bit) {
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
        }
        else {
            /* Check integer overflow */
            BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp2, "cmp2");
            BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
        }

        /* Add basic blocks */
        ADD_BASIC_BLOCK(check_succ, "check_succ");
        LLVMMoveBasicBlockAfter(check_succ, block_curr);

        if (!aot_emit_exception(comp_ctx, func_ctx,
                                EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                                true, cmp, check_succ)) {
            goto fail;
        }

        SET_BUILD_POS(check_succ);

        if (is_local_of_aot_value) {
            if (!aot_checked_addr_list_add(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))
                goto fail;
        }
    }

    /* maddr = mem_base_addr + offset1 */
    if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder, mem_base_addr,
                                       &offset1, 1, "maddr"))) {
        aot_set_last_error("llvm build inbounds gep failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}

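/* Helper macros shared by the load/store emitters below: cast the
   checked native address to the required pointer type, then emit an
   unaligned (align = 1) load or store, truncating or sign/zero
   extending between the in-memory width and the wasm value type. */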
#define BUILD_PTR_CAST(ptr_type)                                     \
    do {                                                             \
        if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr,     \
                                       ptr_type, "data_ptr"))) {     \
            aot_set_last_error("llvm build bit cast failed.");       \
            goto fail;                                               \
        }                                                            \
    } while (0)

#define BUILD_LOAD()                                                 \
    do {                                                             \
        if (!(value = LLVMBuildLoad(comp_ctx->builder, maddr,        \
                                    "data"))) {                      \
            aot_set_last_error("llvm build load failed.");           \
            goto fail;                                               \
        }                                                            \
        LLVMSetAlignment(value, 1);                                  \
    } while (0)

#define BUILD_TRUNC(value, data_type)                                \
    do {                                                             \
        if (!(value = LLVMBuildTrunc(comp_ctx->builder, value,       \
                                     data_type, "val_trunc"))) {     \
            aot_set_last_error("llvm build trunc failed.");          \
            goto fail;                                               \
        }                                                            \
    } while (0)

#define BUILD_STORE()                                                \
    do {                                                             \
        LLVMValueRef res;                                            \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value,         \
                                   maddr))) {                        \
            aot_set_last_error("llvm build store failed.");          \
            goto fail;                                               \
        }                                                            \
        LLVMSetAlignment(res, 1);                                    \
    } while (0)

#define BUILD_SIGN_EXT(dst_type)                                     \
    do {                                                             \
        if (!(value = LLVMBuildSExt(comp_ctx->builder, value,        \
                                    dst_type, "data_s_ext"))) {      \
            aot_set_last_error("llvm build sign ext failed.");       \
            goto fail;                                               \
        }                                                            \
    } while (0)

#define BUILD_ZERO_EXT(dst_type)                                     \
    do {                                                             \
        if (!(value = LLVMBuildZExt(comp_ctx->builder, value,        \
                                    dst_type, "data_z_ext"))) {      \
            aot_set_last_error("llvm build zero ext failed.");       \
            goto fail;                                               \
        }                                                            \
    } while (0)

#if WASM_ENABLE_SHARED_MEMORY != 0
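/* Trap with EXCE_UNALIGNED_ATOMIC unless the native address is aligned
   on the 2^align byte boundary the atomic operation requires. */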
bool
check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       LLVMValueRef addr, uint32 align)
{
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_align_succ;
    LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
    LLVMValueRef res;

    CHECK_LLVM_CONST(align_mask);

    /* Convert pointer to int */
    if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr,
                                   I32_TYPE, "address"))) {
        aot_set_last_error("llvm build ptr to int failed.");
        goto fail;
    }

    /* The memory address should be aligned */
    BUILD_OP(And, addr, align_mask, res, "and");
    BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");

    /* Add basic blocks */
    ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
    LLVMMoveBasicBlockAfter(check_align_succ, block_curr);

    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_UNALIGNED_ATOMIC,
                            true, res, check_align_succ)) {
        goto fail;
    }

    SET_BUILD_POS(check_align_succ);
    return true;
fail:
    return false;
}

#define BUILD_ATOMIC_LOAD(align)                                           \
    do {                                                                   \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(value = LLVMBuildLoad(comp_ctx->builder, maddr,              \
                                    "data"))) {                            \
            aot_set_last_error("llvm build load failed.");                 \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(value, 1 << align);                               \
        LLVMSetVolatile(value, true);                                      \
        LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent);  \
    } while (0)

#define BUILD_ATOMIC_STORE(align)                                          \
    do {                                                                   \
        LLVMValueRef res;                                                  \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {    \
            aot_set_last_error("llvm build store failed.");                \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(res, 1 << align);                                 \
        LLVMSetVolatile(res, true);                                        \
        LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent);    \
    } while (0)
#endif

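/* i32.load and its narrow variants: bounds-check the access, cast the
   native address to the loaded width, then load and sign/zero extend
   the result to i32. Atomic variants also check alignment and are
   emitted as volatile sequentially-consistent loads. */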
bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    switch (bytes) {
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align);
            else
#endif
                BUILD_LOAD();
            break;
        case 2:
        case 1:
            if (bytes == 2)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align);
                BUILD_ZERO_EXT(I32_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD();
                if (sign)
                    BUILD_SIGN_EXT(I32_TYPE);
                else
                    BUILD_ZERO_EXT(I32_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I32(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align);
            else
#endif
                BUILD_LOAD();
            break;
        case 4:
        case 2:
        case 1:
            if (bytes == 4)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else if (bytes == 2)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align);
                BUILD_ZERO_EXT(I64_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD();
                if (sign)
                    BUILD_SIGN_EXT(I64_TYPE);
                else
                    BUILD_ZERO_EXT(I64_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I64(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
        return false;

    BUILD_PTR_CAST(F32_PTR_TYPE);
    BUILD_LOAD();
    PUSH_F32(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
        return false;

    BUILD_PTR_CAST(F64_PTR_TYPE);
    BUILD_LOAD();
    PUSH_F64(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;

    POP_I32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    switch (bytes) {
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;

    POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    POP_F32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
        return false;

    BUILD_PTR_CAST(F32_PTR_TYPE);
    BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    POP_F64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
        return false;

    BUILD_PTR_CAST(F64_PTR_TYPE);
    BUILD_STORE();
    return true;
fail:
    return false;
}

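/* Current page count of the default memory: the cached value when the
   memory space cannot change, otherwise a load from mem_info. */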
static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size;

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_cur_page_count_addr;
    }
    else {
        if (!(mem_size =
                LLVMBuildLoad(comp_ctx->builder,
                              func_ctx->mem_info[0].mem_cur_page_count_addr,
                              "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }
    return mem_size;
fail:
    return NULL;
}

bool
aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);

    if (mem_size)
        PUSH_I32(mem_size);
    return mem_size ? true : false;
fail:
    return false;
}

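/* memory.grow: call the runtime's aot_enlarge_memory() and push the
   previous page count on success, or -1 on failure. Depending on the
   compilation mode the callee is reached through a direct function
   pointer (JIT), the native symbol table (indirect mode), or a
   declared external symbol (AOT). */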
bool
aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
    LLVMValueRef delta, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
    int32 func_index;

    if (!mem_size)
        return false;

    POP_I32(delta);

    /* Function type of aot_enlarge_memory() */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
        aot_set_last_error("llvm add function type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        /* JIT mode, call the function directly */
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("llvm add pointer type failed.");
            return false;
        }
        if (!(value = I64_CONST((uint64)(uintptr_t)aot_enlarge_memory))
            || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }
        func_index =
            aot_get_native_symbol_index(comp_ctx, "aot_enlarge_memory");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        char *func_name = "aot_enlarge_memory";
        /* AOT mode, declare the function */
        if (!(func = LLVMGetNamedFunction(comp_ctx->module, func_name))
            && !(func = LLVMAddFunction(comp_ctx->module,
                                        func_name, func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    /* Call function aot_enlarge_memory() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = delta;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");

    /* ret_value = ret_value ? pre_page_count : -1 */
    if (!(ret_value = LLVMBuildSelect(comp_ctx->builder, ret_value,
                                      mem_size, I32_NEG_ONE,
                                      "mem_grow_ret"))) {
        aot_set_last_error("llvm build select failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}

#if WASM_ENABLE_BULK_MEMORY != 0
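/* Bounds check for bulk-memory operations whose length is a run-time
   value: trap unless offset + bytes <= current memory data size, then
   return mem_base_addr + offset. */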
static LLVMValueRef
check_bulk_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           LLVMValueRef offset, LLVMValueRef bytes)
{
    LLVMValueRef maddr, max_addr, cmp;
    LLVMValueRef mem_base_addr;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    LLVMValueRef mem_size;

    /* Get memory base address and memory data size */
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].memory_flags & 0x02;

    if (func_ctx->mem_space_unchanged || is_shared_memory) {
#else
    if (func_ctx->mem_space_unchanged) {
#endif
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr =
                LLVMBuildLoad(comp_ctx->builder,
                              func_ctx->mem_info[0].mem_base_addr,
                              "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    /*
     * Note: we don't throw the integer-overflow exception here since it
     * must already have been thrown when converting a float to an
     * integer earlier
     */
    /* Return the address directly if both operands are constant and the
       access stays inside the initial memory space */
    if (!LLVMIsUndef(offset) && !LLVMIsUndef(bytes)
#if LLVM_VERSION_NUMBER >= 12
        && !LLVMIsPoison(offset) && !LLVMIsPoison(bytes)
#endif
        && LLVMIsConstant(offset) && LLVMIsConstant(bytes)) {
        uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(offset);
        uint64 mem_len = (uint64)LLVMConstIntGetZExtValue(bytes);
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        /* Multiply in 64 bits to avoid a uint32 overflow */
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_data_size > 0
            && mem_offset + mem_len <= mem_data_size) {
            /* inside memory space */
            /* maddr = mem_base_addr + mem_offset */
            if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder,
                                               mem_base_addr,
                                               &offset, 1, "maddr"))) {
                aot_set_last_error("llvm build inbounds gep failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_data_size_addr;
    }
    else {
        if (!(mem_size =
                LLVMBuildLoad(comp_ctx->builder,
                              func_ctx->mem_info[0].mem_data_size_addr,
                              "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    ADD_BASIC_BLOCK(check_succ, "check_succ");
    LLVMMoveBasicBlockAfter(check_succ, block_curr);

    /* Widen to i64 so that offset + bytes cannot wrap around */
    offset =
        LLVMBuildZExt(comp_ctx->builder, offset, I64_TYPE, "extend_offset");
    bytes = LLVMBuildZExt(comp_ctx->builder, bytes, I64_TYPE, "extend_len");
    mem_size =
        LLVMBuildZExt(comp_ctx->builder, mem_size, I64_TYPE, "extend_size");

    BUILD_OP(Add, offset, bytes, max_addr, "max_addr");
    BUILD_ICMP(LLVMIntUGT, max_addr, mem_size, cmp, "cmp_max_mem_addr");

    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                            true, cmp, check_succ)) {
        goto fail;
    }

    /* maddr = mem_base_addr + offset */
    if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder, mem_base_addr,
                                       &offset, 1, "maddr"))) {
        aot_set_last_error("llvm build inbounds gep failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}

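/* memory.init: pop the len, offset and dst operands, call the
   runtime's aot_memory_init(); if it fails, return from the current
   wasm function with zeroed results so the runtime can report the
   exception. */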
bool
aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint32 seg_index)
{
    LLVMValueRef seg, offset, dst, len, param_values[5], ret_value, func,
                 value;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef mem_init_fail, init_success;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    POP_I32(len);
    POP_I32(offset);
    POP_I32(dst);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = I32_TYPE;
    param_types[3] = I32_TYPE;
    param_types[4] = I32_TYPE;
    ret_type = INT8_TYPE;

    GET_AOT_FUNCTION(aot_memory_init, 5);

    /* Call function aot_memory_init() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    param_values[2] = offset;
    param_values[3] = len;
    param_values[4] = dst;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_init_ret");

    ADD_BASIC_BLOCK(mem_init_fail, "mem_init_fail");
    ADD_BASIC_BLOCK(init_success, "init_success");

    LLVMMoveBasicBlockAfter(mem_init_fail, block_curr);
    LLVMMoveBasicBlockAfter(init_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, ret_value,
                         init_success, mem_init_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If memory.init failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
    if (!aot_build_zero_function_ret(comp_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
    return true;
fail:
    return false;
}

bool
aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 seg_index)
{
    LLVMValueRef seg, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    GET_AOT_FUNCTION(aot_data_drop, 2);

    /* Call function aot_data_drop() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }
    return true;
fail:
    return false;
}

bool
aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef src, dst, src_addr, dst_addr, len, res;

    POP_I32(len);
    POP_I32(src);
    POP_I32(dst);

    if (!(src_addr =
            check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
        return false;

    if (!(dst_addr =
            check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    /* TODO: lookup func ptr of "memmove" to call for XIP mode */
    if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1,
                                 src_addr, 1, len))) {
        aot_set_last_error("llvm build memmove failed.");
        return false;
    }
    return true;
fail:
    return false;
}

bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef val, dst, dst_addr, len, res;

    POP_I32(len);
    POP_I32(val);
    POP_I32(dst);

    if (!(dst_addr =
            check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    if (!(val = LLVMBuildIntCast2(comp_ctx->builder, val, INT8_TYPE,
                                  true, "mem_set_value"))) {
        aot_set_last_error("llvm build int cast2 failed.");
        return false;
    }

    /* TODO: lookup func ptr of "memset" to call for XIP mode */
    if (!(res = LLVMBuildMemSet(comp_ctx->builder, dst_addr,
                                val, len, 1))) {
        aot_set_last_error("llvm build memset failed.");
        return false;
    }
    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_BULK_MEMORY */

#if WASM_ENABLE_SHARED_MEMORY != 0
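/* Atomic read-modify-write: pop the operand, bounds- and
   alignment-check the address, emit a sequentially-consistent volatile
   atomicrmw of the requested width, and push the previous value
   zero-extended to the wasm value type. */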
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx,
                          AOTFuncContext *func_ctx,
                          uint8 atomic_op, uint8 op_type,
                          uint32 align, uint32 offset,
                          uint32 bytes)
{
    LLVMValueRef maddr, value, result;

    if (op_type == VALUE_TYPE_I32)
        POP_I32(value);
    else
        POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            if (op_type == VALUE_TYPE_I64)
                BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result =
            LLVMBuildAtomicRMW(comp_ctx->builder,
                               atomic_op, maddr, value,
                               LLVMAtomicOrderingSequentiallyConsistent,
                               false))) {
        aot_set_last_error("llvm build atomic rmw failed.");
        goto fail;
    }

    LLVMSetVolatile(result, true);

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I32_TYPE, "result_i32"))) {
            aot_set_last_error("llvm build zero ext failed.");
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I64_TYPE, "result_i64"))) {
            aot_set_last_error("llvm build zero ext failed.");
            goto fail;
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}

bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              uint8 op_type, uint32 align,
                              uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, expect, result;

    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
        POP_I32(expect);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
    }

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            if (op_type == VALUE_TYPE_I64) {
                BUILD_TRUNC(value, I32_TYPE);
                BUILD_TRUNC(expect, I32_TYPE);
            }
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            BUILD_TRUNC(expect, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            BUILD_TRUNC(expect, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result =
            LLVMBuildAtomicCmpXchg(comp_ctx->builder, maddr, expect, value,
                                   LLVMAtomicOrderingSequentiallyConsistent,
                                   LLVMAtomicOrderingSequentiallyConsistent,
                                   false))) {
        aot_set_last_error("llvm build atomic cmpxchg failed.");
        goto fail;
    }

    LLVMSetVolatile(result, true);

    /* cmpxchg returns a { value, success } struct,
       extract the previous value from it */
    if (!(result =
            LLVMBuildExtractValue(comp_ctx->builder,
                                  result, 0, "previous_value"))) {
        aot_set_last_error("llvm build extract value failed.");
        goto fail;
    }

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I32_TYPE, "result_i32"))) {
            aot_set_last_error("llvm build zero ext failed.");
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I64_TYPE, "result_i64"))) {
            aot_set_last_error("llvm build zero ext failed.");
            goto fail;
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}

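/* memory.atomic.wait32/wait64 and memory.atomic.notify are emitted as
   calls into the runtime (wasm_runtime_atomic_wait /
   wasm_runtime_atomic_notify); a failing wait result branches to a
   block that returns from the current wasm function so the runtime can
   surface the exception. */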
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint8 op_type, uint32 align,
                           uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, timeout, expect, cmp;
    LLVMValueRef param_values[5], ret_value, func, is_wait64;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    LLVMBasicBlockRef wait_fail, wait_success;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;

    POP_I64(timeout);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(expect);
        is_wait64 = I8_CONST(false);
        if (!(expect =
                LLVMBuildZExt(comp_ctx->builder, expect,
                              I64_TYPE, "expect_i64"))) {
            aot_set_last_error("llvm build zero ext failed.");
            goto fail;
        }
    }
    else {
        POP_I64(expect);
        is_wait64 = I8_CONST(true);
    }

    CHECK_LLVM_CONST(is_wait64);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I64_TYPE;
    param_types[3] = I64_TYPE;
    param_types[4] = INT8_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);

    /* Call function wasm_runtime_atomic_wait() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = expect;
    param_values[3] = timeout;
    param_values[4] = is_wait64;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntSGT, ret_value, I32_ZERO, cmp, "atomic_wait_ret");

    ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
    ADD_BASIC_BLOCK(wait_success, "wait_success");

    LLVMMoveBasicBlockAfter(wait_fail, block_curr);
    LLVMMoveBasicBlockAfter(wait_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, cmp,
                         wait_success, wait_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If the atomic wait failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
    if (!aot_build_zero_function_ret(comp_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);
    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}

bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              uint32 align, uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, count;
    LLVMValueRef param_values[3], ret_value, func;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;

    POP_I32(count);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx,
                                            offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);

    /* Call function wasm_runtime_atomic_notify() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = count;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 3, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */