aot_emit_memory.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "aot_emit_memory.h"
#include "aot_emit_exception.h"
#include "../aot/aot_runtime.h"
#include "aot_intrinsic.h"

#define BUILD_ICMP(op, left, right, res, name)                                \
    do {                                                                      \
        if (!(res =                                                           \
                  LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
            aot_set_last_error("llvm build icmp failed.");                    \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define BUILD_OP(Op, left, right, res, name)                                \
    do {                                                                    \
        if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) { \
            aot_set_last_error("llvm build " #Op " fail.");                 \
            goto fail;                                                      \
        }                                                                   \
    } while (0)

#define ADD_BASIC_BLOCK(block, name)                                          \
    do {                                                                      \
        if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,        \
                                                    func_ctx->func, name))) { \
            aot_set_last_error("llvm add basic block failed.");               \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block)
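
/* Get the LLVM value that holds the bound used to check a memory access
 * of `bytes` bytes: the cached bound when the memory space cannot change,
 * otherwise a fresh load of the bound from the memory info. */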
static LLVMValueRef
get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       uint32 bytes)
{
    LLVMValueRef mem_check_bound = NULL;

    switch (bytes) {
        case 1:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_1byte;
            break;
        case 2:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_2bytes;
            break;
        case 4:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_4bytes;
            break;
        case 8:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
            break;
        case 16:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_16bytes;
            break;
        default:
            bh_assert(0);
            return NULL;
    }

    if (func_ctx->mem_space_unchanged)
        return mem_check_bound;

    if (!(mem_check_bound = LLVMBuildLoad2(
              comp_ctx->builder,
              (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE : I32_TYPE,
              mem_check_bound, "mem_check_bound"))) {
        aot_set_last_error("llvm build load failed.");
        return NULL;
    }
    return mem_check_bound;
}

static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
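
/*
 * Emit the bounds check for a `bytes`-wide access at `addr + offset` and
 * return the native address (mem_base_addr + addr + offset). A constant
 * address that provably falls inside the initial memory is returned
 * without a runtime check, and checked addresses based on a local are
 * cached so repeated accesses through the same local skip the check.
 * On 64-bit targets the emitted check is roughly:
 *
 *     offset1 = zext(offset) + zext(addr)
 *     if (offset1 > mem_check_bound) trap(out of bounds);
 *     maddr = mem_base_addr + offset1
 */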
LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint32 offset, uint32 bytes)
{
    LLVMValueRef offset_const = I32_CONST(offset);
    LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
    LLVMValueRef mem_base_addr, mem_check_bound;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    AOTValue *aot_value_top;
    uint32 local_idx_of_aot_value = 0;
    bool is_target_64bit, is_local_of_aot_value = false;
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].memory_flags & 0x02;
#endif

    is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;

    if (comp_ctx->is_indirect_mode
        && aot_intrinsic_check_capability(comp_ctx, "i32.const")) {
        WASMValue wasm_value;
        wasm_value.i32 = offset;
        offset_const = aot_load_const_from_table(
            comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_I32);
        if (!offset_const) {
            return NULL;
        }
    }
    else {
        CHECK_LLVM_CONST(offset_const);
    }

    /* Get memory base address and memory data size */
    if (func_ctx->mem_space_unchanged
#if WASM_ENABLE_SHARED_MEMORY != 0
        || is_shared_memory
#endif
    ) {
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    aot_value_top =
        func_ctx->block_stack.block_list_end->value_stack.value_list_end;
    if (aot_value_top) {
        /* aot_value_top is freed in the following POP_I32(addr),
           so save its fields here for further use */
        is_local_of_aot_value = aot_value_top->is_local;
        local_idx_of_aot_value = aot_value_top->local_idx;
    }

    POP_I32(addr);

    /*
     * Note: we don't throw the integer-overflow exception here, since it
     * must already have been thrown when converting float to integer before
     */
    /* Return the address directly if the offset is constant and the access
       is inside the memory space */
    if (LLVMIsConstant(addr) && !LLVMIsUndef(addr)
#if LLVM_VERSION_NUMBER >= 12
        && !LLVMIsPoison(addr)
#endif
    ) {
        uint64 mem_offset =
            (uint64)LLVMConstIntGetZExtValue(addr) + (uint64)offset;
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_offset + bytes <= mem_data_size) {
            /* inside memory space */
            offset1 = I32_CONST((uint32)mem_offset);
            CHECK_LLVM_CONST(offset1);
            if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                                mem_base_addr, &offset1, 1,
                                                "maddr"))) {
                aot_set_last_error("llvm build inbounds gep failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (is_target_64bit) {
        if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
                                           I64_TYPE, "offset_i64"))
            || !(addr = LLVMBuildZExt(comp_ctx->builder, addr, I64_TYPE,
                                      "addr_i64"))) {
            aot_set_last_error("llvm build zero extend failed.");
            goto fail;
        }
    }

    /* offset1 = offset + addr; */
    BUILD_OP(Add, offset_const, addr, offset1, "offset1");

    if (comp_ctx->enable_bound_check
        && !(is_local_of_aot_value
             && aot_checked_addr_list_find(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))) {
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        if (init_page_count == 0) {
            LLVMValueRef mem_size;

            if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) {
                goto fail;
            }
            BUILD_ICMP(LLVMIntEQ, mem_size, I32_ZERO, cmp, "is_zero");
            ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
            LLVMMoveBasicBlockAfter(check_succ, block_curr);
            if (!aot_emit_exception(comp_ctx, func_ctx,
                                    EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                    check_succ)) {
                goto fail;
            }
            SET_BUILD_POS(check_succ);
            block_curr = check_succ;
        }

        if (!(mem_check_bound =
                  get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
            goto fail;
        }

        if (is_target_64bit) {
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
        }
        else {
            /* Check integer overflow */
            BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp2, "cmp2");
            BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
        }

        /* Add basic blocks */
        ADD_BASIC_BLOCK(check_succ, "check_succ");
        LLVMMoveBasicBlockAfter(check_succ, block_curr);

        if (!aot_emit_exception(comp_ctx, func_ctx,
                                EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                check_succ)) {
            goto fail;
        }

        SET_BUILD_POS(check_succ);

        if (is_local_of_aot_value) {
            if (!aot_checked_addr_list_add(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))
                goto fail;
        }
    }

    /* maddr = mem_base_addr + offset1 */
    if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset1, 1, "maddr"))) {
        aot_set_last_error("llvm build inbounds gep failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}

#define BUILD_PTR_CAST(ptr_type)                                           \
    do {                                                                   \
        if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, ptr_type, \
                                       "data_ptr"))) {                     \
            aot_set_last_error("llvm build bit cast failed.");             \
            goto fail;                                                     \
        }                                                                  \
    } while (0)

#define BUILD_LOAD(data_type)                                             \
    do {                                                                  \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr, \
                                     "data"))) {                          \
            aot_set_last_error("llvm build load failed.");                \
            goto fail;                                                    \
        }                                                                 \
        LLVMSetAlignment(value, 1);                                       \
    } while (0)

#define BUILD_TRUNC(value, data_type)                                      \
    do {                                                                   \
        if (!(value = LLVMBuildTrunc(comp_ctx->builder, value, data_type,  \
                                     "val_trunc"))) {                      \
            aot_set_last_error("llvm build trunc failed.");                \
            goto fail;                                                     \
        }                                                                  \
    } while (0)

#define BUILD_STORE()                                                   \
    do {                                                                \
        LLVMValueRef res;                                               \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) { \
            aot_set_last_error("llvm build store failed.");             \
            goto fail;                                                  \
        }                                                               \
        LLVMSetAlignment(res, 1);                                       \
    } while (0)

#define BUILD_SIGN_EXT(dst_type)                                        \
    do {                                                                \
        if (!(value = LLVMBuildSExt(comp_ctx->builder, value, dst_type, \
                                    "data_s_ext"))) {                   \
            aot_set_last_error("llvm build sign ext failed.");          \
            goto fail;                                                  \
        }                                                               \
    } while (0)

#define BUILD_ZERO_EXT(dst_type)                                        \
    do {                                                                \
        if (!(value = LLVMBuildZExt(comp_ctx->builder, value, dst_type, \
                                    "data_z_ext"))) {                   \
            aot_set_last_error("llvm build zero ext failed.");          \
            goto fail;                                                  \
        }                                                               \
    } while (0)

#if WASM_ENABLE_SHARED_MEMORY != 0
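/* Emit a runtime alignment check for an atomic access: traps with
   EXCE_UNALIGNED_ATOMIC if the native address is not aligned to
   (1 << align) bytes. */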
bool
check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       LLVMValueRef addr, uint32 align)
{
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_align_succ;
    LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
    LLVMValueRef res;

    CHECK_LLVM_CONST(align_mask);

    /* Convert pointer to int */
    if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr, I32_TYPE,
                                   "address"))) {
        aot_set_last_error("llvm build ptr to int failed.");
        goto fail;
    }

    /* The memory address should be aligned */
    BUILD_OP(And, addr, align_mask, res, "and");
    BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");

    /* Add basic blocks */
    ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
    LLVMMoveBasicBlockAfter(check_align_succ, block_curr);

    if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_UNALIGNED_ATOMIC, true,
                            res, check_align_succ)) {
        goto fail;
    }

    SET_BUILD_POS(check_align_succ);
    return true;
fail:
    return false;
}

#define BUILD_ATOMIC_LOAD(align, data_type)                                \
    do {                                                                   \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr,  \
                                     "data"))) {                           \
            aot_set_last_error("llvm build load failed.");                 \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(value, 1 << align);                               \
        LLVMSetVolatile(value, true);                                      \
        LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent);  \
    } while (0)

#define BUILD_ATOMIC_STORE(align)                                          \
    do {                                                                   \
        LLVMValueRef res;                                                  \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {    \
            aot_set_last_error("llvm build store failed.");                \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(res, 1 << align);                                 \
        LLVMSetVolatile(res, true);                                        \
        LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent);    \
    } while (0)
#endif
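
/* i32 load, including the sub-word variants: check the access, cast the
   native address to the matching pointer type, load (atomically if
   requested), then sign- or zero-extend sub-word values to i32. */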
bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset, uint32 bytes, bool sign,
                        bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    switch (bytes) {
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I32_TYPE);
            else
#endif
                BUILD_LOAD(I32_TYPE);
            break;
        case 2:
        case 1:
            if (bytes == 2) {
                BUILD_PTR_CAST(INT16_PTR_TYPE);
                data_type = INT16_TYPE;
            }
            else {
                BUILD_PTR_CAST(INT8_PTR_TYPE);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I32_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I32_TYPE);
                else
                    BUILD_ZERO_EXT(I32_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I32(value);
    (void)data_type;
    return true;
fail:
    return false;
}

bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset, uint32 bytes, bool sign,
                        bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I64_TYPE);
            else
#endif
                BUILD_LOAD(I64_TYPE);
            break;
        case 4:
        case 2:
        case 1:
            if (bytes == 4) {
                BUILD_PTR_CAST(INT32_PTR_TYPE);
                data_type = I32_TYPE;
            }
            else if (bytes == 2) {
                BUILD_PTR_CAST(INT16_PTR_TYPE);
                data_type = INT16_TYPE;
            }
            else {
                BUILD_PTR_CAST(INT8_PTR_TYPE);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I64_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I64_TYPE);
                else
                    BUILD_ZERO_EXT(I64_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I64(value);
    (void)data_type;
    return true;
fail:
    return false;
}

bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
        return false;

    BUILD_PTR_CAST(F32_PTR_TYPE);
    BUILD_LOAD(F32_TYPE);
    PUSH_F32(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
        return false;

    BUILD_PTR_CAST(F64_PTR_TYPE);
    BUILD_LOAD(F64_TYPE);
    PUSH_F64(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset, uint32 bytes, bool atomic)
{
    LLVMValueRef maddr, value;

    POP_I32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    switch (bytes) {
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset, uint32 bytes, bool atomic)
{
    LLVMValueRef maddr, value;

    POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    POP_F32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
        return false;

    BUILD_PTR_CAST(F32_PTR_TYPE);
    BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    POP_F64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
        return false;

    BUILD_PTR_CAST(F64_PTR_TYPE);
    BUILD_STORE();
    return true;
fail:
    return false;
}
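
/* Get the current page count: the cached value when the memory space
   cannot change, otherwise a load from mem_cur_page_count_addr. */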
static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size;

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_cur_page_count_addr;
    }
    else {
        if (!(mem_size = LLVMBuildLoad2(
                  comp_ctx->builder, I32_TYPE,
                  func_ctx->mem_info[0].mem_cur_page_count_addr, "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    return mem_size;
fail:
    return NULL;
}

bool
aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);

    if (mem_size)
        PUSH_I32(mem_size);
    return mem_size ? true : false;
fail:
    return false;
}
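
/* memory.grow: call wasm_enlarge_memory (JIT mode) or aot_enlarge_memory
   (AOT/indirect mode) and push the previous page count on success, or -1
   on failure, as the wasm spec requires. */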
bool
aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
    LLVMValueRef delta, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
    int32 func_index;

    if (!mem_size)
        return false;

    POP_I32(delta);

    /* Function type of aot_enlarge_memory() */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
        aot_set_last_error("llvm add function type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        /* JIT mode, call the function directly */
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("llvm add pointer type failed.");
            return false;
        }
        if (!(value = I64_CONST((uint64)(uintptr_t)wasm_enlarge_memory))
            || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }
        func_index =
            aot_get_native_symbol_index(comp_ctx, "aot_enlarge_memory");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        char *func_name = "aot_enlarge_memory";
        /* AOT mode, declare the function */
        if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name))
            && !(func =
                     LLVMAddFunction(func_ctx->module, func_name, func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    /* Call function aot_enlarge_memory() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = delta;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");
    /* ret_value = ret_value == true ? previous page count : -1 */
    if (!(ret_value = LLVMBuildSelect(comp_ctx->builder, ret_value, mem_size,
                                      I32_NEG_ONE, "mem_grow_ret"))) {
        aot_set_last_error("llvm build select failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}

#if WASM_ENABLE_BULK_MEMORY != 0
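/* Bounds check for bulk memory operations: unlike
   aot_check_memory_overflow the length is a runtime value, so the check
   compares offset + bytes against the current memory data size and traps
   on overflow before returning the native address. */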
static LLVMValueRef
check_bulk_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           LLVMValueRef offset, LLVMValueRef bytes)
{
    LLVMValueRef maddr, max_addr, cmp;
    LLVMValueRef mem_base_addr;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    LLVMValueRef mem_size;

    /* Get memory base address and memory data size */
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].memory_flags & 0x02;

    if (func_ctx->mem_space_unchanged || is_shared_memory) {
#else
    if (func_ctx->mem_space_unchanged) {
#endif
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    /*
     * Note: we don't throw the integer-overflow exception here, since it
     * must already have been thrown when converting float to integer before
     */
    /* Return the address directly if both offset and length are constant
       and the access is inside the memory space */
    if (!LLVMIsUndef(offset) && !LLVMIsUndef(bytes)
#if LLVM_VERSION_NUMBER >= 12
        && !LLVMIsPoison(offset) && !LLVMIsPoison(bytes)
#endif
        && LLVMIsConstant(offset) && LLVMIsConstant(bytes)) {
        uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(offset);
        uint64 mem_len = (uint64)LLVMConstIntGetZExtValue(bytes);
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        uint32 mem_data_size = num_bytes_per_page * init_page_count;

        if (mem_data_size > 0 && mem_offset + mem_len <= mem_data_size) {
            /* inside memory space */
            /* maddr = mem_base_addr + moffset */
            if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                                mem_base_addr, &offset, 1,
                                                "maddr"))) {
                aot_set_last_error("llvm build inbounds gep failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_data_size_addr;
    }
    else {
        if (!(mem_size = LLVMBuildLoad2(
                  comp_ctx->builder, I32_TYPE,
                  func_ctx->mem_info[0].mem_data_size_addr, "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    ADD_BASIC_BLOCK(check_succ, "check_succ");
    LLVMMoveBasicBlockAfter(check_succ, block_curr);

    offset =
        LLVMBuildZExt(comp_ctx->builder, offset, I64_TYPE, "extend_offset");
    bytes = LLVMBuildZExt(comp_ctx->builder, bytes, I64_TYPE, "extend_len");
    mem_size =
        LLVMBuildZExt(comp_ctx->builder, mem_size, I64_TYPE, "extend_size");

    BUILD_OP(Add, offset, bytes, max_addr, "max_addr");
    BUILD_ICMP(LLVMIntUGT, max_addr, mem_size, cmp, "cmp_max_mem_addr");

    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                            check_succ)) {
        goto fail;
    }

    /* maddr = mem_base_addr + offset */
    if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset, 1, "maddr"))) {
        aot_set_last_error("llvm build inbounds gep failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}
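
/* memory.init: pop len/offset/dst and call the runtime helper
   (llvm_jit_memory_init or aot_memory_init); on failure, return from the
   current function so the runtime can raise the pending exception. */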
bool
aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint32 seg_index)
{
    LLVMValueRef seg, offset, dst, len, param_values[5], ret_value, func, value;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef mem_init_fail, init_success;

    seg = I32_CONST(seg_index);

    POP_I32(len);
    POP_I32(offset);
    POP_I32(dst);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = I32_TYPE;
    param_types[3] = I32_TYPE;
    param_types[4] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_memory_init, 5);
    else
        GET_AOT_FUNCTION(aot_memory_init, 5);

    /* Call function aot_memory_init() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    param_values[2] = offset;
    param_values[3] = len;
    param_values[4] = dst;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_init_ret");

    ADD_BASIC_BLOCK(mem_init_fail, "mem_init_fail");
    ADD_BASIC_BLOCK(init_success, "init_success");
    LLVMMoveBasicBlockAfter(mem_init_fail, block_curr);
    LLVMMoveBasicBlockAfter(init_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, ret_value, init_success,
                         mem_init_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If memory.init failed, return this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
    return true;
fail:
    return false;
}

bool
aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 seg_index)
{
    LLVMValueRef seg, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_data_drop, 2);
    else
        GET_AOT_FUNCTION(aot_data_drop, 2);

    /* Call function aot_data_drop() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }
    return true;
fail:
    return false;
}
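
/* memory.copy: bounds-check both ranges, then emit an inline
   llvm.memmove, except in JIT/indirect mode where aot_memmove/memmove is
   called through a function pointer instead. */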
bool
aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef src, dst, src_addr, dst_addr, len, res;
    bool call_aot_memmove = false;

    POP_I32(len);
    POP_I32(src);
    POP_I32(dst);

    if (!(src_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
        return false;

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    call_aot_memmove = comp_ctx->is_indirect_mode || comp_ctx->is_jit_mode;
    if (call_aot_memmove) {
        LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
        LLVMValueRef func, params[3];

        param_types[0] = INT8_PTR_TYPE;
        param_types[1] = INT8_PTR_TYPE;
        param_types[2] = I32_TYPE;
        ret_type = INT8_PTR_TYPE;

        if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }

        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function pointer type failed.");
            return false;
        }

        if (comp_ctx->is_jit_mode) {
            if (!(func = I64_CONST((uint64)(uintptr_t)aot_memmove))
                || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
                aot_set_last_error("create LLVM value failed.");
                return false;
            }
        }
        else {
            int32 func_index;
            func_index = aot_get_native_symbol_index(comp_ctx, "memmove");
            if (func_index < 0) {
                return false;
            }
            if (!(func =
                      aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                              func_ptr_type, func_index))) {
                return false;
            }
        }

        params[0] = dst_addr;
        params[1] = src_addr;
        params[2] = len;
        if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params,
                                   3, "call_memmove"))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    else {
        if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1, src_addr,
                                     1, len))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    return true;
fail:
    return false;
}
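
/* Plain C wrapper around memset, giving the JIT a concrete function
   address to call. */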
static void *
jit_memset(void *s, int c, size_t n)
{
    return memset(s, c, n);
}

bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef val, dst, dst_addr, len, res;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
    LLVMValueRef func, params[3];

    POP_I32(len);
    POP_I32(val);
    POP_I32(dst);

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = INT8_PTR_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
        aot_set_last_error("create LLVM function type failed.");
        return false;
    }

    if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
        aot_set_last_error("create LLVM function pointer type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        if (!(func = I64_CONST((uint64)(uintptr_t)jit_memset))
            || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        int32 func_index;
        func_index = aot_get_native_symbol_index(comp_ctx, "memset");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        if (!(func = LLVMGetNamedFunction(func_ctx->module, "memset"))
            && !(func =
                     LLVMAddFunction(func_ctx->module, "memset", func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    params[0] = dst_addr;
    params[1] = val;
    params[2] = len;
    if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params, 3,
                               "call_memset"))) {
        aot_set_last_error("llvm build memset failed.");
        return false;
    }
    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_BULK_MEMORY */

#if WASM_ENABLE_SHARED_MEMORY != 0
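/* Atomic read-modify-write: check bounds and alignment, truncate the
   operand to the access width, emit a sequentially consistent atomicrmw,
   then zero-extend the old value back to the operand type and push it. */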
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint8 atomic_op, uint8 op_type, uint32 align,
                          uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, result;

    if (op_type == VALUE_TYPE_I32)
        POP_I32(value);
    else
        POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            if (op_type == VALUE_TYPE_I64)
                BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicRMW(
              comp_ctx->builder, atomic_op, maddr, value,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                     "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                     "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}
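
/* Atomic compare-and-exchange: like the rmw case, but pops both the
   expected and replacement values and pushes the previous memory value
   extracted from the pair returned by cmpxchg. */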
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint8 op_type,
                              uint32 align, uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, expect, result;

    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
        POP_I32(expect);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
    }

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            if (op_type == VALUE_TYPE_I64) {
                BUILD_TRUNC(value, I32_TYPE);
                BUILD_TRUNC(expect, I32_TYPE);
            }
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            BUILD_TRUNC(expect, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            BUILD_TRUNC(expect, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicCmpXchg(
              comp_ctx->builder, maddr, expect, value,
              LLVMAtomicOrderingSequentiallyConsistent,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    /* cmpxchg returns a { value, i1 success } structure; we need to
       extract the previous value from it */
    if (!(result = LLVMBuildExtractValue(comp_ctx->builder, result, 0,
                                         "previous_value"))) {
        goto fail;
    }

    if (op_type == VALUE_TYPE_I32) {
        if (LLVMTypeOf(result) != I32_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                         "result_i32"))) {
                goto fail;
            }
        }
        PUSH_I32(result);
    }
    else {
        if (LLVMTypeOf(result) != I64_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                         "result_i64"))) {
                goto fail;
            }
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}
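
/* memory.atomic.wait32/wait64: call wasm_runtime_atomic_wait; if it
   reports failure, return from the current function so the runtime can
   raise the pending exception, otherwise push the wait result. */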
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint8 op_type, uint32 align, uint32 offset,
                           uint32 bytes)
{
    LLVMValueRef maddr, value, timeout, expect, cmp;
    LLVMValueRef param_values[5], ret_value, func, is_wait64;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    LLVMBasicBlockRef wait_fail, wait_success;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;

    POP_I64(timeout);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(expect);
        is_wait64 = I8_CONST(false);
        if (!(expect = LLVMBuildZExt(comp_ctx->builder, expect, I64_TYPE,
                                     "expect_i64"))) {
            goto fail;
        }
    }
    else {
        POP_I64(expect);
        is_wait64 = I8_CONST(true);
    }
    CHECK_LLVM_CONST(is_wait64);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I64_TYPE;
    param_types[3] = I64_TYPE;
    param_types[4] = INT8_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);

    /* Call function wasm_runtime_atomic_wait() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = expect;
    param_values[3] = timeout;
    param_values[4] = is_wait64;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntSGT, ret_value, I32_ZERO, cmp, "atomic_wait_ret");

    ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
    ADD_BASIC_BLOCK(wait_success, "wait_success");
    LLVMMoveBasicBlockAfter(wait_fail, block_curr);
    LLVMMoveBasicBlockAfter(wait_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, cmp, wait_success, wait_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If atomic wait failed, return this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}
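
/* memory.atomic.notify: call wasm_runtime_atomic_notify and push the
   number of woken waiters. */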
bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint32 align,
                              uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, count;
    LLVMValueRef param_values[3], ret_value, func;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;

    POP_I32(count);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);

    /* Call function wasm_runtime_atomic_notify() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = count;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 3, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */