/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "aot_emit_memory.h"
#include "aot_compiler.h"
#include "aot_emit_exception.h"
#include "../aot/aot_runtime.h"
#include "aot_intrinsic.h"
#include "aot_emit_control.h"

#define BUILD_ICMP(op, left, right, res, name)                                \
    do {                                                                      \
        if (!(res =                                                           \
                  LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
            aot_set_last_error("llvm build icmp failed.");                    \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define BUILD_OP(Op, left, right, res, name)                                \
    do {                                                                    \
        if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) { \
            aot_set_last_error("llvm build " #Op " failed.");               \
            goto fail;                                                      \
        }                                                                   \
    } while (0)

#define ADD_BASIC_BLOCK(block, name)                                          \
    do {                                                                      \
        if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,        \
                                                    func_ctx->func, name))) { \
            aot_set_last_error("llvm add basic block failed.");               \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block)
static bool
zero_extend_u64(AOTCompContext *comp_ctx, LLVMValueRef *value, const char *name)
{
    if (comp_ctx->pointer_size == sizeof(uint64)) {
        /* zero extend to uint64 if the target is 64-bit */
        *value = LLVMBuildZExt(comp_ctx->builder, *value, I64_TYPE, name);
        if (!*value) {
            aot_set_last_error("llvm build zero extend failed.");
            return false;
        }
    }
    return true;
}
static LLVMValueRef
get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       uint32 bytes)
{
    LLVMValueRef mem_check_bound = NULL;

    switch (bytes) {
        case 1:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_1byte;
            break;
        case 2:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_2bytes;
            break;
        case 4:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_4bytes;
            break;
        case 8:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
            break;
        case 16:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_16bytes;
            break;
        default:
            bh_assert(0);
            return NULL;
    }

    if (func_ctx->mem_space_unchanged)
        return mem_check_bound;

    if (!(mem_check_bound = LLVMBuildLoad2(
              comp_ctx->builder,
              (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE : I32_TYPE,
              mem_check_bound, "mem_check_bound"))) {
        aot_set_last_error("llvm build load failed.");
        return NULL;
    }
    return mem_check_bound;
}
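
/*
 * A note on the bounds-check strategy below (editor's sketch, not emitted
 * verbatim): aot_check_memory_overflow() pops the base address operand,
 * folds in the constant offset, and guards the access with an unsigned
 * compare against the per-size bound loaded above (essentially
 * mem_data_size - bytes). On a 64-bit target the emitted IR looks
 * roughly like:
 *
 *   %addr_i64 = zext i32 %addr to i64
 *   %offset1  = add i64 %offset_const, %addr_i64
 *   %cmp      = icmp ugt i64 %offset1, %mem_check_bound
 *   br i1 %cmp, label %got_exception, label %check_succ
 *
 * Constant addresses that provably fall inside the initial memory size
 * skip the runtime check entirely and return the native address directly.
 */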
static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);

LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          mem_offset_t offset, uint32 bytes, bool enable_segue,
                          unsigned int *alignp)
{
    LLVMValueRef offset_const =
        MEMORY64_COND_VALUE(I64_CONST(offset), I32_CONST(offset));
    LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
    LLVMValueRef mem_base_addr, mem_check_bound;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    AOTValue *aot_value_top;
    uint32 local_idx_of_aot_value = 0;
    uint64 const_value;
    bool is_target_64bit, is_local_of_aot_value = false;
    bool is_const = false;
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].flags & SHARED_MEMORY_FLAG;
#endif

    is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;

    if (comp_ctx->is_indirect_mode
        && aot_intrinsic_check_capability(
            comp_ctx, MEMORY64_COND_VALUE("i64.const", "i32.const"))) {
        WASMValue wasm_value;
#if WASM_ENABLE_MEMORY64 != 0
        if (IS_MEMORY64) {
            wasm_value.i64 = offset;
        }
        else
#endif
        {
            wasm_value.i32 = (int32)offset;
        }
        offset_const = aot_load_const_from_table(
            comp_ctx, func_ctx->native_symbol, &wasm_value,
            MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32));
        if (!offset_const) {
            return NULL;
        }
    }
    else {
        CHECK_LLVM_CONST(offset_const);
    }

    /* Get memory base address and memory data size */
    if (func_ctx->mem_space_unchanged
#if WASM_ENABLE_SHARED_MEMORY != 0
        || is_shared_memory
#endif
    ) {
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    aot_value_top =
        func_ctx->block_stack.block_list_end->value_stack.value_list_end;
    if (aot_value_top) {
        /* aot_value_top is freed in the following POP_I32(addr),
           so save its fields here for further use */
        is_local_of_aot_value = aot_value_top->is_local;
        is_const = aot_value_top->is_const;
        local_idx_of_aot_value = aot_value_top->local_idx;
        const_value = aot_value_top->const_value;
    }

    POP_MEM_OFFSET(addr);

    /*
     * Note: we don't throw the integer-overflow exception here, since it
     * must already have been thrown when converting float to integer before
     */

    /* return the address directly if the offset is constant and the access
       stays inside the memory space */
    if (LLVMIsEfficientConstInt(addr) || is_const) {
        uint64 value;
        if (LLVMIsEfficientConstInt(addr)) {
            value = (uint64)LLVMConstIntGetZExtValue(addr);
        }
        else {
            value = const_value;
        }
        uint64 mem_offset = value + (uint64)offset;
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].init_page_count;
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (alignp != NULL) {
            /*
             * A note about max_align below:
             * the assumption here is that the base address of a linear
             * memory has the natural alignment. for platforms using mmap,
             * it can be even larger. for now, use a conservative value.
             */
            const int max_align = 8;
            int shift = ffs((int)(unsigned int)mem_offset);
            if (shift == 0) {
                *alignp = max_align;
            }
            else {
                unsigned int align = 1 << (shift - 1);
                if (align > max_align) {
                    align = max_align;
                }
                *alignp = align;
            }
        }

        if (mem_offset + bytes <= mem_data_size) {
            /* inside memory space */
            if (comp_ctx->pointer_size == sizeof(uint64))
                offset1 = I64_CONST(mem_offset);
            else
                offset1 = I32_CONST((uint32)mem_offset);
            CHECK_LLVM_CONST(offset1);
            if (!enable_segue) {
                if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder,
                                                    INT8_TYPE, mem_base_addr,
                                                    &offset1, 1, "maddr"))) {
                    aot_set_last_error("llvm build add failed.");
                    goto fail;
                }
            }
            else {
                if (!(maddr = LLVMBuildIntToPtr(comp_ctx->builder, offset1,
                                                INT8_PTR_TYPE_GS, "maddr"))) {
                    aot_set_last_error("llvm build IntToPtr failed.");
                    goto fail;
                }
            }
            return maddr;
        }
    }
    else if (alignp != NULL) {
        *alignp = 1;
    }

    if (is_target_64bit) {
        if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
                                           I64_TYPE, "offset_i64"))
            || !(addr = LLVMBuildZExt(comp_ctx->builder, addr, I64_TYPE,
                                      "addr_i64"))) {
            aot_set_last_error("llvm build zero extend failed.");
            goto fail;
        }
    }

    /* offset1 = offset + addr; */
    BUILD_OP(Add, offset_const, addr, offset1, "offset1");

    if (comp_ctx->enable_bound_check
        && !(is_local_of_aot_value
             && aot_checked_addr_list_find(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))) {
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].init_page_count;
        if (init_page_count == 0) {
            LLVMValueRef mem_size;
            if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) {
                goto fail;
            }
            BUILD_ICMP(LLVMIntEQ, mem_size,
                       MEMORY64_COND_VALUE(I64_ZERO, I32_ZERO), cmp, "is_zero");
            ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
            LLVMMoveBasicBlockAfter(check_succ, block_curr);
            if (!aot_emit_exception(comp_ctx, func_ctx,
                                    EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                    check_succ)) {
                goto fail;
            }
            SET_BUILD_POS(check_succ);
            block_curr = check_succ;
        }

        if (!(mem_check_bound =
                  get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
            goto fail;
        }

        if (is_target_64bit) {
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
        }
        else {
            /* Check integer overflow */
            BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp2, "cmp2");
            BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
        }

        /* Add basic blocks */
        ADD_BASIC_BLOCK(check_succ, "check_succ");
        LLVMMoveBasicBlockAfter(check_succ, block_curr);

        if (!aot_emit_exception(comp_ctx, func_ctx,
                                EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                check_succ)) {
            goto fail;
        }

        SET_BUILD_POS(check_succ);

        if (is_local_of_aot_value) {
            if (!aot_checked_addr_list_add(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))
                goto fail;
        }
    }

    if (!enable_segue) {
        /* maddr = mem_base_addr + offset1 */
        if (!(maddr =
                  LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset1, 1, "maddr"))) {
            aot_set_last_error("llvm build add failed.");
            goto fail;
        }
    }
    else {
        LLVMValueRef maddr_base;
        if (!(maddr_base = LLVMBuildIntToPtr(comp_ctx->builder, addr,
                                             INT8_PTR_TYPE_GS, "maddr_base"))) {
            aot_set_last_error("llvm build int to ptr failed.");
            goto fail;
        }
        if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                            maddr_base, &offset_const, 1,
                                            "maddr"))) {
            aot_set_last_error("llvm build inbounds gep failed.");
            goto fail;
        }
    }
    return maddr;
fail:
    return NULL;
}
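
/*
 * The macros below factor out the common load/store emission steps: cast
 * the bounds-checked native address to a typed pointer, emit the memory
 * operation with the alignment learned during the bounds check, and widen
 * or narrow the value to the wasm operand type. For instance, an
 * i32.load16_s expands to roughly (illustrative only):
 *
 *   BUILD_PTR_CAST(INT16_PTR_TYPE);
 *   BUILD_LOAD(INT16_TYPE);
 *   BUILD_SIGN_EXT(I32_TYPE);
 */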
#define BUILD_PTR_CAST(ptr_type)                                           \
    do {                                                                   \
        if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, ptr_type, \
                                       "data_ptr"))) {                     \
            aot_set_last_error("llvm build bit cast failed.");             \
            goto fail;                                                     \
        }                                                                  \
    } while (0)

#define BUILD_LOAD(data_type)                                              \
    do {                                                                   \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr,  \
                                     "data"))) {                           \
            aot_set_last_error("llvm build load failed.");                 \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(value, known_align);                              \
    } while (0)

#define BUILD_TRUNC(value, data_type)                                     \
    do {                                                                  \
        if (!(value = LLVMBuildTrunc(comp_ctx->builder, value, data_type, \
                                     "val_trunc"))) {                     \
            aot_set_last_error("llvm build trunc failed.");               \
            goto fail;                                                    \
        }                                                                 \
    } while (0)

#define BUILD_STORE()                                                   \
    do {                                                                \
        LLVMValueRef res;                                               \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) { \
            aot_set_last_error("llvm build store failed.");             \
            goto fail;                                                  \
        }                                                               \
        LLVMSetAlignment(res, known_align);                             \
    } while (0)

#define BUILD_SIGN_EXT(dst_type)                                        \
    do {                                                                \
        if (!(value = LLVMBuildSExt(comp_ctx->builder, value, dst_type, \
                                    "data_s_ext"))) {                   \
            aot_set_last_error("llvm build sign ext failed.");          \
            goto fail;                                                  \
        }                                                               \
    } while (0)

#define BUILD_ZERO_EXT(dst_type)                                        \
    do {                                                                \
        if (!(value = LLVMBuildZExt(comp_ctx->builder, value, dst_type, \
                                    "data_z_ext"))) {                   \
            aot_set_last_error("llvm build zero ext failed.");          \
            goto fail;                                                  \
        }                                                               \
    } while (0)
#if WASM_ENABLE_SHARED_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0
bool
check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       LLVMValueRef addr, uint32 align)
{
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_align_succ;
    LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
    LLVMValueRef res;

    CHECK_LLVM_CONST(align_mask);

    /* Convert pointer to int */
    if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr, I32_TYPE,
                                   "address"))) {
        aot_set_last_error("llvm build ptr to int failed.");
        goto fail;
    }

    /* The memory address should be aligned */
    BUILD_OP(And, addr, align_mask, res, "and");
    BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");

    /* Add basic blocks */
    ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
    LLVMMoveBasicBlockAfter(check_align_succ, block_curr);

    if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_UNALIGNED_ATOMIC, true,
                            res, check_align_succ)) {
        goto fail;
    }

    SET_BUILD_POS(check_align_succ);
    return true;
fail:
    return false;
}
#endif /* WASM_ENABLE_SHARED_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0 */
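
/*
 * Atomic accesses must be naturally aligned. The macros below therefore
 * first run check_memory_alignment(), which traps with
 * EXCE_UNALIGNED_ATOMIC when (addr & ((1 << align) - 1)) != 0, and then
 * emit a volatile, sequentially consistent load or store. The shape of an
 * emitted atomic load is roughly (illustrative, not exact IR):
 *
 *   %and  = and i32 %address, ((1 << align) - 1)
 *   %cmp  = icmp ne i32 %and, 0          ; trap if misaligned
 *   %data = load atomic volatile i32, ptr %maddr seq_cst
 */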
#if WASM_ENABLE_SHARED_MEMORY != 0
#define BUILD_ATOMIC_LOAD(align, data_type)                                \
    do {                                                                   \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr,  \
                                     "data"))) {                           \
            aot_set_last_error("llvm build load failed.");                 \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(value, 1 << align);                               \
        LLVMSetVolatile(value, true);                                      \
        LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent);  \
    } while (0)

#define BUILD_ATOMIC_STORE(align)                                          \
    do {                                                                   \
        LLVMValueRef res;                                                  \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {    \
            aot_set_last_error("llvm build store failed.");                \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(res, 1 << align);                                 \
        LLVMSetVolatile(res, true);                                        \
        LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent);    \
    } while (0)
#endif
bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;
    bool enable_segue = comp_ctx->enable_segue_i32_load;
    unsigned int known_align;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;

    switch (bytes) {
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I32_TYPE);
            else
#endif
                BUILD_LOAD(I32_TYPE);
            break;
        case 2:
        case 1:
            if (bytes == 2) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT16_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
                data_type = INT16_TYPE;
            }
            else {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT8_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I32_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I32_TYPE);
                else
                    BUILD_ZERO_EXT(I32_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I32(value);
    (void)data_type;
    return true;
fail:
    return false;
}
bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;
    bool enable_segue = comp_ctx->enable_segue_i64_load;
    unsigned int known_align;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I64_TYPE);
            else
#endif
                BUILD_LOAD(I64_TYPE);
            break;
        case 4:
        case 2:
        case 1:
            if (bytes == 4) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT32_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
                data_type = I32_TYPE;
            }
            else if (bytes == 2) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT16_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
                data_type = INT16_TYPE;
            }
            else {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT8_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I64_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I64_TYPE);
                else
                    BUILD_ZERO_EXT(I64_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I64(value);
    (void)data_type;
    return true;
fail:
    return false;
}
bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f32_load;
    unsigned int known_align;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
                                            enable_segue, &known_align)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F32_PTR_TYPE);
    else
        BUILD_PTR_CAST(F32_PTR_TYPE_GS);
    BUILD_LOAD(F32_TYPE);

    PUSH_F32(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f64_load;
    unsigned int known_align;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
                                            enable_segue, &known_align)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F64_PTR_TYPE);
    else
        BUILD_PTR_CAST(F64_PTR_TYPE_GS);
    BUILD_LOAD(F64_TYPE);

    PUSH_F64(value);
    return true;
fail:
    return false;
}
bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_i32_store;
    unsigned int known_align;

    POP_I32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;

    switch (bytes) {
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}
bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_i64_store;
    unsigned int known_align;

    POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}
bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f32_store;
    unsigned int known_align;

    POP_F32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
                                            enable_segue, &known_align)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F32_PTR_TYPE);
    else
        BUILD_PTR_CAST(F32_PTR_TYPE_GS);
    BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f64_store;
    unsigned int known_align;

    POP_F64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
                                            enable_segue, &known_align)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F64_PTR_TYPE);
    else
        BUILD_PTR_CAST(F64_PTR_TYPE_GS);
    BUILD_STORE();
    return true;
fail:
    return false;
}
static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size;

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_cur_page_count_addr;
    }
    else {
        if (!(mem_size = LLVMBuildLoad2(
                  comp_ctx->builder, I32_TYPE,
                  func_ctx->mem_info[0].mem_cur_page_count_addr, "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    return LLVMBuildIntCast(comp_ctx->builder, mem_size,
                            MEMORY64_COND_VALUE(I64_TYPE, I32_TYPE), "");
fail:
    return NULL;
}
bool
aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);

    if (mem_size)
        PUSH_PAGE_COUNT(mem_size);
    return mem_size ? true : false;
fail:
    return false;
}
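
/*
 * memory.grow lowers to a call into the runtime helper
 * (wasm_enlarge_memory in JIT mode, aot_enlarge_memory otherwise).
 * In pseudo-C the emitted code behaves roughly like (editor's sketch,
 * not the literal IR):
 *
 *   prev = current_page_count;
 *   ok = enlarge_memory(inst, (uint32)delta);
 *   result = ok ? prev : -1;
 *
 * For memory64, delta must additionally fit in 32 bits; otherwise the
 * grow is treated as failed.
 */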
bool
aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
    LLVMValueRef delta, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
    int32 func_index;
#if WASM_ENABLE_MEMORY64 != 0
    LLVMValueRef u32_max, u32_cmp_result;
#endif

    if (!mem_size)
        return false;

    POP_PAGE_COUNT(delta);

    /* Function type of aot_enlarge_memory() */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
        aot_set_last_error("llvm add function type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        /* JIT mode, call the function directly */
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("llvm add pointer type failed.");
            return false;
        }
        if (!(value = I64_CONST((uint64)(uintptr_t)wasm_enlarge_memory))
            || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }
        func_index =
            aot_get_native_symbol_index(comp_ctx, "aot_enlarge_memory");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        char *func_name = "aot_enlarge_memory";
        /* AOT mode, declare the function */
        if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name))
            && !(func =
                     LLVMAddFunction(func_ctx->module, func_name, func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    /* Call function aot_enlarge_memory() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = LLVMBuildTrunc(comp_ctx->builder, delta, I32_TYPE, "");
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");

#if WASM_ENABLE_MEMORY64 != 0
    if (IS_MEMORY64) {
        if (!(u32_max = I64_CONST(UINT32_MAX))) {
            aot_set_last_error("llvm build const failed.");
            return false;
        }
        BUILD_ICMP(LLVMIntULE, delta, u32_max, u32_cmp_result, "page_size_cmp");
        BUILD_OP(And, ret_value, u32_cmp_result, ret_value, "and");
    }
#endif

    /* ret_value = ret_value == true ? prev_page_count : -1 */
    if (!(ret_value = LLVMBuildSelect(
              comp_ctx->builder, ret_value, mem_size,
              MEMORY64_COND_VALUE(I64_NEG_ONE, I32_NEG_ONE), "mem_grow_ret"))) {
        aot_set_last_error("llvm build select failed.");
        return false;
    }

    PUSH_PAGE_COUNT(ret_value);
    return true;
fail:
    return false;
}
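
/*
 * Bulk memory operations (memory.init/copy/fill) take a dynamic length,
 * so they cannot reuse the fixed-size bound used for plain loads and
 * stores. check_bulk_memory_overflow() instead widens offset and length
 * to i64 and verifies offset + len <= mem_data_size before computing the
 * native address, roughly (illustrative):
 *
 *   %max_addr = add i64 %offset, %len
 *   %cmp = icmp ugt i64 %max_addr, %mem_size   ; trap if out of bounds
 */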
#if WASM_ENABLE_BULK_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0
LLVMValueRef
check_bulk_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           LLVMValueRef offset, LLVMValueRef bytes)
{
    LLVMValueRef maddr, max_addr, cmp;
    LLVMValueRef mem_base_addr;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    LLVMValueRef mem_size;

    /* Get memory base address and memory data size */
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory = comp_ctx->comp_data->memories[0].flags & 0x02;

    if (func_ctx->mem_space_unchanged || is_shared_memory) {
#else
    if (func_ctx->mem_space_unchanged) {
#endif
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    /*
     * Note: we don't throw the integer-overflow exception here, since it
     * must already have been thrown when converting float to integer before
     */

    /* return the address directly if both offset and length are constant
       and the range stays inside the memory space */
    if (LLVMIsEfficientConstInt(offset) && LLVMIsEfficientConstInt(bytes)) {
        uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(offset);
        uint64 mem_len = (uint64)LLVMConstIntGetZExtValue(bytes);
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].init_page_count;
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_data_size > 0 && mem_offset + mem_len <= mem_data_size) {
            /* inside memory space */
            /* maddr = mem_base_addr + moffset */
            if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                                mem_base_addr, &offset, 1,
                                                "maddr"))) {
                aot_set_last_error("llvm build add failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_data_size_addr;
    }
    else {
        if (!(mem_size = LLVMBuildLoad2(
                  comp_ctx->builder, I64_TYPE,
                  func_ctx->mem_info[0].mem_data_size_addr, "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    ADD_BASIC_BLOCK(check_succ, "check_succ");
    LLVMMoveBasicBlockAfter(check_succ, block_curr);

    offset =
        LLVMBuildZExt(comp_ctx->builder, offset, I64_TYPE, "extend_offset");
    bytes = LLVMBuildZExt(comp_ctx->builder, bytes, I64_TYPE, "extend_len");

    BUILD_OP(Add, offset, bytes, max_addr, "max_addr");
    BUILD_ICMP(LLVMIntUGT, max_addr, mem_size, cmp, "cmp_max_mem_addr");

    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                            check_succ)) {
        goto fail;
    }

    /* maddr = mem_base_addr + offset */
    if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset, 1, "maddr"))) {
        aot_set_last_error("llvm build add failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}
#endif /* end of WASM_ENABLE_BULK_MEMORY != 0 or WASM_ENABLE_STRINGREF != 0 */
#if WASM_ENABLE_BULK_MEMORY != 0
bool
aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint32 seg_index)
{
    LLVMValueRef seg, offset, dst, len, param_values[5], ret_value, func, value;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef mem_init_fail, init_success;

    seg = I32_CONST(seg_index);

    POP_I32(len);
    POP_I32(offset);
    POP_MEM_OFFSET(dst);

    if (!zero_extend_u64(comp_ctx, &dst, "dst64")) {
        return false;
    }

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = I32_TYPE;
    param_types[3] = I32_TYPE;
    param_types[4] = SIZE_T_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_memory_init, 5);
    else
        GET_AOT_FUNCTION(aot_memory_init, 5);

    /* Call function aot_memory_init() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    param_values[2] = offset;
    param_values[3] = len;
    param_values[4] = dst;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_init_ret");

    ADD_BASIC_BLOCK(mem_init_fail, "mem_init_fail");
    ADD_BASIC_BLOCK(init_success, "init_success");

    LLVMMoveBasicBlockAfter(mem_init_fail, block_curr);
    LLVMMoveBasicBlockAfter(init_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, ret_value, init_success,
                         mem_init_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If memory.init failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
    return true;
fail:
    return false;
}
bool
aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 seg_index)
{
    LLVMValueRef seg, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_data_drop, 2);
    else
        GET_AOT_FUNCTION(aot_data_drop, 2);

    /* Call function aot_data_drop() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }
    return true;
fail:
    return false;
}
bool
aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef src, dst, src_addr, dst_addr, len, res;
    bool call_aot_memmove = false;

    POP_MEM_OFFSET(len);
    POP_MEM_OFFSET(src);
    POP_MEM_OFFSET(dst);

    if (!(src_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
        return false;

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    if (!zero_extend_u64(comp_ctx, &len, "len64")) {
        return false;
    }

    call_aot_memmove = comp_ctx->is_indirect_mode || comp_ctx->is_jit_mode;
    if (call_aot_memmove) {
        LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
        LLVMValueRef func, params[3];

        param_types[0] = INT8_PTR_TYPE;
        param_types[1] = INT8_PTR_TYPE;
        param_types[2] = SIZE_T_TYPE;
        ret_type = INT8_PTR_TYPE;

        if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }

        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function pointer type failed.");
            return false;
        }

        if (comp_ctx->is_jit_mode) {
            if (!(func = I64_CONST((uint64)(uintptr_t)aot_memmove))
                || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
                aot_set_last_error("create LLVM value failed.");
                return false;
            }
        }
        else {
            int32 func_index;
            func_index = aot_get_native_symbol_index(comp_ctx, "memmove");
            if (func_index < 0) {
                return false;
            }
            if (!(func =
                      aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                              func_ptr_type, func_index))) {
                return false;
            }
        }

        params[0] = dst_addr;
        params[1] = src_addr;
        params[2] = len;
        if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params,
                                   3, "call_memmove"))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    else {
        if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1, src_addr,
                                     1, len))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    return true;
fail:
    return false;
}
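
/*
 * memory.fill follows the same pattern as memory.copy: bounds-check the
 * destination range, then call memset through the native symbol table
 * (indirect mode), through a direct pointer (JIT mode), or as a declared
 * external function (AOT mode). jit_memset below appears to exist so the
 * JIT has a plain function address to call, since memset itself may be
 * an intrinsic or macro on some toolchains.
 */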
static void *
jit_memset(void *s, int c, size_t n)
{
    return memset(s, c, n);
}

bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef val, dst, dst_addr, len, res;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
    LLVMValueRef func, params[3];

    POP_MEM_OFFSET(len);
    POP_I32(val);
    POP_MEM_OFFSET(dst);

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    if (!zero_extend_u64(comp_ctx, &len, "len64")) {
        return false;
    }

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = SIZE_T_TYPE;
    ret_type = INT8_PTR_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
        aot_set_last_error("create LLVM function type failed.");
        return false;
    }

    if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
        aot_set_last_error("create LLVM function pointer type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        if (!(func = I64_CONST((uint64)(uintptr_t)jit_memset))
            || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        int32 func_index;
        func_index = aot_get_native_symbol_index(comp_ctx, "memset");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        if (!(func = LLVMGetNamedFunction(func_ctx->module, "memset"))
            && !(func =
                     LLVMAddFunction(func_ctx->module, "memset", func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    params[0] = dst_addr;
    params[1] = val;
    params[2] = len;
    if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params, 3,
                               "call_memset"))) {
        aot_set_last_error("llvm build memset failed.");
        return false;
    }
    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_BULK_MEMORY */

#if WASM_ENABLE_SHARED_MEMORY != 0
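/*
 * Atomic RMW ops (e.g. i32.atomic.rmw.add) map directly onto LLVM's
 * atomicrmw instruction with sequentially consistent ordering; the value
 * pushed on the wasm stack is what the memory held before the operation,
 * zero-extended back to the operand type. Illustrative shape of the
 * emitted IR (not exact):
 *
 *   %old = atomicrmw volatile add ptr %maddr, i32 %value seq_cst
 */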
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint8 atomic_op, uint8 op_type, uint32 align,
                          mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, result;
    bool enable_segue = (op_type == VALUE_TYPE_I32)
                            ? comp_ctx->enable_segue_i32_load
                                  && comp_ctx->enable_segue_i32_store
                            : comp_ctx->enable_segue_i64_load
                                  && comp_ctx->enable_segue_i64_store;

    if (op_type == VALUE_TYPE_I32)
        POP_I32(value);
    else
        POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            if (op_type == VALUE_TYPE_I64)
                BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicRMW(
              comp_ctx->builder, atomic_op, maddr, value,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                     "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                     "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }

    return true;
fail:
    return false;
}
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint8 op_type,
                              uint32 align, mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, expect, result;
    bool enable_segue = (op_type == VALUE_TYPE_I32)
                            ? comp_ctx->enable_segue_i32_load
                                  && comp_ctx->enable_segue_i32_store
                            : comp_ctx->enable_segue_i64_load
                                  && comp_ctx->enable_segue_i64_store;

    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
        POP_I32(expect);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
    }

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            if (op_type == VALUE_TYPE_I64) {
                BUILD_TRUNC(value, I32_TYPE);
                BUILD_TRUNC(expect, I32_TYPE);
            }
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            BUILD_TRUNC(expect, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            BUILD_TRUNC(expect, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicCmpXchg(
              comp_ctx->builder, maddr, expect, value,
              LLVMAtomicOrderingSequentiallyConsistent,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    /* cmpxchg returns an { iN, i1 } struct; extract the previous value
       from field 0 */
    if (!(result = LLVMBuildExtractValue(comp_ctx->builder, result, 0,
                                         "previous_value"))) {
        goto fail;
    }

    if (op_type == VALUE_TYPE_I32) {
        if (LLVMTypeOf(result) != I32_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                         "result_i32"))) {
                goto fail;
            }
        }
        PUSH_I32(result);
    }
    else {
        if (LLVMTypeOf(result) != I64_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                         "result_i64"))) {
                goto fail;
            }
        }
        PUSH_I64(result);
    }

    return true;
fail:
    return false;
}
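
/*
 * memory.atomic.wait / notify are delegated to the runtime rather than
 * emitted inline. For wait, the generated code roughly does (editor's
 * sketch):
 *
 *   ret = wasm_runtime_atomic_wait(inst, maddr, expect, timeout, is_wait64);
 *   if (ret == -1)   // the runtime raised an exception
 *       return zero values so the caller can propagate it
 *   push ret;        // per the threads proposal: 0 woken, 1 value
 *                    // mismatch, 2 timed out
 */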
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint8 op_type, uint32 align, mem_offset_t offset,
                           uint32 bytes)
{
    LLVMValueRef maddr, value, timeout, expect, cmp;
    LLVMValueRef param_values[5], ret_value, func, is_wait64;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    LLVMBasicBlockRef wait_fail, wait_success;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;

    POP_I64(timeout);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(expect);
        is_wait64 = I8_CONST(false);
        if (!(expect = LLVMBuildZExt(comp_ctx->builder, expect, I64_TYPE,
                                     "expect_i64"))) {
            goto fail;
        }
    }
    else {
        POP_I64(expect);
        is_wait64 = I8_CONST(true);
    }

    CHECK_LLVM_CONST(is_wait64);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            false, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I64_TYPE;
    param_types[3] = I64_TYPE;
    param_types[4] = INT8_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);

    /* Call function wasm_runtime_atomic_wait() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = expect;
    param_values[3] = timeout;
    param_values[4] = is_wait64;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntNE, ret_value, I32_NEG_ONE, cmp, "atomic_wait_ret");

    ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
    ADD_BASIC_BLOCK(wait_success, "wait_success");

    LLVMMoveBasicBlockAfter(wait_fail, block_curr);
    LLVMMoveBasicBlockAfter(wait_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, cmp, wait_success, wait_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If atomic wait failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);

    PUSH_I32(ret_value);

    /* Insert suspend check point */
    if (comp_ctx->enable_thread_mgr) {
        if (!check_suspend_flags(comp_ctx, func_ctx, false))
            return false;
    }

    return true;
fail:
    return false;
}
bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint32 align,
                              mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, count;
    LLVMValueRef param_values[3], ret_value, func;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;

    POP_I32(count);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            false, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);

    /* Call function wasm_runtime_atomic_notify() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = count;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 3, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    PUSH_I32(ret_value);

    return true;
fail:
    return false;
}

bool
aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return LLVMBuildFence(comp_ctx->builder,
                          LLVMAtomicOrderingSequentiallyConsistent, false, "")
               ? true
               : false;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */