  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "aot_emit_memory.h"
  6. #include "aot_compiler.h"
  7. #include "aot_emit_exception.h"
  8. #include "../aot/aot_runtime.h"
  9. #include "aot_intrinsic.h"
  10. #include "aot_emit_control.h"
/* Helper macros shared by the emitters in this file.  They expand inside
 * functions that have `comp_ctx` (and, for ADD_BASIC_BLOCK, `func_ctx`) in
 * scope and a `fail:` label: on any LLVM build failure they record an error
 * via aot_set_last_error() and jump to that label. */

/* Emit an LLVM integer-compare instruction into `res`. */
#define BUILD_ICMP(op, left, right, res, name)                                \
    do {                                                                      \
        if (!(res =                                                           \
                  LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
            aot_set_last_error("llvm build icmp failed.");                    \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

/* Emit a binary LLVM instruction into `res`; `Op` is token-pasted onto
 * LLVMBuild (e.g. BUILD_OP(Add, ...) -> LLVMBuildAdd). */
#define BUILD_OP(Op, left, right, res, name)                                \
    do {                                                                    \
        if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) { \
            aot_set_last_error("llvm build " #Op " fail.");                 \
            goto fail;                                                      \
        }                                                                   \
    } while (0)

/* Append a new basic block named `name` to the function being compiled. */
#define ADD_BASIC_BLOCK(block, name)                                          \
    do {                                                                      \
        if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,        \
                                                    func_ctx->func, name))) { \
            aot_set_last_error("llvm add basic block failed.");               \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

/* Move the builder's insertion point to the end of `block`. */
#define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block)
  35. static bool
  36. zero_extend_u64(AOTCompContext *comp_ctx, LLVMValueRef *value, const char *name)
  37. {
  38. if (comp_ctx->pointer_size == sizeof(uint64)) {
  39. /* zero extend to uint64 if the target is 64-bit */
  40. *value = LLVMBuildZExt(comp_ctx->builder, *value, I64_TYPE, name);
  41. if (!*value) {
  42. aot_set_last_error("llvm build zero extend failed.");
  43. return false;
  44. }
  45. }
  46. return true;
  47. }
  48. static LLVMValueRef
  49. get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  50. uint32 bytes)
  51. {
  52. LLVMValueRef mem_check_bound = NULL;
  53. switch (bytes) {
  54. case 1:
  55. mem_check_bound = func_ctx->mem_info[0].mem_bound_check_1byte;
  56. break;
  57. case 2:
  58. mem_check_bound = func_ctx->mem_info[0].mem_bound_check_2bytes;
  59. break;
  60. case 4:
  61. mem_check_bound = func_ctx->mem_info[0].mem_bound_check_4bytes;
  62. break;
  63. case 8:
  64. mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
  65. break;
  66. case 16:
  67. mem_check_bound = func_ctx->mem_info[0].mem_bound_check_16bytes;
  68. break;
  69. default:
  70. bh_assert(0);
  71. return NULL;
  72. }
  73. if (func_ctx->mem_space_unchanged)
  74. return mem_check_bound;
  75. if (!(mem_check_bound = LLVMBuildLoad2(
  76. comp_ctx->builder,
  77. (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE : I32_TYPE,
  78. mem_check_bound, "mem_check_bound"))) {
  79. aot_set_last_error("llvm build load failed.");
  80. return NULL;
  81. }
  82. return mem_check_bound;
  83. }
  84. #if defined(_WIN32) || defined(_WIN32_)
  85. static inline int
  86. ffs(int n)
  87. {
  88. int pos = 0;
  89. if (n == 0)
  90. return 0;
  91. while (!(n & 1)) {
  92. pos++;
  93. n >>= 1;
  94. }
  95. return pos + 1;
  96. }
  97. #endif
  98. static LLVMValueRef
  99. get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
/*
 * Pop the address operand from the value stack, combine it with the
 * constant `offset`, and emit whatever run-time bounds checking is needed
 * for an access of `bytes` bytes on memory 0.
 *
 * Returns the native address of the access: an in-bounds i8* GEP off the
 * memory base, or, when `enable_segue` is set, a GS-segment-based pointer.
 * Returns NULL on failure (error recorded via aot_set_last_error()).
 *
 * When `alignp` is non-NULL it receives the alignment that can be proven
 * for the returned address (1 when nothing can be proven).
 */
LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          mem_offset_t offset, uint32 bytes, bool enable_segue,
                          unsigned int *alignp)
{
    /* The constant offset is materialized in the memory's index width
       (i64 for memory64, i32 otherwise). */
    LLVMValueRef offset_const =
        MEMORY64_COND_VALUE(I64_CONST(offset), I32_CONST(offset));
    LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
    LLVMValueRef mem_base_addr, mem_check_bound;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    AOTValue *aot_value_top;
    uint32 local_idx_of_aot_value = 0;
    uint64 const_value;
    bool is_target_64bit, is_local_of_aot_value = false;
    bool is_const = false;
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].flags & SHARED_MEMORY_FLAG;
#endif
    is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;
    /* In indirect mode, fetch the offset constant from the constant table
       instead of embedding it, when the target supports that intrinsic. */
    if (comp_ctx->is_indirect_mode
        && aot_intrinsic_check_capability(
            comp_ctx, MEMORY64_COND_VALUE("i64.const", "i32.const"))) {
        WASMValue wasm_value;
#if WASM_ENABLE_MEMORY64 != 0
        if (IS_MEMORY64) {
            wasm_value.i64 = offset;
        }
        else
#endif
        {
            wasm_value.i32 = (int32)offset;
        }
        offset_const = aot_load_const_from_table(
            comp_ctx, func_ctx->native_symbol, &wasm_value,
            MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32));
        if (!offset_const) {
            return NULL;
        }
    }
    else {
        CHECK_LLVM_CONST(offset_const);
    }
    /* Get memory base address and memory data size */
    if (func_ctx->mem_space_unchanged
#if WASM_ENABLE_SHARED_MEMORY != 0
        || is_shared_memory
#endif
    ) {
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        /* Memory may have been moved by a grow: reload the base pointer. */
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }
    aot_value_top =
        func_ctx->block_stack.block_list_end->value_stack.value_list_end;
    if (aot_value_top) {
        /* aot_value_top is freed in the following POP_I32(addr),
           so save its fields here for further use */
        is_local_of_aot_value = aot_value_top->is_local;
        is_const = aot_value_top->is_const;
        local_idx_of_aot_value = aot_value_top->local_idx;
        const_value = aot_value_top->const_value;
    }
    POP_MEM_OFFSET(addr);
    /*
     * Note: not throw the integer-overflow-exception here since it must
     * have been thrown when converting float to integer before
     */
    /* return address directly if constant offset and inside memory space */
    if (LLVMIsEfficientConstInt(addr) || is_const) {
        uint64 value;
        if (LLVMIsEfficientConstInt(addr)) {
            value = (uint64)LLVMConstIntGetZExtValue(addr);
        }
        else {
            value = const_value;
        }
        uint64 mem_offset = value + (uint64)offset;
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].init_page_count;
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;
        if (alignp != NULL) {
            /*
             * A note about max_align below:
             * the assumption here is the base address of a linear memory
             * has the natural alignment. for platforms using mmap, it can
             * be even larger. for now, use a conservative value.
             */
            const unsigned int max_align = 8;
            /* Lowest set bit of the constant offset bounds the provable
               alignment: offset 0 (ffs == 0) proves max_align. */
            int shift = ffs((int)(unsigned int)mem_offset);
            if (shift == 0) {
                *alignp = max_align;
            }
            else {
                unsigned int align = 1 << (shift - 1);
                if (align > max_align) {
                    align = max_align;
                }
                *alignp = align;
            }
        }
        if (mem_offset + bytes <= mem_data_size) {
            /* inside memory space */
            if (comp_ctx->pointer_size == sizeof(uint64))
                offset1 = I64_CONST(mem_offset);
            else
                offset1 = I32_CONST((uint32)mem_offset);
            CHECK_LLVM_CONST(offset1);
            if (!enable_segue) {
                if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder,
                                                    INT8_TYPE, mem_base_addr,
                                                    &offset1, 1, "maddr"))) {
                    aot_set_last_error("llvm build add failed.");
                    goto fail;
                }
            }
            else {
                /* Segue: the offset itself becomes a GS-based pointer. */
                if (!(maddr = LLVMBuildIntToPtr(comp_ctx->builder, offset1,
                                                INT8_PTR_TYPE_GS, "maddr"))) {
                    aot_set_last_error("llvm build IntToPtr failed.");
                    goto fail;
                }
            }
            return maddr;
        }
    }
    else if (alignp != NULL) {
        /* Dynamic address: nothing can be proven about alignment. */
        *alignp = 1;
    }
    /* Widen both operands so the add below cannot wrap on 64-bit hosts. */
    if (is_target_64bit) {
        if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
                                           I64_TYPE, "offset_i64"))
            || !(addr = LLVMBuildZExt(comp_ctx->builder, addr, I64_TYPE,
                                      "addr_i64"))) {
            aot_set_last_error("llvm build zero extend failed.");
            goto fail;
        }
    }
    /* offset1 = offset + addr; */
    BUILD_OP(Add, offset_const, addr, offset1, "offset1");
    /* Emit the bounds check unless this (local, offset, bytes) combination
       was already checked earlier in the function. */
    if (comp_ctx->enable_bound_check
        && !(is_local_of_aot_value
             && aot_checked_addr_list_find(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))) {
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].init_page_count;
        if (init_page_count == 0) {
            /* Memory may currently have zero pages: trap on any access
               while the page count is still zero. */
            LLVMValueRef mem_size;
            if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) {
                goto fail;
            }
            BUILD_ICMP(LLVMIntEQ, mem_size,
                       MEMORY64_COND_VALUE(I64_ZERO, I32_ZERO), cmp, "is_zero");
            ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
            LLVMMoveBasicBlockAfter(check_succ, block_curr);
            if (!aot_emit_exception(comp_ctx, func_ctx,
                                    EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                    check_succ)) {
                goto fail;
            }
            SET_BUILD_POS(check_succ);
            block_curr = check_succ;
        }
        if (!(mem_check_bound =
                  get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
            goto fail;
        }
        if (is_target_64bit) {
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
        }
        else {
            /* Check integer overflow */
            BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp2, "cmp2");
            BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
        }
        /* Add basic blocks */
        ADD_BASIC_BLOCK(check_succ, "check_succ");
        LLVMMoveBasicBlockAfter(check_succ, block_curr);
        if (!aot_emit_exception(comp_ctx, func_ctx,
                                EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                check_succ)) {
            goto fail;
        }
        SET_BUILD_POS(check_succ);
        /* Remember that this local+offset+bytes is now proven in-bounds. */
        if (is_local_of_aot_value) {
            if (!aot_checked_addr_list_add(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))
                goto fail;
        }
    }
    if (!enable_segue) {
        /* maddr = mem_base_addr + offset1 */
        if (!(maddr =
                  LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset1, 1, "maddr"))) {
            aot_set_last_error("llvm build add failed.");
            goto fail;
        }
    }
    else {
        /* Segue: base is the dynamic address as a GS pointer, then add
           the constant offset with a GEP. */
        LLVMValueRef maddr_base;
        if (!(maddr_base = LLVMBuildIntToPtr(comp_ctx->builder, addr,
                                             INT8_PTR_TYPE_GS, "maddr_base"))) {
            aot_set_last_error("llvm build int to ptr failed.");
            goto fail;
        }
        if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                            maddr_base, &offset_const, 1,
                                            "maddr"))) {
            aot_set_last_error("llvm build inboundgep failed.");
            goto fail;
        }
    }
    return maddr;
fail:
    return NULL;
}
/* Load/store helper macros.  All expand inside the op emitters below,
 * which declare `maddr`, `value`, `known_align` and a `fail:` label. */

/* Cast the computed native address `maddr` to the pointer type required
 * for the access (a GS-segment pointer type in segue mode). */
#define BUILD_PTR_CAST(ptr_type)                                       \
    do {                                                               \
        if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr,       \
                                       ptr_type, "data_ptr"))) {       \
            aot_set_last_error("llvm build bit cast failed.");         \
            goto fail;                                                 \
        }                                                              \
    } while (0)

/* Non-atomic load of `data_type` from maddr into `value`, tagged with the
 * alignment proven by aot_check_memory_overflow(). */
#define BUILD_LOAD(data_type)                                          \
    do {                                                               \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type,     \
                                     maddr, "data"))) {                \
            aot_set_last_error("llvm build load failed.");             \
            goto fail;                                                 \
        }                                                              \
        LLVMSetAlignment(value, known_align);                          \
    } while (0)

/* Truncate `value` to the narrower `data_type` before a narrow store. */
#define BUILD_TRUNC(value, data_type)                                  \
    do {                                                               \
        if (!(value = LLVMBuildTrunc(comp_ctx->builder, value,         \
                                     data_type, "val_trunc"))) {       \
            aot_set_last_error("llvm build trunc failed.");            \
            goto fail;                                                 \
        }                                                              \
    } while (0)

/* Non-atomic store of `value` to maddr, tagged with known_align. */
#define BUILD_STORE()                                                    \
    do {                                                                 \
        LLVMValueRef res;                                                \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {  \
            aot_set_last_error("llvm build store failed.");              \
            goto fail;                                                   \
        }                                                                \
        LLVMSetAlignment(res, known_align);                              \
    } while (0)

/* Sign-extend a narrow loaded `value` up to the wasm stack type. */
#define BUILD_SIGN_EXT(dst_type)                                       \
    do {                                                               \
        if (!(value = LLVMBuildSExt(comp_ctx->builder, value,          \
                                    dst_type, "data_s_ext"))) {        \
            aot_set_last_error("llvm build sign ext failed.");         \
            goto fail;                                                 \
        }                                                              \
    } while (0)

/* Zero-extend a narrow loaded `value` up to the wasm stack type. */
#define BUILD_ZERO_EXT(dst_type)                                       \
    do {                                                               \
        if (!(value = LLVMBuildZExt(comp_ctx->builder, value,          \
                                    dst_type, "data_z_ext"))) {        \
            aot_set_last_error("llvm build zero ext failed.");         \
            goto fail;                                                 \
        }                                                              \
    } while (0)
#if WASM_ENABLE_SHARED_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0
/*
 * Emit a run-time check that `addr` is aligned to (1 << align) bytes and
 * raise EXCE_UNALIGNED_ATOMIC otherwise.  `align` is the log2 alignment
 * (as encoded in the wasm memarg).
 *
 * The pointer is converted to i32 before masking; only the low `align`
 * bits participate in the test, so discarding the high bits of a 64-bit
 * pointer does not affect the result.
 */
bool
check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       LLVMValueRef addr, uint32 align)
{
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_align_succ;
    LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
    LLVMValueRef res;
    CHECK_LLVM_CONST(align_mask);
    /* Convert pointer to int */
    if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr, I32_TYPE,
                                   "address"))) {
        aot_set_last_error("llvm build ptr to int failed.");
        goto fail;
    }
    /* The memory address should be aligned */
    BUILD_OP(And, addr, align_mask, res, "and");
    BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");
    /* Add basic blocks */
    ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
    LLVMMoveBasicBlockAfter(check_align_succ, block_curr);
    /* Trap when any low bit is set; continue in check_align_succ. */
    if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_UNALIGNED_ATOMIC, true,
                            res, check_align_succ)) {
        goto fail;
    }
    SET_BUILD_POS(check_align_succ);
    return true;
fail:
    return false;
}
#endif /* WASM_ENABLE_SHARED_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0 */
#if WASM_ENABLE_SHARED_MEMORY != 0
/* Atomic load of `data_type` into `value`: alignment is verified at run
 * time (trap on misalignment), then a volatile, sequentially-consistent
 * load with the natural alignment (1 << align) is emitted. */
#define BUILD_ATOMIC_LOAD(align, data_type)                                 \
    do {                                                                    \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) {  \
            goto fail;                                                      \
        }                                                                   \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr,   \
                                     "data"))) {                            \
            aot_set_last_error("llvm build load failed.");                  \
            goto fail;                                                      \
        }                                                                   \
        LLVMSetAlignment(value, 1 << align);                                \
        LLVMSetVolatile(value, true);                                       \
        LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent);   \
    } while (0)

/* Atomic store of `value` to maddr: run-time alignment check, then a
 * volatile, sequentially-consistent store with alignment (1 << align). */
#define BUILD_ATOMIC_STORE(align)                                           \
    do {                                                                    \
        LLVMValueRef res;                                                   \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) {  \
            goto fail;                                                      \
        }                                                                   \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {     \
            aot_set_last_error("llvm build store failed.");                 \
            goto fail;                                                      \
        }                                                                   \
        LLVMSetAlignment(res, 1 << align);                                  \
        LLVMSetVolatile(res, true);                                         \
        LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent);     \
    } while (0)
#endif
/* Emit i32.load / i32.load8_s/u / i32.load16_s/u (optionally atomic).
 * `bytes` selects the access width; narrow loads are sign- or zero-
 * extended to i32 per `sign`.  `align` is the log2 memarg alignment,
 * used only for the atomic path's run-time check. */
bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;
    bool enable_segue = comp_ctx->enable_segue_i32_load;
    unsigned int known_align;
    /* Bounds-check the access and obtain the native address; known_align
       is the provable alignment used to tag the load. */
    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;
    switch (bytes) {
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I32_TYPE);
            else
#endif
                BUILD_LOAD(I32_TYPE);
            break;
        case 2:
        case 1:
            if (bytes == 2) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT16_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
                data_type = INT16_TYPE;
            }
            else {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT8_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                /* Atomic narrow loads are always zero-extended. */
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I32_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I32_TYPE);
                else
                    BUILD_ZERO_EXT(I32_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }
    PUSH_I32(value);
    (void)data_type;
    return true;
fail:
    return false;
}
/* Emit i64.load / i64.load8/16/32_s/u (optionally atomic).  Mirrors
 * aot_compile_op_i32_load but extends narrow loads to i64. */
bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;
    bool enable_segue = comp_ctx->enable_segue_i64_load;
    unsigned int known_align;
    /* Bounds-check the access and obtain the native address. */
    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;
    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I64_TYPE);
            else
#endif
                BUILD_LOAD(I64_TYPE);
            break;
        case 4:
        case 2:
        case 1:
            if (bytes == 4) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT32_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
                data_type = I32_TYPE;
            }
            else if (bytes == 2) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT16_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
                data_type = INT16_TYPE;
            }
            else {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT8_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                /* Atomic narrow loads are always zero-extended. */
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I64_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I64_TYPE);
                else
                    BUILD_ZERO_EXT(I64_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }
    PUSH_I64(value);
    (void)data_type;
    return true;
fail:
    return false;
}
  579. bool
  580. aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  581. uint32 align, mem_offset_t offset)
  582. {
  583. LLVMValueRef maddr, value;
  584. bool enable_segue = comp_ctx->enable_segue_f32_load;
  585. unsigned int known_align;
  586. if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
  587. enable_segue, &known_align)))
  588. return false;
  589. if (!enable_segue)
  590. BUILD_PTR_CAST(F32_PTR_TYPE);
  591. else
  592. BUILD_PTR_CAST(F32_PTR_TYPE_GS);
  593. BUILD_LOAD(F32_TYPE);
  594. PUSH_F32(value);
  595. return true;
  596. fail:
  597. return false;
  598. }
  599. bool
  600. aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  601. uint32 align, mem_offset_t offset)
  602. {
  603. LLVMValueRef maddr, value;
  604. bool enable_segue = comp_ctx->enable_segue_f64_load;
  605. unsigned int known_align;
  606. if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
  607. enable_segue, &known_align)))
  608. return false;
  609. if (!enable_segue)
  610. BUILD_PTR_CAST(F64_PTR_TYPE);
  611. else
  612. BUILD_PTR_CAST(F64_PTR_TYPE_GS);
  613. BUILD_LOAD(F64_TYPE);
  614. PUSH_F64(value);
  615. return true;
  616. fail:
  617. return false;
  618. }
/* Emit i32.store / i32.store8 / i32.store16 (optionally atomic).
 * `bytes` selects the width; the popped i32 is truncated for the narrow
 * forms.  `align` (log2) is used only by the atomic path. */
bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_i32_store;
    /* The value operand is popped before the address is bounds-checked. */
    POP_I32(value);
    unsigned int known_align;
    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;
    switch (bytes) {
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            /* i32.store16: keep only the low 16 bits. */
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            /* i32.store8: keep only the low 8 bits. */
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }
#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}
/* Emit i64.store / i64.store8/16/32 (optionally atomic).  Mirrors
 * aot_compile_op_i32_store with an extra 4-byte narrow form. */
bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_i64_store;
    /* The value operand is popped before the address is bounds-checked. */
    POP_I64(value);
    unsigned int known_align;
    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;
    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            /* i64.store32: keep only the low 32 bits. */
            BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            /* i64.store16: keep only the low 16 bits. */
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            /* i64.store8: keep only the low 8 bits. */
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }
#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}
  720. bool
  721. aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  722. uint32 align, mem_offset_t offset)
  723. {
  724. LLVMValueRef maddr, value;
  725. bool enable_segue = comp_ctx->enable_segue_f32_store;
  726. POP_F32(value);
  727. unsigned int known_align;
  728. if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
  729. enable_segue, &known_align)))
  730. return false;
  731. if (!enable_segue)
  732. BUILD_PTR_CAST(F32_PTR_TYPE);
  733. else
  734. BUILD_PTR_CAST(F32_PTR_TYPE_GS);
  735. BUILD_STORE();
  736. return true;
  737. fail:
  738. return false;
  739. }
  740. bool
  741. aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  742. uint32 align, mem_offset_t offset)
  743. {
  744. LLVMValueRef maddr, value;
  745. bool enable_segue = comp_ctx->enable_segue_f64_store;
  746. POP_F64(value);
  747. unsigned int known_align;
  748. if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
  749. enable_segue, &known_align)))
  750. return false;
  751. if (!enable_segue)
  752. BUILD_PTR_CAST(F64_PTR_TYPE);
  753. else
  754. BUILD_PTR_CAST(F64_PTR_TYPE_GS);
  755. BUILD_STORE();
  756. return true;
  757. fail:
  758. return false;
  759. }
  760. static LLVMValueRef
  761. get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
  762. {
  763. LLVMValueRef mem_size;
  764. if (func_ctx->mem_space_unchanged) {
  765. mem_size = func_ctx->mem_info[0].mem_cur_page_count_addr;
  766. }
  767. else {
  768. if (!(mem_size = LLVMBuildLoad2(
  769. comp_ctx->builder, I32_TYPE,
  770. func_ctx->mem_info[0].mem_cur_page_count_addr, "mem_size"))) {
  771. aot_set_last_error("llvm build load failed.");
  772. goto fail;
  773. }
  774. }
  775. return LLVMBuildIntCast(comp_ctx->builder, mem_size,
  776. MEMORY64_COND_VALUE(I64_TYPE, I32_TYPE), "");
  777. fail:
  778. return NULL;
  779. }
  780. bool
  781. aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
  782. {
  783. LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
  784. if (mem_size)
  785. PUSH_PAGE_COUNT(mem_size);
  786. return mem_size ? true : false;
  787. fail:
  788. return false;
  789. }
  790. bool
  791. aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
  792. {
  793. LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
  794. LLVMValueRef delta, param_values[2], ret_value, func, value;
  795. LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
  796. int32 func_index;
  797. #if WASM_ENABLE_MEMORY64 != 0
  798. LLVMValueRef u32_max, u32_cmp_result;
  799. #endif
  800. if (!mem_size)
  801. return false;
  802. POP_PAGE_COUNT(delta);
  803. /* TODO: multi-memory aot_enlarge_memory_with_idx() */
  804. /* Function type of aot_enlarge_memory() */
  805. param_types[0] = INT8_PTR_TYPE;
  806. param_types[1] = I32_TYPE;
  807. ret_type = INT8_TYPE;
  808. if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
  809. aot_set_last_error("llvm add function type failed.");
  810. return false;
  811. }
  812. if (comp_ctx->is_jit_mode) {
  813. /* JIT mode, call the function directly */
  814. if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
  815. aot_set_last_error("llvm add pointer type failed.");
  816. return false;
  817. }
  818. if (!(value = I64_CONST((uint64)(uintptr_t)wasm_enlarge_memory))
  819. || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {
  820. aot_set_last_error("create LLVM value failed.");
  821. return false;
  822. }
  823. }
  824. else if (comp_ctx->is_indirect_mode) {
  825. if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
  826. aot_set_last_error("create LLVM function type failed.");
  827. return false;
  828. }
  829. func_index =
  830. aot_get_native_symbol_index(comp_ctx, "aot_enlarge_memory");
  831. if (func_index < 0) {
  832. return false;
  833. }
  834. if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
  835. func_ptr_type, func_index))) {
  836. return false;
  837. }
  838. }
  839. else {
  840. char *func_name = "aot_enlarge_memory";
  841. /* AOT mode, delcare the function */
  842. if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name))
  843. && !(func =
  844. LLVMAddFunction(func_ctx->module, func_name, func_type))) {
  845. aot_set_last_error("llvm add function failed.");
  846. return false;
  847. }
  848. }
  849. /* Call function aot_enlarge_memory() */
  850. param_values[0] = func_ctx->aot_inst;
  851. param_values[1] = LLVMBuildTrunc(comp_ctx->builder, delta, I32_TYPE, "");
  852. if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
  853. param_values, 2, "call"))) {
  854. aot_set_last_error("llvm build call failed.");
  855. return false;
  856. }
  857. BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");
  858. #if WASM_ENABLE_MEMORY64 != 0
  859. if (IS_MEMORY64) {
  860. if (!(u32_max = I64_CONST(UINT32_MAX))) {
  861. aot_set_last_error("llvm build const failed");
  862. return false;
  863. }
  864. BUILD_ICMP(LLVMIntULE, delta, u32_max, u32_cmp_result, "page_size_cmp");
  865. BUILD_OP(And, ret_value, u32_cmp_result, ret_value, "and");
  866. }
  867. #endif
  868. /* ret_value = ret_value == true ? pre_page_count : -1 */
  869. if (!(ret_value = LLVMBuildSelect(
  870. comp_ctx->builder, ret_value, mem_size,
  871. MEMORY64_COND_VALUE(I64_NEG_ONE, I32_NEG_ONE), "mem_grow_ret"))) {
  872. aot_set_last_error("llvm build select failed.");
  873. return false;
  874. }
  875. PUSH_PAGE_COUNT(ret_value);
  876. return true;
  877. fail:
  878. return false;
  879. }
  880. #if WASM_ENABLE_BULK_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0
  881. LLVMValueRef
  882. check_bulk_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  883. LLVMValueRef offset, LLVMValueRef bytes)
  884. {
  885. LLVMValueRef maddr, max_addr, cmp;
  886. LLVMValueRef mem_base_addr;
  887. LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
  888. LLVMBasicBlockRef check_succ;
  889. LLVMValueRef mem_size;
  890. /* Get memory base address and memory data size */
  891. #if WASM_ENABLE_SHARED_MEMORY != 0
  892. bool is_shared_memory = comp_ctx->comp_data->memories[0].flags & 0x02;
  893. if (func_ctx->mem_space_unchanged || is_shared_memory) {
  894. #else
  895. if (func_ctx->mem_space_unchanged) {
  896. #endif
  897. mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
  898. }
  899. else {
  900. if (!(mem_base_addr = LLVMBuildLoad2(
  901. comp_ctx->builder, OPQ_PTR_TYPE,
  902. func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
  903. aot_set_last_error("llvm build load failed.");
  904. goto fail;
  905. }
  906. }
  907. /*
  908. * Note: not throw the integer-overflow-exception here since it must
  909. * have been thrown when converting float to integer before
  910. */
  911. /* return addres directly if constant offset and inside memory space */
  912. if (LLVMIsEfficientConstInt(offset) && LLVMIsEfficientConstInt(bytes)) {
  913. uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(offset);
  914. uint64 mem_len = (uint64)LLVMConstIntGetZExtValue(bytes);
  915. uint32 num_bytes_per_page =
  916. comp_ctx->comp_data->memories[0].num_bytes_per_page;
  917. uint32 init_page_count =
  918. comp_ctx->comp_data->memories[0].init_page_count;
  919. uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;
  920. if (mem_data_size > 0 && mem_offset + mem_len <= mem_data_size) {
  921. /* inside memory space */
  922. /* maddr = mem_base_addr + moffset */
  923. if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
  924. mem_base_addr, &offset, 1,
  925. "maddr"))) {
  926. aot_set_last_error("llvm build add failed.");
  927. goto fail;
  928. }
  929. return maddr;
  930. }
  931. }
  932. if (func_ctx->mem_space_unchanged) {
  933. mem_size = func_ctx->mem_info[0].mem_data_size_addr;
  934. }
  935. else {
  936. if (!(mem_size = LLVMBuildLoad2(
  937. comp_ctx->builder, I64_TYPE,
  938. func_ctx->mem_info[0].mem_data_size_addr, "mem_size"))) {
  939. aot_set_last_error("llvm build load failed.");
  940. goto fail;
  941. }
  942. }
  943. ADD_BASIC_BLOCK(check_succ, "check_succ");
  944. LLVMMoveBasicBlockAfter(check_succ, block_curr);
  945. offset =
  946. LLVMBuildZExt(comp_ctx->builder, offset, I64_TYPE, "extend_offset");
  947. bytes = LLVMBuildZExt(comp_ctx->builder, bytes, I64_TYPE, "extend_len");
  948. BUILD_OP(Add, offset, bytes, max_addr, "max_addr");
  949. BUILD_ICMP(LLVMIntUGT, max_addr, mem_size, cmp, "cmp_max_mem_addr");
  950. if (!aot_emit_exception(comp_ctx, func_ctx,
  951. EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
  952. check_succ)) {
  953. goto fail;
  954. }
  955. /* maddr = mem_base_addr + offset */
  956. if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
  957. mem_base_addr, &offset, 1, "maddr"))) {
  958. aot_set_last_error("llvm build add failed.");
  959. goto fail;
  960. }
  961. return maddr;
  962. fail:
  963. return NULL;
  964. }
  965. #endif /* end of WASM_ENABLE_BULK_MEMORY != 0 or WASM_ENABLE_STRINGREF != 0 */
  966. #if WASM_ENABLE_BULK_MEMORY != 0
  967. bool
  968. aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  969. uint32 seg_index)
  970. {
  971. LLVMValueRef seg, offset, dst, len, param_values[5], ret_value, func, value;
  972. LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
  973. AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
  974. LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
  975. LLVMBasicBlockRef mem_init_fail, init_success;
  976. seg = I32_CONST(seg_index);
  977. POP_I32(len);
  978. POP_I32(offset);
  979. POP_MEM_OFFSET(dst);
  980. if (!zero_extend_u64(comp_ctx, &dst, "dst64")) {
  981. return false;
  982. }
  983. param_types[0] = INT8_PTR_TYPE;
  984. param_types[1] = I32_TYPE;
  985. param_types[2] = I32_TYPE;
  986. param_types[3] = I32_TYPE;
  987. param_types[4] = SIZE_T_TYPE;
  988. ret_type = INT8_TYPE;
  989. if (comp_ctx->is_jit_mode)
  990. GET_AOT_FUNCTION(llvm_jit_memory_init, 5);
  991. else
  992. GET_AOT_FUNCTION(aot_memory_init, 5);
  993. /* Call function aot_memory_init() */
  994. param_values[0] = func_ctx->aot_inst;
  995. param_values[1] = seg;
  996. param_values[2] = offset;
  997. param_values[3] = len;
  998. param_values[4] = dst;
  999. if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
  1000. param_values, 5, "call"))) {
  1001. aot_set_last_error("llvm build call failed.");
  1002. return false;
  1003. }
  1004. BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_init_ret");
  1005. ADD_BASIC_BLOCK(mem_init_fail, "mem_init_fail");
  1006. ADD_BASIC_BLOCK(init_success, "init_success");
  1007. LLVMMoveBasicBlockAfter(mem_init_fail, block_curr);
  1008. LLVMMoveBasicBlockAfter(init_success, block_curr);
  1009. if (!LLVMBuildCondBr(comp_ctx->builder, ret_value, init_success,
  1010. mem_init_fail)) {
  1011. aot_set_last_error("llvm build cond br failed.");
  1012. goto fail;
  1013. }
  1014. /* If memory.init failed, return this function
  1015. so the runtime can catch the exception */
  1016. LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
  1017. if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
  1018. goto fail;
  1019. }
  1020. LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
  1021. return true;
  1022. fail:
  1023. return false;
  1024. }
/* Compile Wasm opcode data.drop: drop the passive data segment
 * `seg_index` by calling the runtime helper aot_data_drop()
 * (llvm_jit_data_drop() in JIT mode). Returns false on compile error. */
bool
aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 seg_index)
{
    /* `value` is used by the GET_AOT_FUNCTION macro expansion */
    LLVMValueRef seg, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    /* Helper signature: int8 (*)(inst, uint32 seg_index) */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_data_drop, 2);
    else
        GET_AOT_FUNCTION(aot_data_drop, 2);

    /* Call function aot_data_drop() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    return true;
fail:
    return false;
}
/* Compile Wasm opcode memory.copy: pop len/src/dst, bounds-check both
 * regions, then emit a memmove (overlap-safe copy). JIT and indirect
 * modes call an out-of-line memmove through a function pointer; plain
 * AOT mode emits an LLVM memmove intrinsic instead. */
bool
aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef src, dst, src_addr, dst_addr, len, res;
    bool call_aot_memmove = false;

    POP_MEM_OFFSET(len);
    POP_MEM_OFFSET(src);
    POP_MEM_OFFSET(dst);

    /* Both source and destination ranges must lie inside linear memory;
       each check returns the corresponding native address */
    if (!(src_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
        return false;

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    /* Widen len to 64 bits for the size_t-typed copy length */
    if (!zero_extend_u64(comp_ctx, &len, "len64")) {
        return false;
    }

    call_aot_memmove = comp_ctx->is_indirect_mode || comp_ctx->is_jit_mode;
    if (call_aot_memmove) {
        LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
        LLVMValueRef func, params[3];

        /* memmove-compatible signature: void *(*)(void *, void *, size_t) */
        param_types[0] = INT8_PTR_TYPE;
        param_types[1] = INT8_PTR_TYPE;
        param_types[2] = SIZE_T_TYPE;
        ret_type = INT8_PTR_TYPE;

        if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }

        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function pointer type failed.");
            return false;
        }

        if (comp_ctx->is_jit_mode) {
            /* JIT mode: embed the host address of aot_memmove directly */
            if (!(func = I64_CONST((uint64)(uintptr_t)aot_memmove))
                || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
                aot_set_last_error("create LLVM value failed.");
                return false;
            }
        }
        else {
            /* Indirect (XIP) mode: resolve memmove via the native
               symbol table */
            int32 func_index;
            func_index = aot_get_native_symbol_index(comp_ctx, "memmove");
            if (func_index < 0) {
                return false;
            }
            if (!(func =
                      aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                              func_ptr_type, func_index))) {
                return false;
            }
        }

        params[0] = dst_addr;
        params[1] = src_addr;
        params[2] = len;
        if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params,
                                   3, "call_memmove"))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    else {
        /* Plain AOT mode: emit the llvm.memmove intrinsic (align 1) */
        if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1, src_addr,
                                     1, len))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    return true;
fail:
    return false;
}
  1122. static void *
  1123. jit_memset(void *s, int c, size_t n)
  1124. {
  1125. return memset(s, c, n);
  1126. }
/* Compile Wasm opcode memory.fill: pop len/val/dst, bounds-check the
 * destination range, then call memset through a function pointer
 * (jit_memset in JIT mode, the native symbol table in indirect mode, or
 * a declared external "memset" in plain AOT mode). */
bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef val, dst, dst_addr, len, res;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
    LLVMValueRef func, params[3];

    POP_MEM_OFFSET(len);
    POP_I32(val);
    POP_MEM_OFFSET(dst);

    /* Destination range must lie inside linear memory; returns the
       native destination address */
    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    /* Widen len to 64 bits for the size_t-typed fill length */
    if (!zero_extend_u64(comp_ctx, &len, "len64")) {
        return false;
    }

    /* memset-compatible signature: void *(*)(void *, int, size_t) */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = SIZE_T_TYPE;
    ret_type = INT8_PTR_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
        aot_set_last_error("create LLVM function type failed.");
        return false;
    }

    if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
        aot_set_last_error("create LLVM function pointer type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        /* JIT mode: embed the host address of jit_memset directly */
        if (!(func = I64_CONST((uint64)(uintptr_t)jit_memset))
            || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        /* Indirect (XIP) mode: resolve memset via the native symbol table */
        int32 func_index;
        func_index = aot_get_native_symbol_index(comp_ctx, "memset");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        /* Plain AOT mode: declare/reuse an external "memset" */
        if (!(func = LLVMGetNamedFunction(func_ctx->module, "memset"))
            && !(func =
                     LLVMAddFunction(func_ctx->module, "memset", func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    params[0] = dst_addr;
    params[1] = val;
    params[2] = len;
    if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params, 3,
                               "call_memset"))) {
        aot_set_last_error("llvm build memset failed.");
        return false;
    }
    return true;
fail:
    return false;
}
  1191. #endif /* end of WASM_ENABLE_BULK_MEMORY */
  1192. #if WASM_ENABLE_SHARED_MEMORY != 0
/* Compile a Wasm atomic read-modify-write opcode: pop the operand,
 * bounds- and alignment-check the address, emit a sequentially
 * consistent atomicrmw of `atomic_op`, and push the (zero-extended)
 * previous value. `bytes` selects the access width (1/2/4/8). */
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint8 atomic_op, uint8 op_type, uint32 align,
                          mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, result;
    /* Segue (segment-register addressing) is only used when both load
       and store are enabled for the operand's value type */
    bool enable_segue = (op_type == VALUE_TYPE_I32)
                            ? comp_ctx->enable_segue_i32_load
                                  && comp_ctx->enable_segue_i32_store
                            : comp_ctx->enable_segue_i64_load
                                  && comp_ctx->enable_segue_i64_store;

    if (op_type == VALUE_TYPE_I32)
        POP_I32(value);
    else
        POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    /* Cast the address to a pointer of the access width, and truncate
       the operand when it is wider than the access */
    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            if (op_type == VALUE_TYPE_I64)
                BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicRMW(
              comp_ctx->builder, atomic_op, maddr, value,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    /* Push the previous value zero-extended back to the operand type */
    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                     "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                     "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }

    return true;
fail:
    return false;
}
/* Compile a Wasm atomic compare-exchange opcode: pop replacement value
 * and expected value, bounds- and alignment-check the address, emit a
 * sequentially consistent cmpxchg, and push the previous value
 * (zero-extended to the operand type). `bytes` selects the access
 * width (1/2/4/8). */
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint8 op_type,
                              uint32 align, mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, expect, result;
    /* Segue addressing only when both load and store are enabled for
       the operand's value type */
    bool enable_segue = (op_type == VALUE_TYPE_I32)
                            ? comp_ctx->enable_segue_i32_load
                                  && comp_ctx->enable_segue_i32_store
                            : comp_ctx->enable_segue_i64_load
                                  && comp_ctx->enable_segue_i64_store;

    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
        POP_I32(expect);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
    }

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    /* Cast the address to a pointer of the access width; truncate both
       operands when they are wider than the access */
    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            if (op_type == VALUE_TYPE_I64) {
                BUILD_TRUNC(value, I32_TYPE);
                BUILD_TRUNC(expect, I32_TYPE);
            }
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            BUILD_TRUNC(expect, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            BUILD_TRUNC(expect, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicCmpXchg(
              comp_ctx->builder, maddr, expect, value,
              LLVMAtomicOrderingSequentiallyConsistent,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    /* CmpXchg return {i32, i1} structure,
       we need to extract the previous_value from the structure */
    if (!(result = LLVMBuildExtractValue(comp_ctx->builder, result, 0,
                                         "previous_value"))) {
        goto fail;
    }

    /* Push the previous value, zero-extended if narrower than the
       operand type */
    if (op_type == VALUE_TYPE_I32) {
        if (LLVMTypeOf(result) != I32_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                         "result_i32"))) {
                goto fail;
            }
        }
        PUSH_I32(result);
    }
    else {
        if (LLVMTypeOf(result) != I64_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                         "result_i64"))) {
                goto fail;
            }
        }
        PUSH_I64(result);
    }

    return true;
fail:
    return false;
}
/* Compile Wasm opcode memory.atomic.wait32/wait64: pop timeout and
 * expected value, check the address, call wasm_runtime_atomic_wait(),
 * and push its i32 result. A -1 return from the helper makes the
 * generated code return a zeroed function result so the runtime can
 * surface the exception. */
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint8 op_type, uint32 align, mem_offset_t offset,
                           uint32 bytes)
{
    LLVMValueRef maddr, value, timeout, expect, cmp;
    LLVMValueRef param_values[5], ret_value, func, is_wait64;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    LLVMBasicBlockRef wait_fail, wait_success;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;

    POP_I64(timeout);
    if (op_type == VALUE_TYPE_I32) {
        /* wait32: widen the expected value so the helper always takes i64 */
        POP_I32(expect);
        is_wait64 = I8_CONST(false);
        if (!(expect = LLVMBuildZExt(comp_ctx->builder, expect, I64_TYPE,
                                     "expect_i64"))) {
            goto fail;
        }
    }
    else {
        POP_I64(expect);
        is_wait64 = I8_CONST(true);
    }

    CHECK_LLVM_CONST(is_wait64);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            false, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    /* Helper signature:
       int32 (*)(inst, maddr, expect_i64, timeout_i64, is_wait64) */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I64_TYPE;
    param_types[3] = I64_TYPE;
    param_types[4] = INT8_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);

    /* Call function wasm_runtime_atomic_wait() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = expect;
    param_values[3] = timeout;
    param_values[4] = is_wait64;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    /* -1 indicates failure */
    BUILD_ICMP(LLVMIntNE, ret_value, I32_NEG_ONE, cmp, "atomic_wait_ret");

    ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
    ADD_BASIC_BLOCK(wait_success, "wait_success");
    LLVMMoveBasicBlockAfter(wait_fail, block_curr);
    LLVMMoveBasicBlockAfter(wait_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, cmp, wait_success, wait_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If atomic wait failed, return this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);

    PUSH_I32(ret_value);

    /* Insert suspend check point */
    if (comp_ctx->enable_thread_mgr) {
        if (!check_suspend_flags(comp_ctx, func_ctx, false))
            return false;
    }

    return true;
fail:
    return false;
}
/* Compile Wasm opcode memory.atomic.notify: pop the waiter count,
 * check the address, call wasm_runtime_atomic_notify(), and push its
 * i32 result (the number of woken waiters, per the helper's return). */
bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint32 align,
                              mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, count;
    /* `value` is used by the GET_AOT_FUNCTION macro expansion */
    LLVMValueRef param_values[3], ret_value, func;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;

    POP_I32(count);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            false, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    /* Helper signature: int32 (*)(inst, maddr, count) */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);

    /* Call function wasm_runtime_atomic_notify() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = count;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 3, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    PUSH_I32(ret_value);

    return true;
fail:
    return false;
}
  1473. bool
  1474. aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
  1475. {
  1476. return LLVMBuildFence(comp_ctx->builder,
  1477. LLVMAtomicOrderingSequentiallyConsistent, false, "")
  1478. ? true
  1479. : false;
  1480. }
  1481. #endif /* end of WASM_ENABLE_SHARED_MEMORY */