/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "aot_emit_memory.h"
#include "aot_compiler.h"
#include "aot_emit_exception.h"
#include "../aot/aot_runtime.h"
#include "aot_intrinsic.h"
#include "aot_emit_control.h"

#define BUILD_ICMP(op, left, right, res, name)                                \
    do {                                                                      \
        if (!(res =                                                           \
                  LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
            aot_set_last_error("llvm build icmp failed.");                    \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define BUILD_OP(Op, left, right, res, name)                                  \
    do {                                                                      \
        if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) {   \
            aot_set_last_error("llvm build " #Op " fail.");                   \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define ADD_BASIC_BLOCK(block, name)                                          \
    do {                                                                      \
        if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,        \
                                                    func_ctx->func, name))) { \
            aot_set_last_error("llvm add basic block failed.");               \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block)
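
/* On 64-bit targets, zero-extend *value to i64 so that the following
   address arithmetic happens in the native pointer width; a no-op on
   32-bit targets. */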
static bool
zero_extend_u64(AOTCompContext *comp_ctx, LLVMValueRef *value, const char *name)
{
    if (comp_ctx->pointer_size == sizeof(uint64)) {
        /* zero extend to uint64 if the target is 64-bit */
        *value = LLVMBuildZExt(comp_ctx->builder, *value, I64_TYPE, name);
        if (!*value) {
            aot_set_last_error("llvm build zero extend failed.");
            return false;
        }
    }
    return true;
}
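
/* Return the precomputed bound for a `bytes`-wide access: an access at
   offset `o` is in bounds iff `o <= bound`, i.e. the bound is the memory
   data size minus the access size. If the memory can be resized, the
   bound must be reloaded on every access. */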
static LLVMValueRef
get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       uint32 bytes)
{
    LLVMValueRef mem_check_bound = NULL;

    switch (bytes) {
        case 1:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_1byte;
            break;
        case 2:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_2bytes;
            break;
        case 4:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_4bytes;
            break;
        case 8:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
            break;
        case 16:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_16bytes;
            break;
        default:
            bh_assert(0);
            return NULL;
    }

    if (func_ctx->mem_space_unchanged)
        return mem_check_bound;

    if (!(mem_check_bound = LLVMBuildLoad2(
              comp_ctx->builder,
              (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE : I32_TYPE,
              mem_check_bound, "mem_check_bound"))) {
        aot_set_last_error("llvm build load failed.");
        return NULL;
    }
    return mem_check_bound;
}
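
/* ffs() is not available in the Windows toolchains; provide a minimal
   replacement returning the 1-based index of the least significant set
   bit, or 0 when no bit is set. */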
#if defined(_WIN32) || defined(_WIN32_)
static inline int
ffs(int n)
{
    int pos = 0;

    if (n == 0)
        return 0;

    while (!(n & 1)) {
        pos++;
        n >>= 1;
    }
    return pos + 1;
}
#endif

static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
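
/*
 * Emit the address computation and bounds check for a linear memory
 * access of `bytes` bytes at `addr + offset`, and return the native
 * address to be accessed. Conceptually, the emitted check is:
 *
 *     if (addr + offset > mem_data_size - bytes)
 *         trap(out of bounds memory access);
 *
 * with an extra overflow check on the addition for 32-bit targets.
 * Fast paths: when the base address and offset are compile-time
 * constants that provably fit in the initial memory size, no runtime
 * check is emitted; (local, offset, bytes) combinations already checked
 * are skipped via the checked-address list. When `alignp` is non-NULL,
 * it receives the statically known alignment of the returned address
 * (1 when nothing is known).
 */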
LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          mem_offset_t offset, uint32 bytes, bool enable_segue,
                          unsigned int *alignp)
{
    LLVMValueRef offset_const =
        MEMORY64_COND_VALUE(I64_CONST(offset), I32_CONST(offset));
    LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
    LLVMValueRef mem_base_addr, mem_check_bound;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    AOTValue *aot_value_top;
    uint32 local_idx_of_aot_value = 0;
    uint64 const_value;
    bool is_target_64bit, is_local_of_aot_value = false;
    bool is_const = false;
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].flags & SHARED_MEMORY_FLAG;
#endif

    is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;

    if (comp_ctx->is_indirect_mode
        && aot_intrinsic_check_capability(
            comp_ctx, MEMORY64_COND_VALUE("i64.const", "i32.const"))) {
        WASMValue wasm_value;
#if WASM_ENABLE_MEMORY64 != 0
        if (IS_MEMORY64) {
            wasm_value.i64 = offset;
        }
        else
#endif
        {
            wasm_value.i32 = (int32)offset;
        }
        offset_const = aot_load_const_from_table(
            comp_ctx, func_ctx->native_symbol, &wasm_value,
            MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32));
        if (!offset_const) {
            return NULL;
        }
    }
    else {
        CHECK_LLVM_CONST(offset_const);
    }

    /* Get memory base address and memory data size */
    if (func_ctx->mem_space_unchanged
#if WASM_ENABLE_SHARED_MEMORY != 0
        || is_shared_memory
#endif
    ) {
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    aot_value_top =
        func_ctx->block_stack.block_list_end->value_stack.value_list_end;
    if (aot_value_top) {
        /* aot_value_top is freed in the following POP_MEM_OFFSET(addr),
           so save its fields here for further use */
        is_local_of_aot_value = aot_value_top->is_local;
        is_const = aot_value_top->is_const;
        local_idx_of_aot_value = aot_value_top->local_idx;
        const_value = aot_value_top->const_value;
    }

    POP_MEM_OFFSET(addr);

    /*
     * Note: we don't throw the integer-overflow exception here since it
     * must have been thrown when converting float to integer before
     */
    /* return the address directly if the offset is constant and the
       access stays inside the memory space */
    if (LLVMIsEfficientConstInt(addr) || is_const) {
        uint64 value;
        if (LLVMIsEfficientConstInt(addr)) {
            value = (uint64)LLVMConstIntGetZExtValue(addr);
        }
        else {
            value = const_value;
        }
        uint64 mem_offset = value + (uint64)offset;
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].init_page_count;
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (alignp != NULL) {
            /*
             * A note about max_align below:
             * the assumption here is that the base address of a linear
             * memory has the natural alignment. For platforms using mmap,
             * it can be even larger. For now, use a conservative value.
             */
            const unsigned int max_align = 8;
            int shift = ffs((int)(unsigned int)mem_offset);
            if (shift == 0) {
                *alignp = max_align;
            }
            else {
                unsigned int align = 1 << (shift - 1);
                if (align > max_align) {
                    align = max_align;
                }
                *alignp = align;
            }
        }
        if (mem_offset + bytes <= mem_data_size) {
            /* inside memory space */
            if (comp_ctx->pointer_size == sizeof(uint64))
                offset1 = I64_CONST(mem_offset);
            else
                offset1 = I32_CONST((uint32)mem_offset);
            CHECK_LLVM_CONST(offset1);
            if (!enable_segue) {
                if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder,
                                                    INT8_TYPE, mem_base_addr,
                                                    &offset1, 1, "maddr"))) {
                    aot_set_last_error("llvm build add failed.");
                    goto fail;
                }
            }
            else {
                if (!(maddr = LLVMBuildIntToPtr(comp_ctx->builder, offset1,
                                                INT8_PTR_TYPE_GS, "maddr"))) {
                    aot_set_last_error("llvm build IntToPtr failed.");
                    goto fail;
                }
            }
            return maddr;
        }
    }
    else if (alignp != NULL) {
        *alignp = 1;
    }

    if (is_target_64bit) {
        if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
                                           I64_TYPE, "offset_i64"))
            || !(addr = LLVMBuildZExt(comp_ctx->builder, addr, I64_TYPE,
                                      "addr_i64"))) {
            aot_set_last_error("llvm build zero extend failed.");
            goto fail;
        }
    }

    /* offset1 = offset + addr; */
    BUILD_OP(Add, offset_const, addr, offset1, "offset1");

    if (comp_ctx->enable_bound_check
        && !(is_local_of_aot_value
             && aot_checked_addr_list_find(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))) {
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].init_page_count;
        if (init_page_count == 0) {
            LLVMValueRef mem_size;

            if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) {
                goto fail;
            }
            BUILD_ICMP(LLVMIntEQ, mem_size,
                       MEMORY64_COND_VALUE(I64_ZERO, I32_ZERO), cmp, "is_zero");
            ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
            LLVMMoveBasicBlockAfter(check_succ, block_curr);
            if (!aot_emit_exception(comp_ctx, func_ctx,
                                    EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                    check_succ)) {
                goto fail;
            }
            SET_BUILD_POS(check_succ);
            block_curr = check_succ;
        }

        if (!(mem_check_bound =
                  get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
            goto fail;
        }

        if (is_target_64bit) {
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
        }
        else {
            /* Check integer overflow */
            BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp2, "cmp2");
            BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
        }

        /* Add basic blocks */
        ADD_BASIC_BLOCK(check_succ, "check_succ");
        LLVMMoveBasicBlockAfter(check_succ, block_curr);

        if (!aot_emit_exception(comp_ctx, func_ctx,
                                EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                check_succ)) {
            goto fail;
        }

        SET_BUILD_POS(check_succ);

        if (is_local_of_aot_value) {
            if (!aot_checked_addr_list_add(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))
                goto fail;
        }
    }

    if (!enable_segue) {
        /* maddr = mem_base_addr + offset1 */
        if (!(maddr =
                  LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset1, 1, "maddr"))) {
            aot_set_last_error("llvm build add failed.");
            goto fail;
        }
    }
    else {
        LLVMValueRef maddr_base;

        if (!(maddr_base = LLVMBuildIntToPtr(comp_ctx->builder, addr,
                                             INT8_PTR_TYPE_GS, "maddr_base"))) {
            aot_set_last_error("llvm build int to ptr failed.");
            goto fail;
        }
        if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                            maddr_base, &offset_const, 1,
                                            "maddr"))) {
            aot_set_last_error("llvm build inboundgep failed.");
            goto fail;
        }
    }
    return maddr;
fail:
    return NULL;
}
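
/* Helper macros shared by the load/store emitters below: cast the
   checked native address to the access-width pointer type, then
   load/store with the alignment reported by
   aot_check_memory_overflow(). */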
#define BUILD_PTR_CAST(ptr_type)                                           \
    do {                                                                   \
        if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, ptr_type, \
                                       "data_ptr"))) {                     \
            aot_set_last_error("llvm build bit cast failed.");             \
            goto fail;                                                     \
        }                                                                  \
    } while (0)

#define BUILD_LOAD(data_type)                                              \
    do {                                                                   \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr,  \
                                     "data"))) {                           \
            aot_set_last_error("llvm build load failed.");                 \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(value, known_align);                              \
    } while (0)

#define BUILD_TRUNC(value, data_type)                                      \
    do {                                                                   \
        if (!(value = LLVMBuildTrunc(comp_ctx->builder, value, data_type,  \
                                     "val_trunc"))) {                      \
            aot_set_last_error("llvm build trunc failed.");                \
            goto fail;                                                     \
        }                                                                  \
    } while (0)

#define BUILD_STORE()                                                      \
    do {                                                                   \
        LLVMValueRef res;                                                  \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {    \
            aot_set_last_error("llvm build store failed.");                \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(res, known_align);                                \
    } while (0)

#define BUILD_SIGN_EXT(dst_type)                                           \
    do {                                                                   \
        if (!(value = LLVMBuildSExt(comp_ctx->builder, value, dst_type,    \
                                    "data_s_ext"))) {                      \
            aot_set_last_error("llvm build sign ext failed.");             \
            goto fail;                                                     \
        }                                                                  \
    } while (0)

#define BUILD_ZERO_EXT(dst_type)                                           \
    do {                                                                   \
        if (!(value = LLVMBuildZExt(comp_ctx->builder, value, dst_type,    \
                                    "data_z_ext"))) {                      \
            aot_set_last_error("llvm build zero ext failed.");             \
            goto fail;                                                     \
        }                                                                  \
    } while (0)

#if WASM_ENABLE_SHARED_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0
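/* Emit a runtime check that the native address is (1 << align)-byte
   aligned, raising EXCE_UNALIGNED_ATOMIC otherwise, as required for
   atomic accesses. */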
bool
check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       LLVMValueRef addr, uint32 align)
{
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_align_succ;
    LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
    LLVMValueRef res;

    CHECK_LLVM_CONST(align_mask);

    /* Convert pointer to int */
    if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr, I32_TYPE,
                                   "address"))) {
        aot_set_last_error("llvm build ptr to int failed.");
        goto fail;
    }

    /* The memory address should be aligned */
    BUILD_OP(And, addr, align_mask, res, "and");
    BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");

    /* Add basic blocks */
    ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
    LLVMMoveBasicBlockAfter(check_align_succ, block_curr);

    if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_UNALIGNED_ATOMIC, true,
                            res, check_align_succ)) {
        goto fail;
    }

    SET_BUILD_POS(check_align_succ);
    return true;
fail:
    return false;
}
#endif /* WASM_ENABLE_SHARED_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0 */
#if WASM_ENABLE_SHARED_MEMORY != 0
#define BUILD_ATOMIC_LOAD(align, data_type)                                \
    do {                                                                   \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr,  \
                                     "data"))) {                           \
            aot_set_last_error("llvm build load failed.");                 \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(value, 1 << align);                               \
        LLVMSetVolatile(value, true);                                      \
        LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent);  \
    } while (0)

#define BUILD_ATOMIC_STORE(align)                                          \
    do {                                                                   \
        LLVMValueRef res;                                                  \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {    \
            aot_set_last_error("llvm build store failed.");                \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(res, 1 << align);                                 \
        LLVMSetVolatile(res, true);                                        \
        LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent);    \
    } while (0)
#endif
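
/* Emitters for the wasm load/store opcodes: compute and check the
   address, cast it to the access width, then load or store, with
   narrow integer loads sign- or zero-extended to the result type. */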
bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;
    bool enable_segue = comp_ctx->enable_segue_i32_load;
    unsigned int known_align;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;

    switch (bytes) {
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I32_TYPE);
            else
#endif
                BUILD_LOAD(I32_TYPE);
            break;
        case 2:
        case 1:
            if (bytes == 2) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT16_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
                data_type = INT16_TYPE;
            }
            else {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT8_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I32_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I32_TYPE);
                else
                    BUILD_ZERO_EXT(I32_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I32(value);
    (void)data_type;
    return true;
fail:
    return false;
}

bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;
    bool enable_segue = comp_ctx->enable_segue_i64_load;
    unsigned int known_align;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I64_TYPE);
            else
#endif
                BUILD_LOAD(I64_TYPE);
            break;
        case 4:
        case 2:
        case 1:
            if (bytes == 4) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT32_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
                data_type = I32_TYPE;
            }
            else if (bytes == 2) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT16_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
                data_type = INT16_TYPE;
            }
            else {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT8_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I64_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I64_TYPE);
                else
                    BUILD_ZERO_EXT(I64_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I64(value);
    (void)data_type;
    return true;
fail:
    return false;
}

bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f32_load;
    unsigned int known_align;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
                                            enable_segue, &known_align)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F32_PTR_TYPE);
    else
        BUILD_PTR_CAST(F32_PTR_TYPE_GS);
    BUILD_LOAD(F32_TYPE);

    PUSH_F32(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f64_load;
    unsigned int known_align;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
                                            enable_segue, &known_align)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F64_PTR_TYPE);
    else
        BUILD_PTR_CAST(F64_PTR_TYPE_GS);
    BUILD_LOAD(F64_TYPE);

    PUSH_F64(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_i32_store;
    unsigned int known_align;

    POP_I32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;

    switch (bytes) {
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_i64_store;
    unsigned int known_align;

    POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f32_store;
    unsigned int known_align;

    POP_F32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
                                            enable_segue, &known_align)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F32_PTR_TYPE);
    else
        BUILD_PTR_CAST(F32_PTR_TYPE_GS);
    BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f64_store;
    unsigned int known_align;

    POP_F64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
                                            enable_segue, &known_align)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F64_PTR_TYPE);
    else
        BUILD_PTR_CAST(F64_PTR_TYPE_GS);
    BUILD_STORE();
    return true;
fail:
    return false;
}
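
/* Return the current page count of memory 0, cast to the wasm
   page-count type (i64 under memory64, i32 otherwise). */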
static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size;

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_cur_page_count_addr;
    }
    else {
        if (!(mem_size = LLVMBuildLoad2(
                  comp_ctx->builder, I32_TYPE,
                  func_ctx->mem_info[0].mem_cur_page_count_addr, "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    return LLVMBuildIntCast(comp_ctx->builder, mem_size,
                            MEMORY64_COND_VALUE(I64_TYPE, I32_TYPE), "");
fail:
    return NULL;
}

bool
aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);

    if (mem_size)
        PUSH_PAGE_COUNT(mem_size);
    return mem_size ? true : false;
fail:
    return false;
}
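
/* memory.grow: call the runtime's enlarge-memory routine (directly in
   JIT mode, through the native-symbol table in indirect mode, or as a
   declared external function in AOT mode), then push the previous page
   count on success or -1 on failure. */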
bool
aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
    LLVMValueRef delta, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
    int32 func_index;
#if WASM_ENABLE_MEMORY64 != 0
    LLVMValueRef u32_max, u32_cmp_result;
#endif

    if (!mem_size)
        return false;

    POP_PAGE_COUNT(delta);

    /* Function type of aot_enlarge_memory() */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
        aot_set_last_error("llvm add function type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        /* JIT mode, call the function directly */
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("llvm add pointer type failed.");
            return false;
        }
        if (!(value = I64_CONST((uint64)(uintptr_t)wasm_enlarge_memory))
            || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }
        func_index =
            aot_get_native_symbol_index(comp_ctx, "aot_enlarge_memory");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        char *func_name = "aot_enlarge_memory";
        /* AOT mode, declare the function */
        if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name))
            && !(func =
                     LLVMAddFunction(func_ctx->module, func_name, func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    /* Call function aot_enlarge_memory() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = LLVMBuildTrunc(comp_ctx->builder, delta, I32_TYPE, "");
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");

#if WASM_ENABLE_MEMORY64 != 0
    if (IS_MEMORY64) {
        if (!(u32_max = I64_CONST(UINT32_MAX))) {
            aot_set_last_error("llvm build const failed");
            return false;
        }
        BUILD_ICMP(LLVMIntULE, delta, u32_max, u32_cmp_result, "page_size_cmp");
        BUILD_OP(And, ret_value, u32_cmp_result, ret_value, "and");
    }
#endif

    /* ret_value = ret_value == true ? prev_page_count : -1 */
    if (!(ret_value = LLVMBuildSelect(
              comp_ctx->builder, ret_value, mem_size,
              MEMORY64_COND_VALUE(I64_NEG_ONE, I32_NEG_ONE), "mem_grow_ret"))) {
        aot_set_last_error("llvm build select failed.");
        return false;
    }

    PUSH_PAGE_COUNT(ret_value);
    return true;
fail:
    return false;
}

#if WASM_ENABLE_BULK_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0
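/* Bounds-check a bulk memory operation of `bytes` bytes starting at
   `offset` against the current memory data size and return the native
   start address; a constant range that provably fits in the initial
   memory size skips the runtime check. */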
LLVMValueRef
check_bulk_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           LLVMValueRef offset, LLVMValueRef bytes)
{
    LLVMValueRef maddr, max_addr, cmp;
    LLVMValueRef mem_base_addr;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    LLVMValueRef mem_size;

    /* Get memory base address and memory data size */
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory = comp_ctx->comp_data->memories[0].flags & 0x02;

    if (func_ctx->mem_space_unchanged || is_shared_memory) {
#else
    if (func_ctx->mem_space_unchanged) {
#endif
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    /*
     * Note: we don't throw the integer-overflow exception here since it
     * must have been thrown when converting float to integer before
     */
    /* return the address directly if both offset and length are constant
       and the access stays inside the memory space */
    if (LLVMIsEfficientConstInt(offset) && LLVMIsEfficientConstInt(bytes)) {
        uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(offset);
        uint64 mem_len = (uint64)LLVMConstIntGetZExtValue(bytes);
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].init_page_count;
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_data_size > 0 && mem_offset + mem_len <= mem_data_size) {
            /* inside memory space */
            /* maddr = mem_base_addr + offset */
            if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                                mem_base_addr, &offset, 1,
                                                "maddr"))) {
                aot_set_last_error("llvm build add failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_data_size_addr;
    }
    else {
        if (!(mem_size = LLVMBuildLoad2(
                  comp_ctx->builder, I64_TYPE,
                  func_ctx->mem_info[0].mem_data_size_addr, "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    ADD_BASIC_BLOCK(check_succ, "check_succ");
    LLVMMoveBasicBlockAfter(check_succ, block_curr);

    offset =
        LLVMBuildZExt(comp_ctx->builder, offset, I64_TYPE, "extend_offset");
    bytes = LLVMBuildZExt(comp_ctx->builder, bytes, I64_TYPE, "extend_len");

    BUILD_OP(Add, offset, bytes, max_addr, "max_addr");
    BUILD_ICMP(LLVMIntUGT, max_addr, mem_size, cmp, "cmp_max_mem_addr");

    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                            check_succ)) {
        goto fail;
    }

    /* maddr = mem_base_addr + offset */
    if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset, 1, "maddr"))) {
        aot_set_last_error("llvm build add failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}
#endif /* end of WASM_ENABLE_BULK_MEMORY != 0 or WASM_ENABLE_STRINGREF != 0 */
#if WASM_ENABLE_BULK_MEMORY != 0
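/* memory.init: pop len/src/dst and call the runtime helper
   (llvm_jit_memory_init in JIT mode, aot_memory_init otherwise); if
   the helper reports failure, return from the function so the runtime
   can raise the trap. */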
bool
aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint32 seg_index)
{
    LLVMValueRef seg, offset, dst, len, param_values[5], ret_value, func, value;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef mem_init_fail, init_success;

    seg = I32_CONST(seg_index);

    POP_I32(len);
    POP_I32(offset);
    POP_MEM_OFFSET(dst);

    if (!zero_extend_u64(comp_ctx, &dst, "dst64")) {
        return false;
    }

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = I32_TYPE;
    param_types[3] = I32_TYPE;
    param_types[4] = SIZE_T_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_memory_init, 5);
    else
        GET_AOT_FUNCTION(aot_memory_init, 5);

    /* Call function aot_memory_init() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    param_values[2] = offset;
    param_values[3] = len;
    param_values[4] = dst;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_init_ret");

    ADD_BASIC_BLOCK(mem_init_fail, "mem_init_fail");
    ADD_BASIC_BLOCK(init_success, "init_success");

    LLVMMoveBasicBlockAfter(mem_init_fail, block_curr);
    LLVMMoveBasicBlockAfter(init_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, ret_value, init_success,
                         mem_init_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If memory.init failed, return this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
    return true;
fail:
    return false;
}

bool
aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 seg_index)
{
    LLVMValueRef seg, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_data_drop, 2);
    else
        GET_AOT_FUNCTION(aot_data_drop, 2);

    /* Call function aot_data_drop() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }
    return true;
fail:
    return false;
}
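
/* memory.copy: bounds-check the source and destination ranges, then
   either call memmove through the runtime (JIT/indirect mode) or emit
   LLVM's memmove intrinsic directly. */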
bool
aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef src, dst, src_addr, dst_addr, len, res;
    bool call_aot_memmove = false;

    POP_MEM_OFFSET(len);
    POP_MEM_OFFSET(src);
    POP_MEM_OFFSET(dst);

    if (!(src_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
        return false;

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    if (!zero_extend_u64(comp_ctx, &len, "len64")) {
        return false;
    }

    call_aot_memmove = comp_ctx->is_indirect_mode || comp_ctx->is_jit_mode;
    if (call_aot_memmove) {
        LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
        LLVMValueRef func, params[3];

        param_types[0] = INT8_PTR_TYPE;
        param_types[1] = INT8_PTR_TYPE;
        param_types[2] = SIZE_T_TYPE;
        ret_type = INT8_PTR_TYPE;

        if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }

        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function pointer type failed.");
            return false;
        }

        if (comp_ctx->is_jit_mode) {
            if (!(func = I64_CONST((uint64)(uintptr_t)aot_memmove))
                || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
                aot_set_last_error("create LLVM value failed.");
                return false;
            }
        }
        else {
            int32 func_index;
            func_index = aot_get_native_symbol_index(comp_ctx, "memmove");
            if (func_index < 0) {
                return false;
            }
            if (!(func =
                      aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                              func_ptr_type, func_index))) {
                return false;
            }
        }

        params[0] = dst_addr;
        params[1] = src_addr;
        params[2] = len;
        if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params,
                                   3, "call_memmove"))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    else {
        if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1, src_addr,
                                     1, len))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    return true;
fail:
    return false;
}
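
/* Thin wrapper so the JIT has a plain C function address for memset
   to call through. */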
static void *
jit_memset(void *s, int c, size_t n)
{
    return memset(s, c, n);
}

bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef val, dst, dst_addr, len, res;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
    LLVMValueRef func, params[3];

    POP_MEM_OFFSET(len);
    POP_I32(val);
    POP_MEM_OFFSET(dst);

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    if (!zero_extend_u64(comp_ctx, &len, "len64")) {
        return false;
    }

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = SIZE_T_TYPE;
    ret_type = INT8_PTR_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
        aot_set_last_error("create LLVM function type failed.");
        return false;
    }

    if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
        aot_set_last_error("create LLVM function pointer type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        if (!(func = I64_CONST((uint64)(uintptr_t)jit_memset))
            || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        int32 func_index;
        func_index = aot_get_native_symbol_index(comp_ctx, "memset");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        if (!(func = LLVMGetNamedFunction(func_ctx->module, "memset"))
            && !(func =
                     LLVMAddFunction(func_ctx->module, "memset", func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    params[0] = dst_addr;
    params[1] = val;
    params[2] = len;
    if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params, 3,
                               "call_memset"))) {
        aot_set_last_error("llvm build memset failed.");
        return false;
    }
    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_BULK_MEMORY */

#if WASM_ENABLE_SHARED_MEMORY != 0
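/* Atomic read-modify-write: compute and check the address, verify
   alignment, emit an atomicrmw instruction with sequentially
   consistent ordering, and zero-extend the returned old value back to
   the operand type. */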
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint8 atomic_op, uint8 op_type, uint32 align,
                          mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, result;
    bool enable_segue = (op_type == VALUE_TYPE_I32)
                            ? comp_ctx->enable_segue_i32_load
                                  && comp_ctx->enable_segue_i32_store
                            : comp_ctx->enable_segue_i64_load
                                  && comp_ctx->enable_segue_i64_store;

    if (op_type == VALUE_TYPE_I32)
        POP_I32(value);
    else
        POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            if (op_type == VALUE_TYPE_I64)
                BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicRMW(
              comp_ctx->builder, atomic_op, maddr, value,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                     "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                     "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}
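
/* Atomic compare-and-exchange: like the RMW case above, but also pops
   the expected value and pushes the previous memory value extracted
   from the {value, success} pair that cmpxchg returns. */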
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint8 op_type,
                              uint32 align, mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, expect, result;
    bool enable_segue = (op_type == VALUE_TYPE_I32)
                            ? comp_ctx->enable_segue_i32_load
                                  && comp_ctx->enable_segue_i32_store
                            : comp_ctx->enable_segue_i64_load
                                  && comp_ctx->enable_segue_i64_store;

    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
        POP_I32(expect);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
    }

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            if (op_type == VALUE_TYPE_I64) {
                BUILD_TRUNC(value, I32_TYPE);
                BUILD_TRUNC(expect, I32_TYPE);
            }
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            BUILD_TRUNC(expect, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            BUILD_TRUNC(expect, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicCmpXchg(
              comp_ctx->builder, maddr, expect, value,
              LLVMAtomicOrderingSequentiallyConsistent,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    /* CmpXchg returns an {i32, i1} structure;
       we need to extract the previous value from the structure */
    if (!(result = LLVMBuildExtractValue(comp_ctx->builder, result, 0,
                                         "previous_value"))) {
        goto fail;
    }

    if (op_type == VALUE_TYPE_I32) {
        if (LLVMTypeOf(result) != I32_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                         "result_i32"))) {
                goto fail;
            }
        }
        PUSH_I32(result);
    }
    else {
        if (LLVMTypeOf(result) != I64_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                         "result_i64"))) {
                goto fail;
            }
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}
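
/* memory.atomic.wait32/wait64: validate address and alignment, then
   call wasm_runtime_atomic_wait(); a -1 return signals failure, in
   which case the function returns so the runtime can raise the trap.
   Otherwise the wait result is pushed. */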
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint8 op_type, uint32 align, mem_offset_t offset,
                           uint32 bytes)
{
    LLVMValueRef maddr, value, timeout, expect, cmp;
    LLVMValueRef param_values[5], ret_value, func, is_wait64;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    LLVMBasicBlockRef wait_fail, wait_success;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;

    POP_I64(timeout);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(expect);
        is_wait64 = I8_CONST(false);
        if (!(expect = LLVMBuildZExt(comp_ctx->builder, expect, I64_TYPE,
                                     "expect_i64"))) {
            goto fail;
        }
    }
    else {
        POP_I64(expect);
        is_wait64 = I8_CONST(true);
    }
    CHECK_LLVM_CONST(is_wait64);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            false, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I64_TYPE;
    param_types[3] = I64_TYPE;
    param_types[4] = INT8_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);

    /* Call function wasm_runtime_atomic_wait() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = expect;
    param_values[3] = timeout;
    param_values[4] = is_wait64;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntNE, ret_value, I32_NEG_ONE, cmp, "atomic_wait_ret");

    ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
    ADD_BASIC_BLOCK(wait_success, "wait_success");

    LLVMMoveBasicBlockAfter(wait_fail, block_curr);
    LLVMMoveBasicBlockAfter(wait_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, cmp, wait_success, wait_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If atomic wait failed, return this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);

    PUSH_I32(ret_value);

    /* Insert suspend check point */
    if (comp_ctx->enable_thread_mgr) {
        if (!check_suspend_flags(comp_ctx, func_ctx, false))
            return false;
    }

    return true;
fail:
    return false;
}

bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint32 align,
                              mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, count;
    LLVMValueRef param_values[3], ret_value, func;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;

    POP_I32(count);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            false, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);

    /* Call function wasm_runtime_atomic_notify() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = count;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 3, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}

bool
aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return LLVMBuildFence(comp_ctx->builder,
                          LLVMAtomicOrderingSequentiallyConsistent, false, "")
               ? true
               : false;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */