/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "aot_emit_memory.h"
#include "aot_compiler.h"
#include "aot_emit_exception.h"
#include "../aot/aot_runtime.h"
#include "aot_intrinsic.h"
#include "aot_emit_control.h"

#define BUILD_ICMP(op, left, right, res, name)                                \
    do {                                                                      \
        if (!(res =                                                           \
                  LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
            aot_set_last_error("llvm build icmp failed.");                    \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define BUILD_OP(Op, left, right, res, name)                                  \
    do {                                                                      \
        if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) {   \
            aot_set_last_error("llvm build " #Op " fail.");                   \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define ADD_BASIC_BLOCK(block, name)                                          \
    do {                                                                      \
        if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,        \
                                                    func_ctx->func, name))) { \
            aot_set_last_error("llvm add basic block failed.");               \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block)
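
/*
 * Note: the helper macros above assume that `comp_ctx` and `func_ctx`
 * are in scope and that the enclosing function defines a `fail:` label
 * that returns an error value, e.g. (illustrative only):
 *
 *     LLVMValueRef sum;
 *     BUILD_OP(Add, lhs, rhs, sum, "sum");
 *     ...
 * fail:
 *     return NULL;
 */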
static bool
zero_extend_u64(AOTCompContext *comp_ctx, LLVMValueRef *value, const char *name)
{
    if (comp_ctx->pointer_size == sizeof(uint64)) {
        /* zero extend to uint64 if the target is 64-bit */
        *value = LLVMBuildZExt(comp_ctx->builder, *value, I64_TYPE, name);
        if (!*value) {
            aot_set_last_error("llvm build zero extend failed.");
            return false;
        }
    }
    return true;
}
static LLVMValueRef
get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       uint32 bytes)
{
    LLVMValueRef mem_check_bound = NULL;

    switch (bytes) {
        case 1:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_1byte;
            break;
        case 2:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_2bytes;
            break;
        case 4:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_4bytes;
            break;
        case 8:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
            break;
        case 16:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_16bytes;
            break;
        default:
            bh_assert(0);
            return NULL;
    }

    if (func_ctx->mem_space_unchanged)
        return mem_check_bound;

    if (!(mem_check_bound = LLVMBuildLoad2(
              comp_ctx->builder,
              (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE : I32_TYPE,
              mem_check_bound, "mem_check_bound"))) {
        aot_set_last_error("llvm build load failed.");
        return NULL;
    }
    return mem_check_bound;
}
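
/*
 * The mem_bound_check_N fields hold the largest effective address for
 * which an N-byte access still fits in linear memory (presumably
 * mem_data_size - N), so a single unsigned compare against this bound
 * suffices in aot_check_memory_overflow below. When the memory can grow,
 * the bound is reloaded on each access; otherwise the cached value is
 * used directly.
 */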
static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          mem_offset_t offset, uint32 bytes, bool enable_segue,
                          unsigned int *alignp)
{
    LLVMValueRef offset_const =
        MEMORY64_COND_VALUE(I64_CONST(offset), I32_CONST(offset));
    LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
    LLVMValueRef mem_base_addr, mem_check_bound;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    AOTValue *aot_value_top;
    uint32 local_idx_of_aot_value = 0;
    bool is_target_64bit, is_local_of_aot_value = false;
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].flags & SHARED_MEMORY_FLAG;
#endif

    is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;

    if (comp_ctx->is_indirect_mode
        && aot_intrinsic_check_capability(
            comp_ctx, MEMORY64_COND_VALUE("i64.const", "i32.const"))) {
        WASMValue wasm_value;
#if WASM_ENABLE_MEMORY64 != 0
        if (IS_MEMORY64) {
            wasm_value.i64 = offset;
        }
        else
#endif
        {
            wasm_value.i32 = (int32)offset;
        }
        offset_const = aot_load_const_from_table(
            comp_ctx, func_ctx->native_symbol, &wasm_value,
            MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32));
        if (!offset_const) {
            return NULL;
        }
    }
    else {
        CHECK_LLVM_CONST(offset_const);
    }

    /* Get memory base address and memory data size */
    if (func_ctx->mem_space_unchanged
#if WASM_ENABLE_SHARED_MEMORY != 0
        || is_shared_memory
#endif
    ) {
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }
    aot_value_top =
        func_ctx->block_stack.block_list_end->value_stack.value_list_end;
    if (aot_value_top) {
        /* aot_value_top is freed in the following POP_MEM_OFFSET(addr),
           so save its fields here for further use */
        is_local_of_aot_value = aot_value_top->is_local;
        local_idx_of_aot_value = aot_value_top->local_idx;
    }
    POP_MEM_OFFSET(addr);

    /*
     * Note: we don't throw the integer-overflow exception here since it
     * must already have been thrown when converting a float to an integer
     * earlier.
     */
    /* return address directly if constant offset and inside memory space */
    if (LLVMIsEfficientConstInt(addr)) {
        uint64 mem_offset =
            (uint64)LLVMConstIntGetZExtValue(addr) + (uint64)offset;
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].init_page_count;
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (alignp != NULL) {
            /*
             * A note about max_align below:
             * the assumption here is the base address of a linear memory
             * has the natural alignment. for platforms using mmap, it can
             * be even larger. for now, use a conservative value.
             */
            const int max_align = 8;
            int shift = ffs((int)(unsigned int)mem_offset);
            if (shift == 0) {
                *alignp = max_align;
            }
            else {
                unsigned int align = 1 << (shift - 1);
                if (align > max_align) {
                    align = max_align;
                }
                *alignp = align;
            }
        }
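        /*
         * Worked example of the ffs-based alignment above (illustrative):
         * mem_offset = 12 (0b1100) -> ffs = 3 -> align = 1 << 2 = 4;
         * mem_offset = 32 (0b100000) -> ffs = 6 -> align = 32, capped to 8;
         * mem_offset = 0 -> ffs = 0, and since every power of two divides
         * zero, the conservative max_align is reported.
         */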
        if (mem_offset + bytes <= mem_data_size) {
            /* inside memory space */
            if (comp_ctx->pointer_size == sizeof(uint64))
                offset1 = I64_CONST(mem_offset);
            else
                offset1 = I32_CONST((uint32)mem_offset);
            CHECK_LLVM_CONST(offset1);
            if (!enable_segue) {
                if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder,
                                                    INT8_TYPE, mem_base_addr,
                                                    &offset1, 1, "maddr"))) {
                    aot_set_last_error("llvm build add failed.");
                    goto fail;
                }
            }
            else {
                if (!(maddr = LLVMBuildIntToPtr(comp_ctx->builder, offset1,
                                                INT8_PTR_TYPE_GS, "maddr"))) {
                    aot_set_last_error("llvm build IntToPtr failed.");
                    goto fail;
                }
            }
            return maddr;
        }
    }
    else if (alignp != NULL) {
        *alignp = 1;
    }
    if (is_target_64bit) {
        if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
                                           I64_TYPE, "offset_i64"))
            || !(addr = LLVMBuildZExt(comp_ctx->builder, addr, I64_TYPE,
                                      "addr_i64"))) {
            aot_set_last_error("llvm build zero extend failed.");
            goto fail;
        }
    }

    /* offset1 = offset + addr; */
    BUILD_OP(Add, offset_const, addr, offset1, "offset1");

    if (comp_ctx->enable_bound_check
        && !(is_local_of_aot_value
             && aot_checked_addr_list_find(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))) {
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].init_page_count;
        if (init_page_count == 0) {
            LLVMValueRef mem_size;

            if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) {
                goto fail;
            }
            BUILD_ICMP(LLVMIntEQ, mem_size,
                       MEMORY64_COND_VALUE(I64_ZERO, I32_ZERO), cmp, "is_zero");
            ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
            LLVMMoveBasicBlockAfter(check_succ, block_curr);
            if (!aot_emit_exception(comp_ctx, func_ctx,
                                    EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                    check_succ)) {
                goto fail;
            }
            SET_BUILD_POS(check_succ);
            block_curr = check_succ;
        }

        if (!(mem_check_bound =
                  get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
            goto fail;
        }

        if (is_target_64bit) {
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
        }
        else {
            /* Check integer overflow */
            BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp2, "cmp2");
            BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
        }
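        /*
         * On 32-bit targets offset1 = addr + offset may wrap around, so
         * cmp1 catches the overflow, e.g. (illustrative):
         * addr = 0xFFFFFFF0, offset = 0x20 -> offset1 = 0x10 < addr,
         * which cmp1 flags as out of bounds. On 64-bit targets both
         * operands were zero-extended to i64 above, so the sum cannot wrap.
         */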
        /* Add basic blocks */
        ADD_BASIC_BLOCK(check_succ, "check_succ");
        LLVMMoveBasicBlockAfter(check_succ, block_curr);

        if (!aot_emit_exception(comp_ctx, func_ctx,
                                EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                check_succ)) {
            goto fail;
        }

        SET_BUILD_POS(check_succ);

        if (is_local_of_aot_value) {
            if (!aot_checked_addr_list_add(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))
                goto fail;
        }
    }

    if (!enable_segue) {
        /* maddr = mem_base_addr + offset1 */
        if (!(maddr =
                  LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset1, 1, "maddr"))) {
            aot_set_last_error("llvm build add failed.");
            goto fail;
        }
    }
    else {
        LLVMValueRef maddr_base;

        if (!(maddr_base = LLVMBuildIntToPtr(comp_ctx->builder, addr,
                                             INT8_PTR_TYPE_GS, "maddr_base"))) {
            aot_set_last_error("llvm build int to ptr failed.");
            goto fail;
        }
        if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                            maddr_base, &offset_const, 1,
                                            "maddr"))) {
            aot_set_last_error("llvm build inboundgep failed.");
            goto fail;
        }
    }
    return maddr;
fail:
    return NULL;
}
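
/*
 * For reference, on a 64-bit target with bounds checking enabled the IR
 * emitted by aot_check_memory_overflow is roughly shaped like this
 * (illustrative sketch, not emitted verbatim):
 *
 *   %offset1 = add i64 %offset, %addr
 *   %cmp = icmp ugt i64 %offset1, %mem_check_bound
 *   br i1 %cmp, label %got_exception, label %check_succ
 * check_succ:
 *   %maddr = getelementptr inbounds i8, ptr %mem_base, i64 %offset1
 */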
#define BUILD_PTR_CAST(ptr_type)                                           \
    do {                                                                   \
        if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, ptr_type, \
                                       "data_ptr"))) {                     \
            aot_set_last_error("llvm build bit cast failed.");             \
            goto fail;                                                     \
        }                                                                  \
    } while (0)

#define BUILD_LOAD(data_type)                                              \
    do {                                                                   \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr,  \
                                     "data"))) {                           \
            aot_set_last_error("llvm build load failed.");                 \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(value, known_align);                              \
    } while (0)

#define BUILD_TRUNC(value, data_type)                                      \
    do {                                                                   \
        if (!(value = LLVMBuildTrunc(comp_ctx->builder, value, data_type,  \
                                     "val_trunc"))) {                      \
            aot_set_last_error("llvm build trunc failed.");                \
            goto fail;                                                     \
        }                                                                  \
    } while (0)

#define BUILD_STORE()                                                      \
    do {                                                                   \
        LLVMValueRef res;                                                  \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {    \
            aot_set_last_error("llvm build store failed.");                \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(res, known_align);                                \
    } while (0)

#define BUILD_SIGN_EXT(dst_type)                                           \
    do {                                                                   \
        if (!(value = LLVMBuildSExt(comp_ctx->builder, value, dst_type,    \
                                    "data_s_ext"))) {                      \
            aot_set_last_error("llvm build sign ext failed.");             \
            goto fail;                                                     \
        }                                                                  \
    } while (0)

#define BUILD_ZERO_EXT(dst_type)                                           \
    do {                                                                   \
        if (!(value = LLVMBuildZExt(comp_ctx->builder, value, dst_type,    \
                                    "data_z_ext"))) {                      \
            aot_set_last_error("llvm build zero ext failed.");             \
            goto fail;                                                     \
        }                                                                  \
    } while (0)
#if WASM_ENABLE_SHARED_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0
bool
check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       LLVMValueRef addr, uint32 align)
{
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_align_succ;
    LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
    LLVMValueRef res;

    CHECK_LLVM_CONST(align_mask);

    /* Convert pointer to int */
    if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr, I32_TYPE,
                                   "address"))) {
        aot_set_last_error("llvm build ptr to int failed.");
        goto fail;
    }
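    /*
     * Note: ptrtoint to i32 truncates the pointer on 64-bit targets, but
     * only the low bits matter for an alignment test, so the truncation
     * is harmless here.
     */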
    /* The memory address should be aligned */
    BUILD_OP(And, addr, align_mask, res, "and");
    BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");

    /* Add basic blocks */
    ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
    LLVMMoveBasicBlockAfter(check_align_succ, block_curr);

    if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_UNALIGNED_ATOMIC, true,
                            res, check_align_succ)) {
        goto fail;
    }

    SET_BUILD_POS(check_align_succ);
    return true;
fail:
    return false;
}
#endif /* WASM_ENABLE_SHARED_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0 */
#if WASM_ENABLE_SHARED_MEMORY != 0
#define BUILD_ATOMIC_LOAD(align, data_type)                                \
    do {                                                                   \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr,  \
                                     "data"))) {                           \
            aot_set_last_error("llvm build load failed.");                 \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(value, 1 << align);                               \
        LLVMSetVolatile(value, true);                                      \
        LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent);  \
    } while (0)

#define BUILD_ATOMIC_STORE(align)                                          \
    do {                                                                   \
        LLVMValueRef res;                                                  \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {    \
            aot_set_last_error("llvm build store failed.");                \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(res, 1 << align);                                 \
        LLVMSetVolatile(res, true);                                        \
        LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent);    \
    } while (0)
#endif
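
/*
 * The atomic load/store macros above first verify the runtime alignment,
 * then mark the instruction volatile and sequentially consistent, which
 * matches the seq_cst ordering the wasm threads proposal requires for
 * atomic accesses. Note that `align` is the log2 byte count from the
 * wasm memarg: e.g. a 4-byte atomic access passes align = 2 and gets
 * LLVMSetAlignment(value, 1 << 2).
 */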
bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;
    bool enable_segue = comp_ctx->enable_segue_i32_load;
    unsigned int known_align;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;

    switch (bytes) {
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I32_TYPE);
            else
#endif
                BUILD_LOAD(I32_TYPE);
            break;
        case 2:
        case 1:
            if (bytes == 2) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT16_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
                data_type = INT16_TYPE;
            }
            else {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT8_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I32_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I32_TYPE);
                else
                    BUILD_ZERO_EXT(I32_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I32(value);
    (void)data_type;
    return true;
fail:
    return false;
}
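
/*
 * Mapping from wasm opcodes to the cases above (illustrative):
 * i32.load maps to bytes = 4; i32.load16_s to bytes = 2, sign = true
 * (load i16, then sext to i32); i32.load8_u to bytes = 1, sign = false
 * (load i8, then zext to i32). Atomic variants are always zero-extended,
 * since the threads proposal defines only unsigned narrow atomic loads.
 */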
bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;
    bool enable_segue = comp_ctx->enable_segue_i64_load;
    unsigned int known_align;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I64_TYPE);
            else
#endif
                BUILD_LOAD(I64_TYPE);
            break;
        case 4:
        case 2:
        case 1:
            if (bytes == 4) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT32_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
                data_type = I32_TYPE;
            }
            else if (bytes == 2) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT16_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
                data_type = INT16_TYPE;
            }
            else {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT8_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I64_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I64_TYPE);
                else
                    BUILD_ZERO_EXT(I64_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I64(value);
    (void)data_type;
    return true;
fail:
    return false;
}
bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f32_load;
    unsigned int known_align;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
                                            enable_segue, &known_align)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F32_PTR_TYPE);
    else
        BUILD_PTR_CAST(F32_PTR_TYPE_GS);
    BUILD_LOAD(F32_TYPE);
    PUSH_F32(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, mem_offset_t offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f64_load;
    unsigned int known_align;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
                                            enable_segue, &known_align)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F64_PTR_TYPE);
    else
        BUILD_PTR_CAST(F64_PTR_TYPE_GS);
    BUILD_LOAD(F64_TYPE);
    PUSH_F64(value);
    return true;
fail:
    return false;
}
bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_i32_store;

    POP_I32(value);

    unsigned int known_align;
    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;

    switch (bytes) {
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}
bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_i64_store;

    POP_I64(value);

    unsigned int known_align;
    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, &known_align)))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}
bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f32_store;

    POP_F32(value);

    unsigned int known_align;
    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
                                            enable_segue, &known_align)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F32_PTR_TYPE);
    else
        BUILD_PTR_CAST(F32_PTR_TYPE_GS);
    BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, mem_offset_t offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f64_store;

    POP_F64(value);

    unsigned int known_align;
    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
                                            enable_segue, &known_align)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F64_PTR_TYPE);
    else
        BUILD_PTR_CAST(F64_PTR_TYPE_GS);
    BUILD_STORE();
    return true;
fail:
    return false;
}
static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size;

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_cur_page_count_addr;
    }
    else {
        if (!(mem_size = LLVMBuildLoad2(
                  comp_ctx->builder, I32_TYPE,
                  func_ctx->mem_info[0].mem_cur_page_count_addr, "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    return LLVMBuildIntCast(comp_ctx->builder, mem_size,
                            MEMORY64_COND_VALUE(I64_TYPE, I32_TYPE), "");
fail:
    return NULL;
}
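
/*
 * The current page count is loaded as an i32; the final IntCast widens
 * it to i64 only when compiling a memory64 module, where memory.size and
 * memory.grow operate on i64 page counts.
 */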
bool
aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);

    if (mem_size)
        PUSH_PAGE_COUNT(mem_size);
    return mem_size ? true : false;
fail:
    return false;
}
bool
aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
    LLVMValueRef delta, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
    int32 func_index;
#if WASM_ENABLE_MEMORY64 != 0
    LLVMValueRef u32_max, u32_cmp_result;
#endif

    if (!mem_size)
        return false;

    POP_PAGE_COUNT(delta);

    /* Function type of aot_enlarge_memory() */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
        aot_set_last_error("llvm add function type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        /* JIT mode, call the function directly */
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("llvm add pointer type failed.");
            return false;
        }
        if (!(value = I64_CONST((uint64)(uintptr_t)wasm_enlarge_memory))
            || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }
        func_index =
            aot_get_native_symbol_index(comp_ctx, "aot_enlarge_memory");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        char *func_name = "aot_enlarge_memory";
        /* AOT mode, declare the function */
        if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name))
            && !(func =
                     LLVMAddFunction(func_ctx->module, func_name, func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }
    /* Call function aot_enlarge_memory() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = LLVMBuildTrunc(comp_ctx->builder, delta, I32_TYPE, "");
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");

#if WASM_ENABLE_MEMORY64 != 0
    if (IS_MEMORY64) {
        if (!(u32_max = I64_CONST(UINT32_MAX))) {
            aot_set_last_error("llvm build const failed");
            return false;
        }
        BUILD_ICMP(LLVMIntULE, delta, u32_max, u32_cmp_result, "page_size_cmp");
        BUILD_OP(And, ret_value, u32_cmp_result, ret_value, "and");
    }
#endif
    /* ret_value = ret_value == true ? prev_page_count : -1 */
    if (!(ret_value = LLVMBuildSelect(
              comp_ctx->builder, ret_value, mem_size,
              MEMORY64_COND_VALUE(I64_NEG_ONE, I32_NEG_ONE), "mem_grow_ret"))) {
        aot_set_last_error("llvm build select failed.");
        return false;
    }

    PUSH_PAGE_COUNT(ret_value);
    return true;
fail:
    return false;
}
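
/*
 * Two details of the memory.grow lowering above: the delta is truncated
 * to i32 because the enlarge-memory routine takes a 32-bit page count,
 * so for memory64 an extra delta <= UINT32_MAX guard is ANDed into the
 * success flag; and on failure the select yields -1, the value
 * memory.grow must return per the wasm spec.
 */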
#if WASM_ENABLE_BULK_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0
LLVMValueRef
check_bulk_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           LLVMValueRef offset, LLVMValueRef bytes)
{
    LLVMValueRef maddr, max_addr, cmp;
    LLVMValueRef mem_base_addr;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    LLVMValueRef mem_size;

    /* Get memory base address and memory data size */
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].flags & SHARED_MEMORY_FLAG;

    if (func_ctx->mem_space_unchanged || is_shared_memory) {
#else
    if (func_ctx->mem_space_unchanged) {
#endif
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }
    /*
     * Note: we don't throw the integer-overflow exception here since it
     * must already have been thrown when converting a float to an integer
     * earlier.
     */
    /* return address directly if constant offset and inside memory space */
    if (LLVMIsEfficientConstInt(offset) && LLVMIsEfficientConstInt(bytes)) {
        uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(offset);
        uint64 mem_len = (uint64)LLVMConstIntGetZExtValue(bytes);
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].init_page_count;
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_data_size > 0 && mem_offset + mem_len <= mem_data_size) {
            /* inside memory space */
            /* maddr = mem_base_addr + moffset */
            if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                                mem_base_addr, &offset, 1,
                                                "maddr"))) {
                aot_set_last_error("llvm build add failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_data_size_addr;
    }
    else {
        if (!(mem_size = LLVMBuildLoad2(
                  comp_ctx->builder, I64_TYPE,
                  func_ctx->mem_info[0].mem_data_size_addr, "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    ADD_BASIC_BLOCK(check_succ, "check_succ");
    LLVMMoveBasicBlockAfter(check_succ, block_curr);

    offset =
        LLVMBuildZExt(comp_ctx->builder, offset, I64_TYPE, "extend_offset");
    bytes = LLVMBuildZExt(comp_ctx->builder, bytes, I64_TYPE, "extend_len");

    BUILD_OP(Add, offset, bytes, max_addr, "max_addr");
    BUILD_ICMP(LLVMIntUGT, max_addr, mem_size, cmp, "cmp_max_mem_addr");

    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                            check_succ)) {
        goto fail;
    }

    /* maddr = mem_base_addr + offset */
    if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset, 1, "maddr"))) {
        aot_set_last_error("llvm build add failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}
#endif /* end of WASM_ENABLE_BULK_MEMORY != 0 or WASM_ENABLE_STRINGREF != 0 */
#if WASM_ENABLE_BULK_MEMORY != 0
bool
aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint32 seg_index)
{
    LLVMValueRef seg, offset, dst, len, param_values[5], ret_value, func, value;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef mem_init_fail, init_success;

    seg = I32_CONST(seg_index);

    POP_I32(len);
    POP_I32(offset);
    POP_MEM_OFFSET(dst);

    if (!zero_extend_u64(comp_ctx, &dst, "dst64")) {
        return false;
    }

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = I32_TYPE;
    param_types[3] = I32_TYPE;
    param_types[4] = SIZE_T_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_memory_init, 5);
    else
        GET_AOT_FUNCTION(aot_memory_init, 5);

    /* Call function aot_memory_init() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    param_values[2] = offset;
    param_values[3] = len;
    param_values[4] = dst;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_init_ret");

    ADD_BASIC_BLOCK(mem_init_fail, "mem_init_fail");
    ADD_BASIC_BLOCK(init_success, "init_success");

    LLVMMoveBasicBlockAfter(mem_init_fail, block_curr);
    LLVMMoveBasicBlockAfter(init_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, ret_value, init_success,
                         mem_init_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If memory.init failed, return this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
    return true;
fail:
    return false;
}
bool
aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 seg_index)
{
    LLVMValueRef seg, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_data_drop, 2);
    else
        GET_AOT_FUNCTION(aot_data_drop, 2);

    /* Call function aot_data_drop() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }
    return true;
fail:
    return false;
}
bool
aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef src, dst, src_addr, dst_addr, len, res;
    bool call_aot_memmove = false;

    POP_MEM_OFFSET(len);
    POP_MEM_OFFSET(src);
    POP_MEM_OFFSET(dst);

    if (!(src_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
        return false;

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    if (!zero_extend_u64(comp_ctx, &len, "len64")) {
        return false;
    }

    call_aot_memmove = comp_ctx->is_indirect_mode || comp_ctx->is_jit_mode;
    if (call_aot_memmove) {
        LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
        LLVMValueRef func, params[3];

        param_types[0] = INT8_PTR_TYPE;
        param_types[1] = INT8_PTR_TYPE;
        param_types[2] = SIZE_T_TYPE;
        ret_type = INT8_PTR_TYPE;

        if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }

        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function pointer type failed.");
            return false;
        }

        if (comp_ctx->is_jit_mode) {
            if (!(func = I64_CONST((uint64)(uintptr_t)aot_memmove))
                || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
                aot_set_last_error("create LLVM value failed.");
                return false;
            }
        }
        else {
            int32 func_index;
            func_index = aot_get_native_symbol_index(comp_ctx, "memmove");
            if (func_index < 0) {
                return false;
            }
            if (!(func =
                      aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                              func_ptr_type, func_index))) {
                return false;
            }
        }

        params[0] = dst_addr;
        params[1] = src_addr;
        params[2] = len;
        if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params,
                                   3, "call_memmove"))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    else {
        if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1, src_addr,
                                     1, len))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    return true;
fail:
    return false;
}
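
/*
 * memory.copy is lowered to memmove (either the LLVMBuildMemMove
 * intrinsic or a call to the runtime/libc memmove) because the wasm
 * bulk-memory spec allows the source and destination ranges to overlap.
 * The jit_memset wrapper below presumably exists to guarantee a plain,
 * takeable function address for memset under JIT mode, where the libc
 * symbol might otherwise be inlined or resolved to an intrinsic.
 */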
static void *
jit_memset(void *s, int c, size_t n)
{
    return memset(s, c, n);
}
bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef val, dst, dst_addr, len, res;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
    LLVMValueRef func, params[3];

    POP_MEM_OFFSET(len);
    POP_I32(val);
    POP_MEM_OFFSET(dst);

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    if (!zero_extend_u64(comp_ctx, &len, "len64")) {
        return false;
    }

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = SIZE_T_TYPE;
    ret_type = INT8_PTR_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
        aot_set_last_error("create LLVM function type failed.");
        return false;
    }

    if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
        aot_set_last_error("create LLVM function pointer type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        if (!(func = I64_CONST((uint64)(uintptr_t)jit_memset))
            || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        int32 func_index;
        func_index = aot_get_native_symbol_index(comp_ctx, "memset");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        if (!(func = LLVMGetNamedFunction(func_ctx->module, "memset"))
            && !(func =
                     LLVMAddFunction(func_ctx->module, "memset", func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    params[0] = dst_addr;
    params[1] = val;
    params[2] = len;
    if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params, 3,
                               "call_memset"))) {
        aot_set_last_error("llvm build memset failed.");
        return false;
    }
    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_BULK_MEMORY */
#if WASM_ENABLE_SHARED_MEMORY != 0
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint8 atomic_op, uint8 op_type, uint32 align,
                          mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, result;
    bool enable_segue = (op_type == VALUE_TYPE_I32)
                            ? comp_ctx->enable_segue_i32_load
                                  && comp_ctx->enable_segue_i32_store
                            : comp_ctx->enable_segue_i64_load
                                  && comp_ctx->enable_segue_i64_store;

    if (op_type == VALUE_TYPE_I32)
        POP_I32(value);
    else
        POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            if (op_type == VALUE_TYPE_I64)
                BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicRMW(
              comp_ctx->builder, atomic_op, maddr, value,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                     "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                     "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }

    return true;
fail:
    return false;
}
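
/*
 * Example of the narrowing above (illustrative): i64.atomic.rmw8.add_u
 * arrives with op_type = VALUE_TYPE_I64 and bytes = 1, so the i64 operand
 * is truncated to i8, an `atomicrmw add i8` is emitted, and the old value
 * is zero-extended back to i64 before being pushed.
 */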
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint8 op_type,
                              uint32 align, mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, expect, result;
    bool enable_segue = (op_type == VALUE_TYPE_I32)
                            ? comp_ctx->enable_segue_i32_load
                                  && comp_ctx->enable_segue_i32_store
                            : comp_ctx->enable_segue_i64_load
                                  && comp_ctx->enable_segue_i64_store;

    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
        POP_I32(expect);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
    }

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            if (op_type == VALUE_TYPE_I64) {
                BUILD_TRUNC(value, I32_TYPE);
                BUILD_TRUNC(expect, I32_TYPE);
            }
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            BUILD_TRUNC(expect, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            BUILD_TRUNC(expect, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicCmpXchg(
              comp_ctx->builder, maddr, expect, value,
              LLVMAtomicOrderingSequentiallyConsistent,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);
    /* cmpxchg returns a { value, i1 } structure; extract the
       previous value (field 0) from it */
    if (!(result = LLVMBuildExtractValue(comp_ctx->builder, result, 0,
                                         "previous_value"))) {
        goto fail;
    }
    if (op_type == VALUE_TYPE_I32) {
        if (LLVMTypeOf(result) != I32_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                         "result_i32"))) {
                goto fail;
            }
        }
        PUSH_I32(result);
    }
    else {
        if (LLVMTypeOf(result) != I64_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                         "result_i64"))) {
                goto fail;
            }
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint8 op_type, uint32 align, mem_offset_t offset,
                           uint32 bytes)
{
    LLVMValueRef maddr, value, timeout, expect, cmp;
    LLVMValueRef param_values[5], ret_value, func, is_wait64;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    LLVMBasicBlockRef wait_fail, wait_success;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;

    POP_I64(timeout);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(expect);
        is_wait64 = I8_CONST(false);
        if (!(expect = LLVMBuildZExt(comp_ctx->builder, expect, I64_TYPE,
                                     "expect_i64"))) {
            goto fail;
        }
    }
    else {
        POP_I64(expect);
        is_wait64 = I8_CONST(true);
    }
    CHECK_LLVM_CONST(is_wait64);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            false, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I64_TYPE;
    param_types[3] = I64_TYPE;
    param_types[4] = INT8_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);

    /* Call function wasm_runtime_atomic_wait() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = expect;
    param_values[3] = timeout;
    param_values[4] = is_wait64;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntNE, ret_value, I32_NEG_ONE, cmp, "atomic_wait_ret");

    ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
    ADD_BASIC_BLOCK(wait_success, "wait_success");

    LLVMMoveBasicBlockAfter(wait_fail, block_curr);
    LLVMMoveBasicBlockAfter(wait_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, cmp, wait_success, wait_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If atomic wait failed, return this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);

    PUSH_I32(ret_value);

    /* Insert suspend check point */
    if (comp_ctx->enable_thread_mgr) {
        if (!check_suspend_flags(comp_ctx, func_ctx, false))
            return false;
    }
    return true;
fail:
    return false;
}
bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint32 align,
                              mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, count;
    LLVMValueRef param_values[3], ret_value, func;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;

    POP_I32(count);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            false, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);

    /* Call function wasm_runtime_atomic_notify() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = count;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 3, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}
bool
aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return LLVMBuildFence(comp_ctx->builder,
                          LLVMAtomicOrderingSequentiallyConsistent, false, "")
               ? true
               : false;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */