aot_emit_memory.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "aot_emit_memory.h"
#include "aot_emit_exception.h"
#include "../aot/aot_runtime.h"
#include "aot_intrinsic.h"
#include "aot_emit_control.h"

#define BUILD_ICMP(op, left, right, res, name)                                \
    do {                                                                      \
        if (!(res =                                                           \
                  LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
            aot_set_last_error("llvm build icmp failed.");                    \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define BUILD_OP(Op, left, right, res, name)                                \
    do {                                                                    \
        if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) { \
            aot_set_last_error("llvm build " #Op " failed.");               \
            goto fail;                                                      \
        }                                                                   \
    } while (0)

#define ADD_BASIC_BLOCK(block, name)                                          \
    do {                                                                      \
        if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,        \
                                                    func_ctx->func, name))) { \
            aot_set_last_error("llvm add basic block failed.");               \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block)
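
/*
 * A sketch of the bound-check scheme used below (illustrative, based on
 * how the values are consumed here rather than on separate docs): for an
 * access of N bytes the runtime keeps a precomputed per-width bound in
 * mem_info[0].mem_bound_check_Nbytes, conceptually mem_data_size - N.
 * An access at native offset `offset1 = addr + offset` is then in bounds
 * iff offset1 <= bound.  E.g. with a one-page (65536-byte) memory and a
 * 4-byte load, the bound is 65532, so offset1 = 65533 traps.
 */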
static LLVMValueRef
get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       uint32 bytes)
{
    LLVMValueRef mem_check_bound = NULL;

    switch (bytes) {
        case 1:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_1byte;
            break;
        case 2:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_2bytes;
            break;
        case 4:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_4bytes;
            break;
        case 8:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
            break;
        case 16:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_16bytes;
            break;
        default:
            bh_assert(0);
            return NULL;
    }

    if (func_ctx->mem_space_unchanged)
        return mem_check_bound;

    if (!(mem_check_bound = LLVMBuildLoad2(
              comp_ctx->builder,
              (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE : I32_TYPE,
              mem_check_bound, "mem_check_bound"))) {
        aot_set_last_error("llvm build load failed.");
        return NULL;
    }
    return mem_check_bound;
}

static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
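
/*
 * Pop an i32 address off the value stack and turn it into the native
 * address `maddr` for an access of `bytes` bytes at constant `offset`.
 * Constant in-bounds addresses return immediately; otherwise, when bound
 * checking is enabled, the emitted IR looks roughly like this on a
 * 64-bit target (a sketch, for illustration only):
 *
 *   %addr_i64 = zext i32 %addr to i64
 *   %offset1  = add i64 <offset>, %addr_i64
 *   %cmp      = icmp ugt i64 %offset1, %mem_check_bound
 *   br i1 %cmp, label %oob_trap, label %check_succ
 *   ; check_succ: %maddr = getelementptr i8, ptr %mem_base, i64 %offset1
 *
 * When enable_segue is set, the pointer is instead built with inttoptr
 * into the *_GS pointer types' address space, skipping the explicit
 * base-plus-offset GEP (the base presumably lives in a segment register,
 * hence the _GS suffix).
 */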
LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint32 offset, uint32 bytes, bool enable_segue)
{
    LLVMValueRef offset_const = I32_CONST(offset);
    LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
    LLVMValueRef mem_base_addr, mem_check_bound;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    AOTValue *aot_value_top;
    uint32 local_idx_of_aot_value = 0;
    bool is_target_64bit, is_local_of_aot_value = false;
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].memory_flags & 0x02;
#endif

    is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;

    if (comp_ctx->is_indirect_mode
        && aot_intrinsic_check_capability(comp_ctx, "i32.const")) {
        WASMValue wasm_value;
        wasm_value.i32 = offset;
        offset_const = aot_load_const_from_table(
            comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_I32);
        if (!offset_const) {
            return NULL;
        }
    }
    else {
        CHECK_LLVM_CONST(offset_const);
    }

    /* Get memory base address and memory data size */
    if (func_ctx->mem_space_unchanged
#if WASM_ENABLE_SHARED_MEMORY != 0
        || is_shared_memory
#endif
    ) {
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    aot_value_top =
        func_ctx->block_stack.block_list_end->value_stack.value_list_end;
    if (aot_value_top) {
        /* aot_value_top is freed by the following POP_I32(addr),
           so save its fields here for later use */
        is_local_of_aot_value = aot_value_top->is_local;
        local_idx_of_aot_value = aot_value_top->local_idx;
    }

    POP_I32(addr);

    /*
     * Note: we don't throw the integer-overflow exception here since it
     * must already have been thrown when converting float to integer before
     */
    /* Return the address directly if the offset is constant and the access
       stays inside the memory space */
    if (LLVMIsConstant(addr) && !LLVMIsUndef(addr)
#if LLVM_VERSION_NUMBER >= 12
        && !LLVMIsPoison(addr)
#endif
    ) {
        uint64 mem_offset =
            (uint64)LLVMConstIntGetZExtValue(addr) + (uint64)offset;
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_offset + bytes <= mem_data_size) {
            /* inside memory space */
            offset1 = I32_CONST((uint32)mem_offset);
            CHECK_LLVM_CONST(offset1);
            if (!enable_segue) {
                if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder,
                                                    INT8_TYPE, mem_base_addr,
                                                    &offset1, 1, "maddr"))) {
                    aot_set_last_error("llvm build inbounds gep failed.");
                    goto fail;
                }
            }
            else {
                if (!(maddr = LLVMBuildIntToPtr(comp_ctx->builder, offset1,
                                                INT8_PTR_TYPE_GS, "maddr"))) {
                    aot_set_last_error("llvm build int to ptr failed.");
                    goto fail;
                }
            }
            return maddr;
        }
    }
    if (is_target_64bit) {
        if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
                                           I64_TYPE, "offset_i64"))
            || !(addr = LLVMBuildZExt(comp_ctx->builder, addr, I64_TYPE,
                                      "addr_i64"))) {
            aot_set_last_error("llvm build zero extend failed.");
            goto fail;
        }
    }

    /* offset1 = offset + addr; */
    BUILD_OP(Add, offset_const, addr, offset1, "offset1");

    if (comp_ctx->enable_bound_check
        && !(is_local_of_aot_value
             && aot_checked_addr_list_find(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))) {
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        if (init_page_count == 0) {
            LLVMValueRef mem_size;

            if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) {
                goto fail;
            }
            BUILD_ICMP(LLVMIntEQ, mem_size, I32_ZERO, cmp, "is_zero");
            ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
            LLVMMoveBasicBlockAfter(check_succ, block_curr);
            if (!aot_emit_exception(comp_ctx, func_ctx,
                                    EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                    check_succ)) {
                goto fail;
            }
            SET_BUILD_POS(check_succ);
            block_curr = check_succ;
        }

        if (!(mem_check_bound =
                  get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
            goto fail;
        }
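        /*
         * On 64-bit targets offset1 was zero-extended to i64 above and
         * cannot wrap, so a single unsigned compare against the bound
         * suffices.  On 32-bit targets the i32 addition may wrap, hence
         * the extra check below: if offset1 < addr, the add overflowed.
         * E.g. addr = 0xFFFFFFF0, offset = 0x20 gives offset1 = 0x10,
         * which is < addr and must trap even though 0x10 alone looks in
         * bounds.
         */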
        if (is_target_64bit) {
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
        }
        else {
            /* Check integer overflow */
            BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp2, "cmp2");
            BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
        }

        /* Add basic blocks */
        ADD_BASIC_BLOCK(check_succ, "check_succ");
        LLVMMoveBasicBlockAfter(check_succ, block_curr);

        if (!aot_emit_exception(comp_ctx, func_ctx,
                                EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                check_succ)) {
            goto fail;
        }

        SET_BUILD_POS(check_succ);

        if (is_local_of_aot_value) {
            if (!aot_checked_addr_list_add(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))
                goto fail;
        }
    }

    if (!enable_segue) {
        /* maddr = mem_base_addr + offset1 */
        if (!(maddr =
                  LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset1, 1, "maddr"))) {
            aot_set_last_error("llvm build inbounds gep failed.");
            goto fail;
        }
    }
    else {
        LLVMValueRef maddr_base;

        if (!(maddr_base = LLVMBuildIntToPtr(comp_ctx->builder, addr,
                                             INT8_PTR_TYPE_GS, "maddr_base"))) {
            aot_set_last_error("llvm build int to ptr failed.");
            goto fail;
        }
        if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                            maddr_base, &offset_const, 1,
                                            "maddr"))) {
            aot_set_last_error("llvm build inbounds gep failed.");
            goto fail;
        }
    }
    return maddr;
fail:
    return NULL;
}
#define BUILD_PTR_CAST(ptr_type)                                           \
    do {                                                                   \
        if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, ptr_type, \
                                       "data_ptr"))) {                     \
            aot_set_last_error("llvm build bit cast failed.");             \
            goto fail;                                                     \
        }                                                                  \
    } while (0)

#define BUILD_LOAD(data_type)                                             \
    do {                                                                  \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr, \
                                     "data"))) {                          \
            aot_set_last_error("llvm build load failed.");                \
            goto fail;                                                    \
        }                                                                 \
        LLVMSetAlignment(value, 1);                                       \
    } while (0)

#define BUILD_TRUNC(value, data_type)                                     \
    do {                                                                  \
        if (!(value = LLVMBuildTrunc(comp_ctx->builder, value, data_type, \
                                     "val_trunc"))) {                     \
            aot_set_last_error("llvm build trunc failed.");               \
            goto fail;                                                    \
        }                                                                 \
    } while (0)

#define BUILD_STORE()                                                   \
    do {                                                                \
        LLVMValueRef res;                                               \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) { \
            aot_set_last_error("llvm build store failed.");             \
            goto fail;                                                  \
        }                                                               \
        LLVMSetAlignment(res, 1);                                       \
    } while (0)

#define BUILD_SIGN_EXT(dst_type)                                         \
    do {                                                                 \
        if (!(value = LLVMBuildSExt(comp_ctx->builder, value, dst_type,  \
                                    "data_s_ext"))) {                    \
            aot_set_last_error("llvm build sign ext failed.");           \
            goto fail;                                                   \
        }                                                                \
    } while (0)

#define BUILD_ZERO_EXT(dst_type)                                         \
    do {                                                                 \
        if (!(value = LLVMBuildZExt(comp_ctx->builder, value, dst_type,  \
                                    "data_z_ext"))) {                    \
            aot_set_last_error("llvm build zero ext failed.");           \
            goto fail;                                                   \
        }                                                                \
    } while (0)
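
/*
 * Note on alignment: plain wasm loads/stores may target any address
 * regardless of the alignment hint in the instruction, so BUILD_LOAD and
 * BUILD_STORE pin the LLVM access alignment to 1 byte and let the
 * backend choose a safe instruction sequence.  Only the atomic variants
 * below use the real alignment, which the wasm spec requires to be
 * natural for atomics.
 */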
#if WASM_ENABLE_SHARED_MEMORY != 0
bool
check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       LLVMValueRef addr, uint32 align)
{
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_align_succ;
    LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
    LLVMValueRef res;

    CHECK_LLVM_CONST(align_mask);

    /* Convert pointer to int */
    if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr, I32_TYPE,
                                   "address"))) {
        aot_set_last_error("llvm build ptr to int failed.");
        goto fail;
    }

    /* The memory address should be aligned */
    BUILD_OP(And, addr, align_mask, res, "and");
    BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");

    /* Add basic blocks */
    ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
    LLVMMoveBasicBlockAfter(check_align_succ, block_curr);

    if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_UNALIGNED_ATOMIC, true,
                            res, check_align_succ)) {
        goto fail;
    }

    SET_BUILD_POS(check_align_succ);
    return true;
fail:
    return false;
}
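
/*
 * Worked example: for i32.atomic.load the alignment immediate is 2
 * (meaning 4-byte alignment), so align_mask = (1 << 2) - 1 = 3 and the
 * code above traps with EXCE_UNALIGNED_ATOMIC whenever (addr & 3) != 0.
 */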
#define BUILD_ATOMIC_LOAD(align, data_type)                                \
    do {                                                                   \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr,  \
                                     "data"))) {                           \
            aot_set_last_error("llvm build load failed.");                 \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(value, 1 << align);                               \
        LLVMSetVolatile(value, true);                                      \
        LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent);  \
    } while (0)

#define BUILD_ATOMIC_STORE(align)                                          \
    do {                                                                   \
        LLVMValueRef res;                                                  \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {    \
            aot_set_last_error("llvm build store failed.");                \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(res, 1 << align);                                 \
        LLVMSetVolatile(res, true);                                        \
        LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent);    \
    } while (0)
#endif
bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset, uint32 bytes, bool sign,
                        bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;
    bool enable_segue = comp_ctx->enable_segue_i32_load;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue)))
        return false;

    switch (bytes) {
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I32_TYPE);
            else
#endif
                BUILD_LOAD(I32_TYPE);
            break;
        case 2:
        case 1:
            if (bytes == 2) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT16_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
                data_type = INT16_TYPE;
            }
            else {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT8_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I32_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I32_TYPE);
                else
                    BUILD_ZERO_EXT(I32_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I32(value);
    (void)data_type;
    return true;
fail:
    return false;
}
bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset, uint32 bytes, bool sign,
                        bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;
    bool enable_segue = comp_ctx->enable_segue_i64_load;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue)))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I64_TYPE);
            else
#endif
                BUILD_LOAD(I64_TYPE);
            break;
        case 4:
        case 2:
        case 1:
            if (bytes == 4) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT32_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
                data_type = I32_TYPE;
            }
            else if (bytes == 2) {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT16_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
                data_type = INT16_TYPE;
            }
            else {
                if (!enable_segue)
                    BUILD_PTR_CAST(INT8_PTR_TYPE);
                else
                    BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I64_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I64_TYPE);
                else
                    BUILD_ZERO_EXT(I64_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I64(value);
    (void)data_type;
    return true;
fail:
    return false;
}
bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f32_load;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
                                            enable_segue)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F32_PTR_TYPE);
    else
        BUILD_PTR_CAST(F32_PTR_TYPE_GS);
    BUILD_LOAD(F32_TYPE);

    PUSH_F32(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f64_load;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
                                            enable_segue)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F64_PTR_TYPE);
    else
        BUILD_PTR_CAST(F64_PTR_TYPE_GS);
    BUILD_LOAD(F64_TYPE);

    PUSH_F64(value);
    return true;
fail:
    return false;
}
bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset, uint32 bytes, bool atomic)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_i32_store;

    POP_I32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue)))
        return false;

    switch (bytes) {
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}
bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset, uint32 bytes, bool atomic)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_i64_store;

    POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue)))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}
bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f32_store;

    POP_F32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
                                            enable_segue)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F32_PTR_TYPE);
    else
        BUILD_PTR_CAST(F32_PTR_TYPE_GS);
    BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;
    bool enable_segue = comp_ctx->enable_segue_f64_store;

    POP_F64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
                                            enable_segue)))
        return false;

    if (!enable_segue)
        BUILD_PTR_CAST(F64_PTR_TYPE);
    else
        BUILD_PTR_CAST(F64_PTR_TYPE_GS);
    BUILD_STORE();
    return true;
fail:
    return false;
}
static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size;

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_cur_page_count_addr;
    }
    else {
        if (!(mem_size = LLVMBuildLoad2(
                  comp_ctx->builder, I32_TYPE,
                  func_ctx->mem_info[0].mem_cur_page_count_addr, "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    return mem_size;
fail:
    return NULL;
}

bool
aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);

    if (mem_size)
        PUSH_I32(mem_size);
    return mem_size ? true : false;
fail:
    return false;
}
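
/*
 * memory.grow: call the runtime's enlarge-memory helper with the page
 * delta, pushing the previous page count on success and -1 on failure,
 * as the wasm spec requires.  The helper is reached differently per
 * mode: JIT embeds a constant pointer to wasm_enlarge_memory, indirect
 * (XIP) mode goes through the native symbol table, and plain AOT mode
 * declares an external aot_enlarge_memory for the linker to resolve.
 */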
bool
aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
    LLVMValueRef delta, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
    int32 func_index;

    if (!mem_size)
        return false;

    POP_I32(delta);

    /* Function type of aot_enlarge_memory() */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
        aot_set_last_error("llvm add function type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        /* JIT mode, call the function directly */
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("llvm add pointer type failed.");
            return false;
        }
        if (!(value = I64_CONST((uint64)(uintptr_t)wasm_enlarge_memory))
            || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }
        func_index =
            aot_get_native_symbol_index(comp_ctx, "aot_enlarge_memory");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        char *func_name = "aot_enlarge_memory";
        /* AOT mode, declare the function */
        if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name))
            && !(func =
                     LLVMAddFunction(func_ctx->module, func_name, func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    /* Call function aot_enlarge_memory() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = delta;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");

    /* ret_value = ret_value ? prev_page_count : -1 */
    if (!(ret_value = LLVMBuildSelect(comp_ctx->builder, ret_value, mem_size,
                                      I32_NEG_ONE, "mem_grow_ret"))) {
        aot_set_last_error("llvm build select failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}

#if WASM_ENABLE_BULK_MEMORY != 0
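
/*
 * Bulk-memory variant of the bounds check: here both the offset and the
 * length are runtime i32 values, so after a constant fast path the check
 * is simply offset + bytes > mem_data_size, computed in 64 bits so the
 * addition cannot wrap, trapping with an out-of-bounds exception.
 */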
static LLVMValueRef
check_bulk_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           LLVMValueRef offset, LLVMValueRef bytes)
{
    LLVMValueRef maddr, max_addr, cmp;
    LLVMValueRef mem_base_addr;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    LLVMValueRef mem_size;

    /* Get memory base address and memory data size */
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].memory_flags & 0x02;

    if (func_ctx->mem_space_unchanged || is_shared_memory) {
#else
    if (func_ctx->mem_space_unchanged) {
#endif
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    /*
     * Note: we don't throw the integer-overflow exception here since it
     * must already have been thrown when converting float to integer before
     */
    /* Return the address directly if offset and length are constant and
       the access stays inside the memory space */
    if (!LLVMIsUndef(offset) && !LLVMIsUndef(bytes)
#if LLVM_VERSION_NUMBER >= 12
        && !LLVMIsPoison(offset) && !LLVMIsPoison(bytes)
#endif
        && LLVMIsConstant(offset) && LLVMIsConstant(bytes)) {
        uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(offset);
        uint64 mem_len = (uint64)LLVMConstIntGetZExtValue(bytes);
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_data_size > 0 && mem_offset + mem_len <= mem_data_size) {
            /* inside memory space */
            /* maddr = mem_base_addr + offset */
            if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                                mem_base_addr, &offset, 1,
                                                "maddr"))) {
                aot_set_last_error("llvm build inbounds gep failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_data_size_addr;
    }
    else {
        if (!(mem_size = LLVMBuildLoad2(
                  comp_ctx->builder, I32_TYPE,
                  func_ctx->mem_info[0].mem_data_size_addr, "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    ADD_BASIC_BLOCK(check_succ, "check_succ");
    LLVMMoveBasicBlockAfter(check_succ, block_curr);

    offset =
        LLVMBuildZExt(comp_ctx->builder, offset, I64_TYPE, "extend_offset");
    bytes = LLVMBuildZExt(comp_ctx->builder, bytes, I64_TYPE, "extend_len");
    mem_size =
        LLVMBuildZExt(comp_ctx->builder, mem_size, I64_TYPE, "extend_size");

    BUILD_OP(Add, offset, bytes, max_addr, "max_addr");
    BUILD_ICMP(LLVMIntUGT, max_addr, mem_size, cmp, "cmp_max_mem_addr");
    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                            check_succ)) {
        goto fail;
    }

    /* maddr = mem_base_addr + offset */
    if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset, 1, "maddr"))) {
        aot_set_last_error("llvm build inbounds gep failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}
bool
aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint32 seg_index)
{
    LLVMValueRef seg, offset, dst, len, param_values[5], ret_value, func, value;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef mem_init_fail, init_success;

    seg = I32_CONST(seg_index);

    POP_I32(len);
    POP_I32(offset);
    POP_I32(dst);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = I32_TYPE;
    param_types[3] = I32_TYPE;
    param_types[4] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_memory_init, 5);
    else
        GET_AOT_FUNCTION(aot_memory_init, 5);

    /* Call function aot_memory_init() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    param_values[2] = offset;
    param_values[3] = len;
    param_values[4] = dst;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_init_ret");

    ADD_BASIC_BLOCK(mem_init_fail, "mem_init_fail");
    ADD_BASIC_BLOCK(init_success, "init_success");
    LLVMMoveBasicBlockAfter(mem_init_fail, block_curr);
    LLVMMoveBasicBlockAfter(init_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, ret_value, init_success,
                         mem_init_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If memory.init failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
    return true;
fail:
    return false;
}
bool
aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 seg_index)
{
    LLVMValueRef seg, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_data_drop, 2);
    else
        GET_AOT_FUNCTION(aot_data_drop, 2);

    /* Call function aot_data_drop() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }
    return true;
fail:
    return false;
}
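
/*
 * memory.copy: the source and destination ranges may overlap, so the
 * copy needs memmove semantics.  JIT and indirect modes call a runtime
 * memmove through a function pointer; otherwise an llvm.memmove
 * intrinsic is emitted inline.
 */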
bool
aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef src, dst, src_addr, dst_addr, len, res;
    bool call_aot_memmove = false;

    POP_I32(len);
    POP_I32(src);
    POP_I32(dst);

    if (!(src_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
        return false;

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    call_aot_memmove = comp_ctx->is_indirect_mode || comp_ctx->is_jit_mode;
    if (call_aot_memmove) {
        LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
        LLVMValueRef func, params[3];

        param_types[0] = INT8_PTR_TYPE;
        param_types[1] = INT8_PTR_TYPE;
        param_types[2] = I32_TYPE;
        ret_type = INT8_PTR_TYPE;

        if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }

        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function pointer type failed.");
            return false;
        }

        if (comp_ctx->is_jit_mode) {
            if (!(func = I64_CONST((uint64)(uintptr_t)aot_memmove))
                || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
                aot_set_last_error("create LLVM value failed.");
                return false;
            }
        }
        else {
            int32 func_index;
            func_index = aot_get_native_symbol_index(comp_ctx, "memmove");
            if (func_index < 0) {
                return false;
            }
            if (!(func =
                      aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                              func_ptr_type, func_index))) {
                return false;
            }
        }

        params[0] = dst_addr;
        params[1] = src_addr;
        params[2] = len;
        if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params,
                                   3, "call_memmove"))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    else {
        if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1, src_addr,
                                     1, len))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    return true;
fail:
    return false;
}
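
/* Thin wrapper so the JIT can embed the address of a plain C function;
   presumably taking memset's address directly could bind to a macro or
   compiler builtin on some toolchains (an assumption: the original code
   does not state the rationale). */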
static void *
jit_memset(void *s, int c, size_t n)
{
    return memset(s, c, n);
}

bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef val, dst, dst_addr, len, res;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
    LLVMValueRef func, params[3];

    POP_I32(len);
    POP_I32(val);
    POP_I32(dst);

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = INT8_PTR_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
        aot_set_last_error("create LLVM function type failed.");
        return false;
    }

    if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
        aot_set_last_error("create LLVM function pointer type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        if (!(func = I64_CONST((uint64)(uintptr_t)jit_memset))
            || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        int32 func_index;
        func_index = aot_get_native_symbol_index(comp_ctx, "memset");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        if (!(func = LLVMGetNamedFunction(func_ctx->module, "memset"))
            && !(func =
                     LLVMAddFunction(func_ctx->module, "memset", func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    params[0] = dst_addr;
    params[1] = val;
    params[2] = len;
    if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params, 3,
                               "call_memset"))) {
        aot_set_last_error("llvm build memset failed.");
        return false;
    }
    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_BULK_MEMORY */

#if WASM_ENABLE_SHARED_MEMORY != 0
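
/*
 * Atomic read-modify-write: `atomic_op` is passed straight through to
 * LLVMBuildAtomicRMW, so the caller is expected to hand in an
 * LLVMAtomicRMWBinOp value; the wasm operator thus maps 1:1 onto an LLVM
 * atomicrmw instruction with seq_cst ordering.  Sub-word widths truncate
 * the operand first and zero-extend the returned old value afterwards.
 */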
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint8 atomic_op, uint8 op_type, uint32 align,
                          uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, result;
    bool enable_segue = (op_type == VALUE_TYPE_I32)
                            ? comp_ctx->enable_segue_i32_load
                                  && comp_ctx->enable_segue_i32_store
                            : comp_ctx->enable_segue_i64_load
                                  && comp_ctx->enable_segue_i64_store;

    if (op_type == VALUE_TYPE_I32)
        POP_I32(value);
    else
        POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            if (op_type == VALUE_TYPE_I64)
                BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicRMW(
              comp_ctx->builder, atomic_op, maddr, value,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                     "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                     "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint8 op_type,
                              uint32 align, uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, expect, result;
    bool enable_segue = (op_type == VALUE_TYPE_I32)
                            ? comp_ctx->enable_segue_i32_load
                                  && comp_ctx->enable_segue_i32_store
                            : comp_ctx->enable_segue_i64_load
                                  && comp_ctx->enable_segue_i64_store;

    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
        POP_I32(expect);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
    }

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            if (op_type == VALUE_TYPE_I64) {
                BUILD_TRUNC(value, I32_TYPE);
                BUILD_TRUNC(expect, I32_TYPE);
            }
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            BUILD_TRUNC(expect, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            BUILD_TRUNC(expect, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicCmpXchg(
              comp_ctx->builder, maddr, expect, value,
              LLVMAtomicOrderingSequentiallyConsistent,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    /* CmpXchg returns an {iN, i1} struct; extract the previous value
       from it */
    if (!(result = LLVMBuildExtractValue(comp_ctx->builder, result, 0,
                                         "previous_value"))) {
        goto fail;
    }

    if (op_type == VALUE_TYPE_I32) {
        if (LLVMTypeOf(result) != I32_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                         "result_i32"))) {
                goto fail;
            }
        }
        PUSH_I32(result);
    }
    else {
        if (LLVMTypeOf(result) != I64_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                         "result_i64"))) {
                goto fail;
            }
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}
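
/*
 * atomic.wait: lowered to a call into wasm_runtime_atomic_wait(), whose
 * i32 result is pushed as the instruction's result.  Judging by the
 * comparison below, -1 signals a runtime failure, in which case the
 * generated code returns early so the runtime can catch the exception
 * (mirroring the memory.init failure path above).
 */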
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint8 op_type, uint32 align, uint32 offset,
                           uint32 bytes)
{
    LLVMValueRef maddr, value, timeout, expect, cmp;
    LLVMValueRef param_values[5], ret_value, func, is_wait64;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    LLVMBasicBlockRef wait_fail, wait_success;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;

    POP_I64(timeout);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(expect);
        is_wait64 = I8_CONST(false);
        if (!(expect = LLVMBuildZExt(comp_ctx->builder, expect, I64_TYPE,
                                     "expect_i64"))) {
            goto fail;
        }
    }
    else {
        POP_I64(expect);
        is_wait64 = I8_CONST(true);
    }

    CHECK_LLVM_CONST(is_wait64);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            false)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I64_TYPE;
    param_types[3] = I64_TYPE;
    param_types[4] = INT8_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);

    /* Call function wasm_runtime_atomic_wait() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = expect;
    param_values[3] = timeout;
    param_values[4] = is_wait64;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntNE, ret_value, I32_NEG_ONE, cmp, "atomic_wait_ret");

    ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
    ADD_BASIC_BLOCK(wait_success, "wait_success");
    LLVMMoveBasicBlockAfter(wait_fail, block_curr);
    LLVMMoveBasicBlockAfter(wait_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, cmp, wait_success, wait_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If atomic wait failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);

    PUSH_I32(ret_value);

#if WASM_ENABLE_THREAD_MGR != 0
    /* Insert suspend check point */
    if (comp_ctx->enable_thread_mgr) {
        if (!check_suspend_flags(comp_ctx, func_ctx))
            return false;
    }
#endif

    return true;
fail:
    return false;
}
bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint32 align,
                              uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, count;
    LLVMValueRef param_values[3], ret_value, func;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;

    POP_I32(count);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            false)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);

    /* Call function wasm_runtime_atomic_notify() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = count;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 3, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}

bool
aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return LLVMBuildFence(comp_ctx->builder,
                          LLVMAtomicOrderingSequentiallyConsistent, false, "")
               ? true
               : false;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */