/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "aot_emit_memory.h"
#include "aot_emit_exception.h"
#include "../aot/aot_runtime.h"
#include "aot_intrinsic.h"
#include "aot_emit_control.h"
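
/*
 * The helper macros below assume that `comp_ctx` and `func_ctx` are in
 * scope and that the enclosing function defines a `fail:` label to jump to
 * on error. For example (illustrative):
 *
 *     LLVMValueRef cmp;
 *     BUILD_ICMP(LLVMIntEQ, lhs, rhs, cmp, "eq");
 *
 * expands to an LLVMBuildICmp() call that records the last error and jumps
 * to `fail` if the build fails.
 */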

#define BUILD_ICMP(op, left, right, res, name)                                \
    do {                                                                      \
        if (!(res =                                                           \
                  LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
            aot_set_last_error("llvm build icmp failed.");                    \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define BUILD_OP(Op, left, right, res, name)                                \
    do {                                                                    \
        if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) { \
            aot_set_last_error("llvm build " #Op " fail.");                 \
            goto fail;                                                      \
        }                                                                   \
    } while (0)

#define ADD_BASIC_BLOCK(block, name)                                          \
    do {                                                                      \
        if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,        \
                                                    func_ctx->func, name))) { \
            aot_set_last_error("llvm add basic block failed.");               \
            goto fail;                                                        \
        }                                                                     \
    } while (0)

#define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block)

static LLVMValueRef
get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       uint32 bytes)
{
    LLVMValueRef mem_check_bound = NULL;

    switch (bytes) {
        case 1:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_1byte;
            break;
        case 2:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_2bytes;
            break;
        case 4:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_4bytes;
            break;
        case 8:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
            break;
        case 16:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_16bytes;
            break;
        default:
            bh_assert(0);
            return NULL;
    }

    if (func_ctx->mem_space_unchanged)
        return mem_check_bound;

    if (!(mem_check_bound = LLVMBuildLoad2(
              comp_ctx->builder,
              (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE : I32_TYPE,
              mem_check_bound, "mem_check_bound"))) {
        aot_set_last_error("llvm build load failed.");
        return NULL;
    }
    return mem_check_bound;
}
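
/*
 * Note (assumption, documented for readability): each
 * mem_bound_check_Nbytes field is expected to hold the largest valid start
 * offset for an N-byte access, i.e. roughly memory_data_size - N,
 * precomputed by the runtime; the UGT comparison in
 * aot_check_memory_overflow() below relies on that convention. See the
 * runtime's memory instance definition for the authoritative values.
 */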

static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);

LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint32 offset, uint32 bytes)
{
    LLVMValueRef offset_const = I32_CONST(offset);
    LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
    LLVMValueRef mem_base_addr, mem_check_bound;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    AOTValue *aot_value_top;
    uint32 local_idx_of_aot_value = 0;
    bool is_target_64bit, is_local_of_aot_value = false;
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].memory_flags & 0x02;
#endif

    is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;

    if (comp_ctx->is_indirect_mode
        && aot_intrinsic_check_capability(comp_ctx, "i32.const")) {
        WASMValue wasm_value;
        wasm_value.i32 = offset;
        offset_const = aot_load_const_from_table(
            comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_I32);
        if (!offset_const) {
            return NULL;
        }
    }
    else {
        CHECK_LLVM_CONST(offset_const);
    }

    /* Get memory base address and memory data size */
    if (func_ctx->mem_space_unchanged
#if WASM_ENABLE_SHARED_MEMORY != 0
        || is_shared_memory
#endif
    ) {
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    aot_value_top =
        func_ctx->block_stack.block_list_end->value_stack.value_list_end;
    if (aot_value_top) {
        /* aot_value_top is freed by the POP_I32(addr) below,
           so save the fields we need before popping */
        is_local_of_aot_value = aot_value_top->is_local;
        local_idx_of_aot_value = aot_value_top->local_idx;
    }

    POP_I32(addr);

    /*
     * Note: we don't throw the integer-overflow exception here, since it
     * must already have been thrown by the preceding float-to-integer
     * conversion
     */
    /* Return the address directly if the offset is constant and the access
       stays inside the initial memory space */
    if (LLVMIsConstant(addr) && !LLVMIsUndef(addr)
#if LLVM_VERSION_NUMBER >= 12
        && !LLVMIsPoison(addr)
#endif
    ) {
        uint64 mem_offset =
            (uint64)LLVMConstIntGetZExtValue(addr) + (uint64)offset;
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_offset + bytes <= mem_data_size) {
            /* inside memory space */
            offset1 = I32_CONST((uint32)mem_offset);
            CHECK_LLVM_CONST(offset1);
            if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                                mem_base_addr, &offset1, 1,
                                                "maddr"))) {
                aot_set_last_error("llvm build add failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (is_target_64bit) {
        if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
                                           I64_TYPE, "offset_i64"))
            || !(addr = LLVMBuildZExt(comp_ctx->builder, addr, I64_TYPE,
                                      "addr_i64"))) {
            aot_set_last_error("llvm build zero extend failed.");
            goto fail;
        }
    }

    /* offset1 = offset + addr; */
    BUILD_OP(Add, offset_const, addr, offset1, "offset1");

    if (comp_ctx->enable_bound_check
        && !(is_local_of_aot_value
             && aot_checked_addr_list_find(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))) {
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        if (init_page_count == 0) {
            LLVMValueRef mem_size;

            if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) {
                goto fail;
            }
            BUILD_ICMP(LLVMIntEQ, mem_size, I32_ZERO, cmp, "is_zero");
            ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
            LLVMMoveBasicBlockAfter(check_succ, block_curr);
            if (!aot_emit_exception(comp_ctx, func_ctx,
                                    EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                    check_succ)) {
                goto fail;
            }
            SET_BUILD_POS(check_succ);
            block_curr = check_succ;
        }

        if (!(mem_check_bound =
                  get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
            goto fail;
        }

        if (is_target_64bit) {
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
        }
        else {
            /* Check integer overflow */
            BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp2, "cmp2");
            BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
        }

        /* Add basic blocks */
        ADD_BASIC_BLOCK(check_succ, "check_succ");
        LLVMMoveBasicBlockAfter(check_succ, block_curr);

        if (!aot_emit_exception(comp_ctx, func_ctx,
                                EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                                check_succ)) {
            goto fail;
        }

        SET_BUILD_POS(check_succ);

        if (is_local_of_aot_value) {
            if (!aot_checked_addr_list_add(func_ctx, local_idx_of_aot_value,
                                           offset, bytes))
                goto fail;
        }
    }

    /* maddr = mem_base_addr + offset1 */
    if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset1, 1, "maddr"))) {
        aot_set_last_error("llvm build add failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}
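
/*
 * A rough sketch of the guard emitted above for a 64-bit target with bound
 * check enabled (names are illustrative, not the exact IR):
 *
 *     %offset1 = add i64 zext(offset_const), zext(addr)
 *     %cmp     = icmp ugt i64 %offset1, %mem_check_bound
 *     br i1 %cmp, label %out_of_bounds, label %check_succ
 *   check_succ:
 *     %maddr   = getelementptr inbounds i8, i8* %mem_base, i64 %offset1
 *
 * On 32-bit targets the add itself can wrap, hence the extra
 * `icmp ult %offset1, %addr` overflow test OR-ed into the condition.
 */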

#define BUILD_PTR_CAST(ptr_type)                                           \
    do {                                                                   \
        if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, ptr_type, \
                                       "data_ptr"))) {                     \
            aot_set_last_error("llvm build bit cast failed.");             \
            goto fail;                                                     \
        }                                                                  \
    } while (0)

#define BUILD_LOAD(data_type)                                             \
    do {                                                                  \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr, \
                                     "data"))) {                          \
            aot_set_last_error("llvm build load failed.");                \
            goto fail;                                                    \
        }                                                                 \
        LLVMSetAlignment(value, 1);                                       \
    } while (0)

#define BUILD_TRUNC(value, data_type)                                     \
    do {                                                                  \
        if (!(value = LLVMBuildTrunc(comp_ctx->builder, value, data_type, \
                                     "val_trunc"))) {                     \
            aot_set_last_error("llvm build trunc failed.");               \
            goto fail;                                                    \
        }                                                                 \
    } while (0)

#define BUILD_STORE()                                                   \
    do {                                                                \
        LLVMValueRef res;                                               \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) { \
            aot_set_last_error("llvm build store failed.");             \
            goto fail;                                                  \
        }                                                               \
        LLVMSetAlignment(res, 1);                                       \
    } while (0)

#define BUILD_SIGN_EXT(dst_type)                                        \
    do {                                                                \
        if (!(value = LLVMBuildSExt(comp_ctx->builder, value, dst_type, \
                                    "data_s_ext"))) {                   \
            aot_set_last_error("llvm build sign ext failed.");          \
            goto fail;                                                  \
        }                                                               \
    } while (0)

#define BUILD_ZERO_EXT(dst_type)                                        \
    do {                                                                \
        if (!(value = LLVMBuildZExt(comp_ctx->builder, value, dst_type, \
                                    "data_z_ext"))) {                   \
            aot_set_last_error("llvm build zero ext failed.");          \
            goto fail;                                                  \
        }                                                               \
    } while (0)

#if WASM_ENABLE_SHARED_MEMORY != 0
bool
check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       LLVMValueRef addr, uint32 align)
{
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_align_succ;
    LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
    LLVMValueRef res;

    CHECK_LLVM_CONST(align_mask);

    /* Convert pointer to int */
    if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr, I32_TYPE,
                                   "address"))) {
        aot_set_last_error("llvm build ptr to int failed.");
        goto fail;
    }

    /* The memory address should be aligned */
    BUILD_OP(And, addr, align_mask, res, "and");
    BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");

    /* Add basic blocks */
    ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
    LLVMMoveBasicBlockAfter(check_align_succ, block_curr);

    if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_UNALIGNED_ATOMIC, true,
                            res, check_align_succ)) {
        goto fail;
    }

    SET_BUILD_POS(check_align_succ);
    return true;
fail:
    return false;
}
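
/*
 * Example: `align` is the log2 alignment encoded in the wasm opcode, so a
 * 4-byte atomic access arrives with align = 2 and
 * align_mask = (1 << 2) - 1 = 0x3; any address with a bit set inside that
 * mask raises EXCE_UNALIGNED_ATOMIC.
 */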

#define BUILD_ATOMIC_LOAD(align, data_type)                                \
    do {                                                                   \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr,  \
                                     "data"))) {                           \
            aot_set_last_error("llvm build load failed.");                 \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(value, 1 << align);                               \
        LLVMSetVolatile(value, true);                                      \
        LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent);  \
    } while (0)

#define BUILD_ATOMIC_STORE(align)                                          \
    do {                                                                   \
        LLVMValueRef res;                                                  \
        if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
            goto fail;                                                     \
        }                                                                  \
        if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {    \
            aot_set_last_error("llvm build store failed.");                \
            goto fail;                                                     \
        }                                                                  \
        LLVMSetAlignment(res, 1 << align);                                 \
        LLVMSetVolatile(res, true);                                        \
        LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent);    \
    } while (0)
#endif

bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset, uint32 bytes, bool sign,
                        bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    switch (bytes) {
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I32_TYPE);
            else
#endif
                BUILD_LOAD(I32_TYPE);
            break;
        case 2:
        case 1:
            if (bytes == 2) {
                BUILD_PTR_CAST(INT16_PTR_TYPE);
                data_type = INT16_TYPE;
            }
            else {
                BUILD_PTR_CAST(INT8_PTR_TYPE);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I32_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I32_TYPE);
                else
                    BUILD_ZERO_EXT(I32_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I32(value);
    (void)data_type;
    return true;
fail:
    return false;
}
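
/*
 * Example mapping (illustrative): wasm `i32.load8_s` reaches this function
 * with bytes = 1, sign = true and atomic = false, so the value is loaded
 * via BUILD_LOAD(INT8_TYPE) and widened with BUILD_SIGN_EXT(I32_TYPE),
 * while `i32.atomic.load` arrives with bytes = 4 and atomic = true.
 */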

bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset, uint32 bytes, bool sign,
                        bool atomic)
{
    LLVMValueRef maddr, value = NULL;
    LLVMTypeRef data_type;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align, I64_TYPE);
            else
#endif
                BUILD_LOAD(I64_TYPE);
            break;
        case 4:
        case 2:
        case 1:
            if (bytes == 4) {
                BUILD_PTR_CAST(INT32_PTR_TYPE);
                data_type = I32_TYPE;
            }
            else if (bytes == 2) {
                BUILD_PTR_CAST(INT16_PTR_TYPE);
                data_type = INT16_TYPE;
            }
            else {
                BUILD_PTR_CAST(INT8_PTR_TYPE);
                data_type = INT8_TYPE;
            }
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align, data_type);
                BUILD_ZERO_EXT(I64_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD(data_type);
                if (sign)
                    BUILD_SIGN_EXT(I64_TYPE);
                else
                    BUILD_ZERO_EXT(I64_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I64(value);
    (void)data_type;
    return true;
fail:
    return false;
}

bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
        return false;

    BUILD_PTR_CAST(F32_PTR_TYPE);
    BUILD_LOAD(F32_TYPE);
    PUSH_F32(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
        return false;

    BUILD_PTR_CAST(F64_PTR_TYPE);
    BUILD_LOAD(F64_TYPE);
    PUSH_F64(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset, uint32 bytes, bool atomic)
{
    LLVMValueRef maddr, value;

    POP_I32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    switch (bytes) {
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset, uint32 bytes, bool atomic)
{
    LLVMValueRef maddr, value;

    POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    POP_F32(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
        return false;

    BUILD_PTR_CAST(F32_PTR_TYPE);
    BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    POP_F64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
        return false;

    BUILD_PTR_CAST(F64_PTR_TYPE);
    BUILD_STORE();
    return true;
fail:
    return false;
}

static LLVMValueRef
get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size;

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_cur_page_count_addr;
    }
    else {
        if (!(mem_size = LLVMBuildLoad2(
                  comp_ctx->builder, I32_TYPE,
                  func_ctx->mem_info[0].mem_cur_page_count_addr, "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    return mem_size;
fail:
    return NULL;
}

bool
aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);

    if (mem_size)
        PUSH_I32(mem_size);
    return mem_size ? true : false;
fail:
    return false;
}

bool
aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
    LLVMValueRef delta, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
    int32 func_index;

    if (!mem_size)
        return false;

    POP_I32(delta);

    /* Function type of aot_enlarge_memory() */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
        aot_set_last_error("llvm add function type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        /* JIT mode, call the function directly */
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("llvm add pointer type failed.");
            return false;
        }
        if (!(value = I64_CONST((uint64)(uintptr_t)wasm_enlarge_memory))
            || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }
        func_index =
            aot_get_native_symbol_index(comp_ctx, "aot_enlarge_memory");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        char *func_name = "aot_enlarge_memory";
        /* AOT mode, declare the function */
        if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name))
            && !(func =
                     LLVMAddFunction(func_ctx->module, func_name, func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    /* Call function aot_enlarge_memory() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = delta;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");

    /* ret_value = ret_value ? prev_page_count : -1 */
    if (!(ret_value = LLVMBuildSelect(comp_ctx->builder, ret_value, mem_size,
                                      I32_NEG_ONE, "mem_grow_ret"))) {
        aot_set_last_error("llvm build select failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}
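
/*
 * This matches the wasm memory.grow contract: on success the instruction
 * yields the previous size in pages (mem_size was read before the call),
 * and -1 on failure.
 */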

#if WASM_ENABLE_BULK_MEMORY != 0
static LLVMValueRef
check_bulk_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           LLVMValueRef offset, LLVMValueRef bytes)
{
    LLVMValueRef maddr, max_addr, cmp;
    LLVMValueRef mem_base_addr;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    LLVMValueRef mem_size;

    /* Get memory base address and memory data size */
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].memory_flags & 0x02;
    if (func_ctx->mem_space_unchanged || is_shared_memory) {
#else
    if (func_ctx->mem_space_unchanged) {
#endif
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr = LLVMBuildLoad2(
                  comp_ctx->builder, OPQ_PTR_TYPE,
                  func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    /*
     * Note: we don't throw the integer-overflow exception here, since it
     * must already have been thrown by the preceding float-to-integer
     * conversion
     */
    /* Return the address directly if both offset and length are constant
       and the access stays inside the initial memory space */
    if (!LLVMIsUndef(offset) && !LLVMIsUndef(bytes)
#if LLVM_VERSION_NUMBER >= 12
        && !LLVMIsPoison(offset) && !LLVMIsPoison(bytes)
#endif
        && LLVMIsConstant(offset) && LLVMIsConstant(bytes)) {
        uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(offset);
        uint64 mem_len = (uint64)LLVMConstIntGetZExtValue(bytes);
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        uint32 mem_data_size = num_bytes_per_page * init_page_count;

        if (mem_data_size > 0 && mem_offset + mem_len <= mem_data_size) {
            /* inside memory space */
            /* maddr = mem_base_addr + moffset */
            if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                                mem_base_addr, &offset, 1,
                                                "maddr"))) {
                aot_set_last_error("llvm build add failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_data_size_addr;
    }
    else {
        if (!(mem_size = LLVMBuildLoad2(
                  comp_ctx->builder, I32_TYPE,
                  func_ctx->mem_info[0].mem_data_size_addr, "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    ADD_BASIC_BLOCK(check_succ, "check_succ");
    LLVMMoveBasicBlockAfter(check_succ, block_curr);

    offset =
        LLVMBuildZExt(comp_ctx->builder, offset, I64_TYPE, "extend_offset");
    bytes = LLVMBuildZExt(comp_ctx->builder, bytes, I64_TYPE, "extend_len");
    mem_size =
        LLVMBuildZExt(comp_ctx->builder, mem_size, I64_TYPE, "extend_size");

    BUILD_OP(Add, offset, bytes, max_addr, "max_addr");
    BUILD_ICMP(LLVMIntUGT, max_addr, mem_size, cmp, "cmp_max_mem_addr");
    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
                            check_succ)) {
        goto fail;
    }

    /* maddr = mem_base_addr + offset */
    if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
                                        mem_base_addr, &offset, 1, "maddr"))) {
        aot_set_last_error("llvm build add failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}
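
/*
 * Unlike aot_check_memory_overflow() above, bulk operations take a runtime
 * length, so the guard widens offset, len and the current mem_data_size to
 * i64 and tests `offset + len > mem_size`; the widening guarantees the
 * addition itself cannot wrap around.
 */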

bool
aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint32 seg_index)
{
    LLVMValueRef seg, offset, dst, len, param_values[5], ret_value, func, value;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef mem_init_fail, init_success;

    seg = I32_CONST(seg_index);

    POP_I32(len);
    POP_I32(offset);
    POP_I32(dst);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = I32_TYPE;
    param_types[3] = I32_TYPE;
    param_types[4] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_memory_init, 5);
    else
        GET_AOT_FUNCTION(aot_memory_init, 5);

    /* Call function aot_memory_init() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    param_values[2] = offset;
    param_values[3] = len;
    param_values[4] = dst;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_init_ret");

    ADD_BASIC_BLOCK(mem_init_fail, "mem_init_fail");
    ADD_BASIC_BLOCK(init_success, "init_success");

    LLVMMoveBasicBlockAfter(mem_init_fail, block_curr);
    LLVMMoveBasicBlockAfter(init_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, ret_value, init_success,
                         mem_init_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If memory.init failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
    return true;
fail:
    return false;
}

bool
aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 seg_index)
{
    LLVMValueRef seg, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (comp_ctx->is_jit_mode)
        GET_AOT_FUNCTION(llvm_jit_data_drop, 2);
    else
        GET_AOT_FUNCTION(aot_data_drop, 2);

    /* Call function aot_data_drop() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    return true;
fail:
    return false;
}

bool
aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef src, dst, src_addr, dst_addr, len, res;
    bool call_aot_memmove = false;

    POP_I32(len);
    POP_I32(src);
    POP_I32(dst);

    if (!(src_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
        return false;

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    call_aot_memmove = comp_ctx->is_indirect_mode || comp_ctx->is_jit_mode;
    if (call_aot_memmove) {
        LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
        LLVMValueRef func, params[3];

        param_types[0] = INT8_PTR_TYPE;
        param_types[1] = INT8_PTR_TYPE;
        param_types[2] = I32_TYPE;
        ret_type = INT8_PTR_TYPE;

        if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
            aot_set_last_error("create LLVM function type failed.");
            return false;
        }

        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("create LLVM function pointer type failed.");
            return false;
        }

        if (comp_ctx->is_jit_mode) {
            if (!(func = I64_CONST((uint64)(uintptr_t)aot_memmove))
                || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
                aot_set_last_error("create LLVM value failed.");
                return false;
            }
        }
        else {
            int32 func_index;
            func_index = aot_get_native_symbol_index(comp_ctx, "memmove");
            if (func_index < 0) {
                return false;
            }
            if (!(func =
                      aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                              func_ptr_type, func_index))) {
                return false;
            }
        }

        params[0] = dst_addr;
        params[1] = src_addr;
        params[2] = len;
        if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params,
                                   3, "call_memmove"))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    else {
        if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1, src_addr,
                                     1, len))) {
            aot_set_last_error("llvm build memmove failed.");
            return false;
        }
    }
    return true;
fail:
    return false;
}
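
/*
 * Design note: memory.copy must handle overlapping source and destination
 * ranges, hence memmove semantics. In JIT/indirect mode the copy calls a
 * native memmove through a function pointer or the native symbol table; in
 * plain AOT mode LLVMBuildMemMove emits the llvm.memmove intrinsic and
 * leaves the lowering to LLVM.
 */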

/* Wrapper over memset; in JIT mode its address is embedded as a constant
   (see aot_compile_op_memory_fill below) */
static void *
jit_memset(void *s, int c, size_t n)
{
    return memset(s, c, n);
}

bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef val, dst, dst_addr, len, res;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
    LLVMValueRef func, params[3];

    POP_I32(len);
    POP_I32(val);
    POP_I32(dst);

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = INT8_PTR_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
        aot_set_last_error("create LLVM function type failed.");
        return false;
    }

    if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
        aot_set_last_error("create LLVM function pointer type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        if (!(func = I64_CONST((uint64)(uintptr_t)jit_memset))
            || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        int32 func_index;
        func_index = aot_get_native_symbol_index(comp_ctx, "memset");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        if (!(func = LLVMGetNamedFunction(func_ctx->module, "memset"))
            && !(func =
                     LLVMAddFunction(func_ctx->module, "memset", func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    params[0] = dst_addr;
    params[1] = val;
    params[2] = len;
    if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params, 3,
                               "call_memset"))) {
        aot_set_last_error("llvm build memset failed.");
        return false;
    }

    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_BULK_MEMORY */

#if WASM_ENABLE_SHARED_MEMORY != 0
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint8 atomic_op, uint8 op_type, uint32 align,
                          uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, result;

    if (op_type == VALUE_TYPE_I32)
        POP_I32(value);
    else
        POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            if (op_type == VALUE_TYPE_I64)
                BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicRMW(
              comp_ctx->builder, atomic_op, maddr, value,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                     "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                     "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }

    return true;
fail:
    return false;
}
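
/*
 * Example mapping (illustrative): wasm `i32.atomic.rmw8.add_u` reaches
 * this function with op_type = VALUE_TYPE_I32 and bytes = 1: the operand
 * is truncated to i8, an `atomicrmw add` is emitted, and the old value is
 * zero-extended back to i32 before being pushed.
 */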

bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint8 op_type,
                              uint32 align, uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, expect, result;

    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
        POP_I32(expect);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
    }

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            if (op_type == VALUE_TYPE_I64) {
                BUILD_TRUNC(value, I32_TYPE);
                BUILD_TRUNC(expect, I32_TYPE);
            }
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            BUILD_TRUNC(expect, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            BUILD_TRUNC(expect, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicCmpXchg(
              comp_ctx->builder, maddr, expect, value,
              LLVMAtomicOrderingSequentiallyConsistent,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    /* cmpxchg returns an {iN, i1} struct; extract the previous value
       from its first field */
    if (!(result = LLVMBuildExtractValue(comp_ctx->builder, result, 0,
                                         "previous_value"))) {
        goto fail;
    }

    if (op_type == VALUE_TYPE_I32) {
        if (LLVMTypeOf(result) != I32_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                         "result_i32"))) {
                goto fail;
            }
        }
        PUSH_I32(result);
    }
    else {
        if (LLVMTypeOf(result) != I64_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                         "result_i64"))) {
                goto fail;
            }
        }
        PUSH_I64(result);
    }

    return true;
fail:
    return false;
}

bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint8 op_type, uint32 align, uint32 offset,
                           uint32 bytes)
{
    LLVMValueRef maddr, value, timeout, expect, cmp;
    LLVMValueRef param_values[5], ret_value, func, is_wait64;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    LLVMBasicBlockRef wait_fail, wait_success;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;

    POP_I64(timeout);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(expect);
        is_wait64 = I8_CONST(false);
        if (!(expect = LLVMBuildZExt(comp_ctx->builder, expect, I64_TYPE,
                                     "expect_i64"))) {
            goto fail;
        }
    }
    else {
        POP_I64(expect);
        is_wait64 = I8_CONST(true);
    }
    CHECK_LLVM_CONST(is_wait64);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I64_TYPE;
    param_types[3] = I64_TYPE;
    param_types[4] = INT8_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);

    /* Call function wasm_runtime_atomic_wait() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = expect;
    param_values[3] = timeout;
    param_values[4] = is_wait64;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntNE, ret_value, I32_NEG_ONE, cmp, "atomic_wait_ret");

    ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
    ADD_BASIC_BLOCK(wait_success, "wait_success");

    LLVMMoveBasicBlockAfter(wait_fail, block_curr);
    LLVMMoveBasicBlockAfter(wait_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, cmp, wait_success, wait_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If atomic wait failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);

    PUSH_I32(ret_value);

#if WASM_ENABLE_THREAD_MGR != 0
    /* Insert suspend check point */
    if (comp_ctx->enable_thread_mgr) {
        if (!check_suspend_flags(comp_ctx, func_ctx))
            return false;
    }
#endif
    return true;
fail:
    return false;
}
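
/*
 * Note (assumption): wasm_runtime_atomic_wait() is expected to return the
 * wasm-level wait result (0 "ok", 1 "not-equal", 2 "timed-out") and -1 on
 * failure, which is why the branch above treats -1 as the trap path; see
 * the runtime implementation for the authoritative contract.
 */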

bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint32 align,
                              uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, count;
    LLVMValueRef param_values[3], ret_value, func;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;

    POP_I32(count);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);

    /* Call function wasm_runtime_atomic_notify() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = count;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 3, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}

bool
aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return LLVMBuildFence(comp_ctx->builder,
                          LLVMAtomicOrderingSequentiallyConsistent, false, "")
               ? true
               : false;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */