aot_emit_memory.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "aot_emit_memory.h"
#include "aot_emit_exception.h"
#include "../aot/aot_runtime.h"

#define BUILD_ICMP(op, left, right, res, name) do {         \
    if (!(res = LLVMBuildICmp(comp_ctx->builder, op,        \
                              left, right, name))) {        \
        aot_set_last_error("llvm build icmp failed.");      \
        goto fail;                                          \
    }                                                       \
} while (0)

#define BUILD_OP(Op, left, right, res, name) do {           \
    if (!(res = LLVMBuild##Op(comp_ctx->builder,            \
                              left, right, name))) {        \
        aot_set_last_error("llvm build " #Op " fail.");     \
        goto fail;                                          \
    }                                                       \
} while (0)

#define ADD_BASIC_BLOCK(block, name) do {                             \
    if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,    \
                                                func_ctx->func,       \
                                                name))) {             \
        aot_set_last_error("llvm add basic block failed.");           \
        goto fail;                                                    \
    }                                                                 \
} while (0)

#define SET_BUILD_POS(block) \
    LLVMPositionBuilderAtEnd(comp_ctx->builder, block)
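
/* Select the precomputed bound-check value for an access width of
   `bytes`.  When the memory space may change at runtime (e.g. after
   memory.grow), the bound is kept in memory and must be reloaded. */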
static LLVMValueRef
get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       uint32 bytes)
{
    LLVMValueRef mem_check_bound = NULL;

    switch (bytes) {
        case 1:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_1byte;
            break;
        case 2:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_2bytes;
            break;
        case 4:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_4bytes;
            break;
        case 8:
            mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
            break;
        default:
            bh_assert(0);
            return NULL;
    }

    if (func_ctx->mem_space_unchanged)
        return mem_check_bound;

    if (!(mem_check_bound = LLVMBuildLoad(comp_ctx->builder,
                                          mem_check_bound,
                                          "mem_check_bound"))) {
        aot_set_last_error("llvm build load failed.");
        return NULL;
    }
    return mem_check_bound;
}

static LLVMValueRef
get_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
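
/* Compute the native address for a `bytes`-wide access at
   `addr + offset`, emitting an out-of-bounds check unless the access
   can be proven safe at compile time (a constant address inside the
   initial memory, or an address already checked for this local). */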
static LLVMValueRef
check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                      uint32 offset, uint32 bytes)
{
    LLVMValueRef offset_const = I32_CONST(offset);
    LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
    LLVMValueRef mem_base_addr, mem_check_bound;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    AOTValue *aot_value;
    bool is_target_64bit;
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].memory_flags & 0x02;
#endif

    is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64))
                      ? true : false;

    CHECK_LLVM_CONST(offset_const);

    /* Get memory base address and memory data size */
    if (func_ctx->mem_space_unchanged
#if WASM_ENABLE_SHARED_MEMORY != 0
        || is_shared_memory
#endif
       ) {
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr =
                LLVMBuildLoad(comp_ctx->builder,
                              func_ctx->mem_info[0].mem_base_addr,
                              "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    aot_value =
        func_ctx->block_stack.block_list_end->value_stack.value_list_end;

    POP_I32(addr);

    /* Return the address directly if the offset is constant
       and the access is inside the linear memory space */
    if (LLVMIsConstant(addr)) {
        uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(addr)
                            + (uint64)offset;
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        /* Multiply in 64 bits to avoid 32-bit overflow */
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_offset + bytes <= mem_data_size) {
            /* inside memory space */
            offset1 = I32_CONST((uint32)mem_offset);
            CHECK_LLVM_CONST(offset1);
            if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder,
                                               mem_base_addr,
                                               &offset1, 1, "maddr"))) {
                aot_set_last_error("llvm build inbounds gep failed.");
                goto fail;
            }
            return maddr;
        }
    }

    if (is_target_64bit) {
        if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
                                           I64_TYPE, "offset_i64"))
            || !(addr = LLVMBuildZExt(comp_ctx->builder, addr,
                                      I64_TYPE, "addr_i64"))) {
            aot_set_last_error("llvm build zero extend failed.");
            goto fail;
        }
    }

    /* offset1 = offset + addr; */
    BUILD_OP(Add, offset_const, addr, offset1, "offset1");

    if (comp_ctx->enable_bound_check
        && !(aot_value->is_local
             && aot_checked_addr_list_find(func_ctx, aot_value->local_idx,
                                           offset, bytes))) {
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;

        if (init_page_count == 0) {
            LLVMValueRef mem_size;

            if (!(mem_size = get_memory_size(comp_ctx, func_ctx))) {
                goto fail;
            }
            BUILD_ICMP(LLVMIntEQ, mem_size, I32_ZERO, cmp, "is_zero");
            ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
            LLVMMoveBasicBlockAfter(check_succ, block_curr);
            if (!aot_emit_exception(comp_ctx, func_ctx,
                                    EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                                    true, cmp, check_succ)) {
                goto fail;
            }
            SET_BUILD_POS(check_succ);
            block_curr = check_succ;
        }

        if (!(mem_check_bound =
                get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
            goto fail;
        }

        if (is_target_64bit) {
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
        }
        else {
            /* Check integer overflow */
            BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
            BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp2, "cmp2");
            BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
        }

        /* Add basic blocks */
        ADD_BASIC_BLOCK(check_succ, "check_succ");
        LLVMMoveBasicBlockAfter(check_succ, block_curr);

        if (!aot_emit_exception(comp_ctx, func_ctx,
                                EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                                true, cmp, check_succ)) {
            goto fail;
        }

        SET_BUILD_POS(check_succ);

        if (aot_value->is_local) {
            if (!aot_checked_addr_list_add(func_ctx, aot_value->local_idx,
                                           offset, bytes))
                goto fail;
        }
    }

    /* maddr = mem_base_addr + offset1 */
    if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder, mem_base_addr,
                                       &offset1, 1, "maddr"))) {
        aot_set_last_error("llvm build inbounds gep failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}
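
/*
 * Illustrative shape of the IR emitted above for a checked access on a
 * 64-bit target (value names are indicative, not exact):
 *
 *   %offset1 = add i64 %addr_i64, <offset>
 *   %cmp     = icmp ugt i64 %offset1, %mem_check_bound
 *   br i1 %cmp, label %got_exception, label %check_succ
 * check_succ:
 *   %maddr   = getelementptr inbounds i8, i8* %mem_base, i64 %offset1
 */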
#define BUILD_PTR_CAST(ptr_type) do {                        \
    if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, \
                                   ptr_type, "data_ptr"))) { \
        aot_set_last_error("llvm build bit cast failed.");   \
        goto fail;                                           \
    }                                                        \
} while (0)

#define BUILD_LOAD() do {                                   \
    if (!(value = LLVMBuildLoad(comp_ctx->builder, maddr,   \
                                "data"))) {                 \
        aot_set_last_error("llvm build load failed.");      \
        goto fail;                                          \
    }                                                       \
    LLVMSetAlignment(value, 1);                             \
} while (0)

#define BUILD_TRUNC(value, data_type) do {                   \
    if (!(value = LLVMBuildTrunc(comp_ctx->builder, value,   \
                                 data_type, "val_trunc"))) { \
        aot_set_last_error("llvm build trunc failed.");      \
        goto fail;                                           \
    }                                                        \
} while (0)

#define BUILD_STORE() do {                                          \
    LLVMValueRef res;                                               \
    if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) { \
        aot_set_last_error("llvm build store failed.");             \
        goto fail;                                                  \
    }                                                               \
    LLVMSetAlignment(res, 1);                                       \
} while (0)

#define BUILD_SIGN_EXT(dst_type) do {                       \
    if (!(value = LLVMBuildSExt(comp_ctx->builder, value,   \
                                dst_type, "data_s_ext"))) { \
        aot_set_last_error("llvm build sign ext failed.");  \
        goto fail;                                          \
    }                                                       \
} while (0)

#define BUILD_ZERO_EXT(dst_type) do {                       \
    if (!(value = LLVMBuildZExt(comp_ctx->builder, value,   \
                                dst_type, "data_z_ext"))) { \
        aot_set_last_error("llvm build zero ext failed.");  \
        goto fail;                                          \
    }                                                       \
} while (0)
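
/* Shared-memory atomics must be naturally aligned: trap with
   EXCE_UNALIGNED_ATOMIC when (addr & ((1 << align) - 1)) != 0. */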
#if WASM_ENABLE_SHARED_MEMORY != 0
bool
check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       LLVMValueRef addr, uint32 align)
{
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_align_succ;
    LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
    LLVMValueRef res;

    CHECK_LLVM_CONST(align_mask);

    /* Convert pointer to int */
    if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr,
                                   I32_TYPE, "address"))) {
        aot_set_last_error("llvm build ptr to int failed.");
        goto fail;
    }

    /* The memory address should be aligned */
    BUILD_OP(And, addr, align_mask, res, "and");
    BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");

    /* Add basic blocks */
    ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
    LLVMMoveBasicBlockAfter(check_align_succ, block_curr);

    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_UNALIGNED_ATOMIC,
                            true, res, check_align_succ)) {
        goto fail;
    }

    SET_BUILD_POS(check_align_succ);
    return true;
fail:
    return false;
}

#define BUILD_ATOMIC_LOAD(align) do {                                  \
    if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
        goto fail;                                                     \
    }                                                                  \
    if (!(value = LLVMBuildLoad(comp_ctx->builder, maddr,              \
                                "data"))) {                            \
        aot_set_last_error("llvm build load failed.");                 \
        goto fail;                                                     \
    }                                                                  \
    LLVMSetAlignment(value, 1 << align);                               \
    LLVMSetVolatile(value, true);                                      \
    LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent);  \
} while (0)

#define BUILD_ATOMIC_STORE(align) do {                                 \
    LLVMValueRef res;                                                  \
    if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
        goto fail;                                                     \
    }                                                                  \
    if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) {    \
        aot_set_last_error("llvm build store failed.");                \
        goto fail;                                                     \
    }                                                                  \
    LLVMSetAlignment(res, 1 << align);                                 \
    LLVMSetVolatile(res, true);                                        \
    LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent);    \
} while (0)
#endif
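
/* Lowering pattern for the wasm load opcodes: bounds-check the
   address, bitcast the native pointer to the access width, load,
   then sign- or zero-extend sub-word results to the stack type. */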
bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    switch (bytes) {
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align);
            else
#endif
                BUILD_LOAD();
            break;
        case 2:
        case 1:
            if (bytes == 2)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align);
                BUILD_ZERO_EXT(I32_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD();
                if (sign)
                    BUILD_SIGN_EXT(I32_TYPE);
                else
                    BUILD_ZERO_EXT(I32_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I32(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset, uint32 bytes,
                        bool sign, bool atomic)
{
    LLVMValueRef maddr, value = NULL;

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic)
                BUILD_ATOMIC_LOAD(align);
            else
#endif
                BUILD_LOAD();
            break;
        case 4:
        case 2:
        case 1:
            if (bytes == 4)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else if (bytes == 2)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE);
#if WASM_ENABLE_SHARED_MEMORY != 0
            if (atomic) {
                BUILD_ATOMIC_LOAD(align);
                BUILD_ZERO_EXT(I64_TYPE);
            }
            else
#endif
            {
                BUILD_LOAD();
                if (sign)
                    BUILD_SIGN_EXT(I64_TYPE);
                else
                    BUILD_ZERO_EXT(I64_TYPE);
            }
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH_I64(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
        return false;

    BUILD_PTR_CAST(F32_PTR_TYPE);
    BUILD_LOAD();
    PUSH_F32(value);
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
        return false;

    BUILD_PTR_CAST(F64_PTR_TYPE);
    BUILD_LOAD();
    PUSH_F64(value);
    return true;
fail:
    return false;
}
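
/* Store opcodes mirror the loads: pop the value, bounds-check the
   address, truncate wider stack values down to the access width,
   then emit a (possibly atomic) store through the casted pointer. */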
bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;

    POP_I32(value);

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    switch (bytes) {
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset, uint32 bytes,
                         bool atomic)
{
    LLVMValueRef maddr, value;

    POP_I64(value);

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

#if WASM_ENABLE_SHARED_MEMORY != 0
    if (atomic)
        BUILD_ATOMIC_STORE(align);
    else
#endif
        BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    POP_F32(value);

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
        return false;

    BUILD_PTR_CAST(F32_PTR_TYPE);
    BUILD_STORE();
    return true;
fail:
    return false;
}

bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 align, uint32 offset)
{
    LLVMValueRef maddr, value;

    POP_F64(value);

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
        return false;

    BUILD_PTR_CAST(F64_PTR_TYPE);
    BUILD_STORE();
    return true;
fail:
    return false;
}

static LLVMValueRef
get_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size;

    if (func_ctx->mem_space_unchanged) {
        mem_size = func_ctx->mem_info[0].mem_cur_page_count_addr;
    }
    else {
        if (!(mem_size =
                LLVMBuildLoad(comp_ctx->builder,
                              func_ctx->mem_info[0].mem_cur_page_count_addr,
                              "mem_size"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }
    return mem_size;
fail:
    return NULL;
}
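
/* memory.size pushes the current page count; memory.grow calls into
   the runtime's wasm_runtime_enlarge_memory() and pushes the previous
   page count on success or -1 on failure, per the wasm spec. */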
bool
aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_size(comp_ctx, func_ctx);

    if (mem_size)
        PUSH_I32(mem_size);
    return mem_size ? true : false;
fail:
    return false;
}

bool
aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef mem_size = get_memory_size(comp_ctx, func_ctx);
    LLVMValueRef delta, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;

    if (!mem_size)
        return false;

    POP_I32(delta);

    /* Function type of wasm_runtime_enlarge_memory() */
    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
        aot_set_last_error("llvm add function type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        /* JIT mode, call the function directly */
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
            aot_set_last_error("llvm add pointer type failed.");
            return false;
        }
        if (!(value = I64_CONST((uint64)(uintptr_t)
                                wasm_runtime_enlarge_memory))
            || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else {
        char *func_name = "wasm_runtime_enlarge_memory";
        /* AOT mode, declare the function */
        if (!(func = LLVMGetNamedFunction(comp_ctx->module, func_name))
            && !(func = LLVMAddFunction(comp_ctx->module,
                                        func_name, func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    /* Call function wasm_runtime_enlarge_memory() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = delta;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");

    /* ret_value = enlarge succeeded ? previous page count : -1 */
    if (!(ret_value = LLVMBuildSelect(comp_ctx->builder, ret_value,
                                      mem_size, I32_NEG_ONE,
                                      "mem_grow_ret"))) {
        aot_set_last_error("llvm build select failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}
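
/* Resolve a runtime helper for a call from generated code: in JIT
   mode the helper's host address is baked in as a constant function
   pointer; in AOT mode it is declared as an external symbol that the
   loader relocates at load time. */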
#define GET_AOT_FUNCTION(name, argc) do {                               \
    if (!(func_type = LLVMFunctionType(ret_type, param_types,           \
                                       argc, false))) {                 \
        aot_set_last_error("llvm add function type failed.");           \
        return false;                                                   \
    }                                                                   \
    if (comp_ctx->is_jit_mode) {                                        \
        /* JIT mode, call the function directly */                      \
        if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {         \
            aot_set_last_error("llvm add pointer type failed.");        \
            return false;                                               \
        }                                                               \
        if (!(value = I64_CONST((uint64)(uintptr_t)name))               \
            || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {     \
            aot_set_last_error("create LLVM value failed.");            \
            return false;                                               \
        }                                                               \
    }                                                                   \
    else {                                                              \
        char *func_name = #name;                                        \
        /* AOT mode, declare the function */                            \
        if (!(func = LLVMGetNamedFunction(comp_ctx->module, func_name)) \
            && !(func = LLVMAddFunction(comp_ctx->module,               \
                                        func_name, func_type))) {       \
            aot_set_last_error("llvm add function failed.");            \
            return false;                                               \
        }                                                               \
    }                                                                   \
} while (0)

#if WASM_ENABLE_BULK_MEMORY != 0
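/* Bulk-memory variant of the overflow check: both the start offset
   and the length are dynamic i32 values, so the check widens
   offset + len to i64 and compares it against the loaded memory
   size value. */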
static LLVMValueRef
check_bulk_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           LLVMValueRef offset, LLVMValueRef bytes)
{
    LLVMValueRef maddr, max_addr, cmp;
    LLVMValueRef mem_base_addr;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef check_succ;
    LLVMValueRef mem_size;

    /* Get memory base address and memory data size */
#if WASM_ENABLE_SHARED_MEMORY != 0
    bool is_shared_memory =
        comp_ctx->comp_data->memories[0].memory_flags & 0x02;

    if (func_ctx->mem_space_unchanged || is_shared_memory) {
#else
    if (func_ctx->mem_space_unchanged) {
#endif
        mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
    }
    else {
        if (!(mem_base_addr =
                LLVMBuildLoad(comp_ctx->builder,
                              func_ctx->mem_info[0].mem_base_addr,
                              "mem_base"))) {
            aot_set_last_error("llvm build load failed.");
            goto fail;
        }
    }

    /* Return the address directly if both offset and length are
       constants and the range is inside the linear memory space */
    if (LLVMIsConstant(offset) && LLVMIsConstant(bytes)) {
        uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(offset);
        uint64 mem_len = (uint64)LLVMConstIntGetZExtValue(bytes);
        uint32 num_bytes_per_page =
            comp_ctx->comp_data->memories[0].num_bytes_per_page;
        uint32 init_page_count =
            comp_ctx->comp_data->memories[0].mem_init_page_count;
        /* Multiply in 64 bits to avoid 32-bit overflow */
        uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;

        if (mem_data_size > 0
            && mem_offset + mem_len <= mem_data_size) {
            /* inside memory space */
            /* maddr = mem_base_addr + offset */
            if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder,
                                               mem_base_addr,
                                               &offset, 1, "maddr"))) {
                aot_set_last_error("llvm build inbounds gep failed.");
                goto fail;
            }
            return maddr;
        }
    }

    /* Get the current memory size for the dynamic bounds check */
    if (!(mem_size = get_memory_size(comp_ctx, func_ctx))) {
        goto fail;
    }

    ADD_BASIC_BLOCK(check_succ, "check_succ");
    LLVMMoveBasicBlockAfter(check_succ, block_curr);

    offset =
        LLVMBuildZExt(comp_ctx->builder, offset, I64_TYPE, "extend_offset");
    bytes = LLVMBuildZExt(comp_ctx->builder, bytes, I64_TYPE, "extend_len");
    mem_size =
        LLVMBuildZExt(comp_ctx->builder, mem_size, I64_TYPE, "extend_size");

    BUILD_OP(Add, offset, bytes, max_addr, "max_addr");
    BUILD_ICMP(LLVMIntUGT, max_addr, mem_size, cmp, "cmp_max_mem_addr");

    if (!aot_emit_exception(comp_ctx, func_ctx,
                            EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                            true, cmp, check_succ)) {
        goto fail;
    }

    /* maddr = mem_base_addr + offset */
    if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder, mem_base_addr,
                                       &offset, 1, "maddr"))) {
        aot_set_last_error("llvm build inbounds gep failed.");
        goto fail;
    }
    return maddr;
fail:
    return NULL;
}
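
/* memory.init copies bytes from a passive data segment into linear
   memory via the runtime helper aot_memory_init(); when the helper
   reports failure, the generated code returns immediately so the
   runtime can surface the recorded trap. */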
bool
aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint32 seg_index)
{
    LLVMValueRef seg, offset, dst, len, param_values[5], ret_value, func,
                 value;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    LLVMBasicBlockRef mem_init_fail, init_success;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    POP_I32(len);
    POP_I32(offset);
    POP_I32(dst);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = I32_TYPE;
    param_types[3] = I32_TYPE;
    param_types[4] = I32_TYPE;
    ret_type = INT8_TYPE;

    GET_AOT_FUNCTION(aot_memory_init, 5);

    /* Call function aot_memory_init() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    param_values[2] = offset;
    param_values[3] = len;
    param_values[4] = dst;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_init_ret");

    ADD_BASIC_BLOCK(mem_init_fail, "mem_init_fail");
    ADD_BASIC_BLOCK(init_success, "init_success");
    LLVMMoveBasicBlockAfter(mem_init_fail, block_curr);
    LLVMMoveBasicBlockAfter(init_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, ret_value,
                         init_success, mem_init_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If memory.init failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
    if (aot_func_type->result_count) {
        switch (aot_func_type->types[aot_func_type->param_count]) {
            case VALUE_TYPE_I32:
                LLVMBuildRet(comp_ctx->builder, I32_ZERO);
                break;
            case VALUE_TYPE_I64:
                LLVMBuildRet(comp_ctx->builder, I64_ZERO);
                break;
            case VALUE_TYPE_F32:
                LLVMBuildRet(comp_ctx->builder, F32_ZERO);
                break;
            case VALUE_TYPE_F64:
                LLVMBuildRet(comp_ctx->builder, F64_ZERO);
                break;
        }
    }
    else {
        LLVMBuildRetVoid(comp_ctx->builder);
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
    return true;
fail:
    return false;
}
bool
aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         uint32 seg_index)
{
    LLVMValueRef seg, param_values[2], ret_value, func, value;
    LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;

    seg = I32_CONST(seg_index);
    CHECK_LLVM_CONST(seg);

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    ret_type = INT8_TYPE;

    GET_AOT_FUNCTION(aot_data_drop, 2);

    /* Call function aot_data_drop() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = seg;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 2, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }
    return true;
fail:
    return false;
}
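
/* memory.copy bounds-checks the source and destination ranges and
   lowers to LLVM's memmove intrinsic (memmove, since the wasm ranges
   may overlap); memory.fill checks its destination range and lowers
   to the memset intrinsic. */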
bool
aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef src, dst, src_addr, dst_addr, len, res;

    POP_I32(len);
    POP_I32(src);
    POP_I32(dst);

    if (!(src_addr =
            check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
        return false;

    if (!(dst_addr =
            check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1,
                                 src_addr, 1, len))) {
        aot_set_last_error("llvm build memmove failed.");
        return false;
    }
    return true;
fail:
    return false;
}

bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef val, dst, dst_addr, len, res;

    POP_I32(len);
    POP_I32(val);
    POP_I32(dst);

    if (!(dst_addr =
            check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    val = LLVMBuildIntCast2(comp_ctx->builder, val, INT8_TYPE,
                            true, "mem_set_value");

    if (!(res = LLVMBuildMemSet(comp_ctx->builder, dst_addr,
                                val, len, 1))) {
        aot_set_last_error("llvm build memset failed.");
        return false;
    }
    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_BULK_MEMORY */

#if WASM_ENABLE_SHARED_MEMORY != 0
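/* atomic.rmw ops: after bounds and alignment checks, emit a single
   sequentially-consistent LLVM atomicrmw instruction at the access
   width, then zero-extend the old value back to the stack type. */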
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx,
                          AOTFuncContext *func_ctx,
                          uint8 atomic_op, uint8 op_type,
                          uint32 align, uint32 offset,
                          uint32 bytes)
{
    LLVMValueRef maddr, value, result;

    if (op_type == VALUE_TYPE_I32)
        POP_I32(value);
    else
        POP_I64(value);

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            if (op_type == VALUE_TYPE_I64)
                BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result =
            LLVMBuildAtomicRMW(comp_ctx->builder,
                               atomic_op, maddr, value,
                               LLVMAtomicOrderingSequentiallyConsistent,
                               false))) {
        goto fail;
    }
    LLVMSetVolatile(result, true);

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I32_TYPE, "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I64_TYPE, "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}
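
/* atomic.cmpxchg: LLVM's cmpxchg yields a {old value, success flag}
   pair; wasm only wants the old value, so extract field 0 and extend
   it back to the operand's stack type. */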
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              uint8 op_type, uint32 align,
                              uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, expect, result;

    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
        POP_I32(expect);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
    }

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            BUILD_PTR_CAST(INT64_PTR_TYPE);
            break;
        case 4:
            BUILD_PTR_CAST(INT32_PTR_TYPE);
            if (op_type == VALUE_TYPE_I64) {
                BUILD_TRUNC(value, I32_TYPE);
                BUILD_TRUNC(expect, I32_TYPE);
            }
            break;
        case 2:
            BUILD_PTR_CAST(INT16_PTR_TYPE);
            BUILD_TRUNC(value, INT16_TYPE);
            BUILD_TRUNC(expect, INT16_TYPE);
            break;
        case 1:
            BUILD_PTR_CAST(INT8_PTR_TYPE);
            BUILD_TRUNC(value, INT8_TYPE);
            BUILD_TRUNC(expect, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result =
            LLVMBuildAtomicCmpXchg(comp_ctx->builder, maddr, expect, value,
                                   LLVMAtomicOrderingSequentiallyConsistent,
                                   LLVMAtomicOrderingSequentiallyConsistent,
                                   false))) {
        goto fail;
    }
    LLVMSetVolatile(result, true);

    /* CmpXchg returns a {loaded value, success flag} structure;
       extract the previous value from the structure */
    if (!(result =
            LLVMBuildExtractValue(comp_ctx->builder,
                                  result, 0, "previous_value"))) {
        goto fail;
    }

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I32_TYPE, "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
                                     I64_TYPE, "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }
    return true;
fail:
    return false;
}
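
/* atomic.wait / atomic.notify call into the runtime's futex-style
   helpers; when wasm_runtime_atomic_wait reports failure, the
   generated code returns early so the runtime can catch the
   exception, mirroring memory.init above. */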
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint8 op_type, uint32 align,
                           uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, timeout, expect, cmp;
    LLVMValueRef param_values[5], ret_value, func, is_wait64;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    LLVMBasicBlockRef wait_fail, wait_success;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;

    POP_I64(timeout);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(expect);
        is_wait64 = I8_CONST(false);
        if (!(expect =
                LLVMBuildZExt(comp_ctx->builder, expect,
                              I64_TYPE, "expect_i64"))) {
            goto fail;
        }
    }
    else {
        POP_I64(expect);
        is_wait64 = I8_CONST(true);
    }

    CHECK_LLVM_CONST(is_wait64);

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I64_TYPE;
    param_types[3] = I64_TYPE;
    param_types[4] = INT8_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);

    /* Call function wasm_runtime_atomic_wait() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = expect;
    param_values[3] = timeout;
    param_values[4] = is_wait64;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntSGT, ret_value, I32_ZERO, cmp, "atomic_wait_ret");

    ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
    ADD_BASIC_BLOCK(wait_success, "wait_success");
    LLVMMoveBasicBlockAfter(wait_fail, block_curr);
    LLVMMoveBasicBlockAfter(wait_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, cmp,
                         wait_success, wait_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If atomic wait failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
    if (aot_func_type->result_count) {
        switch (aot_func_type->types[aot_func_type->param_count]) {
            case VALUE_TYPE_I32:
                LLVMBuildRet(comp_ctx->builder, I32_ZERO);
                break;
            case VALUE_TYPE_I64:
                LLVMBuildRet(comp_ctx->builder, I64_ZERO);
                break;
            case VALUE_TYPE_F32:
                LLVMBuildRet(comp_ctx->builder, F32_ZERO);
                break;
            case VALUE_TYPE_F64:
                LLVMBuildRet(comp_ctx->builder, F64_ZERO);
                break;
        }
    }
    else {
        LLVMBuildRetVoid(comp_ctx->builder);
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);
    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}

bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              uint32 align, uint32 offset, uint32 bytes)
{
    LLVMValueRef maddr, value, count;
    LLVMValueRef param_values[3], ret_value, func;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;

    POP_I32(count);

    if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);

    /* Call function wasm_runtime_atomic_notify() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = count;
    if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                    param_values, 3, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    PUSH_I32(ret_value);
    return true;
fail:
    return false;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */