aot_emit_memory.c

  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "aot_emit_memory.h"
  6. #include "aot_compiler.h"
  7. #include "aot_emit_exception.h"
  8. #include "../aot/aot_runtime.h"
  9. #include "aot_intrinsic.h"
  10. #include "aot_emit_control.h"
  11. #define BUILD_ICMP(op, left, right, res, name) \
  12. do { \
  13. if (!(res = \
  14. LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
  15. aot_set_last_error("llvm build icmp failed."); \
  16. goto fail; \
  17. } \
  18. } while (0)
  19. #define BUILD_OP(Op, left, right, res, name) \
  20. do { \
  21. if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) { \
  22. aot_set_last_error("llvm build " #Op " fail."); \
  23. goto fail; \
  24. } \
  25. } while (0)
  26. #define ADD_BASIC_BLOCK(block, name) \
  27. do { \
  28. if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context, \
  29. func_ctx->func, name))) { \
  30. aot_set_last_error("llvm add basic block failed."); \
  31. goto fail; \
  32. } \
  33. } while (0)
  34. #define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block)
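/* Zero-extend a 32-bit offset/length value to i64 on 64-bit targets so it can
   be passed to runtime helpers; on 32-bit targets the value is left as-is. */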
  35. static bool
  36. zero_extend_u64(AOTCompContext *comp_ctx, LLVMValueRef *value, const char *name)
  37. {
  38. if (comp_ctx->pointer_size == sizeof(uint64)) {
  39. /* zero extend to uint64 if the target is 64-bit */
  40. *value = LLVMBuildZExt(comp_ctx->builder, *value, I64_TYPE, name);
  41. if (!*value) {
  42. aot_set_last_error("llvm build zero extend failed.");
  43. return false;
  44. }
  45. }
  46. return true;
  47. }
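/* Return the boundary-check limit for an access of `bytes` bytes, i.e. the
   largest address value that still allows a `bytes`-wide access. If the
   memory space may change at runtime, the limit is reloaded from the memory
   info instead of reusing the cached value. */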
  48. static LLVMValueRef
  49. get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  50. uint32 bytes)
  51. {
  52. LLVMValueRef mem_check_bound = NULL;
  53. switch (bytes) {
  54. case 1:
  55. mem_check_bound = func_ctx->mem_info[0].mem_bound_check_1byte;
  56. break;
  57. case 2:
  58. mem_check_bound = func_ctx->mem_info[0].mem_bound_check_2bytes;
  59. break;
  60. case 4:
  61. mem_check_bound = func_ctx->mem_info[0].mem_bound_check_4bytes;
  62. break;
  63. case 8:
  64. mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
  65. break;
  66. case 16:
  67. mem_check_bound = func_ctx->mem_info[0].mem_bound_check_16bytes;
  68. break;
  69. default:
  70. bh_assert(0);
  71. return NULL;
  72. }
  73. if (func_ctx->mem_space_unchanged)
  74. return mem_check_bound;
  75. if (!(mem_check_bound = LLVMBuildLoad2(
  76. comp_ctx->builder,
  77. (comp_ctx->pointer_size == sizeof(uint64)) ? I64_TYPE : I32_TYPE,
  78. mem_check_bound, "mem_check_bound"))) {
  79. aot_set_last_error("llvm build load failed.");
  80. return NULL;
  81. }
  82. return mem_check_bound;
  83. }
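/* MSVC has no ffs(); provide a minimal fallback that returns the 1-based
   index of the least significant set bit, or 0 when n is 0. */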
  84. #if defined(_WIN32) || defined(_WIN32_)
  85. static inline int
  86. ffs(int n)
  87. {
  88. int pos = 0;
  89. if (n == 0)
  90. return 0;
  91. while (!(n & 1)) {
  92. pos++;
  93. n >>= 1;
  94. }
  95. return pos + 1;
  96. }
  97. #endif
  98. static LLVMValueRef
  99. get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
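/* Emit the address computation and bounds check for a `bytes`-wide access at
   `addr + offset`, where addr is popped from the value stack. Fast path: a
   fully constant address inside the initial memory size is returned directly.
   Otherwise the code optionally redirects the access to the shared heap,
   performs the (possibly cached) boundary check, and returns either a GEP
   into the linear memory or, when segue is enabled, an offset cast to the
   GS-based i8 pointer type. */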
  100. LLVMValueRef
  101. aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  102. mem_offset_t offset, uint32 bytes, bool enable_segue,
  103. unsigned int *alignp)
  104. {
  105. LLVMValueRef offset_const =
  106. MEMORY64_COND_VALUE(I64_CONST(offset), I32_CONST(offset));
  107. LLVMValueRef addr, maddr, maddr_phi = NULL, offset1, cmp1, cmp2, cmp;
  108. LLVMValueRef mem_base_addr, mem_check_bound;
  109. LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
  110. LLVMBasicBlockRef check_succ, block_maddr_phi = NULL;
  111. AOTValue *aot_value_top;
  112. uint32 local_idx_of_aot_value = 0;
  113. uint64 const_value;
  114. bool is_target_64bit, is_local_of_aot_value = false;
  115. bool is_const = false;
  116. #if WASM_ENABLE_SHARED_MEMORY != 0
  117. bool is_shared_memory =
  118. comp_ctx->comp_data->memories[0].flags & SHARED_MEMORY_FLAG;
  119. #endif
  120. #if WASM_ENABLE_MEMORY64 == 0
  121. bool is_memory64 = false;
  122. #else
  123. bool is_memory64 = IS_MEMORY64;
  124. #endif
  125. is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;
  126. if (comp_ctx->is_indirect_mode
  127. && aot_intrinsic_check_capability(
  128. comp_ctx, MEMORY64_COND_VALUE("i64.const", "i32.const"))) {
  129. WASMValue wasm_value;
  130. #if WASM_ENABLE_MEMORY64 != 0
  131. if (IS_MEMORY64) {
  132. wasm_value.i64 = offset;
  133. }
  134. else
  135. #endif
  136. {
  137. wasm_value.i32 = (int32)offset;
  138. }
  139. offset_const = aot_load_const_from_table(
  140. comp_ctx, func_ctx->native_symbol, &wasm_value,
  141. MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32));
  142. if (!offset_const) {
  143. return NULL;
  144. }
  145. }
  146. else {
  147. CHECK_LLVM_CONST(offset_const);
  148. }
  149. /* Get memory base address and memory data size */
  150. if (func_ctx->mem_space_unchanged
  151. #if WASM_ENABLE_SHARED_MEMORY != 0
  152. || is_shared_memory
  153. #endif
  154. ) {
  155. mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
  156. }
  157. else {
  158. if (!(mem_base_addr = LLVMBuildLoad2(
  159. comp_ctx->builder, OPQ_PTR_TYPE,
  160. func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
  161. aot_set_last_error("llvm build load failed.");
  162. goto fail;
  163. }
  164. }
  165. aot_value_top =
  166. func_ctx->block_stack.block_list_end->value_stack.value_list_end;
  167. if (aot_value_top) {
  168. /* aot_value_top is freed in the following POP_MEM_OFFSET(addr),
  169. so save its fields here for further use */
  170. is_local_of_aot_value = aot_value_top->is_local;
  171. is_const = aot_value_top->is_const;
  172. local_idx_of_aot_value = aot_value_top->local_idx;
  173. const_value = aot_value_top->const_value;
  174. }
  175. POP_MEM_OFFSET(addr);
  176. /*
  177. * Note: we don't throw the integer-overflow exception here since it must
  178. * already have been thrown earlier when converting float to integer
  179. */
  180. /* return address directly if constant offset and inside memory space */
  181. if (LLVMIsEfficientConstInt(addr) || is_const) {
  182. uint64 value;
  183. if (LLVMIsEfficientConstInt(addr)) {
  184. value = (uint64)LLVMConstIntGetZExtValue(addr);
  185. }
  186. else {
  187. value = const_value;
  188. }
  189. uint64 mem_offset = value + (uint64)offset;
  190. uint32 num_bytes_per_page =
  191. comp_ctx->comp_data->memories[0].num_bytes_per_page;
  192. uint32 init_page_count =
  193. comp_ctx->comp_data->memories[0].init_page_count;
  194. uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;
  195. if (alignp != NULL) {
  196. /*
  197. * A note about max_align below:
  198. * the assumption here is the base address of a linear memory
  199. * has the natural alignment. for platforms using mmap, it can
  200. * be even larger. for now, use a conservative value.
  201. */
  202. const unsigned int max_align = 8;
  203. int shift = ffs((int)(unsigned int)mem_offset);
  204. if (shift == 0) {
  205. *alignp = max_align;
  206. }
  207. else {
  208. unsigned int align = 1 << (shift - 1);
  209. if (align > max_align) {
  210. align = max_align;
  211. }
  212. *alignp = align;
  213. }
  214. }
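/* Example of the alignment derivation above: mem_offset = 0x1004 gives
   ffs() == 3, so the known alignment is 1 << 2 = 4 (capped at max_align = 8);
   mem_offset = 0 reports max_align itself. */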
  215. if (mem_offset + bytes <= mem_data_size) {
  216. /* inside memory space */
  217. if (comp_ctx->pointer_size == sizeof(uint64))
  218. offset1 = I64_CONST(mem_offset);
  219. else
  220. offset1 = I32_CONST((uint32)mem_offset);
  221. CHECK_LLVM_CONST(offset1);
  222. if (!enable_segue) {
  223. if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder,
  224. INT8_TYPE, mem_base_addr,
  225. &offset1, 1, "maddr"))) {
  226. aot_set_last_error("llvm build add failed.");
  227. goto fail;
  228. }
  229. }
  230. else {
  231. if (!(maddr = LLVMBuildIntToPtr(comp_ctx->builder, offset1,
  232. INT8_PTR_TYPE_GS, "maddr"))) {
  233. aot_set_last_error("llvm build IntToPtr failed.");
  234. goto fail;
  235. }
  236. }
  237. return maddr;
  238. }
  239. }
  240. else if (alignp != NULL) {
  241. *alignp = 1;
  242. }
  243. if (is_target_64bit) {
  244. if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
  245. I64_TYPE, "offset_i64"))
  246. || !(addr = LLVMBuildZExt(comp_ctx->builder, addr, I64_TYPE,
  247. "addr_i64"))) {
  248. aot_set_last_error("llvm build zero extend failed.");
  249. goto fail;
  250. }
  251. }
  252. /* offset1 = offset + addr; */
  253. /* TODO: check whether integer overflow occurs when memory is 64-bit
  254. and boundary check is enabled */
  255. BUILD_OP(Add, offset_const, addr, offset1, "offset1");
  256. if (comp_ctx->enable_shared_heap /* TODO: && mem_idx == 0 */) {
  257. LLVMBasicBlockRef app_addr_in_shared_heap, app_addr_in_linear_mem;
  258. LLVMValueRef is_in_shared_heap, shared_heap_check_bound = NULL;
  259. /* Add basic blocks */
  260. ADD_BASIC_BLOCK(app_addr_in_shared_heap, "app_addr_in_shared_heap");
  261. ADD_BASIC_BLOCK(app_addr_in_linear_mem, "app_addr_in_linear_mem");
  262. ADD_BASIC_BLOCK(block_maddr_phi, "maddr_phi");
  263. LLVMMoveBasicBlockAfter(app_addr_in_shared_heap, block_curr);
  264. LLVMMoveBasicBlockAfter(app_addr_in_linear_mem,
  265. app_addr_in_shared_heap);
  266. LLVMMoveBasicBlockAfter(block_maddr_phi, app_addr_in_linear_mem);
  267. LLVMPositionBuilderAtEnd(comp_ctx->builder, block_maddr_phi);
  268. if (!(maddr_phi =
  269. LLVMBuildPhi(comp_ctx->builder,
  270. enable_segue ? INT8_PTR_TYPE_GS : INT8_PTR_TYPE,
  271. "maddr_phi"))) {
  272. aot_set_last_error("llvm build phi failed");
  273. goto fail;
  274. }
  275. LLVMPositionBuilderAtEnd(comp_ctx->builder, block_curr);
  276. if (!is_target_64bit) {
  277. /* Check whether integer overflow occurs in addr + offset */
  278. LLVMBasicBlockRef check_integer_overflow_end;
  279. ADD_BASIC_BLOCK(check_integer_overflow_end,
  280. "check_integer_overflow_end");
  281. LLVMMoveBasicBlockAfter(check_integer_overflow_end, block_curr);
  282. BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
  283. if (!aot_emit_exception(comp_ctx, func_ctx,
  284. EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true,
  285. cmp1, check_integer_overflow_end)) {
  286. goto fail;
  287. }
  288. SET_BUILD_POS(check_integer_overflow_end);
  289. }
  290. shared_heap_check_bound =
  291. is_memory64 ? I64_CONST(UINT64_MAX - bytes + 1)
  292. : (comp_ctx->pointer_size == sizeof(uint64)
  293. ? I64_CONST(UINT32_MAX - bytes + 1)
  294. : I32_CONST(UINT32_MAX - bytes + 1));
  295. CHECK_LLVM_CONST(shared_heap_check_bound);
  296. /* Check whether the bytes to access are in shared heap */
  297. if (!comp_ctx->enable_bound_check) {
  298. /* Use IntUGT but not IntUGE to compare, since (1) in the ems
  299. memory allocator, the hmu node includes hmu header and hmu
  300. memory, only the latter is returned to the caller as the
  301. allocated memory, the hmu header isn't returned so the
  302. first byte of the shared heap won't be accessed, (2) using
  303. IntUGT gets better performance than IntUGE in some cases */
  304. BUILD_ICMP(LLVMIntUGT, offset1, func_ctx->shared_heap_start_off,
  305. is_in_shared_heap, "is_in_shared_heap");
  306. /* We don't check the shared heap's upper boundary if boundary
  307. check isn't enabled, the runtime may also use the guard pages
  308. of shared heap to check the boundary if hardware boundary
  309. check feature is enabled. */
  310. }
  311. else {
  312. /* Use IntUGT but not IntUGE to compare, same as above */
  313. BUILD_ICMP(LLVMIntUGT, offset1, func_ctx->shared_heap_start_off,
  314. cmp1, "cmp1");
  315. /* Check the shared heap's upper boundary if boundary check is
  316. enabled */
  317. BUILD_ICMP(LLVMIntULE, offset1, shared_heap_check_bound, cmp2,
  318. "cmp2");
  319. BUILD_OP(And, cmp1, cmp2, is_in_shared_heap, "is_in_shared_heap");
  320. }
  321. if (!LLVMBuildCondBr(comp_ctx->builder, is_in_shared_heap,
  322. app_addr_in_shared_heap, app_addr_in_linear_mem)) {
  323. aot_set_last_error("llvm build cond br failed");
  324. goto fail;
  325. }
  326. LLVMPositionBuilderAtEnd(comp_ctx->builder, app_addr_in_shared_heap);
  327. /* Get native address inside shared heap */
  328. if (!(maddr =
  329. LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
  330. func_ctx->shared_heap_base_addr_adj,
  331. &offset1, 1, "maddr_shared_heap"))) {
  332. aot_set_last_error("llvm build inbounds gep failed");
  333. goto fail;
  334. }
  335. if (enable_segue) {
  336. LLVMValueRef mem_base_addr_u64, maddr_u64, offset_to_mem_base;
  337. if (!(maddr_u64 = LLVMBuildPtrToInt(comp_ctx->builder, maddr,
  338. I64_TYPE, "maddr_u64"))
  339. || !(mem_base_addr_u64 =
  340. LLVMBuildPtrToInt(comp_ctx->builder, mem_base_addr,
  341. I64_TYPE, "mem_base_addr_u64"))) {
  342. aot_set_last_error("llvm build ptr to int failed");
  343. goto fail;
  344. }
  345. if (!(offset_to_mem_base =
  346. LLVMBuildSub(comp_ctx->builder, maddr_u64,
  347. mem_base_addr_u64, "offset_to_mem_base"))) {
  348. aot_set_last_error("llvm build sub failed");
  349. goto fail;
  350. }
  351. if (!(maddr = LLVMBuildIntToPtr(
  352. comp_ctx->builder, offset_to_mem_base, INT8_PTR_TYPE_GS,
  353. "maddr_shared_heap_segue"))) {
  354. aot_set_last_error("llvm build int to ptr failed.");
  355. goto fail;
  356. }
  357. }
  358. LLVMAddIncoming(maddr_phi, &maddr, &app_addr_in_shared_heap, 1);
  359. if (!LLVMBuildBr(comp_ctx->builder, block_maddr_phi)) {
  360. aot_set_last_error("llvm build br failed");
  361. goto fail;
  362. }
  363. LLVMPositionBuilderAtEnd(comp_ctx->builder, app_addr_in_linear_mem);
  364. block_curr = LLVMGetInsertBlock(comp_ctx->builder);
  365. }
  366. if (comp_ctx->enable_bound_check
  367. && !(is_local_of_aot_value
  368. && aot_checked_addr_list_find(func_ctx, local_idx_of_aot_value,
  369. offset, bytes))) {
  370. uint32 init_page_count =
  371. comp_ctx->comp_data->memories[0].init_page_count;
  372. if (init_page_count == 0) {
  373. LLVMValueRef mem_size;
  374. if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) {
  375. goto fail;
  376. }
  377. BUILD_ICMP(LLVMIntEQ, mem_size,
  378. MEMORY64_COND_VALUE(I64_ZERO, I32_ZERO), cmp, "is_zero");
  379. ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
  380. LLVMMoveBasicBlockAfter(check_succ, block_curr);
  381. if (!aot_emit_exception(comp_ctx, func_ctx,
  382. EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
  383. check_succ)) {
  384. goto fail;
  385. }
  386. SET_BUILD_POS(check_succ);
  387. block_curr = check_succ;
  388. }
  389. if (!(mem_check_bound =
  390. get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
  391. goto fail;
  392. }
  393. if (is_target_64bit) {
  394. BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
  395. }
  396. else {
  397. if (comp_ctx->enable_shared_heap /* TODO: && mem_idx == 0 */) {
  398. /* Integer overflow has already been checked above */
  399. BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp, "cmp");
  400. }
  401. else {
  402. /* Check integer overflow */
  403. BUILD_ICMP(LLVMIntULT, offset1, addr, cmp1, "cmp1");
  404. BUILD_ICMP(LLVMIntUGT, offset1, mem_check_bound, cmp2, "cmp2");
  405. BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
  406. }
  407. }
  408. /* Add basic blocks */
  409. ADD_BASIC_BLOCK(check_succ, "check_succ");
  410. LLVMMoveBasicBlockAfter(check_succ, block_curr);
  411. if (!aot_emit_exception(comp_ctx, func_ctx,
  412. EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
  413. check_succ)) {
  414. goto fail;
  415. }
  416. SET_BUILD_POS(check_succ);
  417. if (is_local_of_aot_value) {
  418. if (!aot_checked_addr_list_add(func_ctx, local_idx_of_aot_value,
  419. offset, bytes))
  420. goto fail;
  421. }
  422. }
  423. if (!enable_segue) {
  424. /* maddr = mem_base_addr + offset1 */
  425. if (!(maddr =
  426. LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
  427. mem_base_addr, &offset1, 1, "maddr"))) {
  428. aot_set_last_error("llvm build add failed.");
  429. goto fail;
  430. }
  431. }
  432. else {
  433. LLVMValueRef maddr_base;
  434. if (!(maddr_base = LLVMBuildIntToPtr(comp_ctx->builder, addr,
  435. INT8_PTR_TYPE_GS, "maddr_base"))) {
  436. aot_set_last_error("llvm build int to ptr failed.");
  437. goto fail;
  438. }
  439. if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
  440. maddr_base, &offset_const, 1,
  441. "maddr"))) {
  442. aot_set_last_error("llvm build inboundgep failed.");
  443. goto fail;
  444. }
  445. }
  446. if (comp_ctx->enable_shared_heap /* TODO: && mem_idx == 0 */) {
  447. block_curr = LLVMGetInsertBlock(comp_ctx->builder);
  448. LLVMAddIncoming(maddr_phi, &maddr, &block_curr, 1);
  449. if (!LLVMBuildBr(comp_ctx->builder, block_maddr_phi)) {
  450. aot_set_last_error("llvm build br failed");
  451. goto fail;
  452. }
  453. LLVMPositionBuilderAtEnd(comp_ctx->builder, block_maddr_phi);
  454. return maddr_phi;
  455. }
  456. else
  457. return maddr;
  458. fail:
  459. return NULL;
  460. }
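/* Helper macros for the load/store emitters below: cast the checked native
   address to the width-specific pointer type, then emit the load/store and
   any truncation or sign/zero extension, using the alignment reported by
   aot_check_memory_overflow. */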
  461. #define BUILD_PTR_CAST(ptr_type) \
  462. do { \
  463. if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, ptr_type, \
  464. "data_ptr"))) { \
  465. aot_set_last_error("llvm build bit cast failed."); \
  466. goto fail; \
  467. } \
  468. } while (0)
  469. #define BUILD_LOAD(data_type) \
  470. do { \
  471. if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr, \
  472. "data"))) { \
  473. aot_set_last_error("llvm build load failed."); \
  474. goto fail; \
  475. } \
  476. LLVMSetAlignment(value, known_align); \
  477. } while (0)
  478. #define BUILD_TRUNC(value, data_type) \
  479. do { \
  480. if (!(value = LLVMBuildTrunc(comp_ctx->builder, value, data_type, \
  481. "val_trunc"))) { \
  482. aot_set_last_error("llvm build trunc failed."); \
  483. goto fail; \
  484. } \
  485. } while (0)
  486. #define BUILD_STORE() \
  487. do { \
  488. LLVMValueRef res; \
  489. if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) { \
  490. aot_set_last_error("llvm build store failed."); \
  491. goto fail; \
  492. } \
  493. LLVMSetAlignment(res, known_align); \
  494. } while (0)
  495. #define BUILD_SIGN_EXT(dst_type) \
  496. do { \
  497. if (!(value = LLVMBuildSExt(comp_ctx->builder, value, dst_type, \
  498. "data_s_ext"))) { \
  499. aot_set_last_error("llvm build sign ext failed."); \
  500. goto fail; \
  501. } \
  502. } while (0)
  503. #define BUILD_ZERO_EXT(dst_type) \
  504. do { \
  505. if (!(value = LLVMBuildZExt(comp_ctx->builder, value, dst_type, \
  506. "data_z_ext"))) { \
  507. aot_set_last_error("llvm build zero ext failed."); \
  508. goto fail; \
  509. } \
  510. } while (0)
  511. #if WASM_ENABLE_SHARED_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0
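/* Trap with EXCE_UNALIGNED_ATOMIC unless the native address is aligned to
   (1 << align) bytes, i.e. (addr & ((1 << align) - 1)) == 0. */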
  512. bool
  513. check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  514. LLVMValueRef addr, uint32 align)
  515. {
  516. LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
  517. LLVMBasicBlockRef check_align_succ;
  518. LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
  519. LLVMValueRef res;
  520. CHECK_LLVM_CONST(align_mask);
  521. /* Convert pointer to int */
  522. if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr, I32_TYPE,
  523. "address"))) {
  524. aot_set_last_error("llvm build ptr to int failed.");
  525. goto fail;
  526. }
  527. /* The memory address should be aligned */
  528. BUILD_OP(And, addr, align_mask, res, "and");
  529. BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");
  530. /* Add basic blocks */
  531. ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
  532. LLVMMoveBasicBlockAfter(check_align_succ, block_curr);
  533. if (!aot_emit_exception(comp_ctx, func_ctx, EXCE_UNALIGNED_ATOMIC, true,
  534. res, check_align_succ)) {
  535. goto fail;
  536. }
  537. SET_BUILD_POS(check_align_succ);
  538. return true;
  539. fail:
  540. return false;
  541. }
  542. #endif /* WASM_ENABLE_SHARED_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0 */
  543. #if WASM_ENABLE_SHARED_MEMORY != 0
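/* Atomic accesses are emitted as volatile, sequentially-consistent loads and
   stores, guarded by the runtime alignment check above. */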
  544. #define BUILD_ATOMIC_LOAD(align, data_type) \
  545. do { \
  546. if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
  547. goto fail; \
  548. } \
  549. if (!(value = LLVMBuildLoad2(comp_ctx->builder, data_type, maddr, \
  550. "data"))) { \
  551. aot_set_last_error("llvm build load failed."); \
  552. goto fail; \
  553. } \
  554. LLVMSetAlignment(value, 1 << align); \
  555. LLVMSetVolatile(value, true); \
  556. LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent); \
  557. } while (0)
  558. #define BUILD_ATOMIC_STORE(align) \
  559. do { \
  560. LLVMValueRef res; \
  561. if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
  562. goto fail; \
  563. } \
  564. if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) { \
  565. aot_set_last_error("llvm build store failed."); \
  566. goto fail; \
  567. } \
  568. LLVMSetAlignment(res, 1 << align); \
  569. LLVMSetVolatile(res, true); \
  570. LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent); \
  571. } while (0)
  572. #endif
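/* Compilers for the wasm load/store opcodes: each computes and checks the
   native address, casts it to the width-specific pointer type (the GS-based
   variant when segue is enabled), then loads or stores with the required
   truncation or sign/zero extension. */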
  573. bool
  574. aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  575. uint32 align, mem_offset_t offset, uint32 bytes,
  576. bool sign, bool atomic)
  577. {
  578. LLVMValueRef maddr, value = NULL;
  579. LLVMTypeRef data_type;
  580. bool enable_segue = comp_ctx->enable_segue_i32_load;
  581. unsigned int known_align;
  582. if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
  583. enable_segue, &known_align)))
  584. return false;
  585. switch (bytes) {
  586. case 4:
  587. if (!enable_segue)
  588. BUILD_PTR_CAST(INT32_PTR_TYPE);
  589. else
  590. BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
  591. #if WASM_ENABLE_SHARED_MEMORY != 0
  592. if (atomic)
  593. BUILD_ATOMIC_LOAD(align, I32_TYPE);
  594. else
  595. #endif
  596. BUILD_LOAD(I32_TYPE);
  597. break;
  598. case 2:
  599. case 1:
  600. if (bytes == 2) {
  601. if (!enable_segue)
  602. BUILD_PTR_CAST(INT16_PTR_TYPE);
  603. else
  604. BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
  605. data_type = INT16_TYPE;
  606. }
  607. else {
  608. if (!enable_segue)
  609. BUILD_PTR_CAST(INT8_PTR_TYPE);
  610. else
  611. BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
  612. data_type = INT8_TYPE;
  613. }
  614. #if WASM_ENABLE_SHARED_MEMORY != 0
  615. if (atomic) {
  616. BUILD_ATOMIC_LOAD(align, data_type);
  617. BUILD_ZERO_EXT(I32_TYPE);
  618. }
  619. else
  620. #endif
  621. {
  622. BUILD_LOAD(data_type);
  623. if (sign)
  624. BUILD_SIGN_EXT(I32_TYPE);
  625. else
  626. BUILD_ZERO_EXT(I32_TYPE);
  627. }
  628. break;
  629. default:
  630. bh_assert(0);
  631. break;
  632. }
  633. PUSH_I32(value);
  634. (void)data_type;
  635. return true;
  636. fail:
  637. return false;
  638. }
  639. bool
  640. aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  641. uint32 align, mem_offset_t offset, uint32 bytes,
  642. bool sign, bool atomic)
  643. {
  644. LLVMValueRef maddr, value = NULL;
  645. LLVMTypeRef data_type;
  646. bool enable_segue = comp_ctx->enable_segue_i64_load;
  647. unsigned int known_align;
  648. if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
  649. enable_segue, &known_align)))
  650. return false;
  651. switch (bytes) {
  652. case 8:
  653. if (!enable_segue)
  654. BUILD_PTR_CAST(INT64_PTR_TYPE);
  655. else
  656. BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
  657. #if WASM_ENABLE_SHARED_MEMORY != 0
  658. if (atomic)
  659. BUILD_ATOMIC_LOAD(align, I64_TYPE);
  660. else
  661. #endif
  662. BUILD_LOAD(I64_TYPE);
  663. break;
  664. case 4:
  665. case 2:
  666. case 1:
  667. if (bytes == 4) {
  668. if (!enable_segue)
  669. BUILD_PTR_CAST(INT32_PTR_TYPE);
  670. else
  671. BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
  672. data_type = I32_TYPE;
  673. }
  674. else if (bytes == 2) {
  675. if (!enable_segue)
  676. BUILD_PTR_CAST(INT16_PTR_TYPE);
  677. else
  678. BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
  679. data_type = INT16_TYPE;
  680. }
  681. else {
  682. if (!enable_segue)
  683. BUILD_PTR_CAST(INT8_PTR_TYPE);
  684. else
  685. BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
  686. data_type = INT8_TYPE;
  687. }
  688. #if WASM_ENABLE_SHARED_MEMORY != 0
  689. if (atomic) {
  690. BUILD_ATOMIC_LOAD(align, data_type);
  691. BUILD_ZERO_EXT(I64_TYPE);
  692. }
  693. else
  694. #endif
  695. {
  696. BUILD_LOAD(data_type);
  697. if (sign)
  698. BUILD_SIGN_EXT(I64_TYPE);
  699. else
  700. BUILD_ZERO_EXT(I64_TYPE);
  701. }
  702. break;
  703. default:
  704. bh_assert(0);
  705. break;
  706. }
  707. PUSH_I64(value);
  708. (void)data_type;
  709. return true;
  710. fail:
  711. return false;
  712. }
  713. bool
  714. aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  715. uint32 align, mem_offset_t offset)
  716. {
  717. LLVMValueRef maddr, value;
  718. bool enable_segue = comp_ctx->enable_segue_f32_load;
  719. unsigned int known_align;
  720. if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
  721. enable_segue, &known_align)))
  722. return false;
  723. if (!enable_segue)
  724. BUILD_PTR_CAST(F32_PTR_TYPE);
  725. else
  726. BUILD_PTR_CAST(F32_PTR_TYPE_GS);
  727. BUILD_LOAD(F32_TYPE);
  728. PUSH_F32(value);
  729. return true;
  730. fail:
  731. return false;
  732. }
  733. bool
  734. aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  735. uint32 align, mem_offset_t offset)
  736. {
  737. LLVMValueRef maddr, value;
  738. bool enable_segue = comp_ctx->enable_segue_f64_load;
  739. unsigned int known_align;
  740. if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
  741. enable_segue, &known_align)))
  742. return false;
  743. if (!enable_segue)
  744. BUILD_PTR_CAST(F64_PTR_TYPE);
  745. else
  746. BUILD_PTR_CAST(F64_PTR_TYPE_GS);
  747. BUILD_LOAD(F64_TYPE);
  748. PUSH_F64(value);
  749. return true;
  750. fail:
  751. return false;
  752. }
  753. bool
  754. aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  755. uint32 align, mem_offset_t offset, uint32 bytes,
  756. bool atomic)
  757. {
  758. LLVMValueRef maddr, value;
  759. bool enable_segue = comp_ctx->enable_segue_i32_store;
  760. POP_I32(value);
  761. unsigned int known_align;
  762. if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
  763. enable_segue, &known_align)))
  764. return false;
  765. switch (bytes) {
  766. case 4:
  767. if (!enable_segue)
  768. BUILD_PTR_CAST(INT32_PTR_TYPE);
  769. else
  770. BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
  771. break;
  772. case 2:
  773. if (!enable_segue)
  774. BUILD_PTR_CAST(INT16_PTR_TYPE);
  775. else
  776. BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
  777. BUILD_TRUNC(value, INT16_TYPE);
  778. break;
  779. case 1:
  780. if (!enable_segue)
  781. BUILD_PTR_CAST(INT8_PTR_TYPE);
  782. else
  783. BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
  784. BUILD_TRUNC(value, INT8_TYPE);
  785. break;
  786. default:
  787. bh_assert(0);
  788. break;
  789. }
  790. #if WASM_ENABLE_SHARED_MEMORY != 0
  791. if (atomic)
  792. BUILD_ATOMIC_STORE(align);
  793. else
  794. #endif
  795. BUILD_STORE();
  796. return true;
  797. fail:
  798. return false;
  799. }
  800. bool
  801. aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  802. uint32 align, mem_offset_t offset, uint32 bytes,
  803. bool atomic)
  804. {
  805. LLVMValueRef maddr, value;
  806. bool enable_segue = comp_ctx->enable_segue_i64_store;
  807. POP_I64(value);
  808. unsigned int known_align;
  809. if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
  810. enable_segue, &known_align)))
  811. return false;
  812. switch (bytes) {
  813. case 8:
  814. if (!enable_segue)
  815. BUILD_PTR_CAST(INT64_PTR_TYPE);
  816. else
  817. BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
  818. break;
  819. case 4:
  820. if (!enable_segue)
  821. BUILD_PTR_CAST(INT32_PTR_TYPE);
  822. else
  823. BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
  824. BUILD_TRUNC(value, I32_TYPE);
  825. break;
  826. case 2:
  827. if (!enable_segue)
  828. BUILD_PTR_CAST(INT16_PTR_TYPE);
  829. else
  830. BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
  831. BUILD_TRUNC(value, INT16_TYPE);
  832. break;
  833. case 1:
  834. if (!enable_segue)
  835. BUILD_PTR_CAST(INT8_PTR_TYPE);
  836. else
  837. BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
  838. BUILD_TRUNC(value, INT8_TYPE);
  839. break;
  840. default:
  841. bh_assert(0);
  842. break;
  843. }
  844. #if WASM_ENABLE_SHARED_MEMORY != 0
  845. if (atomic)
  846. BUILD_ATOMIC_STORE(align);
  847. else
  848. #endif
  849. BUILD_STORE();
  850. return true;
  851. fail:
  852. return false;
  853. }
  854. bool
  855. aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  856. uint32 align, mem_offset_t offset)
  857. {
  858. LLVMValueRef maddr, value;
  859. bool enable_segue = comp_ctx->enable_segue_f32_store;
  860. POP_F32(value);
  861. unsigned int known_align;
  862. if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4,
  863. enable_segue, &known_align)))
  864. return false;
  865. if (!enable_segue)
  866. BUILD_PTR_CAST(F32_PTR_TYPE);
  867. else
  868. BUILD_PTR_CAST(F32_PTR_TYPE_GS);
  869. BUILD_STORE();
  870. return true;
  871. fail:
  872. return false;
  873. }
  874. bool
  875. aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  876. uint32 align, mem_offset_t offset)
  877. {
  878. LLVMValueRef maddr, value;
  879. bool enable_segue = comp_ctx->enable_segue_f64_store;
  880. POP_F64(value);
  881. unsigned int known_align;
  882. if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8,
  883. enable_segue, &known_align)))
  884. return false;
  885. if (!enable_segue)
  886. BUILD_PTR_CAST(F64_PTR_TYPE);
  887. else
  888. BUILD_PTR_CAST(F64_PTR_TYPE_GS);
  889. BUILD_STORE();
  890. return true;
  891. fail:
  892. return false;
  893. }
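/* Current page count: use the cached value when the memory space cannot
   change, otherwise reload it, then cast to the page-count type (i64 for
   memory64, i32 otherwise). */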
  894. static LLVMValueRef
  895. get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
  896. {
  897. LLVMValueRef mem_size;
  898. if (func_ctx->mem_space_unchanged) {
  899. mem_size = func_ctx->mem_info[0].mem_cur_page_count_addr;
  900. }
  901. else {
  902. if (!(mem_size = LLVMBuildLoad2(
  903. comp_ctx->builder, I32_TYPE,
  904. func_ctx->mem_info[0].mem_cur_page_count_addr, "mem_size"))) {
  905. aot_set_last_error("llvm build load failed.");
  906. goto fail;
  907. }
  908. }
  909. return LLVMBuildIntCast(comp_ctx->builder, mem_size,
  910. MEMORY64_COND_VALUE(I64_TYPE, I32_TYPE), "");
  911. fail:
  912. return NULL;
  913. }
  914. bool
  915. aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
  916. {
  917. LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
  918. if (mem_size)
  919. PUSH_PAGE_COUNT(mem_size);
  920. return mem_size ? true : false;
  921. fail:
  922. return false;
  923. }
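/* memory.grow: call wasm_enlarge_memory (JIT mode) or aot_enlarge_memory
   (AOT/indirect mode) with the popped delta, then push the previous page
   count on success or -1 on failure; for memory64, a delta above UINT32_MAX
   is also treated as failure. */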
  924. bool
  925. aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
  926. {
  927. LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
  928. LLVMValueRef delta, param_values[2], ret_value, func, value;
  929. LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
  930. int32 func_index;
  931. #if WASM_ENABLE_MEMORY64 != 0
  932. LLVMValueRef u32_max, u32_cmp_result;
  933. #endif
  934. if (!mem_size)
  935. return false;
  936. POP_PAGE_COUNT(delta);
  937. /* TODO: multi-memory aot_enlarge_memory_with_idx() */
  938. /* Function type of aot_enlarge_memory() */
  939. param_types[0] = INT8_PTR_TYPE;
  940. param_types[1] = I32_TYPE;
  941. ret_type = INT8_TYPE;
  942. if (!(func_type = LLVMFunctionType(ret_type, param_types, 2, false))) {
  943. aot_set_last_error("llvm add function type failed.");
  944. return false;
  945. }
  946. if (comp_ctx->is_jit_mode) {
  947. /* JIT mode, call the function directly */
  948. if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
  949. aot_set_last_error("llvm add pointer type failed.");
  950. return false;
  951. }
  952. if (!(value = I64_CONST((uint64)(uintptr_t)wasm_enlarge_memory))
  953. || !(func = LLVMConstIntToPtr(value, func_ptr_type))) {
  954. aot_set_last_error("create LLVM value failed.");
  955. return false;
  956. }
  957. }
  958. else if (comp_ctx->is_indirect_mode) {
  959. if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
  960. aot_set_last_error("create LLVM function type failed.");
  961. return false;
  962. }
  963. func_index =
  964. aot_get_native_symbol_index(comp_ctx, "aot_enlarge_memory");
  965. if (func_index < 0) {
  966. return false;
  967. }
  968. if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
  969. func_ptr_type, func_index))) {
  970. return false;
  971. }
  972. }
  973. else {
  974. char *func_name = "aot_enlarge_memory";
  975. /* AOT mode, declare the function */
  976. if (!(func = LLVMGetNamedFunction(func_ctx->module, func_name))
  977. && !(func =
  978. LLVMAddFunction(func_ctx->module, func_name, func_type))) {
  979. aot_set_last_error("llvm add function failed.");
  980. return false;
  981. }
  982. }
  983. /* Call function aot_enlarge_memory() */
  984. param_values[0] = func_ctx->aot_inst;
  985. param_values[1] = LLVMBuildTrunc(comp_ctx->builder, delta, I32_TYPE, "");
  986. if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
  987. param_values, 2, "call"))) {
  988. aot_set_last_error("llvm build call failed.");
  989. return false;
  990. }
  991. BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");
  992. #if WASM_ENABLE_MEMORY64 != 0
  993. if (IS_MEMORY64) {
  994. if (!(u32_max = I64_CONST(UINT32_MAX))) {
  995. aot_set_last_error("llvm build const failed");
  996. return false;
  997. }
  998. BUILD_ICMP(LLVMIntULE, delta, u32_max, u32_cmp_result, "page_size_cmp");
  999. BUILD_OP(And, ret_value, u32_cmp_result, ret_value, "and");
  1000. }
  1001. #endif
  1002. /* ret_value = ret_value == true ? pre_page_count : -1 */
  1003. if (!(ret_value = LLVMBuildSelect(
  1004. comp_ctx->builder, ret_value, mem_size,
  1005. MEMORY64_COND_VALUE(I64_NEG_ONE, I32_NEG_ONE), "mem_grow_ret"))) {
  1006. aot_set_last_error("llvm build select failed.");
  1007. return false;
  1008. }
  1009. PUSH_PAGE_COUNT(ret_value);
  1010. return true;
  1011. fail:
  1012. return false;
  1013. }
  1014. #if WASM_ENABLE_BULK_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0
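/* Bulk-memory variant of the bounds check: verify that [offset, offset+bytes)
   fits within the current memory data size (or lies in the shared heap) and
   return the native start address. */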
  1015. LLVMValueRef
  1016. check_bulk_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  1017. LLVMValueRef offset, LLVMValueRef bytes)
  1018. {
  1019. LLVMValueRef maddr, max_addr, cmp;
  1020. LLVMValueRef mem_base_addr, maddr_phi = NULL;
  1021. LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
  1022. LLVMBasicBlockRef check_succ, block_maddr_phi = NULL;
  1023. LLVMValueRef mem_size;
  1024. #if WASM_ENABLE_MEMORY64 == 0
  1025. bool is_memory64 = false;
  1026. #else
  1027. bool is_memory64 = IS_MEMORY64;
  1028. #endif
  1029. /* Get memory base address and memory data size */
  1030. #if WASM_ENABLE_SHARED_MEMORY != 0
  1031. bool is_shared_memory = comp_ctx->comp_data->memories[0].flags & 0x02;
  1032. if (func_ctx->mem_space_unchanged || is_shared_memory) {
  1033. #else
  1034. if (func_ctx->mem_space_unchanged) {
  1035. #endif
  1036. mem_base_addr = func_ctx->mem_info[0].mem_base_addr;
  1037. }
  1038. else {
  1039. if (!(mem_base_addr = LLVMBuildLoad2(
  1040. comp_ctx->builder, OPQ_PTR_TYPE,
  1041. func_ctx->mem_info[0].mem_base_addr, "mem_base"))) {
  1042. aot_set_last_error("llvm build load failed.");
  1043. goto fail;
  1044. }
  1045. }
  1046. /*
  1047. * Note: we don't throw the integer-overflow exception here since it must
  1048. * already have been thrown earlier when converting float to integer
  1049. */
  1050. /* return address directly if constant offset and inside memory space */
  1051. if (LLVMIsEfficientConstInt(offset) && LLVMIsEfficientConstInt(bytes)) {
  1052. uint64 mem_offset = (uint64)LLVMConstIntGetZExtValue(offset);
  1053. uint64 mem_len = (uint64)LLVMConstIntGetZExtValue(bytes);
  1054. uint32 num_bytes_per_page =
  1055. comp_ctx->comp_data->memories[0].num_bytes_per_page;
  1056. uint32 init_page_count =
  1057. comp_ctx->comp_data->memories[0].init_page_count;
  1058. uint64 mem_data_size = (uint64)num_bytes_per_page * init_page_count;
  1059. if (mem_data_size > 0 && mem_offset + mem_len <= mem_data_size) {
  1060. /* inside memory space */
  1061. /* maddr = mem_base_addr + moffset */
  1062. if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
  1063. mem_base_addr, &offset, 1,
  1064. "maddr"))) {
  1065. aot_set_last_error("llvm build add failed.");
  1066. goto fail;
  1067. }
  1068. return maddr;
  1069. }
  1070. }
  1071. if (func_ctx->mem_space_unchanged) {
  1072. mem_size = func_ctx->mem_info[0].mem_data_size_addr;
  1073. }
  1074. else {
  1075. if (!(mem_size = LLVMBuildLoad2(
  1076. comp_ctx->builder, I64_TYPE,
  1077. func_ctx->mem_info[0].mem_data_size_addr, "mem_size"))) {
  1078. aot_set_last_error("llvm build load failed.");
  1079. goto fail;
  1080. }
  1081. }
  1082. ADD_BASIC_BLOCK(check_succ, "check_succ");
  1083. LLVMMoveBasicBlockAfter(check_succ, block_curr);
  1084. offset =
  1085. LLVMBuildZExt(comp_ctx->builder, offset, I64_TYPE, "extend_offset");
  1086. bytes = LLVMBuildZExt(comp_ctx->builder, bytes, I64_TYPE, "extend_len");
  1087. if (!offset || !bytes) {
  1088. aot_set_last_error("llvm build zext failed.");
  1089. goto fail;
  1090. }
  1091. /* TODO: check whether integer overflow occurs when memory is 64-bit
  1092. and boundary check is enabled */
  1093. BUILD_OP(Add, offset, bytes, max_addr, "max_addr");
  1094. if (comp_ctx->enable_shared_heap /* TODO: && mem_idx == 0 */) {
  1095. LLVMBasicBlockRef app_addr_in_shared_heap, app_addr_in_linear_mem;
  1096. LLVMValueRef shared_heap_start_off, shared_heap_check_bound;
  1097. LLVMValueRef max_offset, cmp1, cmp2, is_in_shared_heap;
  1098. /* Add basic blocks */
  1099. ADD_BASIC_BLOCK(app_addr_in_shared_heap, "app_addr_in_shared_heap");
  1100. ADD_BASIC_BLOCK(app_addr_in_linear_mem, "app_addr_in_linear_mem");
  1101. ADD_BASIC_BLOCK(block_maddr_phi, "maddr_phi");
  1102. LLVMMoveBasicBlockAfter(app_addr_in_shared_heap, block_curr);
  1103. LLVMMoveBasicBlockAfter(app_addr_in_linear_mem,
  1104. app_addr_in_shared_heap);
  1105. LLVMMoveBasicBlockAfter(block_maddr_phi, check_succ);
  1106. LLVMPositionBuilderAtEnd(comp_ctx->builder, block_maddr_phi);
  1107. if (!(maddr_phi = LLVMBuildPhi(comp_ctx->builder, INT8_PTR_TYPE,
  1108. "maddr_phi"))) {
  1109. aot_set_last_error("llvm build phi failed");
  1110. goto fail;
  1111. }
  1112. LLVMPositionBuilderAtEnd(comp_ctx->builder, block_curr);
  1113. shared_heap_start_off = func_ctx->shared_heap_start_off;
  1114. if (comp_ctx->pointer_size == sizeof(uint32)) {
  1115. if (!(shared_heap_start_off =
  1116. LLVMBuildZExt(comp_ctx->builder, shared_heap_start_off,
  1117. I64_TYPE, "shared_heap_start_off_u64"))) {
  1118. aot_set_last_error("llvm build zext failed");
  1119. goto fail;
  1120. }
  1121. }
  1122. shared_heap_check_bound =
  1123. is_memory64 ? I64_CONST(UINT64_MAX) : I64_CONST(UINT32_MAX);
  1124. CHECK_LLVM_CONST(shared_heap_check_bound);
  1125. /* Check whether the bytes to access are in shared heap */
  1126. if (!comp_ctx->enable_bound_check) {
  1127. /* Use IntUGT but not IntUGE to compare, same as the check
  1128. in aot_check_memory_overflow */
  1129. BUILD_ICMP(LLVMIntUGT, offset, func_ctx->shared_heap_start_off,
  1130. is_in_shared_heap, "is_in_shared_heap");
  1131. }
  1132. else {
  1133. BUILD_ICMP(LLVMIntUGT, offset, func_ctx->shared_heap_start_off,
  1134. cmp1, "cmp1");
  1135. BUILD_OP(Add, max_addr, I64_NEG_ONE, max_offset, "max_offset");
  1136. BUILD_ICMP(LLVMIntULE, max_offset, shared_heap_check_bound, cmp2,
  1137. "cmp2");
  1138. BUILD_OP(And, cmp1, cmp2, is_in_shared_heap, "is_in_shared_heap");
  1139. }
  1140. if (!LLVMBuildCondBr(comp_ctx->builder, is_in_shared_heap,
  1141. app_addr_in_shared_heap, app_addr_in_linear_mem)) {
  1142. aot_set_last_error("llvm build cond br failed");
  1143. goto fail;
  1144. }
  1145. LLVMPositionBuilderAtEnd(comp_ctx->builder, app_addr_in_shared_heap);
  1146. /* Get native address inside shared heap */
  1147. if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
  1148. func_ctx->shared_heap_base_addr_adj,
  1149. &offset, 1, "maddr_shared_heap"))) {
  1150. aot_set_last_error("llvm build inbounds gep failed");
  1151. goto fail;
  1152. }
  1153. LLVMAddIncoming(maddr_phi, &maddr, &app_addr_in_shared_heap, 1);
  1154. if (!LLVMBuildBr(comp_ctx->builder, block_maddr_phi)) {
  1155. aot_set_last_error("llvm build br failed");
  1156. goto fail;
  1157. }
  1158. LLVMPositionBuilderAtEnd(comp_ctx->builder, app_addr_in_linear_mem);
  1159. block_curr = LLVMGetInsertBlock(comp_ctx->builder);
  1160. }
  1161. BUILD_ICMP(LLVMIntUGT, max_addr, mem_size, cmp, "cmp_max_mem_addr");
  1162. if (!aot_emit_exception(comp_ctx, func_ctx,
  1163. EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS, true, cmp,
  1164. check_succ)) {
  1165. goto fail;
  1166. }
  1167. /* maddr = mem_base_addr + offset */
  1168. if (!(maddr = LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE,
  1169. mem_base_addr, &offset, 1, "maddr"))) {
  1170. aot_set_last_error("llvm build add failed.");
  1171. goto fail;
  1172. }
  1173. if (comp_ctx->enable_shared_heap /* TODO: && mem_idx == 0 */) {
  1174. block_curr = LLVMGetInsertBlock(comp_ctx->builder);
  1175. LLVMAddIncoming(maddr_phi, &maddr, &block_curr, 1);
  1176. if (!LLVMBuildBr(comp_ctx->builder, block_maddr_phi)) {
  1177. aot_set_last_error("llvm build br failed");
  1178. goto fail;
  1179. }
  1180. LLVMPositionBuilderAtEnd(comp_ctx->builder, block_maddr_phi);
  1181. return maddr_phi;
  1182. }
  1183. else
  1184. return maddr;
  1185. fail:
  1186. return NULL;
  1187. }
  1188. #endif /* end of WASM_ENABLE_BULK_MEMORY != 0 || WASM_ENABLE_STRINGREF != 0 */
  1189. #if WASM_ENABLE_BULK_MEMORY != 0
  1190. bool
  1191. aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  1192. uint32 seg_index)
  1193. {
  1194. LLVMValueRef seg, offset, dst, len, param_values[5], ret_value, func, value;
  1195. LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
  1196. AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
  1197. LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
  1198. LLVMBasicBlockRef mem_init_fail, init_success;
  1199. seg = I32_CONST(seg_index);
  1200. POP_I32(len);
  1201. POP_I32(offset);
  1202. POP_MEM_OFFSET(dst);
  1203. if (!zero_extend_u64(comp_ctx, &dst, "dst64")) {
  1204. return false;
  1205. }
  1206. param_types[0] = INT8_PTR_TYPE;
  1207. param_types[1] = I32_TYPE;
  1208. param_types[2] = I32_TYPE;
  1209. param_types[3] = I32_TYPE;
  1210. param_types[4] = SIZE_T_TYPE;
  1211. ret_type = INT8_TYPE;
  1212. if (comp_ctx->is_jit_mode)
  1213. GET_AOT_FUNCTION(llvm_jit_memory_init, 5);
  1214. else
  1215. GET_AOT_FUNCTION(aot_memory_init, 5);
  1216. /* Call function aot_memory_init() */
  1217. param_values[0] = func_ctx->aot_inst;
  1218. param_values[1] = seg;
  1219. param_values[2] = offset;
  1220. param_values[3] = len;
  1221. param_values[4] = dst;
  1222. if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
  1223. param_values, 5, "call"))) {
  1224. aot_set_last_error("llvm build call failed.");
  1225. return false;
  1226. }
  1227. BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_init_ret");
  1228. ADD_BASIC_BLOCK(mem_init_fail, "mem_init_fail");
  1229. ADD_BASIC_BLOCK(init_success, "init_success");
  1230. LLVMMoveBasicBlockAfter(mem_init_fail, block_curr);
  1231. LLVMMoveBasicBlockAfter(init_success, block_curr);
  1232. if (!LLVMBuildCondBr(comp_ctx->builder, ret_value, init_success,
  1233. mem_init_fail)) {
  1234. aot_set_last_error("llvm build cond br failed.");
  1235. goto fail;
  1236. }
  1237. /* If memory.init failed, return from this function
  1238. so the runtime can catch the exception */
  1239. LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
  1240. if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
  1241. goto fail;
  1242. }
  1243. LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
  1244. return true;
  1245. fail:
  1246. return false;
  1247. }
  1248. bool
  1249. aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
  1250. uint32 seg_index)
  1251. {
  1252. LLVMValueRef seg, param_values[2], ret_value, func, value;
  1253. LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
  1254. seg = I32_CONST(seg_index);
  1255. CHECK_LLVM_CONST(seg);
  1256. param_types[0] = INT8_PTR_TYPE;
  1257. param_types[1] = I32_TYPE;
  1258. ret_type = INT8_TYPE;
  1259. if (comp_ctx->is_jit_mode)
  1260. GET_AOT_FUNCTION(llvm_jit_data_drop, 2);
  1261. else
  1262. GET_AOT_FUNCTION(aot_data_drop, 2);
  1263. /* Call function aot_data_drop() */
  1264. param_values[0] = func_ctx->aot_inst;
  1265. param_values[1] = seg;
  1266. if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
  1267. param_values, 2, "call"))) {
  1268. aot_set_last_error("llvm build call failed.");
  1269. return false;
  1270. }
  1271. return true;
  1272. fail:
  1273. return false;
  1274. }
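/* memory.copy: bounds-check both the source and destination regions, then
   emit either a call to aot_memmove/memmove (JIT or indirect mode) or an
   LLVM memmove intrinsic. */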
  1275. bool
  1276. aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
  1277. {
  1278. LLVMValueRef src, dst, src_addr, dst_addr, len, res;
  1279. bool call_aot_memmove = false;
  1280. POP_MEM_OFFSET(len);
  1281. POP_MEM_OFFSET(src);
  1282. POP_MEM_OFFSET(dst);
  1283. if (!(src_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
  1284. return false;
  1285. if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
  1286. return false;
  1287. if (!zero_extend_u64(comp_ctx, &len, "len64")) {
  1288. return false;
  1289. }
  1290. call_aot_memmove = comp_ctx->is_indirect_mode || comp_ctx->is_jit_mode;
  1291. if (call_aot_memmove) {
  1292. LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
  1293. LLVMValueRef func, params[3];
  1294. param_types[0] = INT8_PTR_TYPE;
  1295. param_types[1] = INT8_PTR_TYPE;
  1296. param_types[2] = SIZE_T_TYPE;
  1297. ret_type = INT8_PTR_TYPE;
  1298. if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
  1299. aot_set_last_error("create LLVM function type failed.");
  1300. return false;
  1301. }
  1302. if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
  1303. aot_set_last_error("create LLVM function pointer type failed.");
  1304. return false;
  1305. }
  1306. if (comp_ctx->is_jit_mode) {
  1307. if (!(func = I64_CONST((uint64)(uintptr_t)aot_memmove))
  1308. || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
  1309. aot_set_last_error("create LLVM value failed.");
  1310. return false;
  1311. }
  1312. }
  1313. else {
  1314. int32 func_index;
  1315. func_index = aot_get_native_symbol_index(comp_ctx, "memmove");
  1316. if (func_index < 0) {
  1317. return false;
  1318. }
  1319. if (!(func =
  1320. aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
  1321. func_ptr_type, func_index))) {
  1322. return false;
  1323. }
  1324. }
  1325. params[0] = dst_addr;
  1326. params[1] = src_addr;
  1327. params[2] = len;
  1328. if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params,
  1329. 3, "call_memmove"))) {
  1330. aot_set_last_error("llvm build memmove failed.");
  1331. return false;
  1332. }
  1333. }
  1334. else {
  1335. if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1, src_addr,
  1336. 1, len))) {
  1337. aot_set_last_error("llvm build memmove failed.");
  1338. return false;
  1339. }
  1340. }
  1341. return true;
  1342. fail:
  1343. return false;
  1344. }
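/* memory.fill: bounds-check the destination region, then fill it via memset,
   calling jit_memset in JIT mode, the "memset" native symbol in indirect
   mode, or a declared memset in AOT mode. */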
  1345. static void *
  1346. jit_memset(void *s, int c, size_t n)
  1347. {
  1348. return memset(s, c, n);
  1349. }
bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    LLVMValueRef val, dst, dst_addr, len, res;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
    LLVMValueRef func, params[3];

    POP_MEM_OFFSET(len);
    POP_I32(val);
    POP_MEM_OFFSET(dst);

    if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
        return false;

    if (!zero_extend_u64(comp_ctx, &len, "len64")) {
        return false;
    }

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = I32_TYPE;
    param_types[2] = SIZE_T_TYPE;
    ret_type = INT8_PTR_TYPE;

    if (!(func_type = LLVMFunctionType(ret_type, param_types, 3, false))) {
        aot_set_last_error("create LLVM function type failed.");
        return false;
    }

    if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
        aot_set_last_error("create LLVM function pointer type failed.");
        return false;
    }

    if (comp_ctx->is_jit_mode) {
        if (!(func = I64_CONST((uint64)(uintptr_t)jit_memset))
            || !(func = LLVMConstIntToPtr(func, func_ptr_type))) {
            aot_set_last_error("create LLVM value failed.");
            return false;
        }
    }
    else if (comp_ctx->is_indirect_mode) {
        int32 func_index;

        func_index = aot_get_native_symbol_index(comp_ctx, "memset");
        if (func_index < 0) {
            return false;
        }
        if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                             func_ptr_type, func_index))) {
            return false;
        }
    }
    else {
        if (!(func = LLVMGetNamedFunction(func_ctx->module, "memset"))
            && !(func =
                     LLVMAddFunction(func_ctx->module, "memset", func_type))) {
            aot_set_last_error("llvm add function failed.");
            return false;
        }
    }

    params[0] = dst_addr;
    params[1] = val;
    params[2] = len;

    if (!(res = LLVMBuildCall2(comp_ctx->builder, func_type, func, params, 3,
                               "call_memset"))) {
        aot_set_last_error("llvm build memset failed.");
        return false;
    }

    return true;
fail:
    return false;
}

#endif /* end of WASM_ENABLE_BULK_MEMORY */

#if WASM_ENABLE_SHARED_MEMORY != 0
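
/* Translate a Wasm atomic read-modify-write opcode (i32/i64.atomic.rmw.*).
 * The linear-memory address is bounds- and alignment-checked, the operand
 * is truncated to the access width, and the operation is emitted as a
 * sequentially consistent LLVM atomicrmw instruction; the previous value
 * is zero-extended back to the operand type and pushed on the stack. */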
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          uint8 atomic_op, uint8 op_type, uint32 align,
                          mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, result;
    bool enable_segue = (op_type == VALUE_TYPE_I32)
                            ? comp_ctx->enable_segue_i32_load
                                  && comp_ctx->enable_segue_i32_store
                            : comp_ctx->enable_segue_i64_load
                                  && comp_ctx->enable_segue_i64_store;

    if (op_type == VALUE_TYPE_I32)
        POP_I32(value);
    else
        POP_I64(value);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            if (op_type == VALUE_TYPE_I64)
                BUILD_TRUNC(value, I32_TYPE);
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicRMW(
              comp_ctx->builder, atomic_op, maddr, value,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    if (op_type == VALUE_TYPE_I32) {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                     "result_i32"))) {
            goto fail;
        }
        PUSH_I32(result);
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                     "result_i64"))) {
            goto fail;
        }
        PUSH_I64(result);
    }

    return true;
fail:
    return false;
}
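
/* Translate a Wasm atomic compare-and-exchange opcode
 * (i32/i64.atomic.rmw.*.cmpxchg). Both the expected and replacement values
 * are truncated to the access width, a sequentially consistent cmpxchg is
 * emitted, and the previous value read from memory is zero-extended and
 * pushed as the result. */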
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint8 op_type,
                              uint32 align, mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, expect, result;
    bool enable_segue = (op_type == VALUE_TYPE_I32)
                            ? comp_ctx->enable_segue_i32_load
                                  && comp_ctx->enable_segue_i32_store
                            : comp_ctx->enable_segue_i64_load
                                  && comp_ctx->enable_segue_i64_store;

    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
        POP_I32(expect);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
    }

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            enable_segue, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    switch (bytes) {
        case 8:
            if (!enable_segue)
                BUILD_PTR_CAST(INT64_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT64_PTR_TYPE_GS);
            break;
        case 4:
            if (!enable_segue)
                BUILD_PTR_CAST(INT32_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT32_PTR_TYPE_GS);
            if (op_type == VALUE_TYPE_I64) {
                BUILD_TRUNC(value, I32_TYPE);
                BUILD_TRUNC(expect, I32_TYPE);
            }
            break;
        case 2:
            if (!enable_segue)
                BUILD_PTR_CAST(INT16_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT16_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT16_TYPE);
            BUILD_TRUNC(expect, INT16_TYPE);
            break;
        case 1:
            if (!enable_segue)
                BUILD_PTR_CAST(INT8_PTR_TYPE);
            else
                BUILD_PTR_CAST(INT8_PTR_TYPE_GS);
            BUILD_TRUNC(value, INT8_TYPE);
            BUILD_TRUNC(expect, INT8_TYPE);
            break;
        default:
            bh_assert(0);
            break;
    }

    if (!(result = LLVMBuildAtomicCmpXchg(
              comp_ctx->builder, maddr, expect, value,
              LLVMAtomicOrderingSequentiallyConsistent,
              LLVMAtomicOrderingSequentiallyConsistent, false))) {
        goto fail;
    }

    LLVMSetVolatile(result, true);

    /* CmpXchg returns an {iN, i1} pair; extract the previous value
       from the first field of the struct */
    if (!(result = LLVMBuildExtractValue(comp_ctx->builder, result, 0,
                                         "previous_value"))) {
        goto fail;
    }

    if (op_type == VALUE_TYPE_I32) {
        if (LLVMTypeOf(result) != I32_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE,
                                         "result_i32"))) {
                goto fail;
            }
        }
        PUSH_I32(result);
    }
    else {
        if (LLVMTypeOf(result) != I64_TYPE) {
            if (!(result = LLVMBuildZExt(comp_ctx->builder, result, I64_TYPE,
                                         "result_i64"))) {
                goto fail;
            }
        }
        PUSH_I64(result);
    }

    return true;
fail:
    return false;
}
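
/* Translate memory.atomic.wait32/wait64 into a call to the runtime helper
 * wasm_runtime_atomic_wait(). A return value of -1 indicates that the
 * runtime raised an exception (e.g. waiting on non-shared memory), in which
 * case the generated code returns immediately so the runtime can surface
 * the exception; otherwise the wait result (0 = ok, 1 = not-equal,
 * 2 = timed-out per the threads proposal) is pushed onto the stack. */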
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           uint8 op_type, uint32 align, mem_offset_t offset,
                           uint32 bytes)
{
    LLVMValueRef maddr, value, timeout, expect, cmp;
    LLVMValueRef param_values[5], ret_value, func, is_wait64;
    LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
    LLVMBasicBlockRef wait_fail, wait_success;
    LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;

    POP_I64(timeout);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(expect);
        is_wait64 = I8_CONST(false);
        if (!(expect = LLVMBuildZExt(comp_ctx->builder, expect, I64_TYPE,
                                     "expect_i64"))) {
            goto fail;
        }
    }
    else {
        POP_I64(expect);
        is_wait64 = I8_CONST(true);
    }

    CHECK_LLVM_CONST(is_wait64);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            false, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I64_TYPE;
    param_types[3] = I64_TYPE;
    param_types[4] = INT8_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);

    /* Call function wasm_runtime_atomic_wait() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = expect;
    param_values[3] = timeout;
    param_values[4] = is_wait64;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 5, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    BUILD_ICMP(LLVMIntNE, ret_value, I32_NEG_ONE, cmp, "atomic_wait_ret");

    ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
    ADD_BASIC_BLOCK(wait_success, "wait_success");

    LLVMMoveBasicBlockAfter(wait_fail, block_curr);
    LLVMMoveBasicBlockAfter(wait_success, block_curr);

    if (!LLVMBuildCondBr(comp_ctx->builder, cmp, wait_success, wait_fail)) {
        aot_set_last_error("llvm build cond br failed.");
        goto fail;
    }

    /* If the atomic wait failed, return from this function
       so the runtime can catch the exception */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
    if (!aot_build_zero_function_ret(comp_ctx, func_ctx, aot_func_type)) {
        goto fail;
    }

    LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);

    PUSH_I32(ret_value);

    /* Insert suspend check point */
    if (comp_ctx->enable_thread_mgr) {
        if (!check_suspend_flags(comp_ctx, func_ctx, false))
            return false;
    }

    return true;
fail:
    return false;
}
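
/* Translate memory.atomic.notify into a call to the runtime helper
 * wasm_runtime_atomic_notify(), pushing the number of woken waiters. */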
bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, uint32 align,
                              mem_offset_t offset, uint32 bytes)
{
    LLVMValueRef maddr, value, count;
    LLVMValueRef param_values[3], ret_value, func;
    LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;

    POP_I32(count);

    if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes,
                                            false, NULL)))
        return false;

    if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
        return false;

    param_types[0] = INT8_PTR_TYPE;
    param_types[1] = INT8_PTR_TYPE;
    param_types[2] = I32_TYPE;
    ret_type = I32_TYPE;

    GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);

    /* Call function wasm_runtime_atomic_notify() */
    param_values[0] = func_ctx->aot_inst;
    param_values[1] = maddr;
    param_values[2] = count;
    if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
                                     param_values, 3, "call"))) {
        aot_set_last_error("llvm build call failed.");
        return false;
    }

    PUSH_I32(ret_value);

    return true;
fail:
    return false;
}
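
/* Translate atomic.fence into a sequentially consistent LLVM fence. */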
bool
aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return LLVMBuildFence(comp_ctx->builder,
                          LLVMAtomicOrderingSequentiallyConsistent, false, "")
               ? true
               : false;
}

#endif /* end of WASM_ENABLE_SHARED_MEMORY */