aot_emit_numberic.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250
  1. /*
  2. * Copyright (C) 2020 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "aot_emit_numberic.h"
  6. #include "aot_emit_exception.h"
  7. #include "aot_emit_control.h"
  8. #include "../aot/aot_runtime.h"
  9. #include "../aot/aot_intrinsic.h"
  10. #include <stdarg.h>
/* Emit an LLVM integer compare; on builder failure record an error message
 * and `return false` from the enclosing function. `name` must be a string
 * literal: it is concatenated directly into the error message. */
#define LLVM_BUILD_ICMP(op, left, right, res, name)                           \
    do {                                                                      \
        if (!(res =                                                           \
                  LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
            aot_set_last_error("llvm build " name " fail.");                  \
            return false;                                                     \
        }                                                                     \
    } while (0)
/* Emit a binary LLVM instruction LLVMBuild##Op; on failure record an error
 * and return `err_ret` (callers pass false or NULL depending on their own
 * return type).
 * NOTE(review): `#name` stringizes an argument that is already a string
 * literal, so the recorded message contains embedded quotes, e.g.
 *   llvm build "div_s" fail.
 * Harmless, but inconsistent with LLVM_BUILD_ICMP above. */
#define LLVM_BUILD_OP(Op, left, right, res, name, err_ret)                  \
    do {                                                                    \
        if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) { \
            aot_set_last_error("llvm build " #name " fail.");               \
            return err_ret;                                                 \
        }                                                                   \
    } while (0)
/* Emit either a plain LLVM binary instruction or, in XIP mode (LLVM
 * intrinsics disabled and the named runtime intrinsic available), a call to
 * the runtime intrinsic instead. Relies on `param_types`, `comp_ctx` and
 * `func_ctx` being in scope at the expansion site.
 * NOTE(review): the intrinsic path does not check `res` for NULL here;
 * callers are expected to validate it — confirm at each use site. */
#define LLVM_BUILD_OP_OR_INTRINSIC(Op, left, right, res, intrinsic, name, \
                                   err_ret)                               \
    do {                                                                  \
        if (comp_ctx->disable_llvm_intrinsics                             \
            && aot_intrinsic_check_capability(comp_ctx, intrinsic)) {     \
            res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,  \
                                          param_types[0], param_types, 2, \
                                          left, right);                   \
        }                                                                 \
        else {                                                            \
            LLVM_BUILD_OP(Op, left, right, res, name, false);             \
        }                                                                 \
    } while (0)
/* Append a new basic block named `name` to the current function and move it
 * right after the builder's current insert block. Jumps to a local `fail:`
 * label on failure, so the expansion site must provide one. */
#define ADD_BASIC_BLOCK(block, name)                                        \
    do {                                                                    \
        if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,      \
                                                    func_ctx->func, name))) { \
            aot_set_last_error("llvm add basic block failed.");             \
            goto fail;                                                      \
        }                                                                   \
                                                                            \
        LLVMMoveBasicBlockAfter(block, LLVMGetInsertBlock(comp_ctx->builder)); \
    } while (0)
  49. #if LLVM_VERSION_NUMBER >= 12
  50. #define IS_CONST_ZERO(val) \
  51. (LLVMIsEfficientConstInt(val) \
  52. && ((is_i32 && (int32)LLVMConstIntGetZExtValue(val) == 0) \
  53. || (!is_i32 && (int64)LLVMConstIntGetSExtValue(val) == 0)))
  54. #else
  55. #define IS_CONST_ZERO(val) \
  56. (LLVMIsEfficientConstInt(val) \
  57. && ((is_i32 && (int32)LLVMConstIntGetZExtValue(val) == 0) \
  58. || (!is_i32 && (int64)LLVMConstIntGetSExtValue(val) == 0)))
  59. #endif
/* Set local `overflow` to an i1 that is true iff left == TYPE_MIN and
 * right == -1, i.e. the single signed-division case that overflows
 * (INT_MIN / -1). `type` is I32 or I64 and is token-pasted onto the
 * project's constant macros (I32_MIN, I32_NEG_ONE, ...). */
#define CHECK_INT_OVERFLOW(type)                                       \
    do {                                                               \
        LLVMValueRef cmp_min_int, cmp_neg_one;                         \
        LLVM_BUILD_ICMP(LLVMIntEQ, left, type##_MIN, cmp_min_int,      \
                        "cmp_min_int");                                \
        LLVM_BUILD_ICMP(LLVMIntEQ, right, type##_NEG_ONE, cmp_neg_one, \
                        "cmp_neg_one");                                \
        LLVM_BUILD_OP(And, cmp_min_int, cmp_neg_one, overflow, "overflow", \
                      false);                                          \
    } while (0)
/* Push an integer value onto the AOT value stack, width chosen by the
 * local `is_i32` flag. PUSH_I32/PUSH_I64 may `goto fail`. */
#define PUSH_INT(v)      \
    do {                 \
        if (is_i32)      \
            PUSH_I32(v); \
        else             \
            PUSH_I64(v); \
    } while (0)
/* Pop an integer value from the AOT value stack (width per `is_i32`). */
#define POP_INT(v)      \
    do {                \
        if (is_i32)     \
            POP_I32(v); \
        else            \
            POP_I64(v); \
    } while (0)
/* Push a float value (width per the local `is_f32` flag). */
#define PUSH_FLOAT(v)    \
    do {                 \
        if (is_f32)      \
            PUSH_F32(v); \
        else             \
            PUSH_F64(v); \
    } while (0)
/* Pop a float value (width per the local `is_f32` flag). */
#define POP_FLOAT(v)    \
    do {                \
        if (is_f32)     \
            POP_F32(v); \
        else            \
            POP_F64(v); \
    } while (0)
/* Pop one int operand, evaluate expression `op` (which may reference
 * `operand`), push the result. On a NULL result set `err` (if non-NULL)
 * as the error message and return false from the enclosing function. */
#define DEF_INT_UNARY_OP(op, err)   \
    do {                            \
        LLVMValueRef res, operand;  \
        POP_INT(operand);           \
        if (!(res = op)) {          \
            if (err)                \
                aot_set_last_error(err); \
            return false;           \
        }                           \
        PUSH_INT(res);              \
    } while (0)
/* Pop two int operands (`right` first, then `left` — stack order),
 * evaluate `op`, push the result; error handling as above. */
#define DEF_INT_BINARY_OP(op, err)      \
    do {                                \
        LLVMValueRef res, left, right;  \
        POP_INT(right);                 \
        POP_INT(left);                  \
        if (!(res = op)) {              \
            if (err)                    \
                aot_set_last_error(err); \
            return false;               \
        }                               \
        PUSH_INT(res);                  \
    } while (0)
/* Float counterpart of DEF_INT_UNARY_OP. */
#define DEF_FP_UNARY_OP(op, err)    \
    do {                            \
        LLVMValueRef res, operand;  \
        POP_FLOAT(operand);         \
        if (!(res = op)) {          \
            if (err)                \
                aot_set_last_error(err); \
            return false;           \
        }                           \
        PUSH_FLOAT(res);            \
    } while (0)
/* Float counterpart of DEF_INT_BINARY_OP. */
#define DEF_FP_BINARY_OP(op, err)       \
    do {                                \
        LLVMValueRef res, left, right;  \
        POP_FLOAT(right);               \
        POP_FLOAT(left);                \
        if (!(res = op)) {              \
            if (err)                    \
                aot_set_last_error(err); \
            return false;               \
        }                               \
        PUSH_FLOAT(res);                \
    } while (0)
/* Mask the local `right` shift count to (bit width - 1). */
#define SHIFT_COUNT_MASK                                               \
    do {                                                               \
        /* LLVM has undefined behavior if shift count is greater than  \
         * bits count while Webassembly spec requires the shift count  \
         * be wrapped.                                                 \
         */                                                            \
        LLVMValueRef shift_count_mask, bits_minus_one;                 \
        bits_minus_one = is_i32 ? I32_31 : I64_63;                     \
        LLVM_BUILD_OP(And, right, bits_minus_one, shift_count_mask,    \
                      "shift_count_mask", NULL);                       \
        right = shift_count_mask;                                      \
    } while (0)
  156. /* Call llvm constrained floating-point intrinsic */
  157. static LLVMValueRef
  158. call_llvm_float_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
  159. AOTFuncContext *func_ctx,
  160. bool is_f32,
  161. const char *intrinsic, ...)
  162. {
  163. va_list param_value_list;
  164. LLVMValueRef ret;
  165. LLVMTypeRef param_types[4], ret_type = is_f32 ? F32_TYPE : F64_TYPE;
  166. int param_count = (comp_ctx->disable_llvm_intrinsics
  167. && aot_intrinsic_check_capability(comp_ctx, intrinsic))
  168. ? 2
  169. : 4;
  170. param_types[0] = param_types[1] = ret_type;
  171. param_types[2] = param_types[3] = MD_TYPE;
  172. va_start(param_value_list, intrinsic);
  173. ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
  174. param_types, param_count, param_value_list);
  175. va_end(param_value_list);
  176. return ret;
  177. }
  178. /* Call llvm constrained libm-equivalent intrinsic */
  179. static LLVMValueRef
  180. call_llvm_libm_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
  181. AOTFuncContext *func_ctx,
  182. bool is_f32,
  183. const char *intrinsic, ...)
  184. {
  185. va_list param_value_list;
  186. LLVMValueRef ret;
  187. LLVMTypeRef param_types[3], ret_type = is_f32 ? F32_TYPE : F64_TYPE;
  188. param_types[0] = ret_type;
  189. param_types[1] = param_types[2] = MD_TYPE;
  190. va_start(param_value_list, intrinsic);
  191. ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
  192. param_types, 3, param_value_list);
  193. va_end(param_value_list);
  194. return ret;
  195. }
/* Emit code for f32/f64 min/max with full Wasm semantics:
 *   - if either operand is NaN, the result is canonical NaN;
 *   - if the operands compare equal (covers -0 vs +0, which compare equal
 *     but must order as -0 < +0), the result is computed bitwise:
 *     OR of the bit patterns for min (picks -0), AND for max (picks +0);
 *   - otherwise llvm.minnum/llvm.maxnum gives the answer.
 * Returns the resulting LLVMValueRef or NULL on failure. */
static LLVMValueRef
compile_op_float_min_max(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         bool is_f32, LLVMValueRef left, LLVMValueRef right,
                         bool is_min)
{
    LLVMTypeRef param_types[2], ret_type = is_f32 ? F32_TYPE : F64_TYPE,
                int_type = is_f32 ? I32_TYPE : I64_TYPE;
    LLVMValueRef cmp, is_eq, is_nan, ret, left_int, right_int, tmp,
        nan = LLVMConstRealOfString(ret_type, "NaN");
    char *intrinsic = is_min ? (is_f32 ? "llvm.minnum.f32" : "llvm.minnum.f64")
                             : (is_f32 ? "llvm.maxnum.f32" : "llvm.maxnum.f64");
    /* goto fail if the NaN constant could not be created */
    CHECK_LLVM_CONST(nan);
    param_types[0] = param_types[1] = ret_type;
    if (comp_ctx->disable_llvm_intrinsics
        && aot_intrinsic_check_capability(comp_ctx,
                                          is_f32 ? "f32_cmp" : "f64_cmp")) {
        /* XIP mode: compute the UNO (unordered, i.e. NaN) and EQ predicates
           through the runtime f32_cmp/f64_cmp intrinsic, which returns i32 */
        LLVMTypeRef param_types_intrinsic[3];
        LLVMValueRef opcond = LLVMConstInt(I32_TYPE, FLOAT_UNO, true);
        param_types_intrinsic[0] = I32_TYPE;
        param_types_intrinsic[1] = is_f32 ? F32_TYPE : F64_TYPE;
        param_types_intrinsic[2] = param_types_intrinsic[1];
        is_nan = aot_call_llvm_intrinsic(
            comp_ctx, func_ctx, is_f32 ? "f32_cmp" : "f64_cmp", I32_TYPE,
            param_types_intrinsic, 3, opcond, left, right);
        opcond = LLVMConstInt(I32_TYPE, FLOAT_EQ, true);
        is_eq = aot_call_llvm_intrinsic(
            comp_ctx, func_ctx, is_f32 ? "f32_cmp" : "f64_cmp", I32_TYPE,
            param_types_intrinsic, 3, opcond, left, right);
        if (!is_nan || !is_eq) {
            return NULL;
        }
        /* narrow the i32 predicate results to i1 for the selects below */
        if (!(is_nan = LLVMBuildIntCast(comp_ctx->builder, is_nan, INT1_TYPE,
                                        "bit_cast_is_nan"))) {
            aot_set_last_error("llvm build is_nan bit cast fail.");
            return NULL;
        }
        if (!(is_eq = LLVMBuildIntCast(comp_ctx->builder, is_eq, INT1_TYPE,
                                       "bit_cast_is_eq"))) {
            aot_set_last_error("llvm build is_eq bit cast fail.");
            return NULL;
        }
    }
    else if (!(is_nan = LLVMBuildFCmp(comp_ctx->builder, LLVMRealUNO, left,
                                      right, "is_nan"))
             || !(is_eq = LLVMBuildFCmp(comp_ctx->builder, LLVMRealOEQ, left,
                                        right, "is_eq"))) {
        aot_set_last_error("llvm build fcmp fail.");
        return NULL;
    }
    /* If left and right are equal, they may be zero with different sign.
       Webassembly spec assert -0 < +0. So do a bitwise here. */
    if (!(left_int =
              LLVMBuildBitCast(comp_ctx->builder, left, int_type, "left_int"))
        || !(right_int = LLVMBuildBitCast(comp_ctx->builder, right, int_type,
                                          "right_int"))) {
        aot_set_last_error("llvm build bitcast fail.");
        return NULL;
    }
    /* min: OR the sign bits so -0 wins; max: AND so +0 wins.
       NOTE(review): in XIP mode these expand to i32.or/i64.or etc. calls
       built with the float `param_types` declared above — confirm the
       runtime intrinsic dispatch tolerates that, and that a NULL `tmp`
       cannot reach the bitcast below (the macro does not check it). */
    if (is_min)
        LLVM_BUILD_OP_OR_INTRINSIC(Or, left_int, right_int, tmp,
                                   is_f32 ? "i32.or" : "i64.or", "tmp_int",
                                   false);
    else
        LLVM_BUILD_OP_OR_INTRINSIC(And, left_int, right_int, tmp,
                                   is_f32 ? "i32.and" : "i64.and", "tmp_int",
                                   false);
    if (!(tmp = LLVMBuildBitCast(comp_ctx->builder, tmp, ret_type, "tmp"))) {
        aot_set_last_error("llvm build bitcast fail.");
        return NULL;
    }
    if (!(cmp = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic, ret_type,
                                        param_types, 2, left, right)))
        return NULL;
    /* The result of XIP intrinsic is 0 or 1, should return it directly */
    if (comp_ctx->disable_llvm_intrinsics
        && aot_intrinsic_check_capability(comp_ctx,
                                          is_f32 ? "f32_cmp" : "f64_cmp")) {
        return cmp;
    }
    /* equal operands -> use the bitwise result (handles -0 vs +0) */
    if (!(cmp = LLVMBuildSelect(comp_ctx->builder, is_eq, tmp, cmp, "cmp"))) {
        aot_set_last_error("llvm build select fail.");
        return NULL;
    }
    /* any NaN operand -> canonical NaN */
    if (!(ret = LLVMBuildSelect(comp_ctx->builder, is_nan, nan, cmp,
                                is_min ? "min" : "max"))) {
        aot_set_last_error("llvm build select fail.");
        return NULL;
    }
    return ret;
fail:
    return NULL;
}
  288. typedef enum BitCountType {
  289. CLZ32 = 0,
  290. CLZ64,
  291. CTZ32,
  292. CTZ64,
  293. POP_CNT32,
  294. POP_CNT64
  295. } BitCountType;
  296. /* clang-format off */
  297. static char *bit_cnt_llvm_intrinsic[] = {
  298. "llvm.ctlz.i32",
  299. "llvm.ctlz.i64",
  300. "llvm.cttz.i32",
  301. "llvm.cttz.i64",
  302. "llvm.ctpop.i32",
  303. "llvm.ctpop.i64",
  304. };
  305. /* clang-format on */
/* Emit clz/ctz/popcnt for the value on top of the stack and push the
 * result. Returns true on success, false on failure. */
static bool
aot_compile_int_bit_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          BitCountType type, bool is_i32)
{
    LLVMValueRef zero_undef;
    LLVMTypeRef ret_type, param_types[2];
    param_types[0] = ret_type = is_i32 ? I32_TYPE : I64_TYPE;
    /* second parameter of ctlz/cttz is an i1 flag */
    param_types[1] = LLVMInt1TypeInContext(comp_ctx->context);
    /* i1 false: selects the ctlz/cttz variant that is fully defined for a
       zero input, as Wasm requires (clz(0) == bit width) — despite the
       variable's name, the value passed is "not undef at zero" */
    zero_undef = LLVMConstInt(param_types[1], false, true);
    CHECK_LLVM_CONST(zero_undef);
    /* Call the LLVM intrinsic function */
    if (type < POP_CNT32)
        /* ctlz/cttz take (value, is_zero_undef) */
        DEF_INT_UNARY_OP(aot_call_llvm_intrinsic(
                             comp_ctx, func_ctx, bit_cnt_llvm_intrinsic[type],
                             ret_type, param_types, 2, operand, zero_undef),
                         NULL);
    else
        /* ctpop takes only the value */
        DEF_INT_UNARY_OP(aot_call_llvm_intrinsic(
                             comp_ctx, func_ctx, bit_cnt_llvm_intrinsic[type],
                             ret_type, param_types, 1, operand),
                         NULL);
    return true;
fail:
    return false;
}
/* Emit signed remainder with the INT_MIN % -1 case handled by branching:
 * if `overflow_cond` holds, skip the srem (which would trap/UB in LLVM)
 * and produce 0 as Wasm requires; otherwise compute left % right. The two
 * paths merge through a phi whose value is pushed onto the stack. */
static bool
compile_rems(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
             LLVMValueRef left, LLVMValueRef right, LLVMValueRef overflow_cond,
             bool is_i32)
{
    LLVMValueRef phi, no_overflow_value, zero = is_i32 ? I32_ZERO : I64_ZERO;
    LLVMBasicBlockRef block_curr, no_overflow_block, rems_end_block;
    /* used by the LLVM_BUILD_OP_OR_INTRINSIC expansion below */
    LLVMTypeRef param_types[2];
    param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;
    block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    /* Add 2 blocks: no_overflow_block and rems_end block */
    ADD_BASIC_BLOCK(rems_end_block, "rems_end");
    ADD_BASIC_BLOCK(no_overflow_block, "rems_no_overflow");
    /* Create condition br: overflow -> straight to the merge block */
    if (!LLVMBuildCondBr(comp_ctx->builder, overflow_cond, rems_end_block,
                         no_overflow_block)) {
        aot_set_last_error("llvm build cond br failed.");
        return false;
    }
    /* Translate no_overflow_block */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, no_overflow_block);
    /* NOTE(review): in XIP mode the macro does not NULL-check the intrinsic
       result before it reaches the phi below — confirm upstream behavior */
    LLVM_BUILD_OP_OR_INTRINSIC(SRem, left, right, no_overflow_value,
                               is_i32 ? "i32.rem_s" : "i64.rem_s", "rem_s",
                               false);
    /* Jump to rems_end block */
    if (!LLVMBuildBr(comp_ctx->builder, rems_end_block)) {
        aot_set_last_error("llvm build br failed.");
        return false;
    }
    /* Translate rems_end_block */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, rems_end_block);
    /* Create result phi */
    if (!(phi = LLVMBuildPhi(comp_ctx->builder, is_i32 ? I32_TYPE : I64_TYPE,
                             "rems_result_phi"))) {
        aot_set_last_error("llvm build phi failed.");
        return false;
    }
    /* Add phi incoming values: srem result from the no-overflow path,
       constant 0 from the overflow path (block_curr branches there) */
    LLVMAddIncoming(phi, &no_overflow_value, &no_overflow_block, 1);
    LLVMAddIncoming(phi, &zero, &block_curr, 1);
    if (is_i32)
        PUSH_I32(phi);
    else
        PUSH_I64(phi);
    return true;
fail:
    return false;
}
/* Emit i32/i64 div_s/div_u/rem_s/rem_u with Wasm trap semantics:
 * divide-by-zero and INT_MIN/-1 overflow raise exceptions. Constant
 * divisors 0, 1 and -1 are special-cased at compile time; other constant
 * divisors skip the runtime divide-by-zero check entirely. Pops two
 * operands, pushes one result. Returns true on success. */
static bool
compile_int_div(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                IntArithmetic arith_op, bool is_i32, uint8 **p_frame_ip)
{
    LLVMValueRef left, right, cmp_div_zero, overflow, res;
    LLVMBasicBlockRef check_div_zero_succ, check_overflow_succ;
    /* used by the LLVM_BUILD_OP_OR_INTRINSIC expansions below */
    LLVMTypeRef param_types[2];
    const char *intrinsic = NULL;
    param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;
    bh_assert(arith_op == INT_DIV_S || arith_op == INT_DIV_U
              || arith_op == INT_REM_S || arith_op == INT_REM_U);
    POP_INT(right);
    POP_INT(left);
    /* undef/poison operands can only come from already-trapping code paths:
       emit an unconditional overflow exception and continue with the next
       reachable block */
    if (LLVMIsUndef(right) || LLVMIsUndef(left)
#if LLVM_VERSION_NUMBER >= 12
        || LLVMIsPoison(right) || LLVMIsPoison(left)
#endif
    ) {
        if (!(aot_emit_exception(comp_ctx, func_ctx, EXCE_INTEGER_OVERFLOW,
                                 false, NULL, NULL))) {
            goto fail;
        }
        return aot_handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
    }
    if (LLVMIsEfficientConstInt(right)) {
        /* Constant divisor: resolve the checks at compile time */
        int64 right_val = (int64)LLVMConstIntGetSExtValue(right);
        switch (right_val) {
            case 0:
                /* Directly throw exception if divided by zero */
                if (!(aot_emit_exception(comp_ctx, func_ctx,
                                         EXCE_INTEGER_DIVIDE_BY_ZERO, false,
                                         NULL, NULL)))
                    goto fail;
                return aot_handle_next_reachable_block(comp_ctx, func_ctx,
                                                       p_frame_ip);
            case 1:
                /* x / 1 == x, x % 1 == 0 */
                if (arith_op == INT_DIV_S || arith_op == INT_DIV_U)
                    PUSH_INT(left);
                else
                    PUSH_INT(is_i32 ? I32_ZERO : I64_ZERO);
                return true;
            case -1:
                /* div_s by -1 can only overflow when left == INT_MIN */
                if (arith_op == INT_DIV_S) {
                    LLVM_BUILD_ICMP(LLVMIntEQ, left, is_i32 ? I32_MIN : I64_MIN,
                                    overflow, "overflow");
                    ADD_BASIC_BLOCK(check_overflow_succ,
                                    "check_overflow_success");
                    /* Throw conditional exception if overflow */
                    if (!(aot_emit_exception(comp_ctx, func_ctx,
                                             EXCE_INTEGER_OVERFLOW, true,
                                             overflow, check_overflow_succ)))
                        goto fail;
                    /* Push -(left) to stack */
                    if (!(res = LLVMBuildNeg(comp_ctx->builder, left, "neg"))) {
                        aot_set_last_error("llvm build neg fail.");
                        return false;
                    }
                    PUSH_INT(res);
                    return true;
                }
                else if (arith_op == INT_REM_S) {
                    /* x rem_s -1 == 0 for every x, including INT_MIN */
                    PUSH_INT(is_i32 ? I32_ZERO : I64_ZERO);
                    return true;
                }
                else {
                    /* fall to default */
                    goto handle_default;
                }
            handle_default:
            default:
                /* Build div: non-zero constant divisor, no runtime checks
                   needed (unsigned ops cannot overflow; signed overflow was
                   handled in case -1 above) */
                switch (arith_op) {
                    case INT_DIV_S:
                        LLVM_BUILD_OP_OR_INTRINSIC(
                            SDiv, left, right, res,
                            is_i32 ? "i32.div_s" : "i64.div_s", "div_s", false);
                        break;
                    case INT_DIV_U:
                        LLVM_BUILD_OP_OR_INTRINSIC(
                            UDiv, left, right, res,
                            is_i32 ? "i32.div_u" : "i64.div_u", "div_u", false);
                        break;
                    case INT_REM_S:
                        LLVM_BUILD_OP_OR_INTRINSIC(
                            SRem, left, right, res,
                            is_i32 ? "i32.rem_s" : "i64.rem_s", "rem_s", false);
                        break;
                    case INT_REM_U:
                        LLVM_BUILD_OP_OR_INTRINSIC(
                            URem, left, right, res,
                            is_i32 ? "i32.rem_u" : "i64.rem_u", "rem_u", false);
                        break;
                    default:
                        bh_assert(0);
                        return false;
                }
                PUSH_INT(res);
                return true;
        }
    }
    else {
        /* Non-constant divisor: emit the runtime checks */
        /* Check divided by zero */
        LLVM_BUILD_ICMP(LLVMIntEQ, right, is_i32 ? I32_ZERO : I64_ZERO,
                        cmp_div_zero, "cmp_div_zero");
        ADD_BASIC_BLOCK(check_div_zero_succ, "check_div_zero_success");
        /* Throw conditional exception if divided by zero */
        if (!(aot_emit_exception(comp_ctx, func_ctx,
                                 EXCE_INTEGER_DIVIDE_BY_ZERO, true,
                                 cmp_div_zero, check_div_zero_succ)))
            goto fail;
        switch (arith_op) {
            case INT_DIV_S:
                /* Check integer overflow (INT_MIN / -1) */
                if (is_i32)
                    CHECK_INT_OVERFLOW(I32);
                else
                    CHECK_INT_OVERFLOW(I64);
                ADD_BASIC_BLOCK(check_overflow_succ, "check_overflow_success");
                /* Throw conditional exception if integer overflow */
                if (!(aot_emit_exception(comp_ctx, func_ctx,
                                         EXCE_INTEGER_OVERFLOW, true, overflow,
                                         check_overflow_succ)))
                    goto fail;
                LLVM_BUILD_OP_OR_INTRINSIC(SDiv, left, right, res,
                                           is_i32 ? "i32.div_s" : "i64.div_s",
                                           "div_s", false);
                PUSH_INT(res);
                return true;
            case INT_DIV_U:
                intrinsic = is_i32 ? "i32.div_u" : "i64.div_u";
                if (comp_ctx->disable_llvm_intrinsics
                    && aot_intrinsic_check_capability(comp_ctx, intrinsic)) {
                    res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
                                                  param_types[0], param_types,
                                                  2, left, right);
                }
                else {
                    LLVM_BUILD_OP(UDiv, left, right, res, "div_u", false);
                }
                PUSH_INT(res);
                return true;
            case INT_REM_S:
                /* Webassembly spec requires it return 0 for INT_MIN % -1,
                   so compute the overflow condition and branch around the
                   srem in compile_rems */
                if (is_i32)
                    CHECK_INT_OVERFLOW(I32);
                else
                    CHECK_INT_OVERFLOW(I64);
                return compile_rems(comp_ctx, func_ctx, left, right, overflow,
                                    is_i32);
            case INT_REM_U:
                LLVM_BUILD_OP_OR_INTRINSIC(URem, left, right, res,
                                           is_i32 ? "i32.rem_u" : "i64.rem_u",
                                           "rem_u", false);
                PUSH_INT(res);
                return true;
            default:
                bh_assert(0);
                return false;
        }
    }
fail:
    return false;
}
  542. static LLVMValueRef
  543. compile_int_add(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
  544. bool is_i32)
  545. {
  546. /* If one of the operands is 0, just return the other */
  547. if (IS_CONST_ZERO(left))
  548. return right;
  549. if (IS_CONST_ZERO(right))
  550. return left;
  551. /* Build add */
  552. return LLVMBuildAdd(comp_ctx->builder, left, right, "add");
  553. }
  554. static LLVMValueRef
  555. compile_int_sub(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
  556. bool is_i32)
  557. {
  558. /* If the right operand is 0, just return the left */
  559. if (IS_CONST_ZERO(right))
  560. return left;
  561. /* Build sub */
  562. return LLVMBuildSub(comp_ctx->builder, left, right, "sub");
  563. }
  564. static LLVMValueRef
  565. compile_int_mul(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
  566. bool is_i32)
  567. {
  568. /* If one of the operands is 0, just return constant 0 */
  569. if (IS_CONST_ZERO(left) || IS_CONST_ZERO(right))
  570. return is_i32 ? I32_ZERO : I64_ZERO;
  571. /* Build mul */
  572. return LLVMBuildMul(comp_ctx->builder, left, right, "mul");
  573. }
/* Dispatch an integer arithmetic opcode: add/sub/mul go through the
 * constant-folding helpers above, div/rem through compile_int_div with
 * its trap handling. The DEF_INT_BINARY_OP expansions pop both operands
 * and push the result (and may return false or goto fail internally). */
static bool
compile_op_int_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          IntArithmetic arith_op, bool is_i32,
                          uint8 **p_frame_ip)
{
    switch (arith_op) {
        case INT_ADD:
            DEF_INT_BINARY_OP(compile_int_add(comp_ctx, left, right, is_i32),
                              "compile int add fail.");
            return true;
        case INT_SUB:
            DEF_INT_BINARY_OP(compile_int_sub(comp_ctx, left, right, is_i32),
                              "compile int sub fail.");
            return true;
        case INT_MUL:
            DEF_INT_BINARY_OP(compile_int_mul(comp_ctx, left, right, is_i32),
                              "compile int mul fail.");
            return true;
        case INT_DIV_S:
        case INT_DIV_U:
        case INT_REM_S:
        case INT_REM_U:
            return compile_int_div(comp_ctx, func_ctx, arith_op, is_i32,
                                   p_frame_ip);
        default:
            bh_assert(0);
            return false;
    }
/* reached only via the POP/PUSH macros inside DEF_INT_BINARY_OP */
fail:
    return false;
}
/* Dispatch an integer bitwise opcode (and/or/xor). Each case pops two
 * operands and pushes the result via DEF_INT_BINARY_OP. */
static bool
compile_op_int_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       IntBitwise bitwise_op, bool is_i32)
{
    switch (bitwise_op) {
        case INT_AND:
            DEF_INT_BINARY_OP(
                LLVMBuildAnd(comp_ctx->builder, left, right, "and"),
                "llvm build and fail.");
            return true;
        case INT_OR:
            DEF_INT_BINARY_OP(LLVMBuildOr(comp_ctx->builder, left, right, "or"),
                              "llvm build or fail.");
            return true;
        case INT_XOR:
            DEF_INT_BINARY_OP(
                LLVMBuildXor(comp_ctx->builder, left, right, "xor"),
                "llvm build xor fail.");
            return true;
        default:
            bh_assert(0);
            return false;
    }
/* reached only via the POP/PUSH macros inside DEF_INT_BINARY_OP */
fail:
    return false;
}
/* Emit shift-left with the count wrapped to the bit width as Wasm
 * requires (SHIFT_COUNT_MASK rewrites `right`). Returns NULL on failure. */
static LLVMValueRef
compile_int_shl(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
                bool is_i32)
{
    LLVMValueRef res;
    SHIFT_COUNT_MASK;
    /* Build shl */
    LLVM_BUILD_OP(Shl, left, right, res, "shl", NULL);
    return res;
}
/* Emit arithmetic (sign-preserving) shift-right with the count wrapped to
 * the bit width (SHIFT_COUNT_MASK rewrites `right`). Returns NULL on
 * failure. */
static LLVMValueRef
compile_int_shr_s(AOTCompContext *comp_ctx, LLVMValueRef left,
                  LLVMValueRef right, bool is_i32)
{
    LLVMValueRef res;
    SHIFT_COUNT_MASK;
    /* Build shr_s (the original comment said "shl" — copy-paste slip) */
    LLVM_BUILD_OP(AShr, left, right, res, "shr_s", NULL);
    return res;
}
/* Emit logical (zero-filling) shift-right with the count wrapped to the
 * bit width (SHIFT_COUNT_MASK rewrites `right`). Returns NULL on failure. */
static LLVMValueRef
compile_int_shr_u(AOTCompContext *comp_ctx, LLVMValueRef left,
                  LLVMValueRef right, bool is_i32)
{
    LLVMValueRef res;
    SHIFT_COUNT_MASK;
    /* Build shr_u (the original comment said "shl" — copy-paste slip) */
    LLVM_BUILD_OP(LShr, left, right, res, "shr_u", NULL);
    return res;
}
/* Emit rotate-left/rotate-right as two shifts plus an OR:
 *   rotl: (x << n) | (x >> ((BITS - n) & mask))
 *   rotr: (x >> n) | (x << ((BITS - n) & mask))
 * The counts are masked to the bit width so LLVM never sees an
 * out-of-range shift. Returns NULL on failure. */
static LLVMValueRef
compile_int_rot(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
                bool is_rotl, bool is_i32)
{
    LLVMValueRef bits_minus_shift_count, res, tmp_l, tmp_r;
    char *name = is_rotl ? "rotl" : "rotr";
    /* wrap the count to the bit width (rewrites `right`) */
    SHIFT_COUNT_MASK;
    /* rotl/rotr with 0 is the identity: return the input unchanged */
    if (IS_CONST_ZERO(right))
        return left;
    /* Calculate (bits - shift_count) */
    LLVM_BUILD_OP(Sub, is_i32 ? I32_32 : I64_64, right, bits_minus_shift_count,
                  "bits_minus_shift_count", NULL);
    /* Calculate (bits - shift_count) & mask, so a count of 0 does not turn
       into a full-width (undefined) shift on the opposite side */
    bits_minus_shift_count =
        LLVMBuildAnd(comp_ctx->builder, bits_minus_shift_count,
                     is_i32 ? I32_31 : I64_63, "bits_minus_shift_count_and");
    if (!bits_minus_shift_count) {
        aot_set_last_error("llvm build and failed.");
        return NULL;
    }
    if (is_rotl) {
        /* (left << count) | (left >> ((BITS - count) & mask)) */
        LLVM_BUILD_OP(Shl, left, right, tmp_l, "tmp_l", NULL);
        LLVM_BUILD_OP(LShr, left, bits_minus_shift_count, tmp_r, "tmp_r", NULL);
    }
    else {
        /* (left >> count) | (left << ((BITS - count) & mask)) */
        LLVM_BUILD_OP(LShr, left, right, tmp_l, "tmp_l", NULL);
        LLVM_BUILD_OP(Shl, left, bits_minus_shift_count, tmp_r, "tmp_r", NULL);
    }
    LLVM_BUILD_OP(Or, tmp_l, tmp_r, res, name, NULL);
    return res;
}
/* Dispatch an integer shift/rotate opcode to the helpers above. Each
 * DEF_INT_BINARY_OP expansion pops two operands and pushes the result. */
static bool
compile_op_int_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                     IntShift shift_op, bool is_i32)
{
    switch (shift_op) {
        case INT_SHL:
            DEF_INT_BINARY_OP(compile_int_shl(comp_ctx, left, right, is_i32),
                              NULL);
            return true;
        case INT_SHR_S:
            DEF_INT_BINARY_OP(compile_int_shr_s(comp_ctx, left, right, is_i32),
                              NULL);
            return true;
        case INT_SHR_U:
            DEF_INT_BINARY_OP(compile_int_shr_u(comp_ctx, left, right, is_i32),
                              NULL);
            return true;
        case INT_ROTL:
            DEF_INT_BINARY_OP(
                compile_int_rot(comp_ctx, left, right, true, is_i32), NULL);
            return true;
        case INT_ROTR:
            DEF_INT_BINARY_OP(
                compile_int_rot(comp_ctx, left, right, false, is_i32), NULL);
            return true;
        default:
            bh_assert(0);
            return false;
    }
/* reached only via the POP/PUSH macros inside DEF_INT_BINARY_OP */
fail:
    return false;
}
  727. static bool
  728. is_target_arm(AOTCompContext *comp_ctx)
  729. {
  730. return !strncmp(comp_ctx->target_arch, "arm", 3)
  731. || !strncmp(comp_ctx->target_arch, "aarch64", 7)
  732. || !strncmp(comp_ctx->target_arch, "thumb", 5);
  733. }
  734. static bool
  735. is_target_x86(AOTCompContext *comp_ctx)
  736. {
  737. return !strncmp(comp_ctx->target_arch, "x86_64", 6)
  738. || !strncmp(comp_ctx->target_arch, "i386", 4);
  739. }
  740. static bool
  741. is_target_xtensa(AOTCompContext *comp_ctx)
  742. {
  743. return !strncmp(comp_ctx->target_arch, "xtensa", 6);
  744. }
  745. static bool
  746. is_target_mips(AOTCompContext *comp_ctx)
  747. {
  748. return !strncmp(comp_ctx->target_arch, "mips", 4);
  749. }
  750. static bool
  751. is_target_riscv(AOTCompContext *comp_ctx)
  752. {
  753. return !strncmp(comp_ctx->target_arch, "riscv", 5);
  754. }
/* Decide whether float ops of the given width should be lowered through
 * soft-float (the constrained-intrinsic path) for the current target, by
 * inspecting the target machine's feature string. Defaults to true for
 * unrecognized architectures. Returns false on error (after recording it),
 * which callers cannot distinguish from "hard float" — pre-existing
 * behavior, kept as is. */
static bool
is_targeting_soft_float(AOTCompContext *comp_ctx, bool is_f32)
{
    bool ret = false;
    char *feature_string;
    if (!(feature_string =
              LLVMGetTargetMachineFeatureString(comp_ctx->target_machine))) {
        aot_set_last_error("llvm get target machine feature string fail.");
        return false;
    }
    /* Note:
     * LLVM CodeGen uses FPU Coprocessor registers by default,
     * so user must specify '--cpu-features=+soft-float' to wamrc if the target
     * doesn't have or enable FPU on arm, x86 or mips. */
    if (is_target_arm(comp_ctx) || is_target_x86(comp_ctx)
        || is_target_mips(comp_ctx)) {
        ret = strstr(feature_string, "+soft-float") ? true : false;
    }
    else if (is_target_xtensa(comp_ctx)) {
        /* Note:
         * 1. The Floating-Point Coprocessor Option of xtensa only support
         * single-precision floating-point operations, so must use soft-float
         * for f64(i.e. double).
         * 2. LLVM CodeGen uses Floating-Point Coprocessor registers by default,
         * so user must specify '--cpu-features=-fp' to wamrc if the target
         * doesn't have or enable Floating-Point Coprocessor Option on xtensa.
         */
        if (comp_ctx->disable_llvm_intrinsics)
            ret = false;
        else
            ret = (!is_f32 || strstr(feature_string, "-fp")) ? true : false;
    }
    else if (is_target_riscv(comp_ctx)) {
        /*
         * Note: Use builtin intrinsics since hardware float operation
         * will cause rodata relocation, this will try to use hardware
         * float unit (by return false) but handled by software finally
         */
        if (comp_ctx->disable_llvm_intrinsics)
            ret = false;
        else
            ret = !strstr(feature_string, "+d") ? true : false;
    }
    else {
        /* unknown target: be conservative and use soft-float */
        ret = true;
    }
    /* feature string is heap-allocated by LLVM; release it on every path */
    LLVMDisposeMessage(feature_string);
    return ret;
}
/* Emit LLVM IR for a binary f32/f64 arithmetic opcode
 * (add/sub/mul/div/min/max).
 *
 * For add/sub/mul/div:
 *  - when targeting soft-float, emit a plain LLVM fp instruction and let
 *    the backend lower it (typically to a runtime library call);
 *  - otherwise use the llvm.experimental.constrained.* intrinsics so the
 *    rounding mode and fp-exception behavior configured in comp_ctx
 *    (fp_rounding_mode / fp_exception_behavior) are honored.
 *
 * min/max need extra NaN/signed-zero handling per the wasm spec and are
 * delegated to compile_op_float_min_max (defined earlier in this file).
 *
 * NOTE(review): DEF_FP_BINARY_OP is a macro defined earlier in this file;
 * it evidently introduces the `left`/`right` operand values, pushes the
 * result, and on error presumably jumps to the `fail` label below — that
 * is what keeps `fail` reachable even though every case returns.
 *
 * Returns true on success, false on failure. */
static bool
compile_op_float_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                            FloatArithmetic arith_op, bool is_f32)
{
    switch (arith_op) {
        case FLOAT_ADD:
            if (is_targeting_soft_float(comp_ctx, is_f32))
                DEF_FP_BINARY_OP(
                    LLVMBuildFAdd(comp_ctx->builder, left, right, "fadd"),
                    "llvm build fadd fail.");
            else
                DEF_FP_BINARY_OP(
                    call_llvm_float_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.fadd.f32"
                                : "llvm.experimental.constrained.fadd.f64"),
                        left, right, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        case FLOAT_SUB:
            if (is_targeting_soft_float(comp_ctx, is_f32))
                DEF_FP_BINARY_OP(
                    LLVMBuildFSub(comp_ctx->builder, left, right, "fsub"),
                    "llvm build fsub fail.");
            else
                DEF_FP_BINARY_OP(
                    call_llvm_float_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.fsub.f32"
                                : "llvm.experimental.constrained.fsub.f64"),
                        left, right, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        case FLOAT_MUL:
            if (is_targeting_soft_float(comp_ctx, is_f32))
                DEF_FP_BINARY_OP(
                    LLVMBuildFMul(comp_ctx->builder, left, right, "fmul"),
                    "llvm build fmul fail.");
            else
                DEF_FP_BINARY_OP(
                    call_llvm_float_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.fmul.f32"
                                : "llvm.experimental.constrained.fmul.f64"),
                        left, right, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        case FLOAT_DIV:
            if (is_targeting_soft_float(comp_ctx, is_f32))
                DEF_FP_BINARY_OP(
                    LLVMBuildFDiv(comp_ctx->builder, left, right, "fdiv"),
                    "llvm build fdiv fail.");
            else
                DEF_FP_BINARY_OP(
                    call_llvm_float_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.fdiv.f32"
                                : "llvm.experimental.constrained.fdiv.f64"),
                        left, right, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        case FLOAT_MIN:
            /* last arg true selects "min" in the shared min/max helper */
            DEF_FP_BINARY_OP(compile_op_float_min_max(
                                 comp_ctx, func_ctx, is_f32, left, right, true),
                             NULL);
            return true;
        case FLOAT_MAX:
            /* last arg false selects "max" */
            DEF_FP_BINARY_OP(compile_op_float_min_max(comp_ctx, func_ctx,
                                                      is_f32, left, right,
                                                      false),
                             NULL);
            return true;
        default:
            bh_assert(0);
            return false;
    }
/* reached only via the error path hidden inside DEF_FP_BINARY_OP */
fail:
    return false;
}
  887. static LLVMValueRef
  888. call_llvm_float_math_intrinsic(AOTCompContext *comp_ctx,
  889. AOTFuncContext *func_ctx, bool is_f32,
  890. const char *intrinsic, ...)
  891. {
  892. va_list param_value_list;
  893. LLVMValueRef ret;
  894. LLVMTypeRef param_type, ret_type = is_f32 ? F32_TYPE : F64_TYPE;
  895. param_type = ret_type;
  896. va_start(param_value_list, intrinsic);
  897. ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
  898. &param_type, 1, param_value_list);
  899. va_end(param_value_list);
  900. return ret;
  901. }
/* Emit LLVM IR for a unary f32/f64 math opcode
 * (abs/neg/ceil/floor/trunc/nearest/sqrt).
 *
 * Most ops map directly to the corresponding llvm.* intrinsic; neg is a
 * plain fneg instruction. wasm's "nearest" is emitted as llvm.rint, which
 * rounds according to the current rounding mode (round-to-nearest-even by
 * default, matching the wasm semantics).
 *
 * sqrt additionally uses llvm.experimental.constrained.sqrt.* when
 * hardware float is available and intrinsics are not disabled, so
 * comp_ctx's rounding mode and fp-exception behavior are honored.
 *
 * NOTE(review): DEF_FP_UNARY_OP is a macro defined earlier in this file;
 * it evidently introduces the `operand` value, pushes the result, and on
 * error presumably jumps to the `fail` label below.
 *
 * Returns true on success, false on failure. */
static bool
compile_op_float_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                      FloatMath math_op, bool is_f32)
{
    switch (math_op) {
        case FLOAT_ABS:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.fabs.f32" : "llvm.fabs.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_NEG:
            DEF_FP_UNARY_OP(LLVMBuildFNeg(comp_ctx->builder, operand, "fneg"),
                            "llvm build fneg fail.");
            return true;
        case FLOAT_CEIL:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.ceil.f32" : "llvm.ceil.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_FLOOR:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.floor.f32" : "llvm.floor.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_TRUNC:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.trunc.f32" : "llvm.trunc.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_NEAREST:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.rint.f32" : "llvm.rint.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_SQRT:
            if (is_targeting_soft_float(comp_ctx, is_f32)
                || comp_ctx->disable_llvm_intrinsics)
                DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                    comp_ctx, func_ctx, is_f32,
                                    is_f32 ? "llvm.sqrt.f32" : "llvm.sqrt.f64",
                                    operand),
                                NULL);
            else
                DEF_FP_UNARY_OP(
                    call_llvm_libm_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.sqrt.f32"
                                : "llvm.experimental.constrained.sqrt.f64"),
                        operand, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        default:
            bh_assert(0);
            return false;
    }
    /* unreachable: every switch case returns */
    return true;
/* reached only via the error path hidden inside DEF_FP_UNARY_OP */
fail:
    return false;
}
/* Emit f32/f64 copysign: the result has the magnitude of `left` and the
 * sign of `right`, via the llvm.copysign.* intrinsic (two fp parameters,
 * fp result, all of the same type).
 *
 * NOTE(review): DEF_FP_BINARY_OP (macro defined earlier in this file)
 * evidently introduces `left`/`right`, pushes the result, and on error
 * presumably jumps to the `fail` label below.
 *
 * Returns true on success, false on failure. */
static bool
compile_float_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       bool is_f32)
{
    LLVMTypeRef ret_type, param_types[2];

    param_types[0] = param_types[1] = ret_type = is_f32 ? F32_TYPE : F64_TYPE;

    DEF_FP_BINARY_OP(aot_call_llvm_intrinsic(
                         comp_ctx, func_ctx,
                         is_f32 ? "llvm.copysign.f32" : "llvm.copysign.f64",
                         ret_type, param_types, 2, left, right),
                     NULL);
    return true;

fail:
    return false;
}
/* i32.clz: count leading zero bits (CLZ32, 32-bit variant). */
bool
aot_compile_op_i32_clz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, CLZ32, true);
}
/* i32.ctz: count trailing zero bits (CTZ32, 32-bit variant). */
bool
aot_compile_op_i32_ctz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, CTZ32, true);
}
/* i32.popcnt: count set bits (POP_CNT32, 32-bit variant). */
bool
aot_compile_op_i32_popcnt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, POP_CNT32, true);
}
/* i64.clz: count leading zero bits (CLZ64, 64-bit variant). */
bool
aot_compile_op_i64_clz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, CLZ64, false);
}
/* i64.ctz: count trailing zero bits (CTZ64, 64-bit variant). */
bool
aot_compile_op_i64_ctz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, CTZ64, false);
}
/* i64.popcnt: count set bits (POP_CNT64, 64-bit variant). */
bool
aot_compile_op_i64_popcnt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, POP_CNT64, false);
}
/* i32 arithmetic opcodes (add/sub/mul/div/rem...): 32-bit wrapper over
 * the shared integer-arithmetic emitter. */
bool
aot_compile_op_i32_arithmetic(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, IntArithmetic arith_op,
                              uint8 **p_frame_ip)
{
    return compile_op_int_arithmetic(comp_ctx, func_ctx, arith_op, true,
                                     p_frame_ip);
}
/* i64 arithmetic opcodes: 64-bit wrapper over the shared
 * integer-arithmetic emitter. */
bool
aot_compile_op_i64_arithmetic(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, IntArithmetic arith_op,
                              uint8 **p_frame_ip)
{
    return compile_op_int_arithmetic(comp_ctx, func_ctx, arith_op, false,
                                     p_frame_ip);
}
/* i32 bitwise opcodes (and/or/xor): 32-bit wrapper. */
bool
aot_compile_op_i32_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           IntBitwise bitwise_op)
{
    return compile_op_int_bitwise(comp_ctx, func_ctx, bitwise_op, true);
}
/* i64 bitwise opcodes (and/or/xor): 64-bit wrapper. */
bool
aot_compile_op_i64_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           IntBitwise bitwise_op)
{
    return compile_op_int_bitwise(comp_ctx, func_ctx, bitwise_op, false);
}
/* i32 shift/rotate opcodes: 32-bit wrapper. */
bool
aot_compile_op_i32_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         IntShift shift_op)
{
    return compile_op_int_shift(comp_ctx, func_ctx, shift_op, true);
}
/* i64 shift/rotate opcodes: 64-bit wrapper. */
bool
aot_compile_op_i64_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         IntShift shift_op)
{
    return compile_op_int_shift(comp_ctx, func_ctx, shift_op, false);
}
/* f32 unary math opcodes (abs/neg/ceil/floor/trunc/nearest/sqrt). */
bool
aot_compile_op_f32_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        FloatMath math_op)
{
    return compile_op_float_math(comp_ctx, func_ctx, math_op, true);
}
/* f64 unary math opcodes (abs/neg/ceil/floor/trunc/nearest/sqrt). */
bool
aot_compile_op_f64_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        FloatMath math_op)
{
    return compile_op_float_math(comp_ctx, func_ctx, math_op, false);
}
/* f32 binary arithmetic opcodes (add/sub/mul/div/min/max). */
bool
aot_compile_op_f32_arithmetic(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              FloatArithmetic arith_op)
{
    return compile_op_float_arithmetic(comp_ctx, func_ctx, arith_op, true);
}
/* f64 binary arithmetic opcodes (add/sub/mul/div/min/max). */
bool
aot_compile_op_f64_arithmetic(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              FloatArithmetic arith_op)
{
    return compile_op_float_arithmetic(comp_ctx, func_ctx, arith_op, false);
}
/* f32.copysign: magnitude of the first operand, sign of the second. */
bool
aot_compile_op_f32_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return compile_float_copysign(comp_ctx, func_ctx, true);
}
/* f64.copysign: magnitude of the first operand, sign of the second. */
bool
aot_compile_op_f64_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return compile_float_copysign(comp_ctx, func_ctx, false);
}