aot_emit_numberic.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253
  1. /*
  2. * Copyright (C) 2020 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "aot_emit_numberic.h"
  6. #include "aot_emit_exception.h"
  7. #include "aot_emit_control.h"
  8. #include "../aot/aot_runtime.h"
  9. #include "../aot/aot_intrinsic.h"
  10. #include <stdarg.h>
/* Build an LLVM integer compare instruction.
 * `op` is an LLVMIntPredicate; `name` must be a string literal, since it
 * is concatenated into the error message. On failure, sets the last
 * error and returns false from the *enclosing* function. Requires a
 * local `comp_ctx` at the expansion site. */
#define LLVM_BUILD_ICMP(op, left, right, res, name)                       \
    do {                                                                  \
        if (!(res =                                                       \
                  LLVMBuildICmp(comp_ctx->builder, op, left, right,       \
                                name))) {                                 \
            aot_set_last_error("llvm build " name " fail.");              \
            return false;                                                 \
        }                                                                 \
    } while (0)
  19. #define LLVM_BUILD_OP(Op, left, right, res, name, err_ret) \
  20. do { \
  21. if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) { \
  22. aot_set_last_error("llvm build " #name " fail."); \
  23. return err_ret; \
  24. } \
  25. } while (0)
/* Build a binary instruction, or — when LLVM intrinsics are disabled
 * (XIP mode) and the named runtime intrinsic is available — emit a call
 * to that intrinsic instead. Requires locals `comp_ctx`, `func_ctx` and
 * a `param_types[2]` array (already filled with the operand type) at the
 * expansion site.
 * NOTE(review): in the intrinsic branch `res` is not NULL-checked here;
 * presumably callers rely on downstream checks — confirm. */
#define LLVM_BUILD_OP_OR_INTRINSIC(Op, left, right, res, intrinsic, name, \
                                   err_ret)                               \
    do {                                                                  \
        if (comp_ctx->disable_llvm_intrinsics                             \
            && aot_intrinsic_check_capability(comp_ctx, intrinsic)) {     \
            res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,  \
                                          param_types[0], param_types, 2, \
                                          left, right);                   \
        }                                                                 \
        else {                                                            \
            LLVM_BUILD_OP(Op, left, right, res, name, false);             \
        }                                                                 \
    } while (0)
/* Append a new basic block named `name` to the current function and move
 * it right after the builder's current insert block, so blocks appear in
 * source order. On failure, sets the last error and jumps to the
 * enclosing function's `fail` label. */
#define ADD_BASIC_BLOCK(block, name)                                        \
    do {                                                                    \
        if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,      \
                                                    func_ctx->func,         \
                                                    name))) {               \
            aot_set_last_error("llvm add basic block failed.");             \
            goto fail;                                                      \
        }                                                                   \
                                                                            \
        LLVMMoveBasicBlockAfter(block, LLVMGetInsertBlock(comp_ctx->builder)); \
    } while (0)
/* True iff `val` is a constant integer equal to zero, using the width
 * selected by the local `is_i32` flag at the expansion site.
 * NOTE(review): both preprocessor branches are currently identical; the
 * LLVM >= 12 split presumably existed for a poison-value distinction —
 * confirm against upstream before collapsing the #if. */
#if LLVM_VERSION_NUMBER >= 12
#define IS_CONST_ZERO(val)                                          \
    (LLVMIsEfficientConstInt(val)                                   \
     && ((is_i32 && (int32)LLVMConstIntGetZExtValue(val) == 0)      \
         || (!is_i32 && (int64)LLVMConstIntGetSExtValue(val) == 0)))
#else
#define IS_CONST_ZERO(val)                                          \
    (LLVMIsEfficientConstInt(val)                                   \
     && ((is_i32 && (int32)LLVMConstIntGetZExtValue(val) == 0)      \
         || (!is_i32 && (int64)LLVMConstIntGetSExtValue(val) == 0)))
#endif
/* Compute `overflow = (left == INT_MIN && right == -1)` for signed
 * division: the one case where wasm div_s must trap with integer
 * overflow. `type` is I32 or I64, pasted to form the <type>_MIN /
 * <type>_NEG_ONE constants; the result lands in a local `overflow`. */
#define CHECK_INT_OVERFLOW(type)                                       \
    do {                                                               \
        LLVMValueRef cmp_min_int, cmp_neg_one;                         \
        LLVM_BUILD_ICMP(LLVMIntEQ, left, type##_MIN, cmp_min_int,      \
                        "cmp_min_int");                                \
        LLVM_BUILD_ICMP(LLVMIntEQ, right, type##_NEG_ONE, cmp_neg_one, \
                        "cmp_neg_one");                                \
        LLVM_BUILD_OP(And, cmp_min_int, cmp_neg_one, overflow,         \
                      "overflow", false);                              \
    } while (0)
/* Value-stack helpers that select the 32- or 64-bit variant based on the
 * local `is_i32` / `is_f32` flag at the expansion site. */
#define PUSH_INT(v)      \
    do {                 \
        if (is_i32)      \
            PUSH_I32(v); \
        else             \
            PUSH_I64(v); \
    } while (0)
#define POP_INT(v)      \
    do {                \
        if (is_i32)     \
            POP_I32(v); \
        else            \
            POP_I64(v); \
    } while (0)
#define PUSH_FLOAT(v)    \
    do {                 \
        if (is_f32)      \
            PUSH_F32(v); \
        else             \
            PUSH_F64(v); \
    } while (0)
#define POP_FLOAT(v)    \
    do {                \
        if (is_f32)     \
            POP_F32(v); \
        else            \
            POP_F64(v); \
    } while (0)
/* Operator templates: pop the operand(s) into locals (`operand`, or
 * `left`/`right` with right popped first), evaluate the expression `op`
 * which may reference those locals, and push the result. If `op` yields
 * NULL, set the last error to `err` (when non-NULL; pass NULL when `op`
 * sets its own error) and return false from the enclosing function. */
#define DEF_INT_UNARY_OP(op, err)        \
    do {                                 \
        LLVMValueRef res, operand;       \
        POP_INT(operand);                \
        if (!(res = op)) {               \
            if (err)                     \
                aot_set_last_error(err); \
            return false;                \
        }                                \
        PUSH_INT(res);                   \
    } while (0)
#define DEF_INT_BINARY_OP(op, err)       \
    do {                                 \
        LLVMValueRef res, left, right;   \
        POP_INT(right);                  \
        POP_INT(left);                   \
        if (!(res = op)) {               \
            if (err)                     \
                aot_set_last_error(err); \
            return false;                \
        }                                \
        PUSH_INT(res);                   \
    } while (0)
#define DEF_FP_UNARY_OP(op, err)         \
    do {                                 \
        LLVMValueRef res, operand;       \
        POP_FLOAT(operand);              \
        if (!(res = op)) {               \
            if (err)                     \
                aot_set_last_error(err); \
            return false;                \
        }                                \
        PUSH_FLOAT(res);                 \
    } while (0)
#define DEF_FP_BINARY_OP(op, err)        \
    do {                                 \
        LLVMValueRef res, left, right;   \
        POP_FLOAT(right);                \
        POP_FLOAT(left);                 \
        if (!(res = op)) {               \
            if (err)                     \
                aot_set_last_error(err); \
            return false;                \
        }                                \
        PUSH_FLOAT(res);                 \
    } while (0)
/* Wrap the shift count to the operand width, rebinding the local
 * `right`. Note: the NULL err_ret means this may only be expanded in
 * functions that return a pointer. */
#define SHIFT_COUNT_MASK                                          \
    do {                                                          \
        /* LLVM has undefined behavior if shift count is greater  \
         * than bits count while Webassembly spec requires the    \
         * shift count be wrapped.                                \
         */                                                       \
        LLVMValueRef shift_count_mask, bits_minus_one;            \
        bits_minus_one = is_i32 ? I32_31 : I64_63;                \
        LLVM_BUILD_OP(And, right, bits_minus_one,                 \
                      shift_count_mask, "shift_count_mask", NULL);\
        right = shift_count_mask;                                 \
    } while (0)
  156. /* Call llvm constrained floating-point intrinsic */
  157. static LLVMValueRef
  158. call_llvm_float_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
  159. AOTFuncContext *func_ctx,
  160. bool is_f32,
  161. const char *intrinsic, ...)
  162. {
  163. va_list param_value_list;
  164. LLVMValueRef ret;
  165. LLVMTypeRef param_types[4], ret_type = is_f32 ? F32_TYPE : F64_TYPE;
  166. int param_count = (comp_ctx->disable_llvm_intrinsics
  167. && aot_intrinsic_check_capability(comp_ctx, intrinsic))
  168. ? 2
  169. : 4;
  170. param_types[0] = param_types[1] = ret_type;
  171. param_types[2] = param_types[3] = MD_TYPE;
  172. va_start(param_value_list, intrinsic);
  173. ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
  174. param_types, param_count, param_value_list);
  175. va_end(param_value_list);
  176. return ret;
  177. }
  178. /* Call llvm constrained libm-equivalent intrinsic */
  179. static LLVMValueRef
  180. call_llvm_libm_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
  181. AOTFuncContext *func_ctx,
  182. bool is_f32,
  183. const char *intrinsic, ...)
  184. {
  185. va_list param_value_list;
  186. LLVMValueRef ret;
  187. LLVMTypeRef param_types[3], ret_type = is_f32 ? F32_TYPE : F64_TYPE;
  188. param_types[0] = ret_type;
  189. param_types[1] = param_types[2] = MD_TYPE;
  190. va_start(param_value_list, intrinsic);
  191. ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
  192. param_types, 3, param_value_list);
  193. va_end(param_value_list);
  194. return ret;
  195. }
/* Emit wasm f32/f64 min/max with the spec's NaN and signed-zero
 * semantics: any NaN operand yields NaN, and when the operands compare
 * equal (which includes -0 == +0) the sign is resolved bitwise —
 * OR of the bit patterns for min (so -0 wins), AND for max (so +0 wins).
 * In XIP mode the comparisons are routed through the f32_cmp/f64_cmp
 * runtime intrinsics instead of native fcmp.
 * Returns the result value, or NULL on error. */
static LLVMValueRef
compile_op_float_min_max(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         bool is_f32, LLVMValueRef left, LLVMValueRef right,
                         bool is_min)
{
    LLVMTypeRef float_param_types[2];
    LLVMTypeRef param_types[2], ret_type = is_f32 ? F32_TYPE : F64_TYPE,
                                int_type = is_f32 ? I32_TYPE : I64_TYPE;
    LLVMValueRef cmp, is_eq, is_nan, ret, left_int, right_int, tmp,
        nan = LLVMConstRealOfString(ret_type, "NaN");
    char *intrinsic = is_min ? (is_f32 ? "llvm.minnum.f32" : "llvm.minnum.f64")
                             : (is_f32 ? "llvm.maxnum.f32" : "llvm.maxnum.f64");
    CHECK_LLVM_CONST(nan);
    /* Note: param_types is used by LLVM_BUILD_OP_OR_INTRINSIC */
    param_types[0] = param_types[1] = int_type;
    float_param_types[0] = float_param_types[1] = ret_type;
    if (comp_ctx->disable_llvm_intrinsics
        && aot_intrinsic_check_capability(comp_ctx,
                                          is_f32 ? "f32_cmp" : "f64_cmp")) {
        /* XIP path: compute is_nan (unordered compare) and is_eq
           (ordered-equal compare) via the runtime cmp intrinsic, which
           returns an i32 that must be narrowed to i1. */
        LLVMTypeRef param_types_intrinsic[3];
        LLVMValueRef opcond = LLVMConstInt(I32_TYPE, FLOAT_UNO, true);
        param_types_intrinsic[0] = I32_TYPE;
        param_types_intrinsic[1] = is_f32 ? F32_TYPE : F64_TYPE;
        param_types_intrinsic[2] = param_types_intrinsic[1];
        is_nan = aot_call_llvm_intrinsic(
            comp_ctx, func_ctx, is_f32 ? "f32_cmp" : "f64_cmp", I32_TYPE,
            param_types_intrinsic, 3, opcond, left, right);
        opcond = LLVMConstInt(I32_TYPE, FLOAT_EQ, true);
        is_eq = aot_call_llvm_intrinsic(
            comp_ctx, func_ctx, is_f32 ? "f32_cmp" : "f64_cmp", I32_TYPE,
            param_types_intrinsic, 3, opcond, left, right);
        if (!is_nan || !is_eq) {
            return NULL;
        }
        if (!(is_nan = LLVMBuildIntCast(comp_ctx->builder, is_nan, INT1_TYPE,
                                        "bit_cast_is_nan"))) {
            aot_set_last_error("llvm build is_nan bit cast fail.");
            return NULL;
        }
        if (!(is_eq = LLVMBuildIntCast(comp_ctx->builder, is_eq, INT1_TYPE,
                                       "bit_cast_is_eq"))) {
            aot_set_last_error("llvm build is_eq bit cast fail.");
            return NULL;
        }
    }
    else if (!(is_nan = LLVMBuildFCmp(comp_ctx->builder, LLVMRealUNO, left,
                                      right, "is_nan"))
             || !(is_eq = LLVMBuildFCmp(comp_ctx->builder, LLVMRealOEQ, left,
                                        right, "is_eq"))) {
        aot_set_last_error("llvm build fcmp fail.");
        return NULL;
    }
    /* If left and right are equal, they may be zero with different sign.
       Webassembly spec assert -0 < +0. So do a bitwise here. */
    if (!(left_int =
              LLVMBuildBitCast(comp_ctx->builder, left, int_type, "left_int"))
        || !(right_int = LLVMBuildBitCast(comp_ctx->builder, right, int_type,
                                          "right_int"))) {
        aot_set_last_error("llvm build bitcast fail.");
        return NULL;
    }
    if (is_min)
        /* OR of the bit patterns keeps a set sign bit: -0 wins for min */
        LLVM_BUILD_OP_OR_INTRINSIC(Or, left_int, right_int, tmp,
                                   is_f32 ? "i32.or" : "i64.or", "tmp_int",
                                   false);
    else
        /* AND clears the sign bit unless both are negative: +0 wins for max */
        LLVM_BUILD_OP_OR_INTRINSIC(And, left_int, right_int, tmp,
                                   is_f32 ? "i32.and" : "i64.and", "tmp_int",
                                   false);
    if (!(tmp = LLVMBuildBitCast(comp_ctx->builder, tmp, ret_type, "tmp"))) {
        aot_set_last_error("llvm build bitcast fail.");
        return NULL;
    }
    /* The generic case: llvm.minnum/llvm.maxnum */
    if (!(cmp = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic, ret_type,
                                        float_param_types, 2, left, right)))
        return NULL;
    /* The result of XIP intrinsic is 0 or 1, should return it directly */
    if (comp_ctx->disable_llvm_intrinsics
        && aot_intrinsic_check_capability(comp_ctx,
                                          is_f32 ? "f32_cmp" : "f64_cmp")) {
        return cmp;
    }
    /* select(is_nan, NaN, select(is_eq, bitwise_result, minnum/maxnum)) */
    if (!(cmp = LLVMBuildSelect(comp_ctx->builder, is_eq, tmp, cmp, "cmp"))) {
        aot_set_last_error("llvm build select fail.");
        return NULL;
    }
    if (!(ret = LLVMBuildSelect(comp_ctx->builder, is_nan, nan, cmp,
                                is_min ? "min" : "max"))) {
        aot_set_last_error("llvm build select fail.");
        return NULL;
    }
    return ret;
fail:
    return NULL;
}
/* Bit-count operation selector; the enum value doubles as an index into
 * bit_cnt_llvm_intrinsic[], so the two must stay in the same order. */
typedef enum BitCountType {
    CLZ32 = 0, /* count leading zeros, i32 */
    CLZ64,     /* count leading zeros, i64 */
    CTZ32,     /* count trailing zeros, i32 */
    CTZ64,     /* count trailing zeros, i64 */
    POP_CNT32, /* population count, i32 */
    POP_CNT64  /* population count, i64 */
} BitCountType;
/* LLVM intrinsic names, indexed by BitCountType (order must match). */
/* clang-format off */
static char *bit_cnt_llvm_intrinsic[] = {
    "llvm.ctlz.i32",
    "llvm.ctlz.i64",
    "llvm.cttz.i32",
    "llvm.cttz.i64",
    "llvm.ctpop.i32",
    "llvm.ctpop.i64",
};
/* clang-format on */
/* Compile i32/i64 clz/ctz/popcnt by calling the matching LLVM intrinsic.
 * Pops one integer operand and pushes the count. ctlz/cttz take a second
 * i1 argument ("is zero poison/undef"); it is passed as false so the
 * intrinsic is defined for a zero input, as wasm requires. ctpop takes
 * only the operand. Returns false on error. */
static bool
aot_compile_int_bit_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          BitCountType type, bool is_i32)
{
    LLVMValueRef zero_undef;
    LLVMTypeRef ret_type, param_types[2];
    param_types[0] = ret_type = is_i32 ? I32_TYPE : I64_TYPE;
    param_types[1] = LLVMInt1TypeInContext(comp_ctx->context);
    /* i1 false: a zero input is well-defined (returns the bit width) */
    zero_undef = LLVMConstInt(param_types[1], false, true);
    CHECK_LLVM_CONST(zero_undef);
    /* Call the LLVM intrinsic function */
    if (type < POP_CNT32)
        DEF_INT_UNARY_OP(aot_call_llvm_intrinsic(
                             comp_ctx, func_ctx, bit_cnt_llvm_intrinsic[type],
                             ret_type, param_types, 2, operand, zero_undef),
                         NULL);
    else
        DEF_INT_UNARY_OP(aot_call_llvm_intrinsic(
                             comp_ctx, func_ctx, bit_cnt_llvm_intrinsic[type],
                             ret_type, param_types, 1, operand),
                         NULL);
    return true;
fail:
    return false;
}
/* Emit i32/i64 rem_s with the INT_MIN % -1 overflow case handled.
 * Wasm defines INT_MIN rem -1 as 0, while LLVM's srem would overflow.
 * `overflow_cond` is a pre-computed i1 that is true exactly in that
 * case; when true the srem block is skipped and a phi yields the
 * constant 0. Pushes the phi result; returns false on error. */
static bool
compile_rems(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
             LLVMValueRef left, LLVMValueRef right, LLVMValueRef overflow_cond,
             bool is_i32)
{
    LLVMValueRef phi, no_overflow_value, zero = is_i32 ? I32_ZERO : I64_ZERO;
    LLVMBasicBlockRef block_curr, no_overflow_block, rems_end_block;
    LLVMTypeRef param_types[2];
    param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;
    block_curr = LLVMGetInsertBlock(comp_ctx->builder);
    /* Add 2 blocks: no_overflow_block and rems_end block */
    ADD_BASIC_BLOCK(rems_end_block, "rems_end");
    ADD_BASIC_BLOCK(no_overflow_block, "rems_no_overflow");
    /* Create condition br: overflow jumps straight to the end block */
    if (!LLVMBuildCondBr(comp_ctx->builder, overflow_cond, rems_end_block,
                         no_overflow_block)) {
        aot_set_last_error("llvm build cond br failed.");
        return false;
    }
    /* Translate no_overflow_block */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, no_overflow_block);
    LLVM_BUILD_OP_OR_INTRINSIC(SRem, left, right, no_overflow_value,
                               is_i32 ? "i32.rem_s" : "i64.rem_s", "rem_s",
                               false);
    /* Jump to rems_end block */
    if (!LLVMBuildBr(comp_ctx->builder, rems_end_block)) {
        aot_set_last_error("llvm build br failed.");
        return false;
    }
    /* Translate rems_end_block */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, rems_end_block);
    /* Create result phi */
    if (!(phi = LLVMBuildPhi(comp_ctx->builder, is_i32 ? I32_TYPE : I64_TYPE,
                             "rems_result_phi"))) {
        aot_set_last_error("llvm build phi failed.");
        return false;
    }
    /* Add phi incoming values: srem result, or 0 on the overflow edge */
    LLVMAddIncoming(phi, &no_overflow_value, &no_overflow_block, 1);
    LLVMAddIncoming(phi, &zero, &block_curr, 1);
    if (is_i32)
        PUSH_I32(phi);
    else
        PUSH_I64(phi);
    return true;
fail:
    return false;
}
/* Compile i32/i64 div_s/div_u/rem_s/rem_u.
 * Pops two integer operands, emits the wasm-mandated traps
 * (divide-by-zero, and INT_MIN/-1 overflow for div_s), then pushes the
 * result. When the divisor is a constant the checks are resolved at
 * compile time and runtime branches are omitted.
 *
 * @param arith_op   INT_DIV_S / INT_DIV_U / INT_REM_S / INT_REM_U
 * @param is_i32     true for i32 operands, false for i64
 * @param p_frame_ip in/out wasm bytecode pointer; used when the op
 *                   unconditionally traps and the rest of the block must
 *                   be skipped as unreachable
 * @return true on success, false on compile error */
static bool
compile_int_div(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                IntArithmetic arith_op, bool is_i32, uint8 **p_frame_ip)
{
    LLVMValueRef left, right, cmp_div_zero, overflow, res;
    LLVMBasicBlockRef check_div_zero_succ, check_overflow_succ;
    LLVMTypeRef param_types[2];
    const char *intrinsic = NULL;
    param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;
    bh_assert(arith_op == INT_DIV_S || arith_op == INT_DIV_U
              || arith_op == INT_REM_S || arith_op == INT_REM_U);
    POP_INT(right);
    POP_INT(left);
    /* Undef/poison operands: emit an unconditional trap instead of
       feeding them into a division. */
    if (LLVMIsUndef(right) || LLVMIsUndef(left)
#if LLVM_VERSION_NUMBER >= 12
        || LLVMIsPoison(right) || LLVMIsPoison(left)
#endif
    ) {
        if (!(aot_emit_exception(comp_ctx, func_ctx, EXCE_INTEGER_OVERFLOW,
                                 false, NULL, NULL))) {
            goto fail;
        }
        return aot_handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
    }
    if (LLVMIsEfficientConstInt(right)) {
        /* Constant divisor: fold the zero/overflow checks now */
        int64 right_val = (int64)LLVMConstIntGetSExtValue(right);
        switch (right_val) {
            case 0:
                /* Directly throw exception if divided by zero */
                if (!(aot_emit_exception(comp_ctx, func_ctx,
                                         EXCE_INTEGER_DIVIDE_BY_ZERO, false,
                                         NULL, NULL)))
                    goto fail;
                return aot_handle_next_reachable_block(comp_ctx, func_ctx,
                                                       p_frame_ip);
            case 1:
                /* x / 1 == x, x % 1 == 0 */
                if (arith_op == INT_DIV_S || arith_op == INT_DIV_U)
                    PUSH_INT(left);
                else
                    PUSH_INT(is_i32 ? I32_ZERO : I64_ZERO);
                return true;
            case -1:
                /* div_s by -1 traps only for INT_MIN; rem_s by -1 is
                   always 0 per the wasm spec. For the unsigned ops -1 is
                   just a large divisor: use the generic path. */
                if (arith_op == INT_DIV_S) {
                    LLVM_BUILD_ICMP(LLVMIntEQ, left, is_i32 ? I32_MIN : I64_MIN,
                                    overflow, "overflow");
                    ADD_BASIC_BLOCK(check_overflow_succ,
                                    "check_overflow_success");
                    /* Throw conditional exception if overflow */
                    if (!(aot_emit_exception(comp_ctx, func_ctx,
                                             EXCE_INTEGER_OVERFLOW, true,
                                             overflow, check_overflow_succ)))
                        goto fail;
                    /* Push -(left) to stack */
                    if (!(res = LLVMBuildNeg(comp_ctx->builder, left, "neg"))) {
                        aot_set_last_error("llvm build neg fail.");
                        return false;
                    }
                    PUSH_INT(res);
                    return true;
                }
                else if (arith_op == INT_REM_S) {
                    PUSH_INT(is_i32 ? I32_ZERO : I64_ZERO);
                    return true;
                }
                else {
                    /* fall to default */
                    goto handle_default;
                }
            handle_default:
            default:
                /* Build div: divisor is a non-zero, non-trapping constant,
                   so no runtime checks are needed */
                switch (arith_op) {
                    case INT_DIV_S:
                        LLVM_BUILD_OP_OR_INTRINSIC(
                            SDiv, left, right, res,
                            is_i32 ? "i32.div_s" : "i64.div_s", "div_s", false);
                        break;
                    case INT_DIV_U:
                        LLVM_BUILD_OP_OR_INTRINSIC(
                            UDiv, left, right, res,
                            is_i32 ? "i32.div_u" : "i64.div_u", "div_u", false);
                        break;
                    case INT_REM_S:
                        LLVM_BUILD_OP_OR_INTRINSIC(
                            SRem, left, right, res,
                            is_i32 ? "i32.rem_s" : "i64.rem_s", "rem_s", false);
                        break;
                    case INT_REM_U:
                        LLVM_BUILD_OP_OR_INTRINSIC(
                            URem, left, right, res,
                            is_i32 ? "i32.rem_u" : "i64.rem_u", "rem_u", false);
                        break;
                    default:
                        bh_assert(0);
                        return false;
                }
                PUSH_INT(res);
                return true;
        }
    }
    else {
        /* Non-constant divisor: emit the runtime checks */
        /* Check divided by zero */
        LLVM_BUILD_ICMP(LLVMIntEQ, right, is_i32 ? I32_ZERO : I64_ZERO,
                        cmp_div_zero, "cmp_div_zero");
        ADD_BASIC_BLOCK(check_div_zero_succ, "check_div_zero_success");
        /* Throw conditional exception if divided by zero */
        if (!(aot_emit_exception(comp_ctx, func_ctx,
                                 EXCE_INTEGER_DIVIDE_BY_ZERO, true,
                                 cmp_div_zero, check_div_zero_succ)))
            goto fail;
        switch (arith_op) {
            case INT_DIV_S:
                /* Check integer overflow (INT_MIN / -1) */
                if (is_i32)
                    CHECK_INT_OVERFLOW(I32);
                else
                    CHECK_INT_OVERFLOW(I64);
                ADD_BASIC_BLOCK(check_overflow_succ, "check_overflow_success");
                /* Throw conditional exception if integer overflow */
                if (!(aot_emit_exception(comp_ctx, func_ctx,
                                         EXCE_INTEGER_OVERFLOW, true, overflow,
                                         check_overflow_succ)))
                    goto fail;
                LLVM_BUILD_OP_OR_INTRINSIC(SDiv, left, right, res,
                                           is_i32 ? "i32.div_s" : "i64.div_s",
                                           "div_s", false);
                PUSH_INT(res);
                return true;
            case INT_DIV_U:
                intrinsic = is_i32 ? "i32.div_u" : "i64.div_u";
                if (comp_ctx->disable_llvm_intrinsics
                    && aot_intrinsic_check_capability(comp_ctx, intrinsic)) {
                    res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
                                                  param_types[0], param_types,
                                                  2, left, right);
                }
                else {
                    LLVM_BUILD_OP(UDiv, left, right, res, "div_u", false);
                }
                PUSH_INT(res);
                return true;
            case INT_REM_S:
                /* Webassembly spec requires it return 0 */
                if (is_i32)
                    CHECK_INT_OVERFLOW(I32);
                else
                    CHECK_INT_OVERFLOW(I64);
                return compile_rems(comp_ctx, func_ctx, left, right, overflow,
                                    is_i32);
            case INT_REM_U:
                LLVM_BUILD_OP_OR_INTRINSIC(URem, left, right, res,
                                           is_i32 ? "i32.rem_u" : "i64.rem_u",
                                           "rem_u", false);
                PUSH_INT(res);
                return true;
            default:
                bh_assert(0);
                return false;
        }
    }
fail:
    return false;
}
  545. static LLVMValueRef
  546. compile_int_add(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
  547. bool is_i32)
  548. {
  549. /* If one of the operands is 0, just return the other */
  550. if (IS_CONST_ZERO(left))
  551. return right;
  552. if (IS_CONST_ZERO(right))
  553. return left;
  554. /* Build add */
  555. return LLVMBuildAdd(comp_ctx->builder, left, right, "add");
  556. }
  557. static LLVMValueRef
  558. compile_int_sub(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
  559. bool is_i32)
  560. {
  561. /* If the right operand is 0, just return the left */
  562. if (IS_CONST_ZERO(right))
  563. return left;
  564. /* Build sub */
  565. return LLVMBuildSub(comp_ctx->builder, left, right, "sub");
  566. }
  567. static LLVMValueRef
  568. compile_int_mul(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
  569. bool is_i32)
  570. {
  571. /* If one of the operands is 0, just return constant 0 */
  572. if (IS_CONST_ZERO(left) || IS_CONST_ZERO(right))
  573. return is_i32 ? I32_ZERO : I64_ZERO;
  574. /* Build mul */
  575. return LLVMBuildMul(comp_ctx->builder, left, right, "mul");
  576. }
/* Dispatch one wasm integer arithmetic opcode. add/sub/mul pop two
 * operands and push the (possibly constant-folded) result via
 * DEF_INT_BINARY_OP; div/rem variants delegate to compile_int_div,
 * which handles trapping. Returns false on compile error. */
static bool
compile_op_int_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          IntArithmetic arith_op, bool is_i32,
                          uint8 **p_frame_ip)
{
    switch (arith_op) {
        case INT_ADD:
            DEF_INT_BINARY_OP(compile_int_add(comp_ctx, left, right, is_i32),
                              "compile int add fail.");
            return true;
        case INT_SUB:
            DEF_INT_BINARY_OP(compile_int_sub(comp_ctx, left, right, is_i32),
                              "compile int sub fail.");
            return true;
        case INT_MUL:
            DEF_INT_BINARY_OP(compile_int_mul(comp_ctx, left, right, is_i32),
                              "compile int mul fail.");
            return true;
        case INT_DIV_S:
        case INT_DIV_U:
        case INT_REM_S:
        case INT_REM_U:
            return compile_int_div(comp_ctx, func_ctx, arith_op, is_i32,
                                   p_frame_ip);
        default:
            bh_assert(0);
            return false;
    }
/* reached via the POP/PUSH macros inside DEF_INT_BINARY_OP */
fail:
    return false;
}
/* Dispatch one wasm integer bitwise opcode (and/or/xor): pop two
 * operands, build the instruction, push the result. Returns false on
 * compile error. */
static bool
compile_op_int_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       IntBitwise bitwise_op, bool is_i32)
{
    switch (bitwise_op) {
        case INT_AND:
            DEF_INT_BINARY_OP(
                LLVMBuildAnd(comp_ctx->builder, left, right, "and"),
                "llvm build and fail.");
            return true;
        case INT_OR:
            DEF_INT_BINARY_OP(LLVMBuildOr(comp_ctx->builder, left, right, "or"),
                              "llvm build or fail.");
            return true;
        case INT_XOR:
            DEF_INT_BINARY_OP(
                LLVMBuildXor(comp_ctx->builder, left, right, "xor"),
                "llvm build xor fail.");
            return true;
        default:
            bh_assert(0);
            return false;
    }
/* reached via the POP/PUSH macros inside DEF_INT_BINARY_OP */
fail:
    return false;
}
/* Build wasm shl: the shift count is first wrapped to the operand width
 * (SHIFT_COUNT_MASK rebinds `right`) since an unwrapped count is UB in
 * LLVM. Returns the result value, or NULL on error. */
static LLVMValueRef
compile_int_shl(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
                bool is_i32)
{
    LLVMValueRef res;
    SHIFT_COUNT_MASK;
    /* Build shl */
    LLVM_BUILD_OP(Shl, left, right, res, "shl", NULL);
    return res;
}
/* Build wasm shr_s (arithmetic right shift): the shift count is first
 * wrapped to the operand width (SHIFT_COUNT_MASK rebinds `right`).
 * Returns the result value, or NULL on error. */
static LLVMValueRef
compile_int_shr_s(AOTCompContext *comp_ctx, LLVMValueRef left,
                  LLVMValueRef right, bool is_i32)
{
    LLVMValueRef res;
    SHIFT_COUNT_MASK;
    /* Build ashr (comment previously said "shl" — copy-paste slip) */
    LLVM_BUILD_OP(AShr, left, right, res, "shr_s", NULL);
    return res;
}
/* Build wasm shr_u (logical right shift): the shift count is first
 * wrapped to the operand width (SHIFT_COUNT_MASK rebinds `right`).
 * Returns the result value, or NULL on error. */
static LLVMValueRef
compile_int_shr_u(AOTCompContext *comp_ctx, LLVMValueRef left,
                  LLVMValueRef right, bool is_i32)
{
    LLVMValueRef res;
    SHIFT_COUNT_MASK;
    /* Build lshr (comment previously said "shl" — copy-paste slip) */
    LLVM_BUILD_OP(LShr, left, right, res, "shr_u", NULL);
    return res;
}
/* Build wasm rotl/rotr as two shifts OR-ed together:
 *   rotl: (x << n) | (x >> ((BITS - n) & mask))
 *   rotr: (x >> n) | (x << ((BITS - n) & mask))
 * The count is wrapped first; the (BITS - n) amount is masked again so
 * n == 0 does not produce a full-width (UB) shift. Returns the result
 * value, or NULL on error. */
static LLVMValueRef
compile_int_rot(AOTCompContext *comp_ctx, LLVMValueRef left, LLVMValueRef right,
                bool is_rotl, bool is_i32)
{
    LLVMValueRef bits_minus_shift_count, res, tmp_l, tmp_r;
    char *name = is_rotl ? "rotl" : "rotr";
    SHIFT_COUNT_MASK;
    /* rotl/rotr with 0 (constant folding makes the masked count const) */
    if (IS_CONST_ZERO(right))
        return left;
    /* Calculate (bits - shift_count) */
    LLVM_BUILD_OP(Sub, is_i32 ? I32_32 : I64_64, right, bits_minus_shift_count,
                  "bits_minus_shift_count", NULL);
    /* Calculate (bits - shift_count) & mask */
    bits_minus_shift_count =
        LLVMBuildAnd(comp_ctx->builder, bits_minus_shift_count,
                     is_i32 ? I32_31 : I64_63, "bits_minus_shift_count_and");
    if (!bits_minus_shift_count) {
        aot_set_last_error("llvm build and failed.");
        return NULL;
    }
    if (is_rotl) {
        /* (left << count) | (left >> ((BITS - count) & mask)) */
        LLVM_BUILD_OP(Shl, left, right, tmp_l, "tmp_l", NULL);
        LLVM_BUILD_OP(LShr, left, bits_minus_shift_count, tmp_r, "tmp_r", NULL);
    }
    else {
        /* (left >> count) | (left << ((BITS - count) & mask)) */
        LLVM_BUILD_OP(LShr, left, right, tmp_l, "tmp_l", NULL);
        LLVM_BUILD_OP(Shl, left, bits_minus_shift_count, tmp_r, "tmp_r", NULL);
    }
    LLVM_BUILD_OP(Or, tmp_l, tmp_r, res, name, NULL);
    return res;
}
/* Dispatch one wasm integer shift/rotate opcode: pop two operands, build
 * the (count-wrapped) operation, push the result. Error strings are NULL
 * because each helper sets its own last error. Returns false on error. */
static bool
compile_op_int_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                     IntShift shift_op, bool is_i32)
{
    switch (shift_op) {
        case INT_SHL:
            DEF_INT_BINARY_OP(compile_int_shl(comp_ctx, left, right, is_i32),
                              NULL);
            return true;
        case INT_SHR_S:
            DEF_INT_BINARY_OP(compile_int_shr_s(comp_ctx, left, right, is_i32),
                              NULL);
            return true;
        case INT_SHR_U:
            DEF_INT_BINARY_OP(compile_int_shr_u(comp_ctx, left, right, is_i32),
                              NULL);
            return true;
        case INT_ROTL:
            DEF_INT_BINARY_OP(
                compile_int_rot(comp_ctx, left, right, true, is_i32), NULL);
            return true;
        case INT_ROTR:
            DEF_INT_BINARY_OP(
                compile_int_rot(comp_ctx, left, right, false, is_i32), NULL);
            return true;
        default:
            bh_assert(0);
            return false;
    }
/* reached via the POP/PUSH macros inside DEF_INT_BINARY_OP */
fail:
    return false;
}
  730. static bool
  731. is_target_arm(AOTCompContext *comp_ctx)
  732. {
  733. return !strncmp(comp_ctx->target_arch, "arm", 3)
  734. || !strncmp(comp_ctx->target_arch, "aarch64", 7)
  735. || !strncmp(comp_ctx->target_arch, "thumb", 5);
  736. }
  737. static bool
  738. is_target_x86(AOTCompContext *comp_ctx)
  739. {
  740. return !strncmp(comp_ctx->target_arch, "x86_64", 6)
  741. || !strncmp(comp_ctx->target_arch, "i386", 4);
  742. }
  743. static bool
  744. is_target_xtensa(AOTCompContext *comp_ctx)
  745. {
  746. return !strncmp(comp_ctx->target_arch, "xtensa", 6);
  747. }
  748. static bool
  749. is_target_mips(AOTCompContext *comp_ctx)
  750. {
  751. return !strncmp(comp_ctx->target_arch, "mips", 4);
  752. }
  753. static bool
  754. is_target_riscv(AOTCompContext *comp_ctx)
  755. {
  756. return !strncmp(comp_ctx->target_arch, "riscv", 5);
  757. }
/* Decide whether to emit plain LLVM float instructions (soft-float /
 * libcall lowering, returns true) or the constrained-fp intrinsics
 * (hardware FPU path, returns false) for the current target, based on
 * the target machine's feature string. On any target not special-cased
 * below, soft-float is assumed.
 * Note: also returns false on failure to fetch the feature string. */
static bool
is_targeting_soft_float(AOTCompContext *comp_ctx, bool is_f32)
{
    bool ret = false;
    char *feature_string;
    if (!(feature_string =
              LLVMGetTargetMachineFeatureString(comp_ctx->target_machine))) {
        aot_set_last_error("llvm get target machine feature string fail.");
        return false;
    }
    /* Note:
     * LLVM CodeGen uses FPU Coprocessor registers by default,
     * so user must specify '--cpu-features=+soft-float' to wamrc if the target
     * doesn't have or enable FPU on arm, x86 or mips. */
    if (is_target_arm(comp_ctx) || is_target_x86(comp_ctx)
        || is_target_mips(comp_ctx)) {
        ret = strstr(feature_string, "+soft-float") ? true : false;
    }
    else if (is_target_xtensa(comp_ctx)) {
        /* Note:
         * 1. The Floating-Point Coprocessor Option of xtensa only support
         * single-precision floating-point operations, so must use soft-float
         * for f64(i.e. double).
         * 2. LLVM CodeGen uses Floating-Point Coprocessor registers by default,
         * so user must specify '--cpu-features=-fp' to wamrc if the target
         * doesn't have or enable Floating-Point Coprocessor Option on xtensa.
         */
        if (comp_ctx->disable_llvm_intrinsics)
            ret = false;
        else
            ret = (!is_f32 || strstr(feature_string, "-fp")) ? true : false;
    }
    else if (is_target_riscv(comp_ctx)) {
        /*
         * Note: Use builtin intrinsics since hardware float operation
         * will cause rodata relocation, this will try to use hardware
         * float unit (by return false) but handled by software finally
         */
        if (comp_ctx->disable_llvm_intrinsics)
            ret = false;
        else
            ret = !strstr(feature_string, "+d") ? true : false;
    }
    else {
        /* Unknown target: default to soft-float */
        ret = true;
    }
    /* feature string is owned by us; must be disposed */
    LLVMDisposeMessage(feature_string);
    return ret;
}
/*
 * Compile a WASM float binary arithmetic opcode (add/sub/mul/div/min/max)
 * for f32 (is_f32 == true) or f64 (is_f32 == false).
 *
 * For add/sub/mul/div: when targeting soft-float, emit a plain LLVM
 * fadd/fsub/fmul/fdiv instruction; otherwise call the
 * llvm.experimental.constrained.* intrinsic so the configured rounding
 * mode and FP exception behavior (comp_ctx->fp_rounding_mode /
 * fp_exception_behavior) are honored.  min/max delegate to
 * compile_op_float_min_max() to implement WASM NaN/zero-sign semantics.
 *
 * NOTE(review): DEF_FP_BINARY_OP is a macro defined earlier in this file;
 * it presumably binds the `left`/`right` operands from the value stack and
 * jumps to the `fail` label on error (which is why `fail:` below is
 * reachable) — confirm against the macro definition.
 *
 * Returns true on success, false on failure (assert trips on an unknown
 * arith_op in debug builds).
 */
static bool
compile_op_float_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                            FloatArithmetic arith_op, bool is_f32)
{
    switch (arith_op) {
        case FLOAT_ADD:
            if (is_targeting_soft_float(comp_ctx, is_f32))
                DEF_FP_BINARY_OP(
                    LLVMBuildFAdd(comp_ctx->builder, left, right, "fadd"),
                    "llvm build fadd fail.");
            else
                DEF_FP_BINARY_OP(
                    call_llvm_float_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.fadd.f32"
                                : "llvm.experimental.constrained.fadd.f64"),
                        left, right, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        case FLOAT_SUB:
            if (is_targeting_soft_float(comp_ctx, is_f32))
                DEF_FP_BINARY_OP(
                    LLVMBuildFSub(comp_ctx->builder, left, right, "fsub"),
                    "llvm build fsub fail.");
            else
                DEF_FP_BINARY_OP(
                    call_llvm_float_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.fsub.f32"
                                : "llvm.experimental.constrained.fsub.f64"),
                        left, right, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        case FLOAT_MUL:
            if (is_targeting_soft_float(comp_ctx, is_f32))
                DEF_FP_BINARY_OP(
                    LLVMBuildFMul(comp_ctx->builder, left, right, "fmul"),
                    "llvm build fmul fail.");
            else
                DEF_FP_BINARY_OP(
                    call_llvm_float_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.fmul.f32"
                                : "llvm.experimental.constrained.fmul.f64"),
                        left, right, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        case FLOAT_DIV:
            if (is_targeting_soft_float(comp_ctx, is_f32))
                DEF_FP_BINARY_OP(
                    LLVMBuildFDiv(comp_ctx->builder, left, right, "fdiv"),
                    "llvm build fdiv fail.");
            else
                DEF_FP_BINARY_OP(
                    call_llvm_float_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.fdiv.f32"
                                : "llvm.experimental.constrained.fdiv.f64"),
                        left, right, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        case FLOAT_MIN:
            /* last argument true => min */
            DEF_FP_BINARY_OP(compile_op_float_min_max(
                                 comp_ctx, func_ctx, is_f32, left, right, true),
                             NULL);
            return true;
        case FLOAT_MAX:
            /* last argument false => max */
            DEF_FP_BINARY_OP(compile_op_float_min_max(comp_ctx, func_ctx,
                                                      is_f32, left, right,
                                                      false),
                             NULL);
            return true;
        default:
            bh_assert(0);
            return false;
    }

/* reached via goto from the DEF_FP_BINARY_OP macro expansion on error */
fail:
    return false;
}
  890. static LLVMValueRef
  891. call_llvm_float_math_intrinsic(AOTCompContext *comp_ctx,
  892. AOTFuncContext *func_ctx, bool is_f32,
  893. const char *intrinsic, ...)
  894. {
  895. va_list param_value_list;
  896. LLVMValueRef ret;
  897. LLVMTypeRef param_type, ret_type = is_f32 ? F32_TYPE : F64_TYPE;
  898. param_type = ret_type;
  899. va_start(param_value_list, intrinsic);
  900. ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
  901. &param_type, 1, param_value_list);
  902. va_end(param_value_list);
  903. return ret;
  904. }
/*
 * Compile a WASM float unary math opcode (abs/neg/ceil/floor/trunc/
 * nearest/sqrt) for f32 (is_f32 == true) or f64.
 *
 * Most operations map directly onto the corresponding llvm.* math
 * intrinsic; neg is emitted as an LLVM fneg instruction.  WASM "nearest"
 * (round-to-even) maps to llvm.rint.  sqrt uses the constrained
 * llvm.experimental.constrained.sqrt intrinsic unless targeting
 * soft-float or LLVM intrinsics are disabled.
 *
 * NOTE(review): DEF_FP_UNARY_OP is a macro defined earlier in this file;
 * it presumably binds `operand` from the value stack and jumps to the
 * `fail` label on error — confirm against the macro definition.
 *
 * Returns true on success, false on failure.
 */
static bool
compile_op_float_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                      FloatMath math_op, bool is_f32)
{
    switch (math_op) {
        case FLOAT_ABS:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.fabs.f32" : "llvm.fabs.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_NEG:
            DEF_FP_UNARY_OP(LLVMBuildFNeg(comp_ctx->builder, operand, "fneg"),
                            "llvm build fneg fail.");
            return true;
        case FLOAT_CEIL:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.ceil.f32" : "llvm.ceil.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_FLOOR:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.floor.f32" : "llvm.floor.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_TRUNC:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.trunc.f32" : "llvm.trunc.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_NEAREST:
            /* WASM f32/f64.nearest rounds half-to-even; llvm.rint rounds
             * per the current rounding mode, which is round-to-nearest-even
             * by default. */
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.rint.f32" : "llvm.rint.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_SQRT:
            if (is_targeting_soft_float(comp_ctx, is_f32)
                || comp_ctx->disable_llvm_intrinsics)
                DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                    comp_ctx, func_ctx, is_f32,
                                    is_f32 ? "llvm.sqrt.f32" : "llvm.sqrt.f64",
                                    operand),
                                NULL);
            else
                DEF_FP_UNARY_OP(
                    call_llvm_libm_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.sqrt.f32"
                                : "llvm.experimental.constrained.sqrt.f64"),
                        operand, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        default:
            bh_assert(0);
            return false;
    }

    /* NOTE(review): unreachable — every switch case returns above; kept as-is
     * (some compilers warn about control reaching the end otherwise). */
    return true;
/* reached via goto from the DEF_FP_UNARY_OP macro expansion on error */
fail:
    return false;
}
/*
 * Compile the WASM f32.copysign / f64.copysign opcode by calling the
 * llvm.copysign.* intrinsic: the result has the magnitude of `left` and
 * the sign of `right`.
 *
 * NOTE(review): DEF_FP_BINARY_OP is a macro defined earlier in this file;
 * it presumably binds `left`/`right` from the value stack and jumps to
 * the `fail` label on error — confirm against the macro definition.
 *
 * Returns true on success, false on failure.
 */
static bool
compile_float_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       bool is_f32)
{
    LLVMTypeRef ret_type, param_types[2];

    /* copysign takes two params of the same float type and returns it */
    param_types[0] = param_types[1] = ret_type = is_f32 ? F32_TYPE : F64_TYPE;

    DEF_FP_BINARY_OP(aot_call_llvm_intrinsic(
                         comp_ctx, func_ctx,
                         is_f32 ? "llvm.copysign.f32" : "llvm.copysign.f64",
                         ret_type, param_types, 2, left, right),
                     NULL);
    return true;
/* reached via goto from the DEF_FP_BINARY_OP macro expansion on error */
fail:
    return false;
}
/* Compile i32.clz (count leading zeros) via the shared bit-count helper. */
bool
aot_compile_op_i32_clz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, CLZ32, true);
}
/* Compile i32.ctz (count trailing zeros) via the shared bit-count helper. */
bool
aot_compile_op_i32_ctz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, CTZ32, true);
}
/* Compile i32.popcnt (population count) via the shared bit-count helper. */
bool
aot_compile_op_i32_popcnt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, POP_CNT32, true);
}
/* Compile i64.clz (count leading zeros) via the shared bit-count helper. */
bool
aot_compile_op_i64_clz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, CLZ64, false);
}
/* Compile i64.ctz (count trailing zeros) via the shared bit-count helper. */
bool
aot_compile_op_i64_ctz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, CTZ64, false);
}
/* Compile i64.popcnt (population count) via the shared bit-count helper. */
bool
aot_compile_op_i64_popcnt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, POP_CNT64, false);
}
/* Compile an i32 arithmetic opcode (add/sub/mul/div/rem variants per
 * arith_op); p_frame_ip is passed through for the helper's use. */
bool
aot_compile_op_i32_arithmetic(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, IntArithmetic arith_op,
                              uint8 **p_frame_ip)
{
    return compile_op_int_arithmetic(comp_ctx, func_ctx, arith_op, true,
                                     p_frame_ip);
}
/* Compile an i64 arithmetic opcode (add/sub/mul/div/rem variants per
 * arith_op); p_frame_ip is passed through for the helper's use. */
bool
aot_compile_op_i64_arithmetic(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, IntArithmetic arith_op,
                              uint8 **p_frame_ip)
{
    return compile_op_int_arithmetic(comp_ctx, func_ctx, arith_op, false,
                                     p_frame_ip);
}
/* Compile an i32 bitwise opcode (and/or/xor per bitwise_op). */
bool
aot_compile_op_i32_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           IntBitwise bitwise_op)
{
    return compile_op_int_bitwise(comp_ctx, func_ctx, bitwise_op, true);
}
/* Compile an i64 bitwise opcode (and/or/xor per bitwise_op). */
bool
aot_compile_op_i64_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           IntBitwise bitwise_op)
{
    return compile_op_int_bitwise(comp_ctx, func_ctx, bitwise_op, false);
}
/* Compile an i32 shift/rotate opcode (per shift_op). */
bool
aot_compile_op_i32_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         IntShift shift_op)
{
    return compile_op_int_shift(comp_ctx, func_ctx, shift_op, true);
}
/* Compile an i64 shift/rotate opcode (per shift_op). */
bool
aot_compile_op_i64_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         IntShift shift_op)
{
    return compile_op_int_shift(comp_ctx, func_ctx, shift_op, false);
}
/* Compile an f32 unary math opcode (abs/neg/ceil/floor/trunc/nearest/sqrt). */
bool
aot_compile_op_f32_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        FloatMath math_op)
{
    return compile_op_float_math(comp_ctx, func_ctx, math_op, true);
}
/* Compile an f64 unary math opcode (abs/neg/ceil/floor/trunc/nearest/sqrt). */
bool
aot_compile_op_f64_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        FloatMath math_op)
{
    return compile_op_float_math(comp_ctx, func_ctx, math_op, false);
}
/* Compile an f32 binary arithmetic opcode (add/sub/mul/div/min/max). */
bool
aot_compile_op_f32_arithmetic(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              FloatArithmetic arith_op)
{
    return compile_op_float_arithmetic(comp_ctx, func_ctx, arith_op, true);
}
/* Compile an f64 binary arithmetic opcode (add/sub/mul/div/min/max). */
bool
aot_compile_op_f64_arithmetic(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              FloatArithmetic arith_op)
{
    return compile_op_float_arithmetic(comp_ctx, func_ctx, arith_op, false);
}
/* Compile the f32.copysign opcode. */
bool
aot_compile_op_f32_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return compile_float_copysign(comp_ctx, func_ctx, true);
}
/* Compile the f64.copysign opcode. */
bool
aot_compile_op_f64_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return compile_float_copysign(comp_ctx, func_ctx, false);
}