aot_emit_numberic.c

/*
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "aot_emit_numberic.h"
#include "aot_emit_exception.h"
#include "aot_emit_control.h"
#include "../aot/aot_runtime.h"
#include "../aot/aot_intrinsic.h"

#include <stdarg.h>

#define LLVM_BUILD_ICMP(op, left, right, res, name)                           \
    do {                                                                      \
        if (!(res =                                                           \
                  LLVMBuildICmp(comp_ctx->builder, op, left, right, name))) { \
            aot_set_last_error("llvm build " name " fail.");                  \
            return false;                                                     \
        }                                                                     \
    } while (0)

#define LLVM_BUILD_OP(Op, left, right, res, name, err_ret)                  \
    do {                                                                    \
        if (!(res = LLVMBuild##Op(comp_ctx->builder, left, right, name))) { \
            aot_set_last_error("llvm build " #name " fail.");               \
            return err_ret;                                                 \
        }                                                                   \
    } while (0)
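
/* Either build the LLVM instruction directly, or, when LLVM intrinsics are
 * disabled and the named AOT intrinsic is available (e.g. for XIP mode),
 * emit a call to the runtime intrinsic instead. Callers must have an
 * LLVMTypeRef param_types[2] array in scope describing the operand types. */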
#define LLVM_BUILD_OP_OR_INTRINSIC(Op, left, right, res, intrinsic, name, \
                                   err_ret)                               \
    do {                                                                  \
        if (comp_ctx->disable_llvm_intrinsics                             \
            && aot_intrinsic_check_capability(comp_ctx, intrinsic)) {     \
            res = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,  \
                                          param_types[0], param_types, 2, \
                                          left, right);                   \
        }                                                                 \
        else {                                                            \
            LLVM_BUILD_OP(Op, left, right, res, name, err_ret);           \
        }                                                                 \
    } while (0)

#define ADD_BASIC_BLOCK(block, name)                                          \
    do {                                                                      \
        if (!(block = LLVMAppendBasicBlockInContext(comp_ctx->context,        \
                                                    func_ctx->func, name))) { \
            aot_set_last_error("llvm add basic block failed.");               \
            goto fail;                                                        \
        }                                                                     \
                                                                              \
        LLVMMoveBasicBlockAfter(block,                                        \
                                LLVMGetInsertBlock(comp_ctx->builder));       \
    } while (0)

#define IS_CONST_ZERO(val)                                          \
    (LLVMIsEfficientConstInt(val)                                   \
     && ((is_i32 && (int32)LLVMConstIntGetZExtValue(val) == 0)      \
         || (!is_i32 && (int64)LLVMConstIntGetSExtValue(val) == 0)))
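
/* Signed integer division/remainder overflows only for INT_MIN / -1:
 * build an i1 `overflow` flag that is true exactly in that case. The
 * macro expects `left`, `right` and `overflow` to be in scope. */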
#define CHECK_INT_OVERFLOW(type)                                           \
    do {                                                                   \
        LLVMValueRef cmp_min_int, cmp_neg_one;                             \
        LLVM_BUILD_ICMP(LLVMIntEQ, left, type##_MIN, cmp_min_int,          \
                        "cmp_min_int");                                    \
        LLVM_BUILD_ICMP(LLVMIntEQ, right, type##_NEG_ONE, cmp_neg_one,     \
                        "cmp_neg_one");                                    \
        LLVM_BUILD_OP(And, cmp_min_int, cmp_neg_one, overflow, "overflow", \
                      false);                                              \
    } while (0)

#define PUSH_INT(v)      \
    do {                 \
        if (is_i32)      \
            PUSH_I32(v); \
        else             \
            PUSH_I64(v); \
    } while (0)

#define POP_INT(v)      \
    do {                \
        if (is_i32)     \
            POP_I32(v); \
        else            \
            POP_I64(v); \
    } while (0)

#define PUSH_FLOAT(v)    \
    do {                 \
        if (is_f32)      \
            PUSH_F32(v); \
        else             \
            PUSH_F64(v); \
    } while (0)

#define POP_FLOAT(v)    \
    do {                \
        if (is_f32)     \
            POP_F32(v); \
        else            \
            POP_F64(v); \
    } while (0)
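
/* The DEF_*_OP helpers pop the operand(s) from the value stack into local
 * names that the `op` expression is expected to reference (`operand` for
 * unary ops, `left`/`right` for binary ops), then push the result. */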
#define DEF_INT_UNARY_OP(op, err)        \
    do {                                 \
        LLVMValueRef res, operand;       \
        POP_INT(operand);                \
        if (!(res = op)) {               \
            if (err)                     \
                aot_set_last_error(err); \
            return false;                \
        }                                \
        PUSH_INT(res);                   \
    } while (0)

#define DEF_INT_BINARY_OP(op, err)       \
    do {                                 \
        LLVMValueRef res, left, right;   \
        POP_INT(right);                  \
        POP_INT(left);                   \
        if (!(res = op)) {               \
            if (err)                     \
                aot_set_last_error(err); \
            return false;                \
        }                                \
        PUSH_INT(res);                   \
    } while (0)

#define DEF_FP_UNARY_OP(op, err)         \
    do {                                 \
        LLVMValueRef res, operand;       \
        POP_FLOAT(operand);              \
        if (!(res = op)) {               \
            if (err)                     \
                aot_set_last_error(err); \
            return false;                \
        }                                \
        PUSH_FLOAT(res);                 \
    } while (0)

#define DEF_FP_BINARY_OP(op, err)        \
    do {                                 \
        LLVMValueRef res, left, right;   \
        POP_FLOAT(right);                \
        POP_FLOAT(left);                 \
        if (!(res = op)) {               \
            if (err)                     \
                aot_set_last_error(err); \
            return false;                \
        }                                \
        PUSH_FLOAT(res);                 \
    } while (0)

#define SHIFT_COUNT_MASK                                                \
    do {                                                                \
        /* LLVM has undefined behavior if the shift count is greater   \
         * than or equal to the bit width, while the WebAssembly spec  \
         * requires the shift count to be wrapped.                     \
         */                                                             \
        LLVMValueRef shift_count_mask, bits_minus_one;                  \
        bits_minus_one = is_i32 ? I32_31 : I64_63;                      \
        LLVM_BUILD_OP(And, right, bits_minus_one, shift_count_mask,     \
                      "shift_count_mask", NULL);                        \
        right = shift_count_mask;                                       \
    } while (0)

/* Call llvm constrained floating-point intrinsic */
static LLVMValueRef
call_llvm_float_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
                                                   AOTFuncContext *func_ctx,
                                                   bool is_f32,
                                                   const char *intrinsic, ...)
{
    va_list param_value_list;
    LLVMValueRef ret;
    LLVMTypeRef param_types[4], ret_type = is_f32 ? F32_TYPE : F64_TYPE;
    int param_count = (comp_ctx->disable_llvm_intrinsics
                       && aot_intrinsic_check_capability(comp_ctx, intrinsic))
                          ? 2
                          : 4;

    param_types[0] = param_types[1] = ret_type;
    param_types[2] = param_types[3] = MD_TYPE;

    va_start(param_value_list, intrinsic);

    ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
                                    param_types, param_count,
                                    param_value_list);

    va_end(param_value_list);

    return ret;
}

/* Call llvm constrained libm-equivalent intrinsic */
static LLVMValueRef
call_llvm_libm_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
                                                  AOTFuncContext *func_ctx,
                                                  bool is_f32,
                                                  const char *intrinsic, ...)
{
    va_list param_value_list;
    LLVMValueRef ret;
    LLVMTypeRef param_types[3], ret_type = is_f32 ? F32_TYPE : F64_TYPE;

    param_types[0] = ret_type;
    param_types[1] = param_types[2] = MD_TYPE;

    va_start(param_value_list, intrinsic);

    ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
                                    param_types, 3, param_value_list);

    va_end(param_value_list);

    return ret;
}
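
/* Lower f32/f64 min/max with full WebAssembly semantics: the result must be
 * NaN if either operand is NaN, and -0 must be treated as smaller than +0
 * when the operands compare equal; llvm.minnum/llvm.maxnum alone guarantee
 * neither, hence the extra fcmp/select and integer bitwise steps below. */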
static LLVMValueRef
compile_op_float_min_max(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         bool is_f32, LLVMValueRef left, LLVMValueRef right,
                         bool is_min)
{
    LLVMTypeRef float_param_types[2];
    LLVMTypeRef param_types[2], ret_type = is_f32 ? F32_TYPE : F64_TYPE,
                                int_type = is_f32 ? I32_TYPE : I64_TYPE;
    LLVMValueRef cmp, is_eq, is_nan, ret, left_int, right_int, tmp,
        nan = LLVMConstRealOfString(ret_type, "NaN");
    char *intrinsic = is_min ? (is_f32 ? "llvm.minnum.f32" : "llvm.minnum.f64")
                             : (is_f32 ? "llvm.maxnum.f32"
                                       : "llvm.maxnum.f64");

    CHECK_LLVM_CONST(nan);

    /* Note: param_types is used by LLVM_BUILD_OP_OR_INTRINSIC */
    param_types[0] = param_types[1] = int_type;
    float_param_types[0] = float_param_types[1] = ret_type;

    if (comp_ctx->disable_llvm_intrinsics
        && aot_intrinsic_check_capability(comp_ctx,
                                          is_f32 ? "f32_cmp" : "f64_cmp")) {
        LLVMTypeRef param_types_intrinsic[3];
        LLVMValueRef opcond = LLVMConstInt(I32_TYPE, FLOAT_UNO, true);
        param_types_intrinsic[0] = I32_TYPE;
        param_types_intrinsic[1] = is_f32 ? F32_TYPE : F64_TYPE;
        param_types_intrinsic[2] = param_types_intrinsic[1];
        is_nan = aot_call_llvm_intrinsic(
            comp_ctx, func_ctx, is_f32 ? "f32_cmp" : "f64_cmp", I32_TYPE,
            param_types_intrinsic, 3, opcond, left, right);

        opcond = LLVMConstInt(I32_TYPE, FLOAT_EQ, true);
        is_eq = aot_call_llvm_intrinsic(
            comp_ctx, func_ctx, is_f32 ? "f32_cmp" : "f64_cmp", I32_TYPE,
            param_types_intrinsic, 3, opcond, left, right);

        if (!is_nan || !is_eq) {
            return NULL;
        }

        if (!(is_nan = LLVMBuildIntCast(comp_ctx->builder, is_nan, INT1_TYPE,
                                        "bit_cast_is_nan"))) {
            aot_set_last_error("llvm build is_nan bit cast fail.");
            return NULL;
        }

        if (!(is_eq = LLVMBuildIntCast(comp_ctx->builder, is_eq, INT1_TYPE,
                                       "bit_cast_is_eq"))) {
            aot_set_last_error("llvm build is_eq bit cast fail.");
            return NULL;
        }
    }
    else if (!(is_nan = LLVMBuildFCmp(comp_ctx->builder, LLVMRealUNO, left,
                                      right, "is_nan"))
             || !(is_eq = LLVMBuildFCmp(comp_ctx->builder, LLVMRealOEQ, left,
                                        right, "is_eq"))) {
        aot_set_last_error("llvm build fcmp fail.");
        return NULL;
    }

    /* If left and right are equal, they may be zeros with different signs.
       The WebAssembly spec requires -0 < +0, so combine the operands
       bitwise as integers here. */
    if (!(left_int =
              LLVMBuildBitCast(comp_ctx->builder, left, int_type, "left_int"))
        || !(right_int = LLVMBuildBitCast(comp_ctx->builder, right, int_type,
                                          "right_int"))) {
        aot_set_last_error("llvm build bitcast fail.");
        return NULL;
    }

    if (is_min)
        LLVM_BUILD_OP_OR_INTRINSIC(Or, left_int, right_int, tmp,
                                   is_f32 ? "i32.or" : "i64.or", "tmp_int",
                                   false);
    else
        LLVM_BUILD_OP_OR_INTRINSIC(And, left_int, right_int, tmp,
                                   is_f32 ? "i32.and" : "i64.and", "tmp_int",
                                   false);

    if (!(tmp = LLVMBuildBitCast(comp_ctx->builder, tmp, ret_type, "tmp"))) {
        aot_set_last_error("llvm build bitcast fail.");
        return NULL;
    }

    if (!(cmp = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
                                        ret_type, float_param_types, 2, left,
                                        right)))
        return NULL;

    /* The XIP intrinsic already implements the complete min/max semantics,
       so return its result directly */
    if (comp_ctx->disable_llvm_intrinsics
        && aot_intrinsic_check_capability(comp_ctx,
                                          is_f32 ? "f32_cmp" : "f64_cmp")) {
        return cmp;
    }

    if (!(cmp = LLVMBuildSelect(comp_ctx->builder, is_eq, tmp, cmp, "cmp"))) {
        aot_set_last_error("llvm build select fail.");
        return NULL;
    }

    if (!(ret = LLVMBuildSelect(comp_ctx->builder, is_nan, nan, cmp,
                                is_min ? "min" : "max"))) {
        aot_set_last_error("llvm build select fail.");
        return NULL;
    }

    return ret;
fail:
    return NULL;
}

typedef enum BitCountType {
    CLZ32 = 0,
    CLZ64,
    CTZ32,
    CTZ64,
    POP_CNT32,
    POP_CNT64
} BitCountType;

/* clang-format off */
static char *bit_cnt_llvm_intrinsic[] = {
    "llvm.ctlz.i32",
    "llvm.ctlz.i64",
    "llvm.cttz.i32",
    "llvm.cttz.i64",
    "llvm.ctpop.i32",
    "llvm.ctpop.i64",
};
/* clang-format on */
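
/* Emit clz/ctz/popcnt through the LLVM bit-counting intrinsics. llvm.ctlz
 * and llvm.cttz take an extra i1 flag telling whether a zero input is
 * undefined; it is passed as false below so that clz(0)/ctz(0) yield the
 * bit width, as WebAssembly requires. */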
static bool
aot_compile_int_bit_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          BitCountType type, bool is_i32)
{
    LLVMValueRef zero_undef;
    LLVMTypeRef ret_type, param_types[2];

    param_types[0] = ret_type = is_i32 ? I32_TYPE : I64_TYPE;
    param_types[1] = LLVMInt1TypeInContext(comp_ctx->context);

    zero_undef = LLVMConstInt(param_types[1], false, true);
    CHECK_LLVM_CONST(zero_undef);

    /* Call the LLVM intrinsic function */
    if (type < POP_CNT32)
        DEF_INT_UNARY_OP(aot_call_llvm_intrinsic(
                             comp_ctx, func_ctx, bit_cnt_llvm_intrinsic[type],
                             ret_type, param_types, 2, operand, zero_undef),
                         NULL);
    else
        DEF_INT_UNARY_OP(aot_call_llvm_intrinsic(
                             comp_ctx, func_ctx, bit_cnt_llvm_intrinsic[type],
                             ret_type, param_types, 1, operand),
                         NULL);

    return true;
fail:
    return false;
}
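
/* Emit rem_s with a guard for the INT_MIN % -1 case: WebAssembly defines
 * the result as 0, while a hardware srem may trap on that input (e.g. the
 * x86 idiv instruction raises a divide error), so branch around the srem
 * when overflow_cond is true and merge the constant 0 back in with a phi. */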
static bool
compile_rems(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
             LLVMValueRef left, LLVMValueRef right, LLVMValueRef overflow_cond,
             bool is_i32)
{
    LLVMValueRef phi, no_overflow_value, zero = is_i32 ? I32_ZERO : I64_ZERO;
    LLVMBasicBlockRef block_curr, no_overflow_block, rems_end_block;
    LLVMTypeRef param_types[2];

    param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;

    block_curr = LLVMGetInsertBlock(comp_ctx->builder);

    /* Add 2 blocks: no_overflow_block and rems_end block */
    ADD_BASIC_BLOCK(rems_end_block, "rems_end");
    ADD_BASIC_BLOCK(no_overflow_block, "rems_no_overflow");

    /* Create condition br */
    if (!LLVMBuildCondBr(comp_ctx->builder, overflow_cond, rems_end_block,
                         no_overflow_block)) {
        aot_set_last_error("llvm build cond br failed.");
        return false;
    }

    /* Translate no_overflow_block */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, no_overflow_block);

    LLVM_BUILD_OP_OR_INTRINSIC(SRem, left, right, no_overflow_value,
                               is_i32 ? "i32.rem_s" : "i64.rem_s", "rem_s",
                               false);

    /* Jump to rems_end block */
    if (!LLVMBuildBr(comp_ctx->builder, rems_end_block)) {
        aot_set_last_error("llvm build br failed.");
        return false;
    }

    /* Translate rems_end_block */
    LLVMPositionBuilderAtEnd(comp_ctx->builder, rems_end_block);

    /* Create result phi */
    if (!(phi = LLVMBuildPhi(comp_ctx->builder, is_i32 ? I32_TYPE : I64_TYPE,
                             "rems_result_phi"))) {
        aot_set_last_error("llvm build phi failed.");
        return false;
    }

    /* Add phi incoming values */
    LLVMAddIncoming(phi, &no_overflow_value, &no_overflow_block, 1);
    LLVMAddIncoming(phi, &zero, &block_curr, 1);

    if (is_i32)
        PUSH_I32(phi);
    else
        PUSH_I64(phi);

    return true;
fail:
    return false;
}
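
/* Compile div/rem. When the divisor is a constant, fold the 0 / 1 / -1
 * cases at compile time (trap directly, return the dividend, or negate);
 * otherwise emit runtime checks that trap on division by zero and, for
 * div_s, on INT_MIN / -1 overflow. */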
static bool
compile_int_div(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                IntArithmetic arith_op, bool is_i32, uint8 **p_frame_ip)
{
    LLVMValueRef left, right, cmp_div_zero, overflow, res;
    LLVMBasicBlockRef check_div_zero_succ, check_overflow_succ;
    LLVMTypeRef param_types[2];
    const char *intrinsic = NULL;

    param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;

    bh_assert(arith_op == INT_DIV_S || arith_op == INT_DIV_U
              || arith_op == INT_REM_S || arith_op == INT_REM_U);

    POP_INT(right);
    POP_INT(left);

    if (LLVMIsUndef(right) || LLVMIsUndef(left)
#if LLVM_VERSION_NUMBER >= 12
        || LLVMIsPoison(right) || LLVMIsPoison(left)
#endif
    ) {
        if (!(aot_emit_exception(comp_ctx, func_ctx, EXCE_INTEGER_OVERFLOW,
                                 false, NULL, NULL))) {
            goto fail;
        }
        return aot_handle_next_reachable_block(comp_ctx, func_ctx, p_frame_ip);
    }

    if (LLVMIsEfficientConstInt(right)) {
        int64 right_val = (int64)LLVMConstIntGetSExtValue(right);
        switch (right_val) {
            case 0:
                /* Directly throw exception if dividing by zero */
                if (!(aot_emit_exception(comp_ctx, func_ctx,
                                         EXCE_INTEGER_DIVIDE_BY_ZERO, false,
                                         NULL, NULL)))
                    goto fail;

                return aot_handle_next_reachable_block(comp_ctx, func_ctx,
                                                       p_frame_ip);
            case 1:
                if (arith_op == INT_DIV_S || arith_op == INT_DIV_U)
                    PUSH_INT(left);
                else
                    PUSH_INT(is_i32 ? I32_ZERO : I64_ZERO);
                return true;
            case -1:
                if (arith_op == INT_DIV_S) {
                    LLVM_BUILD_ICMP(LLVMIntEQ, left,
                                    is_i32 ? I32_MIN : I64_MIN, overflow,
                                    "overflow");
                    ADD_BASIC_BLOCK(check_overflow_succ,
                                    "check_overflow_success");

                    /* Throw conditional exception if overflow */
                    if (!(aot_emit_exception(comp_ctx, func_ctx,
                                             EXCE_INTEGER_OVERFLOW, true,
                                             overflow, check_overflow_succ)))
                        goto fail;

                    /* Push -(left) to stack */
                    if (!(res = LLVMBuildNeg(comp_ctx->builder, left,
                                             "neg"))) {
                        aot_set_last_error("llvm build neg fail.");
                        return false;
                    }
                    PUSH_INT(res);
                    return true;
                }
                else if (arith_op == INT_REM_S) {
                    PUSH_INT(is_i32 ? I32_ZERO : I64_ZERO);
                    return true;
                }
                else {
                    /* fall through to the default case */
                    goto handle_default;
                }
            handle_default:
            default:
                /* Build div */
                switch (arith_op) {
                    case INT_DIV_S:
                        LLVM_BUILD_OP_OR_INTRINSIC(
                            SDiv, left, right, res,
                            is_i32 ? "i32.div_s" : "i64.div_s", "div_s",
                            false);
                        break;
                    case INT_DIV_U:
                        LLVM_BUILD_OP_OR_INTRINSIC(
                            UDiv, left, right, res,
                            is_i32 ? "i32.div_u" : "i64.div_u", "div_u",
                            false);
                        break;
                    case INT_REM_S:
                        LLVM_BUILD_OP_OR_INTRINSIC(
                            SRem, left, right, res,
                            is_i32 ? "i32.rem_s" : "i64.rem_s", "rem_s",
                            false);
                        break;
                    case INT_REM_U:
                        LLVM_BUILD_OP_OR_INTRINSIC(
                            URem, left, right, res,
                            is_i32 ? "i32.rem_u" : "i64.rem_u", "rem_u",
                            false);
                        break;
                    default:
                        bh_assert(0);
                        return false;
                }

                PUSH_INT(res);
                return true;
        }
    }
    else {
        /* Check divided by zero */
        LLVM_BUILD_ICMP(LLVMIntEQ, right, is_i32 ? I32_ZERO : I64_ZERO,
                        cmp_div_zero, "cmp_div_zero");
        ADD_BASIC_BLOCK(check_div_zero_succ, "check_div_zero_success");

        /* Throw conditional exception if divided by zero */
        if (!(aot_emit_exception(comp_ctx, func_ctx,
                                 EXCE_INTEGER_DIVIDE_BY_ZERO, true,
                                 cmp_div_zero, check_div_zero_succ)))
            goto fail;

        switch (arith_op) {
            case INT_DIV_S:
                /* Check integer overflow */
                if (is_i32)
                    CHECK_INT_OVERFLOW(I32);
                else
                    CHECK_INT_OVERFLOW(I64);

                ADD_BASIC_BLOCK(check_overflow_succ, "check_overflow_success");

                /* Throw conditional exception if integer overflow */
                if (!(aot_emit_exception(comp_ctx, func_ctx,
                                         EXCE_INTEGER_OVERFLOW, true, overflow,
                                         check_overflow_succ)))
                    goto fail;

                LLVM_BUILD_OP_OR_INTRINSIC(SDiv, left, right, res,
                                           is_i32 ? "i32.div_s" : "i64.div_s",
                                           "div_s", false);
                PUSH_INT(res);
                return true;
            case INT_DIV_U:
                intrinsic = is_i32 ? "i32.div_u" : "i64.div_u";
                if (comp_ctx->disable_llvm_intrinsics
                    && aot_intrinsic_check_capability(comp_ctx, intrinsic)) {
                    res = aot_call_llvm_intrinsic(comp_ctx, func_ctx,
                                                  intrinsic, param_types[0],
                                                  param_types, 2, left, right);
                }
                else {
                    LLVM_BUILD_OP(UDiv, left, right, res, "div_u", false);
                }
                PUSH_INT(res);
                return true;
            case INT_REM_S:
                /* The WebAssembly spec requires the result to be 0 when
                   the operation overflows (INT_MIN % -1) */
                if (is_i32)
                    CHECK_INT_OVERFLOW(I32);
                else
                    CHECK_INT_OVERFLOW(I64);

                return compile_rems(comp_ctx, func_ctx, left, right, overflow,
                                    is_i32);
            case INT_REM_U:
                LLVM_BUILD_OP_OR_INTRINSIC(URem, left, right, res,
                                           is_i32 ? "i32.rem_u" : "i64.rem_u",
                                           "rem_u", false);
                PUSH_INT(res);
                return true;
            default:
                bh_assert(0);
                return false;
        }
    }

fail:
    return false;
}

static LLVMValueRef
compile_int_add(AOTCompContext *comp_ctx, LLVMValueRef left,
                LLVMValueRef right, bool is_i32)
{
    /* If one of the operands is 0, just return the other */
    if (IS_CONST_ZERO(left))
        return right;
    if (IS_CONST_ZERO(right))
        return left;

    /* Build add */
    return LLVMBuildAdd(comp_ctx->builder, left, right, "add");
}

static LLVMValueRef
compile_int_sub(AOTCompContext *comp_ctx, LLVMValueRef left,
                LLVMValueRef right, bool is_i32)
{
    /* If the right operand is 0, just return the left */
    if (IS_CONST_ZERO(right))
        return left;

    /* Build sub */
    return LLVMBuildSub(comp_ctx->builder, left, right, "sub");
}

static LLVMValueRef
compile_int_mul(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                LLVMValueRef left, LLVMValueRef right, bool is_i32)
{
    /* If one of the operands is 0, just return constant 0 */
    if (IS_CONST_ZERO(left) || IS_CONST_ZERO(right))
        return is_i32 ? I32_ZERO : I64_ZERO;

    /* Build mul */
    LLVMTypeRef param_types[2];
    param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;

    LLVMValueRef res;
    LLVM_BUILD_OP_OR_INTRINSIC(Mul, left, right, res,
                               is_i32 ? "i32.mul" : "i64.mul", "mul", false);
    return res;
}

static bool
compile_op_int_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                          IntArithmetic arith_op, bool is_i32,
                          uint8 **p_frame_ip)
{
    switch (arith_op) {
        case INT_ADD:
            DEF_INT_BINARY_OP(compile_int_add(comp_ctx, left, right, is_i32),
                              "compile int add fail.");
            return true;
        case INT_SUB:
            DEF_INT_BINARY_OP(compile_int_sub(comp_ctx, left, right, is_i32),
                              "compile int sub fail.");
            return true;
        case INT_MUL:
            DEF_INT_BINARY_OP(
                compile_int_mul(comp_ctx, func_ctx, left, right, is_i32),
                "compile int mul fail.");
            return true;
        case INT_DIV_S:
        case INT_DIV_U:
        case INT_REM_S:
        case INT_REM_U:
            return compile_int_div(comp_ctx, func_ctx, arith_op, is_i32,
                                   p_frame_ip);
        default:
            bh_assert(0);
            return false;
    }

fail:
    return false;
}

static bool
compile_op_int_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       IntBitwise bitwise_op, bool is_i32)
{
    switch (bitwise_op) {
        case INT_AND:
            DEF_INT_BINARY_OP(
                LLVMBuildAnd(comp_ctx->builder, left, right, "and"),
                "llvm build and fail.");
            return true;
        case INT_OR:
            DEF_INT_BINARY_OP(
                LLVMBuildOr(comp_ctx->builder, left, right, "or"),
                "llvm build or fail.");
            return true;
        case INT_XOR:
            DEF_INT_BINARY_OP(
                LLVMBuildXor(comp_ctx->builder, left, right, "xor"),
                "llvm build xor fail.");
            return true;
        default:
            bh_assert(0);
            return false;
    }

fail:
    return false;
}

static LLVMValueRef
compile_int_shl(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                LLVMValueRef left, LLVMValueRef right, bool is_i32)
{
    LLVMValueRef res;

    SHIFT_COUNT_MASK;

    /* Build shl */
    LLVMTypeRef param_types[2];
    param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;
    LLVM_BUILD_OP_OR_INTRINSIC(Shl, left, right, res,
                               is_i32 ? "i32.shl" : "i64.shl", "shl", false);
    return res;
}

static LLVMValueRef
compile_int_shr_s(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                  LLVMValueRef left, LLVMValueRef right, bool is_i32)
{
    LLVMValueRef res;

    SHIFT_COUNT_MASK;

    /* Build shr_s */
    LLVMTypeRef param_types[2];
    param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;
    LLVM_BUILD_OP_OR_INTRINSIC(AShr, left, right, res,
                               is_i32 ? "i32.shr_s" : "i64.shr_s", "shr_s",
                               false);
    return res;
}

static LLVMValueRef
compile_int_shr_u(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                  LLVMValueRef left, LLVMValueRef right, bool is_i32)
{
    LLVMValueRef res;

    SHIFT_COUNT_MASK;

    /* Build shr_u */
    LLVMTypeRef param_types[2];
    param_types[1] = param_types[0] = is_i32 ? I32_TYPE : I64_TYPE;
    LLVM_BUILD_OP_OR_INTRINSIC(LShr, left, right, res,
                               is_i32 ? "i32.shr_u" : "i64.shr_u", "shr_u",
                               false);
    return res;
}
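
/* WebAssembly rotl/rotr are lowered as two shifts plus an or:
 *   rotl(x, n) = (x << n) | (x >> ((BITS - n) & (BITS - 1)))
 * The second shift count is masked so that n == 0 does not produce a
 * (poison) shift by the full bit width. */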
static LLVMValueRef
compile_int_rot(AOTCompContext *comp_ctx, LLVMValueRef left,
                LLVMValueRef right, bool is_rotl, bool is_i32)
{
    LLVMValueRef bits_minus_shift_count, res, tmp_l, tmp_r;
    char *name = is_rotl ? "rotl" : "rotr";

    SHIFT_COUNT_MASK;

    /* rotl/rotr with 0 */
    if (IS_CONST_ZERO(right))
        return left;

    /* Calculate (bits - shift_count) */
    LLVM_BUILD_OP(Sub, is_i32 ? I32_32 : I64_64, right,
                  bits_minus_shift_count, "bits_minus_shift_count", NULL);
    /* Calculate (bits - shift_count) & mask */
    bits_minus_shift_count =
        LLVMBuildAnd(comp_ctx->builder, bits_minus_shift_count,
                     is_i32 ? I32_31 : I64_63, "bits_minus_shift_count_and");
    if (!bits_minus_shift_count) {
        aot_set_last_error("llvm build and failed.");
        return NULL;
    }

    if (is_rotl) {
        /* (left << count) | (left >> ((BITS - count) & mask)) */
        LLVM_BUILD_OP(Shl, left, right, tmp_l, "tmp_l", NULL);
        LLVM_BUILD_OP(LShr, left, bits_minus_shift_count, tmp_r, "tmp_r",
                      NULL);
    }
    else {
        /* (left >> count) | (left << ((BITS - count) & mask)) */
        LLVM_BUILD_OP(LShr, left, right, tmp_l, "tmp_l", NULL);
        LLVM_BUILD_OP(Shl, left, bits_minus_shift_count, tmp_r, "tmp_r",
                      NULL);
    }

    LLVM_BUILD_OP(Or, tmp_l, tmp_r, res, name, NULL);
    return res;
}

static bool
compile_op_int_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                     IntShift shift_op, bool is_i32)
{
    switch (shift_op) {
        case INT_SHL:
            DEF_INT_BINARY_OP(
                compile_int_shl(comp_ctx, func_ctx, left, right, is_i32),
                NULL);
            return true;
        case INT_SHR_S:
            DEF_INT_BINARY_OP(
                compile_int_shr_s(comp_ctx, func_ctx, left, right, is_i32),
                NULL);
            return true;
        case INT_SHR_U:
            DEF_INT_BINARY_OP(
                compile_int_shr_u(comp_ctx, func_ctx, left, right, is_i32),
                NULL);
            return true;
        case INT_ROTL:
            DEF_INT_BINARY_OP(
                compile_int_rot(comp_ctx, left, right, true, is_i32), NULL);
            return true;
        case INT_ROTR:
            DEF_INT_BINARY_OP(
                compile_int_rot(comp_ctx, left, right, false, is_i32), NULL);
            return true;
        default:
            bh_assert(0);
            return false;
    }

fail:
    return false;
}

static bool
is_target_arm(AOTCompContext *comp_ctx)
{
    return !strncmp(comp_ctx->target_arch, "arm", 3)
           || !strncmp(comp_ctx->target_arch, "aarch64", 7)
           || !strncmp(comp_ctx->target_arch, "thumb", 5);
}

static bool
is_target_x86(AOTCompContext *comp_ctx)
{
    return !strncmp(comp_ctx->target_arch, "x86_64", 6)
           || !strncmp(comp_ctx->target_arch, "i386", 4);
}

static bool
is_target_xtensa(AOTCompContext *comp_ctx)
{
    return !strncmp(comp_ctx->target_arch, "xtensa", 6);
}

static bool
is_target_mips(AOTCompContext *comp_ctx)
{
    return !strncmp(comp_ctx->target_arch, "mips", 4);
}

static bool
is_target_riscv(AOTCompContext *comp_ctx)
{
    return !strncmp(comp_ctx->target_arch, "riscv", 5);
}

static bool
is_targeting_soft_float(AOTCompContext *comp_ctx, bool is_f32)
{
    bool ret = false;
    char *feature_string;

    if (!(feature_string =
              LLVMGetTargetMachineFeatureString(comp_ctx->target_machine))) {
        aot_set_last_error("llvm get target machine feature string fail.");
        return false;
    }

    /* Note:
     * LLVM CodeGen uses FPU coprocessor registers by default, so the user
     * must pass '--cpu-features=+soft-float' to wamrc if the target doesn't
     * have or doesn't enable an FPU on arm, x86 or mips. */
    if (is_target_arm(comp_ctx) || is_target_x86(comp_ctx)
        || is_target_mips(comp_ctx)) {
        ret = strstr(feature_string, "+soft-float") ? true : false;
    }
    else if (is_target_xtensa(comp_ctx)) {
        /* Note:
         * 1. The Floating-Point Coprocessor Option of xtensa only supports
         * single-precision floating-point operations, so soft-float must be
         * used for f64 (i.e. double).
         * 2. LLVM CodeGen uses Floating-Point Coprocessor registers by
         * default, so the user must pass '--cpu-features=-fp' to wamrc if
         * the target doesn't have or doesn't enable the Floating-Point
         * Coprocessor Option on xtensa.
         */
        if (comp_ctx->disable_llvm_intrinsics)
            ret = false;
        else
            ret = (!is_f32 || strstr(feature_string, "-fp")) ? true : false;
    }
    else if (is_target_riscv(comp_ctx)) {
        /*
         * Note: Use builtin intrinsics since hardware float operations
         * would cause rodata relocations; this tries to use the hardware
         * float unit (by returning false) but the operations are handled
         * by software in the end.
         */
        if (comp_ctx->disable_llvm_intrinsics)
            ret = false;
        else
            ret = !strstr(feature_string, "+d") ? true : false;
    }
    else {
        ret = true;
    }

    LLVMDisposeMessage(feature_string);
    return ret;
}
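
/* For hard-float targets, emit the llvm.experimental.constrained.* forms
 * so that the rounding mode and FP exception behavior configured in
 * comp_ctx are honored; for soft-float targets, the plain LLVM FP
 * instructions are used and lowered to library calls by CodeGen. */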
static bool
compile_op_float_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                            FloatArithmetic arith_op, bool is_f32)
{
    switch (arith_op) {
        case FLOAT_ADD:
            if (is_targeting_soft_float(comp_ctx, is_f32))
                DEF_FP_BINARY_OP(
                    LLVMBuildFAdd(comp_ctx->builder, left, right, "fadd"),
                    "llvm build fadd fail.");
            else
                DEF_FP_BINARY_OP(
                    call_llvm_float_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.fadd.f32"
                                : "llvm.experimental.constrained.fadd.f64"),
                        left, right, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        case FLOAT_SUB:
            if (is_targeting_soft_float(comp_ctx, is_f32))
                DEF_FP_BINARY_OP(
                    LLVMBuildFSub(comp_ctx->builder, left, right, "fsub"),
                    "llvm build fsub fail.");
            else
                DEF_FP_BINARY_OP(
                    call_llvm_float_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.fsub.f32"
                                : "llvm.experimental.constrained.fsub.f64"),
                        left, right, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        case FLOAT_MUL:
            if (is_targeting_soft_float(comp_ctx, is_f32))
                DEF_FP_BINARY_OP(
                    LLVMBuildFMul(comp_ctx->builder, left, right, "fmul"),
                    "llvm build fmul fail.");
            else
                DEF_FP_BINARY_OP(
                    call_llvm_float_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.fmul.f32"
                                : "llvm.experimental.constrained.fmul.f64"),
                        left, right, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        case FLOAT_DIV:
            if (is_targeting_soft_float(comp_ctx, is_f32))
                DEF_FP_BINARY_OP(
                    LLVMBuildFDiv(comp_ctx->builder, left, right, "fdiv"),
                    "llvm build fdiv fail.");
            else
                DEF_FP_BINARY_OP(
                    call_llvm_float_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.fdiv.f32"
                                : "llvm.experimental.constrained.fdiv.f64"),
                        left, right, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        case FLOAT_MIN:
            DEF_FP_BINARY_OP(compile_op_float_min_max(
                                 comp_ctx, func_ctx, is_f32, left, right,
                                 true),
                             NULL);
            return true;
        case FLOAT_MAX:
            DEF_FP_BINARY_OP(compile_op_float_min_max(comp_ctx, func_ctx,
                                                      is_f32, left, right,
                                                      false),
                             NULL);
            return true;
        default:
            bh_assert(0);
            return false;
    }

fail:
    return false;
}

static LLVMValueRef
call_llvm_float_math_intrinsic(AOTCompContext *comp_ctx,
                               AOTFuncContext *func_ctx, bool is_f32,
                               const char *intrinsic, ...)
{
    va_list param_value_list;
    LLVMValueRef ret;
    LLVMTypeRef param_type, ret_type = is_f32 ? F32_TYPE : F64_TYPE;

    param_type = ret_type;

    va_start(param_value_list, intrinsic);

    ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
                                    &param_type, 1, param_value_list);

    va_end(param_value_list);

    return ret;
}
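
/* Map the wasm float math opcodes onto LLVM math intrinsics. Note that
 * f32/f64.nearest uses llvm.rint, which rounds according to the current FP
 * environment; under the default round-to-nearest-even mode this matches
 * the wasm round-half-to-even semantics. */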
static bool
compile_op_float_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                      FloatMath math_op, bool is_f32)
{
    switch (math_op) {
        case FLOAT_ABS:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.fabs.f32" : "llvm.fabs.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_NEG:
            DEF_FP_UNARY_OP(LLVMBuildFNeg(comp_ctx->builder, operand, "fneg"),
                            "llvm build fneg fail.");
            return true;
        case FLOAT_CEIL:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.ceil.f32" : "llvm.ceil.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_FLOOR:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.floor.f32" : "llvm.floor.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_TRUNC:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.trunc.f32" : "llvm.trunc.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_NEAREST:
            DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                comp_ctx, func_ctx, is_f32,
                                is_f32 ? "llvm.rint.f32" : "llvm.rint.f64",
                                operand),
                            NULL);
            return true;
        case FLOAT_SQRT:
            if (is_targeting_soft_float(comp_ctx, is_f32)
                || comp_ctx->disable_llvm_intrinsics)
                DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(
                                    comp_ctx, func_ctx, is_f32,
                                    is_f32 ? "llvm.sqrt.f32"
                                           : "llvm.sqrt.f64",
                                    operand),
                                NULL);
            else
                DEF_FP_UNARY_OP(
                    call_llvm_libm_experimental_constrained_intrinsic(
                        comp_ctx, func_ctx, is_f32,
                        (is_f32 ? "llvm.experimental.constrained.sqrt.f32"
                                : "llvm.experimental.constrained.sqrt.f64"),
                        operand, comp_ctx->fp_rounding_mode,
                        comp_ctx->fp_exception_behavior),
                    NULL);
            return true;
        default:
            bh_assert(0);
            return false;
    }

    return true;
fail:
    return false;
}

static bool
compile_float_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       bool is_f32)
{
    LLVMTypeRef ret_type, param_types[2];

    param_types[0] = param_types[1] = ret_type = is_f32 ? F32_TYPE : F64_TYPE;

    DEF_FP_BINARY_OP(aot_call_llvm_intrinsic(
                         comp_ctx, func_ctx,
                         is_f32 ? "llvm.copysign.f32" : "llvm.copysign.f64",
                         ret_type, param_types, 2, left, right),
                     NULL);
    return true;

fail:
    return false;
}

bool
aot_compile_op_i32_clz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, CLZ32, true);
}

bool
aot_compile_op_i32_ctz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, CTZ32, true);
}

bool
aot_compile_op_i32_popcnt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, POP_CNT32, true);
}

bool
aot_compile_op_i64_clz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, CLZ64, false);
}

bool
aot_compile_op_i64_ctz(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, CTZ64, false);
}

bool
aot_compile_op_i64_popcnt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return aot_compile_int_bit_count(comp_ctx, func_ctx, POP_CNT64, false);
}

bool
aot_compile_op_i32_arithmetic(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, IntArithmetic arith_op,
                              uint8 **p_frame_ip)
{
    return compile_op_int_arithmetic(comp_ctx, func_ctx, arith_op, true,
                                     p_frame_ip);
}

bool
aot_compile_op_i64_arithmetic(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx, IntArithmetic arith_op,
                              uint8 **p_frame_ip)
{
    return compile_op_int_arithmetic(comp_ctx, func_ctx, arith_op, false,
                                     p_frame_ip);
}

bool
aot_compile_op_i32_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           IntBitwise bitwise_op)
{
    return compile_op_int_bitwise(comp_ctx, func_ctx, bitwise_op, true);
}

bool
aot_compile_op_i64_bitwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           IntBitwise bitwise_op)
{
    return compile_op_int_bitwise(comp_ctx, func_ctx, bitwise_op, false);
}

bool
aot_compile_op_i32_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         IntShift shift_op)
{
    return compile_op_int_shift(comp_ctx, func_ctx, shift_op, true);
}

bool
aot_compile_op_i64_shift(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                         IntShift shift_op)
{
    return compile_op_int_shift(comp_ctx, func_ctx, shift_op, false);
}

bool
aot_compile_op_f32_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        FloatMath math_op)
{
    return compile_op_float_math(comp_ctx, func_ctx, math_op, true);
}

bool
aot_compile_op_f64_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        FloatMath math_op)
{
    return compile_op_float_math(comp_ctx, func_ctx, math_op, false);
}

bool
aot_compile_op_f32_arithmetic(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              FloatArithmetic arith_op)
{
    return compile_op_float_arithmetic(comp_ctx, func_ctx, arith_op, true);
}

bool
aot_compile_op_f64_arithmetic(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx,
                              FloatArithmetic arith_op)
{
    return compile_op_float_arithmetic(comp_ctx, func_ctx, arith_op, false);
}

bool
aot_compile_op_f32_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return compile_float_copysign(comp_ctx, func_ctx, true);
}

bool
aot_compile_op_f64_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return compile_float_copysign(comp_ctx, func_ctx, false);
}