/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "simd_conversions.h"
#include "simd_common.h"
#include "../aot_emit_exception.h"
#include "../aot_emit_numberic.h"
#include "../../aot/aot_runtime.h"
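
/* Narrow two v128 values into one on x86 targets by calling an SSE2/SSE4.1
 * pack intrinsic directly; the intrinsic already performs the saturating
 * narrowing. */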
static bool
simd_integer_narrow_x86(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                        LLVMTypeRef in_vector_type, LLVMTypeRef out_vector_type,
                        const char *intrinsic)
{
    LLVMValueRef vector1, vector2, result;
    LLVMTypeRef param_types[2] = { in_vector_type, in_vector_type };

    if (!(vector2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                              in_vector_type, "vec2"))
        || !(vector1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                                 in_vector_type, "vec1"))) {
        return false;
    }

    if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
                                           out_vector_type, param_types, 2,
                                           vector1, vector2))) {
        HANDLE_FAILURE("LLVMBuildCall");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}

enum integer_sat_type {
    e_sat_i16x8 = 0,
    e_sat_i32x4,
    e_sat_i64x2,
    e_sat_i32x8,
};
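
/* Clamp every lane of `vector` to [min, max] with the llvm.smin/smax (or
 * umin/umax for unsigned) intrinsics; e_sat_i32x8 selects the 8 x i32
 * variant used by i16x8.q15mulr_sat below. */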
static LLVMValueRef
simd_saturate(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
              enum integer_sat_type itype, LLVMValueRef vector,
              LLVMValueRef min, LLVMValueRef max, bool is_signed)
{
    LLVMValueRef result;
    LLVMTypeRef vector_type;

    LLVMTypeRef param_types[][2] = {
        { V128_i16x8_TYPE, V128_i16x8_TYPE },
        { V128_i32x4_TYPE, V128_i32x4_TYPE },
        { V128_i64x2_TYPE, V128_i64x2_TYPE },
        { 0 },
    };

    const char *smin_intrinsic[] = {
        "llvm.smin.v8i16",
        "llvm.smin.v4i32",
        "llvm.smin.v2i64",
        "llvm.smin.v8i32",
    };

    const char *umin_intrinsic[] = {
        "llvm.umin.v8i16",
        "llvm.umin.v4i32",
        "llvm.umin.v2i64",
        "llvm.umin.v8i32",
    };

    const char *smax_intrinsic[] = {
        "llvm.smax.v8i16",
        "llvm.smax.v4i32",
        "llvm.smax.v2i64",
        "llvm.smax.v8i32",
    };

    const char *umax_intrinsic[] = {
        "llvm.umax.v8i16",
        "llvm.umax.v4i32",
        "llvm.umax.v2i64",
        "llvm.umax.v8i32",
    };

    if (e_sat_i32x8 == itype) {
        if (!(vector_type = LLVMVectorType(I32_TYPE, 8))) {
            HANDLE_FAILURE("LLVMVectorType");
            return NULL;
        }

        param_types[itype][0] = vector_type;
        param_types[itype][1] = vector_type;
    }

    if (!(result = aot_call_llvm_intrinsic(
              comp_ctx, func_ctx,
              is_signed ? smin_intrinsic[itype] : umin_intrinsic[itype],
              param_types[itype][0], param_types[itype], 2, vector, max))
        || !(result = aot_call_llvm_intrinsic(
                 comp_ctx, func_ctx,
                 is_signed ? smax_intrinsic[itype] : umax_intrinsic[itype],
                 param_types[itype][0], param_types[itype], 2, result, min))) {
        return NULL;
    }

    return result;
}
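
/* Generic (non-x86) narrowing: pop two vectors, clamp every lane to the
 * destination lane's range (e.g. [-128, 127] for i8x16.narrow_i16x8_s and
 * [0, 255] for the unsigned form), truncate to the narrower lane type and
 * concatenate the two halves with a shuffle. */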
static bool
simd_integer_narrow_common(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                           enum integer_sat_type itype, bool is_signed)
{
    LLVMValueRef vec1, vec2, min, max, mask, result;
    LLVMTypeRef in_vector_type[] = { V128_i16x8_TYPE, V128_i32x4_TYPE,
                                     V128_i64x2_TYPE };
    LLVMTypeRef min_max_type[] = { INT16_TYPE, I32_TYPE, I64_TYPE };
    LLVMTypeRef trunc_type[3] = { 0 };
    uint8 length[] = { 8, 4, 2 };
    int64 smin[] = { 0xff80, 0xffFF8000, 0xffFFffFF80000000 };
    int64 umin[] = { 0x0, 0x0, 0x0 };
    int64 smax[] = { 0x007f, 0x00007fff, 0x000000007fFFffFF };
    int64 umax[] = { 0x00ff, 0x0000ffff, 0x00000000ffFFffFF };
    LLVMValueRef mask_element[] = {
        LLVM_CONST(i32_zero),     LLVM_CONST(i32_one),
        LLVM_CONST(i32_two),      LLVM_CONST(i32_three),
        LLVM_CONST(i32_four),     LLVM_CONST(i32_five),
        LLVM_CONST(i32_six),      LLVM_CONST(i32_seven),
        LLVM_CONST(i32_eight),    LLVM_CONST(i32_nine),
        LLVM_CONST(i32_ten),      LLVM_CONST(i32_eleven),
        LLVM_CONST(i32_twelve),   LLVM_CONST(i32_thirteen),
        LLVM_CONST(i32_fourteen), LLVM_CONST(i32_fifteen),
    };

    if (!(trunc_type[0] = LLVMVectorType(INT8_TYPE, 8))
        || !(trunc_type[1] = LLVMVectorType(INT16_TYPE, 4))
        || !(trunc_type[2] = LLVMVectorType(I32_TYPE, 2))) {
        HANDLE_FAILURE("LLVMVectorType");
        return false;
    }

    if (!(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                           in_vector_type[itype], "vec2"))
        || !(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                              in_vector_type[itype], "vec1"))) {
        return false;
    }

    if (!(max = simd_build_splat_const_integer_vector(
              comp_ctx, min_max_type[itype],
              is_signed ? smax[itype] : umax[itype], length[itype]))
        || !(min = simd_build_splat_const_integer_vector(
                 comp_ctx, min_max_type[itype],
                 is_signed ? smin[itype] : umin[itype], length[itype]))) {
        return false;
    }
    /* Refer to:
     * https://github.com/WebAssembly/spec/blob/main/proposals/simd/SIMD.md#integer-to-integer-narrowing
     * Regardless of whether the operation is signed or unsigned, the input
     * lanes are interpreted as signed integers.
     */
    if (!(vec1 = simd_saturate(comp_ctx, func_ctx, itype, vec1, min, max,
                               true))
        || !(vec2 = simd_saturate(comp_ctx, func_ctx, itype, vec2, min, max,
                                  true))) {
        return false;
    }

    /* trunc */
    if (!(vec1 = LLVMBuildTrunc(comp_ctx->builder, vec1, trunc_type[itype],
                                "vec1_trunc"))
        || !(vec2 = LLVMBuildTrunc(comp_ctx->builder, vec2, trunc_type[itype],
                                   "vec2_trunc"))) {
        HANDLE_FAILURE("LLVMBuildTrunc");
        return false;
    }

    /* combine */
    if (!(mask = LLVMConstVector(mask_element, (length[itype] << 1)))) {
        HANDLE_FAILURE("LLVMConstVector");
        return false;
    }
    if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, vec1, vec2, mask,
                                          "vec_shuffle"))) {
        HANDLE_FAILURE("LLVMBuildShuffleVector");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}

bool
aot_compile_simd_i8x16_narrow_i16x8(AOTCompContext *comp_ctx,
                                    AOTFuncContext *func_ctx, bool is_signed)
{
    if (is_target_x86(comp_ctx)) {
        return simd_integer_narrow_x86(
            comp_ctx, func_ctx, V128_i16x8_TYPE, V128_i8x16_TYPE,
            is_signed ? "llvm.x86.sse2.packsswb.128"
                      : "llvm.x86.sse2.packuswb.128");
    }
    else {
        return simd_integer_narrow_common(comp_ctx, func_ctx, e_sat_i16x8,
                                          is_signed);
    }
}

bool
aot_compile_simd_i16x8_narrow_i32x4(AOTCompContext *comp_ctx,
                                    AOTFuncContext *func_ctx, bool is_signed)
{
    if (is_target_x86(comp_ctx)) {
        return simd_integer_narrow_x86(comp_ctx, func_ctx, V128_i32x4_TYPE,
                                       V128_i16x8_TYPE,
                                       is_signed ? "llvm.x86.sse2.packssdw.128"
                                                 : "llvm.x86.sse41.packusdw");
    }
    else {
        return simd_integer_narrow_common(comp_ctx, func_ctx, e_sat_i32x4,
                                          is_signed);
    }
}

enum integer_extend_type {
    e_ext_i8x16,
    e_ext_i16x8,
    e_ext_i32x4,
};
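
/* Pick the lower or upper half of `vector` with a shuffle, then sign- or
 * zero-extend that half to twice the lane width. */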
static LLVMValueRef
simd_integer_extension(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                       enum integer_extend_type itype, LLVMValueRef vector,
                       bool lower_half, bool is_signed)
{
    LLVMValueRef mask, sub_vector, result;
    LLVMValueRef bits[] = {
        LLVM_CONST(i32_zero),     LLVM_CONST(i32_one),
        LLVM_CONST(i32_two),      LLVM_CONST(i32_three),
        LLVM_CONST(i32_four),     LLVM_CONST(i32_five),
        LLVM_CONST(i32_six),      LLVM_CONST(i32_seven),
        LLVM_CONST(i32_eight),    LLVM_CONST(i32_nine),
        LLVM_CONST(i32_ten),      LLVM_CONST(i32_eleven),
        LLVM_CONST(i32_twelve),   LLVM_CONST(i32_thirteen),
        LLVM_CONST(i32_fourteen), LLVM_CONST(i32_fifteen),
    };
    LLVMTypeRef out_vector_type[] = { V128_i16x8_TYPE, V128_i32x4_TYPE,
                                      V128_i64x2_TYPE };
    LLVMValueRef undef[] = { LLVM_CONST(i8x16_undef), LLVM_CONST(i16x8_undef),
                             LLVM_CONST(i32x4_undef) };
    uint32 sub_vector_length[] = { 8, 4, 2 };

    if (!(mask = lower_half ? LLVMConstVector(bits, sub_vector_length[itype])
                            : LLVMConstVector(bits + sub_vector_length[itype],
                                              sub_vector_length[itype]))) {
        HANDLE_FAILURE("LLVMConstVector");
        return NULL;
    }

    /* retrieve the low or high half */
    if (!(sub_vector = LLVMBuildShuffleVector(comp_ctx->builder, vector,
                                              undef[itype], mask, "half"))) {
        HANDLE_FAILURE("LLVMBuildShuffleVector");
        return NULL;
    }

    if (is_signed) {
        if (!(result = LLVMBuildSExt(comp_ctx->builder, sub_vector,
                                     out_vector_type[itype], "sext"))) {
            HANDLE_FAILURE("LLVMBuildSExt");
            return NULL;
        }
    }
    else {
        if (!(result = LLVMBuildZExt(comp_ctx->builder, sub_vector,
                                     out_vector_type[itype], "zext"))) {
            HANDLE_FAILURE("LLVMBuildZExt");
            return NULL;
        }
    }

    return result;
}

static bool
simd_integer_extension_wrapper(AOTCompContext *comp_ctx,
                               AOTFuncContext *func_ctx,
                               enum integer_extend_type itype, bool lower_half,
                               bool is_signed)
{
    LLVMValueRef vector, result;
    LLVMTypeRef in_vector_type[] = { V128_i8x16_TYPE, V128_i16x8_TYPE,
                                     V128_i32x4_TYPE };

    if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                             in_vector_type[itype], "vec"))) {
        return false;
    }

    if (!(result = simd_integer_extension(comp_ctx, func_ctx, itype, vector,
                                          lower_half, is_signed))) {
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}

bool
aot_compile_simd_i16x8_extend_i8x16(AOTCompContext *comp_ctx,
                                    AOTFuncContext *func_ctx, bool lower_half,
                                    bool is_signed)
{
    return simd_integer_extension_wrapper(comp_ctx, func_ctx, e_ext_i8x16,
                                          lower_half, is_signed);
}

bool
aot_compile_simd_i32x4_extend_i16x8(AOTCompContext *comp_ctx,
                                    AOTFuncContext *func_ctx, bool lower_half,
                                    bool is_signed)
{
    return simd_integer_extension_wrapper(comp_ctx, func_ctx, e_ext_i16x8,
                                          lower_half, is_signed);
}

bool
aot_compile_simd_i64x2_extend_i32x4(AOTCompContext *comp_ctx,
                                    AOTFuncContext *func_ctx, bool lower_half,
                                    bool is_signed)
{
    return simd_integer_extension_wrapper(comp_ctx, func_ctx, e_ext_i32x4,
                                          lower_half, is_signed);
}
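
/* Pop a float vector and convert it with LLVM's saturating fptosi/fptoui
 * intrinsics: out-of-range lanes clamp to the integer range and NaN lanes
 * become zero, matching the WebAssembly trunc_sat semantics. */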
static LLVMValueRef
simd_trunc_sat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
               const char *intrinsics, LLVMTypeRef in_vector_type,
               LLVMTypeRef out_vector_type)
{
    LLVMValueRef vector, result;
    LLVMTypeRef param_types[] = { in_vector_type };

    if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                             in_vector_type, "vector"))) {
        return NULL;
    }

    if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsics,
                                           out_vector_type, param_types, 1,
                                           vector))) {
        return NULL;
    }

    return result;
}

bool
aot_compile_simd_i32x4_trunc_sat_f32x4(AOTCompContext *comp_ctx,
                                       AOTFuncContext *func_ctx, bool is_signed)
{
    LLVMValueRef result;

    if (!(result = simd_trunc_sat(comp_ctx, func_ctx,
                                  is_signed ? "llvm.fptosi.sat.v4i32.v4f32"
                                            : "llvm.fptoui.sat.v4i32.v4f32",
                                  V128_f32x4_TYPE, V128_i32x4_TYPE))) {
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}

bool
aot_compile_simd_i32x4_trunc_sat_f64x2(AOTCompContext *comp_ctx,
                                       AOTFuncContext *func_ctx, bool is_signed)
{
    LLVMValueRef result, zero, mask;
    LLVMTypeRef out_vector_type;
    LLVMValueRef lanes[] = {
        LLVM_CONST(i32_zero),
        LLVM_CONST(i32_one),
        LLVM_CONST(i32_two),
        LLVM_CONST(i32_three),
    };

    if (!(out_vector_type = LLVMVectorType(I32_TYPE, 2))) {
        HANDLE_FAILURE("LLVMVectorType");
        return false;
    }

    if (!(result = simd_trunc_sat(comp_ctx, func_ctx,
                                  is_signed ? "llvm.fptosi.sat.v2i32.v2f64"
                                            : "llvm.fptoui.sat.v2i32.v2f64",
                                  V128_f64x2_TYPE, out_vector_type))) {
        return false;
    }

    if (!(zero = LLVMConstNull(out_vector_type))) {
        HANDLE_FAILURE("LLVMConstNull");
        return false;
    }

    /* v2i32 -> v4i32 */
    if (!(mask = LLVMConstVector(lanes, 4))) {
        HANDLE_FAILURE("LLVMConstVector");
        return false;
    }

    if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, result, zero, mask,
                                          "extend"))) {
        HANDLE_FAILURE("LLVMBuildShuffleVector");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}
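
/* Convert integer lanes to floating point with sitofp/uitofp. */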
static LLVMValueRef
simd_integer_convert(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                     bool is_signed, LLVMValueRef vector,
                     LLVMTypeRef out_vector_type)
{
    LLVMValueRef result;

    result = is_signed ? LLVMBuildSIToFP(comp_ctx->builder, vector,
                                         out_vector_type, "converted")
                       : LLVMBuildUIToFP(comp_ctx->builder, vector,
                                         out_vector_type, "converted");
    if (!result) {
        HANDLE_FAILURE("LLVMBuildSIToFP/LLVMBuildUIToFP");
    }

    return result;
}

bool
aot_compile_simd_f32x4_convert_i32x4(AOTCompContext *comp_ctx,
                                     AOTFuncContext *func_ctx, bool is_signed)
{
    LLVMValueRef vector, result;

    if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                             V128_i32x4_TYPE, "vec"))) {
        return false;
    }

    if (!(result = simd_integer_convert(comp_ctx, func_ctx, is_signed, vector,
                                        V128_f32x4_TYPE))) {
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}

bool
aot_compile_simd_f64x2_convert_i32x4(AOTCompContext *comp_ctx,
                                     AOTFuncContext *func_ctx, bool is_signed)
{
    LLVMValueRef vector, mask, result;
    LLVMValueRef lanes[] = {
        LLVM_CONST(i32_zero),
        LLVM_CONST(i32_one),
    };
    LLVMTypeRef out_vector_type;

    if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                             V128_i32x4_TYPE, "vec"))) {
        return false;
    }

    if (!(out_vector_type = LLVMVectorType(F64_TYPE, 4))) {
        HANDLE_FAILURE("LLVMVectorType");
        return false;
    }

    if (!(result = simd_integer_convert(comp_ctx, func_ctx, is_signed, vector,
                                        out_vector_type))) {
        return false;
    }

    /* v4f64 -> v2f64 */
    if (!(mask = LLVMConstVector(lanes, 2))) {
        HANDLE_FAILURE("LLVMConstVector");
        return false;
    }

    if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, result, result,
                                          mask, "trunc"))) {
        HANDLE_FAILURE("LLVMBuildShuffleVector");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}
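
/* Extended pairwise addition: split the input into its even- and odd-indexed
 * lanes, widen both halves, then add them, so that result lane i equals
 * extend(in[2*i]) + extend(in[2*i+1]). */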
static bool
simd_extadd_pairwise(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                     LLVMTypeRef in_vector_type, LLVMTypeRef out_vector_type,
                     bool is_signed)
{
    LLVMValueRef vector, even_mask, odd_mask, sub_vector_even, sub_vector_odd,
        result;
    LLVMValueRef even_element[] = {
        LLVM_CONST(i32_zero),   LLVM_CONST(i32_two),   LLVM_CONST(i32_four),
        LLVM_CONST(i32_six),    LLVM_CONST(i32_eight), LLVM_CONST(i32_ten),
        LLVM_CONST(i32_twelve), LLVM_CONST(i32_fourteen),
    };
    LLVMValueRef odd_element[] = {
        LLVM_CONST(i32_one),      LLVM_CONST(i32_three),
        LLVM_CONST(i32_five),     LLVM_CONST(i32_seven),
        LLVM_CONST(i32_nine),     LLVM_CONST(i32_eleven),
        LLVM_CONST(i32_thirteen), LLVM_CONST(i32_fifteen),
    };
    /* assumption about i16x8 from i8x16 and i32x4 from i16x8 */
    uint8 mask_length = V128_i16x8_TYPE == out_vector_type ? 8 : 4;

    if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, in_vector_type,
                                             "vector"))) {
        return false;
    }

    if (!(even_mask = LLVMConstVector(even_element, mask_length))
        || !(odd_mask = LLVMConstVector(odd_element, mask_length))) {
        HANDLE_FAILURE("LLVMConstVector");
        return false;
    }

    /* shuffle a <16xi8> vector to two <8xi8> vectors */
    if (!(sub_vector_even = LLVMBuildShuffleVector(
              comp_ctx->builder, vector, vector, even_mask, "pick_even"))
        || !(sub_vector_odd = LLVMBuildShuffleVector(
                 comp_ctx->builder, vector, vector, odd_mask, "pick_odd"))) {
        HANDLE_FAILURE("LLVMBuildShuffleVector");
        return false;
    }

    /* sext/zext <8xi8> to <8xi16> */
    if (is_signed) {
        if (!(sub_vector_even =
                  LLVMBuildSExt(comp_ctx->builder, sub_vector_even,
                                out_vector_type, "even_sext"))
            || !(sub_vector_odd =
                     LLVMBuildSExt(comp_ctx->builder, sub_vector_odd,
                                   out_vector_type, "odd_sext"))) {
            HANDLE_FAILURE("LLVMBuildSExt");
            return false;
        }
    }
    else {
        if (!(sub_vector_even =
                  LLVMBuildZExt(comp_ctx->builder, sub_vector_even,
                                out_vector_type, "even_zext"))
            || !(sub_vector_odd =
                     LLVMBuildZExt(comp_ctx->builder, sub_vector_odd,
                                   out_vector_type, "odd_zext"))) {
            HANDLE_FAILURE("LLVMBuildZExt");
            return false;
        }
    }

    if (!(result = LLVMBuildAdd(comp_ctx->builder, sub_vector_even,
                                sub_vector_odd, "sum"))) {
        HANDLE_FAILURE("LLVMBuildAdd");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}

bool
aot_compile_simd_i16x8_extadd_pairwise_i8x16(AOTCompContext *comp_ctx,
                                             AOTFuncContext *func_ctx,
                                             bool is_signed)
{
    return simd_extadd_pairwise(comp_ctx, func_ctx, V128_i8x16_TYPE,
                                V128_i16x8_TYPE, is_signed);
}

bool
aot_compile_simd_i32x4_extadd_pairwise_i16x8(AOTCompContext *comp_ctx,
                                             AOTFuncContext *func_ctx,
                                             bool is_signed)
{
    return simd_extadd_pairwise(comp_ctx, func_ctx, V128_i16x8_TYPE,
                                V128_i32x4_TYPE, is_signed);
}
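
/* i16x8.q15mulr_sat_s: widen both operands to 8 x i32, compute the rounding
 * Q15 product (x * y + 0x4000) >> 15, saturate the 32-bit result to
 * [-32768, 32767], then truncate back to 8 x i16. */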
bool
aot_compile_simd_i16x8_q15mulr_sat(AOTCompContext *comp_ctx,
                                   AOTFuncContext *func_ctx)
{
    LLVMValueRef lhs, rhs, pad, offset, min, max, result;
    LLVMTypeRef vector_ext_type;

    if (!(rhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i16x8_TYPE,
                                          "rhs"))
        || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                             V128_i16x8_TYPE, "lhs"))) {
        return false;
    }

    if (!(vector_ext_type = LLVMVectorType(I32_TYPE, 8))) {
        HANDLE_FAILURE("LLVMVectorType");
        return false;
    }

    if (!(lhs = LLVMBuildSExt(comp_ctx->builder, lhs, vector_ext_type,
                              "lhs_v8i32"))
        || !(rhs = LLVMBuildSExt(comp_ctx->builder, rhs, vector_ext_type,
                                 "rhs_v8i32"))) {
        HANDLE_FAILURE("LLVMBuildSExt");
        return false;
    }

    /* 0x4000 and 15 */
    if (!(pad = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE,
                                                      0x4000, 8))
        || !(offset = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE,
                                                            15, 8))) {
        return false;
    }

    /* TODO: look for x86 intrinsics for an integer "fused multiply-and-add" */
    /* S.SignedSaturate((x * y + 0x4000) >> 15) */
    if (!(result = LLVMBuildMul(comp_ctx->builder, lhs, rhs, "mul"))) {
        HANDLE_FAILURE("LLVMBuildMul");
        return false;
    }

    if (!(result = LLVMBuildAdd(comp_ctx->builder, result, pad, "add"))) {
        HANDLE_FAILURE("LLVMBuildAdd");
        return false;
    }

    if (!(result = LLVMBuildAShr(comp_ctx->builder, result, offset, "ashr"))) {
        HANDLE_FAILURE("LLVMBuildAShr");
        return false;
    }

    if (!(min = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE,
                                                      0xffff8000, 8))
        || !(max = simd_build_splat_const_integer_vector(comp_ctx, I32_TYPE,
                                                         0x00007fff, 8))) {
        return false;
    }

    /* saturate before truncating so the *sat* part can be optimized */
    if (!(result = simd_saturate(comp_ctx, func_ctx, e_sat_i32x8, result, min,
                                 max, true))) {
        return false;
    }

    if (!(result = LLVMBuildTrunc(comp_ctx->builder, result, V128_i16x8_TYPE,
                                  "down_to_v8i16"))) {
        HANDLE_FAILURE("LLVMBuildTrunc");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}

enum integer_extmul_type {
    e_i16x8_extmul_i8x16,
    e_i32x4_extmul_i16x8,
    e_i64x2_extmul_i32x4,
};
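
/* Extended multiplication: extend the selected half of both operands to twice
 * the lane width and multiply, so the products cannot overflow. */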
static bool
simd_integer_extmul(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                    bool lower_half, bool is_signed,
                    enum integer_extmul_type itype)
{
    LLVMValueRef vec1, vec2, result;
    enum integer_extend_type ext_type[] = {
        e_ext_i8x16,
        e_ext_i16x8,
        e_ext_i32x4,
    };
    LLVMTypeRef in_vector_type[] = {
        V128_i8x16_TYPE,
        V128_i16x8_TYPE,
        V128_i32x4_TYPE,
    };

    if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                           in_vector_type[itype], "vec1"))
        || !(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                              in_vector_type[itype], "vec2"))) {
        return false;
    }

    if (!(vec1 = simd_integer_extension(comp_ctx, func_ctx, ext_type[itype],
                                        vec1, lower_half, is_signed))
        || !(vec2 = simd_integer_extension(comp_ctx, func_ctx, ext_type[itype],
                                           vec2, lower_half, is_signed))) {
        return false;
    }

    if (!(result = LLVMBuildMul(comp_ctx->builder, vec1, vec2, "product"))) {
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}

bool
aot_compile_simd_i16x8_extmul_i8x16(AOTCompContext *comp_ctx,
                                    AOTFuncContext *func_ctx, bool lower_half,
                                    bool is_signed)
{
    return simd_integer_extmul(comp_ctx, func_ctx, lower_half, is_signed,
                               e_i16x8_extmul_i8x16);
}

bool
aot_compile_simd_i32x4_extmul_i16x8(AOTCompContext *comp_ctx,
                                    AOTFuncContext *func_ctx, bool lower_half,
                                    bool is_signed)
{
    return simd_integer_extmul(comp_ctx, func_ctx, lower_half, is_signed,
                               e_i32x4_extmul_i16x8);
}

bool
aot_compile_simd_i64x2_extmul_i32x4(AOTCompContext *comp_ctx,
                                    AOTFuncContext *func_ctx, bool lower_half,
                                    bool is_signed)
{
    return simd_integer_extmul(comp_ctx, func_ctx, lower_half, is_signed,
                               e_i64x2_extmul_i32x4);
}