simd_floating_point.c

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "simd_floating_point.h"
#include "simd_common.h"
#include "../aot_emit_exception.h"
#include "../aot_emit_numberic.h"
#include "../../aot/aot_runtime.h"
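
/* Shared helper for the binary f32x4/f64x2 arithmetic opcodes: pops two v128
 * operands from the value stack, bitcasts them to the requested float vector
 * type, emits the matching LLVM float instruction, and pushes the result back
 * as a v128. */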
static bool
simd_v128_float_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                      FloatArithmetic arith_op, LLVMTypeRef vector_type)
{
    LLVMValueRef lhs, rhs, result = NULL;

    if (!(rhs =
              simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
        || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
                                             "lhs"))) {
        return false;
    }

    switch (arith_op) {
        case FLOAT_ADD:
            result = LLVMBuildFAdd(comp_ctx->builder, lhs, rhs, "sum");
            break;
        case FLOAT_SUB:
            result = LLVMBuildFSub(comp_ctx->builder, lhs, rhs, "difference");
            break;
        case FLOAT_MUL:
            result = LLVMBuildFMul(comp_ctx->builder, lhs, rhs, "product");
            break;
        case FLOAT_DIV:
            result = LLVMBuildFDiv(comp_ctx->builder, lhs, rhs, "quotient");
            break;
        default:
            return false;
    }

    if (!result) {
        HANDLE_FAILURE(
            "LLVMBuildFAdd/LLVMBuildFSub/LLVMBuildFMul/LLVMBuildFDiv");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}

bool
aot_compile_simd_f32x4_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                             FloatArithmetic arith_op)
{
    return simd_v128_float_arith(comp_ctx, func_ctx, arith_op, V128_f32x4_TYPE);
}

bool
aot_compile_simd_f64x2_arith(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                             FloatArithmetic arith_op)
{
    return simd_v128_float_arith(comp_ctx, func_ctx, arith_op, V128_f64x2_TYPE);
}

static bool
simd_v128_float_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                    LLVMTypeRef vector_type)
{
    LLVMValueRef vector, result;

    if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
                                             "vector"))) {
        return false;
    }

    if (!(result = LLVMBuildFNeg(comp_ctx->builder, vector, "neg"))) {
        HANDLE_FAILURE("LLVMBuildFNeg");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}

bool
aot_compile_simd_f32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_v128_float_neg(comp_ctx, func_ctx, V128_f32x4_TYPE);
}

bool
aot_compile_simd_f64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_v128_float_neg(comp_ctx, func_ctx, V128_f64x2_TYPE);
}
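
/* Shared helper for the unary float opcodes (abs, round, sqrt, ceil, floor,
 * trunc, nearest): pops one v128 operand, bitcasts it to the requested float
 * vector type, and calls the named LLVM vector intrinsic on it. */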
static bool
simd_float_intrinsic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
                     LLVMTypeRef vector_type, const char *intrinsic)
{
    LLVMValueRef vector, result;
    LLVMTypeRef param_types[1] = { vector_type };

    if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
                                             "vector"))) {
        return false;
    }

    if (!(result =
              aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic,
                                      vector_type, param_types, 1, vector))) {
        HANDLE_FAILURE("LLVMBuildCall");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}

bool
aot_compile_simd_f32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
                                "llvm.fabs.v4f32");
}

bool
aot_compile_simd_f64x2_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
                                "llvm.fabs.v2f64");
}

bool
aot_compile_simd_f32x4_round(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
                                "llvm.round.v4f32");
}

bool
aot_compile_simd_f64x2_round(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
                                "llvm.round.v2f64");
}

bool
aot_compile_simd_f32x4_sqrt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
                                "llvm.sqrt.v4f32");
}

bool
aot_compile_simd_f64x2_sqrt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
                                "llvm.sqrt.v2f64");
}

bool
aot_compile_simd_f32x4_ceil(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
                                "llvm.ceil.v4f32");
}

bool
aot_compile_simd_f64x2_ceil(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
                                "llvm.ceil.v2f64");
}

bool
aot_compile_simd_f32x4_floor(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
                                "llvm.floor.v4f32");
}

bool
aot_compile_simd_f64x2_floor(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
                                "llvm.floor.v2f64");
}

bool
aot_compile_simd_f32x4_trunc(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
                                "llvm.trunc.v4f32");
}

bool
aot_compile_simd_f64x2_trunc(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
                                "llvm.trunc.v2f64");
}

bool
aot_compile_simd_f32x4_nearest(AOTCompContext *comp_ctx,
                               AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f32x4_TYPE,
                                "llvm.rint.v4f32");
}

bool
aot_compile_simd_f64x2_nearest(AOTCompContext *comp_ctx,
                               AOTFuncContext *func_ctx)
{
    return simd_float_intrinsic(comp_ctx, func_ctx, V128_f64x2_TYPE,
                                "llvm.rint.v2f64");
}
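
/* Shared lowering for f32x4/f64x2 pmin and pmax: a single ordered compare
 * (rhs < lhs for pmin, rhs > lhs for pmax) followed by a select, i.e.
 * "rhs cmp lhs ? rhs : lhs", with no NaN or signed-zero handling beyond what
 * the compare itself provides. */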
static bool
simd_float_cmp(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
               FloatArithmetic op, LLVMTypeRef vector_type)
{
    LLVMValueRef lhs, rhs, cmp, selected;

    if (!(rhs =
              simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
        || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
                                             "lhs"))) {
        return false;
    }

    if (!(cmp = LLVMBuildFCmp(comp_ctx->builder,
                              op == FLOAT_MIN ? LLVMRealOLT : LLVMRealOGT, rhs,
                              lhs, "cmp"))) {
        HANDLE_FAILURE("LLVMBuildFCmp");
        return false;
    }

    if (!(selected =
              LLVMBuildSelect(comp_ctx->builder, cmp, rhs, lhs, "selected"))) {
        HANDLE_FAILURE("LLVMBuildSelect");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, selected, "result");
}
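
/* f32x4.min / f64x2.min with full WebAssembly semantics: if either lane is
 * NaN the result lane is NaN, and min(+0.0, -0.0) is -0.0. Lanes where
 * neither ordered compare (olt/ogt) holds, i.e. equal lanes, fall through to
 * a bitwise OR of the operands, which keeps the sign bit when the zeros
 * differ in sign; the outer selects on lhs_nan/rhs_nan then propagate NaNs. */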
static bool
simd_float_min(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
               LLVMTypeRef vector_type)
{
    LLVMValueRef lhs, rhs, lhs_nan, rhs_nan, olt_ret, ogt_ret, or_ret, ret1,
        ret2, ret3, ret4;

    if (!(rhs =
              simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
        || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
                                             "lhs"))) {
        return false;
    }

    if (!(lhs_nan = LLVMBuildFCmp(comp_ctx->builder, LLVMRealUNO, lhs, lhs,
                                  "lhs_nan"))) {
        HANDLE_FAILURE("LLVMBuildFCmp + LLVMRealUNO");
        return false;
    }

    if (!(rhs_nan = LLVMBuildFCmp(comp_ctx->builder, LLVMRealUNO, rhs, rhs,
                                  "rhs_nan"))) {
        HANDLE_FAILURE("LLVMBuildFCmp + LLVMRealUNO");
        return false;
    }

    if (!(olt_ret = LLVMBuildFCmp(comp_ctx->builder, LLVMRealOLT, lhs, rhs,
                                  "olt_ret"))) {
        HANDLE_FAILURE("LLVMBuildFCmp + LLVMRealOLT");
        return false;
    }

    if (!(ogt_ret = LLVMBuildFCmp(comp_ctx->builder, LLVMRealOGT, lhs, rhs,
                                  "ogt_ret"))) {
        HANDLE_FAILURE("LLVMBuildFCmp + LLVMRealOGT");
        return false;
    }

    /* lhs or rhs */
    {
        LLVMValueRef integer_l, integer_r, integer_or;

        if (!(integer_l = LLVMBuildBitCast(comp_ctx->builder, lhs,
                                           V128_i64x2_TYPE, "lhs_to_int"))) {
            HANDLE_FAILURE("LLVMBuildBitCast");
            return false;
        }

        if (!(integer_r = LLVMBuildBitCast(comp_ctx->builder, rhs,
                                           V128_i64x2_TYPE, "rhs_to_int"))) {
            HANDLE_FAILURE("LLVMBuildBitCast");
            return false;
        }

        if (!(integer_or =
                  LLVMBuildOr(comp_ctx->builder, integer_l, integer_r, "or"))) {
            HANDLE_FAILURE("LLVMBuildOr");
            return false;
        }

        if (!(or_ret = LLVMBuildBitCast(comp_ctx->builder, integer_or,
                                        vector_type, "holder"))) {
            HANDLE_FAILURE("LLVMBuildBitCast");
            return false;
        }
    }

    if (!(ret1 = LLVMBuildSelect(comp_ctx->builder, olt_ret, lhs, or_ret,
                                 "sel_olt"))) {
        HANDLE_FAILURE("LLVMBuildSelect");
        return false;
    }

    if (!(ret2 = LLVMBuildSelect(comp_ctx->builder, ogt_ret, rhs, ret1,
                                 "sel_ogt"))) {
        HANDLE_FAILURE("LLVMBuildSelect");
        return false;
    }

    if (!(ret3 = LLVMBuildSelect(comp_ctx->builder, lhs_nan, lhs, ret2,
                                 "sel_lhs_nan"))) {
        HANDLE_FAILURE("LLVMBuildSelect");
        return false;
    }

    if (!(ret4 = LLVMBuildSelect(comp_ctx->builder, rhs_nan, rhs, ret3,
                                 "sel_rhs_nan"))) {
        HANDLE_FAILURE("LLVMBuildSelect");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, ret4, "result");
}
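
/* f32x4.max / f64x2.max with full WebAssembly semantics: NaN in either lane
 * produces NaN, and max(+0.0, -0.0) is +0.0. Equal lanes fall through to a
 * bitwise AND of the operands, which clears the sign bit when the zeros
 * differ in sign. */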
static bool
simd_float_max(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
               LLVMTypeRef vector_type)
{
    LLVMValueRef lhs, rhs, lhs_nan, rhs_nan, olt_ret, ogt_ret, and_ret, ret1,
        ret2, ret3, ret4;

    if (!(rhs =
              simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type, "rhs"))
        || !(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
                                             "lhs"))) {
        return false;
    }

    if (!(lhs_nan = LLVMBuildFCmp(comp_ctx->builder, LLVMRealUNO, lhs, lhs,
                                  "lhs_nan"))) {
        HANDLE_FAILURE("LLVMBuildFCmp + LLVMRealUNO");
        return false;
    }

    if (!(rhs_nan = LLVMBuildFCmp(comp_ctx->builder, LLVMRealUNO, rhs, rhs,
                                  "rhs_nan"))) {
        HANDLE_FAILURE("LLVMBuildFCmp + LLVMRealUNO");
        return false;
    }

    if (!(olt_ret = LLVMBuildFCmp(comp_ctx->builder, LLVMRealOLT, lhs, rhs,
                                  "olt_ret"))) {
        HANDLE_FAILURE("LLVMBuildFCmp + LLVMRealOLT");
        return false;
    }

    if (!(ogt_ret = LLVMBuildFCmp(comp_ctx->builder, LLVMRealOGT, lhs, rhs,
                                  "ogt_ret"))) {
        HANDLE_FAILURE("LLVMBuildFCmp + LLVMRealOGT");
        return false;
    }

    /* lhs and rhs */
    {
        LLVMValueRef integer_l, integer_r, integer_and;

        if (!(integer_l = LLVMBuildBitCast(comp_ctx->builder, lhs,
                                           V128_i64x2_TYPE, "lhs_to_int"))) {
            HANDLE_FAILURE("LLVMBuildBitCast");
            return false;
        }

        if (!(integer_r = LLVMBuildBitCast(comp_ctx->builder, rhs,
                                           V128_i64x2_TYPE, "rhs_to_int"))) {
            HANDLE_FAILURE("LLVMBuildBitCast");
            return false;
        }

        if (!(integer_and = LLVMBuildAnd(comp_ctx->builder, integer_l,
                                         integer_r, "and"))) {
            HANDLE_FAILURE("LLVMBuildAnd");
            return false;
        }

        if (!(and_ret = LLVMBuildBitCast(comp_ctx->builder, integer_and,
                                         vector_type, "holder"))) {
            HANDLE_FAILURE("LLVMBuildBitCast");
            return false;
        }
    }

    if (!(ret1 = LLVMBuildSelect(comp_ctx->builder, ogt_ret, lhs, and_ret,
                                 "sel_ogt"))) {
        HANDLE_FAILURE("LLVMBuildSelect");
        return false;
    }

    if (!(ret2 = LLVMBuildSelect(comp_ctx->builder, olt_ret, rhs, ret1,
                                 "sel_olt"))) {
        HANDLE_FAILURE("LLVMBuildSelect");
        return false;
    }

    if (!(ret3 = LLVMBuildSelect(comp_ctx->builder, lhs_nan, lhs, ret2,
                                 "sel_lhs_nan"))) {
        HANDLE_FAILURE("LLVMBuildSelect");
        return false;
    }

    if (!(ret4 = LLVMBuildSelect(comp_ctx->builder, rhs_nan, rhs, ret3,
                                 "sel_rhs_nan"))) {
        HANDLE_FAILURE("LLVMBuildSelect");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, ret4, "result");
}

bool
aot_compile_simd_f32x4_min_max(AOTCompContext *comp_ctx,
                               AOTFuncContext *func_ctx, bool run_min)
{
    return run_min ? simd_float_min(comp_ctx, func_ctx, V128_f32x4_TYPE)
                   : simd_float_max(comp_ctx, func_ctx, V128_f32x4_TYPE);
}

bool
aot_compile_simd_f64x2_min_max(AOTCompContext *comp_ctx,
                               AOTFuncContext *func_ctx, bool run_min)
{
    return run_min ? simd_float_min(comp_ctx, func_ctx, V128_f64x2_TYPE)
                   : simd_float_max(comp_ctx, func_ctx, V128_f64x2_TYPE);
}

bool
aot_compile_simd_f32x4_pmin_pmax(AOTCompContext *comp_ctx,
                                 AOTFuncContext *func_ctx, bool run_min)
{
    return simd_float_cmp(comp_ctx, func_ctx, run_min ? FLOAT_MIN : FLOAT_MAX,
                          V128_f32x4_TYPE);
}

bool
aot_compile_simd_f64x2_pmin_pmax(AOTCompContext *comp_ctx,
                                 AOTFuncContext *func_ctx, bool run_min)
{
    return simd_float_cmp(comp_ctx, func_ctx, run_min ? FLOAT_MIN : FLOAT_MAX,
                          V128_f64x2_TYPE);
}
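
/* Demote (f32x4.demote_f64x2_zero): truncate the two f64 lanes to f32, place
 * them in the two low lanes of the result, and leave the two high lanes zero
 * (the result is built from an all-zero f32x4 constant). */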
bool
aot_compile_simd_f64x2_demote(AOTCompContext *comp_ctx,
                              AOTFuncContext *func_ctx)
{
    LLVMValueRef vector, elem_0, elem_1, result;

    if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                             V128_f64x2_TYPE, "vector"))) {
        return false;
    }

    if (!(elem_0 = LLVMBuildExtractElement(comp_ctx->builder, vector,
                                           LLVM_CONST(i32_zero), "elem_0"))
        || !(elem_1 = LLVMBuildExtractElement(comp_ctx->builder, vector,
                                              LLVM_CONST(i32_one), "elem_1"))) {
        HANDLE_FAILURE("LLVMBuildExtractElement");
        return false;
    }

    /* fptrunc <f64> elem to <f32> */
    if (!(elem_0 = LLVMBuildFPTrunc(comp_ctx->builder, elem_0, F32_TYPE,
                                    "elem_0_trunc"))
        || !(elem_1 = LLVMBuildFPTrunc(comp_ctx->builder, elem_1, F32_TYPE,
                                       "elem_1_trunc"))) {
        HANDLE_FAILURE("LLVMBuildFPTrunc");
        return false;
    }

    if (!(result = LLVMBuildInsertElement(comp_ctx->builder,
                                          LLVM_CONST(f32x4_vec_zero), elem_0,
                                          LLVM_CONST(i32_zero), "new_vector_0"))
        || !(result =
                 LLVMBuildInsertElement(comp_ctx->builder, result, elem_1,
                                        LLVM_CONST(i32_one), "new_vector_1"))) {
        HANDLE_FAILURE("LLVMBuildInsertElement");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}
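
/* Promote (f64x2.promote_low_f32x4): extend the two low f32 lanes to f64 and
 * insert them into an all-zero f64x2 constant to form the result. */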
bool
aot_compile_simd_f32x4_promote(AOTCompContext *comp_ctx,
                               AOTFuncContext *func_ctx)
{
    LLVMValueRef vector, elem_0, elem_1, result;

    if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
                                             V128_f32x4_TYPE, "vector"))) {
        return false;
    }

    if (!(elem_0 = LLVMBuildExtractElement(comp_ctx->builder, vector,
                                           LLVM_CONST(i32_zero), "elem_0"))
        || !(elem_1 = LLVMBuildExtractElement(comp_ctx->builder, vector,
                                              LLVM_CONST(i32_one), "elem_1"))) {
        HANDLE_FAILURE("LLVMBuildExtractElement");
        return false;
    }

    /* fpext <f32> elem to <f64> */
    if (!(elem_0 =
              LLVMBuildFPExt(comp_ctx->builder, elem_0, F64_TYPE, "elem_0_ext"))
        || !(elem_1 = LLVMBuildFPExt(comp_ctx->builder, elem_1, F64_TYPE,
                                     "elem_1_ext"))) {
        HANDLE_FAILURE("LLVMBuildFPExt");
        return false;
    }

    if (!(result = LLVMBuildInsertElement(comp_ctx->builder,
                                          LLVM_CONST(f64x2_vec_zero), elem_0,
                                          LLVM_CONST(i32_zero), "new_vector_0"))
        || !(result =
                 LLVMBuildInsertElement(comp_ctx->builder, result, elem_1,
                                        LLVM_CONST(i32_one), "new_vector_1"))) {
        HANDLE_FAILURE("LLVMBuildInsertElement");
        return false;
    }

    return simd_bitcast_and_push_v128(comp_ctx, func_ctx, result, "result");
}