jit_emit_function.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "jit_emit_function.h"
  6. #include "jit_emit_exception.h"
  7. #include "jit_emit_control.h"
  8. #include "../jit_frontend.h"
  9. #include "../jit_codegen.h"
  10. #include "../../interpreter/wasm_runtime.h"
  11. static bool
  12. emit_callnative(JitCompContext *cc, JitReg native_func_reg, JitReg res,
  13. JitReg *params, uint32 param_count);
  14. /* Prepare parameters for the function to call */
  15. static bool
  16. pre_call(JitCompContext *cc, const WASMType *func_type)
  17. {
  18. JitReg value;
  19. uint32 i, outs_off;
  20. /* Prepare parameters for the function to call */
  21. outs_off =
  22. cc->total_frame_size + offsetof(WASMInterpFrame, lp)
  23. + wasm_get_cell_num(func_type->types, func_type->param_count) * 4;
  24. for (i = 0; i < func_type->param_count; i++) {
  25. switch (func_type->types[func_type->param_count - 1 - i]) {
  26. case VALUE_TYPE_I32:
  27. #if WASM_ENABLE_REF_TYPES != 0
  28. case VALUE_TYPE_EXTERNREF:
  29. case VALUE_TYPE_FUNCREF:
  30. #endif
  31. POP_I32(value);
  32. outs_off -= 4;
  33. GEN_INSN(STI32, value, cc->fp_reg, NEW_CONST(I32, outs_off));
  34. break;
  35. case VALUE_TYPE_I64:
  36. POP_I64(value);
  37. outs_off -= 8;
  38. GEN_INSN(STI64, value, cc->fp_reg, NEW_CONST(I32, outs_off));
  39. break;
  40. case VALUE_TYPE_F32:
  41. POP_F32(value);
  42. outs_off -= 4;
  43. GEN_INSN(STF32, value, cc->fp_reg, NEW_CONST(I32, outs_off));
  44. break;
  45. case VALUE_TYPE_F64:
  46. POP_F64(value);
  47. outs_off -= 8;
  48. GEN_INSN(STF64, value, cc->fp_reg, NEW_CONST(I32, outs_off));
  49. break;
  50. default:
  51. bh_assert(0);
  52. goto fail;
  53. }
  54. }
  55. /* Commit sp as the callee may use it to store the results */
  56. gen_commit_sp_ip(cc->jit_frame);
  57. return true;
  58. fail:
  59. return false;
  60. }
  61. /* Push results */
  62. static bool
  63. post_return(JitCompContext *cc, const WASMType *func_type, JitReg first_res,
  64. bool update_committed_sp)
  65. {
  66. uint32 i, n;
  67. JitReg value;
  68. n = cc->jit_frame->sp - cc->jit_frame->lp;
  69. for (i = 0; i < func_type->result_count; i++) {
  70. switch (func_type->types[func_type->param_count + i]) {
  71. case VALUE_TYPE_I32:
  72. #if WASM_ENABLE_REF_TYPES != 0
  73. case VALUE_TYPE_EXTERNREF:
  74. case VALUE_TYPE_FUNCREF:
  75. #endif
  76. if (i == 0 && first_res) {
  77. bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_I32);
  78. value = first_res;
  79. }
  80. else {
  81. value = jit_cc_new_reg_I32(cc);
  82. GEN_INSN(LDI32, value, cc->fp_reg,
  83. NEW_CONST(I32, offset_of_local(n)));
  84. }
  85. PUSH_I32(value);
  86. n++;
  87. break;
  88. case VALUE_TYPE_I64:
  89. if (i == 0 && first_res) {
  90. bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_I64);
  91. value = first_res;
  92. }
  93. else {
  94. value = jit_cc_new_reg_I64(cc);
  95. GEN_INSN(LDI64, value, cc->fp_reg,
  96. NEW_CONST(I32, offset_of_local(n)));
  97. }
  98. PUSH_I64(value);
  99. n += 2;
  100. break;
  101. case VALUE_TYPE_F32:
  102. if (i == 0 && first_res) {
  103. bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_F32);
  104. value = first_res;
  105. }
  106. else {
  107. value = jit_cc_new_reg_F32(cc);
  108. GEN_INSN(LDF32, value, cc->fp_reg,
  109. NEW_CONST(I32, offset_of_local(n)));
  110. }
  111. PUSH_F32(value);
  112. n++;
  113. break;
  114. case VALUE_TYPE_F64:
  115. if (i == 0 && first_res) {
  116. bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_F64);
  117. value = first_res;
  118. }
  119. else {
  120. value = jit_cc_new_reg_F64(cc);
  121. GEN_INSN(LDF64, value, cc->fp_reg,
  122. NEW_CONST(I32, offset_of_local(n)));
  123. }
  124. PUSH_F64(value);
  125. n += 2;
  126. break;
  127. default:
  128. bh_assert(0);
  129. goto fail;
  130. }
  131. }
  132. if (update_committed_sp)
  133. /* Update the committed_sp as the callee has updated the frame sp */
  134. cc->jit_frame->committed_sp = cc->jit_frame->sp;
  135. return true;
  136. fail:
  137. return false;
  138. }
  139. static bool
  140. pre_load(JitCompContext *cc, JitReg *argvs, const WASMType *func_type)
  141. {
  142. JitReg value;
  143. uint32 i;
  144. /* Prepare parameters for the function to call */
  145. for (i = 0; i < func_type->param_count; i++) {
  146. switch (func_type->types[func_type->param_count - 1 - i]) {
  147. case VALUE_TYPE_I32:
  148. #if WASM_ENABLE_REF_TYPES != 0
  149. case VALUE_TYPE_EXTERNREF:
  150. case VALUE_TYPE_FUNCREF:
  151. #endif
  152. POP_I32(value);
  153. argvs[func_type->param_count - 1 - i] = value;
  154. break;
  155. case VALUE_TYPE_I64:
  156. POP_I64(value);
  157. argvs[func_type->param_count - 1 - i] = value;
  158. break;
  159. case VALUE_TYPE_F32:
  160. POP_F32(value);
  161. argvs[func_type->param_count - 1 - i] = value;
  162. break;
  163. case VALUE_TYPE_F64:
  164. POP_F64(value);
  165. argvs[func_type->param_count - 1 - i] = value;
  166. break;
  167. default:
  168. bh_assert(0);
  169. goto fail;
  170. }
  171. }
  172. gen_commit_sp_ip(cc->jit_frame);
  173. return true;
  174. fail:
  175. return false;
  176. }
  177. static JitReg
  178. create_first_res_reg(JitCompContext *cc, const WASMType *func_type)
  179. {
  180. if (func_type->result_count) {
  181. switch (func_type->types[func_type->param_count]) {
  182. case VALUE_TYPE_I32:
  183. #if WASM_ENABLE_REF_TYPES != 0
  184. case VALUE_TYPE_EXTERNREF:
  185. case VALUE_TYPE_FUNCREF:
  186. #endif
  187. return jit_cc_new_reg_I32(cc);
  188. case VALUE_TYPE_I64:
  189. return jit_cc_new_reg_I64(cc);
  190. case VALUE_TYPE_F32:
  191. return jit_cc_new_reg_F32(cc);
  192. case VALUE_TYPE_F64:
  193. return jit_cc_new_reg_F64(cc);
  194. default:
  195. bh_assert(0);
  196. return 0;
  197. }
  198. }
  199. return 0;
  200. }
  201. bool
  202. jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
  203. {
  204. WASMModule *wasm_module = cc->cur_wasm_module;
  205. WASMFunctionImport *func_import;
  206. WASMFunction *func;
  207. WASMType *func_type;
  208. JitFrame *jit_frame = cc->jit_frame;
  209. JitReg fast_jit_func_ptrs, jitted_code = 0;
  210. JitReg native_func, *argvs = NULL, *argvs1 = NULL, func_params[5];
  211. JitReg native_addr_ptr, module_inst_reg, ret, res;
  212. uint32 jitted_func_idx, i;
  213. uint64 total_size;
  214. const char *signature = NULL;
  215. /* Whether the argument is a pointer/str argument and
  216. need to call jit_check_app_addr_and_convert */
  217. bool is_pointer_arg;
  218. bool return_value = false;
  219. #if WASM_ENABLE_THREAD_MGR != 0
  220. /* Insert suspend check point */
  221. if (!jit_check_suspend_flags(cc))
  222. goto fail;
  223. #endif
  224. if (func_idx < wasm_module->import_function_count) {
  225. /* The function to call is an import function */
  226. func_import = &wasm_module->import_functions[func_idx].u.function;
  227. func_type = func_import->func_type;
  228. /* Call fast_jit_invoke_native in some cases */
  229. if (!func_import->func_ptr_linked /* import func hasn't been linked */
  230. || func_import->call_conv_wasm_c_api /* linked by wasm_c_api */
  231. || func_import->call_conv_raw /* registered as raw mode */
  232. || func_type->param_count >= 5 /* registered as normal mode, but
  233. jit_emit_callnative only supports
  234. maximum 6 registers now
  235. (include exec_nev) */) {
  236. JitReg arg_regs[3];
  237. if (!pre_call(cc, func_type)) {
  238. goto fail;
  239. }
  240. /* Call fast_jit_invoke_native */
  241. ret = jit_cc_new_reg_I32(cc);
  242. arg_regs[0] = cc->exec_env_reg;
  243. arg_regs[1] = NEW_CONST(I32, func_idx);
  244. arg_regs[2] = cc->fp_reg;
  245. if (!jit_emit_callnative(cc, fast_jit_invoke_native, ret, arg_regs,
  246. 3)) {
  247. goto fail;
  248. }
  249. /* Convert the return value from bool to uint32 */
  250. GEN_INSN(AND, ret, ret, NEW_CONST(I32, 0xFF));
  251. /* Check whether there is exception thrown */
  252. GEN_INSN(CMP, cc->cmp_reg, ret, NEW_CONST(I32, 0));
  253. if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BEQ,
  254. cc->cmp_reg, NULL)) {
  255. goto fail;
  256. }
  257. if (!post_return(cc, func_type, 0, true)) {
  258. goto fail;
  259. }
  260. #if WASM_ENABLE_THREAD_MGR != 0
  261. /* Insert suspend check point */
  262. if (!jit_check_suspend_flags(cc))
  263. goto fail;
  264. #endif
  265. return true;
  266. }
  267. /* Import function was registered as normal mode, and its argument count
  268. is no more than 5, we directly call it */
  269. signature = func_import->signature;
  270. bh_assert(signature);
  271. /* Allocate memory for argvs*/
  272. total_size = sizeof(JitReg) * (uint64)(func_type->param_count);
  273. if (total_size > 0) {
  274. if (total_size >= UINT32_MAX
  275. || !(argvs = jit_malloc((uint32)total_size))) {
  276. goto fail;
  277. }
  278. }
  279. /* Pop function params from stack and store them into argvs */
  280. if (!pre_load(cc, argvs, func_type)) {
  281. goto fail;
  282. }
  283. ret = jit_cc_new_reg_I32(cc);
  284. func_params[0] = module_inst_reg = get_module_inst_reg(jit_frame);
  285. func_params[4] = native_addr_ptr = jit_cc_new_reg_ptr(cc);
  286. GEN_INSN(ADD, native_addr_ptr, cc->exec_env_reg,
  287. NEW_CONST(PTR, offsetof(WASMExecEnv, jit_cache)));
  288. /* Traverse each pointer/str argument, call
  289. jit_check_app_addr_and_convert to check whether it is
  290. in the range of linear memory and and convert it from
  291. app offset into native address */
  292. for (i = 0; i < func_type->param_count; i++) {
  293. is_pointer_arg = false;
  294. if (signature[i + 1] == '*') {
  295. /* param is a pointer */
  296. is_pointer_arg = true;
  297. func_params[1] = NEW_CONST(I32, false); /* is_str = false */
  298. func_params[2] = argvs[i];
  299. if (signature[i + 2] == '~') {
  300. /* TODO: Memory64 no need to convert if mem idx type i64 */
  301. func_params[3] = jit_cc_new_reg_I64(cc);
  302. /* pointer with length followed */
  303. GEN_INSN(I32TOI64, func_params[3], argvs[i + 1]);
  304. }
  305. else {
  306. /* pointer with length followed */
  307. func_params[3] = NEW_CONST(I64, 1);
  308. }
  309. }
  310. else if (signature[i + 1] == '$') {
  311. /* param is a string */
  312. is_pointer_arg = true;
  313. func_params[1] = NEW_CONST(I32, true); /* is_str = true */
  314. func_params[2] = argvs[i];
  315. func_params[3] = NEW_CONST(I64, 1);
  316. }
  317. if (is_pointer_arg) {
  318. JitReg native_addr_64 = jit_cc_new_reg_I64(cc);
  319. /* TODO: Memory64 no need to convert if mem idx type i64 */
  320. GEN_INSN(I32TOI64, native_addr_64, func_params[2]);
  321. func_params[2] = native_addr_64;
  322. if (!jit_emit_callnative(cc, jit_check_app_addr_and_convert,
  323. ret, func_params, 5)) {
  324. goto fail;
  325. }
  326. /* Convert the return value from bool to uint32 */
  327. GEN_INSN(AND, ret, ret, NEW_CONST(I32, 0xFF));
  328. /* Check whether there is exception thrown */
  329. GEN_INSN(CMP, cc->cmp_reg, ret, NEW_CONST(I32, 0));
  330. if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BEQ,
  331. cc->cmp_reg, NULL)) {
  332. return false;
  333. }
  334. /* Load native addr from pointer of native addr,
  335. or exec_env->jit_cache */
  336. argvs[i] = jit_cc_new_reg_ptr(cc);
  337. GEN_INSN(LDPTR, argvs[i], native_addr_ptr, NEW_CONST(I32, 0));
  338. }
  339. }
  340. res = create_first_res_reg(cc, func_type);
  341. /* Prepare arguments of the native function */
  342. if (!(argvs1 =
  343. jit_calloc(sizeof(JitReg) * (func_type->param_count + 1)))) {
  344. goto fail;
  345. }
  346. argvs1[0] = cc->exec_env_reg;
  347. for (i = 0; i < func_type->param_count; i++) {
  348. argvs1[i + 1] = argvs[i];
  349. }
  350. /* Call the native function */
  351. native_func = NEW_CONST(PTR, (uintptr_t)func_import->func_ptr_linked);
  352. if (!emit_callnative(cc, native_func, res, argvs1,
  353. func_type->param_count + 1)) {
  354. jit_free(argvs1);
  355. goto fail;
  356. }
  357. jit_free(argvs1);
  358. /* Check whether there is exception thrown */
  359. GEN_INSN(LDI8, ret, module_inst_reg,
  360. NEW_CONST(I32, offsetof(WASMModuleInstance, cur_exception)));
  361. GEN_INSN(CMP, cc->cmp_reg, ret, NEW_CONST(I32, 0));
  362. if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BNE,
  363. cc->cmp_reg, NULL)) {
  364. goto fail;
  365. }
  366. if (!post_return(cc, func_type, res, false)) {
  367. goto fail;
  368. }
  369. }
  370. else {
  371. /* The function to call is a bytecode function */
  372. func = wasm_module
  373. ->functions[func_idx - wasm_module->import_function_count];
  374. func_type = func->func_type;
  375. /* jitted_code = func_ptrs[func_idx - import_function_count] */
  376. fast_jit_func_ptrs = get_fast_jit_func_ptrs_reg(jit_frame);
  377. jitted_code = jit_cc_new_reg_ptr(cc);
  378. jitted_func_idx = func_idx - wasm_module->import_function_count;
  379. GEN_INSN(LDPTR, jitted_code, fast_jit_func_ptrs,
  380. NEW_CONST(I32, (uint32)sizeof(void *) * jitted_func_idx));
  381. if (!pre_call(cc, func_type)) {
  382. goto fail;
  383. }
  384. res = create_first_res_reg(cc, func_type);
  385. GEN_INSN(CALLBC, res, 0, jitted_code, NEW_CONST(I32, func_idx));
  386. if (!post_return(cc, func_type, res, true)) {
  387. goto fail;
  388. }
  389. }
  390. #if WASM_ENABLE_THREAD_MGR != 0
  391. /* Insert suspend check point */
  392. if (!jit_check_suspend_flags(cc))
  393. goto fail;
  394. #endif
  395. /* Clear part of memory regs and table regs as their values
  396. may be changed in the function call */
  397. if (cc->cur_wasm_module->possible_memory_grow)
  398. clear_memory_regs(jit_frame);
  399. clear_table_regs(jit_frame);
  400. /* Ignore tail call currently */
  401. (void)tail_call;
  402. return_value = true;
  403. fail:
  404. if (argvs)
  405. jit_free(argvs);
  406. return return_value;
  407. }
  408. static JitReg
  409. pack_argv(JitCompContext *cc)
  410. {
  411. /* reuse the stack of the next frame */
  412. uint32 stack_base;
  413. JitReg argv;
  414. stack_base = cc->total_frame_size + offsetof(WASMInterpFrame, lp);
  415. argv = jit_cc_new_reg_ptr(cc);
  416. GEN_INSN(ADD, argv, cc->fp_reg, NEW_CONST(PTR, stack_base));
  417. if (jit_get_last_error(cc)) {
  418. return (JitReg)0;
  419. }
  420. return argv;
  421. }
/**
 * Translate opcode call_indirect: look the function index up in table
 * `tbl_idx`, validate the element and its type against `type_idx`, then
 * branch to one of two basic blocks: import functions go through the
 * fast_jit_call_indirect trampoline, bytecode functions are called via
 * CALLBC; both paths join in a common func_return block.
 *
 * @param cc the compilation context
 * @param type_idx expected function type index of the callee
 * @param tbl_idx index of the table to look the function up in
 *
 * @return true on success, false on failure
 */
bool
jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
                             uint32 tbl_idx)
{
    WASMModule *wasm_module = cc->cur_wasm_module;
    JitBasicBlock *block_import, *block_nonimport, *func_return;
    JitReg elem_idx, native_ret, argv, arg_regs[6];
    JitFrame *jit_frame = cc->jit_frame;
    JitReg tbl_size, offset, offset_i32;
    JitReg func_import, func_idx, tbl_elems, func_count;
    JitReg func_type_indexes, func_type_idx, fast_jit_func_ptrs;
    JitReg offset1_i32, offset1, func_type_idx1, res;
    JitReg import_func_ptrs, jitted_code_idx, jitted_code;
    WASMType *func_type;
    uint32 n;

    POP_I32(elem_idx);

    /* check elem_idx: trap if it is outside the table's current size */
    tbl_size = get_table_cur_size_reg(jit_frame, tbl_idx);
    GEN_INSN(CMP, cc->cmp_reg, elem_idx, tbl_size);
    if (!jit_emit_exception(cc, EXCE_UNDEFINED_ELEMENT, JIT_OP_BGEU,
                            cc->cmp_reg, NULL))
        goto fail;

    /* check func_idx: load the function index from the table element */
    if (UINTPTR_MAX == UINT64_MAX) {
        offset_i32 = jit_cc_new_reg_I32(cc);
        offset = jit_cc_new_reg_I64(cc);
        /* Calculate offset by pointer size (elem_idx *
         * sizeof(table_elem_type_t)) */
        GEN_INSN(SHL, offset_i32, elem_idx, NEW_CONST(I32, 3));
        GEN_INSN(I32TOI64, offset, offset_i32);
    }
    else {
        offset = jit_cc_new_reg_I32(cc);
        GEN_INSN(SHL, offset, elem_idx, NEW_CONST(I32, 2));
    }
    func_idx = jit_cc_new_reg_I32(cc);
    tbl_elems = get_table_elems_reg(jit_frame, tbl_idx);
    GEN_INSN(LDI32, func_idx, tbl_elems, offset);

    /* -1 marks an uninitialized table element */
    GEN_INSN(CMP, cc->cmp_reg, func_idx, NEW_CONST(I32, -1));
    if (!jit_emit_exception(cc, EXCE_UNINITIALIZED_ELEMENT, JIT_OP_BEQ,
                            cc->cmp_reg, NULL))
        goto fail;

    /* NOTE(review): BGTU traps only when func_idx > func_count, so
       func_idx == func_count slips through; BGEU would be the strict
       bound — confirm against upstream intent */
    func_count = NEW_CONST(I32, wasm_module->import_function_count
                                    + wasm_module->function_count);
    GEN_INSN(CMP, cc->cmp_reg, func_idx, func_count);
    if (!jit_emit_exception(cc, EXCE_INVALID_FUNCTION_INDEX, JIT_OP_BGTU,
                            cc->cmp_reg, NULL))
        goto fail;

    /* check func_type */
    /* get func_type_idx from func_type_indexes (a uint32 array) */
    if (UINTPTR_MAX == UINT64_MAX) {
        offset1_i32 = jit_cc_new_reg_I32(cc);
        offset1 = jit_cc_new_reg_I64(cc);
        GEN_INSN(SHL, offset1_i32, func_idx, NEW_CONST(I32, 2));
        GEN_INSN(I32TOI64, offset1, offset1_i32);
    }
    else {
        offset1 = jit_cc_new_reg_I32(cc);
        GEN_INSN(SHL, offset1, func_idx, NEW_CONST(I32, 2));
    }
    func_type_indexes = get_func_type_indexes_reg(jit_frame);
    func_type_idx = jit_cc_new_reg_I32(cc);
    GEN_INSN(LDI32, func_type_idx, func_type_indexes, offset1);

    /* Canonicalize the expected type index so structurally equal types
       compare as equal indexes */
    type_idx = wasm_get_smallest_type_idx(wasm_module->types,
                                          wasm_module->type_count, type_idx);
    func_type_idx1 = NEW_CONST(I32, type_idx);
    GEN_INSN(CMP, cc->cmp_reg, func_type_idx, func_type_idx1);
    if (!jit_emit_exception(cc, EXCE_INVALID_FUNCTION_TYPE_INDEX, JIT_OP_BNE,
                            cc->cmp_reg, NULL))
        goto fail;

    /* pop function arguments and store it to out area of callee stack frame */
    func_type = wasm_module->types[type_idx];
    if (!pre_call(cc, func_type)) {
        goto fail;
    }

    /* store elem_idx and func_idx to exec_env->jit_cache so they survive
       the basic-block boundary (frame values are cleared below) */
    GEN_INSN(STI32, elem_idx, cc->exec_env_reg,
             NEW_CONST(I32, offsetof(WASMExecEnv, jit_cache)));
    GEN_INSN(STI32, func_idx, cc->exec_env_reg,
             NEW_CONST(I32, offsetof(WASMExecEnv, jit_cache) + 4));

#if WASM_ENABLE_THREAD_MGR != 0
    /* Insert suspend check point */
    if (!jit_check_suspend_flags(cc))
        goto fail;
#endif

    block_import = jit_cc_new_basic_block(cc, 0);
    block_nonimport = jit_cc_new_basic_block(cc, 0);
    func_return = jit_cc_new_basic_block(cc, 0);
    if (!block_import || !block_nonimport || !func_return) {
        goto fail;
    }

    /* Commit register values to locals and stacks */
    gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
    /* Clear frame values */
    clear_values(jit_frame);
    /* jump to block_import or block_nonimport */
    GEN_INSN(CMP, cc->cmp_reg, func_idx,
             NEW_CONST(I32, cc->cur_wasm_module->import_function_count));
    GEN_INSN(BLTU, cc->cmp_reg, jit_basic_block_label(block_import),
             jit_basic_block_label(block_nonimport));

    /* block_import: call the import function through the trampoline */
    cc->cur_basic_block = block_import;
    /* reload elem_idx/func_idx saved in exec_env->jit_cache above */
    elem_idx = jit_cc_new_reg_I32(cc);
    GEN_INSN(LDI32, elem_idx, cc->exec_env_reg,
             NEW_CONST(I32, offsetof(WASMExecEnv, jit_cache)));
    GEN_INSN(LDI32, func_idx, cc->exec_env_reg,
             NEW_CONST(I32, offsetof(WASMExecEnv, jit_cache) + 4));
    argv = pack_argv(cc);
    if (!argv) {
        goto fail;
    }
    native_ret = jit_cc_new_reg_I32(cc);
    arg_regs[0] = cc->exec_env_reg;
    arg_regs[1] = NEW_CONST(I32, tbl_idx);
    arg_regs[2] = elem_idx;
    arg_regs[3] = NEW_CONST(I32, type_idx);
    arg_regs[4] = NEW_CONST(I32, func_type->param_cell_num);
    arg_regs[5] = argv;
    import_func_ptrs = get_import_func_ptrs_reg(jit_frame);
    func_import = jit_cc_new_reg_ptr(cc);
    if (UINTPTR_MAX == UINT64_MAX) {
        JitReg func_import_offset = jit_cc_new_reg_I32(cc);
        JitReg func_import_offset_i64 = jit_cc_new_reg_I64(cc);
        GEN_INSN(SHL, func_import_offset, func_idx, NEW_CONST(I32, 3));
        GEN_INSN(I32TOI64, func_import_offset_i64, func_import_offset);
        GEN_INSN(LDPTR, func_import, import_func_ptrs, func_import_offset_i64);
    }
    else {
        JitReg func_import_offset = jit_cc_new_reg_I32(cc);
        GEN_INSN(SHL, func_import_offset, func_idx, NEW_CONST(I32, 2));
        GEN_INSN(LDPTR, func_import, import_func_ptrs, func_import_offset);
    }
    if (!jit_emit_callnative(cc, fast_jit_call_indirect, native_ret, arg_regs,
                             6)) {
        goto fail;
    }
    /* Convert bool to uint32 */
    GEN_INSN(AND, native_ret, native_ret, NEW_CONST(I32, 0xFF));
    /* Check whether there is exception thrown */
    GEN_INSN(CMP, cc->cmp_reg, native_ret, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BEQ, cc->cmp_reg,
                            NULL)) {
        return false;
    }

    /* Store res into current frame, so that post_return in
       block func_return can get the value; the trampoline has written
       the result(s) into the argv buffer */
    n = cc->jit_frame->sp - cc->jit_frame->lp;
    if (func_type->result_count > 0) {
        switch (func_type->types[func_type->param_count]) {
            case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
            case VALUE_TYPE_EXTERNREF:
            case VALUE_TYPE_FUNCREF:
#endif
                res = jit_cc_new_reg_I32(cc);
                GEN_INSN(LDI32, res, argv, NEW_CONST(I32, 0));
                GEN_INSN(STI32, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            case VALUE_TYPE_I64:
                res = jit_cc_new_reg_I64(cc);
                GEN_INSN(LDI64, res, argv, NEW_CONST(I32, 0));
                GEN_INSN(STI64, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            case VALUE_TYPE_F32:
                res = jit_cc_new_reg_F32(cc);
                GEN_INSN(LDF32, res, argv, NEW_CONST(I32, 0));
                GEN_INSN(STF32, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            case VALUE_TYPE_F64:
                res = jit_cc_new_reg_F64(cc);
                GEN_INSN(LDF64, res, argv, NEW_CONST(I32, 0));
                GEN_INSN(STF64, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            default:
                bh_assert(0);
                goto fail;
        }
    }
    gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
    clear_values(jit_frame);
    GEN_INSN(JMP, jit_basic_block_label(func_return));

    /* basic_block non_import: call the jitted bytecode function */
    cc->cur_basic_block = block_nonimport;
    GEN_INSN(LDI32, func_idx, cc->exec_env_reg,
             NEW_CONST(I32, offsetof(WASMExecEnv, jit_cache) + 4));
    /* get jitted_code */
    fast_jit_func_ptrs = get_fast_jit_func_ptrs_reg(jit_frame);
    jitted_code_idx = jit_cc_new_reg_I32(cc);
    jitted_code = jit_cc_new_reg_ptr(cc);
    GEN_INSN(SUB, jitted_code_idx, func_idx,
             NEW_CONST(I32, cc->cur_wasm_module->import_function_count));
    if (UINTPTR_MAX == UINT64_MAX) {
        JitReg jitted_code_offset = jit_cc_new_reg_I32(cc);
        JitReg jitted_code_offset_64 = jit_cc_new_reg_I64(cc);
        GEN_INSN(SHL, jitted_code_offset, jitted_code_idx, NEW_CONST(I32, 3));
        GEN_INSN(I32TOI64, jitted_code_offset_64, jitted_code_offset);
        GEN_INSN(LDPTR, jitted_code, fast_jit_func_ptrs, jitted_code_offset_64);
    }
    else {
        JitReg jitted_code_offset = jit_cc_new_reg_I32(cc);
        GEN_INSN(SHL, jitted_code_offset, jitted_code_idx, NEW_CONST(I32, 2));
        GEN_INSN(LDPTR, jitted_code, fast_jit_func_ptrs, jitted_code_offset);
    }
    /* allocate a result register matching the first result type */
    res = 0;
    if (func_type->result_count > 0) {
        switch (func_type->types[func_type->param_count]) {
            case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
            case VALUE_TYPE_EXTERNREF:
            case VALUE_TYPE_FUNCREF:
#endif
                res = jit_cc_new_reg_I32(cc);
                break;
            case VALUE_TYPE_I64:
                res = jit_cc_new_reg_I64(cc);
                break;
            case VALUE_TYPE_F32:
                res = jit_cc_new_reg_F32(cc);
                break;
            case VALUE_TYPE_F64:
                res = jit_cc_new_reg_F64(cc);
                break;
            default:
                bh_assert(0);
                goto fail;
        }
    }
    GEN_INSN(CALLBC, res, 0, jitted_code, func_idx);
    /* Store res into current frame, so that post_return in
       block func_return can get the value */
    n = cc->jit_frame->sp - cc->jit_frame->lp;
    if (func_type->result_count > 0) {
        switch (func_type->types[func_type->param_count]) {
            case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
            case VALUE_TYPE_EXTERNREF:
            case VALUE_TYPE_FUNCREF:
#endif
                GEN_INSN(STI32, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            case VALUE_TYPE_I64:
                GEN_INSN(STI64, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            case VALUE_TYPE_F32:
                GEN_INSN(STF32, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            case VALUE_TYPE_F64:
                GEN_INSN(STF64, res, cc->fp_reg,
                         NEW_CONST(I32, offset_of_local(n)));
                break;
            default:
                bh_assert(0);
                goto fail;
        }
    }
    /* commit and clear jit frame, then jump to block func_ret */
    gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
    clear_values(jit_frame);
    GEN_INSN(JMP, jit_basic_block_label(func_return));

    /* translate block func_return: both paths join here and push the
       result(s) from the frame onto the eval stack */
    cc->cur_basic_block = func_return;
    if (!post_return(cc, func_type, 0, true)) {
        goto fail;
    }

#if WASM_ENABLE_THREAD_MGR != 0
    /* Insert suspend check point */
    if (!jit_check_suspend_flags(cc))
        goto fail;
#endif

    /* Clear part of memory regs and table regs as their values
       may be changed in the function call */
    if (cc->cur_wasm_module->possible_memory_grow)
        clear_memory_regs(cc->jit_frame);
    clear_table_regs(cc->jit_frame);
    return true;
fail:
    return false;
}
  707. #if WASM_ENABLE_REF_TYPES != 0
  708. bool
  709. jit_compile_op_ref_null(JitCompContext *cc, uint32 ref_type)
  710. {
  711. PUSH_I32(NEW_CONST(I32, NULL_REF));
  712. (void)ref_type;
  713. return true;
  714. fail:
  715. return false;
  716. }
  717. bool
  718. jit_compile_op_ref_is_null(JitCompContext *cc)
  719. {
  720. JitReg ref, res;
  721. POP_I32(ref);
  722. GEN_INSN(CMP, cc->cmp_reg, ref, NEW_CONST(I32, NULL_REF));
  723. res = jit_cc_new_reg_I32(cc);
  724. GEN_INSN(SELECTEQ, res, cc->cmp_reg, NEW_CONST(I32, 1), NEW_CONST(I32, 0));
  725. PUSH_I32(res);
  726. return true;
  727. fail:
  728. return false;
  729. }
  730. bool
  731. jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx)
  732. {
  733. PUSH_I32(NEW_CONST(I32, func_idx));
  734. return true;
  735. fail:
  736. return false;
  737. }
  738. #endif
  739. #if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
  740. static bool
  741. emit_callnative(JitCompContext *cc, JitReg native_func_reg, JitReg res,
  742. JitReg *params, uint32 param_count)
  743. {
  744. JitInsn *insn;
  745. char *i32_arg_names[] = { "edi", "esi", "edx", "ecx" };
  746. char *i64_arg_names[] = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };
  747. char *f32_arg_names[] = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" };
  748. char *f64_arg_names[] = { "xmm0_f64", "xmm1_f64", "xmm2_f64",
  749. "xmm3_f64", "xmm4_f64", "xmm5_f64" };
  750. JitReg i32_arg_regs[4], i64_arg_regs[6];
  751. JitReg f32_arg_regs[6], f64_arg_regs[6], res_reg = 0;
  752. JitReg eax_hreg = jit_codegen_get_hreg_by_name("eax");
  753. JitReg xmm0_hreg = jit_codegen_get_hreg_by_name("xmm0");
  754. uint32 i, i64_reg_idx, float_reg_idx, lock_i32_reg_num;
  755. bh_assert(param_count <= 6);
  756. for (i = 0; i < 4; i++) {
  757. i32_arg_regs[i] = jit_codegen_get_hreg_by_name(i32_arg_names[i]);
  758. }
  759. for (i = 0; i < 6; i++) {
  760. i64_arg_regs[i] = jit_codegen_get_hreg_by_name(i64_arg_names[i]);
  761. f32_arg_regs[i] = jit_codegen_get_hreg_by_name(f32_arg_names[i]);
  762. f64_arg_regs[i] = jit_codegen_get_hreg_by_name(f64_arg_names[i]);
  763. }
  764. lock_i32_reg_num = param_count < 4 ? param_count : 4;
  765. /*
  766. * Lock i32 registers so that they won't be allocated for the operand
  767. * of below I32TOI64 insn, which may have been overwritten in the
  768. * previous MOV, for example, in the below insns:
  769. * MOV I5, I15
  770. * I32TOI64 I6, i5
  771. * CALLNATIVE VOID, native_func, I5, I6
  772. * i5 is used in the second insn, but it has been overwritten in I5
  773. * by the first insn
  774. */
  775. for (i = 0; i < lock_i32_reg_num; i++) {
  776. GEN_INSN(MOV, i32_arg_regs[i], i32_arg_regs[i]);
  777. }
  778. i64_reg_idx = float_reg_idx = 0;
  779. for (i = 0; i < param_count; i++) {
  780. switch (jit_reg_kind(params[i])) {
  781. case JIT_REG_KIND_I32:
  782. GEN_INSN(I32TOI64, i64_arg_regs[i64_reg_idx++], params[i]);
  783. break;
  784. case JIT_REG_KIND_I64:
  785. GEN_INSN(MOV, i64_arg_regs[i64_reg_idx++], params[i]);
  786. break;
  787. case JIT_REG_KIND_F32:
  788. GEN_INSN(MOV, f32_arg_regs[float_reg_idx++], params[i]);
  789. break;
  790. case JIT_REG_KIND_F64:
  791. GEN_INSN(MOV, f64_arg_regs[float_reg_idx++], params[i]);
  792. break;
  793. default:
  794. bh_assert(0);
  795. return false;
  796. }
  797. }
  798. /*
  799. * Announce the locked i32 registers are being used, and do necessary
  800. * spill ASAP
  801. */
  802. for (i = 0; i < lock_i32_reg_num; i++) {
  803. GEN_INSN(MOV, i32_arg_regs[i], i32_arg_regs[i]);
  804. }
  805. if (res) {
  806. switch (jit_reg_kind(res)) {
  807. case JIT_REG_KIND_I32:
  808. res_reg = eax_hreg;
  809. break;
  810. case JIT_REG_KIND_I64:
  811. res_reg = res;
  812. break;
  813. case JIT_REG_KIND_F32:
  814. res_reg = xmm0_hreg;
  815. break;
  816. case JIT_REG_KIND_F64:
  817. res_reg = res;
  818. break;
  819. default:
  820. bh_assert(0);
  821. return false;
  822. }
  823. }
  824. insn = GEN_INSN(CALLNATIVE, res_reg, native_func_reg, param_count);
  825. if (!insn) {
  826. return false;
  827. }
  828. i64_reg_idx = float_reg_idx = 0;
  829. for (i = 0; i < param_count; i++) {
  830. switch (jit_reg_kind(params[i])) {
  831. case JIT_REG_KIND_I32:
  832. case JIT_REG_KIND_I64:
  833. *(jit_insn_opndv(insn, i + 2)) = i64_arg_regs[i64_reg_idx++];
  834. break;
  835. case JIT_REG_KIND_F32:
  836. *(jit_insn_opndv(insn, i + 2)) = f32_arg_regs[float_reg_idx++];
  837. break;
  838. case JIT_REG_KIND_F64:
  839. *(jit_insn_opndv(insn, i + 2)) = f64_arg_regs[float_reg_idx++];
  840. break;
  841. default:
  842. bh_assert(0);
  843. return false;
  844. }
  845. }
  846. if (res && res != res_reg) {
  847. GEN_INSN(MOV, res, res_reg);
  848. }
  849. return true;
  850. }
  851. #else
  852. static bool
  853. emit_callnative(JitCompContext *cc, JitRef native_func_reg, JitReg res,
  854. JitReg *params, uint32 param_count)
  855. {
  856. JitInsn *insn;
  857. uint32 i;
  858. bh_assert(param_count <= 6);
  859. insn = GEN_INSN(CALLNATIVE, res, native_func_reg, param_count);
  860. if (!insn)
  861. return false;
  862. for (i = 0; i < param_count; i++) {
  863. *(jit_insn_opndv(insn, i + 2)) = params[i];
  864. }
  865. return true;
  866. }
  867. #endif
  868. bool
  869. jit_emit_callnative(JitCompContext *cc, void *native_func, JitReg res,
  870. JitReg *params, uint32 param_count)
  871. {
  872. return emit_callnative(cc, NEW_CONST(PTR, (uintptr_t)native_func), res,
  873. params, param_count);
  874. }