jit_ir.c

/*
 * Copyright (C) 2021 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

#include "jit_ir.h"
#include "jit_codegen.h"
#include "jit_frontend.h"

/**
 * Operand kinds of instructions.
 */
enum { JIT_OPND_KIND_Reg, JIT_OPND_KIND_VReg, JIT_OPND_KIND_LookupSwitch };

/**
 * Operand kind of each instruction.
 */
static const uint8 insn_opnd_kind[] = {
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) JIT_OPND_KIND_##OPND_KIND,
#include "jit_ir.def"
#undef INSN
};

/**
 * Operand number of each instruction.
 */
static const uint8 insn_opnd_num[] = {
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) OPND_NUM,
#include "jit_ir.def"
#undef INSN
};

/**
 * Index of the first use operand of each instruction.
 */
static const uint8 insn_opnd_first_use[] = {
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) FIRST_USE,
#include "jit_ir.def"
#undef INSN
};
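
/*
 * Note: instructions carry a variable-length operand array at the tail of
 * JitInsn, so the macros below size the allocation from the offset of the
 * operand storage plus one JitReg per operand.  jit_calloc is expected to
 * zero-fill the block, so every field not set by the constructors below
 * starts out as 0 (a null JitReg).
 */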
#define JIT_INSN_NEW_Reg(OPND_NUM) \
    jit_calloc(offsetof(JitInsn, _opnd) + sizeof(JitReg) * (OPND_NUM))
#define JIT_INSN_NEW_VReg(OPND_NUM)                     \
    jit_calloc(offsetof(JitInsn, _opnd._opnd_VReg._reg) \
               + sizeof(JitReg) * (OPND_NUM))

JitInsn *
_jit_insn_new_Reg_1(JitOpcode opc, JitReg r0)
{
    JitInsn *insn = JIT_INSN_NEW_Reg(1);
    if (insn) {
        insn->opcode = opc;
        *jit_insn_opnd(insn, 0) = r0;
    }
    return insn;
}

JitInsn *
_jit_insn_new_Reg_2(JitOpcode opc, JitReg r0, JitReg r1)
{
    JitInsn *insn = JIT_INSN_NEW_Reg(2);
    if (insn) {
        insn->opcode = opc;
        *jit_insn_opnd(insn, 0) = r0;
        *jit_insn_opnd(insn, 1) = r1;
    }
    return insn;
}

JitInsn *
_jit_insn_new_Reg_3(JitOpcode opc, JitReg r0, JitReg r1, JitReg r2)
{
    JitInsn *insn = JIT_INSN_NEW_Reg(3);
    if (insn) {
        insn->opcode = opc;
        *jit_insn_opnd(insn, 0) = r0;
        *jit_insn_opnd(insn, 1) = r1;
        *jit_insn_opnd(insn, 2) = r2;
    }
    return insn;
}

JitInsn *
_jit_insn_new_Reg_4(JitOpcode opc, JitReg r0, JitReg r1, JitReg r2, JitReg r3)
{
    JitInsn *insn = JIT_INSN_NEW_Reg(4);
    if (insn) {
        insn->opcode = opc;
        *jit_insn_opnd(insn, 0) = r0;
        *jit_insn_opnd(insn, 1) = r1;
        *jit_insn_opnd(insn, 2) = r2;
        *jit_insn_opnd(insn, 3) = r3;
    }
    return insn;
}

JitInsn *
_jit_insn_new_Reg_5(JitOpcode opc, JitReg r0, JitReg r1, JitReg r2, JitReg r3,
                    JitReg r4)
{
    JitInsn *insn = JIT_INSN_NEW_Reg(5);
    if (insn) {
        insn->opcode = opc;
        *jit_insn_opnd(insn, 0) = r0;
        *jit_insn_opnd(insn, 1) = r1;
        *jit_insn_opnd(insn, 2) = r2;
        *jit_insn_opnd(insn, 3) = r3;
        *jit_insn_opnd(insn, 4) = r4;
    }
    return insn;
}

JitInsn *
_jit_insn_new_VReg_1(JitOpcode opc, JitReg r0, int n)
{
    JitInsn *insn = JIT_INSN_NEW_VReg(1 + n);
    if (insn) {
        insn->opcode = opc;
        insn->_opnd._opnd_VReg._reg_num = 1 + n;
        *(jit_insn_opndv(insn, 0)) = r0;
    }
    return insn;
}

JitInsn *
_jit_insn_new_VReg_2(JitOpcode opc, JitReg r0, JitReg r1, int n)
{
    JitInsn *insn = JIT_INSN_NEW_VReg(2 + n);
    if (insn) {
        insn->opcode = opc;
        insn->_opnd._opnd_VReg._reg_num = 2 + n;
        *(jit_insn_opndv(insn, 0)) = r0;
        *(jit_insn_opndv(insn, 1)) = r1;
    }
    return insn;
}

JitInsn *
_jit_insn_new_LookupSwitch_1(JitOpcode opc, JitReg value, uint32 num)
{
    JitOpndLookupSwitch *opnd = NULL;
    JitInsn *insn =
        jit_calloc(offsetof(JitInsn, _opnd._opnd_LookupSwitch.match_pairs)
                   + sizeof(opnd->match_pairs[0]) * num);
    if (insn) {
        insn->opcode = opc;
        opnd = jit_insn_opndls(insn);
        opnd->value = value;
        opnd->match_pairs_num = num;
    }
    return insn;
}

#undef JIT_INSN_NEW_Reg
#undef JIT_INSN_NEW_VReg

void
jit_insn_insert_before(JitInsn *insn1, JitInsn *insn2)
{
    bh_assert(insn1->prev);
    insn1->prev->next = insn2;
    insn2->prev = insn1->prev;
    insn2->next = insn1;
    insn1->prev = insn2;
}

void
jit_insn_insert_after(JitInsn *insn1, JitInsn *insn2)
{
    bh_assert(insn1->next);
    insn1->next->prev = insn2;
    insn2->next = insn1->next;
    insn2->prev = insn1;
    insn1->next = insn2;
}

void
jit_insn_unlink(JitInsn *insn)
{
    bh_assert(insn->prev);
    insn->prev->next = insn->next;
    bh_assert(insn->next);
    insn->next->prev = insn->prev;
    insn->prev = insn->next = NULL;
}

unsigned
jit_insn_hash(JitInsn *insn)
{
    const uint8 opcode = insn->opcode;
    unsigned hash = opcode, i;
    /* Currently, only instructions with Reg kind operand require
       hashing. For others, simply use opcode as the hash value. */
    if (insn_opnd_kind[opcode] != JIT_OPND_KIND_Reg
        || insn_opnd_num[opcode] < 1)
        return hash;
    /* All the instructions with hashing support must be in the
       assignment format, i.e. the first operand is the result (hence
       being ignored) and all the others are operands. This is also
       true for CHK instructions, whose first operand is the instruction
       pointer. */
    for (i = 1; i < insn_opnd_num[opcode]; i++)
        hash = ((hash << 5) - hash) + *(jit_insn_opnd(insn, i));
    return hash;
}

bool
jit_insn_equal(JitInsn *insn1, JitInsn *insn2)
{
    const uint8 opcode = insn1->opcode;
    unsigned i;
    if (insn2->opcode != opcode)
        return false;
    if (insn_opnd_kind[opcode] != JIT_OPND_KIND_Reg
        || insn_opnd_num[opcode] < 1)
        return false;
    for (i = 1; i < insn_opnd_num[opcode]; i++)
        if (*(jit_insn_opnd(insn1, i)) != *(jit_insn_opnd(insn2, i)))
            return false;
    return true;
}
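
/*
 * Return a strided view over the JitReg operands of an instruction.  For
 * LookupSwitch operands the stride is the size of a match pair measured in
 * JitReg units, so iterating the vector is intended to visit the switch
 * value, the default target and then each case's target register while
 * stepping over the constant match values.
 */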
JitRegVec
jit_insn_opnd_regs(JitInsn *insn)
{
    JitRegVec vec = { 0 };
    JitOpndLookupSwitch *ls;
    vec._stride = 1;
    switch (insn_opnd_kind[insn->opcode]) {
        case JIT_OPND_KIND_Reg:
            vec.num = insn_opnd_num[insn->opcode];
            vec._base = jit_insn_opnd(insn, 0);
            break;
        case JIT_OPND_KIND_VReg:
            vec.num = jit_insn_opndv_num(insn);
            vec._base = jit_insn_opndv(insn, 0);
            break;
        case JIT_OPND_KIND_LookupSwitch:
            ls = jit_insn_opndls(insn);
            vec.num = ls->match_pairs_num + 2;
            vec._base = &ls->value;
            vec._stride = sizeof(ls->match_pairs[0]) / sizeof(*vec._base);
            break;
    }
    return vec;
}

unsigned
jit_insn_opnd_first_use(JitInsn *insn)
{
    return insn_opnd_first_use[insn->opcode];
}
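
/*
 * A basic block is represented by its label PHI instruction: operand 0
 * holds the block's own label and the remaining variable operands record
 * the labels of its predecessors (see jit_basic_block_preds).  The PHI's
 * prev/next pointers also serve as the head of the circular list of
 * instructions that belong to the block.
 */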
JitBasicBlock *
jit_basic_block_new(JitReg label, int n)
{
    JitBasicBlock *block = jit_insn_new_PHI(label, n);
    if (!block)
        return NULL;
    block->prev = block->next = block;
    return block;
}

void
jit_basic_block_delete(JitBasicBlock *block)
{
    JitInsn *insn, *next_insn, *end;
    if (!block)
        return;
    insn = jit_basic_block_first_insn(block);
    end = jit_basic_block_end_insn(block);
    for (; insn != end; insn = next_insn) {
        next_insn = insn->next;
        jit_insn_delete(insn);
    }
    jit_insn_delete(block);
}

JitRegVec
jit_basic_block_preds(JitBasicBlock *block)
{
    JitRegVec vec;
    vec.num = jit_insn_opndv_num(block) - 1;
    vec._base = vec.num > 0 ? jit_insn_opndv(block, 1) : NULL;
    vec._stride = 1;
    return vec;
}

JitRegVec
jit_basic_block_succs(JitBasicBlock *block)
{
    JitInsn *last_insn = jit_basic_block_last_insn(block);
    JitRegVec vec;
    vec.num = 0;
    vec._base = NULL;
    vec._stride = 1;
    switch (last_insn->opcode) {
        case JIT_OP_JMP:
            vec.num = 1;
            vec._base = jit_insn_opnd(last_insn, 0);
            break;
        case JIT_OP_BEQ:
        case JIT_OP_BNE:
        case JIT_OP_BGTS:
        case JIT_OP_BGES:
        case JIT_OP_BLTS:
        case JIT_OP_BLES:
        case JIT_OP_BGTU:
        case JIT_OP_BGEU:
        case JIT_OP_BLTU:
        case JIT_OP_BLEU:
            vec.num = 2;
            vec._base = jit_insn_opnd(last_insn, 1);
            break;
        case JIT_OP_LOOKUPSWITCH:
        {
            JitOpndLookupSwitch *opnd = jit_insn_opndls(last_insn);
            vec.num = opnd->match_pairs_num + 1;
            vec._base = &opnd->default_target;
            vec._stride = sizeof(opnd->match_pairs[0]) / sizeof(*vec._base);
            break;
        }
        default:
            vec._stride = 0;
    }
    return vec;
}

JitCompContext *
jit_cc_init(JitCompContext *cc, unsigned htab_size)
{
    JitBasicBlock *entry_block, *exit_block;
    unsigned i, num;
    memset(cc, 0, sizeof(*cc));
    cc->_reference_count = 1;
    jit_annl_enable_basic_block(cc);
    /* Create entry and exit blocks. They must be the first two
       blocks respectively. */
    if (!(entry_block = jit_cc_new_basic_block(cc, 0)))
        goto fail;
    if (!(exit_block = jit_cc_new_basic_block(cc, 0))) {
        jit_basic_block_delete(entry_block);
        goto fail;
    }
    /* Record the entry and exit labels, whose indexes must be 0 and 1
       respectively. */
    cc->entry_label = jit_basic_block_label(entry_block);
    cc->exit_label = jit_basic_block_label(exit_block);
    bh_assert(jit_reg_no(cc->entry_label) == 0
              && jit_reg_no(cc->exit_label) == 1);
    if (!(cc->exce_basic_blocks =
              jit_calloc(sizeof(JitBasicBlock *) * EXCE_NUM)))
        goto fail;
    if (!(cc->incoming_insns_for_exec_bbs =
              jit_calloc(sizeof(JitIncomingInsnList) * EXCE_NUM)))
        goto fail;
    cc->hreg_info = jit_codegen_get_hreg_info();
    bh_assert(cc->hreg_info->info[JIT_REG_KIND_I32].num > 3);
    /* Initialize virtual registers for hard registers. */
    for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
        if ((num = cc->hreg_info->info[i].num)) {
            /* Initialize the capacity to be large enough. */
            jit_cc_new_reg(cc, i);
            bh_assert(cc->_ann._reg_capacity[i] > num);
            cc->_ann._reg_num[i] = num;
        }
    }
    /* Create registers for frame pointer, exec_env and cmp. */
    cc->fp_reg = jit_reg_new(JIT_REG_KIND_PTR, cc->hreg_info->fp_hreg_index);
    cc->exec_env_reg =
        jit_reg_new(JIT_REG_KIND_PTR, cc->hreg_info->exec_env_hreg_index);
    cc->cmp_reg = jit_reg_new(JIT_REG_KIND_I32, cc->hreg_info->cmp_hreg_index);
    cc->_const_val._hash_table_size = htab_size;
    if (!(cc->_const_val._hash_table =
              jit_calloc(htab_size * sizeof(*cc->_const_val._hash_table))))
        goto fail;
    return cc;
fail:
    jit_cc_destroy(cc);
    return NULL;
}

void
jit_cc_destroy(JitCompContext *cc)
{
    unsigned i, end;
    JitBasicBlock *block;
    JitIncomingInsn *incoming_insn, *incoming_insn_next;
    jit_block_stack_destroy(&cc->block_stack);
    if (cc->jit_frame) {
        if (cc->jit_frame->memory_regs)
            jit_free(cc->jit_frame->memory_regs);
        if (cc->jit_frame->table_regs)
            jit_free(cc->jit_frame->table_regs);
        jit_free(cc->jit_frame);
    }
    if (cc->memory_regs)
        jit_free(cc->memory_regs);
    if (cc->table_regs)
        jit_free(cc->table_regs);
    jit_free(cc->_const_val._hash_table);
    /* Release the instruction hash table. */
    jit_cc_disable_insn_hash(cc);
    jit_free(cc->exce_basic_blocks);
    if (cc->incoming_insns_for_exec_bbs) {
        for (i = 0; i < EXCE_NUM; i++) {
            incoming_insn = cc->incoming_insns_for_exec_bbs[i];
            while (incoming_insn) {
                incoming_insn_next = incoming_insn->next;
                jit_free(incoming_insn);
                incoming_insn = incoming_insn_next;
            }
        }
        jit_free(cc->incoming_insns_for_exec_bbs);
    }
    /* Release entry and exit blocks. */
    if (0 != cc->entry_label)
        jit_basic_block_delete(jit_cc_entry_basic_block(cc));
    if (0 != cc->exit_label)
        jit_basic_block_delete(jit_cc_exit_basic_block(cc));
    /* clang-format off */
    /* Release blocks and instructions. */
    JIT_FOREACH_BLOCK(cc, i, end, block)
    {
        jit_basic_block_delete(block);
    }
    /* clang-format on */
    /* Release constant values. */
    for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
        jit_free(cc->_const_val._value[i]);
        jit_free(cc->_const_val._next[i]);
    }
    /* Release storage of annotations. */
#define ANN_LABEL(TYPE, NAME) jit_annl_disable_##NAME(cc);
#define ANN_INSN(TYPE, NAME) jit_anni_disable_##NAME(cc);
#define ANN_REG(TYPE, NAME) jit_annr_disable_##NAME(cc);
#include "jit_ir.def"
#undef ANN_LABEL
#undef ANN_INSN
#undef ANN_REG
}

void
jit_cc_delete(JitCompContext *cc)
{
    if (cc && --cc->_reference_count == 0) {
        jit_cc_destroy(cc);
        jit_free(cc);
    }
}

/*
 * Reallocate a memory block with the new_size.
 * TODO: replace this with imported jit_realloc when it's available.
 */
static void *
_jit_realloc(void *ptr, unsigned new_size, unsigned old_size)
{
    void *new_ptr = jit_malloc(new_size);
    if (new_ptr) {
        bh_assert(new_size > old_size);
        if (ptr) {
            memcpy(new_ptr, ptr, old_size);
            memset((uint8 *)new_ptr + old_size, 0, new_size - old_size);
            jit_free(ptr);
        }
        else
            memset(new_ptr, 0, new_size);
    }
    return new_ptr;
}

static unsigned
hash_of_const(unsigned kind, unsigned size, void *val)
{
    uint8 *p = (uint8 *)val, *end = p + size;
    unsigned hash = kind;
    do
        hash = ((hash << 5) - hash) + *p++;
    while (p != end);
    return hash;
}

static inline void *
address_of_const(JitCompContext *cc, JitReg reg, unsigned size)
{
    int kind = jit_reg_kind(reg);
    unsigned no = jit_reg_no(reg);
    unsigned idx = no & ~_JIT_REG_CONST_IDX_FLAG;
    bh_assert(kind < JIT_REG_KIND_L32);
    bh_assert(jit_reg_is_const_idx(reg) && idx < cc->_const_val._num[kind]);
    return cc->_const_val._value[kind] + size * idx;
}

static inline JitReg
next_of_const(JitCompContext *cc, JitReg reg)
{
    int kind = jit_reg_kind(reg);
    unsigned no = jit_reg_no(reg);
    unsigned idx = no & ~_JIT_REG_CONST_IDX_FLAG;
    bh_assert(kind < JIT_REG_KIND_L32);
    bh_assert(jit_reg_is_const_idx(reg) && idx < cc->_const_val._num[kind]);
    return cc->_const_val._next[kind][idx];
}

/**
 * Put a constant value into the compilation context.
 *
 * @param cc compilation context
 * @param kind register kind
 * @param size size of the value
 * @param val pointer to value which must be aligned
 *
 * @return a constant register containing the value
 */
static JitReg
_jit_cc_new_const(JitCompContext *cc, int kind, unsigned size, void *val)
{
    unsigned num = cc->_const_val._num[kind], slot;
    unsigned capacity = cc->_const_val._capacity[kind];
    uint8 *new_value;
    JitReg r, *new_next;
    bh_assert(num <= capacity);
    /* Find the existing value first. */
    slot = hash_of_const(kind, size, val) % cc->_const_val._hash_table_size;
    r = cc->_const_val._hash_table[slot];
    for (; r; r = next_of_const(cc, r))
        if (jit_reg_kind(r) == kind
            && !memcmp(val, address_of_const(cc, r, size), size))
            return r;
    if (num == capacity) {
        /* Increase the space of value and next. */
        capacity = capacity > 0 ? (capacity + capacity / 2) : 16;
        new_value = _jit_realloc(cc->_const_val._value[kind], size * capacity,
                                 size * num);
        new_next =
            _jit_realloc(cc->_const_val._next[kind], sizeof(*new_next) * capacity,
                         sizeof(*new_next) * num);
        if (new_value && new_next) {
            cc->_const_val._value[kind] = new_value;
            cc->_const_val._next[kind] = new_next;
        }
        else {
            jit_set_last_error(cc, "create const register failed");
            jit_free(new_value);
            jit_free(new_next);
            return 0;
        }
        cc->_const_val._capacity[kind] = capacity;
    }
    bh_assert(num + 1 < (uint32)_JIT_REG_CONST_IDX_FLAG);
    r = jit_reg_new(kind, _JIT_REG_CONST_IDX_FLAG | num);
    memcpy(cc->_const_val._value[kind] + size * num, val, size);
    cc->_const_val._next[kind][num] = cc->_const_val._hash_table[slot];
    cc->_const_val._hash_table[slot] = r;
    cc->_const_val._num[kind] = num + 1;
    return r;
}

static inline int32
get_const_val_in_reg(JitReg reg)
{
    int shift = 8 * sizeof(reg) - _JIT_REG_KIND_SHIFT + 1;
    return ((int32)(reg << shift)) >> shift;
}
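
/*
 * Constant registers come in two flavors: small values are encoded directly
 * in the register number with _JIT_REG_CONST_VAL_FLAG, while values that do
 * not survive the sign-extending round trip through get_const_val_in_reg()
 * are interned into the per-kind constant pool by _jit_cc_new_const() and
 * referenced through _JIT_REG_CONST_IDX_FLAG.  The helper below makes that
 * choice for each new constant.
 */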
#define _JIT_CC_NEW_CONST_HELPER(KIND, TYPE, val) \
    do { \
        JitReg reg = jit_reg_new( \
            JIT_REG_KIND_##KIND, \
            (_JIT_REG_CONST_VAL_FLAG | ((JitReg)val & ~_JIT_REG_KIND_MASK))); \
\
        if ((TYPE)get_const_val_in_reg(reg) == val) \
            return reg; \
        return _jit_cc_new_const(cc, JIT_REG_KIND_##KIND, sizeof(val), &val); \
    } while (0)

JitReg
jit_cc_new_const_I32_rel(JitCompContext *cc, int32 val, uint32 rel)
{
    uint64 val64 = (uint64)(uint32)val | ((uint64)rel << 32);
    _JIT_CC_NEW_CONST_HELPER(I32, uint64, val64);
}

JitReg
jit_cc_new_const_I64(JitCompContext *cc, int64 val)
{
    _JIT_CC_NEW_CONST_HELPER(I64, int64, val);
}

JitReg
jit_cc_new_const_F32(JitCompContext *cc, float val)
{
    int32 float_neg_zero = 0x80000000;
    if (!memcmp(&val, &float_neg_zero, sizeof(float)))
        /* Create const -0.0f */
        return _jit_cc_new_const(cc, JIT_REG_KIND_F32, sizeof(float), &val);
    _JIT_CC_NEW_CONST_HELPER(F32, float, val);
}

JitReg
jit_cc_new_const_F64(JitCompContext *cc, double val)
{
    int64 double_neg_zero = 0x8000000000000000ll;
    if (!memcmp(&val, &double_neg_zero, sizeof(double)))
        /* Create const -0.0d */
        return _jit_cc_new_const(cc, JIT_REG_KIND_F64, sizeof(double), &val);
    _JIT_CC_NEW_CONST_HELPER(F64, double, val);
}

#undef _JIT_CC_NEW_CONST_HELPER

#define _JIT_CC_GET_CONST_HELPER(KIND, TYPE) \
    do { \
        bh_assert(jit_reg_kind(reg) == JIT_REG_KIND_##KIND); \
        bh_assert(jit_reg_is_const(reg)); \
\
        return (jit_reg_is_const_val(reg) \
                    ? (TYPE)get_const_val_in_reg(reg) \
                    : *(TYPE *)(address_of_const(cc, reg, sizeof(TYPE)))); \
    } while (0)

static uint64
jit_cc_get_const_I32_helper(JitCompContext *cc, JitReg reg)
{
    _JIT_CC_GET_CONST_HELPER(I32, uint64);
}

uint32
jit_cc_get_const_I32_rel(JitCompContext *cc, JitReg reg)
{
    return (uint32)(jit_cc_get_const_I32_helper(cc, reg) >> 32);
}

int32
jit_cc_get_const_I32(JitCompContext *cc, JitReg reg)
{
    return (int32)(jit_cc_get_const_I32_helper(cc, reg));
}

int64
jit_cc_get_const_I64(JitCompContext *cc, JitReg reg)
{
    _JIT_CC_GET_CONST_HELPER(I64, int64);
}

float
jit_cc_get_const_F32(JitCompContext *cc, JitReg reg)
{
    _JIT_CC_GET_CONST_HELPER(F32, float);
}

double
jit_cc_get_const_F64(JitCompContext *cc, JitReg reg)
{
    _JIT_CC_GET_CONST_HELPER(F64, double);
}

#undef _JIT_CC_GET_CONST_HELPER
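
/*
 * Annotation storage is generated with an X-macro pattern: jit_ir.def is
 * re-included below with ANN_LABEL/ANN_INSN/ANN_REG expanded differently
 * each time, so the same annotation list drives growth (here), enabling,
 * and disabling of the per-label, per-instruction and per-register arrays.
 */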
#define _JIT_REALLOC_ANN(TYPE, NAME, ANN, POSTFIX) \
    if (successful && cc->_ann._##ANN##_##NAME##_enabled) { \
        TYPE *ptr = _jit_realloc(cc->_ann._##ANN##_##NAME POSTFIX, \
                                 sizeof(TYPE) * capacity, sizeof(TYPE) * num); \
        if (ptr) \
            cc->_ann._##ANN##_##NAME POSTFIX = ptr; \
        else \
            successful = false; \
    }

JitReg
jit_cc_new_label(JitCompContext *cc)
{
    unsigned num = cc->_ann._label_num;
    unsigned capacity = cc->_ann._label_capacity;
    bool successful = true;
    bh_assert(num <= capacity);
    if (num == capacity) {
        capacity = capacity > 0 ? (capacity + capacity / 2) : 16;
#define EMPTY_POSTFIX
#define ANN_LABEL(TYPE, NAME) _JIT_REALLOC_ANN(TYPE, NAME, label, EMPTY_POSTFIX)
#include "jit_ir.def"
#undef ANN_LABEL
#undef EMPTY_POSTFIX
        if (!successful) {
            jit_set_last_error(cc, "create label register failed");
            return 0;
        }
        cc->_ann._label_capacity = capacity;
    }
    cc->_ann._label_num = num + 1;
    return jit_reg_new(JIT_REG_KIND_L32, num);
}

JitBasicBlock *
jit_cc_new_basic_block(JitCompContext *cc, int n)
{
    JitReg label = jit_cc_new_label(cc);
    JitBasicBlock *block = NULL;
    if (label && (block = jit_basic_block_new(label, n)))
        /* Void 0 register indicates error in creation. */
        *(jit_annl_basic_block(cc, label)) = block;
    else
        jit_set_last_error(cc, "create basic block failed");
    return block;
}

JitBasicBlock *
jit_cc_resize_basic_block(JitCompContext *cc, JitBasicBlock *block, int n)
{
    JitReg label = jit_basic_block_label(block);
    JitInsn *insn = jit_basic_block_first_insn(block);
    JitBasicBlock *new_block = jit_basic_block_new(label, n);
    if (!new_block) {
        jit_set_last_error(cc, "resize basic block failed");
        return NULL;
    }
    jit_insn_unlink(block);
    if (insn != block)
        jit_insn_insert_before(insn, new_block);
    bh_assert(*(jit_annl_basic_block(cc, label)) == block);
    *(jit_annl_basic_block(cc, label)) = new_block;
    jit_insn_delete(block);
    return new_block;
}

bool
jit_cc_enable_insn_hash(JitCompContext *cc, unsigned n)
{
    if (jit_anni_is_enabled__hash_link(cc))
        return true;
    if (!jit_anni_enable__hash_link(cc))
        return false;
    /* The table must not exist. */
    bh_assert(!cc->_insn_hash_table._table);
    /* Integer overflow cannot happen because n << 4G (at most several
       times of 64K in the most extreme case). */
    if (!(cc->_insn_hash_table._table =
              jit_calloc(n * sizeof(*cc->_insn_hash_table._table)))) {
        jit_anni_disable__hash_link(cc);
        return false;
    }
    cc->_insn_hash_table._size = n;
    return true;
}

void
jit_cc_disable_insn_hash(JitCompContext *cc)
{
    jit_anni_disable__hash_link(cc);
    jit_free(cc->_insn_hash_table._table);
    cc->_insn_hash_table._table = NULL;
    cc->_insn_hash_table._size = 0;
}

void
jit_cc_reset_insn_hash(JitCompContext *cc)
{
    if (jit_anni_is_enabled__hash_link(cc))
        memset(cc->_insn_hash_table._table, 0,
               cc->_insn_hash_table._size
                   * sizeof(*cc->_insn_hash_table._table));
}

JitInsn *
jit_cc_set_insn_uid(JitCompContext *cc, JitInsn *insn)
{
    if (insn) {
        unsigned num = cc->_ann._insn_num;
        unsigned capacity = cc->_ann._insn_capacity;
        bool successful = true;
        bh_assert(num <= capacity);
        if (num == capacity) {
            capacity = capacity > 0 ? (capacity + capacity / 2) : 64;
#define EMPTY_POSTFIX
#define ANN_INSN(TYPE, NAME) _JIT_REALLOC_ANN(TYPE, NAME, insn, EMPTY_POSTFIX)
#include "jit_ir.def"
#undef ANN_INSN
#undef EMPTY_POSTFIX
            if (!successful) {
                jit_set_last_error(cc, "set insn uid failed");
                return NULL;
            }
            cc->_ann._insn_capacity = capacity;
        }
        cc->_ann._insn_num = num + 1;
        insn->uid = num;
    }
    return insn;
}

JitInsn *
_jit_cc_set_insn_uid_for_new_insn(JitCompContext *cc, JitInsn *insn)
{
    if (jit_cc_set_insn_uid(cc, insn))
        return insn;
    jit_insn_delete(insn);
    return NULL;
}

JitReg
jit_cc_new_reg(JitCompContext *cc, unsigned kind)
{
    unsigned num = jit_cc_reg_num(cc, kind);
    unsigned capacity = cc->_ann._reg_capacity[kind];
    bool successful = true;
    bh_assert(num <= capacity);
    if (num == capacity) {
        capacity = (capacity == 0
                        /* Initialize the capacity to be larger than hard
                           register number. */
                        ? cc->hreg_info->info[kind].num + 16
                        : capacity + capacity / 2);
#define ANN_REG(TYPE, NAME) _JIT_REALLOC_ANN(TYPE, NAME, reg, [kind])
#include "jit_ir.def"
#undef ANN_REG
        if (!successful) {
            jit_set_last_error(cc, "create register failed");
            return 0;
        }
        cc->_ann._reg_capacity[kind] = capacity;
    }
    cc->_ann._reg_num[kind] = num + 1;
    return jit_reg_new(kind, num);
}

#undef _JIT_REALLOC_ANN

#define ANN_LABEL(TYPE, NAME) \
    bool jit_annl_enable_##NAME(JitCompContext *cc) \
    { \
        if (cc->_ann._label_##NAME##_enabled) \
            return true; \
\
        if (cc->_ann._label_capacity > 0 \
            && !(cc->_ann._label_##NAME = \
                     jit_calloc(cc->_ann._label_capacity * sizeof(TYPE)))) { \
            jit_set_last_error(cc, "annl enable " #NAME " failed"); \
            return false; \
        } \
\
        cc->_ann._label_##NAME##_enabled = 1; \
        return true; \
    }

#define ANN_INSN(TYPE, NAME) \
    bool jit_anni_enable_##NAME(JitCompContext *cc) \
    { \
        if (cc->_ann._insn_##NAME##_enabled) \
            return true; \
\
        if (cc->_ann._insn_capacity > 0 \
            && !(cc->_ann._insn_##NAME = \
                     jit_calloc(cc->_ann._insn_capacity * sizeof(TYPE)))) { \
            jit_set_last_error(cc, "anni enable " #NAME " failed"); \
            return false; \
        } \
\
        cc->_ann._insn_##NAME##_enabled = 1; \
        return true; \
    }

#define ANN_REG(TYPE, NAME) \
    bool jit_annr_enable_##NAME(JitCompContext *cc) \
    { \
        unsigned k; \
\
        if (cc->_ann._reg_##NAME##_enabled) \
            return true; \
\
        for (k = JIT_REG_KIND_VOID; k < JIT_REG_KIND_L32; k++) \
            if (cc->_ann._reg_capacity[k] > 0 \
                && !(cc->_ann._reg_##NAME[k] = jit_calloc( \
                         cc->_ann._reg_capacity[k] * sizeof(TYPE)))) { \
                jit_set_last_error(cc, "annr enable " #NAME " failed"); \
                jit_annr_disable_##NAME(cc); \
                return false; \
            } \
\
        cc->_ann._reg_##NAME##_enabled = 1; \
        return true; \
    }

#include "jit_ir.def"
#undef ANN_LABEL
#undef ANN_INSN
#undef ANN_REG

#define ANN_LABEL(TYPE, NAME) \
    void jit_annl_disable_##NAME(JitCompContext *cc) \
    { \
        jit_free(cc->_ann._label_##NAME); \
        cc->_ann._label_##NAME = NULL; \
        cc->_ann._label_##NAME##_enabled = 0; \
    }

#define ANN_INSN(TYPE, NAME) \
    void jit_anni_disable_##NAME(JitCompContext *cc) \
    { \
        jit_free(cc->_ann._insn_##NAME); \
        cc->_ann._insn_##NAME = NULL; \
        cc->_ann._insn_##NAME##_enabled = 0; \
    }

#define ANN_REG(TYPE, NAME) \
    void jit_annr_disable_##NAME(JitCompContext *cc) \
    { \
        unsigned k; \
\
        for (k = JIT_REG_KIND_VOID; k < JIT_REG_KIND_L32; k++) { \
            jit_free(cc->_ann._reg_##NAME[k]); \
            cc->_ann._reg_##NAME[k] = NULL; \
        } \
\
        cc->_ann._reg_##NAME##_enabled = 0; \
    }

#include "jit_ir.def"
#undef ANN_LABEL
#undef ANN_INSN
#undef ANN_REG

char *
jit_get_last_error(JitCompContext *cc)
{
    return cc->last_error[0] == '\0' ? NULL : cc->last_error;
}

void
jit_set_last_error_v(JitCompContext *cc, const char *format, ...)
{
    va_list args;
    va_start(args, format);
    vsnprintf(cc->last_error, sizeof(cc->last_error), format, args);
    va_end(args);
}

void
jit_set_last_error(JitCompContext *cc, const char *error)
{
    if (error)
        snprintf(cc->last_error, sizeof(cc->last_error), "Error: %s", error);
    else
        cc->last_error[0] = '\0';
}
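
/*
 * Rebuild the control-flow graph in three passes: first count every block's
 * predecessors via the pred_num annotation, then resize each block's PHI so
 * its operand vector can hold that many predecessor labels, and finally walk
 * the blocks in reverse, using the remaining pred_num value as a countdown
 * cursor to fill each successor's predecessor vector.
 */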
bool
jit_cc_update_cfg(JitCompContext *cc)
{
    JitBasicBlock *block;
    unsigned block_index, end, succ_index, idx;
    JitReg *target;
    bool retval = false;
    if (!jit_annl_enable_pred_num(cc))
        return false;
    /* Update pred_num of all blocks. */
    JIT_FOREACH_BLOCK_ENTRY_EXIT(cc, block_index, end, block)
    {
        JitRegVec succs = jit_basic_block_succs(block);
        JIT_REG_VEC_FOREACH(succs, succ_index, target)
        if (jit_reg_is_kind(L32, *target))
            *(jit_annl_pred_num(cc, *target)) += 1;
    }
    /* Resize predecessor vectors of body blocks. */
    JIT_FOREACH_BLOCK(cc, block_index, end, block)
    {
        if (!jit_cc_resize_basic_block(
                cc, block,
                *(jit_annl_pred_num(cc, jit_basic_block_label(block)))))
            goto cleanup_and_return;
    }
    /* Fill in predecessor vectors of all blocks. */
    JIT_FOREACH_BLOCK_REVERSE_ENTRY_EXIT(cc, block_index, block)
    {
        JitRegVec succs = jit_basic_block_succs(block), preds;
        JIT_REG_VEC_FOREACH(succs, succ_index, target)
        if (jit_reg_is_kind(L32, *target)) {
            preds = jit_basic_block_preds(*(jit_annl_basic_block(cc, *target)));
            bh_assert(*(jit_annl_pred_num(cc, *target)) > 0);
            idx = *(jit_annl_pred_num(cc, *target)) - 1;
            *(jit_annl_pred_num(cc, *target)) = idx;
            *(jit_reg_vec_at(&preds, idx)) = jit_basic_block_label(block);
        }
    }
    retval = true;
cleanup_and_return:
    jit_annl_disable_pred_num(cc);
    return retval;
}

void
jit_value_stack_push(JitValueStack *stack, JitValue *value)
{
    if (!stack->value_list_head)
        stack->value_list_head = stack->value_list_end = value;
    else {
        stack->value_list_end->next = value;
        value->prev = stack->value_list_end;
        stack->value_list_end = value;
    }
}

JitValue *
jit_value_stack_pop(JitValueStack *stack)
{
    JitValue *value = stack->value_list_end;
    bh_assert(stack->value_list_end);
    if (stack->value_list_head == stack->value_list_end)
        stack->value_list_head = stack->value_list_end = NULL;
    else {
        stack->value_list_end = stack->value_list_end->prev;
        stack->value_list_end->next = NULL;
        value->prev = NULL;
    }
    return value;
}

void
jit_value_stack_destroy(JitValueStack *stack)
{
    JitValue *value = stack->value_list_head, *p;
    while (value) {
        p = value->next;
        jit_free(value);
        value = p;
    }
    stack->value_list_head = NULL;
    stack->value_list_end = NULL;
}

void
jit_block_stack_push(JitBlockStack *stack, JitBlock *block)
{
    if (!stack->block_list_head)
        stack->block_list_head = stack->block_list_end = block;
    else {
        stack->block_list_end->next = block;
        block->prev = stack->block_list_end;
        stack->block_list_end = block;
    }
}

JitBlock *
jit_block_stack_top(JitBlockStack *stack)
{
    return stack->block_list_end;
}

JitBlock *
jit_block_stack_pop(JitBlockStack *stack)
{
    JitBlock *block = stack->block_list_end;
    bh_assert(stack->block_list_end);
    if (stack->block_list_head == stack->block_list_end)
        stack->block_list_head = stack->block_list_end = NULL;
    else {
        stack->block_list_end = stack->block_list_end->prev;
        stack->block_list_end->next = NULL;
        block->prev = NULL;
    }
    return block;
}

void
jit_block_stack_destroy(JitBlockStack *stack)
{
    JitBlock *block = stack->block_list_head, *p;
    while (block) {
        p = block->next;
        jit_value_stack_destroy(&block->value_stack);
        jit_block_destroy(block);
        block = p;
    }
    stack->block_list_head = NULL;
    stack->block_list_end = NULL;
}

bool
jit_block_add_incoming_insn(JitBlock *block, JitInsn *insn, uint32 opnd_idx)
{
    JitIncomingInsn *incoming_insn;
    if (!(incoming_insn = jit_calloc((uint32)sizeof(JitIncomingInsn))))
        return false;
    incoming_insn->insn = insn;
    incoming_insn->opnd_idx = opnd_idx;
    incoming_insn->next = block->incoming_insns_for_end_bb;
    block->incoming_insns_for_end_bb = incoming_insn;
    return true;
}

void
jit_block_destroy(JitBlock *block)
{
    JitIncomingInsn *incoming_insn, *incoming_insn_next;
    jit_value_stack_destroy(&block->value_stack);
    if (block->param_types)
        jit_free(block->param_types);
    if (block->result_types)
        jit_free(block->result_types);
    incoming_insn = block->incoming_insns_for_end_bb;
    while (incoming_insn) {
        incoming_insn_next = incoming_insn->next;
        jit_free(incoming_insn);
        incoming_insn = incoming_insn_next;
    }
    jit_free(block);
}

static inline uint8
to_stack_value_type(uint8 type)
{
#if WASM_ENABLE_REF_TYPES != 0
    if (type == VALUE_TYPE_EXTERNREF || type == VALUE_TYPE_FUNCREF)
        return VALUE_TYPE_I32;
#endif
    return type;
}
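
/*
 * jit_cc_pop_value/jit_cc_push_value keep two views of the Wasm operand
 * stack in sync: the JitValue list of the current block and the virtual
 * stack pointer in cc->jit_frame, updated through the push_*/pop_* helpers
 * (presumably declared in jit_frontend.h).  The asserts in jit_cc_pop_value
 * check that both views agree about the popped slot.
 */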
bool
jit_cc_pop_value(JitCompContext *cc, uint8 type, JitReg *p_value)
{
    JitValue *jit_value = NULL;
    JitReg value = 0;
    if (!jit_block_stack_top(&cc->block_stack)) {
        jit_set_last_error(cc, "WASM block stack underflow");
        return false;
    }
    if (!jit_block_stack_top(&cc->block_stack)->value_stack.value_list_end) {
        jit_set_last_error(cc, "WASM data stack underflow");
        return false;
    }
    jit_value = jit_value_stack_pop(
        &jit_block_stack_top(&cc->block_stack)->value_stack);
    bh_assert(jit_value);
    if (jit_value->type != to_stack_value_type(type)) {
        jit_set_last_error(cc, "invalid WASM stack data type");
        jit_free(jit_value);
        return false;
    }
    switch (jit_value->type) {
        case VALUE_TYPE_I32:
            value = pop_i32(cc->jit_frame);
            break;
        case VALUE_TYPE_I64:
            value = pop_i64(cc->jit_frame);
            break;
        case VALUE_TYPE_F32:
            value = pop_f32(cc->jit_frame);
            break;
        case VALUE_TYPE_F64:
            value = pop_f64(cc->jit_frame);
            break;
        default:
            bh_assert(0);
            break;
    }
    bh_assert(cc->jit_frame->sp == jit_value->value);
    bh_assert(value == jit_value->value->reg);
    *p_value = value;
    jit_free(jit_value);
    return true;
}

bool
jit_cc_push_value(JitCompContext *cc, uint8 type, JitReg value)
{
    JitValue *jit_value;
    if (!jit_block_stack_top(&cc->block_stack)) {
        jit_set_last_error(cc, "WASM block stack underflow");
        return false;
    }
    if (!(jit_value = jit_calloc(sizeof(JitValue)))) {
        jit_set_last_error(cc, "allocate memory failed");
        return false;
    }
    bh_assert(value);
    jit_value->type = to_stack_value_type(type);
    jit_value->value = cc->jit_frame->sp;
    jit_value_stack_push(&jit_block_stack_top(&cc->block_stack)->value_stack,
                         jit_value);
    switch (jit_value->type) {
        case VALUE_TYPE_I32:
            push_i32(cc->jit_frame, value);
            break;
        case VALUE_TYPE_I64:
            push_i64(cc->jit_frame, value);
            break;
        case VALUE_TYPE_F32:
            push_f32(cc->jit_frame, value);
            break;
        case VALUE_TYPE_F64:
            push_f64(cc->jit_frame, value);
            break;
    }
    return true;
}

bool
_jit_insn_check_opnd_access_Reg(const JitInsn *insn, unsigned n)
{
    unsigned opcode = insn->opcode;
    return (insn_opnd_kind[opcode] == JIT_OPND_KIND_Reg
            && n < insn_opnd_num[opcode]);
}

bool
_jit_insn_check_opnd_access_VReg(const JitInsn *insn, unsigned n)
{
    unsigned opcode = insn->opcode;
    return (insn_opnd_kind[opcode] == JIT_OPND_KIND_VReg
            && n < insn->_opnd._opnd_VReg._reg_num);
}

bool
_jit_insn_check_opnd_access_LookupSwitch(const JitInsn *insn)
{
    unsigned opcode = insn->opcode;
    return (insn_opnd_kind[opcode] == JIT_OPND_KIND_LookupSwitch);
}

bool
jit_lock_reg_in_insn(JitCompContext *cc, JitInsn *the_insn, JitReg reg_to_lock)
{
    bool ret = false;
    JitInsn *prevent_spill = NULL;
    JitInsn *indicate_using = NULL;
    if (!the_insn)
        goto just_return;
    if (jit_cc_is_hreg_fixed(cc, reg_to_lock)) {
        ret = true;
        goto just_return;
    }
    /**
     * give the virtual register of the locked hard register a minimum,
     * non-zero distance, so as to prevent it from being spilled out
     */
    prevent_spill = jit_insn_new_MOV(reg_to_lock, reg_to_lock);
    if (!prevent_spill)
        goto just_return;
    jit_insn_insert_before(the_insn, prevent_spill);
    /**
     * announce the locked hard register is being used, and do necessary
     * spill ASAP
     */
    indicate_using = jit_insn_new_MOV(reg_to_lock, reg_to_lock);
    if (!indicate_using)
        goto just_return;
    jit_insn_insert_after(the_insn, indicate_using);
    ret = true;
just_return:
    if (!ret)
        jit_set_last_error(cc, "generate insn failed");
    return ret;
}