jit_ir.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427
  1. /*
  2. * Copyright (C) 2021 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. #include "jit_ir.h"
  6. #include "jit_codegen.h"
  7. #include "jit_frontend.h"
/**
 * Operand kinds of instructions.
 */
enum {
    /* Fixed number of JitReg operands in the _opnd array. */
    JIT_OPND_KIND_Reg,
    /* Variable number of JitReg operands (_opnd_VReg with _reg_num). */
    JIT_OPND_KIND_VReg,
    /* LookupSwitch operand: value, default target and match pairs. */
    JIT_OPND_KIND_LookupSwitch,
};
/**
 * Operand kind of each instruction, indexed by opcode.
 */
static const uint8 insn_opnd_kind[] = {
/* Expand each INSN entry of jit_ir.def into its operand kind. */
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) JIT_OPND_KIND_##OPND_KIND,
#include "jit_ir.def"
#undef INSN
};
/**
 * Operand number of each instruction, indexed by opcode.
 */
static const uint8 insn_opnd_num[] = {
/* Expand each INSN entry of jit_ir.def into its operand count. */
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) OPND_NUM,
#include "jit_ir.def"
#undef INSN
};
/**
 * Index of the first use (source) operand of each instruction,
 * indexed by opcode.  (The previous comment here was a copy-paste of
 * the "operand number" text above.)
 */
static const uint8 insn_opnd_first_use[] = {
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) FIRST_USE,
#include "jit_ir.def"
#undef INSN
};
/* Allocate a zero-initialized JitInsn with room for OPND_NUM fixed
   register operands in the trailing _opnd array. */
#define JIT_INSN_NEW_Reg(OPND_NUM) \
    jit_calloc(offsetof(JitInsn, _opnd) + sizeof(JitReg) * (OPND_NUM))
/* Allocate a zero-initialized JitInsn with room for OPND_NUM
   variable register operands in the _opnd_VReg._reg array. */
#define JIT_INSN_NEW_VReg(OPND_NUM)                     \
    jit_calloc(offsetof(JitInsn, _opnd._opnd_VReg._reg) \
               + sizeof(JitReg) * (OPND_NUM))
  45. JitInsn *
  46. _jit_insn_new_Reg_0(JitOpcode opc)
  47. {
  48. JitInsn *insn = JIT_INSN_NEW_Reg(0);
  49. if (insn) {
  50. insn->opcode = opc;
  51. }
  52. return insn;
  53. }
  54. JitInsn *
  55. _jit_insn_new_Reg_1(JitOpcode opc, JitReg r0)
  56. {
  57. JitInsn *insn = JIT_INSN_NEW_Reg(1);
  58. if (insn) {
  59. insn->opcode = opc;
  60. *jit_insn_opnd(insn, 0) = r0;
  61. }
  62. return insn;
  63. }
  64. JitInsn *
  65. _jit_insn_new_Reg_2(JitOpcode opc, JitReg r0, JitReg r1)
  66. {
  67. JitInsn *insn = JIT_INSN_NEW_Reg(2);
  68. if (insn) {
  69. insn->opcode = opc;
  70. *jit_insn_opnd(insn, 0) = r0;
  71. *jit_insn_opnd(insn, 1) = r1;
  72. }
  73. return insn;
  74. }
  75. JitInsn *
  76. _jit_insn_new_Reg_3(JitOpcode opc, JitReg r0, JitReg r1, JitReg r2)
  77. {
  78. JitInsn *insn = JIT_INSN_NEW_Reg(3);
  79. if (insn) {
  80. insn->opcode = opc;
  81. *jit_insn_opnd(insn, 0) = r0;
  82. *jit_insn_opnd(insn, 1) = r1;
  83. *jit_insn_opnd(insn, 2) = r2;
  84. }
  85. return insn;
  86. }
  87. JitInsn *
  88. _jit_insn_new_Reg_4(JitOpcode opc, JitReg r0, JitReg r1, JitReg r2, JitReg r3)
  89. {
  90. JitInsn *insn = JIT_INSN_NEW_Reg(4);
  91. if (insn) {
  92. insn->opcode = opc;
  93. *jit_insn_opnd(insn, 0) = r0;
  94. *jit_insn_opnd(insn, 1) = r1;
  95. *jit_insn_opnd(insn, 2) = r2;
  96. *jit_insn_opnd(insn, 3) = r3;
  97. }
  98. return insn;
  99. }
  100. JitInsn *
  101. _jit_insn_new_Reg_5(JitOpcode opc, JitReg r0, JitReg r1, JitReg r2, JitReg r3,
  102. JitReg r4)
  103. {
  104. JitInsn *insn = JIT_INSN_NEW_Reg(5);
  105. if (insn) {
  106. insn->opcode = opc;
  107. *jit_insn_opnd(insn, 0) = r0;
  108. *jit_insn_opnd(insn, 1) = r1;
  109. *jit_insn_opnd(insn, 2) = r2;
  110. *jit_insn_opnd(insn, 3) = r3;
  111. *jit_insn_opnd(insn, 4) = r4;
  112. }
  113. return insn;
  114. }
  115. JitInsn *
  116. _jit_insn_new_VReg_1(JitOpcode opc, JitReg r0, int n)
  117. {
  118. JitInsn *insn = JIT_INSN_NEW_VReg(1 + n);
  119. if (insn) {
  120. insn->opcode = opc;
  121. insn->_opnd._opnd_VReg._reg_num = 1 + n;
  122. *(jit_insn_opndv(insn, 0)) = r0;
  123. }
  124. return insn;
  125. }
  126. JitInsn *
  127. _jit_insn_new_VReg_2(JitOpcode opc, JitReg r0, JitReg r1, int n)
  128. {
  129. JitInsn *insn = JIT_INSN_NEW_VReg(2 + n);
  130. if (insn) {
  131. insn->opcode = opc;
  132. insn->_opnd._opnd_VReg._reg_num = 2 + n;
  133. *(jit_insn_opndv(insn, 0)) = r0;
  134. *(jit_insn_opndv(insn, 1)) = r1;
  135. }
  136. return insn;
  137. }
  138. JitInsn *
  139. _jit_insn_new_LookupSwitch_1(JitOpcode opc, JitReg value, uint32 num)
  140. {
  141. JitOpndLookupSwitch *opnd = NULL;
  142. JitInsn *insn =
  143. jit_calloc(offsetof(JitInsn, _opnd._opnd_LookupSwitch.match_pairs)
  144. + sizeof(opnd->match_pairs[0]) * num);
  145. if (insn) {
  146. insn->opcode = opc;
  147. opnd = jit_insn_opndls(insn);
  148. opnd->value = value;
  149. opnd->match_pairs_num = num;
  150. }
  151. return insn;
  152. }
  153. #undef JIT_INSN_NEW_Reg
  154. #undef JIT_INSN_NEW_VReg
  155. void
  156. jit_insn_insert_before(JitInsn *insn1, JitInsn *insn2)
  157. {
  158. bh_assert(insn1->prev);
  159. insn1->prev->next = insn2;
  160. insn2->prev = insn1->prev;
  161. insn2->next = insn1;
  162. insn1->prev = insn2;
  163. }
  164. void
  165. jit_insn_insert_after(JitInsn *insn1, JitInsn *insn2)
  166. {
  167. bh_assert(insn1->next);
  168. insn1->next->prev = insn2;
  169. insn2->next = insn1->next;
  170. insn2->prev = insn1;
  171. insn1->next = insn2;
  172. }
  173. void
  174. jit_insn_unlink(JitInsn *insn)
  175. {
  176. bh_assert(insn->prev);
  177. insn->prev->next = insn->next;
  178. bh_assert(insn->next);
  179. insn->next->prev = insn->prev;
  180. insn->prev = insn->next = NULL;
  181. }
  182. unsigned
  183. jit_insn_hash(JitInsn *insn)
  184. {
  185. const uint8 opcode = insn->opcode;
  186. unsigned hash = opcode, i;
  187. /* Currently, only instructions with Reg kind operand require
  188. hashing. For others, simply use opcode as the hash value. */
  189. if (insn_opnd_kind[opcode] != JIT_OPND_KIND_Reg
  190. || insn_opnd_num[opcode] < 1)
  191. return hash;
  192. /* All the instructions with hashing support must be in the
  193. assignment format, i.e. the first operand is the result (hence
  194. being ignored) and all the others are operands. This is also
  195. true for CHK instructions, whose first operand is the instruction
  196. pointer. */
  197. for (i = 1; i < insn_opnd_num[opcode]; i++)
  198. hash = ((hash << 5) - hash) + *(jit_insn_opnd(insn, i));
  199. return hash;
  200. }
  201. bool
  202. jit_insn_equal(JitInsn *insn1, JitInsn *insn2)
  203. {
  204. const uint8 opcode = insn1->opcode;
  205. unsigned i;
  206. if (insn2->opcode != opcode)
  207. return false;
  208. if (insn_opnd_kind[opcode] != JIT_OPND_KIND_Reg
  209. || insn_opnd_num[opcode] < 1)
  210. return false;
  211. for (i = 1; i < insn_opnd_num[opcode]; i++)
  212. if (*(jit_insn_opnd(insn1, i)) != *(jit_insn_opnd(insn2, i)))
  213. return false;
  214. return true;
  215. }
  216. JitRegVec
  217. jit_insn_opnd_regs(JitInsn *insn)
  218. {
  219. JitRegVec vec = { 0 };
  220. JitOpndLookupSwitch *ls;
  221. vec._stride = 1;
  222. switch (insn_opnd_kind[insn->opcode]) {
  223. case JIT_OPND_KIND_Reg:
  224. vec.num = insn_opnd_num[insn->opcode];
  225. vec._base = jit_insn_opnd(insn, 0);
  226. break;
  227. case JIT_OPND_KIND_VReg:
  228. vec.num = jit_insn_opndv_num(insn);
  229. vec._base = jit_insn_opndv(insn, 0);
  230. break;
  231. case JIT_OPND_KIND_LookupSwitch:
  232. ls = jit_insn_opndls(insn);
  233. vec.num = ls->match_pairs_num + 2;
  234. vec._base = &ls->value;
  235. vec._stride = sizeof(ls->match_pairs[0]) / sizeof(*vec._base);
  236. break;
  237. }
  238. return vec;
  239. }
  240. unsigned
  241. jit_insn_opnd_first_use(JitInsn *insn)
  242. {
  243. return insn_opnd_first_use[insn->opcode];
  244. }
  245. JitBasicBlock *
  246. jit_basic_block_new(JitReg label, int n)
  247. {
  248. JitBasicBlock *block = jit_insn_new_PHI(label, n);
  249. if (!block)
  250. return NULL;
  251. block->prev = block->next = block;
  252. return block;
  253. }
  254. void
  255. jit_basic_block_delete(JitBasicBlock *block)
  256. {
  257. JitInsn *insn, *next_insn, *end;
  258. if (!block)
  259. return;
  260. insn = jit_basic_block_first_insn(block);
  261. end = jit_basic_block_end_insn(block);
  262. for (; insn != end; insn = next_insn) {
  263. next_insn = insn->next;
  264. jit_insn_delete(insn);
  265. }
  266. jit_insn_delete(block);
  267. }
  268. JitRegVec
  269. jit_basic_block_preds(JitBasicBlock *block)
  270. {
  271. JitRegVec vec;
  272. vec.num = jit_insn_opndv_num(block) - 1;
  273. vec._base = vec.num > 0 ? jit_insn_opndv(block, 1) : NULL;
  274. vec._stride = 1;
  275. return vec;
  276. }
/* Return a view of the block's successor labels, derived from the
   block's last (terminating) instruction. */
JitRegVec
jit_basic_block_succs(JitBasicBlock *block)
{
    JitInsn *last_insn = jit_basic_block_last_insn(block);
    JitRegVec vec;

    vec.num = 0;
    vec._base = NULL;
    vec._stride = 1;

    switch (last_insn->opcode) {
        case JIT_OP_JMP:
            /* Unconditional jump: single target in operand 0. */
            vec.num = 1;
            vec._base = jit_insn_opnd(last_insn, 0);
            break;

        case JIT_OP_BEQ:
        case JIT_OP_BNE:
        case JIT_OP_BGTS:
        case JIT_OP_BGES:
        case JIT_OP_BLTS:
        case JIT_OP_BLES:
        case JIT_OP_BGTU:
        case JIT_OP_BGEU:
        case JIT_OP_BLTU:
        case JIT_OP_BLEU:
            /* Conditional branch: two targets starting at operand 1
               (operand 0 is not a target). */
            vec.num = 2;
            vec._base = jit_insn_opnd(last_insn, 1);
            break;

        case JIT_OP_LOOKUPSWITCH:
        {
            /* Default target followed by one target per match pair,
               strided over the match-pair records. */
            JitOpndLookupSwitch *opnd = jit_insn_opndls(last_insn);
            vec.num = opnd->match_pairs_num + 1;
            vec._base = &opnd->default_target;
            vec._stride = sizeof(opnd->match_pairs[0]) / sizeof(*vec._base);
            break;
        }

        default:
            /* Not a terminator with targets: empty vector. */
            vec._stride = 0;
    }

    return vec;
}
/* Initialize a compilation context: create the entry/exit blocks,
   exception-block tables, hard-register backed virtual registers and
   the constant-value hash table.  Returns cc on success; on failure
   destroys whatever was initialized and returns NULL. */
JitCompContext *
jit_cc_init(JitCompContext *cc, unsigned htab_size)
{
    JitBasicBlock *entry_block, *exit_block;
    unsigned i, num;

    memset(cc, 0, sizeof(*cc));
    cc->_reference_count = 1;
    jit_annl_enable_basic_block(cc);

    /* Create entry and exit blocks.  They must be the first two
       blocks respectively. */
    if (!(entry_block = jit_cc_new_basic_block(cc, 0)))
        goto fail;

    if (!(exit_block = jit_cc_new_basic_block(cc, 0))) {
        jit_basic_block_delete(entry_block);
        goto fail;
    }

    /* Record the entry and exit labels, whose indexes must be 0 and 1
       respectively. */
    cc->entry_label = jit_basic_block_label(entry_block);
    cc->exit_label = jit_basic_block_label(exit_block);
    bh_assert(jit_reg_no(cc->entry_label) == 0
              && jit_reg_no(cc->exit_label) == 1);

    if (!(cc->exce_basic_blocks =
              jit_calloc(sizeof(JitBasicBlock *) * EXCE_NUM)))
        goto fail;

    if (!(cc->incoming_insns_for_exec_bbs =
              jit_calloc(sizeof(JitIncomingInsnList) * EXCE_NUM)))
        goto fail;

    cc->hreg_info = jit_codegen_get_hreg_info();
    bh_assert(cc->hreg_info->info[JIT_REG_KIND_I32].num > 3);

    /* Initialize virtual registers for hard registers. */
    for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
        if ((num = cc->hreg_info->info[i].num)) {
            /* Initialize the capacity to be large enough. */
            jit_cc_new_reg(cc, i);
            bh_assert(cc->_ann._reg_capacity[i] > num);
            cc->_ann._reg_num[i] = num;
        }
    }

    /* Create registers for frame pointer, exec_env and cmp. */
    cc->fp_reg = jit_reg_new(JIT_REG_KIND_PTR, cc->hreg_info->fp_hreg_index);
    cc->exec_env_reg =
        jit_reg_new(JIT_REG_KIND_PTR, cc->hreg_info->exec_env_hreg_index);
    cc->cmp_reg = jit_reg_new(JIT_REG_KIND_I32, cc->hreg_info->cmp_hreg_index);

    cc->_const_val._hash_table_size = htab_size;
    if (!(cc->_const_val._hash_table =
              jit_calloc(htab_size * sizeof(*cc->_const_val._hash_table))))
        goto fail;

    return cc;

fail:
    jit_cc_destroy(cc);
    return NULL;
}
/* Release all resources owned by the compilation context.  The
   context structure itself is not freed (see jit_cc_delete). */
void
jit_cc_destroy(JitCompContext *cc)
{
    unsigned i, end;
    JitBasicBlock *block;
    JitIncomingInsn *incoming_insn, *incoming_insn_next;

    jit_block_stack_destroy(&cc->block_stack);

    if (cc->jit_frame) {
        if (cc->jit_frame->memory_regs)
            jit_free(cc->jit_frame->memory_regs);
        if (cc->jit_frame->table_regs)
            jit_free(cc->jit_frame->table_regs);
        jit_free(cc->jit_frame);
    }

    if (cc->memory_regs)
        jit_free(cc->memory_regs);
    if (cc->table_regs)
        jit_free(cc->table_regs);

    jit_free(cc->_const_val._hash_table);

    /* Release the instruction hash table. */
    jit_cc_disable_insn_hash(cc);

    jit_free(cc->exce_basic_blocks);

    if (cc->incoming_insns_for_exec_bbs) {
        /* Free each per-exception linked list of incoming insns. */
        for (i = 0; i < EXCE_NUM; i++) {
            incoming_insn = cc->incoming_insns_for_exec_bbs[i];
            while (incoming_insn) {
                incoming_insn_next = incoming_insn->next;
                jit_free(incoming_insn);
                incoming_insn = incoming_insn_next;
            }
        }
        jit_free(cc->incoming_insns_for_exec_bbs);
    }

    /* Release entry and exit blocks. */
    if (0 != cc->entry_label)
        jit_basic_block_delete(jit_cc_entry_basic_block(cc));
    if (0 != cc->exit_label)
        jit_basic_block_delete(jit_cc_exit_basic_block(cc));

    /* clang-format off */
    /* Release blocks and instructions. */
    JIT_FOREACH_BLOCK(cc, i, end, block)
    {
        jit_basic_block_delete(block);
    }
    /* clang-format on */

    /* Release constant values. */
    for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
        jit_free(cc->_const_val._value[i]);
        jit_free(cc->_const_val._next[i]);
    }

    /* Release storage of annotations by expanding jit_ir.def into
       one disable call per annotation. */
#define ANN_LABEL(TYPE, NAME) jit_annl_disable_##NAME(cc);
#define ANN_INSN(TYPE, NAME) jit_anni_disable_##NAME(cc);
#define ANN_REG(TYPE, NAME) jit_annr_disable_##NAME(cc);
#include "jit_ir.def"
#undef ANN_LABEL
#undef ANN_INSN
#undef ANN_REG
}
  428. void
  429. jit_cc_delete(JitCompContext *cc)
  430. {
  431. if (cc && --cc->_reference_count == 0) {
  432. jit_cc_destroy(cc);
  433. jit_free(cc);
  434. }
  435. }
  436. /*
  437. * Reallocate a memory block with the new_size.
  438. * TODO: replace this with imported jit_realloc when it's available.
  439. */
  440. static void *
  441. _jit_realloc(void *ptr, unsigned new_size, unsigned old_size)
  442. {
  443. void *new_ptr = jit_malloc(new_size);
  444. if (new_ptr) {
  445. bh_assert(new_size > old_size);
  446. if (ptr) {
  447. memcpy(new_ptr, ptr, old_size);
  448. memset((uint8 *)new_ptr + old_size, 0, new_size - old_size);
  449. jit_free(ptr);
  450. }
  451. else
  452. memset(new_ptr, 0, new_size);
  453. }
  454. return new_ptr;
  455. }
  456. static unsigned
  457. hash_of_const(unsigned kind, unsigned size, void *val)
  458. {
  459. uint8 *p = (uint8 *)val, *end = p + size;
  460. unsigned hash = kind;
  461. do
  462. hash = ((hash << 5) - hash) + *p++;
  463. while (p != end);
  464. return hash;
  465. }
  466. static inline void *
  467. address_of_const(JitCompContext *cc, JitReg reg, unsigned size)
  468. {
  469. int kind = jit_reg_kind(reg);
  470. unsigned no = jit_reg_no(reg);
  471. unsigned idx = no & ~_JIT_REG_CONST_IDX_FLAG;
  472. bh_assert(kind < JIT_REG_KIND_L32);
  473. bh_assert(jit_reg_is_const_idx(reg) && idx < cc->_const_val._num[kind]);
  474. return cc->_const_val._value[kind] + size * idx;
  475. }
  476. static inline JitReg
  477. next_of_const(JitCompContext *cc, JitReg reg)
  478. {
  479. int kind = jit_reg_kind(reg);
  480. unsigned no = jit_reg_no(reg);
  481. unsigned idx = no & ~_JIT_REG_CONST_IDX_FLAG;
  482. bh_assert(kind < JIT_REG_KIND_L32);
  483. bh_assert(jit_reg_is_const_idx(reg) && idx < cc->_const_val._num[kind]);
  484. return cc->_const_val._next[kind][idx];
  485. }
  486. /**
  487. * Put a constant value into the compilation context.
  488. *
  489. * @param cc compilation context
  490. * @param kind register kind
  491. * @param size size of the value
  492. * @param val pointer to value which must be aligned
  493. *
  494. * @return a constant register containing the value
  495. */
  496. static JitReg
  497. _jit_cc_new_const(JitCompContext *cc, int kind, unsigned size, void *val)
  498. {
  499. unsigned num = cc->_const_val._num[kind], slot;
  500. unsigned capacity = cc->_const_val._capacity[kind];
  501. uint8 *new_value;
  502. JitReg r, *new_next;
  503. bh_assert(num <= capacity);
  504. /* Find the existing value first. */
  505. slot = hash_of_const(kind, size, val) % cc->_const_val._hash_table_size;
  506. r = cc->_const_val._hash_table[slot];
  507. for (; r; r = next_of_const(cc, r))
  508. if (jit_reg_kind(r) == kind
  509. && !memcmp(val, address_of_const(cc, r, size), size))
  510. return r;
  511. if (num == capacity) {
  512. /* Increase the space of value and next. */
  513. capacity = capacity > 0 ? (capacity + capacity / 2) : 16;
  514. new_value = _jit_realloc(cc->_const_val._value[kind], size * capacity,
  515. size * num);
  516. new_next =
  517. _jit_realloc(cc->_const_val._next[kind],
  518. sizeof(*new_next) * capacity, sizeof(*new_next) * num);
  519. if (new_value && new_next) {
  520. cc->_const_val._value[kind] = new_value;
  521. cc->_const_val._next[kind] = new_next;
  522. }
  523. else {
  524. jit_set_last_error(cc, "create const register failed");
  525. jit_free(new_value);
  526. jit_free(new_next);
  527. return 0;
  528. }
  529. cc->_const_val._capacity[kind] = capacity;
  530. }
  531. bh_assert(num + 1 < (uint32)_JIT_REG_CONST_IDX_FLAG);
  532. r = jit_reg_new(kind, _JIT_REG_CONST_IDX_FLAG | num);
  533. memcpy(cc->_const_val._value[kind] + size * num, val, size);
  534. cc->_const_val._next[kind][num] = cc->_const_val._hash_table[slot];
  535. cc->_const_val._hash_table[slot] = r;
  536. cc->_const_val._num[kind] = num + 1;
  537. return r;
  538. }
/* Extract and sign-extend the payload packed in a const-value
   register: shift the kind/flag bits out of the top, then arithmetic
   shift back to sign-extend the remaining value bits. */
static inline int32
get_const_val_in_reg(JitReg reg)
{
    int shift = 8 * sizeof(reg) - _JIT_REG_KIND_SHIFT + 1;
    return ((int32)(reg << shift)) >> shift;
}
/* Try to pack val directly into a const-value register; if the value
   does not survive the round-trip through get_const_val_in_reg, fall
   back to interning it in the constant pool. */
#define _JIT_CC_NEW_CONST_HELPER(KIND, TYPE, val)                             \
    do {                                                                      \
        JitReg reg = jit_reg_new(                                             \
            JIT_REG_KIND_##KIND,                                              \
            (_JIT_REG_CONST_VAL_FLAG | ((JitReg)val & ~_JIT_REG_KIND_MASK))); \
                                                                              \
        if ((TYPE)get_const_val_in_reg(reg) == val)                           \
            return reg;                                                       \
        return _jit_cc_new_const(cc, JIT_REG_KIND_##KIND, sizeof(val), &val); \
    } while (0)
/* Create (or reuse) an I32 constant whose 64-bit payload packs the
   value in the low half and the relocation info in the high half. */
JitReg
jit_cc_new_const_I32_rel(JitCompContext *cc, int32 val, uint32 rel)
{
    uint64 val64 = (uint64)(uint32)val | ((uint64)rel << 32);
    _JIT_CC_NEW_CONST_HELPER(I32, uint64, val64);
}
/* Create (or reuse) an I64 constant register for val. */
JitReg
jit_cc_new_const_I64(JitCompContext *cc, int64 val)
{
    _JIT_CC_NEW_CONST_HELPER(I64, int64, val);
}
  566. JitReg
  567. jit_cc_new_const_F32(JitCompContext *cc, float val)
  568. {
  569. int32 float_neg_zero = 0x80000000;
  570. if (!memcmp(&val, &float_neg_zero, sizeof(float)))
  571. /* Create const -0.0f */
  572. return _jit_cc_new_const(cc, JIT_REG_KIND_F32, sizeof(float), &val);
  573. _JIT_CC_NEW_CONST_HELPER(F32, float, val);
  574. }
  575. JitReg
  576. jit_cc_new_const_F64(JitCompContext *cc, double val)
  577. {
  578. int64 double_neg_zero = 0x8000000000000000ll;
  579. if (!memcmp(&val, &double_neg_zero, sizeof(double)))
  580. /* Create const -0.0d */
  581. return _jit_cc_new_const(cc, JIT_REG_KIND_F64, sizeof(double), &val);
  582. _JIT_CC_NEW_CONST_HELPER(F64, double, val);
  583. }
  584. #undef _JIT_CC_NEW_CONST_HELPER
/* Fetch the value of a constant register: decode it in place for
   const-value registers, or read it from the constant pool for
   const-index registers. */
#define _JIT_CC_GET_CONST_HELPER(KIND, TYPE)                          \
    do {                                                              \
        bh_assert(jit_reg_kind(reg) == JIT_REG_KIND_##KIND);          \
        bh_assert(jit_reg_is_const(reg));                             \
                                                                      \
        return (jit_reg_is_const_val(reg)                             \
                    ? (TYPE)get_const_val_in_reg(reg)                 \
                    : *(TYPE *)(address_of_const(cc, reg, sizeof(TYPE)))); \
    } while (0)
/* Fetch the full 64-bit payload (value + relocation info) of an I32
   constant register. */
static uint64
jit_cc_get_const_I32_helper(JitCompContext *cc, JitReg reg)
{
    _JIT_CC_GET_CONST_HELPER(I32, uint64);
}
  599. uint32
  600. jit_cc_get_const_I32_rel(JitCompContext *cc, JitReg reg)
  601. {
  602. return (uint32)(jit_cc_get_const_I32_helper(cc, reg) >> 32);
  603. }
  604. int32
  605. jit_cc_get_const_I32(JitCompContext *cc, JitReg reg)
  606. {
  607. return (int32)(jit_cc_get_const_I32_helper(cc, reg));
  608. }
/* Fetch the value of an I64 constant register. */
int64
jit_cc_get_const_I64(JitCompContext *cc, JitReg reg)
{
    _JIT_CC_GET_CONST_HELPER(I64, int64);
}
/* Fetch the value of an F32 constant register. */
float
jit_cc_get_const_F32(JitCompContext *cc, JitReg reg)
{
    _JIT_CC_GET_CONST_HELPER(F32, float);
}
/* Fetch the value of an F64 constant register. */
double
jit_cc_get_const_F64(JitCompContext *cc, JitReg reg)
{
    _JIT_CC_GET_CONST_HELPER(F64, double);
}
#undef _JIT_CC_GET_CONST_HELPER
/* Grow one annotation array (if enabled) from num to capacity
   elements; clears the local `successful` flag on allocation
   failure.  POSTFIX is empty for label/insn annotations and `[kind]`
   for per-kind register annotations. */
#define _JIT_REALLOC_ANN(TYPE, NAME, ANN, POSTFIX)                     \
    if (successful && cc->_ann._##ANN##_##NAME##_enabled) {            \
        TYPE *ptr = _jit_realloc(cc->_ann._##ANN##_##NAME POSTFIX,     \
                                 sizeof(TYPE) * capacity,              \
                                 sizeof(TYPE) * num);                  \
        if (ptr)                                                       \
            cc->_ann._##ANN##_##NAME POSTFIX = ptr;                    \
        else                                                           \
            successful = false;                                        \
    }
/* Allocate a new label register, growing every enabled label
   annotation array when the capacity is exhausted.  Returns 0 on
   allocation failure. */
JitReg
jit_cc_new_label(JitCompContext *cc)
{
    unsigned num = cc->_ann._label_num;
    unsigned capacity = cc->_ann._label_capacity;
    bool successful = true;

    bh_assert(num <= capacity);

    if (num == capacity) {
        /* Grow by half; start with 16 slots. */
        capacity = capacity > 0 ? (capacity + capacity / 2) : 16;
#define EMPTY_POSTFIX
#define ANN_LABEL(TYPE, NAME) _JIT_REALLOC_ANN(TYPE, NAME, label, EMPTY_POSTFIX)
#include "jit_ir.def"
#undef ANN_LABEL
#undef EMPTY_POSTFIX
        if (!successful) {
            jit_set_last_error(cc, "create label register failed");
            return 0;
        }

        cc->_ann._label_capacity = capacity;
    }

    cc->_ann._label_num = num + 1;
    return jit_reg_new(JIT_REG_KIND_L32, num);
}
  657. JitBasicBlock *
  658. jit_cc_new_basic_block(JitCompContext *cc, int n)
  659. {
  660. JitReg label = jit_cc_new_label(cc);
  661. JitBasicBlock *block = NULL;
  662. if (label && (block = jit_basic_block_new(label, n)))
  663. /* Void 0 register indicates error in creation. */
  664. *(jit_annl_basic_block(cc, label)) = block;
  665. else
  666. jit_set_last_error(cc, "create basic block failed");
  667. return block;
  668. }
/* Replace block with a new block head that has n PHI operand slots,
   reusing the same label and keeping the existing instructions.
   Returns the new block, or NULL on failure (block is untouched). */
JitBasicBlock *
jit_cc_resize_basic_block(JitCompContext *cc, JitBasicBlock *block, int n)
{
    JitReg label = jit_basic_block_label(block);
    JitInsn *insn = jit_basic_block_first_insn(block);
    JitBasicBlock *new_block = jit_basic_block_new(label, n);

    if (!new_block) {
        jit_set_last_error(cc, "resize basic block failed");
        return NULL;
    }

    /* Detach the old head; if the block had instructions, splice the
       new head in front of the old first instruction. */
    jit_insn_unlink(block);

    if (insn != block)
        jit_insn_insert_before(insn, new_block);

    /* Re-point the label's basic_block annotation at the new head. */
    bh_assert(*(jit_annl_basic_block(cc, label)) == block);
    *(jit_annl_basic_block(cc, label)) = new_block;
    jit_insn_delete(block);
    return new_block;
}
  687. bool
  688. jit_cc_enable_insn_hash(JitCompContext *cc, unsigned n)
  689. {
  690. if (jit_anni_is_enabled__hash_link(cc))
  691. return true;
  692. if (!jit_anni_enable__hash_link(cc))
  693. return false;
  694. /* The table must not exist. */
  695. bh_assert(!cc->_insn_hash_table._table);
  696. /* Integer overflow cannot happen because n << 4G (at most several
  697. times of 64K in the most extreme case). */
  698. if (!(cc->_insn_hash_table._table =
  699. jit_calloc(n * sizeof(*cc->_insn_hash_table._table)))) {
  700. jit_anni_disable__hash_link(cc);
  701. return false;
  702. }
  703. cc->_insn_hash_table._size = n;
  704. return true;
  705. }
  706. void
  707. jit_cc_disable_insn_hash(JitCompContext *cc)
  708. {
  709. jit_anni_disable__hash_link(cc);
  710. jit_free(cc->_insn_hash_table._table);
  711. cc->_insn_hash_table._table = NULL;
  712. cc->_insn_hash_table._size = 0;
  713. }
  714. void
  715. jit_cc_reset_insn_hash(JitCompContext *cc)
  716. {
  717. if (jit_anni_is_enabled__hash_link(cc))
  718. memset(cc->_insn_hash_table._table, 0,
  719. cc->_insn_hash_table._size
  720. * sizeof(*cc->_insn_hash_table._table));
  721. }
/* Assign a unique id to insn, growing every enabled instruction
   annotation array when the capacity is exhausted.  Returns insn on
   success (NULL insn is passed through), NULL on allocation failure
   (insn itself is not freed). */
JitInsn *
jit_cc_set_insn_uid(JitCompContext *cc, JitInsn *insn)
{
    if (insn) {
        unsigned num = cc->_ann._insn_num;
        unsigned capacity = cc->_ann._insn_capacity;
        bool successful = true;

        bh_assert(num <= capacity);

        if (num == capacity) {
            /* Grow by half; start with 64 slots. */
            capacity = capacity > 0 ? (capacity + capacity / 2) : 64;
#define EMPTY_POSTFIX
#define ANN_INSN(TYPE, NAME) _JIT_REALLOC_ANN(TYPE, NAME, insn, EMPTY_POSTFIX)
#include "jit_ir.def"
#undef ANN_INSN
#undef EMPTY_POSTFIX
            if (!successful) {
                jit_set_last_error(cc, "set insn uid failed");
                return NULL;
            }

            cc->_ann._insn_capacity = capacity;
        }

        cc->_ann._insn_num = num + 1;
        insn->uid = num;
    }

    return insn;
}
  748. JitInsn *
  749. _jit_cc_set_insn_uid_for_new_insn(JitCompContext *cc, JitInsn *insn)
  750. {
  751. if (jit_cc_set_insn_uid(cc, insn))
  752. return insn;
  753. jit_insn_delete(insn);
  754. return NULL;
  755. }
/* Allocate a new virtual register of the given kind, growing every
   enabled register annotation array for that kind when the capacity
   is exhausted.  Returns 0 on allocation failure. */
JitReg
jit_cc_new_reg(JitCompContext *cc, unsigned kind)
{
    unsigned num = jit_cc_reg_num(cc, kind);
    unsigned capacity = cc->_ann._reg_capacity[kind];
    bool successful = true;

    bh_assert(num <= capacity);

    if (num == capacity) {
        capacity = (capacity == 0
                        /* Initialize the capacity to be larger than hard
                           register number. */
                        ? cc->hreg_info->info[kind].num + 16
                        : capacity + capacity / 2);
#define ANN_REG(TYPE, NAME) _JIT_REALLOC_ANN(TYPE, NAME, reg, [kind])
#include "jit_ir.def"
#undef ANN_REG
        if (!successful) {
            jit_set_last_error(cc, "create register failed");
            return 0;
        }

        cc->_ann._reg_capacity[kind] = capacity;
    }

    cc->_ann._reg_num[kind] = num + 1;
    return jit_reg_new(kind, num);
}
#undef _JIT_REALLOC_ANN
/* Define the annotation enabling functions by expanding jit_ir.def.
   Each function allocates the annotation storage sized by the current
   capacity (per label, per instruction, or per register kind) and
   marks the annotation enabled.  Idempotent when already enabled. */
#define ANN_LABEL(TYPE, NAME)                                              \
    bool jit_annl_enable_##NAME(JitCompContext *cc)                        \
    {                                                                      \
        if (cc->_ann._label_##NAME##_enabled)                              \
            return true;                                                   \
                                                                           \
        if (cc->_ann._label_capacity > 0                                   \
            && !(cc->_ann._label_##NAME =                                  \
                     jit_calloc(cc->_ann._label_capacity * sizeof(TYPE)))) { \
            jit_set_last_error(cc, "annl enable " #NAME "failed");         \
            return false;                                                  \
        }                                                                  \
                                                                           \
        cc->_ann._label_##NAME##_enabled = 1;                              \
        return true;                                                       \
    }
#define ANN_INSN(TYPE, NAME)                                               \
    bool jit_anni_enable_##NAME(JitCompContext *cc)                        \
    {                                                                      \
        if (cc->_ann._insn_##NAME##_enabled)                               \
            return true;                                                   \
                                                                           \
        if (cc->_ann._insn_capacity > 0                                    \
            && !(cc->_ann._insn_##NAME =                                   \
                     jit_calloc(cc->_ann._insn_capacity * sizeof(TYPE)))) { \
            jit_set_last_error(cc, "anni enable " #NAME "failed");         \
            return false;                                                  \
        }                                                                  \
                                                                           \
        cc->_ann._insn_##NAME##_enabled = 1;                               \
        return true;                                                       \
    }
#define ANN_REG(TYPE, NAME)                                                \
    bool jit_annr_enable_##NAME(JitCompContext *cc)                        \
    {                                                                      \
        unsigned k;                                                        \
                                                                           \
        if (cc->_ann._reg_##NAME##_enabled)                                \
            return true;                                                   \
                                                                           \
        /* One array per register kind; roll back on failure. */           \
        for (k = JIT_REG_KIND_VOID; k < JIT_REG_KIND_L32; k++)             \
            if (cc->_ann._reg_capacity[k] > 0                              \
                && !(cc->_ann._reg_##NAME[k] = jit_calloc(                 \
                         cc->_ann._reg_capacity[k] * sizeof(TYPE)))) {     \
                jit_set_last_error(cc, "annr enable " #NAME "failed");     \
                jit_annr_disable_##NAME(cc);                               \
                return false;                                              \
            }                                                              \
                                                                           \
        cc->_ann._reg_##NAME##_enabled = 1;                                \
        return true;                                                       \
    }
#include "jit_ir.def"
#undef ANN_LABEL
#undef ANN_INSN
#undef ANN_REG
/*
 * Generate the annotation disabling functions via X-macro expansion of
 * jit_ir.def: for each annotation NAME declared there, define
 * jit_ann{l,i,r}_disable_NAME, which frees the annotation array(s),
 * resets the pointer(s) to NULL and clears the enabled flag.
 *
 * NOTE(review): the enable path may call disable with only some register
 * kinds allocated, so this presumably relies on jit_free(NULL) being a
 * no-op — confirm against the allocator.
 */
#define ANN_LABEL(TYPE, NAME)                        \
    void jit_annl_disable_##NAME(JitCompContext *cc) \
    {                                                \
        jit_free(cc->_ann._label_##NAME);            \
        cc->_ann._label_##NAME = NULL;               \
        cc->_ann._label_##NAME##_enabled = 0;        \
    }

#define ANN_INSN(TYPE, NAME)                         \
    void jit_anni_disable_##NAME(JitCompContext *cc) \
    {                                                \
        jit_free(cc->_ann._insn_##NAME);             \
        cc->_ann._insn_##NAME = NULL;                \
        cc->_ann._insn_##NAME##_enabled = 0;         \
    }

/* Register annotations hold one array per register kind; free them all. */
#define ANN_REG(TYPE, NAME)                                    \
    void jit_annr_disable_##NAME(JitCompContext *cc)           \
    {                                                          \
        unsigned k;                                            \
                                                               \
        for (k = JIT_REG_KIND_VOID; k < JIT_REG_KIND_L32; k++) \
        {                                                      \
            jit_free(cc->_ann._reg_##NAME[k]);                 \
            cc->_ann._reg_##NAME[k] = NULL;                    \
        }                                                      \
                                                               \
        cc->_ann._reg_##NAME##_enabled = 0;                    \
    }

#include "jit_ir.def"
#undef ANN_LABEL
#undef ANN_INSN
#undef ANN_REG
  868. char *
  869. jit_get_last_error(JitCompContext *cc)
  870. {
  871. return cc->last_error[0] == '\0' ? NULL : cc->last_error;
  872. }
  873. void
  874. jit_set_last_error_v(JitCompContext *cc, const char *format, ...)
  875. {
  876. va_list args;
  877. va_start(args, format);
  878. vsnprintf(cc->last_error, sizeof(cc->last_error), format, args);
  879. va_end(args);
  880. }
  881. void
  882. jit_set_last_error(JitCompContext *cc, const char *error)
  883. {
  884. if (error)
  885. snprintf(cc->last_error, sizeof(cc->last_error), "Error: %s", error);
  886. else
  887. cc->last_error[0] = '\0';
  888. }
/**
 * Rebuild the control-flow-graph bookkeeping of the compilation context:
 * count each basic block's predecessors, resize the predecessor vectors
 * accordingly, then fill them in.
 *
 * @param cc the compilation context
 *
 * @return true on success, false on failure
 */
bool
jit_cc_update_cfg(JitCompContext *cc)
{
    JitBasicBlock *block;
    unsigned block_index, end, succ_index, idx;
    JitReg *target;
    bool retval = false;

    /* pred_num is a temporary per-label annotation, enabled only for the
       duration of this function. */
    if (!jit_annl_enable_pred_num(cc))
        return false;

    /* Update pred_num of all blocks. */
    JIT_FOREACH_BLOCK_ENTRY_EXIT(cc, block_index, end, block)
    {
        JitRegVec succs = jit_basic_block_succs(block);

        JIT_REG_VEC_FOREACH(succs, succ_index, target)
            if (jit_reg_is_kind(L32, *target))
                *(jit_annl_pred_num(cc, *target)) += 1;
    }

    /* Resize predecessor vectors of body blocks. */
    JIT_FOREACH_BLOCK(cc, block_index, end, block)
    {
        if (!jit_cc_resize_basic_block(
                cc, block,
                *(jit_annl_pred_num(cc, jit_basic_block_label(block)))))
            goto cleanup_and_return;
    }

    /* Fill in predecessor vectors all blocks. */
    JIT_FOREACH_BLOCK_REVERSE_ENTRY_EXIT(cc, block_index, block)
    {
        JitRegVec succs = jit_basic_block_succs(block), preds;

        JIT_REG_VEC_FOREACH(succs, succ_index, target)
            if (jit_reg_is_kind(L32, *target)) {
                preds = jit_basic_block_preds(*(jit_annl_basic_block(cc, *target)));
                /* Reuse pred_num as a running slot index: decrement it per
                   incoming edge and store this block's label at that slot. */
                bh_assert(*(jit_annl_pred_num(cc, *target)) > 0);
                idx = *(jit_annl_pred_num(cc, *target)) - 1;
                *(jit_annl_pred_num(cc, *target)) = idx;
                *(jit_reg_vec_at(&preds, idx)) = jit_basic_block_label(block);
            }
    }

    retval = true;

cleanup_and_return:
    jit_annl_disable_pred_num(cc);
    return retval;
}
  932. void
  933. jit_value_stack_push(JitValueStack *stack, JitValue *value)
  934. {
  935. if (!stack->value_list_head)
  936. stack->value_list_head = stack->value_list_end = value;
  937. else {
  938. stack->value_list_end->next = value;
  939. value->prev = stack->value_list_end;
  940. stack->value_list_end = value;
  941. }
  942. }
  943. JitValue *
  944. jit_value_stack_pop(JitValueStack *stack)
  945. {
  946. JitValue *value = stack->value_list_end;
  947. bh_assert(stack->value_list_end);
  948. if (stack->value_list_head == stack->value_list_end)
  949. stack->value_list_head = stack->value_list_end = NULL;
  950. else {
  951. stack->value_list_end = stack->value_list_end->prev;
  952. stack->value_list_end->next = NULL;
  953. value->prev = NULL;
  954. }
  955. return value;
  956. }
  957. void
  958. jit_value_stack_destroy(JitValueStack *stack)
  959. {
  960. JitValue *value = stack->value_list_head, *p;
  961. while (value) {
  962. p = value->next;
  963. jit_free(value);
  964. value = p;
  965. }
  966. stack->value_list_head = NULL;
  967. stack->value_list_end = NULL;
  968. }
  969. void
  970. jit_block_stack_push(JitBlockStack *stack, JitBlock *block)
  971. {
  972. if (!stack->block_list_head)
  973. stack->block_list_head = stack->block_list_end = block;
  974. else {
  975. stack->block_list_end->next = block;
  976. block->prev = stack->block_list_end;
  977. stack->block_list_end = block;
  978. }
  979. }
  980. JitBlock *
  981. jit_block_stack_top(JitBlockStack *stack)
  982. {
  983. return stack->block_list_end;
  984. }
  985. JitBlock *
  986. jit_block_stack_pop(JitBlockStack *stack)
  987. {
  988. JitBlock *block = stack->block_list_end;
  989. bh_assert(stack->block_list_end);
  990. if (stack->block_list_head == stack->block_list_end)
  991. stack->block_list_head = stack->block_list_end = NULL;
  992. else {
  993. stack->block_list_end = stack->block_list_end->prev;
  994. stack->block_list_end->next = NULL;
  995. block->prev = NULL;
  996. }
  997. return block;
  998. }
  999. void
  1000. jit_block_stack_destroy(JitBlockStack *stack)
  1001. {
  1002. JitBlock *block = stack->block_list_head, *p;
  1003. while (block) {
  1004. p = block->next;
  1005. jit_value_stack_destroy(&block->value_stack);
  1006. jit_block_destroy(block);
  1007. block = p;
  1008. }
  1009. stack->block_list_head = NULL;
  1010. stack->block_list_end = NULL;
  1011. }
  1012. bool
  1013. jit_block_add_incoming_insn(JitBlock *block, JitInsn *insn, uint32 opnd_idx)
  1014. {
  1015. JitIncomingInsn *incoming_insn;
  1016. if (!(incoming_insn = jit_calloc((uint32)sizeof(JitIncomingInsn))))
  1017. return false;
  1018. incoming_insn->insn = insn;
  1019. incoming_insn->opnd_idx = opnd_idx;
  1020. incoming_insn->next = block->incoming_insns_for_end_bb;
  1021. block->incoming_insns_for_end_bb = incoming_insn;
  1022. return true;
  1023. }
  1024. void
  1025. jit_block_destroy(JitBlock *block)
  1026. {
  1027. JitIncomingInsn *incoming_insn, *incoming_insn_next;
  1028. jit_value_stack_destroy(&block->value_stack);
  1029. if (block->param_types)
  1030. jit_free(block->param_types);
  1031. if (block->result_types)
  1032. jit_free(block->result_types);
  1033. incoming_insn = block->incoming_insns_for_end_bb;
  1034. while (incoming_insn) {
  1035. incoming_insn_next = incoming_insn->next;
  1036. jit_free(incoming_insn);
  1037. incoming_insn = incoming_insn_next;
  1038. }
  1039. jit_free(block);
  1040. }
  1041. static inline uint8
  1042. to_stack_value_type(uint8 type)
  1043. {
  1044. #if WASM_ENABLE_REF_TYPES != 0
  1045. if (type == VALUE_TYPE_EXTERNREF || type == VALUE_TYPE_FUNCREF)
  1046. return VALUE_TYPE_I32;
  1047. #endif
  1048. return type;
  1049. }
/**
 * Pop a value of the given WASM type from the top block's value stack and
 * from the mirrored JIT frame, returning the register that holds it.
 *
 * @param cc the compilation context
 * @param type expected WASM value type of the popped value
 * @param p_value output: the JIT register holding the popped value
 *
 * @return true on success, false on stack underflow or type mismatch
 */
bool
jit_cc_pop_value(JitCompContext *cc, uint8 type, JitReg *p_value)
{
    JitValue *jit_value = NULL;
    JitReg value = 0;

    if (!jit_block_stack_top(&cc->block_stack)) {
        jit_set_last_error(cc, "WASM block stack underflow");
        return false;
    }
    if (!jit_block_stack_top(&cc->block_stack)->value_stack.value_list_end) {
        jit_set_last_error(cc, "WASM data stack underflow");
        return false;
    }

    jit_value = jit_value_stack_pop(
        &jit_block_stack_top(&cc->block_stack)->value_stack);
    bh_assert(jit_value);

    /* Types are normalized when pushed (see to_stack_value_type), so the
       requested type must be normalized before comparing. */
    if (jit_value->type != to_stack_value_type(type)) {
        jit_set_last_error(cc, "invalid WASM stack data type");
        jit_free(jit_value);
        return false;
    }

    /* Pop the mirrored slot(s) from the JIT frame as well. */
    switch (jit_value->type) {
        case VALUE_TYPE_I32:
            value = pop_i32(cc->jit_frame);
            break;
        case VALUE_TYPE_I64:
            value = pop_i64(cc->jit_frame);
            break;
        case VALUE_TYPE_F32:
            value = pop_f32(cc->jit_frame);
            break;
        case VALUE_TYPE_F64:
            value = pop_f64(cc->jit_frame);
            break;
        default:
            bh_assert(0);
            break;
    }

    /* The value stack and the frame stack must stay in sync. */
    bh_assert(cc->jit_frame->sp == jit_value->value);
    bh_assert(value == jit_value->value->reg);
    *p_value = value;
    jit_free(jit_value);
    return true;
}
  1094. bool
  1095. jit_cc_push_value(JitCompContext *cc, uint8 type, JitReg value)
  1096. {
  1097. JitValue *jit_value;
  1098. if (!jit_block_stack_top(&cc->block_stack)) {
  1099. jit_set_last_error(cc, "WASM block stack underflow");
  1100. return false;
  1101. }
  1102. if (!(jit_value = jit_calloc(sizeof(JitValue)))) {
  1103. jit_set_last_error(cc, "allocate memory failed");
  1104. return false;
  1105. }
  1106. bh_assert(value);
  1107. jit_value->type = to_stack_value_type(type);
  1108. jit_value->value = cc->jit_frame->sp;
  1109. jit_value_stack_push(&jit_block_stack_top(&cc->block_stack)->value_stack,
  1110. jit_value);
  1111. switch (jit_value->type) {
  1112. case VALUE_TYPE_I32:
  1113. push_i32(cc->jit_frame, value);
  1114. break;
  1115. case VALUE_TYPE_I64:
  1116. push_i64(cc->jit_frame, value);
  1117. break;
  1118. case VALUE_TYPE_F32:
  1119. push_f32(cc->jit_frame, value);
  1120. break;
  1121. case VALUE_TYPE_F64:
  1122. push_f64(cc->jit_frame, value);
  1123. break;
  1124. }
  1125. return true;
  1126. }
  1127. bool
  1128. _jit_insn_check_opnd_access_Reg(const JitInsn *insn, unsigned n)
  1129. {
  1130. unsigned opcode = insn->opcode;
  1131. return (insn_opnd_kind[opcode] == JIT_OPND_KIND_Reg
  1132. && n < insn_opnd_num[opcode]);
  1133. }
  1134. bool
  1135. _jit_insn_check_opnd_access_VReg(const JitInsn *insn, unsigned n)
  1136. {
  1137. unsigned opcode = insn->opcode;
  1138. return (insn_opnd_kind[opcode] == JIT_OPND_KIND_VReg
  1139. && n < insn->_opnd._opnd_VReg._reg_num);
  1140. }
  1141. bool
  1142. _jit_insn_check_opnd_access_LookupSwitch(const JitInsn *insn)
  1143. {
  1144. unsigned opcode = insn->opcode;
  1145. return (insn_opnd_kind[opcode] == JIT_OPND_KIND_LookupSwitch);
  1146. }
/**
 * Keep the given hard register live across @the_insn by surrounding the
 * instruction with two self-move instructions, so the register allocator
 * neither spills it before the instruction nor treats it as free.
 *
 * @param cc the compilation context
 * @param the_insn the instruction across which the register must be kept
 * @param reg_to_lock the hard register to lock
 *
 * @return true on success, false if an insn could not be generated
 */
bool
jit_lock_reg_in_insn(JitCompContext *cc, JitInsn *the_insn, JitReg reg_to_lock)
{
    bool ret = false;
    JitInsn *prevent_spill = NULL;
    JitInsn *indicate_using = NULL;

    if (!the_insn)
        goto just_return;

    /* Fixed hard registers are not subject to allocation — nothing to do. */
    if (jit_cc_is_hreg_fixed(cc, reg_to_lock)) {
        ret = true;
        goto just_return;
    }

    /**
     * give the virtual register of the locked hard register a minimum,
     * non-zero distance, so as to prevent it from being spilled out
     */
    prevent_spill = jit_insn_new_MOV(reg_to_lock, reg_to_lock);
    if (!prevent_spill)
        goto just_return;

    jit_insn_insert_before(the_insn, prevent_spill);

    /**
     * announce the locked hard register is being used, and do necessary
     * spill ASAP
     */
    indicate_using = jit_insn_new_MOV(reg_to_lock, reg_to_lock);
    if (!indicate_using)
        goto just_return;

    jit_insn_insert_after(the_insn, indicate_using);

    ret = true;

just_return:
    /* NOTE: inserted insns are owned by the insn list; no cleanup needed
       for a partially completed lock on the failure path here. */
    if (!ret)
        jit_set_last_error(cc, "generate insn failed");
    return ret;
}