// asmthumb.c — emitter for 16/32-bit Thumb machine code (MicroPython py/ core)
  1. /*
  2. * This file is part of the MicroPython project, http://micropython.org/
  3. *
  4. * The MIT License (MIT)
  5. *
  6. * Copyright (c) 2013, 2014 Damien P. George
  7. *
  8. * Permission is hereby granted, free of charge, to any person obtaining a copy
  9. * of this software and associated documentation files (the "Software"), to deal
  10. * in the Software without restriction, including without limitation the rights
  11. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  12. * copies of the Software, and to permit persons to whom the Software is
  13. * furnished to do so, subject to the following conditions:
  14. *
  15. * The above copyright notice and this permission notice shall be included in
  16. * all copies or substantial portions of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  21. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  22. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  23. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  24. * THE SOFTWARE.
  25. */
  26. #include <stdio.h>
  27. #include <assert.h>
  28. #include <string.h>
  29. #include "py/mpconfig.h"
  30. // wrapper around everything in this file
  31. #if MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
  32. #include "py/mphal.h"
  33. #include "py/asmthumb.h"
  34. #define UNSIGNED_FIT8(x) (((x) & 0xffffff00) == 0)
  35. #define UNSIGNED_FIT16(x) (((x) & 0xffff0000) == 0)
  36. #define SIGNED_FIT8(x) (((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80)
  37. #define SIGNED_FIT9(x) (((x) & 0xffffff00) == 0) || (((x) & 0xffffff00) == 0xffffff00)
  38. #define SIGNED_FIT12(x) (((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800)
  39. #define SIGNED_FIT23(x) (((x) & 0xffc00000) == 0) || (((x) & 0xffc00000) == 0xffc00000)
// Reserve n bytes in the output stream and return a pointer to write them
// at; may return NULL, in which case nothing should be written (callers in
// this file check for that).  Thin wrapper over the generic assembler base.
static inline byte *asm_thumb_get_cur_to_write_bytes(asm_thumb_t *as, int n) {
    return mp_asm_base_get_cur_to_write_bytes(&as->base, n);
}
// Called at the end of each assembler pass.  On STM32F7-series MCUs the
// final (emit) pass must clean the D-cache and invalidate the I-cache so
// the CPU fetches the freshly written machine code; on other targets this
// is a no-op.
void asm_thumb_end_pass(asm_thumb_t *as) {
    (void)as; // as is unused when the cache-maintenance block below is compiled out
    // could check labels are resolved...

    #if defined(MCU_SERIES_F7)
    if (as->base.pass == MP_ASM_PASS_EMIT) {
        // flush D-cache, so the code emitted is stored in memory
        MP_HAL_CLEAN_DCACHE(as->base.code_base, as->base.code_size);
        // invalidate I-cache
        SCB_InvalidateICache();
    }
    #endif
}
  55. /*
  56. STATIC void asm_thumb_write_byte_1(asm_thumb_t *as, byte b1) {
  57. byte *c = asm_thumb_get_cur_to_write_bytes(as, 1);
  58. c[0] = b1;
  59. }
  60. */
  61. /*
  62. #define IMM32_L0(x) ((x) & 0xff)
  63. #define IMM32_L1(x) (((x) >> 8) & 0xff)
  64. #define IMM32_L2(x) (((x) >> 16) & 0xff)
  65. #define IMM32_L3(x) (((x) >> 24) & 0xff)
  66. STATIC void asm_thumb_write_word32(asm_thumb_t *as, int w32) {
  67. byte *c = asm_thumb_get_cur_to_write_bytes(as, 4);
  68. c[0] = IMM32_L0(w32);
  69. c[1] = IMM32_L1(w32);
  70. c[2] = IMM32_L2(w32);
  71. c[3] = IMM32_L3(w32);
  72. }
  73. */
  74. // rlolist is a bit map indicating desired lo-registers
  75. #define OP_PUSH_RLIST(rlolist) (0xb400 | (rlolist))
  76. #define OP_PUSH_RLIST_LR(rlolist) (0xb400 | 0x0100 | (rlolist))
  77. #define OP_POP_RLIST(rlolist) (0xbc00 | (rlolist))
  78. #define OP_POP_RLIST_PC(rlolist) (0xbc00 | 0x0100 | (rlolist))
  79. #define OP_ADD_SP(num_words) (0xb000 | (num_words))
  80. #define OP_SUB_SP(num_words) (0xb080 | (num_words))
  81. // locals:
  82. // - stored on the stack in ascending order
  83. // - numbered 0 through num_locals-1
  84. // - SP points to first local
  85. //
  86. // | SP
  87. // v
  88. // l0 l1 l2 ... l(n-1)
  89. // ^ ^
  90. // | low address | high address in RAM
  91. void asm_thumb_entry(asm_thumb_t *as, int num_locals) {
  92. // work out what to push and how many extra spaces to reserve on stack
  93. // so that we have enough for all locals and it's aligned an 8-byte boundary
  94. // we push extra regs (r1, r2, r3) to help do the stack adjustment
  95. // we probably should just always subtract from sp, since this would be more efficient
  96. // for push rlist, lowest numbered register at the lowest address
  97. uint reglist;
  98. uint stack_adjust;
  99. if (num_locals < 0) {
  100. num_locals = 0;
  101. }
  102. // don't pop r0 because it's used for return value
  103. switch (num_locals) {
  104. case 0:
  105. reglist = 0xf2;
  106. stack_adjust = 0;
  107. break;
  108. case 1:
  109. reglist = 0xf2;
  110. stack_adjust = 0;
  111. break;
  112. case 2:
  113. reglist = 0xfe;
  114. stack_adjust = 0;
  115. break;
  116. case 3:
  117. reglist = 0xfe;
  118. stack_adjust = 0;
  119. break;
  120. default:
  121. reglist = 0xfe;
  122. stack_adjust = ((num_locals - 3) + 1) & (~1);
  123. break;
  124. }
  125. asm_thumb_op16(as, OP_PUSH_RLIST_LR(reglist));
  126. if (stack_adjust > 0) {
  127. asm_thumb_op16(as, OP_SUB_SP(stack_adjust));
  128. }
  129. as->push_reglist = reglist;
  130. as->stack_adjust = stack_adjust;
  131. }
  132. void asm_thumb_exit(asm_thumb_t *as) {
  133. if (as->stack_adjust > 0) {
  134. asm_thumb_op16(as, OP_ADD_SP(as->stack_adjust));
  135. }
  136. asm_thumb_op16(as, OP_POP_RLIST_PC(as->push_reglist));
  137. }
// Return the code offset recorded for the given label.  The result may be
// (mp_uint_t)-1 when the label is not yet resolved (a forward reference on
// an early pass) — callers such as asm_thumb_b_label check for this.
STATIC mp_uint_t get_label_dest(asm_thumb_t *as, uint label) {
    assert(label < as->base.max_num_labels);
    return as->base.label_offsets[label];
}
  142. void asm_thumb_op16(asm_thumb_t *as, uint op) {
  143. byte *c = asm_thumb_get_cur_to_write_bytes(as, 2);
  144. if (c != NULL) {
  145. // little endian
  146. c[0] = op;
  147. c[1] = op >> 8;
  148. }
  149. }
  150. void asm_thumb_op32(asm_thumb_t *as, uint op1, uint op2) {
  151. byte *c = asm_thumb_get_cur_to_write_bytes(as, 4);
  152. if (c != NULL) {
  153. // little endian, op1 then op2
  154. c[0] = op1;
  155. c[1] = op1 >> 8;
  156. c[2] = op2;
  157. c[3] = op2 >> 8;
  158. }
  159. }
  160. #define OP_FORMAT_4(op, rlo_dest, rlo_src) ((op) | ((rlo_src) << 3) | (rlo_dest))
// Emit a Thumb format-4 (ALU operation) instruction: op rlo_dest, rlo_src.
// Both registers must be lo registers (r0-r7).
void asm_thumb_format_4(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src) {
    assert(rlo_dest < ASM_THUMB_REG_R8);
    assert(rlo_src < ASM_THUMB_REG_R8);
    asm_thumb_op16(as, OP_FORMAT_4(op, rlo_dest, rlo_src));
}
  166. void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) {
  167. uint op_lo;
  168. if (reg_src < 8) {
  169. op_lo = reg_src << 3;
  170. } else {
  171. op_lo = 0x40 | ((reg_src - 8) << 3);
  172. }
  173. if (reg_dest < 8) {
  174. op_lo |= reg_dest;
  175. } else {
  176. op_lo |= 0x80 | (reg_dest - 8);
  177. }
  178. // mov reg_dest, reg_src
  179. asm_thumb_op16(as, 0x4600 | op_lo);
  180. }
// Emit a 32-bit movw/movt instruction (selected by mov_op) loading the
// 16-bit immediate i16_src into reg_dest.
// if loading lo half with movw, the i16 value will be zero extended into the r32 register!
void asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) {
    assert(reg_dest < ASM_THUMB_REG_R15);
    // mov[wt] reg_dest, #i16_src
    // the immediate is scattered across the two halfwords:
    //   hw1: bit 10 <- i16 bit 11, bits 3:0 <- i16 bits 15:12
    //   hw2: bits 14:12 <- i16 bits 10:8, bits 7:0 <- i16 bits 7:0
    asm_thumb_op32(as, mov_op | ((i16_src >> 1) & 0x0400) | ((i16_src >> 12) & 0xf), ((i16_src << 4) & 0x7000) | (reg_dest << 8) | (i16_src & 0xff));
}
  187. #define OP_B_N(byte_offset) (0xe000 | (((byte_offset) >> 1) & 0x07ff))
// Emit a narrow (16-bit) unconditional branch to label.  Returns false
// only when, on the final emit pass, the target is out of range of the
// encoding (11-bit halfword offset, i.e. a signed 12-bit byte offset).
bool asm_thumb_b_n_label(asm_thumb_t *as, uint label) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    asm_thumb_op16(as, OP_B_N(rel));
    return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT12(rel);
}
  195. #define OP_BCC_N(cond, byte_offset) (0xd000 | ((cond) << 8) | (((byte_offset) >> 1) & 0x00ff))
  196. // all these bit arithmetics need coverage testing!
  197. #define OP_BCC_W_HI(cond, byte_offset) (0xf000 | ((cond) << 6) | (((byte_offset) >> 10) & 0x0400) | (((byte_offset) >> 14) & 0x003f))
  198. #define OP_BCC_W_LO(byte_offset) (0x8000 | ((byte_offset) & 0x2000) | (((byte_offset) >> 1) & 0x0fff))
// Emit a conditional branch to label: narrow 16-bit encoding (signed
// 9-bit byte offset) when wide is false, otherwise the 32-bit Thumb-2
// encoding.  Returns false only when a narrow branch is out of range on
// the final emit pass.
bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    if (!wide) {
        asm_thumb_op16(as, OP_BCC_N(cond, rel));
        return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT9(rel);
    } else {
        asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
        // NOTE(review): no range check for the wide form — presumably its
        // range is assumed sufficient for generated code; confirm for very
        // large functions
        return true;
    }
}
  211. #define OP_BL_HI(byte_offset) (0xf000 | (((byte_offset) >> 12) & 0x07ff))
  212. #define OP_BL_LO(byte_offset) (0xf800 | (((byte_offset) >> 1) & 0x07ff))
// Emit a BL (branch with link) to label using the two-halfword encoding.
// Returns false only when, on the final emit pass, the target is out of
// range (22-bit halfword offset, i.e. a signed 23-bit byte offset).
bool asm_thumb_bl_label(asm_thumb_t *as, uint label) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    asm_thumb_op32(as, OP_BL_HI(rel), OP_BL_LO(rel));
    return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT23(rel);
}
// Load an arbitrary 32-bit constant into reg_dest with movw then movt;
// always 8 bytes of code (two 32-bit instructions).
void asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) {
    // movw, movt does it in 8 bytes
    // ldr [pc, #], dw does it in 6 bytes, but we might not reach to end of code for dw
    asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32); // lo 16 bits (zero-extends)
    asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVT, reg_dest, i32 >> 16); // hi 16 bits
}
  226. void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) {
  227. if (reg_dest < 8 && UNSIGNED_FIT8(i32)) {
  228. asm_thumb_mov_rlo_i8(as, reg_dest, i32);
  229. } else if (UNSIGNED_FIT16(i32)) {
  230. asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
  231. } else {
  232. asm_thumb_mov_reg_i32(as, reg_dest, i32);
  233. }
  234. }
// i32 is stored as a full word in the code, and aligned to machine-word boundary
// TODO this is very inefficient, improve it!
void asm_thumb_mov_reg_i32_aligned(asm_thumb_t *as, uint reg_dest, int i32) {
    // align on machine-word + 2, so that after the 2-byte branch below the
    // data word lands on a 4-byte boundary
    if ((as->base.code_offset & 3) == 0) {
        asm_thumb_op16(as, ASM_THUMB_OP_NOP);
    }
    // jump over the i32 value (instruction prefetch adds 2 to PC)
    asm_thumb_op16(as, OP_B_N(2));
    // store i32 on machine-word aligned boundary
    mp_asm_base_data(&as->base, 4, i32);
    // do the actual load of the i32 value
    // NOTE(review): the register is loaded via mov immediates, not from the
    // word just emitted — presumably the embedded word exists so it can be
    // located/patched later; confirm against callers
    asm_thumb_mov_reg_i32_optimised(as, reg_dest, i32);
}
  249. #define OP_STR_TO_SP_OFFSET(rlo_dest, word_offset) (0x9000 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
  250. #define OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset) (0x9800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
  251. void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num, uint rlo_src) {
  252. assert(rlo_src < ASM_THUMB_REG_R8);
  253. int word_offset = local_num;
  254. assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0);
  255. asm_thumb_op16(as, OP_STR_TO_SP_OFFSET(rlo_src, word_offset));
  256. }
  257. void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num) {
  258. assert(rlo_dest < ASM_THUMB_REG_R8);
  259. int word_offset = local_num;
  260. assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0);
  261. asm_thumb_op16(as, OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset));
  262. }
  263. #define OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset) (0xa800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
  264. void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num) {
  265. assert(rlo_dest < ASM_THUMB_REG_R8);
  266. int word_offset = local_num;
  267. assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0);
  268. asm_thumb_op16(as, OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset));
  269. }
  270. // this could be wrong, because it should have a range of +/- 16MiB...
  271. #define OP_BW_HI(byte_offset) (0xf000 | (((byte_offset) >> 12) & 0x07ff))
  272. #define OP_BW_LO(byte_offset) (0xb800 | (((byte_offset) >> 1) & 0x07ff))
// Emit an unconditional branch to label.  A resolved backwards jump that
// fits uses the narrow 16-bit encoding; everything else (forward jumps,
// whose size is unknown on the first pass, and long backwards jumps) uses
// the wide 32-bit encoding so the instruction size is stable across passes.
void asm_thumb_b_label(asm_thumb_t *as, uint label) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    // dest == (mp_uint_t)-1 means the label is not yet resolved
    if (dest != (mp_uint_t)-1 && rel <= -4) {
        // is a backwards jump, so we know the size of the jump on the first pass
        // calculate rel assuming 12 bit relative jump
        if (SIGNED_FIT12(rel)) {
            asm_thumb_op16(as, OP_B_N(rel));
        } else {
            goto large_jump;
        }
    } else {
        // is a forwards jump, so need to assume it's large
        large_jump:
        asm_thumb_op32(as, OP_BW_HI(rel), OP_BW_LO(rel));
    }
}
// Emit a conditional branch to label.  Mirrors asm_thumb_b_label: a
// resolved backwards jump that fits uses the narrow 16-bit encoding
// (signed 9-bit byte offset); forward or long jumps use the wide 32-bit
// encoding so the instruction size is stable across passes.
void asm_thumb_bcc_label(asm_thumb_t *as, int cond, uint label) {
    mp_uint_t dest = get_label_dest(as, label);
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
    // dest == (mp_uint_t)-1 means the label is not yet resolved
    if (dest != (mp_uint_t)-1 && rel <= -4) {
        // is a backwards jump, so we know the size of the jump on the first pass
        // calculate rel assuming 9 bit relative jump
        if (SIGNED_FIT9(rel)) {
            asm_thumb_op16(as, OP_BCC_N(cond, rel));
        } else {
            goto large_jump;
        }
    } else {
        // is a forwards jump, so need to assume it's large
        large_jump:
        asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
    }
}
  309. #define OP_BLX(reg) (0x4780 | ((reg) << 3))
  310. #define OP_SVC(arg) (0xdf00 | (arg))
// Emit an indirect call to fun_ptr via register reg_temp.
// For fun_id < 32 the pointer is loaded from a word table indexed off r7
// (presumably r7 holds the function-table pointer, set up by the native
// emitter — confirm against callers); otherwise the pointer is loaded as
// an immediate with movw+movt.
void asm_thumb_bl_ind(asm_thumb_t *as, void *fun_ptr, uint fun_id, uint reg_temp) {
    /* TODO make this use less bytes
    uint rlo_base = ASM_THUMB_REG_R3;
    uint rlo_dest = ASM_THUMB_REG_R7;
    uint word_offset = 4;
    asm_thumb_op16(as, 0x0000);
    asm_thumb_op16(as, 0x6800 | (word_offset << 6) | (rlo_base << 3) | rlo_dest); // ldr rlo_dest, [rlo_base, #offset]
    asm_thumb_op16(as, 0x4780 | (ASM_THUMB_REG_R9 << 3)); // blx reg
    */

    if (fun_id < 32) {
        // load ptr to function from table, indexed by fun_id (must be in range 0-31); 4 bytes total with the blx
        asm_thumb_op16(as, ASM_THUMB_FORMAT_9_10_ENCODE(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, reg_temp, ASM_THUMB_REG_R7, fun_id));
        asm_thumb_op16(as, OP_BLX(reg_temp));
    } else {
        // load ptr to function into register using immediate; 8 bytes (movw+movt), plus 2 for the blx
        asm_thumb_mov_reg_i32(as, reg_temp, (mp_uint_t)fun_ptr);
        asm_thumb_op16(as, OP_BLX(reg_temp));
    }
}
  330. #endif // MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB