asmarm.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368
  1. /*
  2. * This file is part of the MicroPython project, http://micropython.org/
  3. *
  4. * The MIT License (MIT)
  5. *
  6. * Copyright (c) 2014 Fabian Vogt
  7. * Copyright (c) 2013, 2014 Damien P. George
  8. *
  9. * Permission is hereby granted, free of charge, to any person obtaining a copy
  10. * of this software and associated documentation files (the "Software"), to deal
  11. * in the Software without restriction, including without limitation the rights
  12. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  13. * copies of the Software, and to permit persons to whom the Software is
  14. * furnished to do so, subject to the following conditions:
  15. *
  16. * The above copyright notice and this permission notice shall be included in
  17. * all copies or substantial portions of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  22. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  23. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  24. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  25. * THE SOFTWARE.
  26. */
  27. #include <stdio.h>
  28. #include <assert.h>
  29. #include <string.h>
  30. #include "py/mpconfig.h"
  31. // wrapper around everything in this file
  32. #if MICROPY_EMIT_ARM
  33. #include "py/asmarm.h"
  34. #define SIGNED_FIT24(x) (((x) & 0xff800000) == 0) || (((x) & 0xff000000) == 0xff000000)
// Called at the end of each assembler pass.  After the final (EMIT) pass the
// generated machine code is complete, so when running natively on ARM the
// data cache must be cleaned and the instruction cache invalidated before
// the freshly written code can be executed.
void asm_arm_end_pass(asm_arm_t *as) {
    if (as->base.pass == MP_ASM_PASS_EMIT) {
        #ifdef __arm__
        // flush I- and D-cache
        asm volatile(
            "0:"
            "mrc p15, 0, r15, c7, c10, 3\n" // test-and-clean D-cache; updates flags until clean
            "bne 0b\n" // loop until the whole D-cache has been cleaned
            "mov r0, #0\n"
            "mcr p15, 0, r0, c7, c7, 0\n" // invalidate both I- and D-cache (CP15 c7)
            : : : "r0", "cc");
        #endif
    }
}
  49. // Insert word into instruction flow
  50. STATIC void emit(asm_arm_t *as, uint op) {
  51. uint8_t *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4);
  52. if (c != NULL) {
  53. *(uint32_t*)c = op;
  54. }
  55. }
  56. // Insert word into instruction flow, add "ALWAYS" condition code
  57. STATIC void emit_al(asm_arm_t *as, uint op) {
  58. emit(as, op | ASM_ARM_CC_AL);
  59. }
  60. // Basic instructions without condition code
  61. STATIC uint asm_arm_op_push(uint reglist) {
  62. // stmfd sp!, {reglist}
  63. return 0x92d0000 | (reglist & 0xFFFF);
  64. }
  65. STATIC uint asm_arm_op_pop(uint reglist) {
  66. // ldmfd sp!, {reglist}
  67. return 0x8bd0000 | (reglist & 0xFFFF);
  68. }
  69. STATIC uint asm_arm_op_mov_reg(uint rd, uint rn) {
  70. // mov rd, rn
  71. return 0x1a00000 | (rd << 12) | rn;
  72. }
  73. STATIC uint asm_arm_op_mov_imm(uint rd, uint imm) {
  74. // mov rd, #imm
  75. return 0x3a00000 | (rd << 12) | imm;
  76. }
  77. STATIC uint asm_arm_op_mvn_imm(uint rd, uint imm) {
  78. // mvn rd, #imm
  79. return 0x3e00000 | (rd << 12) | imm;
  80. }
  81. STATIC uint asm_arm_op_add_imm(uint rd, uint rn, uint imm) {
  82. // add rd, rn, #imm
  83. return 0x2800000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
  84. }
  85. STATIC uint asm_arm_op_add_reg(uint rd, uint rn, uint rm) {
  86. // add rd, rn, rm
  87. return 0x0800000 | (rn << 16) | (rd << 12) | rm;
  88. }
  89. STATIC uint asm_arm_op_sub_imm(uint rd, uint rn, uint imm) {
  90. // sub rd, rn, #imm
  91. return 0x2400000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
  92. }
  93. STATIC uint asm_arm_op_sub_reg(uint rd, uint rn, uint rm) {
  94. // sub rd, rn, rm
  95. return 0x0400000 | (rn << 16) | (rd << 12) | rm;
  96. }
  97. STATIC uint asm_arm_op_mul_reg(uint rd, uint rm, uint rs) {
  98. // mul rd, rm, rs
  99. assert(rd != rm);
  100. return 0x0000090 | (rd << 16) | (rs << 8) | rm;
  101. }
  102. STATIC uint asm_arm_op_and_reg(uint rd, uint rn, uint rm) {
  103. // and rd, rn, rm
  104. return 0x0000000 | (rn << 16) | (rd << 12) | rm;
  105. }
  106. STATIC uint asm_arm_op_eor_reg(uint rd, uint rn, uint rm) {
  107. // eor rd, rn, rm
  108. return 0x0200000 | (rn << 16) | (rd << 12) | rm;
  109. }
  110. STATIC uint asm_arm_op_orr_reg(uint rd, uint rn, uint rm) {
  111. // orr rd, rn, rm
  112. return 0x1800000 | (rn << 16) | (rd << 12) | rm;
  113. }
  114. void asm_arm_bkpt(asm_arm_t *as) {
  115. // bkpt #0
  116. emit_al(as, 0x1200070);
  117. }
// locals:
//  - stored on the stack in ascending order
//  - numbered 0 through num_locals-1
//  - SP points to first local
//
//  | SP
//  v
//  l0  l1  l2 ...  l(n-1)
//  ^                ^
//  | low address    | high address in RAM

// Emit the function prologue: push the callee-used registers (R1-R8 plus LR)
// and reserve 8-byte-aligned stack space for the locals when needed.
void asm_arm_entry(asm_arm_t *as, int num_locals) {
    if (num_locals < 0) {
        num_locals = 0;
    }
    as->stack_adjust = 0;
    // Registers saved in the prologue and restored by asm_arm_exit.
    as->push_reglist = 1 << ASM_ARM_REG_R1
        | 1 << ASM_ARM_REG_R2
        | 1 << ASM_ARM_REG_R3
        | 1 << ASM_ARM_REG_R4
        | 1 << ASM_ARM_REG_R5
        | 1 << ASM_ARM_REG_R6
        | 1 << ASM_ARM_REG_R7
        | 1 << ASM_ARM_REG_R8;
    // Only adjust the stack if there are more locals than usable registers
    if (num_locals > 3) {
        as->stack_adjust = num_locals * 4;
        // Align stack to 8 bytes
        if (num_locals & 1) {
            as->stack_adjust += 4;
        }
    }
    emit_al(as, asm_arm_op_push(as->push_reglist | 1 << ASM_ARM_REG_LR));
    if (as->stack_adjust > 0) {
        // NOTE(review): asm_arm_op_sub_imm truncates its immediate to 8 bits,
        // so stack_adjust > 255 (roughly num_locals > 63) would emit a wrong
        // adjustment — confirm callers bound num_locals accordingly.
        emit_al(as, asm_arm_op_sub_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
    }
}
  154. void asm_arm_exit(asm_arm_t *as) {
  155. if (as->stack_adjust > 0) {
  156. emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
  157. }
  158. emit_al(as, asm_arm_op_pop(as->push_reglist | (1 << ASM_ARM_REG_PC)));
  159. }
  160. void asm_arm_push(asm_arm_t *as, uint reglist) {
  161. emit_al(as, asm_arm_op_push(reglist));
  162. }
  163. void asm_arm_pop(asm_arm_t *as, uint reglist) {
  164. emit_al(as, asm_arm_op_pop(reglist));
  165. }
  166. void asm_arm_mov_reg_reg(asm_arm_t *as, uint reg_dest, uint reg_src) {
  167. emit_al(as, asm_arm_op_mov_reg(reg_dest, reg_src));
  168. }
  169. void asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm) {
  170. // TODO: There are more variants of immediate values
  171. if ((imm & 0xFF) == imm) {
  172. emit_al(as, asm_arm_op_mov_imm(rd, imm));
  173. } else if (imm < 0 && imm >= -256) {
  174. // mvn is "move not", not "move negative"
  175. emit_al(as, asm_arm_op_mvn_imm(rd, ~imm));
  176. } else {
  177. //Insert immediate into code and jump over it
  178. emit_al(as, 0x59f0000 | (rd << 12)); // ldr rd, [pc]
  179. emit_al(as, 0xa000000); // b pc
  180. emit(as, imm);
  181. }
  182. }
  183. void asm_arm_mov_local_reg(asm_arm_t *as, int local_num, uint rd) {
  184. // str rd, [sp, #local_num*4]
  185. emit_al(as, 0x58d0000 | (rd << 12) | (local_num << 2));
  186. }
  187. void asm_arm_mov_reg_local(asm_arm_t *as, uint rd, int local_num) {
  188. // ldr rd, [sp, #local_num*4]
  189. emit_al(as, 0x59d0000 | (rd << 12) | (local_num << 2));
  190. }
  191. void asm_arm_cmp_reg_i8(asm_arm_t *as, uint rd, int imm) {
  192. // cmp rd, #imm
  193. emit_al(as, 0x3500000 | (rd << 16) | (imm & 0xFF));
  194. }
  195. void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn) {
  196. // cmp rd, rn
  197. emit_al(as, 0x1500000 | (rd << 16) | rn);
  198. }
  199. void asm_arm_setcc_reg(asm_arm_t *as, uint rd, uint cond) {
  200. emit(as, asm_arm_op_mov_imm(rd, 1) | cond); // movCOND rd, #1
  201. emit(as, asm_arm_op_mov_imm(rd, 0) | (cond ^ (1 << 28))); // mov!COND rd, #0
  202. }
  203. void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
  204. // add rd, rn, rm
  205. emit_al(as, asm_arm_op_add_reg(rd, rn, rm));
  206. }
  207. void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
  208. // sub rd, rn, rm
  209. emit_al(as, asm_arm_op_sub_reg(rd, rn, rm));
  210. }
  211. void asm_arm_mul_reg_reg_reg(asm_arm_t *as, uint rd, uint rs, uint rm) {
  212. // rs and rm are swapped because of restriction rd!=rm
  213. // mul rd, rm, rs
  214. emit_al(as, asm_arm_op_mul_reg(rd, rm, rs));
  215. }
  216. void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
  217. // and rd, rn, rm
  218. emit_al(as, asm_arm_op_and_reg(rd, rn, rm));
  219. }
  220. void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
  221. // eor rd, rn, rm
  222. emit_al(as, asm_arm_op_eor_reg(rd, rn, rm));
  223. }
  224. void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
  225. // orr rd, rn, rm
  226. emit_al(as, asm_arm_op_orr_reg(rd, rn, rm));
  227. }
  228. void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num) {
  229. // add rd, sp, #local_num*4
  230. emit_al(as, asm_arm_op_add_imm(rd, ASM_ARM_REG_SP, local_num << 2));
  231. }
  232. void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs) {
  233. // mov rd, rd, lsl rs
  234. emit_al(as, 0x1a00010 | (rd << 12) | (rs << 8) | rd);
  235. }
  236. void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
  237. // mov rd, rd, asr rs
  238. emit_al(as, 0x1a00050 | (rd << 12) | (rs << 8) | rd);
  239. }
  240. void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
  241. // ldr rd, [rn, #off]
  242. emit_al(as, 0x5900000 | (rn << 16) | (rd << 12) | byte_offset);
  243. }
  244. void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn) {
  245. // ldrh rd, [rn]
  246. emit_al(as, 0x1d000b0 | (rn << 16) | (rd << 12));
  247. }
  248. void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn) {
  249. // ldrb rd, [rn]
  250. emit_al(as, 0x5d00000 | (rn << 16) | (rd << 12));
  251. }
  252. void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset) {
  253. // str rd, [rm, #off]
  254. emit_al(as, 0x5800000 | (rm << 16) | (rd << 12) | byte_offset);
  255. }
  256. void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm) {
  257. // strh rd, [rm]
  258. emit_al(as, 0x1c000b0 | (rm << 16) | (rd << 12));
  259. }
  260. void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm) {
  261. // strb rd, [rm]
  262. emit_al(as, 0x5c00000 | (rm << 16) | (rd << 12));
  263. }
  264. void asm_arm_str_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
  265. // str rd, [rm, rn, lsl #2]
  266. emit_al(as, 0x7800100 | (rm << 16) | (rd << 12) | rn);
  267. }
// Emulate "strh rd, [rm, rn, lsl #1]".  The halfword store instruction has no
// scaled-register addressing mode, so the index is first scaled into R8.
// NOTE: clobbers R8.
void asm_arm_strh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // strh doesn't support scaled register index
    emit_al(as, 0x1a00080 | (ASM_ARM_REG_R8 << 12) | rn); // mov r8, rn, lsl #1
    emit_al(as, 0x18000b0 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8); // strh rd, [rm, r8]
}
  273. void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
  274. // strb rd, [rm, rn]
  275. emit_al(as, 0x7c00000 | (rm << 16) | (rd << 12) | rn);
  276. }
// Emit a conditional branch to the given label.
// cond is a condition code already positioned in bits 28-31 (ASM_ARM_CC_x).
// On early passes the label offset may not be final yet; the emitted word is
// corrected on the final pass — TODO confirm against mp_asm_base semantics.
void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label) {
    assert(label < as->base.max_num_labels);
    mp_uint_t dest = as->base.label_offsets[label];
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 8; // account for instruction prefetch, PC is 8 bytes ahead of this instruction
    rel >>= 2; // in ARM mode the branch target is 32-bit aligned, so the 2 LSB are omitted
    if (SIGNED_FIT24(rel)) {
        // B<cond> with the signed 24-bit word offset in the low bits.
        emit(as, cond | 0xa000000 | (rel & 0xffffff));
    } else {
        printf("asm_arm_bcc: branch does not fit in 24 bits\n");
    }
}
  289. void asm_arm_b_label(asm_arm_t *as, uint label) {
  290. asm_arm_bcc_label(as, ASM_ARM_CC_AL, label);
  291. }
// Emit an indirect call to fun_ptr, setting LR to the return address.
// fun_id indexes a function table whose base is presumably held in R7 by the
// emitter — verify against the native emitter's register conventions.
// Both paths rely on PC reading as "this instruction + 8" due to prefetch.
void asm_arm_bl_ind(asm_arm_t *as, void *fun_ptr, uint fun_id, uint reg_temp) {
    // If the table offset fits into the ldr instruction
    if (fun_id < (0x1000 / 4)) {
        // lr = pc reads PC+8, i.e. the instruction after the ldr below.
        emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_LR, ASM_ARM_REG_PC)); // mov lr, pc
        emit_al(as, 0x597f000 | (fun_id << 2)); // ldr pc, [r7, #fun_id*4]
        return;
    }
    // Otherwise embed the raw pointer in the instruction stream and load it
    // PC-relative into reg_temp.
    emit_al(as, 0x59f0004 | (reg_temp << 12)); // ldr rd, [pc, #4]
    // Set lr after fun_ptr
    emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_LR, ASM_ARM_REG_PC, 4)); // add lr, pc, #4
    emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_PC, reg_temp)); // mov pc, reg_temp
    emit(as, (uint) fun_ptr);
}
  305. #endif // MICROPY_EMIT_ARM