/* invokeNative_thumb.s */
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. .text
  6. .align 2
  7. #ifndef BH_PLATFORM_DARWIN
  8. .globl invokeNative
  9. .type invokeNative, function
  10. invokeNative:
  11. #else
  12. .globl _invokeNative
  13. _invokeNative:
  14. #endif /* end of BH_PLATFORM_DARWIN */
  15. /*
  16. * Arguments passed in:
  17. *
  18. * r0 function ptr
  19. * r1 argv
  20. * r2 argc
  21. */
  22. push {r4, r5, r6, r7}
  23. push {lr}
  24. sub sp, sp, #4 /* make sp 8 byte aligned */
  25. mov ip, r0 /* ip = function ptr */
  26. mov r4, r1 /* r4 = argv */
  27. mov r5, r2 /* r5 = argc */
  28. cmp r5, #1 /* at least one argument required: exec_env */
  29. blt return
  30. mov r6, #0 /* increased stack size */
  31. ldr r0, [r4] /* r0 = argv[0] = exec_env */
  32. add r4, r4, #4 /* r4 += 4 */
  33. cmp r5, #1
  34. beq call_func
  35. ldr r1, [r4] /* r1 = argv[1] */
  36. add r4, r4, #4
  37. cmp r5, #2
  38. beq call_func
  39. ldr r2, [r4] /* r2 = argv[2] */
  40. add r4, r4, #4
  41. cmp r5, #3
  42. beq call_func
  43. ldr r3, [r4] /* r3 = argv[3] */
  44. add r4, r4, #4
  45. cmp r5, #4
  46. beq call_func
  47. sub r5, r5, #4 /* argc -= 4, now we have r0 ~ r3 */
  48. /* Ensure address is 8 byte aligned */
  49. lsl r6, r5, #2 /* r6 = argc * 4 */
  50. mov r7, #7
  51. add r6, r6, r7 /* r6 = (r6 + 7) & ~7 */
  52. bic r6, r6, r7
  53. add r6, r6, #4 /* +4 because odd(5) registers are in stack */
  54. mov r7, sp
  55. sub r7, r7, r6 /* reserved stack space for left arguments */
  56. mov sp, r7
  57. mov lr, r2 /* save r2 */
  58. loop_args: /* copy left arguments to stack */
  59. cmp r5, #0
  60. beq call_func1
  61. ldr r2, [r4]
  62. add r4, r4, #4
  63. str r2, [r7]
  64. add r7, r7, #4
  65. sub r5, r5, #1
  66. b loop_args
  67. call_func1:
  68. mov r2, lr /* restore r2 */
  69. call_func:
  70. blx ip
  71. add sp, sp, r6 /* restore sp */
  72. return:
  73. add sp, sp, #4 /* make sp 8 byte aligned */
  74. pop {r3}
  75. pop {r4, r5, r6, r7}
  76. mov lr, r3
  77. bx lr