invokeNative_thumb_vfp.s

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */
        .text
        .align  2
#ifndef BH_PLATFORM_DARWIN
        .globl  invokeNative
        .type   invokeNative, function
invokeNative:
#else
        .globl  _invokeNative
_invokeNative:
#endif /* end of BH_PLATFORM_DARWIN */
/*
 * Arguments passed in:
 *
 * r0 function ptr
 * r1 argv
 * r2 nstacks
 */
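/*
 * Illustrative sketch only: the C-side declaration matching the register
 * usage above is assumed to look roughly like
 *
 *   void invokeNative(void (*func_ptr)(void), uint32 argv[], uint32 nstacks);
 *
 * where argv[] packs the integer args, the float/double args and the
 * stack args back to back, in that order, as unpacked below.
 */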
        push    {r4, r5, r6, r7}
        push    {lr}
        sub     sp, sp, #4      /* make sp 8 byte aligned */
        mov     ip, r0          /* ip = function ptr */
        mov     r4, r1          /* r4 = argv */
        mov     r5, r2          /* r5 = nstacks */
        mov     r7, sp
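/*
 * r7 keeps the sp value that is restored after the call (it is clobbered
 * and reloaded on the stack-argument path below); lr has already been
 * saved on the stack, so it is free to be used as a scratch register.
 */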
        /* Fill all int args */
        ldr     r0, [r4, #0]    /* r0 = *(int*)&argv[0] = exec_env */
        ldr     r1, [r4, #4]    /* r1 = *(int*)&argv[1] */
        ldr     r2, [r4, #8]    /* r2 = *(int*)&argv[2] */
        ldr     r3, [r4, #12]   /* r3 = *(int*)&argv[3] */
        add     r4, r4, #16     /* r4 points to float args */
        /* Fill all float/double args to 16 single-precision registers, s0-s15, */
        /* which may also be accessed as 8 double-precision registers, d0-d7 (with */
        /* d0 overlapping s0, s1; d1 overlapping s2, s3; etc). */
        vldr    s0, [r4, #0]    /* s0 = *(float*)&argv[4] */
        vldr    s1, [r4, #4]
        vldr    s2, [r4, #8]
        vldr    s3, [r4, #12]
        vldr    s4, [r4, #16]
        vldr    s5, [r4, #20]
        vldr    s6, [r4, #24]
        vldr    s7, [r4, #28]
        vldr    s8, [r4, #32]
        vldr    s9, [r4, #36]
        vldr    s10, [r4, #40]
        vldr    s11, [r4, #44]
        vldr    s12, [r4, #48]
        vldr    s13, [r4, #52]
        vldr    s14, [r4, #56]
        vldr    s15, [r4, #60]
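/*
 * Layout of argv as consumed above and below:
 *   bytes  0..15  -> r0-r3 (integer args, argv[0] = exec_env)
 *   bytes 16..79  -> s0-s15 / d0-d7 (float/double args)
 *   bytes 80..    -> stack args, copied below when nstacks > 0
 */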
        /* Call the function directly if there are no stack args */
        cmp     r5, #0
        beq     call_func
        mov     lr, r2          /* save r2 */
        /* Fill all stack args: reserve stack space and fill one by one */
        add     r4, r4, #64     /* r4 points to stack args */
        mov     r6, sp
        mov     r7, #7
        bic     r6, r6, r7      /* Ensure stack is 8 byte aligned */
        lsl     r2, r5, #2      /* r2 = nstacks * 4 */
        add     r2, r2, #7      /* r2 = (r2 + 7) & ~7 */
        bic     r2, r2, r7
        sub     r6, r6, r2      /* reserve stack space for stack arguments */
        mov     r7, sp
        mov     sp, r6
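/*
 * Worked example with illustrative numbers: for nstacks = 3 the code above
 * computes 3 * 4 = 12 bytes, rounds up to 16 ((12 + 7) & ~7) so that sp
 * stays 8 byte aligned, and the loop below copies the 12 bytes actually used.
 */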
loop_stack_args:                /* copy stack arguments to stack */
        cmp     r5, #0
        beq     call_func1
        ldr     r2, [r4]        /* Note: caller should ensure int64 and */
        add     r4, r4, #4      /* double are placed at 8 byte aligned addresses */
        str     r2, [r6]
        add     r6, r6, #4
        sub     r5, r5, #1
        b       loop_stack_args
call_func1:
        mov     r2, lr          /* restore r2 */
call_func:
        blx     ip
        mov     sp, r7          /* restore sp */
return:
        add     sp, sp, #4      /* undo the 4 byte alignment adjustment */
        pop     {r3}
        pop     {r4, r5, r6, r7}
        mov     lr, r3
        bx      lr