/* invokeNative_arm_vfp.s */
/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */

        .text
        .align  2
#ifndef BH_PLATFORM_DARWIN
        .globl invokeNative
        .type  invokeNative, function
invokeNative:
#else
        .globl _invokeNative
_invokeNative:
#endif /* end of BH_PLATFORM_DARWIN */

/*
 * Arguments passed in:
 *
 * r0 function ptr
 * r1 argv
 * r2 nstacks
 */
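/*
 * A C-side sketch of this entry point (the typedef name is illustrative,
 * not defined in this file):
 *
 *   typedef void (*GenericFunctionPointer)(void);
 *   void invokeNative(GenericFunctionPointer func_ptr,  // r0
 *                     uint32_t argv[],                  // r1
 *                     uint32_t nstacks);                // r2
 *
 * argv[] holds, in order: 4 words for the core-register arguments,
 * 16 words for the VFP arguments, then nstacks words of stack arguments.
 */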

        stmfd   sp!, {r4, r5, r6, r7, lr}
        sub     sp, sp, #4      /* make sp 8 byte aligned */
        mov     ip, r0          /* ip = function ptr */
        mov     r4, r1          /* r4 = argv */
        mov     r5, r2          /* r5 = nstacks */
        mov     r6, sp
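        /* The AAPCS requires 8-byte stack alignment at public interfaces.
           The stmfd above pushed five registers (20 bytes), leaving sp
           4 bytes off, so the sub sp, #4 padding restores alignment; r6
           remembers this sp so it can be restored after the call. */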

        /* Fill all int args */
        ldr     r0, [r4], #4    /* r0 = *(int*)&argv[0] = exec_env */
        ldr     r1, [r4], #4    /* r1 = *(int*)&argv[1] */
        ldr     r2, [r4], #4    /* r2 = *(int*)&argv[2] */
        ldr     r3, [r4], #4    /* r3 = *(int*)&argv[3] */
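        /* Per the AAPCS, the first four 32-bit integer/pointer arguments go
           in r0-r3. Each ldr post-increments r4, so it now points at
           argv[4], the start of the VFP argument area. */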

        /* Fill all float/double args to 16 single-precision registers, s0-s15, */
        /* which may also be accessed as 8 double-precision registers, d0-d7 (with */
        /* d0 overlapping s0, s1; d1 overlapping s2, s3; etc). */
        vldr    s0, [r4, #0]    /* s0 = *(float*)&argv[4] */
        vldr    s1, [r4, #4]
        vldr    s2, [r4, #8]
        vldr    s3, [r4, #12]
        vldr    s4, [r4, #16]
        vldr    s5, [r4, #20]
        vldr    s6, [r4, #24]
        vldr    s7, [r4, #28]
        vldr    s8, [r4, #32]
        vldr    s9, [r4, #36]
        vldr    s10, [r4, #40]
        vldr    s11, [r4, #44]
        vldr    s12, [r4, #48]
        vldr    s13, [r4, #52]
        vldr    s14, [r4, #56]
        vldr    s15, [r4, #60]
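        /* The VFP area is a fixed 64 bytes (16 x 4): a double occupies two
           consecutive words, so e.g. argv[4..5] land in d0. Slots beyond the
           actual FP arguments are loaded anyway, which is harmless since the
           callee only reads the registers its prototype declares; note this
           does assume the caller always provides the full 20-word argv. */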

        /* Directly call the function if no args in stack */
        cmp     r5, #0
        beq     call_func

        /* Fill all stack args: reserve stack space and fill one by one */
        add     r4, r4, #64     /* r4 points to stack args */
        bic     sp, sp, #7      /* ensure stack is 8 byte aligned */
        mov     r7, r5, lsl #2  /* r7 = nstacks * 4 */
        add     r7, r7, #7      /* r7 = (r7 + 7) & ~7 */
        bic     r7, r7, #7
        sub     sp, sp, r7      /* reserve stack space for stack arguments */
        mov     r7, sp
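        /* Worked example: nstacks = 5 gives r7 = 20, rounded up to 24 by the
           add/bic pair, so sp drops by 24 bytes and stays 8-byte aligned.
           r7 is then reused as the write cursor for the copy loop below. */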

loop_stack_args:                /* copy stack arguments to stack */
        cmp     r5, #0
        beq     call_func
        ldr     lr, [r4], #4    /* Note: caller should ensure int64 and */
        str     lr, [r7], #4    /* double are placed at 8-byte-aligned addresses */
        sub     r5, r5, #1
        b       loop_stack_args
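
        /* The call below goes through ip (r12): under the AAPCS it is the
           intra-procedure-call scratch register, so it can hold the target
           without clobbering any argument register. A register-form blx also
           switches to Thumb if bit 0 of the target address is set. */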
call_func:
        blx     ip
        mov     sp, r6          /* restore the sp saved before stack-arg setup */
return:
        add     sp, sp, #4      /* pop the 4-byte alignment padding */
        ldmfd   sp!, {r4, r5, r6, r7, lr}
        bx      lr
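
/*
 * Hypothetical caller-side sketch (all names here are illustrative, not
 * defined in this file). To invoke f(exec_env, 42, 3.5) through this
 * trampoline, a caller could pack argv exactly as the register-fill code
 * above expects: 4 core words, then 16 VFP words, then any stack words.
 *
 *   uint32_t argv[20] = { 0 };                // no stack args: nstacks = 0
 *   argv[0] = (uint32_t)(uintptr_t)exec_env;  // -> r0
 *   argv[1] = 42;                             // -> r1
 *   double d = 3.5;
 *   memcpy(&argv[4], &d, sizeof(d));          // -> d0 (s0/s1)
 *   invokeNative((GenericFunctionPointer)f, argv, 0);
 */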

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif