/* invokeNative_arm.s */
  1. /*
  2. * Copyright (C) 2019 Intel Corporation. All rights reserved.
  3. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. */
  5. .text
  6. .align 2
  7. .global invokeNative
  8. .type invokeNative,function
  9. /*
  10. * Arguments passed in:
  11. *
  12. * r0 function ptr
  13. * r1 argv
  14. * r2 argc
  15. */
  16. invokeNative:
  17. stmfd sp!, {r4, r5, r6, r7, lr}
  18. mov ip, r0 /* ip = function ptr */
  19. mov r4, r1 /* r4 = argv */
  20. mov r5, r2 /* r5 = argc */
  21. cmp r5, #1 /* at least one argument required: exec_env */
  22. blt return
  23. mov r6, #0 /* increased stack size */
  24. ldr r0, [r4], #4 /* r0 = argv[0] = exec_env */
  25. cmp r5, #1
  26. beq call_func
  27. ldr r1, [r4], #4 /* r1 = argv[1] */
  28. cmp r5, #2
  29. beq call_func
  30. ldr r2, [r4], #4 /* r2 = argv[2] */
  31. cmp r5, #3
  32. beq call_func
  33. ldr r3, [r4], #4 /* r3 = argv[3] */
  34. cmp r5, #4
  35. beq call_func
  36. sub r5, r5, #4 /* argc -= 4, now we have r0 ~ r3 */
  37. /* Ensure address is 8 byte aligned */
  38. mov r6, r5, lsl#2 /* r6 = argc * 4 */
  39. add r6, r6, #7 /* r6 = (r6 + 7) & ~7 */
  40. bic r6, r6, #7
  41. add r6, r6, #4 /* +4 because odd(5) registers are in stack */
  42. sub sp, sp, r6 /* reserved stack space for left arguments */
  43. mov r7, sp
  44. loop_args: /* copy left arguments to stack */
  45. cmp r5, #0
  46. beq call_func
  47. ldr lr, [r4], #4
  48. str lr, [r7], #4
  49. sub r5, r5, #1
  50. b loop_args
  51. call_func:
  52. blx ip
  53. add sp, sp, r6 /* restore sp */
  54. return:
  55. ldmfd sp!, {r4, r5, r6, r7, lr}
  56. bx lr