/* invokeNative_arm.s */
/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 */
  5. .text
  6. .align 2
  7. .global invokeNative
  8. .type invokeNative,function
  9. /*
  10. * Arguments passed in:
  11. *
  12. * r0 function ptr
  13. * r1 argv
  14. * r2 argc
  15. */
  16. invokeNative:
  17. stmfd sp!, {r4, r5, r6, r7, lr}
  18. sub sp, sp, #4 /* make sp 8 byte aligned */
  19. mov ip, r0 /* ip = function ptr */
  20. mov r4, r1 /* r4 = argv */
  21. mov r5, r2 /* r5 = argc */
  22. cmp r5, #1 /* at least one argument required: exec_env */
  23. blt return
  24. mov r6, #0 /* increased stack size */
  25. ldr r0, [r4], #4 /* r0 = argv[0] = exec_env */
  26. cmp r5, #1
  27. beq call_func
  28. ldr r1, [r4], #4 /* r1 = argv[1] */
  29. cmp r5, #2
  30. beq call_func
  31. ldr r2, [r4], #4 /* r2 = argv[2] */
  32. cmp r5, #3
  33. beq call_func
  34. ldr r3, [r4], #4 /* r3 = argv[3] */
  35. cmp r5, #4
  36. beq call_func
  37. sub r5, r5, #4 /* argc -= 4, now we have r0 ~ r3 */
  38. /* Ensure address is 8 byte aligned */
  39. mov r6, r5, lsl#2 /* r6 = argc * 4 */
  40. add r6, r6, #7 /* r6 = (r6 + 7) & ~7 */
  41. bic r6, r6, #7
  42. sub sp, sp, r6 /* reserved stack space for left arguments */
  43. mov r7, sp
  44. loop_args: /* copy left arguments to stack */
  45. cmp r5, #0
  46. beq call_func
  47. ldr lr, [r4], #4
  48. str lr, [r7], #4
  49. sub r5, r5, #1
  50. b loop_args
  51. call_func:
  52. blx ip
  53. add sp, sp, r6 /* restore sp */
  54. return:
  55. add sp, sp, #4
  56. ldmfd sp!, {r4, r5, r6, r7, lr}
  57. bx lr