invokeNative_em64.s

/*
 * Copyright (C) 2019 Intel Corporation. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
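/*
 * A possible C-side view of this routine, sketched for reference only
 * (the actual declaration lives in the runtime's C sources; the names and
 * types below are assumptions, not the real header):
 *
 *   typedef void (*GenericFunctionPointer)(void);
 *
 *   void invokeNative(GenericFunctionPointer func_ptr, uint64_t argv[],
 *                     uint64_t n_stacks);
 *
 * Under the System V AMD64 calling convention this places func_ptr in %rdi,
 * argv in %rsi and n_stacks in %rdx, matching the register comments below.
 */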
        .text
        .align 2
        .globl invokeNative
        .type invokeNative, @function

invokeNative:
        /* rdi - function ptr */
        /* rsi - argv */
        /* rdx - n_stacks */
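        /*
         * The offsets used below assume argv is packed as:
         *   argv + 0x00 .. 0x38 : eight 64-bit slots loaded into xmm0-xmm7
         *   argv + 0x40 .. 0x68 : six 64-bit slots loaded into rdi, rsi,
         *                         rdx, rcx, r8, r9
         *   argv + 0x70 ..      : n_stacks further 64-bit slots copied onto
         *                         the stack
         */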
        push %rbp
        mov %rsp, %rbp

        mov %rdx, %r10
        mov %rsp, %r11      /* Check that stack is aligned on */
        and $8, %r11        /* 16 bytes. This code may be removed */
        je check_stack_succ /* when we are sure that compiler always */
        int3                /* calls us with aligned stack */
check_stack_succ:
        mov %r10, %r11      /* Align stack on 16 bytes before pushing */
        and $1, %r11        /* stack arguments in case we have an odd */
        shl $3, %r11        /* number of stack arguments */
        sub %r11, %rsp
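        /*
         * Combined with the pushes below (8 bytes per stack argument), the
         * total stack adjustment is a multiple of 16, so %rsp remains
         * 16-byte aligned at the call as the SysV x86-64 ABI requires.
         */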
        /* store memory args */
        movq %rdi, %r11     /* func ptr */
        movq %r10, %rcx     /* counter */
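        /*
         * %r10 is set to the offset from %rsp to the last stack argument in
         * argv (stack arguments start at argv + 64 + 48). Each push lowers
         * %rsp by 8, so 0(%rsp,%r10) steps backwards through argv and the
         * stack arguments end up pushed in reverse order, leaving the first
         * one at the lowest address, as the ABI expects.
         */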
        lea 64+48-8(%rsi,%rcx,8), %r10
        sub %rsp, %r10
        cmpq $0, %rcx
        je push_args_end
push_args:
        push 0(%rsp,%r10)
        loop push_args
push_args_end:
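        /*
         * All register argument slots are loaded unconditionally, so argv is
         * expected to always provide the full 64 + 48 bytes of fp and int
         * slots even when the callee takes fewer arguments.
         */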
        /* fill all fp args */
        movq 0x00(%rsi), %xmm0
        movq 0x08(%rsi), %xmm1
        movq 0x10(%rsi), %xmm2
        movq 0x18(%rsi), %xmm3
        movq 0x20(%rsi), %xmm4
        movq 0x28(%rsi), %xmm5
        movq 0x30(%rsi), %xmm6
        movq 0x38(%rsi), %xmm7
        /* fill all int args */
        movq 0x40(%rsi), %rdi
        movq 0x50(%rsi), %rdx
        movq 0x58(%rsi), %rcx
        movq 0x60(%rsi), %r8
        movq 0x68(%rsi), %r9
        movq 0x48(%rsi), %rsi /* load %rsi last since it is the argv base */

        call *%r11
        leave
        ret