
Refine interpreter call native process and memory boundary check (#124)

wenyongh 6 years ago
parent
commit
42dc2d65a1

+ 54 - 56
core/iwasm/runtime/vmcore-wasm/invokeNative_em64.s

@@ -1,72 +1,70 @@
-//    Licensed to the Apache Software Foundation (ASF) under one or more
-//    contributor license agreements.  See the NOTICE file distributed with
-//    this work for additional information regarding copyright ownership.
-//    The ASF licenses this file to You under the Apache License, Version 2.0
-//    (the "License"); you may not use this file except in compliance with
-//    the License.  You may obtain a copy of the License at
-// 
-//      http://www.apache.org/licenses/LICENSE-2.0
-// 
-//   Unless required by applicable law or agreed to in writing, software
-//   distributed under the License is distributed on an "AS IS" BASIS,
-//   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//   See the License for the specific language governing permissions and
-//   limitations under the License.
-//
-//   Author: Ivan Volosyuk
-//
+/*
+ * Copyright (C) 2019 Intel Corporation.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
     .text
     .align 2
 .globl invokeNative
     .type    invokeNative, @function
 invokeNative:
-    /*  rdi - memory */
-    /*  rsi - n fp args */
-    /*  rdx - n mem args */
-    /*  rcx - function ptr */
+    /*  rdi - function ptr */
+    /*  rsi - argv */
+    /*  rdx - n_stacks */

     push %rbp
     mov %rsp, %rbp

-    /* cycle to fill all fp args */
-    movq 8(%rdi), %xmm0
-    movq 16(%rdi), %xmm1
-    movq 24(%rdi), %xmm2
-    movq 32(%rdi), %xmm3
-    movq 40(%rdi), %xmm4
-    movq 48(%rdi), %xmm5
-    movq 56(%rdi), %xmm6
-    movq 64(%rdi), %xmm7
-
-    mov %rsp, %r10 /* Check that stack is aligned on */
-    and $8, %r10   /* 16 bytes. This code may be removed */
-    jz no_abort    /* when we are sure that compiler always */
-    int3           /* calls us with aligned stack */
-no_abort:
-    mov %rdx, %r10 /* Align stack on 16 bytes before pushing */
-    and $1, %r10   /* stack arguments in case we have an odd */
-    shl $3, %r10   /* number of stack arguments */
-    sub %r10, %rsp
+    mov %rdx, %r10
+    mov %rsp, %r11      /* Check that stack is aligned on */
+    and $8, %r11        /* 16 bytes. This code may be removed */
+    je check_stack_succ /* when we are sure that compiler always */
+    int3                /* calls us with aligned stack */
+check_stack_succ:
+    mov %r10, %r11      /* Align stack on 16 bytes before pushing */
+    and $1, %r11        /* stack arguments in case we have an odd */
+    shl $3, %r11        /* number of stack arguments */
+    sub %r11, %rsp
     /* store memory args */
-    movq %rcx, %r10 /* func ptr */
-    movq %rdx, %rcx /* counter */
-    lea 8+64+48-8(%rdi,%rcx,8), %rdx
-    sub %rsp, %rdx
+    movq %rdi, %r11     /* func ptr */
+    movq %r10, %rcx     /* counter */
+    lea 64+48-8(%rsi,%rcx,8), %r10
+    sub %rsp, %r10
     cmpq $0, %rcx
-    jz cycle_end
-cycle:
-    push 0(%rsp,%rdx)
-    loop cycle
-cycle_end:
-    movq 80(%rdi), %rsi
-    movq 88(%rdi), %rdx
-    movq 96(%rdi), %rcx
-    movq 104(%rdi), %r8
-    movq 112(%rdi), %r9
+    je push_args_end
+push_args:
+    push 0(%rsp,%r10)
+    loop push_args
+push_args_end:
+    /* fill all fp args */
+    movq 0x00(%rsi), %xmm0
+    movq 0x08(%rsi), %xmm1
+    movq 0x10(%rsi), %xmm2
+    movq 0x18(%rsi), %xmm3
+    movq 0x20(%rsi), %xmm4
+    movq 0x28(%rsi), %xmm5
+    movq 0x30(%rsi), %xmm6
+    movq 0x38(%rsi), %xmm7

-    movq 72(%rdi), %rdi
+    /* fill all int args */
+    movq 0x40(%rsi), %rdi
+    movq 0x50(%rsi), %rdx
+    movq 0x58(%rsi), %rcx
+    movq 0x60(%rsi), %r8
+    movq 0x68(%rsi), %r9
+    movq 0x48(%rsi), %rsi

-    call *%r10
+    call *%r11
     leave
     ret
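
The rewritten x86-64 trampoline takes the function pointer in rdi, the packed argument buffer in rsi, and the stack-argument count in rdx, matching the new C-side declaration invokeNative(f, args, n_stacks). It assumes the caller lays the buffer out as eight floating-point slots, then six integer slots, then any stack spills. A minimal C sketch of that assumed layout (the struct name is illustrative, not part of the patch; the offsets line up with the movq instructions above):

#include <stdint.h>

/* Illustrative view of the argv buffer consumed by the x86-64
   invokeNative: 0x00..0x38 feed xmm0..xmm7, 0x40..0x68 feed
   rdi, rsi, rdx, rcx, r8, r9 (System V AMD64 ABI), and 0x70
   onward holds the n_stacks values replayed onto the stack. */
typedef struct NativeCallArgs {
    uint64_t fps[8];    /* 0x00: values loaded into xmm0..xmm7  */
    uint64_t ints[6];   /* 0x40: values loaded into rdi..r9     */
    uint64_t stacks[];  /* 0x70: stack arguments, n_stacks long */
} NativeCallArgs;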

+ 24 - 42
core/iwasm/runtime/vmcore-wasm/invokeNative_ia32.s

@@ -1,56 +1,38 @@
-// Copyright (C) 2019 Intel Corporation.  All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+/*
+ * Copyright (C) 2019 Intel Corporation.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */

-//    Licensed to the Apache Software Foundation (ASF) under one or more
-//    contributor license agreements.  See the NOTICE file distributed with
-//    this work for additional information regarding copyright ownership.
-//    The ASF licenses this file to You under the Apache License, Version 2.0
-//    (the "License"); you may not use this file except in compliance with
-//    the License.  You may obtain a copy of the License at
-// 
-//      http://www.apache.org/licenses/LICENSE-2.0
-// 
-//   Unless required by applicable law or agreed to in writing, software
-//   distributed under the License is distributed on an "AS IS" BASIS,
-//   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-//   See the License for the specific language governing permissions and
-//   limitations under the License.
-//
-//   Author: Ivan Volosyuk
-//
     .text
     .align 2
 .globl invokeNative
     .type   invokeNative, @function
 invokeNative:
-
     push    %ebp
     movl    %esp, %ebp
-    push    %ecx
-    movl    8(%ebp), %eax           /* eax = argv */
-    movl    12(%ebp), %ecx          /* ecx = argc */
+    movl    16(%ebp), %ecx          /* ecx = argc */
+    movl    12(%ebp), %edx          /* edx = argv */
     test    %ecx, %ecx
-    je      restore_ecx             /* if ecx == 0, skip pushing arguments */
-    leal    -4(%eax,%ecx,4), %eax   /* eax = eax + ecx * 4 - 4 */
-    subl    %esp, %eax              /* eax = eax - esp */
+    jz      skip_push_args          /* if ecx == 0, skip pushing arguments */
+    leal    -4(%edx,%ecx,4), %edx   /* edx = edx + ecx * 4 - 4 */
+    subl    %esp, %edx              /* edx = edx - esp */
 1:
-    push    0(%esp,%eax)
+    push    0(%esp,%edx)
     loop    1b                      /* loop ecx counts */
-restore_ecx:
-    movl    -4(%ebp), %ecx          /* restore ecx */
-    movl    16(%ebp), %eax          /* eax = func_ptr */
-    call    *%eax
+skip_push_args:
+    movl    8(%ebp), %edx           /* edx = func_ptr */
+    call    *%edx
     leave
     ret
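
On ia32 every argument travels on the stack (cdecl), so the trampoline only replays argv before the indirect call; with the reordered signature, func_ptr now sits at 8(%ebp), argv at 12(%ebp), and argc at 16(%ebp), and ecx no longer needs to be saved and restored. A hedged usage sketch under those assumptions (native_add3 and call_example are hypothetical, not from the patch):

#include <stdint.h>

typedef void (*GenericFunctionPointer)();
typedef int32_t (*Int32FuncPtr)(GenericFunctionPointer, uint32_t*, uint32_t);
/* the assembly entry above, with the new argument order */
extern int64_t invokeNative(GenericFunctionPointer f, uint32_t *args, uint32_t sz);

static int32_t native_add3(int32_t a, int32_t b, int32_t c)
{
    return a + b + c;
}

static int32_t call_example(void)
{
    uint32_t argv[3] = { 1, 2, 3 };
    /* the push loop walks argv from argv[2] down to argv[0],
       so native_add3 sees a = 1, b = 2, c = 3 */
    return ((Int32FuncPtr)invokeNative)(
        (GenericFunctionPointer)native_add3, argv, 3); /* returns 6 */
}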

+ 26 - 22
core/iwasm/runtime/vmcore-wasm/wasm_interp.c

@@ -75,17 +75,17 @@ GET_F64_FROM_ADDR (uint32 *addr)
 #endif  /* WASM_CPU_SUPPORTS_UNALIGNED_64BIT_ACCESS != 0 */

 #if WASM_ENABLE_EXT_MEMORY_SPACE != 0
-#define CHECK_EXT_MEMORY_SPACE() \
+#define CHECK_EXT_MEMORY_SPACE()                                                \
     else if (module->ext_mem_data                                               \
              && module->ext_mem_base_offset <= offset1                          \
              && offset1 < module->ext_mem_base_offset                           \
                           + module->ext_mem_size) {                             \
+        /* If offset1 is in valid range, maddr must also be in valid range,     \
+           no need to check it again. */                                        \
         maddr = module->ext_mem_data                                            \
                 + (offset1 - module->ext_mem_base_offset);                      \
-        if (maddr < module->ext_mem_data)                                       \
-          goto out_of_bounds;                                                   \
-        maddr1 = maddr + LOAD_SIZE[opcode - WASM_OP_I32_LOAD];                  \
-        if (maddr1 > module->ext_mem_data_end)                                  \
+        if (maddr + LOAD_SIZE[opcode - WASM_OP_I32_LOAD] >                      \
+                module->ext_mem_data_end)                                       \
           goto out_of_bounds;                                                   \
     }
 #else
@@ -94,26 +94,25 @@ GET_F64_FROM_ADDR (uint32 *addr)

 #define CHECK_MEMORY_OVERFLOW() do {                                            \
     uint32 offset1 = offset + addr;                                             \
-    uint8 *maddr1;                                                              \
-    if (flags != 2)                                                             \
-      LOG_VERBOSE("unaligned load/store in wasm interp, flag is: %d.\n", flags);\
-    if (offset1 < offset)                                                       \
-      goto out_of_bounds;                                                       \
-    if (offset1 < heap_base_offset) {                                           \
+    /* if (flags != 2)                                                          \
+      LOG_VERBOSE("unaligned load/store in wasm interp, flag: %d.\n", flags); */\
+    /* The WASM spec doesn't require that the dynamic address operand must be   \
+       unsigned, so we don't check whether integer overflow or not here. */     \
+    /* if (offset1 < offset)                                                    \
+      goto out_of_bounds; */                                                    \
+    if (offset1 < memory_data_size) {                                           \
+      /* If offset1 is in valid range, maddr must also be in valid range,       \
+         no need to check it again. */                                          \
       maddr = memory->memory_data + offset1;                                    \
-      if (maddr < memory->base_addr)                                            \
-        goto out_of_bounds;                                                     \
-      maddr1 = maddr + LOAD_SIZE[opcode - WASM_OP_I32_LOAD];                    \
-      if (maddr1 > memory->end_addr)                                            \
+      if (maddr + LOAD_SIZE[opcode - WASM_OP_I32_LOAD] > memory->end_addr)      \
         goto out_of_bounds;                                                     \
     }                                                                           \
-    else if (offset1 < memory->heap_base_offset                                 \
-                       + (memory->heap_data_end - memory->heap_data)) {         \
+    else if (offset1 > heap_base_offset                                         \
+             && offset1 < heap_base_offset + heap_data_size) {                  \
+      /* If offset1 is in valid range, maddr must also be in valid range,       \
+         no need to check it again. */                                          \
       maddr = memory->heap_data + offset1 - memory->heap_base_offset;           \
-      if (maddr < memory->heap_data)                                            \
-        goto out_of_bounds;                                                     \
-      maddr1 = maddr + LOAD_SIZE[opcode - WASM_OP_I32_LOAD];                    \
-      if (maddr1 > memory->heap_data_end)                                       \
+      if (maddr + LOAD_SIZE[opcode - WASM_OP_I32_LOAD] > memory->heap_data_end) \
         goto out_of_bounds;                                                     \
     }                                                                           \
     CHECK_EXT_MEMORY_SPACE()                                                    \
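
Stripped of the macro plumbing, the tightened check is just two range tests per region: one on the 32-bit linear offset against a precomputed size, one on the end address of the access. The old lower-bound re-checks on maddr disappear because a valid offset1 already implies a valid maddr. A sketch of the resulting logic under those assumptions (bounds_check, MemModel and access_size are illustrative names; access_size stands for LOAD_SIZE[opcode - WASM_OP_I32_LOAD]):

#include <stdint.h>

/* Minimal model of the WASMMemoryInstance fields the check touches. */
typedef struct MemModel {
    uint8_t *memory_data, *end_addr;    /* linear memory        */
    uint8_t *heap_data, *heap_data_end; /* app heap             */
    uint32_t heap_base_offset;          /* heap's offset origin */
} MemModel;

/* Returns the native address for a wasm access at offset1, or NULL
   for the out_of_bounds path (ext-memory branch omitted). */
static uint8_t *
bounds_check(MemModel *m, uint32_t offset1, uint32_t memory_data_size,
             uint32_t heap_data_size, uint32_t access_size)
{
    if (offset1 < memory_data_size) {
        /* offset1 in range implies maddr in range: no re-check */
        uint8_t *maddr = m->memory_data + offset1;
        return maddr + access_size > m->end_addr ? NULL : maddr;
    }
    if (offset1 > m->heap_base_offset
        && offset1 < m->heap_base_offset + heap_data_size) {
        uint8_t *maddr = m->heap_data + (offset1 - m->heap_base_offset);
        return maddr + access_size > m->heap_data_end ? NULL : maddr;
    }
    return NULL;
}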
@@ -684,7 +683,11 @@ wasm_interp_call_func_bytecode(WASMThread *self,
 {
   WASMModuleInstance *module = self->module_inst;
   WASMMemoryInstance *memory = module->default_memory;
-  int32 heap_base_offset = memory ? memory->heap_base_offset : 0;
+  uint32 memory_data_size = memory
+                            ? NumBytesPerPage * memory->cur_page_count : 0;
+  uint32 heap_base_offset = memory ? memory->heap_base_offset : 0;
+  uint32 heap_data_size = memory
+                          ? memory->heap_data_end - memory->heap_data : 0;
   WASMTableInstance *table = module->default_table;
   uint8 opcode_IMPDEP2 = WASM_OP_IMPDEP2;
   WASMInterpFrame *frame = NULL;
@@ -1302,6 +1305,7 @@ wasm_interp_call_func_bytecode(WASMThread *self,
           PUSH_I32(prev_page_count);
           /* update the memory instance ptr */
           memory = module->default_memory;
+          memory_data_size = NumBytesPerPage * memory->cur_page_count;
         }

         (void)reserved;
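
Hoisting memory_data_size, heap_base_offset and heap_data_size into locals before the dispatch loop saves repeated loads in CHECK_MEMORY_OVERFLOW, but any opcode that changes the memory must refresh the cache; the grow path above is the only one here. A minimal sketch of that hoist-and-refresh pattern (the Memory struct and run() are simplified stand-ins, not code from the patch; wasm pages are 64 KiB):

#include <stdint.h>

#define PAGE_SIZE 65536u /* stand-in for NumBytesPerPage */

typedef struct Memory { uint32_t cur_page_count; } Memory;

static uint32_t run(Memory *memory, uint32_t grow_pages)
{
    /* hoisted once, consulted by every bounds check in the loop */
    uint32_t memory_data_size = PAGE_SIZE * memory->cur_page_count;

    if (grow_pages) {                     /* the memory.grow case */
        memory->cur_page_count += grow_pages;
        /* cached size is stale now; recompute, as the hunk does */
        memory_data_size = PAGE_SIZE * memory->cur_page_count;
    }
    return memory_data_size;
}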

+ 23 - 23
core/iwasm/runtime/vmcore-wasm/wasm_runtime.c

@@ -1459,13 +1459,13 @@ word_copy(uint32 *dest, uint32 *src, unsigned num)
 #if !defined(__x86_64__) && !defined(__amd_64__)

 typedef void (*GenericFunctionPointer)();
-int64 invokeNative(uint32 *args, uint32 sz, GenericFunctionPointer f);
+int64 invokeNative(GenericFunctionPointer f, uint32 *args, uint32 sz);

-typedef float64 (*Float64FuncPtr)(uint32*, uint32, GenericFunctionPointer);
-typedef float32 (*Float32FuncPtr)(uint32*, uint32, GenericFunctionPointer);
-typedef int64 (*Int64FuncPtr)(uint32*, uint32, GenericFunctionPointer);
-typedef int32 (*Int32FuncPtr)(uint32*, uint32, GenericFunctionPointer);
-typedef void (*VoidFuncPtr)(uint32*, uint32, GenericFunctionPointer);
+typedef float64 (*Float64FuncPtr)(GenericFunctionPointer f, uint32*, uint32);
+typedef float32 (*Float32FuncPtr)(GenericFunctionPointer f, uint32*, uint32);
+typedef int64 (*Int64FuncPtr)(GenericFunctionPointer f, uint32*, uint32);
+typedef int32 (*Int32FuncPtr)(GenericFunctionPointer f, uint32*, uint32);
+typedef void (*VoidFuncPtr)(GenericFunctionPointer f, uint32*, uint32);

 static Int64FuncPtr invokeNative_Int64 = (Int64FuncPtr)invokeNative;
 static Int32FuncPtr invokeNative_Int32 = (Int32FuncPtr)invokeNative;
@@ -1528,21 +1528,21 @@ wasm_runtime_invoke_native(void *func_ptr, WASMType *func_type,

     argc1 = j;
     if (func_type->result_count == 0) {
-        invokeNative_Void(argv1, argc1, func_ptr);
+        invokeNative_Void(func_ptr, argv1, argc1);
     }
     else {
         switch (func_type->types[func_type->param_count]) {
             case VALUE_TYPE_I32:
-                ret[0] = invokeNative_Int32(argv1, argc1, func_ptr);
+                ret[0] = invokeNative_Int32(func_ptr, argv1, argc1);
                 break;
             case VALUE_TYPE_I64:
-                PUT_I64_TO_ADDR(ret, invokeNative_Int64(argv1, argc1, func_ptr));
+                PUT_I64_TO_ADDR(ret, invokeNative_Int64(func_ptr, argv1, argc1));
                 break;
             case VALUE_TYPE_F32:
-                *(float32*)ret = invokeNative_Float32(argv1, argc1, func_ptr);
+                *(float32*)ret = invokeNative_Float32(func_ptr, argv1, argc1);
                 break;
             case VALUE_TYPE_F64:
-                PUT_F64_TO_ADDR(ret, invokeNative_Float64(argv1, argc1, func_ptr));
+                PUT_F64_TO_ADDR(ret, invokeNative_Float64(func_ptr, argv1, argc1));
                 break;
             default:
                 wasm_assert(0);
@@ -1558,13 +1558,13 @@ wasm_runtime_invoke_native(void *func_ptr, WASMType *func_type,
 #else /* else of !defined(__x86_64__) && !defined(__amd_64__) */

 typedef void (*GenericFunctionPointer)();
-int64 invokeNative(uint64 *args, uint64 n_fps, uint64 n_stacks, GenericFunctionPointer f);
+int64 invokeNative(GenericFunctionPointer f, uint64 *args, uint64 n_stacks);

-typedef float64 (*Float64FuncPtr)(uint64*, uint64, uint64, GenericFunctionPointer);
-typedef float32 (*Float32FuncPtr)(uint64*, uint64, uint64, GenericFunctionPointer);
-typedef int64 (*Int64FuncPtr)(uint64*,uint64, uint64, GenericFunctionPointer);
-typedef int32 (*Int32FuncPtr)(uint64*, uint64, uint64, GenericFunctionPointer);
-typedef void (*VoidFuncPtr)(uint64*, uint64, uint64, GenericFunctionPointer);
+typedef float64 (*Float64FuncPtr)(GenericFunctionPointer, uint64*, uint64);
+typedef float32 (*Float32FuncPtr)(GenericFunctionPointer, uint64*, uint64);
+typedef int64 (*Int64FuncPtr)(GenericFunctionPointer, uint64*,uint64);
+typedef int32 (*Int32FuncPtr)(GenericFunctionPointer, uint64*, uint64);
+typedef void (*VoidFuncPtr)(GenericFunctionPointer, uint64*, uint64);

 static Float64FuncPtr invokeNative_Float64 = (Float64FuncPtr)invokeNative;
 static Float32FuncPtr invokeNative_Float32 = (Float32FuncPtr)invokeNative;
@@ -1604,7 +1604,7 @@ wasm_runtime_invoke_native(void *func_ptr, WASMType *func_type,
         }
     }

-    fps = argv1 + 1;
+    fps = argv1;
     ints = fps + MAX_REG_FLOATS;
     stacks = ints + MAX_REG_INTS;

@@ -1645,21 +1645,21 @@ wasm_runtime_invoke_native(void *func_ptr, WASMType *func_type,
     }

     if (func_type->result_count == 0) {
-        invokeNative_Void(argv1, n_fps, n_stacks, func_ptr);
+        invokeNative_Void(func_ptr, argv1, n_stacks);
     }
     else {
         switch (func_type->types[func_type->param_count]) {
             case VALUE_TYPE_I32:
-                ret[0] = invokeNative_Int32(argv1, n_fps, n_stacks, func_ptr);
+                ret[0] = invokeNative_Int32(func_ptr, argv1, n_stacks);
                 break;
             case VALUE_TYPE_I64:
-                PUT_I64_TO_ADDR(ret, invokeNative_Int64(argv1, n_fps, n_stacks, func_ptr));
+                PUT_I64_TO_ADDR(ret, invokeNative_Int64(func_ptr, argv1, n_stacks));
                 break;
             case VALUE_TYPE_F32:
-                *(float32*)ret = invokeNative_Float32(argv1, n_fps, n_stacks, func_ptr);
+                *(float32*)ret = invokeNative_Float32(func_ptr, argv1, n_stacks);
                 break;
             case VALUE_TYPE_F64:
-                PUT_F64_TO_ADDR(ret, invokeNative_Float64(argv1, n_fps, n_stacks, func_ptr));
+                PUT_F64_TO_ADDR(ret, invokeNative_Float64(func_ptr, argv1, n_stacks));
                 break;
             default:
                 wasm_assert(0);
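
With the function pointer moved into the first parameter, a single assembly entry serves every return type through a cast, and fps = argv1 means the packed buffer now begins directly with the floating-point slots, matching the 0x00-based offsets in invokeNative_em64.s. A hedged end-to-end sketch of the x86-64 packing and dispatch (my_native and dispatch_example are hypothetical; the register counts follow the System V AMD64 ABI):

#include <stdint.h>
#include <string.h>

typedef void (*GenericFunctionPointer)();
typedef int64_t (*Int64FuncPtr)(GenericFunctionPointer, uint64_t*, uint64_t);
extern int64_t invokeNative(GenericFunctionPointer f, uint64_t *args, uint64_t n_stacks);

#define MAX_REG_FLOATS 8 /* xmm0..xmm7            */
#define MAX_REG_INTS   6 /* rdi,rsi,rdx,rcx,r8,r9 */

static int64_t my_native(int64_t a, double b) { return a + (int64_t)b; }

static int64_t dispatch_example(void)
{
    uint64_t argv1[MAX_REG_FLOATS + MAX_REG_INTS] = { 0 };
    uint64_t *fps  = argv1;                 /* 0x00: fp slots  */
    uint64_t *ints = fps + MAX_REG_FLOATS;  /* 0x40: int slots */
    double b = 2.5;

    ints[0] = 7;                    /* first int arg -> rdi  */
    memcpy(&fps[0], &b, sizeof b);  /* first fp arg  -> xmm0 */

    /* no stack spills, so n_stacks == 0 */
    return ((Int64FuncPtr)invokeNative)(
        (GenericFunctionPointer)my_native, argv1, 0); /* 7 + 2 = 9 */
}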