Browse Source

Implement riscv support for interpreter (#505)

Wang Ning committed 5 years ago
parent
commit
da8c879953

+ 13 - 5
build-scripts/config_common.cmake

@@ -34,8 +34,16 @@ elseif (WAMR_BUILD_TARGET STREQUAL "MIPS")
   add_definitions(-DBUILD_TARGET_MIPS)
 elseif (WAMR_BUILD_TARGET STREQUAL "XTENSA")
   add_definitions(-DBUILD_TARGET_XTENSA)
+elseif (WAMR_BUILD_TARGET STREQUAL "RISCV64" OR WAMR_BUILD_TARGET STREQUAL "RISCV64_LP64D")
+  add_definitions(-DBUILD_TARGET_RISCV64_LP64D)
+elseif (WAMR_BUILD_TARGET STREQUAL "RISCV64_LP64")
+  add_definitions(-DBUILD_TARGET_RISCV64_LP64)
+elseif (WAMR_BUILD_TARGET STREQUAL "RISCV32" OR WAMR_BUILD_TARGET STREQUAL "RISCV32_ILP32D")
+  add_definitions(-DBUILD_TARGET_RISCV32_ILP32D)
+elseif (WAMR_BUILD_TARGET STREQUAL "RISCV32_ILP32")
+  add_definitions(-DBUILD_TARGET_RISCV32_ILP32)
 else ()
-   message (FATAL_ERROR "-- WAMR build target isn't set")
+  message (FATAL_ERROR "-- WAMR build target isn't set")
 endif ()
 
 if (CMAKE_BUILD_TYPE STREQUAL "Debug")
@@ -43,7 +51,7 @@ if (CMAKE_BUILD_TYPE STREQUAL "Debug")
 endif ()
 
 if (CMAKE_SIZEOF_VOID_P EQUAL 8)
-  if (WAMR_BUILD_TARGET STREQUAL "X86_64" OR WAMR_BUILD_TARGET STREQUAL "AMD_64" OR WAMR_BUILD_TARGET MATCHES "AARCH64.*")
+  if (WAMR_BUILD_TARGET STREQUAL "X86_64" OR WAMR_BUILD_TARGET STREQUAL "AMD_64" OR WAMR_BUILD_TARGET MATCHES "AARCH64.*" OR WAMR_BUILD_TARGET MATCHES "RISCV64.*")
     # Add -fPIC flag if build as 64-bit
     set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
     set (CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "${CMAKE_SHARED_LIBRARY_LINK_C_FLAGS} -fPIC")
@@ -55,10 +63,10 @@ if (CMAKE_SIZEOF_VOID_P EQUAL 8)
 endif ()
 
 if (WAMR_BUILD_TARGET MATCHES "ARM.*")
-    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -marm")
+  set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -marm")
 elseif (WAMR_BUILD_TARGET MATCHES "THUMB.*")
-    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mthumb")
-    set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -Wa,-mthumb")
+  set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mthumb")
+  set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -Wa,-mthumb")
 endif ()
 
 if (NOT WAMR_BUILD_INTERP EQUAL 1)

+ 12 - 2
build-scripts/runtime_lib.cmake

@@ -31,9 +31,14 @@ endif ()
 # Set default options
 
 # Set WAMR_BUILD_TARGET, currently values supported:
-# "X86_64", "AMD_64", "X86_32", "AARCH64[sub]", "ARM[sub]", "THUMB[sub]", "MIPS", "XTENSA"
+# "X86_64", "AMD_64", "X86_32", "AARCH64[sub]", "ARM[sub]", "THUMB[sub]",
+# "MIPS", "XTENSA", "RISCV64[sub]", "RISCV32[sub]"
 if (NOT DEFINED WAMR_BUILD_TARGET)
-    if (CMAKE_SIZEOF_VOID_P EQUAL 8)
+    if (CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+        set (WAMR_BUILD_TARGET "AARCH64")
+    elseif (CMAKE_SYSTEM_PROCESSOR STREQUAL "riscv64")
+        set (WAMR_BUILD_TARGET "RISCV64")
+    elseif (CMAKE_SIZEOF_VOID_P EQUAL 8)
         # Build as X86_64 by default in 64-bit platform
         set (WAMR_BUILD_TARGET "X86_64")
     else ()
@@ -47,6 +52,11 @@ if (WAMR_BUILD_INTERP EQUAL 1 OR WAMR_BUILD_JIT EQUAL 1)
     include (${IWASM_DIR}/interpreter/iwasm_interp.cmake)
 endif ()
 
+if (WAMR_BUILD_TARGET MATCHES "RISCV.*" AND WAMR_BUILD_AOT EQUAL 1)
+    set (WAMR_BUILD_AOT 0)
+    message ("-- WAMR AOT disabled as it isn't supported by riscv currently")
+endif ()
+
 if (WAMR_BUILD_AOT EQUAL 1)
     include (${IWASM_DIR}/aot/iwasm_aot.cmake)
     if (WAMR_BUILD_JIT EQUAL 1)

+ 10 - 2
core/config.h

@@ -15,7 +15,11 @@
     && !defined(BUILD_TARGET_THUMB) \
     && !defined(BUILD_TARGET_THUMB_VFP) \
     && !defined(BUILD_TARGET_MIPS) \
-    && !defined(BUILD_TARGET_XTENSA)
+    && !defined(BUILD_TARGET_XTENSA) \
+    && !defined(BUILD_TARGET_RISCV64_LP64D) \
+    && !defined(BUILD_TARGET_RISCV64_LP64) \
+    && !defined(BUILD_TARGET_RISCV32_ILP32D) \
+    && !defined(BUILD_TARGET_RISCV32_ILP32)
 #if defined(__x86_64__) || defined(__x86_64)
 #define BUILD_TARGET_X86_64
 #elif defined(__amd64__) || defined(__amd64)
@@ -34,6 +38,10 @@
 #define BUILD_TARGET_MIPS
 #elif defined(__XTENSA__)
 #define BUILD_TARGET_XTENSA
+#elif defined(__riscv) && (__riscv_xlen == 64)
+#define BUILD_TARGET_RISCV64_LP64D
+#elif defined(__riscv) && (__riscv_xlen == 32)
+#define BUILD_TARGET_RISCV32_ILP32D
 #else
 #error "Build target isn't set"
 #endif
@@ -224,7 +232,7 @@
 
 /* Default min/max heap size of each app */
 #define APP_HEAP_SIZE_DEFAULT (8 * 1024)
-#define APP_HEAP_SIZE_MIN (512)
+#define APP_HEAP_SIZE_MIN (256)
 #define APP_HEAP_SIZE_MAX (512 * 1024 * 1024)
 
 /* Default wasm stack size of each app */

+ 4 - 4
core/iwasm/aot/aot_reloc.h

@@ -44,8 +44,8 @@ typedef struct {
     REG_SYM(aot_call_indirect),           \
     REG_SYM(wasm_runtime_enlarge_memory), \
     REG_SYM(wasm_runtime_set_exception),  \
-    REG_SYM(aot_memset),                  \
-    REG_SYM(aot_memmove),                 \
+    {"memset", (void*)aot_memset},        \
+    {"memmove", (void*)aot_memmove},      \
     REG_BULK_MEMORY_SYM()                 \
     REG_ATOMIC_WAIT_SYM()                 \
     REG_AOT_TRACE_SYM()
@@ -56,8 +56,8 @@ typedef struct {
     REG_SYM(aot_call_indirect),           \
     REG_SYM(wasm_runtime_enlarge_memory), \
     REG_SYM(wasm_runtime_set_exception),  \
-    REG_SYM(aot_memset),                  \
-    REG_SYM(aot_memmove),                 \
+    {"memset", (void*)aot_memset},        \
+    {"memmove", (void*)aot_memmove},      \
     REG_SYM(fmin),                        \
     REG_SYM(fminf),                       \
     REG_SYM(fmax),                        \

+ 95 - 0
core/iwasm/common/arch/invokeNative_riscv32_ilp32.s

@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2019 Intel Corporation.  All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+        .text
+        .align  2
+#ifndef BH_PLATFORM_DARWIN
+        .globl invokeNative
+        .type  invokeNative, function
+invokeNative:
+#else
+        .globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+
+
+/*
+ * Arguments passed in:
+ *
+ * a0 function ptr
+ * a1 argv
+ * a2 nstacks
+ */
+
+/*
+ * sp (stack pointer)
+ *    |- sw to store 32-bit values from register to memory
+ *    |- lw to load from stack to register
+ * fp/s0 (frame pointer)
+ * a0-a7 (8 integer arguments)
+ *    |- sw to store
+ *    |- lw to load
+ * t0-t6 (temporary registers)
+ *    |- caller saved
+ */
+
+        /* reserve space on stack to save return address and frame pointer */
+        addi      sp, sp, -8
+        sw        fp, 0(sp)            /* save frame pointer */
+        sw        ra, 4(sp)            /* save return address */
+
+        mv        fp, sp               /* set frame pointer to bottom of fixed frame */
+
+        /* save function ptr, argv & nstacks */
+        mv        t0, a0               /* t0 = function ptr */
+        mv        t1, a1               /* t1 = argv array address */
+        mv        t2, a2               /* t2 = nstack */
+
+        /* fill in a0-7 integer-registers */
+        lw        a0, 0(t1)            /* a0 = argv[0] */
+        lw        a1, 4(t1)            /* a1 = argv[1] */
+        lw        a2, 8(t1)            /* a2 = argv[2] */
+        lw        a3, 12(t1)           /* a3 = argv[3] */
+        lw        a4, 16(t1)           /* a4 = argv[4] */
+        lw        a5, 20(t1)           /* a5 = argv[5] */
+        lw        a6, 24(t1)           /* a6 = argv[6] */
+        lw        a7, 28(t1)           /* a7 = argv[7] */
+
+        addi      t1, t1, 32           /* t1 points to stack args */
+
+        /* directly call the function if no args in stack,
+           x0 always holds 0 */
+        beq       t2, x0, call_func
+
+        /* reserve enough stack space for function arguments */
+        sll       t3, t2, 2             /* shift left 2 bits. t3 = n_stacks * 4 */
+        sub       sp, sp, t3
+
+        /* make 16-byte aligned */
+        and       sp, sp, ~15
+
+        /* save sp in t4 register */
+        mv        t4, sp
+
+        /* copy left arguments from caller stack to own frame stack */
+loop_stack_args:
+        beq       t2, x0, call_func
+        lw        t5, 0(t1)             /* load stack argument, t5 = argv[i] */
+        sw        t5, 0(t4)             /* store t5 to reserved stack, sp[j] = t5 */
+        addi      t1, t1, 4             /* move to next stack argument */
+        addi      t4, t4, 4             /* move to next stack pointer */
+        addi      t2, t2, -1            /* decrease t2 every loop, nstacks = nstacks -1 */
+        j loop_stack_args
+
+call_func:
+        jalr      t0
+
+        /* restore registers pushed in stack or saved in another register */
+return:
+        mv        sp, fp                /* restore sp saved in fp before function call */
+        lw        fp, 0(sp)             /* load previous frame pointer to fp register */
+        lw        ra, 4(sp)             /* load previous return address to ra register */
+        addi      sp, sp, 8             /* pop frame, restore sp */
+        jr        ra
+

+ 104 - 0
core/iwasm/common/arch/invokeNative_riscv32_ilp32d.s

@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2019 Intel Corporation.  All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+        .text
+        .align  2
+#ifndef BH_PLATFORM_DARWIN
+        .globl invokeNative
+        .type  invokeNative, function
+invokeNative:
+#else
+        .globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+
+
+/*
+ * Arguments passed in:
+ *
+ * a0 function ptr
+ * a1 argv
+ * a2 nstacks
+ */
+
+/*
+ * sp (stack pointer)
+ *    |- sw to store 32-bit values from register to memory
+ *    |- lw to load from stack to register
+ * fp/s0 (frame pointer)
+ * a0-a7 (8 integer arguments)
+ *    |- sw to store
+ *    |- lw to load
+ * t0-t6 (temporary registers)
+ *    |- caller saved
+ */
+
+        /* reserve space on stack to save return address and frame pointer */
+        addi      sp, sp, -8
+        sw        fp, 0(sp)            /* save frame pointer */
+        sw        ra, 4(sp)            /* save return address */
+
+        mv        fp, sp               /* set frame pointer to bottom of fixed frame */
+
+        /* save function ptr, argv & nstacks */
+        mv        t0, a0               /* t0 = function ptr */
+        mv        t1, a1               /* t1 = argv array address */
+        mv        t2, a2               /* t2 = nstack */
+
+        /* fill in a0-7 integer-registers */
+        lw        a0, 0(t1)            /* a0 = argv[0] */
+        lw        a1, 4(t1)            /* a1 = argv[1] */
+        lw        a2, 8(t1)            /* a2 = argv[2] */
+        lw        a3, 12(t1)           /* a3 = argv[3] */
+        lw        a4, 16(t1)           /* a4 = argv[4] */
+        lw        a5, 20(t1)           /* a5 = argv[5] */
+        lw        a6, 24(t1)           /* a6 = argv[6] */
+        lw        a7, 28(t1)           /* a7 = argv[7] */
+
+        /* fill in fa0-7 float-registers*/
+        fld       fa0, 32(t1)          /* fa0 = argv[8] */
+        fld       fa1, 40(t1)          /* fa1 = argv[9] */
+        fld       fa2, 48(t1)          /* fa2 = argv[10] */
+        fld       fa3, 56(t1)          /* fa3 = argv[11] */
+        fld       fa4, 64(t1)          /* fa4 = argv[12] */
+        fld       fa5, 72(t1)          /* fa5 = argv[13] */
+        fld       fa6, 80(t1)          /* fa6 = argv[14] */
+        fld       fa7, 88(t1)          /* fa7 = argv[15] */
+
+        addi      t1, t1, 96           /* t1 points to stack args */
+
+        /* directly call the function if no args in stack,
+           x0 always holds 0 */
+        beq       t2, x0, call_func
+
+        /* reserve enough stack space for function arguments */
+        sll       t3, t2, 2             /* shift left 2 bits. t3 = n_stacks * 4 */
+        sub       sp, sp, t3
+
+        /* make 16-byte aligned */
+        and       sp, sp, ~15
+
+        /* save sp in t4 register */
+        mv        t4, sp
+
+        /* copy left arguments from caller stack to own frame stack */
+loop_stack_args:
+        beq       t2, x0, call_func
+        lw        t5, 0(t1)             /* load stack argument, t5 = argv[i] */
+        sw        t5, 0(t4)             /* store t5 to reserved stack, sp[j] = t5 */
+        addi      t1, t1, 4             /* move to next stack argument */
+        addi      t4, t4, 4             /* move to next stack pointer */
+        addi      t2, t2, -1            /* decrease t2 every loop, nstacks = nstacks -1 */
+        j loop_stack_args
+
+call_func:
+        jalr      t0
+
+        /* restore registers pushed in stack or saved in another register */
+return:
+        mv        sp, fp                /* restore sp saved in fp before function call */
+        lw        fp, 0(sp)             /* load previous frame pointer to fp register */
+        lw        ra, 4(sp)             /* load previous return address to ra register */
+        addi      sp, sp, 8             /* pop frame, restore sp */
+        jr        ra

+ 95 - 0
core/iwasm/common/arch/invokeNative_riscv64_lp64.s

@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2019 Intel Corporation.  All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+        .text
+        .align  2
+#ifndef BH_PLATFORM_DARWIN
+        .globl invokeNative
+        .type  invokeNative, function
+invokeNative:
+#else
+        .globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+
+
+/*
+ * Arguments passed in:
+ *
+ * a0 function ptr
+ * a1 argv
+ * a2 nstacks
+ */
+
+/*
+ * sp (stack pointer)
+ *    |- sd to store 64-bit values from register to memory
+ *    |- ld to load from stack to register
+ * fp/s0 (frame pointer)
+ * a0-a7 (8 integer arguments)
+ *    |- sd to store
+ *    |- ld to load
+ * t0-t6 (temporary registers)
+ *    |- caller saved
+ */
+
+        /* reserve space on stack to save return address and frame pointer */
+        addi      sp, sp, -16
+        sd        fp, 0(sp)            /* save frame pointer */
+        sd        ra, 8(sp)            /* save return address */
+
+        mv        fp, sp               /* set frame pointer to bottom of fixed frame */
+
+        /* save function ptr, argv & nstacks */
+        mv        t0, a0               /* t0 = function ptr */
+        mv        t1, a1               /* t1 = argv array address */
+        mv        t2, a2               /* t2 = nstack */
+
+        /* fill in a0-7 integer-registers*/
+        ld        a0, 0(t1)            /* a0 = argv[0] */
+        ld        a1, 8(t1)            /* a1 = argv[1] */
+        ld        a2, 16(t1)           /* a2 = argv[2] */
+        ld        a3, 24(t1)           /* a3 = argv[3] */
+        ld        a4, 32(t1)           /* a4 = argv[4] */
+        ld        a5, 40(t1)           /* a5 = argv[5] */
+        ld        a6, 48(t1)           /* a6 = argv[6] */
+        ld        a7, 56(t1)           /* a7 = argv[7] */
+
+        addi      t1, t1, 64           /* t1 points to stack args */
+
+        /* directly call the function if no args in stack,
+           x0 always holds 0 */
+        beq       t2, x0, call_func
+
+        /* reserve enough stack space for function arguments */
+        sll       t3, t2, 3             /* shift left 3 bits. t3 = n_stacks * 8 */
+        sub       sp, sp, t3
+
+        /* make 16-byte aligned */
+        and       sp, sp, ~(15LL)
+
+        /* save sp in t4 register */
+        mv        t4, sp
+
+        /* copy left arguments from caller stack to own frame stack */
+loop_stack_args:
+        beq       t2, x0, call_func
+        ld        t5, 0(t1)             /* load stack argument, t5 = argv[i] */
+        sd        t5, 0(t4)             /* store t5 to reseved stack, sp[j] = t5 */
+        addi      t1, t1, 8             /* move to next stack argument */
+        addi      t4, t4, 8             /* move to next stack pointer */
+        addi      t2, t2, -1            /* decrease t2 every loop, nstacks = nstacks -1 */
+        j loop_stack_args
+
+call_func:
+        jalr      t0
+
+        /* restore registers pushed in stack or saved in another register */
+return:
+        mv        sp, fp                /* restore sp saved in fp before function call */
+        ld        fp, 0(sp)             /* load previous frame pointer to fp register */
+        ld        ra, 8(sp)             /* load previous return address to ra register */
+        addi      sp, sp, 16            /* pop frame, restore sp */
+        jr        ra
+

+ 108 - 0
core/iwasm/common/arch/invokeNative_riscv64_lp64d.s

@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2019 Intel Corporation.  All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+        .text
+        .align  2
+#ifndef BH_PLATFORM_DARWIN
+        .globl invokeNative
+        .type  invokeNative, function
+invokeNative:
+#else
+        .globl _invokeNative
+_invokeNative:
+#endif /* end of BH_PLATFORM_DARWIN */
+
+/*
+ * Arguments passed in:
+ *
+ * a0 function ptr
+ * a1 argv
+ * a2 nstacks
+ */
+
+/*
+ * sp (stack pointer)
+ *    |- sd to store 64-bit values from register to memory
+ *    |- ld to load from stack to register
+ * fp/s0 (frame pointer)
+ * a0-a7 (8 integer arguments)
+ *    |- sd to store
+ *    |- ld to load
+ * fa0-a7 (8 float arguments)
+ *    |- fsd to store
+ *    |- fld to load
+ * t0-t6 (temporary registers)
+ *    |- caller saved
+ */
+
+        /* reserve space on stack to save return address and frame pointer */
+        addi      sp, sp, -16
+        sd        fp, 0(sp)             /* save frame pointer */
+        sd        ra, 8(sp)             /* save return address */
+
+        mv        fp, sp                /* set frame pointer to bottom of fixed frame */
+
+        /* save function ptr, argv & nstacks */
+        mv        t0, a0                /* t0 = function ptr */
+        mv        t1, a1                /* t1 = argv array address */
+        mv        t2, a2                /* t2 = nstack */
+
+        /* fill in fa0-7 float-registers*/
+        fld       fa0, 0(t1)            /* fa0 = argv[0] */
+        fld       fa1, 8(t1)            /* fa1 = argv[1] */
+        fld       fa2, 16(t1)           /* fa2 = argv[2] */
+        fld       fa3, 24(t1)           /* fa3 = argv[3] */
+        fld       fa4, 32(t1)           /* fa4 = argv[4] */
+        fld       fa5, 40(t1)           /* fa5 = argv[5] */
+        fld       fa6, 48(t1)           /* fa6 = argv[6] */
+        fld       fa7, 56(t1)           /* fa7 = argv[7] */
+
+        /* fill in a0-7 integer-registers*/
+        ld        a0, 64(t1)            /* a0 = argv[8] */
+        ld        a1, 72(t1)            /* a1 = argv[9] */
+        ld        a2, 80(t1)            /* a2 = argv[10] */
+        ld        a3, 88(t1)            /* a3 = argv[11] */
+        ld        a4, 96(t1)            /* a4 = argv[12] */
+        ld        a5, 104(t1)           /* a5 = argv[13] */
+        ld        a6, 112(t1)           /* a6 = argv[14] */
+        ld        a7, 120(t1)           /* a7 = argv[15] */
+
+        addi      t1, t1, 128           /* t1 points to stack args */
+
+        /* directly call the function if no args in stack,
+           x0 always holds 0 */
+        beq       t2, x0, call_func
+
+        /* reserve enough stack space for function arguments */
+        sll       t3, t2, 3             /* shift left 3 bits. t3 = n_stacks * 8 */
+        sub       sp, sp, t3
+
+        /* make 16-byte aligned */
+        and       sp, sp, ~(15LL)
+
+        /* save sp in t4 register */
+        mv        t4, sp
+
+        /* copy left arguments from caller stack to own frame stack */
+loop_stack_args:
+        beq       t2, x0, call_func
+        ld        t5, 0(t1)             /* load stack argument, t5 = argv[i] */
+        sd        t5, 0(t4)             /* store t5 to reserved stack, sp[j] = t5 */
+        addi      t1, t1, 8             /* move to next stack argument */
+        addi      t4, t4, 8             /* move to next stack pointer */
+        addi      t2, t2, -1            /* decrease t2 every loop, nstacks = nstacks -1 */
+        j loop_stack_args
+
+call_func:
+        jalr      t0
+
+        /* restore registers pushed in stack or saved in another register */
+return:
+        mv        sp, fp                /* restore sp saved in fp before function call */
+        ld        fp, 0(sp)             /* load previous frame pointer to fp register */
+        ld        ra, 8(sp)             /* load previous return address to ra register */
+        addi      sp, sp, 16            /* pop frame, restore sp */
+        jr        ra
+
+

+ 8 - 0
core/iwasm/common/iwasm_common.cmake

@@ -48,6 +48,14 @@ elseif (WAMR_BUILD_TARGET STREQUAL "MIPS")
   set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_mips.s)
 elseif (WAMR_BUILD_TARGET STREQUAL "XTENSA")
   set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_xtensa.s)
+elseif (WAMR_BUILD_TARGET STREQUAL "RISCV64" OR WAMR_BUILD_TARGET STREQUAL "RISCV64_LP64D")
+  set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_riscv64_lp64d.s)
+elseif (WAMR_BUILD_TARGET STREQUAL "RISCV64_LP64")
+  set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_riscv64_lp64.s)
+elseif (WAMR_BUILD_TARGET STREQUAL "RISCV32" OR WAMR_BUILD_TARGET STREQUAL "RISCV32_ILP32D")
+  set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_riscv32_ilp32d.s)
+elseif (WAMR_BUILD_TARGET STREQUAL "RISCV32_ILP32")
+  set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_riscv32_ilp32.s)
 elseif (WAMR_BUILD_TARGET STREQUAL "GENERAL")
   # Use invokeNative_general.c instead of assembly code,
   # but the maximum number of native arguments is limited to 20,

+ 187 - 18
core/iwasm/common/wasm_runtime_common.c

@@ -2704,7 +2704,10 @@ fail:
   } while (0)
 
 /* The invoke native implementation on ARM platform with VFP co-processor */
-#if defined(BUILD_TARGET_ARM_VFP) || defined(BUILD_TARGET_THUMB_VFP)
+#if defined(BUILD_TARGET_ARM_VFP) \
+    || defined(BUILD_TARGET_THUMB_VFP) \
+    || defined(BUILD_TARGET_RISCV32_ILP32D) \
+    || defined(BUILD_TARGET_RISCV32_ILP32)
 typedef void (*GenericFunctionPointer)();
 int64 invokeNative(GenericFunctionPointer f, uint32 *args, uint32 n_stacks);
 
@@ -2714,14 +2717,20 @@ typedef int64 (*Int64FuncPtr)(GenericFunctionPointer, uint32*,uint32);
 typedef int32 (*Int32FuncPtr)(GenericFunctionPointer, uint32*, uint32);
 typedef void (*VoidFuncPtr)(GenericFunctionPointer, uint32*, uint32);
 
-static Float64FuncPtr invokeNative_Float64 = (Float64FuncPtr)invokeNative;
-static Float32FuncPtr invokeNative_Float32 = (Float32FuncPtr)invokeNative;
-static Int64FuncPtr invokeNative_Int64 = (Int64FuncPtr)invokeNative;
-static Int32FuncPtr invokeNative_Int32 = (Int32FuncPtr)invokeNative;
-static VoidFuncPtr invokeNative_Void = (VoidFuncPtr)invokeNative;
+static Float64FuncPtr invokeNative_Float64 = (Float64FuncPtr)(uintptr_t)invokeNative;
+static Float32FuncPtr invokeNative_Float32 = (Float32FuncPtr)(uintptr_t)invokeNative;
+static Int64FuncPtr invokeNative_Int64 = (Int64FuncPtr)(uintptr_t)invokeNative;
+static Int32FuncPtr invokeNative_Int32 = (Int32FuncPtr)(uintptr_t)invokeNative;
+static VoidFuncPtr invokeNative_Void = (VoidFuncPtr)(uintptr_t)invokeNative;
 
+#if !defined(BUILD_TARGET_RISCV32_ILP32D) \
+    && !defined(BUILD_TARGET_RISCV32_ILP32)
 #define MAX_REG_INTS   4
 #define MAX_REG_FLOATS 16
+#else
+#define MAX_REG_INTS   8
+#define MAX_REG_FLOATS 8
+#endif
 
 bool
 wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
@@ -2731,12 +2740,19 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
 {
     WASMModuleInstanceCommon *module = wasm_runtime_get_module_inst(exec_env);
     /* argv buf layout: int args(fix cnt) + float args(fix cnt) + stack args */
-    uint32 argv_buf[32], *argv1 = argv_buf, *fps, *ints, *stacks, size;
-    uint32 *argv_src = argv, i, argc1, n_ints = 0, n_fps = 0, n_stacks = 0;
+    uint32 argv_buf[32], *argv1 = argv_buf, *ints, *stacks, size;
+    uint32 *argv_src = argv, i, argc1, n_ints = 0, n_stacks = 0;
     uint32 arg_i32, ptr_len;
     uint32 result_count = func_type->result_count;
     uint32 ext_ret_count = result_count > 1 ? result_count - 1 : 0;
     bool ret = false;
+#if !defined(BUILD_TARGET_RISCV32_ILP32)
+    uint32 *fps;
+    int n_fps = 0;
+#else
+#define fps ints
+#define n_fps n_ints
+#endif
 
     n_ints++; /* exec env */
 
@@ -2751,18 +2767,29 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
                 break;
             case VALUE_TYPE_I64:
                 if (n_ints < MAX_REG_INTS - 1) {
+#if !defined(BUILD_TARGET_RISCV32_ILP32) && !defined(BUILD_TARGET_RISCV32_ILP32D)
                     /* 64-bit data must be 8 bytes aligned in arm */
                     if (n_ints & 1)
                         n_ints++;
+#endif
                     n_ints += 2;
                 }
+#if defined(BUILD_TARGET_RISCV32_ILP32) || defined(BUILD_TARGET_RISCV32_ILP32D)
+                /* part in register, part in stack */
+                else if (n_ints == MAX_REG_INTS - 1) {
+                    n_ints++;
+                    n_stacks++;
+                }
+#endif
                 else {
-                    /* 64-bit data must be 8 bytes aligned in arm */
+                    /* 64-bit data in stack must be 8 bytes aligned
+                       in arm and riscv32 */
                     if (n_stacks & 1)
                         n_stacks++;
                     n_stacks += 2;
                 }
                 break;
+#if !defined(BUILD_TARGET_RISCV32_ILP32D)
             case VALUE_TYPE_F32:
                 if (n_fps < MAX_REG_FLOATS)
                     n_fps++;
@@ -2771,11 +2798,19 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
                 break;
             case VALUE_TYPE_F64:
                 if (n_fps < MAX_REG_FLOATS - 1) {
+#if !defined(BUILD_TARGET_RISCV32_ILP32)
                     /* 64-bit data must be 8 bytes aligned in arm */
                     if (n_fps & 1)
                         n_fps++;
+#endif
                     n_fps += 2;
                 }
+#if defined(BUILD_TARGET_RISCV32_ILP32)
+                else if (n_fps == MAX_REG_FLOATS - 1) {
+                    n_fps++;
+                    n_stacks++;
+                }
+#endif
                 else {
                     /* 64-bit data must be 8 bytes aligned in arm */
                     if (n_stacks & 1)
@@ -2783,6 +2818,31 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
                     n_stacks += 2;
                 }
                 break;
+#else /* BUILD_TARGET_RISCV32_ILP32D */
+            case VALUE_TYPE_F32:
+            case VALUE_TYPE_F64:
+                if (n_fps < MAX_REG_FLOATS) {
+                    n_fps++;
+                }
+                else if (func_type->types[i] == VALUE_TYPE_F32
+                         && n_ints < MAX_REG_INTS) {
+                    /* use int reg firstly if available */
+                    n_ints++;
+                }
+                else if (func_type->types[i] == VALUE_TYPE_F64
+                         && n_ints < MAX_REG_INTS - 1) {
+                    /* use int regs firstly if available */
+                    if (n_ints & 1)
+                        n_ints++;
+                    ints += 2;
+                }
+                else {
+                    /* 64-bit data in stack must be 8 bytes aligned in riscv32 */
+                    if (n_stacks & 1)
+                        n_stacks++;
+                    n_stacks += 2;
+                }
+#endif /* BUILD_TARGET_RISCV32_ILP32D */
             default:
                 bh_assert(0);
                 break;
@@ -2796,7 +2856,14 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
             n_stacks++;
     }
 
+#if !defined(BUILD_TARGET_RISCV32_ILP32) && !defined(BUILD_TARGET_RISCV32_ILP32D)
     argc1 = MAX_REG_INTS + MAX_REG_FLOATS + n_stacks;
+#elif defined(BUILD_TARGET_RISCV32_ILP32)
+    argc1 = MAX_REG_INTS + n_stacks;
+#else
+    argc1 = MAX_REG_INTS + MAX_REG_FLOATS * 2 + n_stacks;
+#endif
+
     if (argc1 > sizeof(argv_buf) / sizeof(uint32)) {
         size = sizeof(uint32) * (uint32)argc1;
         if (!(argv1 = runtime_malloc((uint32)size, exec_env->module_inst,
@@ -2806,8 +2873,15 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
     }
 
     ints = argv1;
+#if !defined(BUILD_TARGET_RISCV32_ILP32) && !defined(BUILD_TARGET_RISCV32_ILP32D)
     fps = ints + MAX_REG_INTS;
     stacks = fps + MAX_REG_FLOATS;
+#elif defined(BUILD_TARGET_RISCV32_ILP32)
+    stacks = ints + MAX_REG_INTS;
+#else
+    fps = ints + MAX_REG_INTS;
+    stacks = fps + MAX_REG_FLOATS * 2;
+#endif
 
     n_ints = 0;
     n_fps = 0;
@@ -2854,45 +2928,121 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
                 break;
             }
             case VALUE_TYPE_I64:
+            {
                 if (n_ints < MAX_REG_INTS - 1) {
+#if !defined(BUILD_TARGET_RISCV32_ILP32) && !defined(BUILD_TARGET_RISCV32_ILP32D)
                     /* 64-bit data must be 8 bytes aligned in arm */
                     if (n_ints & 1)
                         n_ints++;
+#endif
                     *(uint64*)&ints[n_ints] = *(uint64*)argv_src;
                     n_ints += 2;
+                    argv_src += 2;
+                }
+#if defined(BUILD_TARGET_RISCV32_ILP32) || defined(BUILD_TARGET_RISCV32_ILP32D)
+                else if (n_ints == MAX_REG_INTS - 1) {
+                    ints[n_ints++] = *argv_src++;
+                    stacks[n_stacks++] = *argv_src++;
                 }
+#endif
                 else {
-                    /* 64-bit data must be 8 bytes aligned in arm */
+                    /* 64-bit data in stack must be 8 bytes aligned
+                       in arm and riscv32 */
                     if (n_stacks & 1)
                         n_stacks++;
                     *(uint64*)&stacks[n_stacks] = *(uint64*)argv_src;
                     n_stacks += 2;
+                    argv_src += 2;
                 }
-                argv_src += 2;
                 break;
+            }
+#if !defined(BUILD_TARGET_RISCV32_ILP32D)
             case VALUE_TYPE_F32:
+            {
                 if (n_fps < MAX_REG_FLOATS)
                     *(float32*)&fps[n_fps++] = *(float32*)argv_src++;
                 else
                     *(float32*)&stacks[n_stacks++] = *(float32*)argv_src++;
                 break;
+            }
             case VALUE_TYPE_F64:
+            {
                 if (n_fps < MAX_REG_FLOATS - 1) {
+#if !defined(BUILD_TARGET_RISCV32_ILP32)
                     /* 64-bit data must be 8 bytes aligned in arm */
                     if (n_fps & 1)
                         n_fps++;
+#endif
                     *(float64*)&fps[n_fps] = *(float64*)argv_src;
                     n_fps += 2;
+                    argv_src += 2;
+                }
+#if defined(BUILD_TARGET_RISCV32_ILP32)
+                else if (n_fps == MAX_REG_FLOATS - 1) {
+                    fps[n_fps++] = *argv_src++;
+                    stacks[n_stacks++] = *argv_src++;
                 }
+#endif
                 else {
                     /* 64-bit data must be 8 bytes aligned in arm */
                     if (n_stacks & 1)
                         n_stacks++;
                     *(float64*)&stacks[n_stacks] = *(float64*)argv_src;
                     n_stacks += 2;
+                    argv_src += 2;
+                }
+                break;
+            }
+#else /* BUILD_TARGET_RISCV32_ILP32D */
+            case VALUE_TYPE_F32:
+            case VALUE_TYPE_F64:
+            {
+                if (n_fps < MAX_REG_FLOATS) {
+                    if (func_type->types[i] == VALUE_TYPE_F32) {
+                        *(float32*)&fps[n_fps * 2] = *(float32*)argv_src++;
+                        /* NaN boxing, the upper bits of a valid NaN-boxed
+                          value must be all 1s. */
+                        fps[n_fps * 2 + 1] = 0xFFFFFFFF;
+                    }
+                    else {
+                        *(float64*)&fps[n_fps * 2] = *(float64*)argv_src;
+                        argv_src += 2;
+                    }
+                    n_fps++;
+                }
+                else if (func_type->types[i] == VALUE_TYPE_F32
+                         && n_ints < MAX_REG_INTS) {
+                    /* use int reg firstly if available */
+                    *(float32*)&ints[n_ints++] = *(float32*)argv_src++;
+                }
+                else if (func_type->types[i] == VALUE_TYPE_F64
+                         && n_ints < MAX_REG_INTS - 1) {
+                    /* use int regs firstly if available */
+                    if (n_ints & 1)
+                        n_ints++;
+                    *(float64*)&ints[n_ints] = *(float64*)argv_src;
+                    n_ints += 2;
+                    argv_src += 2;
+                }
+                else {
+                    /* 64-bit data in stack must be 8 bytes aligned in riscv32 */
+                    if (n_stacks & 1)
+                        n_stacks++;
+                    if (func_type->types[i] == VALUE_TYPE_F32) {
+                        *(float32*)&stacks[n_stacks] = *(float32*)argv_src++;
+                        /* NaN boxing, the upper bits of a valid NaN-boxed
+                          value must be all 1s. */
+                        stacks[n_stacks + 1] = 0xFFFFFFFF;
+                    }
+                    else {
+                        *(float64*)&stacks[n_stacks] = *(float64*)argv_src;
+                        argv_src += 2;
+                    }
+                    n_stacks += 2;
                 }
-                argv_src += 2;
                 break;
+            }
+#endif /* BUILD_TARGET_RISCV32_ILP32D */
             default:
                 bh_assert(0);
                 break;
@@ -2939,7 +3089,10 @@ fail:
         wasm_runtime_free(argv1);
     return ret;
 }
-#endif /* end of defined(BUILD_TARGET_ARM_VFP) || defined(BUILD_TARGET_THUMB_VFP) */
+#endif /* end of defined(BUILD_TARGET_ARM_VFP)
+          || defined(BUILD_TARGET_THUMB_VFP) \
+          || defined(BUILD_TARGET_RISCV32_ILP32D)
+          || defined(BUILD_TARGET_RISCV32_ILP32) */
 
 #if defined(BUILD_TARGET_X86_32) \
     || defined(BUILD_TARGET_ARM) \
@@ -3101,7 +3254,9 @@ fail:
 
 #if defined(BUILD_TARGET_X86_64) \
    || defined(BUILD_TARGET_AMD_64) \
-   || defined(BUILD_TARGET_AARCH64)
+   || defined(BUILD_TARGET_AARCH64) \
+   || defined(BUILD_TARGET_RISCV64_LP64D) \
+   || defined(BUILD_TARGET_RISCV64_LP64)
 
 #if WASM_ENABLE_SIMD != 0
 #ifdef v128
@@ -3138,11 +3293,15 @@ static V128FuncPtr invokeNative_V128 = (V128FuncPtr)(uintptr_t)invokeNative;
 #define MAX_REG_INTS  4
 #else /* else of defined(_WIN32) || defined(_WIN32_) */
 #define MAX_REG_FLOATS  8
-#if defined(BUILD_TARGET_AARCH64)
+#if defined(BUILD_TARGET_AARCH64) \
+    || defined(BUILD_TARGET_RISCV64_LP64D) \
+    || defined(BUILD_TARGET_RISCV64_LP64)
 #define MAX_REG_INTS  8
 #else
 #define MAX_REG_INTS  6
-#endif /* end of defined(BUILD_TARGET_AARCH64 */
+#endif /* end of defined(BUILD_TARGET_AARCH64) \
+          || defined(BUILD_TARGET_RISCV64_LP64D) \
+          || defined(BUILD_TARGET_RISCV64_LP64) */
 #endif /* end of defined(_WIN32) || defined(_WIN32_) */
 
 bool
@@ -3158,13 +3317,17 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
     uint32 result_count = func_type->result_count;
     uint32 ext_ret_count = result_count > 1 ? result_count - 1 : 0;
     bool ret = false;
+#ifndef BUILD_TARGET_RISCV64_LP64
 #if WASM_ENABLE_SIMD == 0
     uint64 *fps;
 #else
     v128 *fps;
 #endif
+#else /* else of BUILD_TARGET_RISCV64_LP64 */
+#define fps ints
+#endif /* end of BUILD_TARGET_RISCV64_LP64 */
 
-#if defined(_WIN32) || defined(_WIN32_)
+#if defined(_WIN32) || defined(_WIN32_) || defined(BUILD_TARGET_RISCV64_LP64)
     /* important difference in calling conventions */
 #define n_fps n_ints
 #else
@@ -3186,6 +3349,7 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
         }
     }
 
+#ifndef BUILD_TARGET_RISCV64_LP64
 #if WASM_ENABLE_SIMD == 0
     fps = argv1;
     ints = fps + MAX_REG_FLOATS;
@@ -3193,6 +3357,9 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
     fps = (v128 *)argv1;
     ints = (uint64 *)(fps + MAX_REG_FLOATS);
 #endif
+#else /* else of BUILD_TARGET_RISCV64_LP64 */
+    ints = argv1;
+#endif /* end of BUILD_TARGET_RISCV64_LP64 */
     stacks = ints + MAX_REG_INTS;
 
     ints[n_ints++] = (uint64)(uintptr_t)exec_env;
@@ -3326,7 +3493,9 @@ fail:
 
 #endif /* end of defined(BUILD_TARGET_X86_64) \
                  || defined(BUILD_TARGET_AMD_64) \
-                 || defined(BUILD_TARGET_AARCH64) */
+                 || defined(BUILD_TARGET_AARCH64) \
+                 || defined(BUILD_TARGET_RISCV64_LP64D) \
+                 || defined(BUILD_TARGET_RISCV64_LP64) */
 
 bool
 wasm_runtime_call_indirect(WASMExecEnv *exec_env,

+ 1 - 0
core/iwasm/interpreter/wasm_runtime.c

@@ -294,6 +294,7 @@ memory_instantiate(WASMModuleInstance *module_inst,
         }
     }
 #endif
+    LOG_VERBOSE("Memory instantiate success.");
     return memory;
 #if WASM_ENABLE_SHARED_MEMORY != 0
 fail5:

+ 1 - 1
core/shared/platform/zephyr/zephyr_platform.c

@@ -108,7 +108,7 @@ os_vprintf(const char *fmt, va_list ap)
 {
 #if 0
     struct out_context ctx = { 0 };
-    z_vprintk(char_out, &ctx, fmt, ap);
+    cbvprintf(char_out, &ctx, fmt, ap);
     return ctx.count;
 #else
     vprintk(fmt, ap);

+ 4 - 1
doc/build_wamr.md

@@ -19,7 +19,10 @@ The script `runtime_lib.cmake` defines a number of variables for configuring the
 
 - **WAMR_BUILD_PLATFORM**:  set the target platform. It can be set to any platform name (folder name) under folder [core/shared/platform](../core/shared/platform).
 
-- **WAMR_BUILD_TARGET**: set the target CPU architecture. Current supported targets are:  X86_64, X86_32, AArch64, ARM, THUMB, XTENSA and MIPS. For AArch64, ARM and THUMB, the format is \<arch>\[\<sub-arch>]\[_VFP] where \<sub-arch> is the ARM sub-architecture and the "_VFP" suffix means VFP coprocessor registers s0-s15 (d0-d7) are used for passing arguments or returning results in standard procedure-call. Both \<sub-arch> and "_VFP" are optional, e.g. AARCH64, AARCH64V8, AARCHV8.1, ARMV7, ARMV7_VFP, THUMBV7, THUMBV7_VFP and so on.
+- **WAMR_BUILD_TARGET**: set the target CPU architecture. Current supported targets are:  X86_64, X86_32, AARCH64, ARM, THUMB, XTENSA, RISCV64 and MIPS.
+  - For AARCH64, ARM and THUMB, the format is \<arch>\[\<sub-arch>]\[_VFP], where \<sub-arch> is the ARM sub-architecture and the "_VFP" suffix means using VFP coprocessor registers s0-s15 (d0-d7) for passing arguments or returning results in standard procedure-call. Both \<sub-arch> and "_VFP" are optional, e.g. AARCH64, AARCH64V8, AARCHV8.1, ARMV7, ARMV7_VFP, THUMBV7, THUMBV7_VFP and so on.
+  - For RISCV64, the format is \<arch\>[_abi], where "_abi" is optional. Currently the supported formats are RISCV64, RISCV64_LP64D and RISCV64_LP64: RISCV64 and RISCV64_LP64D are identical, using [LP64D](https://github.com/riscv/riscv-elf-psabi-doc/blob/master/riscv-elf.md#-named-abis) as the ABI (LP64 with the hardware floating-point calling convention for FLEN=64), while RISCV64_LP64 uses [LP64](https://github.com/riscv/riscv-elf-psabi-doc/blob/master/riscv-elf.md#-named-abis) as the ABI (integer calling convention only; the hardware floating-point calling convention is not used).
+  - For RISCV32, the format is \<arch\>[_abi], where "_abi" is optional. Currently the supported formats are RISCV32, RISCV32_ILP32D and RISCV32_ILP32: RISCV32 and RISCV32_ILP32D are identical, using [ILP32D](https://github.com/riscv/riscv-elf-psabi-doc/blob/master/riscv-elf.md#-named-abis) as the ABI (ILP32 with the hardware floating-point calling convention for FLEN=64), while RISCV32_ILP32 uses [ILP32](https://github.com/riscv/riscv-elf-psabi-doc/blob/master/riscv-elf.md#-named-abis) as the ABI (integer calling convention only; the hardware floating-point calling convention is not used).
 
 ```bash
 cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM

+ 4 - 2
product-mini/platforms/darwin/CMakeLists.txt

@@ -12,11 +12,13 @@ set (CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
 set (CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
 
 # Set WAMR_BUILD_TARGET, currently values supported:
-# "X86_64", "AMD_64", "X86_32", "ARM[sub]", "THUMB[sub]", "MIPS", "XTENSA"
+# "X86_64", "AMD_64", "X86_32", "AARCH64[sub]", "ARM[sub]", "THUMB[sub]",
+# "MIPS", "XTENSA", "RISCV64[sub]", "RISCV32[sub]"
 if (NOT DEFINED WAMR_BUILD_TARGET)
   if (CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
-    # Handle Apple Silicon
     set (WAMR_BUILD_TARGET "AARCH64")
+  elseif (CMAKE_SYSTEM_PROCESSOR STREQUAL "riscv64")
+    set (WAMR_BUILD_TARGET "RISCV64")
   elseif (CMAKE_SIZEOF_VOID_P EQUAL 8)
     # Build as X86_64 by default in 64-bit platform
     set (WAMR_BUILD_TARGET "X86_64")

+ 7 - 2
product-mini/platforms/linux/CMakeLists.txt

@@ -15,9 +15,14 @@ set (CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
 set (CMAKE_C_STANDARD 99)
 
 # Set WAMR_BUILD_TARGET, currently values supported:
-# "X86_64", "AMD_64", "X86_32", "AARCH64[sub]", "ARM[sub]", "THUMB[sub]", "MIPS", "XTENSA"
+# "X86_64", "AMD_64", "X86_32", "AARCH64[sub]", "ARM[sub]", "THUMB[sub]",
+# "MIPS", "XTENSA", "RISCV64[sub]", "RISCV32[sub]"
 if (NOT DEFINED WAMR_BUILD_TARGET)
-  if (CMAKE_SIZEOF_VOID_P EQUAL 8)
+  if (CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+    set (WAMR_BUILD_TARGET "AARCH64")
+  elseif (CMAKE_SYSTEM_PROCESSOR STREQUAL "riscv64")
+    set (WAMR_BUILD_TARGET "RISCV64")
+  elseif (CMAKE_SIZEOF_VOID_P EQUAL 8)
     # Build as X86_64 by default in 64-bit platform
     set (WAMR_BUILD_TARGET "X86_64")
   else ()

+ 4 - 0
product-mini/platforms/zephyr/simple/CMakeLists.txt

@@ -36,6 +36,10 @@ if (NOT DEFINED WAMR_BUILD_LIBC_WASI)
   set (WAMR_BUILD_LIBC_WASI 0)
 endif ()
 
+if (WAMR_BUILD_TARGET STREQUAL "RISCV64_LP64" OR WAMR_BUILD_TARGET STREQUAL "RISCV32_ILP32")
+  set (WAMR_BUILD_FAST_INTERP 1)
+endif ()
+
 set (WAMR_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/wamr)
 
 include (${WAMR_ROOT_DIR}/build-scripts/runtime_lib.cmake)

+ 21 - 1
product-mini/platforms/zephyr/simple/build_and_run.sh

@@ -8,17 +8,21 @@ STM32_TARGET="stm32"
 QEMU_CORTEX_A53="qemu_cortex_a53"
 XTENSA_QEMU_TARGET="xtensa-qemu"
 ESP32_TARGET="esp32"
+QEMU_RISCV64_TARGET="qemu_riscv64"
+QEMU_RISCV32_TARGET="qemu_riscv32"
 
 usage ()
 {
         echo "USAGE:"
-        echo "$0 $X86_TARGET|$STM32_TARGET|$QEMU_CORTEX_A53|$XTENSA_QEMU_TARGET|$ESP32_TARGET"
+        echo "$0 $X86_TARGET|$STM32_TARGET|$QEMU_CORTEX_A53|$XTENSA_QEMU_TARGET|$ESP32_TARGET|$QEMU_RISCV64_TARGET|$QEMU_RISCV32_TARGET"
         echo "Example:"
         echo "        $0 $X86_TARGET"
         echo "        $0 $STM32_TARGET"
         echo "        $0 $QEMU_CORTEX_A53"
         echo "        $0 $XTENSA_QEMU_TARGET"
         echo "        $0 $ESP32_TARGET"
+        echo "        $0 $QEMU_RISCV64_TARGET"
+        echo "        $0 $QEMU_RISCV32_TARGET"
         exit 1
 }
 
@@ -68,6 +72,22 @@ case $TARGET in
                            -DWAMR_BUILD_TARGET=AARCH64
                 west build -t run
                 ;;
+        $QEMU_RISCV64_TARGET)
+                west build -b qemu_riscv64 \
+                            . -p always -- \
+                            -DCONF_FILE=prj_qemu_riscv64.conf \
+                            -DWAMR_BUILD_TARGET=RISCV64_LP64 \
+                            -DWAMR_BUILD_AOT=0
+                west build -t run
+                ;;
+        $QEMU_RISCV32_TARGET)
+                west build -b qemu_riscv32 \
+                            . -p always -- \
+                            -DCONF_FILE=prj_qemu_riscv32.conf \
+                            -DWAMR_BUILD_TARGET=RISCV32_ILP32 \
+                            -DWAMR_BUILD_AOT=0
+                west build -t run
+                ;;
         *)
                 echo "unsupported target: $TARGET"
                 usage

+ 6 - 0
product-mini/platforms/zephyr/simple/prj_qemu_riscv32.conf

@@ -0,0 +1,6 @@
+# Copyright (C) 2019 Intel Corporation.  All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+CONFIG_STACK_SENTINEL=y
+CONFIG_PRINTK=y
+CONFIG_LOG=n

+ 6 - 0
product-mini/platforms/zephyr/simple/prj_qemu_riscv64.conf

@@ -0,0 +1,6 @@
+# Copyright (C) 2019 Intel Corporation.  All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+CONFIG_STACK_SENTINEL=y
+CONFIG_PRINTK=y
+CONFIG_LOG=n

+ 19 - 1
product-mini/platforms/zephyr/simple/src/main.c

@@ -9,11 +9,28 @@
 #include "bh_assert.h"
 #include "bh_log.h"
 #include "wasm_export.h"
+#if defined(BUILD_TARGET_RISCV64_LP64) || defined(BUILD_TARGET_RISCV32_ILP32)
+#include "test_wasm_riscv64.h"
+#else
 #include "test_wasm.h"
+#endif /* end of BUILD_TARGET_RISCV64_LP64 || BUILD_TARGET_RISCV32_ILP32 */
 
 #include <zephyr.h>
 #include <sys/printk.h>
 
+#if defined(BUILD_TARGET_RISCV64_LP64) || defined(BUILD_TARGET_RISCV32_ILP32)
+#if defined(BUILD_TARGET_RISCV64_LP64)
+#define CONFIG_GLOBAL_HEAP_BUF_SIZE 4360
+#define CONFIG_APP_STACK_SIZE 288
+#define CONFIG_MAIN_THREAD_STACK_SIZE 2400
+#else
+#define CONFIG_GLOBAL_HEAP_BUF_SIZE 5120
+#define CONFIG_APP_STACK_SIZE 512
+#define CONFIG_MAIN_THREAD_STACK_SIZE 4096
+#endif
+#define CONFIG_APP_HEAP_SIZE 256
+#else /* else of BUILD_TARGET_RISCV64_LP64 || BUILD_TARGET_RISCV32_ILP32 */
+
 #define CONFIG_GLOBAL_HEAP_BUF_SIZE 131072
 #define CONFIG_APP_STACK_SIZE 8192
 #define CONFIG_APP_HEAP_SIZE 8192
@@ -24,6 +41,8 @@
 #define CONFIG_MAIN_THREAD_STACK_SIZE 4096
 #endif
 
+#endif /* end of BUILD_TARGET_RISCV64_LP64 || BUILD_TARGET_RISCV32_ILP32 */
+
 static int app_argc;
 static char **app_argv;
 
@@ -120,7 +139,6 @@ void iwasm_main(void *arg1, void *arg2, void *arg3)
     (void) arg2;
     (void) arg3;
 
-
     memset(&init_args, 0, sizeof(RuntimeInitArgs));
 
     init_args.mem_alloc_type = Alloc_With_Pool;

+ 41 - 0
product-mini/platforms/zephyr/simple/src/test_wasm_riscv64.h

@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2019 Intel Corporation.  All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+unsigned char __aligned(4) wasm_test_file[] = {
+  0x00, 0x61, 0x73, 0x6D, 0x01, 0x00, 0x00, 0x00, 0x01, 0x10, 0x03, 0x60,
+  0x01, 0x7F, 0x01, 0x7F, 0x60, 0x02, 0x7F, 0x7F, 0x01, 0x7F, 0x60, 0x01,
+  0x7F, 0x00, 0x02, 0x31, 0x04, 0x03, 0x65, 0x6E, 0x76, 0x04, 0x70, 0x75,
+  0x74, 0x73, 0x00, 0x00, 0x03, 0x65, 0x6E, 0x76, 0x06, 0x6D, 0x61, 0x6C,
+  0x6C, 0x6F, 0x63, 0x00, 0x00, 0x03, 0x65, 0x6E, 0x76, 0x06, 0x70, 0x72,
+  0x69, 0x6E, 0x74, 0x66, 0x00, 0x01, 0x03, 0x65, 0x6E, 0x76, 0x04, 0x66,
+  0x72, 0x65, 0x65, 0x00, 0x02, 0x03, 0x02, 0x01, 0x01, 0x04, 0x05, 0x01,
+  0x70, 0x01, 0x01, 0x01, 0x05, 0x03, 0x01, 0x00, 0x01, 0x06, 0x12, 0x03,
+  0x7F, 0x01, 0x41, 0xC0, 0x01, 0x0B, 0x7F, 0x00, 0x41, 0x3A, 0x0B, 0x7F,
+  0x00, 0x41, 0xC0, 0x01, 0x0B, 0x07, 0x2C, 0x04, 0x06, 0x6D, 0x65, 0x6D,
+  0x6F, 0x72, 0x79, 0x02, 0x00, 0x04, 0x6D, 0x61, 0x69, 0x6E, 0x00, 0x04,
+  0x0A, 0x5F, 0x5F, 0x64, 0x61, 0x74, 0x61, 0x5F, 0x65, 0x6E, 0x64, 0x03,
+  0x01, 0x0B, 0x5F, 0x5F, 0x68, 0x65, 0x61, 0x70, 0x5F, 0x62, 0x61, 0x73,
+  0x65, 0x03, 0x02, 0x0A, 0xB1, 0x01, 0x01, 0xAE, 0x01, 0x01, 0x03, 0x7F,
+  0x23, 0x80, 0x80, 0x80, 0x80, 0x00, 0x41, 0x20, 0x6B, 0x22, 0x02, 0x24,
+  0x80, 0x80, 0x80, 0x80, 0x00, 0x41, 0x9B, 0x80, 0x80, 0x80, 0x00, 0x10,
+  0x80, 0x80, 0x80, 0x80, 0x00, 0x1A, 0x02, 0x40, 0x02, 0x40, 0x41, 0x10,
+  0x10, 0x81, 0x80, 0x80, 0x80, 0x00, 0x22, 0x03, 0x0D, 0x00, 0x41, 0xA8,
+  0x80, 0x80, 0x80, 0x00, 0x10, 0x80, 0x80, 0x80, 0x80, 0x00, 0x1A, 0x41,
+  0x7F, 0x21, 0x04, 0x0C, 0x01, 0x0B, 0x20, 0x02, 0x20, 0x03, 0x36, 0x02,
+  0x10, 0x41, 0x80, 0x80, 0x80, 0x80, 0x00, 0x20, 0x02, 0x41, 0x10, 0x6A,
+  0x10, 0x82, 0x80, 0x80, 0x80, 0x00, 0x1A, 0x41, 0x00, 0x21, 0x04, 0x20,
+  0x03, 0x41, 0x04, 0x6A, 0x41, 0x00, 0x2F, 0x00, 0x91, 0x80, 0x80, 0x80,
+  0x00, 0x3B, 0x00, 0x00, 0x20, 0x03, 0x41, 0x00, 0x28, 0x00, 0x8D, 0x80,
+  0x80, 0x80, 0x00, 0x36, 0x00, 0x00, 0x20, 0x02, 0x20, 0x03, 0x36, 0x02,
+  0x00, 0x41, 0x93, 0x80, 0x80, 0x80, 0x00, 0x20, 0x02, 0x10, 0x82, 0x80,
+  0x80, 0x80, 0x00, 0x1A, 0x20, 0x03, 0x10, 0x83, 0x80, 0x80, 0x80, 0x00,
+  0x0B, 0x20, 0x02, 0x41, 0x20, 0x6A, 0x24, 0x80, 0x80, 0x80, 0x80, 0x00,
+  0x20, 0x04, 0x0B, 0x0B, 0x40, 0x01, 0x00, 0x41, 0x00, 0x0B, 0x3A, 0x62,
+  0x75, 0x66, 0x20, 0x70, 0x74, 0x72, 0x3A, 0x20, 0x25, 0x70, 0x0A, 0x00,
+  0x31, 0x32, 0x33, 0x34, 0x0A, 0x00, 0x62, 0x75, 0x66, 0x3A, 0x20, 0x25,
+  0x73, 0x00, 0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x20, 0x77, 0x6F, 0x72, 0x6C,
+  0x64, 0x21, 0x00, 0x6D, 0x61, 0x6C, 0x6C, 0x6F, 0x63, 0x20, 0x62, 0x75,
+  0x66, 0x20, 0x66, 0x61, 0x69, 0x6C, 0x65, 0x64, 0x00
+};

+ 27 - 0
product-mini/platforms/zephyr/simple/src/wasm-app-riscv64/build.sh

@@ -0,0 +1,27 @@
+# Copyright (C) 2019 Intel Corporation.  All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+WAMR_DIR=${PWD}/../../..
+
+echo "Build wasm app .."
+/opt/wasi-sdk/bin/clang -O3 \
+        -z stack-size=128 -Wl,--initial-memory=65536 \
+        -Wl,--global-base=0 \
+        -o test.wasm main.c \
+        -Wl,--export=main -Wl,--export=__main_argc_argv \
+        -Wl,--export=__data_end -Wl,--export=__heap_base \
+        -Wl,--strip-all,--no-entry \
+        -Wl,--allow-undefined \
+        -nostdlib
+
+echo "Build binarydump tool .."
+rm -fr build && mkdir build && cd build
+cmake ../../../../../../../test-tools/binarydump-tool
+make
+cd ..
+
+echo "Generate test_wasm.h .."
+./build/binarydump -o test_wasm.h -n wasm_test_file test.wasm
+cp -a test_wasm.h ../test_wasm_riscv64.h
+
+echo "Done"

+ 28 - 0
product-mini/platforms/zephyr/simple/src/wasm-app-riscv64/main.c

@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2019 Intel Corporation.  All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char **argv)
+{
+    char *buf;
+
+    printf("Hello world!\n");
+
+    buf = malloc(16);
+    if (!buf) {
+        printf("malloc buf failed\n");
+        return -1;
+    }
+
+    printf("buf ptr: %p\n", buf);
+
+    snprintf(buf, 16, "%s", "1234\n");
+    printf("buf: %s", buf);
+
+    free(buf);
+    return 0;
+}