Просмотр исходного кода

Zephyr User Mode Support (#3650)

Add support for the [Zephyr Usermode](https://docs.zephyrproject.org/latest/kernel/usermode/index.html)
to the Zephyr port.

The following changes are applied:
- Fix `signbit`: check whether it is already defined, and provide our own implementation only if it is not
- Use `sys_mutex` and `sys_sem` instead of `k_mutex` and `k_sem` when `CONFIG_USERSPACE` is enabled
- Skip the installation of the `_stdout_hook_iwasm()` when `CONFIG_USERSPACE` is enabled; otherwise this
  causes MPU faults, since the stdout hook lives in kernel space
- Add a thread name for debugging
mxsdlr 1 год назад
Родитель
Commit
4c5f35b0e3

+ 43 - 4
core/shared/platform/zephyr/platform_internal.h

@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2019 Intel Corporation.  All rights reserved.
+ * SPDX-FileCopyrightText: 2024 Siemens AG (For Zephyr usermode changes)
  * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  */
 
@@ -18,7 +19,6 @@
 #include <misc/printk.h>
 #endif
 #else /* else of KERNEL_VERSION_NUMBER < 0x030200 */
-#include <zephyr/kernel.h>
 #include <zephyr/sys/printk.h>
 #endif /* end of KERNEL_VERSION_NUMBER < 0x030200 */
 
@@ -37,12 +37,14 @@
 #endif
 
 #if KERNEL_VERSION_NUMBER < 0x030200 /* version 3.2.0 */
+#include <zephyr.h>
 #include <net/net_pkt.h>
 #include <net/net_if.h>
 #include <net/net_ip.h>
 #include <net/net_core.h>
 #include <net/net_context.h>
 #else /* else of KERNEL_VERSION_NUMBER < 0x030200 */
+#include <zephyr/kernel.h>
 #include <zephyr/net/net_pkt.h>
 #include <zephyr/net/net_if.h>
 #include <zephyr/net/net_ip.h>
@@ -50,6 +52,11 @@
 #include <zephyr/net/net_context.h>
 #endif /* end of KERNEL_VERSION_NUMBER < 0x030200 */
 
+#ifdef CONFIG_USERSPACE
+#include <zephyr/sys/mutex.h>
+#include <zephyr/sys/sem.h>
+#endif /* end of CONFIG_USERSPACE */
+
 #if KERNEL_VERSION_NUMBER >= 0x030300 /* version 3.3.0 */
 #include <zephyr/cache.h>
 #endif /* end of KERNEL_VERSION_NUMBER > 0x030300 */
@@ -64,10 +71,39 @@
 #endif
 #endif
 
+#ifdef signbit /* probably since Zephyr v3.5.0 a new picolib is included */
+#define BH_HAS_SIGNBIT 1
+#endif
+
 #ifndef BH_PLATFORM_ZEPHYR
 #define BH_PLATFORM_ZEPHYR
 #endif
 
+// Synchronization primitives for usermode
+#ifdef CONFIG_USERSPACE
+#define mutex_t struct sys_mutex
+#define mutex_init(mtx) sys_mutex_init(mtx)
+#define mutex_lock(mtx, timeout) sys_mutex_lock(mtx, timeout)
+#define mutex_unlock(mtx) sys_mutex_unlock(mtx)
+
+#define sem_t struct sys_sem
+#define sem_init(sem, init_count, limit) sys_sem_init(sem, init_count, limit)
+#define sem_give(sem) sys_sem_give(sem)
+#define sem_take(sem, timeout) sys_sem_take(sem, timeout)
+#define sem_count_get(sem) sys_sem_count_get(sem)
+#else /* else of CONFIG_USERSPACE */
+#define mutex_t struct k_mutex
+#define mutex_init(mtx) k_mutex_init(mtx)
+#define mutex_lock(mtx, timeout) k_mutex_lock(mtx, timeout)
+#define mutex_unlock(mtx) k_mutex_unlock(mtx)
+
+#define sem_t struct k_sem
+#define sem_init(sem, init_count, limit) k_sem_init(sem, init_count, limit)
+#define sem_give(sem) k_sem_give(sem)
+#define sem_take(sem, timeout) k_sem_take(sem, timeout)
+#define sem_count_get(sem) k_sem_count_get(sem)
+#endif /* end of CONFIG_USERSPACE */
+
 #define BH_APPLET_PRESERVED_STACK_SIZE (2 * BH_KB)
 
 /* Default thread priority */
@@ -75,7 +111,7 @@
 
 typedef struct k_thread korp_thread;
 typedef korp_thread *korp_tid;
-typedef struct k_mutex korp_mutex;
+typedef mutex_t korp_mutex;
 typedef unsigned int korp_sem;
 
 /* korp_rwlock is used in platform_api_extension.h,
@@ -87,7 +123,7 @@ typedef struct {
 struct os_thread_wait_node;
 typedef struct os_thread_wait_node *os_thread_wait_list;
 typedef struct korp_cond {
-    struct k_mutex wait_list_lock;
+    mutex_t wait_list_lock;
     os_thread_wait_list thread_wait_list;
 } korp_cond;
 
@@ -120,11 +156,14 @@ float fmaxf(float x, float y);
 float rintf(float x);
 float fabsf(float x);
 float truncf(float x);
-int signbit(double x);
 int isnan(double x);
 double pow(double x, double y);
 double scalbn(double x, int n);
 
+#ifndef BH_HAS_SIGNBIT
+int signbit(double x);
+#endif
+
 unsigned long long int strtoull(const char *nptr, char **endptr, int base);
 double strtod(const char *nptr, char **endptr);
 float strtof(const char *nptr, char **endptr);

+ 5 - 0
core/shared/platform/zephyr/zephyr_platform.c

@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2019 Intel Corporation.  All rights reserved.
+ * SPDX-FileCopyrightText: 2024 Siemens AG (For Zephyr usermode changes)
  * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  */
 
@@ -35,12 +36,14 @@ disable_mpu_rasr_xn(void)
 #endif /* end of CONFIG_ARM_MPU */
 #endif
 
+#ifndef CONFIG_USERSPACE
 static int
 _stdout_hook_iwasm(int c)
 {
     printk("%c", (char)c);
     return 1;
 }
+#endif
 
 int
 os_thread_sys_init();
@@ -51,9 +54,11 @@ os_thread_sys_destroy();
 int
 bh_platform_init()
 {
+#ifndef CONFIG_USERSPACE
     extern void __stdout_hook_install(int (*hook)(int));
     /* Enable printf() in Zephyr */
     __stdout_hook_install(_stdout_hook_iwasm);
+#endif
 
 #if WASM_ENABLE_AOT != 0
 #ifdef CONFIG_ARM_MPU

+ 57 - 54
core/shared/platform/zephyr/zephyr_thread.c

@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2019 Intel Corporation.  All rights reserved.
+ * SPDX-FileCopyrightText: 2024 Siemens AG (For Zephyr usermode changes)
  * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  */
 
@@ -33,22 +34,22 @@
 static K_THREAD_STACK_ARRAY_DEFINE(mpu_stacks, BH_ZEPHYR_MPU_STACK_COUNT,
                                    BH_ZEPHYR_MPU_STACK_SIZE);
 static bool mpu_stack_allocated[BH_ZEPHYR_MPU_STACK_COUNT];
-static struct k_mutex mpu_stack_lock;
+static mutex_t mpu_stack_lock;
 
 static char *
 mpu_stack_alloc()
 {
     int i;
 
-    k_mutex_lock(&mpu_stack_lock, K_FOREVER);
+    mutex_lock(&mpu_stack_lock, K_FOREVER);
     for (i = 0; i < BH_ZEPHYR_MPU_STACK_COUNT; i++) {
         if (!mpu_stack_allocated[i]) {
             mpu_stack_allocated[i] = true;
-            k_mutex_unlock(&mpu_stack_lock);
+            mutex_unlock(&mpu_stack_lock);
             return (char *)mpu_stacks[i];
         }
     }
-    k_mutex_unlock(&mpu_stack_lock);
+    mutex_unlock(&mpu_stack_lock);
     return NULL;
 }
 
@@ -57,17 +58,17 @@ mpu_stack_free(char *stack)
 {
     int i;
 
-    k_mutex_lock(&mpu_stack_lock, K_FOREVER);
+    mutex_lock(&mpu_stack_lock, K_FOREVER);
     for (i = 0; i < BH_ZEPHYR_MPU_STACK_COUNT; i++) {
         if ((char *)mpu_stacks[i] == stack)
             mpu_stack_allocated[i] = false;
     }
-    k_mutex_unlock(&mpu_stack_lock);
+    mutex_unlock(&mpu_stack_lock);
 }
 #endif
 
 typedef struct os_thread_wait_node {
-    struct k_sem sem;
+    sem_t sem;
     os_thread_wait_list next;
 } os_thread_wait_node;
 
@@ -79,7 +80,7 @@ typedef struct os_thread_data {
     /* Jeff thread local root */
     void *tlr;
     /* Lock for waiting list */
-    struct k_mutex wait_list_lock;
+    mutex_t wait_list_lock;
     /* Waiting list of other threads who are joining this thread */
     os_thread_wait_list thread_wait_list;
     /* Thread stack size */
@@ -106,13 +107,13 @@ static bool is_thread_sys_inited = false;
 static os_thread_data supervisor_thread_data;
 
 /* Lock for thread data list */
-static struct k_mutex thread_data_lock;
+static mutex_t thread_data_lock;
 
 /* Thread data list */
 static os_thread_data *thread_data_list = NULL;
 
 /* Lock for thread object list */
-static struct k_mutex thread_obj_lock;
+static mutex_t thread_obj_lock;
 
 /* Thread object list */
 static os_thread_obj *thread_obj_list = NULL;
@@ -120,7 +121,7 @@ static os_thread_obj *thread_obj_list = NULL;
 static void
 thread_data_list_add(os_thread_data *thread_data)
 {
-    k_mutex_lock(&thread_data_lock, K_FOREVER);
+    mutex_lock(&thread_data_lock, K_FOREVER);
     if (!thread_data_list)
         thread_data_list = thread_data;
     else {
@@ -128,7 +129,7 @@ thread_data_list_add(os_thread_data *thread_data)
         os_thread_data *p = thread_data_list;
         while (p) {
             if (p == thread_data) {
-                k_mutex_unlock(&thread_data_lock);
+                mutex_unlock(&thread_data_lock);
                 return;
             }
             p = p->next;
@@ -138,13 +139,13 @@ thread_data_list_add(os_thread_data *thread_data)
         thread_data->next = thread_data_list;
         thread_data_list = thread_data;
     }
-    k_mutex_unlock(&thread_data_lock);
+    mutex_unlock(&thread_data_lock);
 }
 
 static void
 thread_data_list_remove(os_thread_data *thread_data)
 {
-    k_mutex_lock(&thread_data_lock, K_FOREVER);
+    mutex_lock(&thread_data_lock, K_FOREVER);
     if (thread_data_list) {
         if (thread_data_list == thread_data)
             thread_data_list = thread_data_list->next;
@@ -157,32 +158,32 @@ thread_data_list_remove(os_thread_data *thread_data)
                 p->next = p->next->next;
         }
     }
-    k_mutex_unlock(&thread_data_lock);
+    mutex_unlock(&thread_data_lock);
 }
 
 static os_thread_data *
 thread_data_list_lookup(k_tid_t tid)
 {
-    k_mutex_lock(&thread_data_lock, K_FOREVER);
+    mutex_lock(&thread_data_lock, K_FOREVER);
     if (thread_data_list) {
         os_thread_data *p = thread_data_list;
         while (p) {
             if (p->tid == tid) {
                 /* Found */
-                k_mutex_unlock(&thread_data_lock);
+                mutex_unlock(&thread_data_lock);
                 return p;
             }
             p = p->next;
         }
     }
-    k_mutex_unlock(&thread_data_lock);
+    mutex_unlock(&thread_data_lock);
     return NULL;
 }
 
 static void
 thread_obj_list_add(os_thread_obj *thread_obj)
 {
-    k_mutex_lock(&thread_obj_lock, K_FOREVER);
+    mutex_lock(&thread_obj_lock, K_FOREVER);
     if (!thread_obj_list)
         thread_obj_list = thread_obj;
     else {
@@ -190,14 +191,14 @@ thread_obj_list_add(os_thread_obj *thread_obj)
         thread_obj->next = thread_obj_list;
         thread_obj_list = thread_obj;
     }
-    k_mutex_unlock(&thread_obj_lock);
+    mutex_unlock(&thread_obj_lock);
 }
 
 static void
 thread_obj_list_reclaim()
 {
     os_thread_obj *p, *p_prev;
-    k_mutex_lock(&thread_obj_lock, K_FOREVER);
+    mutex_lock(&thread_obj_lock, K_FOREVER);
     p_prev = NULL;
     p = thread_obj_list;
     while (p) {
@@ -218,7 +219,7 @@ thread_obj_list_reclaim()
             p = p->next;
         }
     }
-    k_mutex_unlock(&thread_obj_lock);
+    mutex_unlock(&thread_obj_lock);
 }
 
 int
@@ -228,10 +229,10 @@ os_thread_sys_init()
         return BHT_OK;
 
 #if BH_ENABLE_ZEPHYR_MPU_STACK != 0
-    k_mutex_init(&mpu_stack_lock);
+    mutex_init(&mpu_stack_lock);
 #endif
-    k_mutex_init(&thread_data_lock);
-    k_mutex_init(&thread_obj_lock);
+    mutex_init(&thread_data_lock);
+    mutex_init(&thread_obj_lock);
 
     /* Initialize supervisor thread data */
     memset(&supervisor_thread_data, 0, sizeof(supervisor_thread_data));
@@ -264,19 +265,19 @@ os_thread_cleanup(void)
     os_thread_data *thread_data = thread_data_current();
 
     bh_assert(thread_data != NULL);
-    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
+    mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
     if (thread_data->thread_wait_list) {
         /* Signal each joining thread */
         os_thread_wait_list head = thread_data->thread_wait_list;
         while (head) {
             os_thread_wait_list next = head->next;
-            k_sem_give(&head->sem);
+            sem_give(&head->sem);
             /* head will be freed by joining thread */
             head = next;
         }
         thread_data->thread_wait_list = NULL;
     }
-    k_mutex_unlock(&thread_data->wait_list_lock);
+    mutex_unlock(&thread_data->wait_list_lock);
 
     thread_data_list_remove(thread_data);
     /* Set flag to true for the next thread creating to
@@ -341,7 +342,7 @@ os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
     }
 
     memset(thread_data, 0, thread_data_size);
-    k_mutex_init(&thread_data->wait_list_lock);
+    mutex_init(&thread_data->wait_list_lock);
     thread_data->stack_size = stack_size;
     thread_data->tid = tid;
 
@@ -360,6 +361,8 @@ os_thread_create_with_prio(korp_tid *p_tid, thread_start_routine_t start,
 
     bh_assert(tid == thread_data->tid);
 
+    k_thread_name_set(tid, "wasm-zephyr");
+
     /* Set thread custom data */
     thread_data_list_add(thread_data);
     thread_obj_list_add((os_thread_obj *)tid);
@@ -394,14 +397,14 @@ os_thread_join(korp_tid thread, void **value_ptr)
     if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
         return BHT_ERROR;
 
-    k_sem_init(&node->sem, 0, 1);
+    sem_init(&node->sem, 0, 1);
     node->next = NULL;
 
     /* Get thread data */
     thread_data = thread_data_list_lookup(thread);
     bh_assert(thread_data != NULL);
 
-    k_mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
+    mutex_lock(&thread_data->wait_list_lock, K_FOREVER);
     if (!thread_data->thread_wait_list)
         thread_data->thread_wait_list = node;
     else {
@@ -411,10 +414,10 @@ os_thread_join(korp_tid thread, void **value_ptr)
             p = p->next;
         p->next = node;
     }
-    k_mutex_unlock(&thread_data->wait_list_lock);
+    mutex_unlock(&thread_data->wait_list_lock);
 
     /* Wait the sem */
-    k_sem_take(&node->sem, K_FOREVER);
+    sem_take(&node->sem, K_FOREVER);
 
     /* Wait some time for the thread to be actually terminated */
     k_sleep(Z_TIMEOUT_MS(100));
@@ -427,14 +430,14 @@ os_thread_join(korp_tid thread, void **value_ptr)
 int
 os_mutex_init(korp_mutex *mutex)
 {
-    k_mutex_init(mutex);
+    mutex_init(mutex);
     return BHT_OK;
 }
 
 int
 os_recursive_mutex_init(korp_mutex *mutex)
 {
-    k_mutex_init(mutex);
+    mutex_init(mutex);
     return BHT_OK;
 }
 
@@ -448,16 +451,16 @@ os_mutex_destroy(korp_mutex *mutex)
 int
 os_mutex_lock(korp_mutex *mutex)
 {
-    return k_mutex_lock(mutex, K_FOREVER);
+    return mutex_lock(mutex, K_FOREVER);
 }
 
 int
 os_mutex_unlock(korp_mutex *mutex)
 {
 #if KERNEL_VERSION_NUMBER >= 0x020200 /* version 2.2.0 */
-    return k_mutex_unlock(mutex);
+    return mutex_unlock(mutex);
 #else
-    k_mutex_unlock(mutex);
+    mutex_unlock(mutex);
     return 0;
 #endif
 }
@@ -465,7 +468,7 @@ os_mutex_unlock(korp_mutex *mutex)
 int
 os_cond_init(korp_cond *cond)
 {
-    k_mutex_init(&cond->wait_list_lock);
+    mutex_init(&cond->wait_list_lock);
     cond->thread_wait_list = NULL;
     return BHT_OK;
 }
@@ -486,10 +489,10 @@ os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed, int mills)
     if (!(node = BH_MALLOC(sizeof(os_thread_wait_node))))
         return BHT_ERROR;
 
-    k_sem_init(&node->sem, 0, 1);
+    sem_init(&node->sem, 0, 1);
     node->next = NULL;
 
-    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
+    mutex_lock(&cond->wait_list_lock, K_FOREVER);
     if (!cond->thread_wait_list)
         cond->thread_wait_list = node;
     else {
@@ -499,15 +502,15 @@ os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed, int mills)
             p = p->next;
         p->next = node;
     }
-    k_mutex_unlock(&cond->wait_list_lock);
+    mutex_unlock(&cond->wait_list_lock);
 
     /* Unlock mutex, wait sem and lock mutex again */
-    k_mutex_unlock(mutex);
-    k_sem_take(&node->sem, timed ? Z_TIMEOUT_MS(mills) : K_FOREVER);
-    k_mutex_lock(mutex, K_FOREVER);
+    mutex_unlock(mutex);
+    sem_take(&node->sem, timed ? Z_TIMEOUT_MS(mills) : K_FOREVER);
+    mutex_lock(mutex, K_FOREVER);
 
     /* Remove wait node from wait list */
-    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
+    mutex_lock(&cond->wait_list_lock, K_FOREVER);
     if (cond->thread_wait_list == node)
         cond->thread_wait_list = node->next;
     else {
@@ -518,7 +521,7 @@ os_cond_wait_internal(korp_cond *cond, korp_mutex *mutex, bool timed, int mills)
         p->next = node->next;
     }
     BH_FREE(node);
-    k_mutex_unlock(&cond->wait_list_lock);
+    mutex_unlock(&cond->wait_list_lock);
 
     return BHT_OK;
 }
@@ -556,10 +559,10 @@ int
 os_cond_signal(korp_cond *cond)
 {
     /* Signal the head wait node of wait list */
-    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
+    mutex_lock(&cond->wait_list_lock, K_FOREVER);
     if (cond->thread_wait_list)
-        k_sem_give(&cond->thread_wait_list->sem);
-    k_mutex_unlock(&cond->wait_list_lock);
+        sem_give(&cond->thread_wait_list->sem);
+    mutex_unlock(&cond->wait_list_lock);
 
     return BHT_OK;
 }
@@ -567,7 +570,7 @@ os_cond_signal(korp_cond *cond)
 uint8 *
 os_thread_get_stack_boundary()
 {
-#if defined(CONFIG_THREAD_STACK_INFO)
+#if defined(CONFIG_THREAD_STACK_INFO) && !defined(CONFIG_USERSPACE)
     korp_tid thread = k_current_get();
     return (uint8 *)thread->stack_info.start;
 #else
@@ -598,13 +601,13 @@ int
 os_cond_broadcast(korp_cond *cond)
 {
     os_thread_wait_node *node;
-    k_mutex_lock(&cond->wait_list_lock, K_FOREVER);
+    mutex_lock(&cond->wait_list_lock, K_FOREVER);
     node = cond->thread_wait_list;
     while (node) {
         os_thread_wait_node *next = node->next;
-        k_sem_give(&node->sem);
+        sem_give(&node->sem);
         node = next;
     }
-    k_mutex_unlock(&cond->wait_list_lock);
+    mutex_unlock(&cond->wait_list_lock);
     return BHT_OK;
 }