feat(freertos): Added changes for multi-core RISC-V port for FreeRTOS

This commit updates the FreeRTOS port layer for multi-core RISC-V targets.
Sudeep Mohanty, 2 years ago
Parent
Commit 4e51c6b049

+ 3 - 1
components/esp_hw_support/rtc_module.c

@@ -30,11 +30,13 @@
 static const char *TAG = "rtc_module";
 #endif
 
+// rtc_spinlock is used by other peripheral drivers
+portMUX_TYPE rtc_spinlock = portMUX_INITIALIZER_UNLOCKED;
+
 #if !CONFIG_IDF_TARGET_ESP32C6 && !CONFIG_IDF_TARGET_ESP32H2 && !CONFIG_IDF_TARGET_ESP32P4 // TODO: IDF-8008
 
 #define NOT_REGISTERED      (-1)
 
-portMUX_TYPE rtc_spinlock = portMUX_INITIALIZER_UNLOCKED;
 // Disable the interrupt which cannot work without cache.
 static DRAM_ATTR uint32_t rtc_intr_cache;
 static DRAM_ATTR uint32_t rtc_intr_enabled;
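
Since rtc_spinlock is now defined unconditionally, peripheral drivers can take it on every RISC-V target, including multi-core ones. A minimal usage sketch (hypothetical driver code, not part of this commit):

// Illustration only: guarding a short RTC register sequence with rtc_spinlock.
extern portMUX_TYPE rtc_spinlock;

static void example_rtc_register_update(void)
{
    portENTER_CRITICAL(&rtc_spinlock);   // disable interrupts on this core and take the lock
    // ... read-modify-write an RTC register here ...
    portEXIT_CRITICAL(&rtc_spinlock);    // release the lock and restore the interrupt level
}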

+ 242 - 74
components/freertos/FreeRTOS-Kernel/portable/riscv/include/freertos/portmacro.h

@@ -36,6 +36,7 @@
 #define PORTMACRO_H
 
 #include "sdkconfig.h"
+#include "freertos/FreeRTOSConfig.h"
 
 /* Macros used instead of offsetof() for better performance of the interrupt handler */
 #define PORT_OFFSET_PX_STACK 0x30
@@ -128,6 +129,22 @@ typedef uint32_t TickType_t;
 
 // --------------------- Interrupts ------------------------
 
+/**
+ * @brief Disable interrupts in a nested manner (meant to be called from ISRs)
+ *
+ * @warning Only applies to current CPU.
+ * @return UBaseType_t Previous interrupt level
+ */
+UBaseType_t xPortSetInterruptMaskFromISR(void);
+
+/**
+ * @brief Reenable interrupts in a nested manner (meant to be called from ISRs)
+ *
+ * @warning Only applies to current CPU.
+ * @param prev_int_level Previous interrupt level
+ */
+void vPortClearInterruptMaskFromISR(UBaseType_t prev_int_level);
+
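
A sketch of how these nested mask calls pair up in practice (illustrative only; my_isr and its body are placeholders, not part of this commit):

// Illustration: nested interrupt masking on the current CPU from an ISR.
static void my_isr(void *arg)
{
    UBaseType_t prev_level = xPortSetInterruptMaskFromISR();  // raise the threshold, remember the old level
    // ... short, non-blocking work ...
    vPortClearInterruptMaskFromISR(prev_level);               // restore the saved level (safe to nest)
}
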
 /**
  * @brief Checks if the current core is in an ISR context
  *
@@ -155,63 +172,44 @@ BaseType_t xPortInIsrContext(void);
 BaseType_t xPortInterruptedFromISRContext(void);
 
 /* ---------------------- Spinlocks ------------------------
- - Spinlocks added to match API with SMP FreeRTOS. Single core RISC-V does not need spin locks
- - Because single core does not have a primitive spinlock data type, we have to implement one here
- * @note [refactor-todo] Refactor critical section API so that this is no longer required
+ * - Modifications made to critical sections to support SMP
+ * - See "Critical Sections & Disabling Interrupts" in docs/api-guides/freertos-smp.rst for more details
+ * - Remark: For the ESP32, portENTER_CRITICAL and portENTER_CRITICAL_ISR both alias vPortEnterCritical, meaning that
+ *           either function can be called both from ISR as well as task context. This is not standard FreeRTOS
+ *           behavior; please keep this in mind if you need any compatibility with other FreeRTOS implementations.
+ * @note [refactor-todo] Check if these comments are still true
  * ------------------------------------------------------ */
 
-//TODO: IDF-7566
-#if !CONFIG_IDF_TARGET_ESP32P4
-/**
- * @brief Spinlock object
- * Owner:
- *  - Set to 0 if uninitialized
- *  - Set to portMUX_FREE_VAL when free
- *  - Set to CORE_ID_REGVAL_PRO or CORE_ID_REGVAL_AP when locked
- *  - Any other value indicates corruption
- * Count:
- *  - 0 if unlocked
- *  - Recursive count if locked
- *
- * @note Not a true spinlock as single core RISC-V does not have atomic compare and set instruction
- * @note Keep portMUX_INITIALIZER_UNLOCKED in sync with this struct
- */
-typedef struct {
-    uint32_t owner;
-    uint32_t count;
-} portMUX_TYPE;
-
-#else
 typedef spinlock_t                          portMUX_TYPE;               /**< Spinlock type used by FreeRTOS critical sections */
-#endif
 
-/**< Spinlock initializer */
-#define portMUX_INITIALIZER_UNLOCKED {                      \
-            .owner = portMUX_FREE_VAL,                      \
-            .count = 0,                                     \
-        }
-#define portMUX_FREE_VAL                    SPINLOCK_FREE           /**< Spinlock is free. [refactor-todo] check if this is still required */
-#define portMUX_NO_TIMEOUT                  SPINLOCK_WAIT_FOREVER   /**< When passed for 'timeout_cycles', spin forever if necessary. [refactor-todo] check if this is still required */
-#define portMUX_TRY_LOCK                    SPINLOCK_NO_WAIT        /**< Try to acquire the spinlock a single time only. [refactor-todo] check if this is still required */
-#define portMUX_INITIALIZE(mux)    ({ \
-    (mux)->owner = portMUX_FREE_VAL; \
-    (mux)->count = 0; \
-})
+#define portMUX_INITIALIZER_UNLOCKED        SPINLOCK_INITIALIZER        /**< Spinlock initializer */
+#define portMUX_FREE_VAL                    SPINLOCK_FREE               /**< Spinlock is free. [refactor-todo] check if this is still required */
+#define portMUX_NO_TIMEOUT                  SPINLOCK_WAIT_FOREVER       /**< When passed for 'timeout_cycles', spin forever if necessary. [refactor-todo] check if this is still required */
+#define portMUX_TRY_LOCK                    SPINLOCK_NO_WAIT            /**< Try to acquire the spinlock a single time only. [refactor-todo] check if this is still required */
+#define portMUX_INITIALIZE(mux)             spinlock_initialize(mux)    /**< Initialize a spinlock to its unlocked state */
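
With portMUX_TYPE unified onto spinlock_t for all RISC-V targets, both static and runtime initialization go through the spinlock component. A short sketch (the names s_my_lock and init_my_object are illustrative, not part of this commit):

// Compile-time initialization:
static portMUX_TYPE s_my_lock = portMUX_INITIALIZER_UNLOCKED;

// Runtime initialization, e.g. for a lock embedded in an allocated structure:
static void init_my_object(portMUX_TYPE *lock)
{
    portMUX_INITIALIZE(lock);   // expands to spinlock_initialize(lock)
}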
 
 // ------------------ Critical Sections --------------------
 
+/*
+This RISC-V port provides two kinds of critical section APIs: one that takes a spinlock argument and one that does
+not.
+
+These sets of APIs are:
+1. vPortEnterCritical(void) and vPortExitCritical(void)
+2. vPortEnterCriticalMultiCore(portMUX_TYPE *mux) and vPortExitCriticalMultiCore(portMUX_TYPE *mux)
+
+This is primarily done to remain compatible with some IDF examples, such as esp_zigbee_gateway, which reference
+vPortEnterCritical(void) and vPortExitCritical(void) from precompiled libraries (.a).
+TODO: IDF-8089
+*/
+
 /**
  * @brief Enter a critical section
  *
  * - Simply disable interrupts
  * - Can be nested
  */
-//TODO: IDF-7566
-#if !CONFIG_IDF_TARGET_ESP32P4
 void vPortEnterCritical(void);
-#else
-void vPortEnterCritical(portMUX_TYPE *mux);
-#endif
 
 /**
  * @brief Exit a critical section
@@ -219,12 +217,121 @@ void vPortEnterCritical(portMUX_TYPE *mux);
  * - Reenables interrupts
  * - Can be nested
  */
-//TODO: IDF-7566
-#if !CONFIG_IDF_TARGET_ESP32P4
 void vPortExitCritical(void);
-#else
-void vPortExitCritical(portMUX_TYPE *mux);
-#endif
+
+#if (configNUM_CORES > 1)
+/**
+ * @brief Enter an SMP critical section with a timeout
+ *
+ * This function enters an SMP critical section by disabling interrupts then
+ * taking a spinlock with a specified timeout.
+ *
+ * This function can be called in a nested manner.
+ *
+ * @note This function is made non-inline on purpose to reduce code size
+ * @param mux Spinlock
+ * @param timeout Timeout to wait for spinlock in number of CPU cycles.
+ *                Use portMUX_NO_TIMEOUT to wait indefinitely
+ *                Use portMUX_TRY_LOCK to try to acquire the spinlock only once
+ * @retval pdPASS Critical section entered (spinlock taken)
+ * @retval pdFAIL If timed out waiting for spinlock (will not occur if using portMUX_NO_TIMEOUT)
+ */
+BaseType_t xPortEnterCriticalTimeout(portMUX_TYPE *mux, BaseType_t timeout);
+
+/**
+ * @brief Enter an SMP critical section
+ *
+ * This function enters an SMP critical section by disabling interrupts then
+ * taking a spinlock with an unlimited timeout.
+ *
+ * This function can be called in a nested manner
+ *
+ * @param[in] mux Spinlock
+ */
+static inline void __attribute__((always_inline)) vPortEnterCriticalMultiCore(portMUX_TYPE *mux);
+
+/**
+ * @brief Exit an SMP critical section
+ *
+ * This function can be called in a nested manner. On the outermost level of nesting, this function will:
+ *
+ * - Release the spinlock
+ * - Restore the previous interrupt level before the critical section was entered
+ *
+ * If still nesting, this function simply decrements a critical nesting count
+ *
+ * @note This function is made non-inline on purpose to reduce code size
+ * @param[in] mux Spinlock
+ */
+void vPortExitCriticalMultiCore(portMUX_TYPE *mux);
+
+/**
+ * @brief FreeRTOS Compliant version of xPortEnterCriticalTimeout()
+ *
+ * Compliant version of xPortEnterCriticalTimeout() will ensure that this is
+ * called from a task context only. An abort is called otherwise.
+ *
+ * @note This function is made non-inline on purpose to reduce code size
+ *
+ * @param mux Spinlock
+ * @param timeout Timeout
+ * @return BaseType_t
+ */
+BaseType_t xPortEnterCriticalTimeoutCompliance(portMUX_TYPE *mux, BaseType_t timeout);
+
+/**
+ * @brief FreeRTOS compliant version of vPortEnterCritical()
+ *
+ * Compliant version of vPortEnterCritical() will ensure that this is
+ * called from a task context only. An abort is called otherwise.
+ *
+ * @param[in] mux Spinlock
+ */
+static inline void __attribute__((always_inline)) vPortEnterCriticalCompliance(portMUX_TYPE *mux);
+
+/**
+ * @brief FreeRTOS compliant version of vPortExitCritical()
+ *
+ * Compliant version of vPortExitCritical() will ensure that this is
+ * called from a task context only. An abort is called otherwise.
+ *
+ * @note This function is made non-inline on purpose to reduce code size
+ * @param[in] mux Spinlock
+ */
+void vPortExitCriticalCompliance(portMUX_TYPE *mux);
+
+/**
+ * @brief Safe version of enter critical timeout
+ *
+ * Safe version of enter critical will automatically select between
+ * portTRY_ENTER_CRITICAL() and portTRY_ENTER_CRITICAL_ISR()
+ *
+ * @param mux Spinlock
+ * @param timeout Timeout
+ * @return BaseType_t
+ */
+static inline BaseType_t __attribute__((always_inline)) xPortEnterCriticalTimeoutSafe(portMUX_TYPE *mux, BaseType_t timeout);
+
+/**
+ * @brief Safe version of enter critical
+ *
+ * Safe version of enter critical will automatically select between
+ * portENTER_CRITICAL() and portENTER_CRITICAL_ISR()
+ *
+ * @param[in] mux Spinlock
+ */
+static inline void __attribute__((always_inline)) vPortEnterCriticalSafe(portMUX_TYPE *mux);
+
+/**
+ * @brief Safe version of exit critical
+ *
+ * Safe version of exit critical will automatically select between
+ * portEXIT_CRITICAL() and portEXIT_CRITICAL_ISR()
+ *
+ * @param[in] mux Spinlock
+ */
+static inline void __attribute__((always_inline)) vPortExitCriticalSafe(portMUX_TYPE *mux);
+#endif /* (configNUM_CORES > 1) */
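
A usage sketch of the resulting critical-section surface, assuming an application-defined spinlock and shared data (s_state_lock, s_shared_counter and the helper functions are placeholders, not part of this commit; FreeRTOS headers assumed included):

static portMUX_TYPE s_state_lock = portMUX_INITIALIZER_UNLOCKED;
static uint32_t s_shared_counter;

static void task_side_update(void)
{
    portENTER_CRITICAL(&s_state_lock);        // task context: disables interrupts and takes the spinlock
    s_shared_counter++;
    portEXIT_CRITICAL(&s_state_lock);
}

static void isr_side_update(void)
{
    portENTER_CRITICAL_ISR(&s_state_lock);    // ISR context variant
    s_shared_counter++;
    portEXIT_CRITICAL_ISR(&s_state_lock);
}

static void either_context_update(void)
{
    portENTER_CRITICAL_SAFE(&s_state_lock);   // picks the task/ISR variant via xPortInIsrContext()
    s_shared_counter++;
    portEXIT_CRITICAL_SAFE(&s_state_lock);
}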
 
 // ---------------------- Yielding -------------------------
 
@@ -332,13 +439,50 @@ FORCE_INLINE_ATTR BaseType_t xPortGetCoreID(void)
 
 #define portDISABLE_INTERRUPTS()            portSET_INTERRUPT_MASK_FROM_ISR()
 #define portENABLE_INTERRUPTS()             portCLEAR_INTERRUPT_MASK_FROM_ISR(1)
-#define portSET_INTERRUPT_MASK_FROM_ISR()                       vPortSetInterruptMask()
-#define portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedStatusValue)   vPortClearInterruptMask(uxSavedStatusValue)
+
+/**
+ * ISR versions to enable/disable interrupts
+ */
+#define portSET_INTERRUPT_MASK_FROM_ISR()                   xPortSetInterruptMaskFromISR()
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR(prev_level)       vPortClearInterruptMaskFromISR(prev_level)
+
+/**
+ * @brief Used by FreeRTOS functions to call the correct version of critical section API
+ */
+#if ( configNUM_CORES > 1 )
+#define portCHECK_IF_IN_ISR()   xPortInIsrContext()
+#endif
 
 // ------------------ Critical Sections --------------------
 
-//TODO: IDF-7566
-#if !CONFIG_IDF_TARGET_ESP32P4
+#if (configNUM_CORES > 1)
+/**
+ * @brief FreeRTOS critical section macros
+ *
+ * - Added a spinlock argument for SMP
+ * - Can be nested
+ * - Compliance versions will assert if regular critical section API is used in ISR context
+ * - Safe versions can be called from either contexts
+ */
+#ifdef CONFIG_FREERTOS_CHECK_PORT_CRITICAL_COMPLIANCE
+#define portTRY_ENTER_CRITICAL(mux, timeout)        xPortEnterCriticalTimeoutCompliance(mux, timeout)
+#define portENTER_CRITICAL(mux)                     vPortEnterCriticalCompliance(mux)
+#define portEXIT_CRITICAL(mux)                      vPortExitCriticalCompliance(mux)
+#else
+#define portTRY_ENTER_CRITICAL(mux, timeout)        xPortEnterCriticalTimeout(mux, timeout)
+#define portENTER_CRITICAL(mux)                     vPortEnterCriticalMultiCore(mux)
+#define portEXIT_CRITICAL(mux)                      vPortExitCriticalMultiCore(mux)
+#endif /* CONFIG_FREERTOS_CHECK_PORT_CRITICAL_COMPLIANCE */
+
+#define portTRY_ENTER_CRITICAL_ISR(mux, timeout)    xPortEnterCriticalTimeout(mux, timeout)
+#define portENTER_CRITICAL_ISR(mux)                 vPortEnterCriticalMultiCore(mux)
+#define portEXIT_CRITICAL_ISR(mux)                  vPortExitCriticalMultiCore(mux)
+
+#define portTRY_ENTER_CRITICAL_SAFE(mux, timeout)   xPortEnterCriticalTimeoutSafe(mux, timeout)
+#define portENTER_CRITICAL_SAFE(mux)                vPortEnterCriticalSafe(mux)
+#define portEXIT_CRITICAL_SAFE(mux)                 vPortExitCriticalSafe(mux)
+#else
+/* Single-core variants of the critical section macros */
 #define portENTER_CRITICAL(mux)                 {(void)mux;  vPortEnterCritical();}
 #define portEXIT_CRITICAL(mux)                  {(void)mux;  vPortExitCritical();}
 #define portTRY_ENTER_CRITICAL(mux, timeout)    ({  \
@@ -347,16 +491,6 @@ FORCE_INLINE_ATTR BaseType_t xPortGetCoreID(void)
     BaseType_t ret = pdPASS;                        \
     ret;                                            \
 })
-#else
-#define portENTER_CRITICAL(mux)                 {vPortEnterCritical(mux);}
-#define portEXIT_CRITICAL(mux)                  {vPortExitCritical(mux);}
-#define portTRY_ENTER_CRITICAL(mux, timeout)    ({  \
-    (void)timeout;                                  \
-    vPortEnterCritical(mux);                        \
-    BaseType_t ret = pdPASS;                        \
-    ret;                                            \
-})
-#endif
 
 //In single-core RISC-V, we can use the same critical section API
 #define portENTER_CRITICAL_ISR(mux)                 portENTER_CRITICAL(mux)
@@ -380,10 +514,8 @@ FORCE_INLINE_ATTR BaseType_t xPortGetCoreID(void)
 })
 #define portTRY_ENTER_CRITICAL_SAFE(mux, timeout)   portENTER_CRITICAL_SAFE(mux, timeout)
 
-//TODO: IDF-7566
-#if CONFIG_IDF_TARGET_ESP32P4
-#define portCHECK_IF_IN_ISR()   xPortInIsrContext()
-#endif
+#endif /* (configNUM_CORES > 1) */
+
 // ---------------------- Yielding -------------------------
 
 #define portYIELD() vPortYield()
@@ -458,17 +590,53 @@ extern void vPortCleanUpTCB ( void *pxTCB );
 
 // --------------------- Interrupts ------------------------
 
+// ------------------ Critical Sections --------------------
+
+#if (configNUM_CORES > 1)
+static inline void __attribute__((always_inline)) vPortEnterCriticalMultiCore(portMUX_TYPE *mux)
+{
+    xPortEnterCriticalTimeout(mux, portMUX_NO_TIMEOUT);
+}
+
+static inline void __attribute__((always_inline)) vPortEnterCriticalCompliance(portMUX_TYPE *mux)
+{
+    xPortEnterCriticalTimeoutCompliance(mux, portMUX_NO_TIMEOUT);
+}
+
+static inline BaseType_t __attribute__((always_inline)) xPortEnterCriticalTimeoutSafe(portMUX_TYPE *mux, BaseType_t timeout)
+{
+    BaseType_t ret;
+    if (xPortInIsrContext()) {
+        ret = portTRY_ENTER_CRITICAL_ISR(mux, timeout);
+    } else {
+        ret = portTRY_ENTER_CRITICAL(mux, timeout);
+    }
+    return ret;
+}
+
+static inline void __attribute__((always_inline)) vPortEnterCriticalSafe(portMUX_TYPE *mux)
+{
+    xPortEnterCriticalTimeoutSafe(mux, portMUX_NO_TIMEOUT);
+}
+
+static inline void __attribute__((always_inline)) vPortExitCriticalSafe(portMUX_TYPE *mux)
+{
+    if (xPortInIsrContext()) {
+        portEXIT_CRITICAL_ISR(mux);
+    } else {
+        portEXIT_CRITICAL(mux);
+    }
+}
+#endif /* (configNUM_CORES > 1) */
+
 // ---------------------- Yielding -------------------------
 
 FORCE_INLINE_ATTR bool xPortCanYield(void)
 {
-//TODO: IDF-7566
-#if SOC_INT_CLIC_SUPPORTED
-    uint32_t threshold = REG_READ(INTERRUPT_CORE0_CPU_INT_THRESH_REG + 0x10000 * xPortGetCoreID());
-    threshold = threshold >> (24 + (8 - NLBITS));
-#else
     uint32_t threshold = REG_READ(INTERRUPT_CORE0_CPU_INT_THRESH_REG);
-#endif
+#if SOC_INT_CLIC_SUPPORTED
+    threshold = threshold >> (CLIC_CPU_INT_THRESH_S + (8 - NLBITS));
+#endif /* SOC_INT_CLIC_SUPPORTED */
     /* when enter critical code, FreeRTOS will mask threshold to RVHAL_EXCM_LEVEL
      * and exit critical code, will recover threshold value (1). so threshold <= 1
      * means not in critical code
@@ -513,8 +681,8 @@ bool xPortcheckValidStackMem(const void *ptr);
 // --------------------- App-Trace -------------------------
 
 #if CONFIG_APPTRACE_SV_ENABLE
-extern int xPortSwitchFlag;
-#define os_task_switch_is_pended(_cpu_) (xPortSwitchFlag)
+extern volatile UBaseType_t xPortSwitchFlag[portNUM_PROCESSORS];
+#define os_task_switch_is_pended(_cpu_) (xPortSwitchFlag[_cpu_])
 #else
 #define os_task_switch_is_pended(_cpu_) (false)
 #endif

+ 168 - 167
components/freertos/FreeRTOS-Kernel/portable/riscv/port.c

@@ -57,7 +57,6 @@
 #include "port_systick.h"
 #include "esp_memory_utils.h"
 #if CONFIG_IDF_TARGET_ESP32P4
-//TODO: IDF-7566
 #include "soc/hp_system_reg.h"
 #endif
 
@@ -78,42 +77,20 @@ _Static_assert(offsetof( StaticTask_t, pxDummy8 ) == PORT_OFFSET_PX_END_OF_STACK
  *
  * ------------------------------------------------------------------------------------------------------------------ */
 
-//TODO: IDF-7566
-#if !CONFIG_IDF_TARGET_ESP32P4
-/**
- * @brief A variable is used to keep track of the critical section nesting.
- * @note This variable has to be stored as part of the task context and must be initialized to a non zero value
- *       to ensure interrupts don't inadvertently become unmasked before the scheduler starts.
- *       As it is stored as part of the task context it will automatically be set to 0 when the first task is started.
- */
-static UBaseType_t uxCriticalNesting = 0;
-static UBaseType_t uxSavedInterruptState = 0;
-BaseType_t uxSchedulerRunning = 0;  // Duplicate of xSchedulerRunning, accessible to port files
-UBaseType_t uxInterruptNesting = 0;
-BaseType_t xPortSwitchFlag = 0;
-__attribute__((aligned(16))) StackType_t xIsrStack[configISR_STACK_SIZE];
-StackType_t *xIsrStackTop = &xIsrStack[0] + (configISR_STACK_SIZE & (~((portPOINTER_SIZE_TYPE)portBYTE_ALIGNMENT_MASK)));
-
-#else
-/* uxCriticalNesting will be increased by 1 each time one processor is entering a critical section
- * and will be decreased by 1 each time one processor is exiting a critical section
- */
-volatile UBaseType_t uxCriticalNesting[portNUM_PROCESSORS] = {0};
-volatile UBaseType_t uxSavedInterruptState[portNUM_PROCESSORS] = {0};
-volatile BaseType_t uxSchedulerRunning[portNUM_PROCESSORS] = {0};
-volatile UBaseType_t uxInterruptNesting[portNUM_PROCESSORS] = {0};
-volatile BaseType_t xPortSwitchFlag[portNUM_PROCESSORS] = {0};
-/* core0 interrupt stack space */
-__attribute__((aligned(16))) static StackType_t xIsrStack[configISR_STACK_SIZE];
-/* core1 interrupt stack space */
-__attribute__((aligned(16))) static StackType_t xIsrStack1[configISR_STACK_SIZE];
-/* core0 interrupt stack top, passed to sp */
-StackType_t *xIsrStackTop = &xIsrStack[0] + (configISR_STACK_SIZE & (~((portPOINTER_SIZE_TYPE)portBYTE_ALIGNMENT_MASK)));
-/* core1 interrupt stack top, passed to sp */
-StackType_t *xIsrStackTop1 = &xIsrStack1[0] + (configISR_STACK_SIZE & (~((portPOINTER_SIZE_TYPE)portBYTE_ALIGNMENT_MASK)));
-#endif
-
+volatile UBaseType_t port_xSchedulerRunning[portNUM_PROCESSORS] = {0}; // Indicates whether scheduler is running on a per-core basis
+volatile UBaseType_t port_uxInterruptNesting[portNUM_PROCESSORS] = {0};  // Interrupt nesting level. Increased/decreased in portasm.c
+volatile UBaseType_t port_uxCriticalNesting[portNUM_PROCESSORS] = {0};
+volatile UBaseType_t port_uxOldInterruptState[portNUM_PROCESSORS] = {0};
+volatile UBaseType_t xPortSwitchFlag[portNUM_PROCESSORS] = {0};
 
+/*
+*******************************************************************************
+* Interrupt stack. The size of the interrupt stack is determined by the config
+* parameter "configISR_STACK_SIZE" in FreeRTOSConfig.h
+*******************************************************************************
+*/
+__attribute__((aligned(16))) StackType_t xIsrStack[portNUM_PROCESSORS][configISR_STACK_SIZE];
+StackType_t *xIsrStackTop[portNUM_PROCESSORS] = {0};
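
The per-core stack tops are filled in by xPortStartScheduler() by rounding configISR_STACK_SIZE down to the port's byte alignment. A worked sketch of that rounding (the macro name and the value 1540 are hypothetical, purely for illustration):

// ISR_STACK_SIZE_ALIGNED(1540) == 1536 when portBYTE_ALIGNMENT is 16,
// i.e. the stack top lands on a 16-byte aligned offset from &xIsrStack[i][0].
#define ISR_STACK_SIZE_ALIGNED(sz)  ((sz) & ~((portPOINTER_SIZE_TYPE)portBYTE_ALIGNMENT_MASK))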
 
 /* ------------------------------------------------ FreeRTOS Portable --------------------------------------------------
  * - Provides implementation for functions required by FreeRTOS
@@ -122,30 +99,33 @@ StackType_t *xIsrStackTop1 = &xIsrStack1[0] + (configISR_STACK_SIZE & (~((portPO
 
 // ----------------- Scheduler Start/End -------------------
 
-//TODO: IDF-7566
 BaseType_t xPortStartScheduler(void)
 {
-#if !CONFIG_IDF_TARGET_ESP32P4
-    uxInterruptNesting = 0;
-    uxCriticalNesting = 0;
-    uxSchedulerRunning = 0;
-#else
+    /* Initialize all kernel state tracking variables */
     BaseType_t coreID = xPortGetCoreID();
-    uxInterruptNesting[coreID] = 0;
-    uxCriticalNesting[coreID] = 0;
-    uxSchedulerRunning[coreID] = 0;
-#endif
+    port_uxInterruptNesting[coreID] = 0;
+    port_uxCriticalNesting[coreID] = 0;
+    port_xSchedulerRunning[coreID] = 0;
+
+    /* Initialize ISR Stack top */
+    for (int i = 0; i < portNUM_PROCESSORS; i++) {
+        xIsrStackTop[i] = &xIsrStack[i][0] + (configISR_STACK_SIZE & (~((portPOINTER_SIZE_TYPE)portBYTE_ALIGNMENT_MASK)));
+    }
 
     /* Setup the hardware to generate the tick. */
     vPortSetupTimer();
 
+#if !SOC_INT_CLIC_SUPPORTED
     esprv_intc_int_set_threshold(1); /* set global INTC masking level */
+#else
+    esprv_intc_int_set_threshold(0); /* set global CLIC masking level. When CLIC is supported, all interrupt priority levels less than or equal to the threshold level are masked. */
+#endif /* !SOC_INT_CLIC_SUPPORTED */
     rv_utils_intr_global_enable();
 
     vPortYield();
 
-    /*Should not get here*/
-    return pdFALSE;
+    /* Should not get here */
+    return pdTRUE;
 }
 
 void vPortEndScheduler(void)
@@ -272,7 +252,7 @@ FORCE_INLINE_ATTR UBaseType_t uxInitialiseStackFrame(UBaseType_t uxStackPointer,
     /*
     Allocate space for the task's starting interrupt stack frame.
     - The stack frame must be allocated to a 16-byte aligned address.
-    - We use XT_STK_FRMSZ (instead of sizeof(XtExcFrame)) as it rounds up the total size to a multiple of 16.
+    - We use RV_STK_FRMSZ as it rounds up the total size to a multiple of 16.
     */
     uxStackPointer = STACKPTR_ALIGN_DOWN(16, uxStackPointer - RV_STK_FRMSZ);
 
@@ -323,6 +303,8 @@ StackType_t *pxPortInitialiseStack(StackType_t *pxTopOfStack, TaskFunction_t pxC
     UBaseType_t uxStackPointer = (UBaseType_t)pxTopOfStack;
     configASSERT((uxStackPointer & portBYTE_ALIGNMENT_MASK) == 0);
 
+    // IDF-7770: Support FPU context save area for P4
+
     // Initialize GCC TLS area
     uint32_t threadptr_reg_init;
     uxStackPointer = uxInitialiseStackTLS(uxStackPointer, &threadptr_reg_init);
@@ -334,7 +316,6 @@ StackType_t *pxPortInitialiseStack(StackType_t *pxTopOfStack, TaskFunction_t pxC
 
     // Return the task's current stack pointer address which should point to the starting interrupt stack frame
     return (StackType_t *)uxStackPointer;
-    //TODO: IDF-2393
 }
 
 
@@ -345,112 +326,47 @@ StackType_t *pxPortInitialiseStack(StackType_t *pxTopOfStack, TaskFunction_t pxC
 
 // --------------------- Interrupts ------------------------
 
-//TODO: IDF-7566
 BaseType_t xPortInIsrContext(void)
 {
-#if !CONFIG_IDF_TARGET_ESP32P4
-    return uxInterruptNesting;
-#else
-    BaseType_t coreID = xPortGetCoreID();
-    return uxInterruptNesting[coreID];
-#endif
-}
-
-BaseType_t IRAM_ATTR xPortInterruptedFromISRContext(void)
-{
-    /* For single core, this can be the same as xPortInIsrContext() because reading it is atomic */
-#if !CONFIG_IDF_TARGET_ESP32P4
-    return uxInterruptNesting;
-#else
-    BaseType_t coreID = xPortGetCoreID();
-    return uxInterruptNesting[coreID];
-#endif
-}
-
-// ---------------------- Spinlocks ------------------------
+#if (configNUM_CORES > 1)
+    unsigned int irqStatus;
+    BaseType_t ret;
 
+    /* Disable interrupts to fetch the coreID atomically */
+    irqStatus = portSET_INTERRUPT_MASK_FROM_ISR();
 
+    /* Return the interrupt nesting counter for this core */
+    ret = port_uxInterruptNesting[xPortGetCoreID()];
 
-// ------------------ Critical Sections --------------------
-
-//TODO: IDF-7566
-#if !CONFIG_IDF_TARGET_ESP32P4
-void vPortEnterCritical(void)
-{
-    BaseType_t state = portSET_INTERRUPT_MASK_FROM_ISR();
-    uxCriticalNesting++;
-
-    if (uxCriticalNesting == 1) {
-        uxSavedInterruptState = state;
-    }
-}
-
-void vPortExitCritical(void)
-{
-    if (uxCriticalNesting > 0) {
-        uxCriticalNesting--;
-        if (uxCriticalNesting == 0) {
-            portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptState);
-        }
-    }
-}
+    /* Restore interrupts */
+    portCLEAR_INTERRUPT_MASK_FROM_ISR(irqStatus);
 
+    return ret;
 #else
-void vPortEnterCritical(portMUX_TYPE *mux)
-{
-    BaseType_t coreID = xPortGetCoreID();
-    BaseType_t state = portSET_INTERRUPT_MASK_FROM_ISR();
-
-    spinlock_acquire((spinlock_t *)mux, SPINLOCK_WAIT_FOREVER);
-    uxCriticalNesting[coreID]++;
-
-    if (uxCriticalNesting[coreID] == 1) {
-        uxSavedInterruptState[coreID] = state;
-    }
+    /* Optimize the call for single-core targets */
+    return port_uxInterruptNesting[0];
+#endif /* (configNUM_CORES > 1) */
 }
 
-void vPortExitCritical(portMUX_TYPE *mux)
+BaseType_t IRAM_ATTR xPortInterruptedFromISRContext(void)
 {
-    spinlock_release((spinlock_t *)mux);
-
-    BaseType_t coreID = xPortGetCoreID();
-    if (uxCriticalNesting[coreID] > 0) {
-        uxCriticalNesting[coreID]--;
-        if (uxCriticalNesting[coreID] == 0) {
-            portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptState[coreID]);
-        }
-    }
+    /* Return the interrupt nesting counter for this core */
+    return port_uxInterruptNesting[xPortGetCoreID()];
 }
-#endif
-
-// ---------------------- Yielding -------------------------
 
-//TODO: IDF-7566
-int vPortSetInterruptMask(void)
+UBaseType_t xPortSetInterruptMaskFromISR(void)
 {
-    int ret;
-    unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
-    ret = REG_READ(INTERRUPT_CORE0_CPU_INT_THRESH_REG);
+    UBaseType_t prev_int_level = 0;
 
-#if !CONFIG_IDF_TARGET_ESP32P4
+#if !SOC_INT_CLIC_SUPPORTED
+    unsigned old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
+    prev_int_level = REG_READ(INTERRUPT_CORE0_CPU_INT_THRESH_REG);
     REG_WRITE(INTERRUPT_CORE0_CPU_INT_THRESH_REG, RVHAL_EXCM_LEVEL);
     RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
 #else
-    #define RVHAL_EXCM_THRESHOLD_VALUE   (((RVHAL_EXCM_LEVEL << (8 - NLBITS)) | 0x1f) << CLIC_CPU_INT_THRESH_S)
-
-    REG_WRITE(INTERRUPT_CORE0_CPU_INT_THRESH_REG, RVHAL_EXCM_THRESHOLD_VALUE);
-    /**
-     * TODO: IDF-7898
-     * Here is an issue that,
-     * 1. Set the CLIC_INT_THRESH_REG to mask off interrupts whose level is lower than `intlevel`.
-     * 2. Set MSTATUS_MIE (global interrupt), then program may jump to interrupt vector.
-     * 3. The register value change in Step 1 may happen during Step 2.
-     *
-     * To prevent this, here a fence is used
-     */
-    rv_utils_memory_barrier();
-    RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
-#endif
+    /* When CLIC is supported, all interrupt priority levels less than or equal to the threshold level are masked. */
+    prev_int_level = rv_utils_set_intlevel(RVHAL_EXCM_LEVEL - 1);
+#endif /* !SOC_INT_CLIC_SUPPORTED */
     /**
      * In theory, this function should not return immediately as there is a
      * delay between the moment we mask the interrupt threshold register and
@@ -462,12 +378,16 @@ int vPortSetInterruptMask(void)
      * followed by two instructions: `ret` and `csrrs` (RV_SET_CSR).
      * That's why we don't need any additional nop instructions here.
      */
-    return ret;
+    return prev_int_level;
 }
 
-void vPortClearInterruptMask(int mask)
+void vPortClearInterruptMaskFromISR(UBaseType_t prev_int_level)
 {
-    REG_WRITE(INTERRUPT_CORE0_CPU_INT_THRESH_REG, mask);
+#if !SOC_INT_CLIC_SUPPORTED
+    REG_WRITE(INTERRUPT_CORE0_CPU_INT_THRESH_REG, prev_int_level);
+#else
+    rv_utils_restore_intlevel(prev_int_level);
+#endif /* !SOC_INT_CLIC_SUPPORTED */
     /**
      * The delay between the moment we unmask the interrupt threshold register
      * and the moment the potential requested interrupt is triggered is not
@@ -488,41 +408,123 @@ void vPortClearInterruptMask(int mask)
     asm volatile ( "nop" );
 }
 
-//TODO: IDF-7566
-#if !CONFIG_IDF_TARGET_ESP32P4
-void vPortYield(void)
+// ------------------ Critical Sections --------------------
+
+#if (configNUM_CORES > 1)
+BaseType_t __attribute__((optimize("-O3"))) xPortEnterCriticalTimeout(portMUX_TYPE *mux, BaseType_t timeout)
 {
-    if (uxInterruptNesting) {
-        vPortYieldFromISR();
+    /* Interrupts may already be disabled (if this function is called in a nested
+     * manner). However, there is no atomic operation that would let us check this,
+     * so we have to disable interrupts again anyway.
+     *
+     * However, if this call is NOT nested (i.e., it is the first call to enter the
+     * critical section), we will save the previous interrupt level so that the
+     * saved level can be restored on the last call to exit the critical section.
+     */
+    BaseType_t xOldInterruptLevel = portSET_INTERRUPT_MASK_FROM_ISR();
+    if (!spinlock_acquire(mux, timeout)) {
+        //Timed out attempting to get spinlock. Restore previous interrupt level and return
+        portCLEAR_INTERRUPT_MASK_FROM_ISR(xOldInterruptLevel);
+        return pdFAIL;
+    }
+    //Spinlock acquired. Increment the critical nesting count.
+    BaseType_t coreID = xPortGetCoreID();
+    BaseType_t newNesting = port_uxCriticalNesting[coreID] + 1;
+    port_uxCriticalNesting[coreID] = newNesting;
+    //If this is the first entry into a critical section, save the old interrupt level.
+    if ( newNesting == 1 ) {
+        port_uxOldInterruptState[coreID] = xOldInterruptLevel;
+    }
+    return pdPASS;
+}
+
+void __attribute__((optimize("-O3"))) vPortExitCriticalMultiCore(portMUX_TYPE *mux)
+{
+    /* This function may be called in a nested manner. Therefore, we only need
+     * to re-enable interrupts on the last call to exit the critical section. We
+     * can use the nesting count to determine whether this is the last exit call.
+     */
+    spinlock_release(mux);
+    BaseType_t coreID = xPortGetCoreID();
+    BaseType_t nesting = port_uxCriticalNesting[coreID];
+
+    if (nesting > 0) {
+        nesting--;
+        port_uxCriticalNesting[coreID] = nesting;
+        //If this is the last exit call, restore the saved interrupt level
+        if ( nesting == 0 ) {
+            portCLEAR_INTERRUPT_MASK_FROM_ISR(port_uxOldInterruptState[coreID]);
+        }
+    }
+}
+
+
+BaseType_t xPortEnterCriticalTimeoutCompliance(portMUX_TYPE *mux, BaseType_t timeout)
+{
+    BaseType_t ret;
+    if (!xPortInIsrContext()) {
+        ret = xPortEnterCriticalTimeout(mux, timeout);
+    } else {
+        esp_rom_printf("port*_CRITICAL called from ISR context. Aborting!\n");
+        abort();
+        ret = pdFAIL;
+    }
+    return ret;
+}
+
+void vPortExitCriticalCompliance(portMUX_TYPE *mux)
+{
+    if (!xPortInIsrContext()) {
+        vPortExitCriticalMultiCore(mux);
     } else {
+        esp_rom_printf("port*_CRITICAL called from ISR context. Aborting!\n");
+        abort();
+    }
+}
+#endif /* (configNUM_CORES > 1) */
 
-        esp_crosscore_int_send_yield(0);
-        /* There are 3-4 instructions of latency between triggering the software
-           interrupt and the CPU interrupt happening. Make sure it happened before
-           we return, otherwise vTaskDelay() may return and execute 1-2
-           instructions before the delay actually happens.
+void vPortEnterCritical(void)
+{
+#if (configNUM_CORES > 1)
+    esp_rom_printf("vPortEnterCritical(void) is not supported on multi-core targets. Please use vPortEnterCriticalMultiCore(portMUX_TYPE *mux) instead.\n");
+        abort();
+#endif /* (configNUM_CORES > 1) */
+    BaseType_t state = portSET_INTERRUPT_MASK_FROM_ISR();
+    port_uxCriticalNesting[0]++;
 
-           (We could use the WFI instruction here, but there is a chance that
-           the interrupt will happen while evaluating the other two conditions
-           for an instant yield, and if that happens then the WFI would be
-           waiting for the next interrupt to occur...)
-        */
-        while (uxSchedulerRunning && uxCriticalNesting == 0 && REG_READ(SYSTEM_CPU_INTR_FROM_CPU_0_REG) != 0) {}
+    if (port_uxCriticalNesting[0] == 1) {
+        port_uxOldInterruptState[0] = state;
     }
 }
 
-void vPortYieldFromISR( void )
+void vPortExitCritical(void)
 {
 {
-    traceISR_EXIT_TO_SCHEDULER();
-    uxSchedulerRunning = 1;
-    xPortSwitchFlag = 1;
+#if (configNUM_CORES > 1)
+        esp_rom_printf("vPortExitCritical(void) is not supported on single-core targets. Please use vPortExitCriticalMultiCore(portMUX_TYPE *mux) instead.\n");
+        abort();
+#endif /* (configNUM_CORES > 1) */
+    if (port_uxCriticalNesting[0] > 0) {
+        port_uxCriticalNesting[0]--;
+        if (port_uxCriticalNesting[0] == 0) {
+            portCLEAR_INTERRUPT_MASK_FROM_ISR(port_uxOldInterruptState[0]);
+        }
+    }
 }
 }
 
 
+// ---------------------- Yielding -------------------------
+
 void vPortYield(void)
 void vPortYield(void)
 {
 {
     BaseType_t coreID = xPortGetCoreID();
     BaseType_t coreID = xPortGetCoreID();
-    if (uxInterruptNesting[coreID]) {
+    int system_cpu_int_reg;
+
+#if !CONFIG_IDF_TARGET_ESP32P4
+    system_cpu_int_reg = SYSTEM_CPU_INTR_FROM_CPU_0_REG;
+#else
+    system_cpu_int_reg = HP_SYSTEM_CPU_INT_FROM_CPU_0_REG;
+#endif /* !CONFIG_IDF_TARGET_ESP32P4 */
+
+    if (port_uxInterruptNesting[coreID]) {
         vPortYieldFromISR();
         vPortYieldFromISR();
     } else {
     } else {
         esp_crosscore_int_send_yield(coreID);
         esp_crosscore_int_send_yield(coreID);
@@ -536,7 +538,7 @@ void vPortYield(void)
            for an instant yield, and if that happens then the WFI would be
            for an instant yield, and if that happens then the WFI would be
            waiting for the next interrupt to occur...)
         */
+        while (port_xSchedulerRunning[coreID] && port_uxCriticalNesting[coreID] == 0 && REG_READ(system_cpu_int_reg + 4 * coreID) != 0) {}
     }
     }
 }
 
 {
 {
     traceISR_EXIT_TO_SCHEDULER();
     BaseType_t coreID = xPortGetCoreID();
+    port_xSchedulerRunning[coreID] = 1;
     xPortSwitchFlag[coreID] = 1;
     xPortSwitchFlag[coreID] = 1;
 }
 
 
 void vPortYieldOtherCore(BaseType_t coreid)
 void vPortYieldOtherCore(BaseType_t coreid)
 {
 {

+ 112 - 120
components/freertos/FreeRTOS-Kernel/portable/riscv/portasm.S

@@ -5,17 +5,16 @@
  */
  */
 #include "sdkconfig.h"
 #include "portmacro.h"
+#include "soc/soc_caps.h"
+
 #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
 #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
 #include "esp_private/hw_stack_guard.h"
 #endif
 
-    .global uxSchedulerRunning
+    .global port_uxInterruptNesting
+    .global port_xSchedulerRunning
     .global xIsrStackTop
     .global xIsrStackTop
-//TODO: IDF-7566
-    .global xIsrStackTop1
-#endif
     .global pxCurrentTCB
     .global pxCurrentTCB
     .global vTaskSwitchContext
     .global xPortSwitchFlag
     .section .text
     .section .text
 
 /**
- * current task stack saved, places into the TCB, loads the ISR stack.
+ * This function makes the RTOS aware about an ISR entering. It takes the
+ * current task stack pointer and places it into the pxCurrentTCB.
+ * It then loads the ISR stack into sp.
  * TODO: ISR nesting code improvements ?
  * TODO: ISR nesting code improvements ?
  */
 
 #if CONFIG_IDF_TARGET_ESP32P4
 #if CONFIG_IDF_TARGET_ESP32P4
     //TODO: IDF-7861
     /* preserve the return address */
-    mv t2, a0
+    mv      t1, ra
+    mv      t2, a0
 #endif
 #endif
 
-#if !CONFIG_IDF_TARGET_ESP32P4
-    /* scheduler not enabled, jump directly to ISR handler */
-    lw t0, uxSchedulerRunning
-    beq t0,zero, rtos_enter_end
+    /* If the scheduler is not enabled, jump directly to the ISR handler */
+#if ( configNUM_CORES > 1 )
+    csrr    t6, mhartid                     /* t6 = coreID */
+    slli    t6, t6, 2                       /* t6 = coreID * 4 */
+    la      t0, port_xSchedulerRunning      /* t0 = &port_xSchedulerRunning */
+    add     t0, t0, t6                      /* t0 = &port_xSchedulerRunning[coreID] */
+    lw      t0, (t0)                        /* t0 = port_xSchedulerRunning[coreID] */
 #else
 #else
-    csrr t6, mhartid          /* t6 = coreID */
-    slli t6, t6, 2            /* t6 = coreID * 4 */
-    la t0, uxSchedulerRunning /* t0 = &uxSchedulerRunning */
-    add t0, t0, t6            /* t0 = &uxSchedulerRunning[coreID] */
-    lw t0, (t0)               /* t0 = uxSchedulerRunning[coreID] */
-    beq t0,zero, rtos_enter_end
-#endif
-
-    /* increments the ISR nesting count */
-    la t3, uxInterruptNesting
-#if CONFIG_IDF_TARGET_ESP32P4
-//TODO: IDF-7566
-    add t3, t3, t6
-#endif
-    lw t4, 0x0(t3)
-    addi t5,t4,1
-    sw  t5, 0x0(t3)
-
-    /* If reached here from another low-prio ISR, skip stack pushing to TCB */
-    bne t4,zero, rtos_enter_end
+    lw      t0, port_xSchedulerRunning      /* t0 = port_xSchedulerRunning */
+#endif /* (configNUM_CORES > 1) */
+    beq     t0, zero, rtos_int_enter_end    /* if (port_xSchedulerRunning[coreID] == 0) jump to rtos_int_enter_end */
+
+    /* Increment the ISR nesting count */
+    la      t3, port_uxInterruptNesting     /* t3 = &port_usInterruptNesting */
+#if ( configNUM_CORES > 1 )
+    add     t3, t3, t6                      /* t3 = &port_uxInterruptNesting[coreID] // t6 already contains coreID * 4 */
+#endif /* ( configNUM_CORES > 1 ) */
+    lw      t4, 0x0(t3)                     /* t4 = port_uxInterruptNesting[coreID] */
+    addi    t5, t4, 1                       /* t5 = t4 + 1 */
+    sw      t5, 0x0(t3)                     /* port_uxInterruptNesting[coreID] = t5 */
+
+    /* If we reached here from another low-prio ISR, i.e, port_uxInterruptNesting[coreID] > 0, then skip stack pushing to TCB */
+    bne     t4, zero, rtos_int_enter_end    /* if (port_uxInterruptNesting[coreID] > 0) jump to rtos_int_enter_end */
 
 
 #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
     /* esp_hw_stack_guard_monitor_stop(); */
     ESP_HW_STACK_GUARD_MONITOR_STOP_CPU0
 #endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
 
-//TODO: IDF-7566
-#if !CONFIG_IDF_TARGET_ESP32P4
-    lw  t0, pxCurrentTCB
-    sw  sp, 0x0(t0)
-    lw  sp, xIsrStackTop
+    /* Save the current sp in pxCurrentTCB[coreID] and load the ISR stack on to sp */
+#if ( configNUM_CORES > 1 )
+    la      t0, pxCurrentTCB                /* t0 = &pxCurrentTCB */
+    add     t0, t0, t6                      /* t0 = &pxCurrentTCB[coreID] // t6 already contains coreID * 4 */
+    lw      t0, (t0)                        /* t0 = pxCurrentTCB[coreID] */
+    sw      sp, 0x0(t0)                     /* pxCurrentTCB[coreID] = sp */
+    la      t0, xIsrStackTop                /* t0 = &xIsrStackTop */
+    add     t0, t0, t6                      /* t0 = &xIsrStackTop[coreID] // t6 already contains coreID * 4 */
+    lw      sp, 0x0(t0)                     /* sp = xIsrStackTop[coreID] */
 #else
-    la  t0, pxCurrentTCB      /* t0 = &pxCurrentTCB */
-    add t0, t0, t6            /* t0 = &pxCurrentTCB[coreID] */
-    lw  t0, (t0)              /* t0 = pxCurrentTCB[coreID] */
-    sw 	t2, 0x0(t0)
-    lw  sp, xIsrStackTop
-    csrr t6, mhartid
-    beq t6, zero, rtos_enter_end
-    lw  sp, xIsrStackTop1
-#endif
+    lw      t0, pxCurrentTCB                /* t0 = pxCurrentTCB */
+    sw      sp, 0x0(t0)                     /* pxCurrentTCB = sp */
+    lw      sp, xIsrStackTop                /* sp = xIsrStackTop */
+#endif /* ( configNUM_CORES > 1 ) */
 
 #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
     /* esp_hw_stack_guard_set_bounds(xIsrStack, xIsrStackTop); */
@@ -104,91 +98,99 @@ rtos_int_enter:
     ESP_HW_STACK_GUARD_MONITOR_START_CPU0
 #endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
 
-rtos_enter_end:
+rtos_int_enter_end:
 #if CONFIG_IDF_TARGET_ESP32P4
     //TODO: IDF-7861
-    mv  ra, t1
+    mv      ra, t1
 #endif
     ret
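
For readers less comfortable with RISC-V assembly, a rough C model of the rtos_int_enter sequence above (an illustrative sketch only, not part of this commit; pxCurrentTCB is shown as a plain pointer array for brevity, while in tasks.c it is a TCB_t * array whose first field holds the saved stack pointer, which is what 0x0(t0) refers to):

extern volatile UBaseType_t port_xSchedulerRunning[portNUM_PROCESSORS];
extern volatile UBaseType_t port_uxInterruptNesting[portNUM_PROCESSORS];
extern StackType_t *xIsrStackTop[portNUM_PROCESSORS];
extern void *pxCurrentTCB[portNUM_PROCESSORS];

static inline StackType_t *rtos_int_enter_model(StackType_t *sp, BaseType_t core)
{
    if (port_xSchedulerRunning[core] == 0) {
        return sp;                               /* scheduler not started: keep the current stack */
    }
    if (port_uxInterruptNesting[core]++ != 0) {
        return sp;                               /* nested interrupt: already running on the ISR stack */
    }
    *(StackType_t **)pxCurrentTCB[core] = sp;    /* outermost interrupt: save the task's sp into its TCB */
    return xIsrStackTop[core];                   /* switch to this core's ISR stack */
}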
 
 /**
- * Recovers the next task to run stack pointer.
+ * Restore the stack pointer of the next task to run.
  */
     .global rtos_int_exit
     .type rtos_int_exit, @function
 rtos_int_exit:
-    /* may skip RTOS aware interrupt since scheduler was not started */
 
-//TODO: IDF-7566
-#if !CONFIG_IDF_TARGET_ESP32P4
-    lw t0, uxSchedulerRunning
+    /* Skip if the scheduler was not started */
+#if ( configNUM_CORES > 1 )
+    csrr    t1, mhartid                     /* t1 = coreID */
+    slli    t1, t1, 2                       /* t1 = t1 * 4 */
+    la      t0, port_xSchedulerRunning      /* t0 = &port_xSchedulerRunning */
+    add     t0, t0, t1                      /* t0 = &port_xSchedulerRunning[coreID] */
+    lw      t0, (t0)                        /* t0 = port_xSchedulerRunning[coreID] */
 #else
 #else
-    slli t1, t1, 2
-    la t0, uxSchedulerRunning      /* t0 = &uxSchedulerRunning */
-    add t0, t0, t1                 /* t0 = &uxSchedulerRunning[coreID] */
-    lw t0, (t0)
-#endif
-    beq t0,zero, rtos_exit_end
-
-    /* update nesting interrupts counter */
-    la t2, uxInterruptNesting
-#if CONFIG_IDF_TARGET_ESP32P4
-//TODO: IDF-7566
-    add t2, t2, t1
+    lw      t0, port_xSchedulerRunning      /* t0 = port_xSchedulerRunning */
+#endif /* ( configNUM_CORES > 1 ) */
+    beq     t0, zero, rtos_int_exit_end     /* if (port_uxSchewdulerRunning == 0) jump to rtos_int_exit_end */
+
+    /* Decrement interrupt nesting counter */
+    la      t2, port_uxInterruptNesting     /* t2 = &port_uxInterruptNesting */
+#if ( configNUM_CORES > 1 )
+    add     t2, t2, t1                      /* t2 = &port_uxInterruptNesting[coreID] // t1 already contains coreID * 4 */
 #endif
 #endif
+    lw      t3, 0x0(t2)                     /* t3 = port_uxInterruptNesting[coreID] */
 
 
-    beq t3, zero, isr_skip_decrement
-    addi t3,t3, -1
-    sw  t3, 0x0(t2)
+    /* If the interrupt nesting counter is already zero, then protect against underflow */
+    beq     t3, zero, isr_skip_decrement    /* if (port_uxInterruptNesting[coreID] == 0) jump to isr_skip_decrement */
+    addi    t3, t3, -1                      /* t3 = t3 - 1 */
+    sw      t3, 0x0(t2)                     /* port_uxInterruptNesting[coreID] = t3 */
 
 
 isr_skip_decrement:
 
-    bne t3,zero,rtos_exit_end
+    /* We may still have interrupts pending. Skip the section below and exit */
+    bne     t3, zero, rtos_int_exit_end     /* (if port_uxInterruptNesting[coreID] > 0) jump to rtos_int_exit_end */
 
 
-    la t0, xPortSwitchFlag
-#if CONFIG_IDF_TARGET_ESP32P4
-//TODO: IDF-7566
-    add t0, t0, t1
-#endif
-    lw t2, 0x0(t0)
-    beq t2, zero, no_switch
-
-    /* preserve return address and schedule next task
-       stack pointer for riscv should always be 16 byte aligned */
-    addi sp,sp,-16
-    sw  ra, 0(sp)
-    call vTaskSwitchContext
-    lw  ra, 0(sp)
-    addi sp, sp, 16
-
-    /* Clears the switch pending flag */
-    la t0, xPortSwitchFlag
-#if CONFIG_IDF_TARGET_ESP32P4
-//TODO: IDF-7566
+    /* Schedule the next task if an yield is pending */
+    la      t0, xPortSwitchFlag             /* t0 = &xPortSwitchFlag */
+#if ( configNUM_CORES > 1 )
+    add     t0, t0, t1                      /* t0 = &xPortSwitchFlag[coreID] // t1 already contains coreID * 4 */
+#endif /* ( configNUM_CORES > 1 ) */
+    lw      t2, 0x0(t0)                     /* t2 = xPortSwitchFlag[coreID] */
+    beq     t2, zero, no_switch             /* if (xPortSwitchFlag[coreID] == 0) jump to no_switch */
+
+    /* Save the return address on the stack and create space on the stack for the c-routine call to schedule
+     * the next task. Stack pointer for RISC-V should always be 16 byte aligned. After the switch, restore
+     * the return address and sp.
+     */
+    addi    sp, sp, -16                     /* sp = sp - 16 */
+    sw      ra, 0(sp)                       /* sp = ra */
+    call    vTaskSwitchContext              /* vTaskSwitchContext() */
+    lw      ra, 0(sp)                       /* ra = sp */
+    addi    sp, sp, 16                      /* sp = sp + 16 */
+
+    /* Clear the switch pending flag */
+    la      t0, xPortSwitchFlag             /* t0 = &xPortSwitchFlag */
+#if ( configNUM_CORES > 1 )
     /* c routine vTaskSwitchContext may change the temp registers, so we read again */
     /* c routine vTaskSwitchContext may change the temp registers, so we read again */
-    slli t3, t3, 2
-    add t0, t0, t3
-#endif
-    mv t2, zero
-    sw  t2, 0x0(t0)
+    csrr    t3, mhartid                     /* t3 = coreID */
+    slli    t3, t3, 2                       /* t3 = t3 * 4 */
+    add     t0, t0, t3                      /* t0 = &xPortSwitchFlag[coreID] */
+#endif /* ( configNUM_CORES > 1 ) */
+    mv      t2, zero                        /* t2 = 0 */
+    sw      t2, 0x0(t0)                     /* xPortSwitchFlag[coreID] = t2 */
 
 
 no_switch:
 
+#if SOC_INT_CLIC_SUPPORTED
+    /* Recover the stack of next task and prepare to exit */
+    la      a0, pxCurrentTCB                /* a0 = &pxCurrentTCB */
+#if ( configNUM_CORES > 1 )
+    csrr    t3, mhartid                     /* t3 = coreID */
+    slli    t3, t3, 2                       /* t3 = t3 * 4 */
+    add     a0, a0, t3                      /* a0 = &pxCurrentTCB[coreID] */
+#endif /* ( configNUM_CORES > 1 ) */
+    lw      a0, (a0)                        /* a0 = pxCurrentTCB[coreID] */
+    lw      a0, 0x0(a0)                     /* a0 = previous sp */
+#else
 #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
 #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
     /* esp_hw_stack_guard_monitor_stop(); */
     ESP_HW_STACK_GUARD_MONITOR_STOP_CPU0
 #endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
 
     /* Recover the stack of next task */
-    lw sp, 0x0(t0)
+    lw      t0, pxCurrentTCB
+    lw      sp, 0x0(t0)
 
 
 #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
     /* esp_hw_stack_guard_set_bounds(pxCurrentTCB[0]->pxStack,
     /* esp_hw_stack_guard_monitor_start(); */
     /* esp_hw_stack_guard_monitor_start(); */
     ESP_HW_STACK_GUARD_MONITOR_START_CPU0
 #endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
 
 
-    /* Recover the stack of next task and prepare to exit : */
-    la a0, pxCurrentTCB
-    /* We may come here from a branch, so we re-cal here */
-    csrr t3, mhartid
-    slli t3, t3, 2
-    add a0, a0, t3    /* a0 = &pxCurrentTCB[coreID] */
-    lw a0, (a0)       /* a0 = pxCurrentTCB[coreID] */
-    lw a0, 0x0(a0)    /* a0 = previous sp */
-#endif  //#if !CONFIG_IDF_TARGET_ESP32P4
-
-rtos_exit_end:
+rtos_int_exit_end:
     ret
     ret
+ 1 - 1
components/freertos/FreeRTOS-Kernel/portable/xtensa/include/freertos/portmacro.h

@@ -209,7 +209,7 @@ static inline void vPortClearInterruptMaskFromISR(UBaseType_t prev_level);
  * - See "Critical Sections & Disabling Interrupts" in docs/api-guides/freertos-smp.rst for more details
  * - See "Critical Sections & Disabling Interrupts" in docs/api-guides/freertos-smp.rst for more details
  * - Remark: For the ESP32, portENTER_CRITICAL and portENTER_CRITICAL_ISR both alias vPortEnterCritical, meaning that
  *           either function can be called both from ISR as well as task context. This is not standard FreeRTOS
+ *           behavior; please keep this in mind if you need any compatibility with other FreeRTOS implementations.
  * @note [refactor-todo] Check if these comments are still true
  * @note [refactor-todo] Check if these comments are still true
  * ------------------------------------------------------ */
 
+ 0 - 13
components/freertos/FreeRTOS-Kernel/tasks.c

@@ -185,8 +185,6 @@
 
 
 /*-----------------------------------------------------------*/
 
-#if !CONFIG_IDF_TARGET_ESP32P4
     #define taskSELECT_HIGHEST_PRIORITY_TASK()                                                     \
     #define taskSELECT_HIGHEST_PRIORITY_TASK()                                                     \
     {                                                                                              \
         UBaseType_t uxTopPriority;                                                                 \
         configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 );    \
         configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 );    \
         listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB[ 0 ], &( pxReadyTasksLists[ uxTopPriority ] ) ); \
     } /* taskSELECT_HIGHEST_PRIORITY_TASK() */
-    #define taskSELECT_HIGHEST_PRIORITY_TASK()                                                     \
-    {                                                                                              \
-        UBaseType_t uxTopPriority;                                                                 \
-                                                                                                   \
-        /* Find the highest priority list that contains ready tasks. */                            \
-        portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority );                             \
-        configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 );    \
-        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB[ xPortGetCoreID() ], &( pxReadyTasksLists[ uxTopPriority ] ) ); \
-    } /* taskSELECT_HIGHEST_PRIORITY_TASK() */
-#endif
 
 
 /*-----------------------------------------------------------*/
 
+ 0 - 1
components/freertos/Kconfig

@@ -30,7 +30,6 @@ menu "FreeRTOS"
             # Todo: Replace with CONFIG_NUM_CORES (IDF-4986)
             # Todo: Replace with CONFIG_NUM_CORES (IDF-4986)
             bool "Run FreeRTOS only on first core"
             default "y" if IDF_TARGET_ESP32S2 || IDF_TARGET_LINUX
             select ESP_SYSTEM_SINGLE_CORE_MODE
             select ESP_SYSTEM_SINGLE_CORE_MODE
             help
                 This version of FreeRTOS normally takes control of all cores of the CPU. Select this if you only want
+ 0 - 9
components/freertos/app_startup.c

@@ -111,19 +111,10 @@ void esp_startup_start_app_other_cores(void)
     }
     }
 
     // Wait for CPU0 to start FreeRTOS before progressing
-#if !CONFIG_IDF_TARGET_ESP32P4
     extern volatile unsigned port_xSchedulerRunning[portNUM_PROCESSORS];
     extern volatile unsigned port_xSchedulerRunning[portNUM_PROCESSORS];
     while (port_xSchedulerRunning[0] == 0) {
         ;
     }
-    extern volatile unsigned uxSchedulerRunning[portNUM_PROCESSORS];
-    while (uxSchedulerRunning[0] == 0) {
-        ;
-    }
-#endif
-
 
 
 #if CONFIG_APPTRACE_ENABLE
     // [refactor-todo] move to esp_system initialization