
Merge branch 'change/freertos_local_crit_section_macro' into 'master'

change(freertos/idf): Refactor thread safety convenience macros

Closes IDF-8161

See merge request espressif/esp-idf!26805
Darian · 2 years ago
commit c4eea80c07
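
In short, this commit deletes the duplicated `task*_SMP_ONLY` convenience macros that queue.c and tasks.c each defined locally, and replaces them with a single `prv`-prefixed set in freertos_idf_additions_priv.h. Call sites change only in the macro prefix; a minimal sketch of the pattern, using the kernel lock name from the tasks.c hunks below:

    /* Before: each kernel source file carried its own copy of the macro. */
    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
    {
        /* ... access kernel data structures ... */
    }
    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );

    /* After: one shared definition from freertos_idf_additions_priv.h. */
    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
    {
        /* ... access kernel data structures ... */
    }
    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );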

+ 2 - 2
components/freertos/FreeRTOS-Kernel/event_groups.c

@@ -534,11 +534,11 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
     EventGroup_t const * const pxEventBits = xEventGroup;
     EventBits_t uxReturn;
 
-    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+    prvENTER_CRITICAL_OR_MASK_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ), uxSavedInterruptStatus );
     {
         uxReturn = pxEventBits->uxEventBits;
     }
-    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+    prvEXIT_CRITICAL_OR_UNMASK_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ), uxSavedInterruptStatus );
 
     return uxReturn;
 } /*lint !e818 EventGroupHandle_t is a typedef used in other functions, so it can't be a pointer to const. */
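
With the new macros (defined in freertos_idf_additions_priv.h, shown later in this diff), the body above expands roughly as follows in each configuration. This is a sketch of the preprocessed result, not literal source:

    /* SMP ( configNUM_CORES > 1 ): take the event group's spinlock from the
     * ISR; the saved-interrupt-status argument is cast to void. */
    taskENTER_CRITICAL_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ) );
    uxReturn = pxEventBits->uxEventBits;
    taskEXIT_CRITICAL_ISR( ( portMUX_TYPE * ) &( pxEventBits->xEventGroupLock ) );

    /* Single-core: mask interrupts exactly as the removed code did; the lock
     * argument is cast to void. */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    uxReturn = pxEventBits->uxEventBits;
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );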

+ 10 - 60
components/freertos/FreeRTOS-Kernel/queue.c

@@ -54,56 +54,6 @@
  * correct privileged Vs unprivileged linkage and placement. */
 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
 
-/* Some code sections require extra critical sections when building for SMP
- * ( configNUMBER_OF_CORES > 1 ). */
-#if ( configNUMBER_OF_CORES > 1 )
-/* Macros that Enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )         taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )          taskEXIT_CRITICAL( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )    prvTaskEnterCriticalSafeSMPOnly( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )     prvTaskExitCriticalSafeSMPOnly( pxLock )
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskENTER_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskENTER_CRITICAL_ISR( pxLock );
-        }
-    }
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskEXIT_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskEXIT_CRITICAL_ISR( pxLock );
-        }
-    }
-#else /* configNUMBER_OF_CORES > 1 */
-    /* Macros that Enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
-#endif /* configNUMBER_OF_CORES > 1 */
-
 /* Single core FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList()
  * calls are deterministic (as queue locks use scheduler suspension instead of
  * critical sections). However, the SMP implementation is non-deterministic
@@ -3109,7 +3059,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 
         /* For SMP, we need to take the queue registry lock in case another
         * core updates the registry simultaneously. */
-        taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
         {
             if( pcQueueName != NULL )
             {
@@ -3145,7 +3095,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             }
         }
         /* Release the previously taken queue registry lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
     }
 
 #endif /* configQUEUE_REGISTRY_SIZE */
@@ -3162,7 +3112,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 
         /* For SMP, we need to take the queue registry lock in case another
         * core updates the registry simultaneously. */
-        taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
         {
             /* Note there is nothing here to protect against another task adding or
              * removing entries from the registry while it is being searched. */
@@ -3181,7 +3131,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             }
         }
         /* Release the previously taken queue registry lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
 
         return pcReturn;
     } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
@@ -3199,7 +3149,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 
         /* For SMP, we need to take the queue registry lock in case another
         * core updates the registry simultaneously. */
-        taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
         {
             /* See if the handle of the queue being unregistered is actually in the
              * registry. */
@@ -3223,7 +3173,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             }
         }
         /* Release the previously taken queue registry lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock );
     } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
 
 #endif /* configQUEUE_REGISTRY_SIZE */
@@ -3247,7 +3197,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 
         /* For SMP, we need to take the queue's xQueueLock as we are about to
          * access the queue. */
-        taskENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
+        prvENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
         {
             #if ( queueUSE_LOCKS == 1 )
             {
@@ -3278,7 +3228,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             #endif /* queueUSE_LOCKS == 1 */
         }
         /* Release the previously taken xQueueLock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
+        prvEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) );
     }
 
 #endif /* configUSE_TIMERS */
@@ -3413,7 +3363,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 
         /* In SMP, queue sets have their own xQueueLock. Thus we need to also
          * acquire the queue set's xQueueLock before accessing it. */
-        taskENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
+        prvENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
         {
             if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
             {
@@ -3463,7 +3413,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             }
         }
         /* Release the previously acquired queue set's xQueueLock. */
-        taskEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
+        prvEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) );
 
         return xReturn;
     }
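
The `_SAFE_` variants used here exist because prvNotifyQueueSetContainer can be reached from both task and ISR context, so the right critical-section call must be chosen at run time. Per the definitions moved into freertos_idf_additions_priv.h (below), the enter path on an SMP build is essentially:

    /* Sketch of prvENTER_CRITICAL_SAFE_SMP_ONLY() on an SMP build. */
    if( portCHECK_IF_IN_ISR() == pdFALSE )
    {
        taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );     /* task context */
    }
    else
    {
        taskENTER_CRITICAL_ISR( &( pxQueueSetContainer->xQueueLock ) ); /* ISR context */
    }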

+ 35 - 103
components/freertos/FreeRTOS-Kernel/tasks.c

@@ -65,74 +65,6 @@
     #include <stdio.h>
 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
 
-/* Some code sections require extra critical sections when building for SMP
- * ( configNUMBER_OF_CORES > 1 ). */
-#if ( configNUMBER_OF_CORES > 1 )
-    /* Macros that enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )             taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )              taskEXIT_CRITICAL( pxLock )
-    #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock )         taskENTER_CRITICAL_ISR( pxLock )
-    #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )          taskEXIT_CRITICAL_ISR( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )        prvTaskEnterCriticalSafeSMPOnly( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )         prvTaskExitCriticalSafeSMPOnly( pxLock )
-    /* Macros that enter/exit a critical section only when building for single-core */
-    #define taskENTER_CRITICAL_SC_ONLY( pxLock )              taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SC_ONLY( pxLock )               taskEXIT_CRITICAL( pxLock )
-    /* Macros that enable/disable interrupts only when building for SMP */
-    #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY()             portSET_INTERRUPT_MASK_FROM_ISR()
-    #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus )
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskENTER_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskENTER_CRITICAL_ISR( pxLock );
-        }
-    }
-
-    static inline __attribute__( ( always_inline ) )
-    void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
-    {
-        if( portCHECK_IF_IN_ISR() == pdFALSE )
-        {
-            taskEXIT_CRITICAL( pxLock );
-        }
-        else
-        {
-            #ifdef __clang_analyzer__
-                /* Teach clang-tidy that ISR version macro can be different */
-                configASSERT( 1 );
-            #endif
-            taskEXIT_CRITICAL_ISR( pxLock );
-        }
-    }
-
-#else /* configNUMBER_OF_CORES > 1 */
-    /* Macros that enter/exit a critical section only when building for SMP */
-    #define taskENTER_CRITICAL_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SMP_ONLY( pxLock )
-    #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )
-    #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
-    #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
-    /* Macros that enter/exit a critical section only when building for single-core */
-    #define taskENTER_CRITICAL_SC_ONLY( pxLock )              taskENTER_CRITICAL( pxLock )
-    #define taskEXIT_CRITICAL_SC_ONLY( pxLock )               taskEXIT_CRITICAL( pxLock )
-    /* Macros that enable/disable interrupts only when building for SMP */
-    #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY()             ( ( UBaseType_t ) 0 )
-    #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    ( ( void ) uxStatus )
-
-#endif /* configNUMBER_OF_CORES > 1 */
-
 #if ( configUSE_PREEMPTION == 0 )
 
 /* If the cooperative scheduler is being used then a yield should not be
@@ -1475,7 +1407,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             /* Force a reschedule if it is the currently running task that has just
              * been deleted. */
@@ -1493,7 +1425,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     }
 
 #endif /* INCLUDE_vTaskDelete */
@@ -2448,7 +2380,7 @@ void vTaskStartScheduler( void )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) )
             {
@@ -2463,7 +2395,7 @@ void vTaskStartScheduler( void )
             xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 
         /* If configGENERATE_RUN_TIME_STATS is defined then the following
          * macro must be defined to configure the timer/counter used to generate
@@ -2513,12 +2445,12 @@ void vTaskEndScheduler( void )
 
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         xSchedulerRunning = pdFALSE;
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     vPortEndScheduler();
 }
 /*----------------------------------------------------------*/
@@ -2768,7 +2700,7 @@ TickType_t xTaskGetTickCountFromISR( void )
 
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
     {
         uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
         {
@@ -2777,7 +2709,7 @@ TickType_t xTaskGetTickCountFromISR( void )
         portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock );
 
     return xReturn;
 }
@@ -3176,7 +3108,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
                  * the event list too.  Interrupts can touch the event list item,
                  * even though the scheduler is suspended, so a critical section
                  * is used. */
-                taskENTER_CRITICAL_SC_ONLY( &xKernelLock );
+                prvENTER_CRITICAL_SC_ONLY( &xKernelLock );
                 {
                     if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                     {
@@ -3192,7 +3124,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
                         mtCOVERAGE_TEST_MARKER();
                     }
                 }
-                taskEXIT_CRITICAL_SC_ONLY( &xKernelLock );
+                prvEXIT_CRITICAL_SC_ONLY( &xKernelLock );
 
                 /* Place the unblocked task into the appropriate ready list. */
                 prvAddTaskToReadyList( pxTCB );
@@ -3255,7 +3187,7 @@ BaseType_t xTaskIncrementTick( void )
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures (unlike single core which calls this function with
      * interrupts disabled). */
-    taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
     {
         if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
         {
@@ -3423,7 +3355,7 @@ BaseType_t xTaskIncrementTick( void )
 
     /* Release the previously taken kernel lock as we have finished accessing
      * the kernel data structures. */
-    taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
 
     #if ( configUSE_TICK_HOOK == 1 )
     {
@@ -3646,7 +3578,7 @@ void vTaskSwitchContext( void )
     /* For SMP, we need to take the kernel lock here as we are about to access
     * kernel data structures (unlike single core which calls this function with
     * either interrupts disabled or when the scheduler hasn't started yet). */
-    taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
     {
         /* Get current core ID as we can no longer be preempted. */
         const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -3731,7 +3663,7 @@ void vTaskSwitchContext( void )
 
     /* Release the previously taken kernel lock as we have finished accessing
      * the kernel data structures. */
-    taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock );
 }
 /*-----------------------------------------------------------*/
 
@@ -3746,7 +3678,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
 
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Place the event list item of the TCB in the appropriate event list.
          * This is placed in the list in priority order so the highest priority task
@@ -3764,7 +3696,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
         prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 }
 /*-----------------------------------------------------------*/
 
@@ -3792,7 +3724,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
 
     /* For SMP, we need to take the kernel lock here as we are about to access
      * kernel data structures. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Store the item value in the event list item.  It is safe to access the
          * event list item here as interrupts won't access the event list item of a
@@ -3809,7 +3741,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
         prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
     }
     /* Release the previously taken kernel lock. */
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 }
 /*-----------------------------------------------------------*/
 
@@ -3829,7 +3761,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
 
         /* For SMP, we need to take the kernel lock here as we are about to access
          * kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             /* Place the event list item of the TCB in the appropriate event list.
              * In this case it is assumed that this is the only task that is going to
@@ -4628,7 +4560,7 @@ static void prvCheckTasksWaitingTermination( void )
 
         /* A critical section is required for SMP in case another core modifies
          * the task simultaneously. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
             pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] );
@@ -4730,7 +4662,7 @@ static void prvCheckTasksWaitingTermination( void )
             }
         }
         /* Exit the previously entered critical section. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     }
 
 #endif /* configUSE_TRACE_FACILITY */
@@ -4952,11 +4884,11 @@ static void prvResetNextTaskUnblockTime( void )
          * For single-core a critical section is not required as this is not
          * called from an interrupt and the current TCB will always be the same
          * for any individual execution thread. */
-        uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
+        uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY();
         {
             xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
         }
-        taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
+        prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
 
         return xReturn;
     }
@@ -4980,7 +4912,7 @@ static void prvResetNextTaskUnblockTime( void )
          *
          * We use the ISR versions of interrupt macros as this function could be
          * called inside critical sections. */
-        uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY();
+        uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY();
         {
             if( xSchedulerRunning == pdFALSE )
             {
@@ -4998,7 +4930,7 @@ static void prvResetNextTaskUnblockTime( void )
                 }
             }
         }
-        taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
+        prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus );
 
         return xReturn;
     }
@@ -5015,7 +4947,7 @@ static void prvResetNextTaskUnblockTime( void )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             /* Get current core ID as we can no longer be preempted. */
             const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5098,7 +5030,7 @@ static void prvResetNextTaskUnblockTime( void )
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 
         return xReturn;
     }
@@ -5115,7 +5047,7 @@ static void prvResetNextTaskUnblockTime( void )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             if( pxMutexHolder != NULL )
             {
@@ -5185,7 +5117,7 @@ static void prvResetNextTaskUnblockTime( void )
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 
         return xReturn;
     }
@@ -5204,7 +5136,7 @@ static void prvResetNextTaskUnblockTime( void )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             if( pxMutexHolder != NULL )
             {
@@ -5300,7 +5232,7 @@ static void prvResetNextTaskUnblockTime( void )
             }
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     }
 
 #endif /* configUSE_MUTEXES */
@@ -5635,7 +5567,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
     /* For SMP, we need to take the kernel lock here to ensure nothing else
      * modifies the task's event item value simultaneously. */
-    taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
     {
         /* Get current core ID as we can no longer be preempted. */
         const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5646,7 +5578,7 @@ TickType_t uxTaskResetEventItemValue( void )
          * queues and semaphores. */
         listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
     }
-    taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+    prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
     /* Release the previously taken kernel lock. */
 
     return uxReturn;
@@ -5661,7 +5593,7 @@ TickType_t uxTaskResetEventItemValue( void )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             /* Get current core ID as we can no longer be preempted. */
             const BaseType_t xCurCoreID = portGET_CORE_ID();
@@ -5676,7 +5608,7 @@ TickType_t uxTaskResetEventItemValue( void )
             xReturn = pxCurrentTCBs[ xCurCoreID ];
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 
         return xReturn;
     }
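
The renamed interrupt-masking pair also fixes the odd `taskEnable_...` capitalization of the old macro. On SMP these macros mask interrupts on the current core; on single-core they compile away. A sketch of the two expansions of the pxCurrentTCBs[ portGET_CORE_ID() ] read above, based on the definitions below:

    /* SMP: mask interrupts so the running core cannot be preempted while
     * it reads its own pxCurrentTCBs entry. */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

    /* Single-core: nothing to mask; the macros expand to a dummy status
     * value and a void cast, leaving a plain read. */
    uxSavedInterruptStatus = ( UBaseType_t ) 0;
    xReturn = pxCurrentTCBs[ portGET_CORE_ID() ];
    ( void ) uxSavedInterruptStatus;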

+ 4 - 4
components/freertos/esp_additions/freertos_tasks_c_additions.h

@@ -507,12 +507,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )
 
         /* For SMP, we need to take the kernel lock here as we are about to
          * access kernel data structures. */
-        taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
         {
             ulRunTimeCounter = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter;
         }
         /* Release the previously taken kernel lock. */
-        taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+        prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
 
         return ulRunTimeCounter;
     }
@@ -539,12 +539,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask )
         {
             /* For SMP, we need to take the kernel lock here as we are about
              * to access kernel data structures. */
-            taskENTER_CRITICAL_SMP_ONLY( &xKernelLock );
+            prvENTER_CRITICAL_SMP_ONLY( &xKernelLock );
             {
                 ulReturn = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter / ulTotalTime;
             }
             /* Release the previously taken kernel lock. */
-            taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
+            prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock );
         }
         else
         {

+ 93 - 29
components/freertos/esp_additions/include/esp_private/freertos_idf_additions_priv.h

@@ -13,6 +13,7 @@
 
 #include "sdkconfig.h"
 #include "freertos/FreeRTOS.h"
+#include "freertos/task.h"
 
 /* *INDENT-OFF* */
 #ifdef __cplusplus
@@ -26,52 +27,117 @@
 
 /*
  * The following macros are convenience macros used to account for different
- * thread safety behavior between Vanilla FreeRTOS (i.e., single-core) and ESP-IDF
- * FreeRTOS (i.e., multi-core SMP).
+ * thread safety behavior between single-core and SMP in ESP-IDF FreeRTOS.
  *
 * For thread safety...
  *
- * - Vanilla FreeRTOS will use the following for thread safety (depending on situation)
+ * - Single-core will use the following for thread safety (depending on situation)
  *      - `vTaskSuspendAll()`/`xTaskResumeAll()` for non-deterministic operations
  *      - Critical sections or disabling interrupts for deterministic operations
- * - ESP-IDF FreeRTOS will always use critical sections (determinism is not supported)
- *
- * [refactor-todo]: Define these locally in each kernel source file (IDF-8161)
+ * - SMP will always use critical sections (determinism is not supported)
  */
 #if ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES > 1 ) )
 
-    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    taskENTER_CRITICAL( ( x ) )
-    #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      ( { taskEXIT_CRITICAL( ( x ) ); pdFALSE; } )
-    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
-    {                                                                  \
-        taskENTER_CRITICAL_ISR( ( pxLock ) );                          \
-        ( void ) ( uxInterruptStatus );                                \
+/* Macros that use a critical section when building for SMP */
+    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    taskENTER_CRITICAL( ( pxLock ) )
+    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      ( { taskEXIT_CRITICAL( ( pxLock ) ); pdFALSE; } )
+    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxStatus ) \
+    {                                                         \
+        taskENTER_CRITICAL_ISR( ( pxLock ) );                 \
+        ( void ) ( uxStatus );                                \
+    }
+    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxStatus ) \
+    {                                                          \
+        taskEXIT_CRITICAL_ISR( ( pxLock ) );                   \
+        ( void ) ( uxStatus );                                 \
     }
-    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
-    {                                                                   \
-        taskEXIT_CRITICAL_ISR( ( pxLock ) );                            \
-        ( void ) ( uxInterruptStatus );                                 \
+
+/* Macros that enter/exit a critical section only when building for SMP */
+    #define prvENTER_CRITICAL_SMP_ONLY( pxLock )         taskENTER_CRITICAL( pxLock )
+    #define prvEXIT_CRITICAL_SMP_ONLY( pxLock )          taskEXIT_CRITICAL( pxLock )
+    #define prvENTER_CRITICAL_ISR_SMP_ONLY( pxLock )     taskENTER_CRITICAL_ISR( pxLock )
+    #define prvEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )      taskEXIT_CRITICAL_ISR( pxLock )
+    #define prvENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )    prvTaskEnterCriticalSafeSMPOnly( pxLock )
+    #define prvEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )     prvTaskExitCriticalSafeSMPOnly( pxLock )
+
+    static inline __attribute__( ( always_inline ) )
+    void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
+    {
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
+            taskENTER_CRITICAL( pxLock );
+        }
+        else
+        {
+            #ifdef __clang_analyzer__
+                /* Teach clang-tidy that ISR version macro can be different */
+                configASSERT( 1 );
+            #endif
+            taskENTER_CRITICAL_ISR( pxLock );
+        }
+    }
+
+    static inline __attribute__( ( always_inline ) )
+    void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock )
+    {
+        if( portCHECK_IF_IN_ISR() == pdFALSE )
+        {
+            taskEXIT_CRITICAL( pxLock );
+        }
+        else
+        {
+            #ifdef __clang_analyzer__
+                /* Teach clang-tidy that ISR version macro can be different */
+                configASSERT( 1 );
+            #endif
+            taskEXIT_CRITICAL_ISR( pxLock );
+        }
     }
 
+/* Macros that enter/exit a critical section only when building for single-core */
+    #define prvENTER_CRITICAL_SC_ONLY( pxLock )
+    #define prvEXIT_CRITICAL_SC_ONLY( pxLock )
+
+/* Macros that enable/disable interrupts only when building for SMP */
+    #define prvDISABLE_INTERRUPTS_ISR_SMP_ONLY()             portSET_INTERRUPT_MASK_FROM_ISR()
+    #define prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus )
+
 #elif ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES == 1 ) )
 
-    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x )    ( { vTaskSuspendAll(); ( void ) ( x ); } )
-    #define prvEXIT_CRITICAL_OR_RESUME_ALL( x )      xTaskResumeAll()
-    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \
-    {                                                                  \
-        ( uxInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR();     \
-        ( void ) ( pxLock );                                           \
+/* Macros that suspend the scheduler or disables interrupts when building for single-core */
+    #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock )    ( { vTaskSuspendAll(); ( void ) ( pxLock ); } )
+    #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock )      xTaskResumeAll()
+    #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxStatus ) \
+    {                                                         \
+        ( uxStatus ) = portSET_INTERRUPT_MASK_FROM_ISR();     \
+        ( void ) ( pxLock );                                  \
     }
-    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \
-    {                                                                   \
-        portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxInterruptStatus ) );     \
-        ( void ) ( pxLock );                                            \
+    #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxStatus ) \
+    {                                                          \
+        portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxStatus ) );     \
+        ( void ) ( pxLock );                                   \
     }
 
+/* Macros that enter/exit a critical section only when building for SMP */
+    #define prvENTER_CRITICAL_SMP_ONLY( pxLock )
+    #define prvEXIT_CRITICAL_SMP_ONLY( pxLock )
+    #define prvENTER_CRITICAL_ISR_SMP_ONLY( pxLock )
+    #define prvEXIT_CRITICAL_ISR_SMP_ONLY( pxLock )
+    #define prvENTER_CRITICAL_SAFE_SMP_ONLY( pxLock )
+    #define prvEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock )
+
+/* Macros that enter/exit a critical section only when building for single-core */
+    #define prvENTER_CRITICAL_SC_ONLY( pxLock )              taskENTER_CRITICAL( pxLock )
+    #define prvEXIT_CRITICAL_SC_ONLY( pxLock )               taskEXIT_CRITICAL( pxLock )
+
+/* Macros that enable/disable interrupts only when building for SMP */
+    #define prvDISABLE_INTERRUPTS_ISR_SMP_ONLY()             ( ( UBaseType_t ) 0 )
+    #define prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxStatus )    ( ( void ) uxStatus )
+
 #endif /* ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES == 1 ) ) */
 
 /*
- * In ESP-IDF FreeRTOS (i.e., multi-core SMP) uses spinlocks to protect different
+ * In ESP-IDF FreeRTOS under SMP builds, spinlocks are used to protect different
  * groups of data. This function is a wrapper to take the "xKernelLock" spinlock
  * of tasks.c.
  *
@@ -87,8 +153,6 @@
  * vEventGroupDelete() as both those functions will directly access event lists
  * (which are kernel data structures). Thus, a wrapper function must be provided
  * to take/release the "xKernelLock" from outside tasks.c.
- *
- * [refactor-todo]: Extern this locally in event groups (IDF-8161)
  */
 #if ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES > 1 ) )
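
For context, the wrapper referred to above lets event_groups.c take and release the private "xKernelLock" without seeing tasks.c internals. A sketch of its shape, assuming declarations of this form sit below this hunk (the prototypes shown here are illustrative, not quoted from the header):

    /* Declared in this header, implemented in tasks.c, which owns xKernelLock. */
    void prvTakeKernelLock( void );
    void prvReleaseKernelLock( void );

    /* Hypothetical use from vEventGroupDelete() in event_groups.c: */
    prvTakeKernelLock();
    {
        /* ... directly manipulate event lists (kernel data structures) ... */
    }
    prvReleaseKernelLock();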