@@ -58,7 +58,6 @@
#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
#undef _REENT_INIT_PTR
#define _REENT_INIT_PTR esp_reent_init
-extern void esp_vApplicationTickHook(void);
extern void esp_vApplicationIdleHook(void);
#endif //ESP_PLATFORM
@@ -3075,33 +3074,34 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )

BaseType_t xTaskIncrementTick( void )
{
+#ifdef ESP_PLATFORM
+ #if ( configNUM_CORES > 1 )
+ {
+ /* Only Core 0 should ever call this function. */
+ configASSERT( xPortGetCoreID() == 0 );
+ }
+ #endif /* ( configNUM_CORES > 1 ) */
+#endif // ESP_PLATFORM
TCB_t * pxTCB;
TickType_t xItemValue;
BaseType_t xSwitchRequired = pdFALSE;

- /* Only allow core 0 increase the tick count in the case of xPortSysTickHandler processing. */
- /* And allow core 0 and core 1 to unwind uxPendedTicks during xTaskResumeAll. */
-
- if (xPortInIsrContext())
- {
- #if ( configUSE_TICK_HOOK == 1 )
- vApplicationTickHook();
- #endif /* configUSE_TICK_HOOK */
- esp_vApplicationTickHook();
- if (xPortGetCoreID() != 0 )
- {
- return pdTRUE;
- }
- }
-
/* Called by the portable layer each time a tick interrupt occurs.
* Increments the tick then checks to see if the new tick value will cause any
* tasks to be unblocked. */
traceTASK_INCREMENT_TICK( xTickCount );

- if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
+#ifdef ESP_PLATFORM
+ /* We need a critical section here as we are about to access kernel data
+ * structures:
+ * - Other cores could be accessing them simultaneously
+ * - Unlike other ports, we call xTaskIncrementTick() without disabling nested
+ * interrupts, so they are disabled by the critical section instead. */
+ taskENTER_CRITICAL_ISR();
+#endif // ESP_PLATFORM
+
+ if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
{
- taskENTER_CRITICAL_ISR();
/* Minor optimisation. The tick count cannot change in this
* block. */
const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
@@ -3154,7 +3154,7 @@ BaseType_t xTaskIncrementTick( void )
* state - so record the item value in
* xNextTaskUnblockTime. */
xNextTaskUnblockTime = xItemValue;
- break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
+ break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
}
else
{
@@ -3187,7 +3187,14 @@ BaseType_t xTaskIncrementTick( void )
* only be performed if the unblocked task has a
* priority that is equal to or higher than the
* currently executing task. */
- if( pxTCB->uxPriority >= pxCurrentTCB[xPortGetCoreID()]->uxPriority )
+#if defined(ESP_PLATFORM) && ( configNUM_CORES > 1 )
+ /* Since this function is only run on core 0, we
+ * only need to switch contexts if the unblocked task
+ * can run on core 0. */
+ if( ( pxTCB->xCoreID == 0 || pxTCB->xCoreID == tskNO_AFFINITY ) && (pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority) )
+#else
+ if( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority )
+#endif
{
xSwitchRequired = pdTRUE;
}
@@ -3206,7 +3213,7 @@ BaseType_t xTaskIncrementTick( void )
* writer has not explicitly turned time slicing off. */
#if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
{
- if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[xPortGetCoreID()]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
+ if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[ 0 ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
{
xSwitchRequired = pdTRUE;
}
@@ -3216,28 +3223,152 @@ BaseType_t xTaskIncrementTick( void )
}
}
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
+
+#ifdef ESP_PLATFORM
+ #if ( configUSE_TICK_HOOK == 1 )
+ TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */
+ #endif /* configUSE_TICK_HOOK */
+ /* Exit the critical section as we have finished accessing the kernel data structures. */
taskEXIT_CRITICAL_ISR();
+#endif // ESP_PLATFORM
+
+ #if ( configUSE_TICK_HOOK == 1 )
+ {
+ /* Guard against the tick hook being called when the pended tick
+ * count is being unwound (when the scheduler is being unlocked). */
+#ifdef ESP_PLATFORM
+ if( xPendedCounts == ( TickType_t ) 0 )
+#else
+ if( xPendedTicks == ( TickType_t ) 0 )
+#endif
+ {
+ vApplicationTickHook();
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* configUSE_TICK_HOOK */
+
+ #if ( configUSE_PREEMPTION == 1 )
+ {
+ if( xYieldPending[ 0 ] != pdFALSE )
+ {
+ xSwitchRequired = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* configUSE_PREEMPTION */
}
else
{
++xPendedTicks;
+#ifdef ESP_PLATFORM
+ /* Exit the critical section as we have finished accessing the kernel data structures. */
+ taskEXIT_CRITICAL_ISR();
+#endif // ESP_PLATFORM
+
+ /* The tick hook gets called at regular intervals, even if the
+ * scheduler is locked. */
+ #if ( configUSE_TICK_HOOK == 1 )
+ {
+ vApplicationTickHook();
+ }
+ #endif
}

- #if ( configUSE_PREEMPTION == 1 )
+ return xSwitchRequired;
+}
+
+#ifdef ESP_PLATFORM
+#if ( configNUM_CORES > 1 )
+ BaseType_t xTaskIncrementTickOtherCores( void )
{
- if( xYieldPending[xPortGetCoreID()] != pdFALSE )
- {
- xSwitchRequired = pdTRUE;
- }
- else
- {
- mtCOVERAGE_TEST_MARKER();
+ /* Minor optimisation. This function can never switch cores mid
+ * execution. */
+ BaseType_t xCoreID = xPortGetCoreID();
+ BaseType_t xSwitchRequired = pdFALSE;
+ /* This function should never be called by Core 0. */
+ configASSERT( xCoreID != 0 );
+
+ /* Called by the portable layer each time a tick interrupt occurs.
+ * Increments the tick then checks to see if the new tick value will cause any
+ * tasks to be unblocked. */
+ traceTASK_INCREMENT_TICK( xTickCount );
+
+ if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) pdFALSE )
+ {
+ /* We need a critical section here as we are about to access kernel data
+ * structures:
+ * - Other cores could be accessing them simultaneously
+ * - Unlike other ports, we call xTaskIncrementTick() without disabling
+ * nested interrupts, so they are disabled by the critical
+ * section instead. */
+ taskENTER_CRITICAL_ISR();
+
+ /* A task being unblocked cannot cause an immediate context switch
+ * if preemption is turned off. */
+ #if ( configUSE_PREEMPTION == 1 )
+ {
+ /* Check if core 0 calling xTaskIncrementTick() has
+ * unblocked a task that can be run. */
+ if( uxTopReadyPriority > pxCurrentTCB[ xCoreID ]->uxPriority )
+ {
+ xSwitchRequired = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif
+
+ /* Tasks of equal priority to the currently running task will share
+ * processing time (time slice) if preemption is on, and the application
+ * writer has not explicitly turned time slicing off. */
+ #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
+ {
+ if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[ xCoreID ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
+ {
+ xSwitchRequired = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
+
+ /* Exit the critical section as we have finished accessing the kernel data structures. */
+ taskEXIT_CRITICAL_ISR();
+
+ #if ( configUSE_PREEMPTION == 1 )
+ {
+ if( xYieldPending[ xCoreID ] != pdFALSE )
+ {
+ xSwitchRequired = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* configUSE_PREEMPTION */
}
+
+ #if ( configUSE_TICK_HOOK == 1 )
+ {
+ vApplicationTickHook();
+ }
+ #endif
+
+ return xSwitchRequired;
}
- #endif /* configUSE_PREEMPTION */
+#endif /* ( configNUM_CORES > 1 ) */
+#endif // ESP_PLATFORM

- return xSwitchRequired;
-}
/*-----------------------------------------------------------*/

#if ( configUSE_APPLICATION_TASK_TAG == 1 )
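Note on the diff above: tick processing is now split between cores. xTaskIncrementTick() asserts that it runs only on core 0, which alone advances xTickCount and unblocks timed-out tasks, while the other cores call the new xTaskIncrementTickOtherCores() to decide whether they should switch contexts; the old pattern of every core entering xTaskIncrementTick() and non-zero cores returning early has been removed. The sketch below shows one way a port-level tick interrupt handler could dispatch between the two functions. It is a minimal illustration under stated assumptions, not the port's actual handler: the name vExampleTickISR is hypothetical, it assumes configNUM_CORES > 1 and that it runs in ISR context on every core each tick, and it uses portYIELD_FROM_ISR() in its argument-less form, which may differ from the real port macro.

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Hypothetical per-core tick ISR (assumes configNUM_CORES > 1). */
void vExampleTickISR( void )
{
    BaseType_t xSwitchRequired;

    if( xPortGetCoreID() == 0 )
    {
        /* Core 0 advances the tick count and unblocks timed-out tasks. */
        xSwitchRequired = xTaskIncrementTick();
    }
    else
    {
        /* Other cores only check whether they should yield to a ready task. */
        xSwitchRequired = xTaskIncrementTickOtherCores();
    }

    if( xSwitchRequired != pdFALSE )
    {
        /* Request a context switch on this core when the ISR exits. */
        portYIELD_FROM_ISR();
    }
}

Because xTaskIncrementTickOtherCores() never touches xTickCount, the tick count has a single writer (core 0); the other cores only read scheduler state, inside their own critical sections, to decide whether to yield.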