ソースを参照

Release coprocessor registers when a task is deleted.

Jeroen Domburg 8 年 前
コミット
da5d166356

+ 1 - 0
components/freertos/include/freertos/portable.h

@@ -210,6 +210,7 @@ BaseType_t xPortInIsrContext();
 #if( portUSING_MPU_WRAPPERS == 1 )
 	struct xMEMORY_REGION;
 	void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMORY_REGION * const xRegions, StackType_t *pxBottomOfStack, uint32_t usStackDepth ) PRIVILEGED_FUNCTION;
+	void vPortReleaseTaskMPUSettings( xMPU_SETTINGS *xMPUSettings );
 #endif
 
 /* Multi-core: get current core ID */

+ 5 - 0
components/freertos/include/freertos/portmacro.h

@@ -172,6 +172,7 @@ typedef struct {
 #define portASSERT_IF_IN_ISR()        vPortAssertIfInISR()
 void vPortAssertIfInISR();
 
+
 #define portCRITICAL_NESTING_IN_TCB 1 
 
 /*
@@ -313,6 +314,10 @@ typedef struct {
 	#define PRIVILEGED_DATA
 #endif
 
+
+void _xt_coproc_release(void * coproc_sa_base);
+
+
 // porttrace
 #if configUSE_TRACE_FACILITY_2
 #include "porttrace.h"

+ 7 - 0
components/freertos/port.c

@@ -253,6 +253,13 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMOR
 	 */
 	#endif
 }
+
+void vPortReleaseTaskMPUSettings( xMPU_SETTINGS *xMPUSettings )
+{
+	/* If task has live floating point registers somewhere, release them */
+	_xt_coproc_release( xMPUSettings->coproc_area );
+}
+
 #endif
 
 /*

+ 14 - 3
components/freertos/tasks.c

@@ -3550,9 +3550,16 @@ static void prvCheckTasksWaitingTermination( void )
 
 				{
 					pxTCB = ( TCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) );
-					( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
-					--uxCurrentNumberOfTasks;
-					--uxTasksDeleted;
+					/* We only want to kill tasks that ran on this core because e.g. _xt_coproc_release needs to
+					   be called on the core the process is pinned on, if any */
+					if( pxTCB->xCoreID == tskNO_AFFINITY || pxTCB->xCoreID == xPortGetCoreID()) {
+						( void ) uxListRemove( &( pxTCB->xGenericListItem ) );
+						--uxCurrentNumberOfTasks;
+						--uxTasksDeleted;
+					} else {
+						/* Need to wait until the idle task on the other processor kills that task first. */
+						break;
+					}
 				}
 				
 				#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
@@ -3770,6 +3777,10 @@ BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
 		}
 		#endif /* configUSE_NEWLIB_REENTRANT */
 
+		#if ( portUSING_MPU_WRAPPERS == 1 )
+			vPortReleaseTaskMPUSettings( &( pxTCB->xMPUSettings) );
+		#endif
+
 		#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
 		{
 			/* The task can only have been allocated dynamically - free both

+ 3 - 0
components/freertos/xtensa_context.S

@@ -378,6 +378,9 @@ May be called when a thread terminates or completes but does not delete
 the co-proc save area, to avoid the exception handler having to save the 
 thread's co-proc state before another thread can use it (optimization).
 
+Needs to be called on the processor the thread was running on. Unpinned threads
+won't have an entry here because they get pinned as soon as they use a coprocessor.
+
 Entry Conditions:
     A2  = Pointer to base of co-processor state save area.
 

+ 9 - 3
components/freertos/xtensa_vectors.S

@@ -931,7 +931,6 @@ _xt_coproc_exc:
     addx4   a0, a5, a0                      /* a0 = &_xt_coproc_mask[n] */
     l32i    a0, a0, 0                       /* a0 = (n << 16) | (1 << n) */
 
-    /* TODO: Remove this as soon as coprocessor state moving works across cores - JD */
     /* FPU operations are incompatible with non-pinned tasks. If we have a FPU operation
        here, to keep the entire thing from crashing, it's better to pin the task to whatever
        core we're running on now. */
@@ -944,14 +943,21 @@ _xt_coproc_exc:
 
     /* Grab correct xt_coproc_owner_sa for this core */
     movi    a2, XCHAL_CP_MAX << 2
-    mull    a2, a2, a3
+    mull    a2, a2, a3                      /* multiply by current processor id */
     movi    a3, _xt_coproc_owner_sa         /* a3 = base of owner array */
-    add     a3, a3, a2
+    add     a3, a3, a2                      /* a3 = owner area needed for this processor */
 
     extui   a2, a0, 0, 16                   /* coprocessor bitmask portion */
     or      a4, a4, a2                      /* a4 = CPENABLE | (1 << n) */
     wsr     a4, CPENABLE
 
+/* 
+Keep loading _xt_coproc_owner_sa[n] atomic (=load once, then use that value
+everywhere): _xt_coproc_release assumes it works like this in order not to need
+locking.
+*/
+
+
     /* Get old coprocessor owner thread (save area ptr) and assign new one.  */
     addx4   a3,  a5, a3                      /* a3 = &_xt_coproc_owner_sa[n] */
     l32i    a2,  a3, 0                       /* a2 = old owner's save area */