port.c 80 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786
  1. /*
  2. * FreeRTOS Kernel <DEVELOPMENT BRANCH>
  3. * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
  4. * Copyright 2025-2026 Arm Limited and/or its affiliates
  5. * <open-source-office@arm.com>
  6. *
  7. * SPDX-License-Identifier: MIT
  8. *
  9. * Permission is hereby granted, free of charge, to any person obtaining a copy of
  10. * this software and associated documentation files (the "Software"), to deal in
  11. * the Software without restriction, including without limitation the rights to
  12. * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
  13. * the Software, and to permit persons to whom the Software is furnished to do so,
  14. * subject to the following conditions:
  15. *
  16. * The above copyright notice and this permission notice shall be included in all
  17. * copies or substantial portions of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
  21. * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
  22. * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
  23. * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  24. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. * https://www.FreeRTOS.org
  27. * https://github.com/FreeRTOS
  28. *
  29. */
  30. /* Standard includes. */
  31. #include <stdlib.h>
  32. #include <string.h>
  33. /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
  34. * all the API functions to use the MPU wrappers. That should only be done when
  35. * task.h is included from an application file. */
  36. #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
  37. /* Scheduler includes. */
  38. #include "FreeRTOS.h"
  39. #include "task.h"
  40. /* MPU includes. */
  41. #include "mpu_wrappers.h"
  42. #include "mpu_syscall_numbers.h"
  43. #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
  44. #ifndef configINTERRUPT_CONTROLLER_BASE_ADDRESS
  45. #error configINTERRUPT_CONTROLLER_BASE_ADDRESS must be defined. Refer to Cortex-A equivalent: /* https://www.freertos.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors */
  46. #endif
  47. #ifndef configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET
  48. #error configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET must be defined. Refer to Cortex-A equivalent: /* https://www.freertos.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors */
  49. #endif
  50. #ifndef configUNIQUE_INTERRUPT_PRIORITIES
  51. #error configUNIQUE_INTERRUPT_PRIORITIES must be defined. Refer to Cortex-A equivalent: /* https://www.freertos.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors */
  52. #endif
  53. #ifndef configSETUP_TICK_INTERRUPT
  54. #error configSETUP_TICK_INTERRUPT() must be defined. Refer to Cortex-A equivalent: /* https://www.freertos.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors */
  55. #endif /* configSETUP_TICK_INTERRUPT */
  56. #ifndef configMAX_API_CALL_INTERRUPT_PRIORITY
  57. #error configMAX_API_CALL_INTERRUPT_PRIORITY must be defined. Refer to Cortex-A equivalent: /* https://www.freertos.org/Using-FreeRTOS-on-Cortex-A-Embedded-Processors */
  58. #endif
  59. #if configMAX_API_CALL_INTERRUPT_PRIORITY == 0
  60. #error "configMAX_API_CALL_INTERRUPT_PRIORITY must not be set to 0"
  61. #endif
  62. #if configMAX_API_CALL_INTERRUPT_PRIORITY > configUNIQUE_INTERRUPT_PRIORITIES
  63. #error "configMAX_API_CALL_INTERRUPT_PRIORITY must be less than or equal to configUNIQUE_INTERRUPT_PRIORITIES as the lower the numeric priority value the higher the logical interrupt priority"
  64. #endif
  65. #if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1
  66. /* Check the configuration. */
  67. #if ( configMAX_PRIORITIES > 32 )
  68. #error "configUSE_PORT_OPTIMISED_TASK_SELECTION can only be set to 1 when configMAX_PRIORITIES is less than or equal to 32. It is very rare that a system requires more than 10 to 15 different priorities as tasks that share a priority will time slice."
  69. #endif
  70. #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
  71. /* In case security extensions are implemented. */
  72. #if configMAX_API_CALL_INTERRUPT_PRIORITY <= ( configUNIQUE_INTERRUPT_PRIORITIES / 2 )
  73. #error "configMAX_API_CALL_INTERRUPT_PRIORITY must be greater than ( configUNIQUE_INTERRUPT_PRIORITIES / 2 )"
  74. #endif
  75. #ifndef configCLEAR_TICK_INTERRUPT
  76. #error configCLEAR_TICK_INTERRUPT must be defined in FreeRTOSConfig.h to clear which ever interrupt was used to generate the tick interrupt.
  77. #endif
  78. #if configNUMBER_OF_CORES < 1
  79. #error configNUMBER_OF_CORES must be set to 1 or greater. If the application is not using multiple cores then set configNUMBER_OF_CORES to 1.
  80. #endif /* configNUMBER_OF_CORES < 1 */
  81. #ifndef configENABLE_MPU
  82. #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU.
#endif /* #ifndef configENABLE_MPU */
  84. #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 != 0) )
  85. #error Arm Cortex-R82 port supports only MPU Wrapper V2.
  86. #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 != 0) */
  87. /* A critical section is exited when the critical section nesting count reaches
  88. * this value. */
  89. #define portNO_CRITICAL_NESTING ( ( size_t ) 0 )
  90. /* Macro to unmask all interrupt priorities. */
  91. #define portCLEAR_INTERRUPT_PRIORITIES_MASK() __asm volatile ( "SVC %0" : : "i" ( portSVC_UNMASK_ALL_INTERRUPTS ) : "memory" )
  92. /* Macro to unmask all interrupt priorities from EL1. */
  93. #define portCLEAR_INTERRUPT_PRIORITIES_MASK_FROM_EL1() \
  94. { \
  95. __asm volatile ( \
  96. " MSR DAIFSET, # 2 \n" \
  97. " DSB SY \n" \
  98. " ISB SY \n" \
  99. " MOV X0, %0 \n" \
  100. " MSR ICC_PMR_EL1, X0 \n" \
  101. " DSB SY \n" \
  102. " ISB SY \n" \
  103. " MSR DAIFCLR, # 2 \n" \
  104. " DSB SY \n" \
  105. " ISB SY \n" \
  106. : \
  107. : "i" ( portUNMASK_VALUE ) \
  108. ); \
  109. }
  110. /* Tasks are not created with a floating point context, but can be given a
  111. * floating point context after they have been created. A variable is stored as
  112. * part of the tasks context that holds portNO_FLOATING_POINT_CONTEXT if the task
  113. * does not have an FPU context, or any other value if the task does have an FPU
  114. * context. */
  115. #define portNO_FLOATING_POINT_CONTEXT ( ( StackType_t ) 0 )
  116. /* Constants required to setup the initial task context. */
  117. #define portSP_ELx ( ( StackType_t ) 0x01 )
  118. #define portSP_EL0 ( ( StackType_t ) 0x00 )
  119. #define portEL1 ( ( StackType_t ) 0x04 )
  120. #define portEL0 ( ( StackType_t ) 0x00 )
  121. #define portINITIAL_PSTATE_EL0 ( portEL0 | portSP_EL0 )
  122. #define portINITIAL_PSTATE_EL1 ( portEL1 | portSP_EL0 )
  123. /* Used by portASSERT_IF_INTERRUPT_PRIORITY_INVALID() when ensuring the binary
  124. * point is zero. */
  125. #define portBINARY_POINT_BITS ( ( uint8_t ) 0x03 )
  126. /* Masks all bits in the APSR other than the mode bits. */
  127. #define portAPSR_MODE_BITS_MASK ( 0x0C )
  128. /* The I bit in the DAIF bits. */
  129. #define portDAIF_I ( 0x80 )
  130. #define portMAX_8_BIT_VALUE ( ( uint8_t ) 0xff )
  131. #define portBIT_0_SET ( ( uint8_t ) 0x01 )
  132. /* The space on the stack required to hold the FPU registers.
  133. * There are 32 128-bit plus 2 64-bit status registers. */
  134. #define portFPU_REGISTER_WORDS ( ( 32 * 2 ) + 2 )
  135. /*-----------------------------------------------------------*/
  136. #if ( configENABLE_MPU == 1 )
  137. /**
  138. * @brief Setup the Memory Protection Unit (MPU).
  139. */
  140. PRIVILEGED_FUNCTION void vSetupMPU( void );
  141. /**
  142. * @brief Enable the Memory Protection Unit (MPU).
  143. */
  144. PRIVILEGED_FUNCTION void vEnableMPU( void );
  145. /**
  146. * @brief Called from an ISR and returns the core ID the code is executing on.
  147. *
  148. * @return uint8_t The core ID.
  149. */
  150. PRIVILEGED_FUNCTION uint8_t ucPortGetCoreIDFromIsr( void );
  151. /**
  152. * @brief Checks whether or not the calling task is privileged.
  153. *
  154. * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
  155. */
  156. PRIVILEGED_FUNCTION BaseType_t xPortIsTaskPrivileged( void );
  157. /**
  158. * @brief Extract MPU region's access permissions from the Protection Region Base Address Register
  159. * (PRBAR_EL1) value.
  160. *
  161. * @param ullPrbarEl1Value PRBAR_EL1 value for the MPU region.
  162. *
  163. * @return uint32_t Access permissions.
  164. */
  165. PRIVILEGED_FUNCTION static uint32_t prvGetRegionAccessPermissions( uint64_t ullPrbarEl1Value );
  166. /**
  167. * @brief Does the necessary work to enter a system call.
  168. *
  169. * @param pullPrivilegedOnlyTaskStack The task's privileged SP when the SVC was raised.
  170. * @param ucSystemCallNumber The system call number of the system call.
  171. */
  172. PRIVILEGED_FUNCTION void vSystemCallEnter( uint64_t * pullPrivilegedOnlyTaskStack,
  173. uint8_t ucSystemCallNumber );
  174. /**
  175. * @brief Raise SVC for exiting from a system call.
  176. */
  177. PRIVILEGED_FUNCTION __attribute__( ( naked ) ) void vRequestSystemCallExit( void );
  178. /**
  179. * @brief Sets up the task stack so that upon returning from
  180. * SVC, the task stack is used again.
  181. *
  182. * @param ullSystemCallReturnValue The actual system call return value.
  183. */
  184. PRIVILEGED_FUNCTION void vSystemCallExit( uint64_t ullSystemCallReturnValue );
  185. #endif /* #if ( configENABLE_MPU == 1 ) */
  186. /*
  187. * Starts the first task executing. This function is necessarily written in
  188. * assembly code so is implemented in portASM.s.
  189. */
  190. extern void vPortRestoreTaskContext( void );
  191. extern void vGIC_EnableIRQ( uint32_t ulInterruptID );
  192. extern void vGIC_SetPriority( uint32_t ulInterruptID, uint32_t ulPriority );
  193. extern void vGIC_PowerUpRedistributor( void );
  194. extern void vGIC_EnableCPUInterface( void );
  195. /*-----------------------------------------------------------*/
#if ( configNUMBER_OF_CORES == 1 )

    /* Critical section nesting depth. A critical section is exited when this
     * count returns to portNO_CRITICAL_NESTING. */
    PRIVILEGED_DATA volatile uint64_t ullCriticalNesting = 0ULL;

    /* Saved as part of the task context. If ullPortTaskHasFPUContext is
     * non-zero then floating point context must be saved and restored for the
     * task. */
    PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext = pdFALSE;

    /* Set to 1 by an ISR to pend a context switch on exit from the interrupt. */
    PRIVILEGED_DATA uint64_t ullPortYieldRequired = pdFALSE;

    /* Counts the interrupt nesting depth. A context switch is only performed
     * when the nesting depth is 0. */
    PRIVILEGED_DATA uint64_t ullPortInterruptNesting = 0;

#else /* #if ( configNUMBER_OF_CORES == 1 ) */

    /* Per-core critical section nesting depths, indexed by core ID. */
    PRIVILEGED_DATA volatile uint64_t ullCriticalNestings[ configNUMBER_OF_CORES ] = { 0 };

    /* Flags to check if the secondary cores are ready. */
    PRIVILEGED_DATA volatile uint8_t ucSecondaryCoresReadyFlags[ configNUMBER_OF_CORES - 1 ] = { 0 };

    /* Flag to signal that the primary core has done all the shared initialisations. */
    PRIVILEGED_DATA volatile uint8_t ucPrimaryCoreInitDoneFlag = 0;

    /* Saved as part of the task context, one entry per core. If the entry for
     * a core is non-zero then floating point context must be saved and
     * restored for the task running on that core. */
    PRIVILEGED_DATA uint64_t ullPortTaskHasFPUContext[ configNUMBER_OF_CORES ] = { pdFALSE };

    /* Per-core: set to 1 to pend a context switch from an ISR. */
    PRIVILEGED_DATA uint64_t ullPortYieldRequired[ configNUMBER_OF_CORES ] = { pdFALSE };

    /* Per-core interrupt nesting depth counters. A context switch is only
     * performed when the nesting depth for the core is 0. */
    PRIVILEGED_DATA uint64_t ullPortInterruptNestings[ configNUMBER_OF_CORES ] = { 0 };

#endif /* #if ( configNUMBER_OF_CORES == 1 ) */

#if ( configENABLE_MPU == 1 )

    /* Set to pdTRUE when the scheduler is started. */
    PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;

#endif /* ( configENABLE_MPU == 1 ) */
  225. /*-----------------------------------------------------------*/
  226. #if ( configENABLE_MPU == 1 )
  227. StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
  228. TaskFunction_t pxCode,
  229. void * pvParameters,
  230. BaseType_t xRunPrivileged,
  231. xMPU_SETTINGS * xMPUSettings )
  232. {
  233. uint32_t ulIndex = 0;
  234. /* Layout must match portRESTORE_CONTEXT pop order (descending stack):
  235. * 1) FPU flag, 2) Critical nesting, 3) Optional FPU save area,
  236. * 4) ELR (PC), 5) SPSR (PSTATE), 6) GPRs in restore order pairs.
  237. */
  238. /* 1) FPU flag and 2) Critical nesting. */
  239. #if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT )
  240. xMPUSettings->ullContext[ ulIndex++ ] = portNO_FLOATING_POINT_CONTEXT; /* FPU flag */
  241. xMPUSettings->ullContext[ ulIndex++ ] = portNO_CRITICAL_NESTING; /* Critical nesting */
  242. #elif ( configUSE_TASK_FPU_SUPPORT == portTASK_HAVE_FPU_CONTEXT_BY_DEFAULT )
  243. xMPUSettings->ullContext[ ulIndex++ ] = pdTRUE; /* FPU flag */
  244. xMPUSettings->ullContext[ ulIndex++ ] = portNO_CRITICAL_NESTING; /* Critical nesting */
  245. #if ( configNUMBER_OF_CORES == 1 )
  246. ullPortTaskHasFPUContext = pdTRUE;
  247. #else
  248. ullPortTaskHasFPUContext[ portGET_CORE_ID() ] = pdTRUE;
  249. #endif
  250. #else
  251. #error "Invalid configUSE_TASK_FPU_SUPPORT setting - must be 1 or 2."
  252. #endif
  253. /* 3) Optional FPU save area immediately after the flag+critical pair. */
  254. #if ( configUSE_TASK_FPU_SUPPORT == portTASK_HAVE_FPU_CONTEXT_BY_DEFAULT )
  255. memset( &xMPUSettings->ullContext[ ulIndex ], 0x00, portFPU_REGISTER_WORDS * sizeof( StackType_t ) );
  256. ulIndex += portFPU_REGISTER_WORDS;
  257. #endif
  258. /* 4) ELR (PC) and 5) SPSR (PSTATE). */
  259. xMPUSettings->ullContext[ ulIndex++ ] = ( StackType_t ) pxCode; /* ELR */
  260. if( xRunPrivileged == pdTRUE )
  261. {
  262. xMPUSettings->ullContext[ ulIndex++ ] = portINITIAL_PSTATE_EL1; /* SPSR */
  263. }
  264. else
  265. {
  266. xMPUSettings->ullContext[ ulIndex++ ] = portINITIAL_PSTATE_EL0; /* SPSR */
  267. }
  268. /* 6) General-purpose registers in the order expected by restoreallgpregisters. */
  269. xMPUSettings->ullContext[ ulIndex++ ] = ( StackType_t ) 0x00; /* X30 (LR) */
  270. xMPUSettings->ullContext[ ulIndex++ ] = ( StackType_t ) 0x00; /* XZR (dummy) */
  271. xMPUSettings->ullContext[ ulIndex++ ] = 0x2828282828282828ULL; /* X28 */
  272. xMPUSettings->ullContext[ ulIndex++ ] = 0x2929292929292929ULL; /* X29 */
  273. xMPUSettings->ullContext[ ulIndex++ ] = 0x2626262626262626ULL; /* X26 */
  274. xMPUSettings->ullContext[ ulIndex++ ] = 0x2727272727272727ULL; /* X27 */
  275. xMPUSettings->ullContext[ ulIndex++ ] = 0x2424242424242424ULL; /* X24 */
  276. xMPUSettings->ullContext[ ulIndex++ ] = 0x2525252525252525ULL; /* X25 */
  277. xMPUSettings->ullContext[ ulIndex++ ] = 0x2222222222222222ULL; /* X22 */
  278. xMPUSettings->ullContext[ ulIndex++ ] = 0x2323232323232323ULL; /* X23 */
  279. xMPUSettings->ullContext[ ulIndex++ ] = 0x2020202020202020ULL; /* X20 */
  280. xMPUSettings->ullContext[ ulIndex++ ] = 0x2121212121212121ULL; /* X21 */
  281. xMPUSettings->ullContext[ ulIndex++ ] = 0x1818181818181818ULL; /* X18 */
  282. xMPUSettings->ullContext[ ulIndex++ ] = 0x1919191919191919ULL; /* X19 */
  283. xMPUSettings->ullContext[ ulIndex++ ] = 0x1616161616161616ULL; /* X16 */
  284. xMPUSettings->ullContext[ ulIndex++ ] = 0x1717171717171717ULL; /* X17 */
  285. xMPUSettings->ullContext[ ulIndex++ ] = 0x1414141414141414ULL; /* X14 */
  286. xMPUSettings->ullContext[ ulIndex++ ] = 0x1515151515151515ULL; /* X15 */
  287. xMPUSettings->ullContext[ ulIndex++ ] = 0x1212121212121212ULL; /* X12 */
  288. xMPUSettings->ullContext[ ulIndex++ ] = 0x1313131313131313ULL; /* X13 */
  289. xMPUSettings->ullContext[ ulIndex++ ] = 0x1010101010101010ULL; /* X10 */
  290. xMPUSettings->ullContext[ ulIndex++ ] = 0x1111111111111111ULL; /* X11 */
  291. xMPUSettings->ullContext[ ulIndex++ ] = 0x0808080808080808ULL; /* X8 */
  292. xMPUSettings->ullContext[ ulIndex++ ] = 0x0909090909090909ULL; /* X9 */
  293. xMPUSettings->ullContext[ ulIndex++ ] = 0x0606060606060606ULL; /* X6 */
  294. xMPUSettings->ullContext[ ulIndex++ ] = 0x0707070707070707ULL; /* X7 */
  295. xMPUSettings->ullContext[ ulIndex++ ] = 0x0404040404040404ULL; /* X4 */
  296. xMPUSettings->ullContext[ ulIndex++ ] = 0x0505050505050505ULL; /* X5 */
  297. xMPUSettings->ullContext[ ulIndex++ ] = 0x0202020202020202ULL; /* X2 */
  298. xMPUSettings->ullContext[ ulIndex++ ] = 0x0303030303030303ULL; /* X3 */
  299. xMPUSettings->ullContext[ ulIndex++ ] = ( StackType_t ) pvParameters; /* X0 */
  300. xMPUSettings->ullContext[ ulIndex++ ] = 0x0101010101010101ULL; /* X1 */
  301. if( xRunPrivileged == pdTRUE )
  302. {
  303. xMPUSettings->ullTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
  304. }
  305. else
  306. {
  307. xMPUSettings->ullTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
  308. }
  309. xMPUSettings->ullTaskUnprivilegedSP = ( ( uint64_t ) pxTopOfStack & portMPU_PRBAR_EL1_ADDRESS_MASK );
  310. return &( xMPUSettings->ullContext[ 0 ] );
  311. }
  312. #else /* #if ( configENABLE_MPU == 1 ) */
  313. /*
  314. * See header file for description.
  315. */
  316. StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
  317. TaskFunction_t pxCode,
  318. void * pvParameters )
  319. {
  320. /* Setup the initial stack of the task. The stack is set exactly as
  321. * expected by the portRESTORE_CONTEXT() macro. */
  322. /* First all the general purpose registers. */
  323. pxTopOfStack--;
  324. *pxTopOfStack = 0x0101010101010101ULL; /* R1 */
  325. pxTopOfStack--;
  326. *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
  327. pxTopOfStack--;
  328. *pxTopOfStack = 0x0303030303030303ULL; /* R3 */
  329. pxTopOfStack--;
  330. *pxTopOfStack = 0x0202020202020202ULL; /* R2 */
  331. pxTopOfStack--;
  332. *pxTopOfStack = 0x0505050505050505ULL; /* R5 */
  333. pxTopOfStack--;
  334. *pxTopOfStack = 0x0404040404040404ULL; /* R4 */
  335. pxTopOfStack--;
  336. *pxTopOfStack = 0x0707070707070707ULL; /* R7 */
  337. pxTopOfStack--;
  338. *pxTopOfStack = 0x0606060606060606ULL; /* R6 */
  339. pxTopOfStack--;
  340. *pxTopOfStack = 0x0909090909090909ULL; /* R9 */
  341. pxTopOfStack--;
  342. *pxTopOfStack = 0x0808080808080808ULL; /* R8 */
  343. pxTopOfStack--;
  344. *pxTopOfStack = 0x1111111111111111ULL; /* R11 */
  345. pxTopOfStack--;
  346. *pxTopOfStack = 0x1010101010101010ULL; /* R10 */
  347. pxTopOfStack--;
  348. *pxTopOfStack = 0x1313131313131313ULL; /* R13 */
  349. pxTopOfStack--;
  350. *pxTopOfStack = 0x1212121212121212ULL; /* R12 */
  351. pxTopOfStack--;
  352. *pxTopOfStack = 0x1515151515151515ULL; /* R15 */
  353. pxTopOfStack--;
  354. *pxTopOfStack = 0x1414141414141414ULL; /* R14 */
  355. pxTopOfStack--;
  356. *pxTopOfStack = 0x1717171717171717ULL; /* R17 */
  357. pxTopOfStack--;
  358. *pxTopOfStack = 0x1616161616161616ULL; /* R16 */
  359. pxTopOfStack--;
  360. *pxTopOfStack = 0x1919191919191919ULL; /* R19 */
  361. pxTopOfStack--;
  362. *pxTopOfStack = 0x1818181818181818ULL; /* R18 */
  363. pxTopOfStack--;
  364. *pxTopOfStack = 0x2121212121212121ULL; /* R21 */
  365. pxTopOfStack--;
  366. *pxTopOfStack = 0x2020202020202020ULL; /* R20 */
  367. pxTopOfStack--;
  368. *pxTopOfStack = 0x2323232323232323ULL; /* R23 */
  369. pxTopOfStack--;
  370. *pxTopOfStack = 0x2222222222222222ULL; /* R22 */
  371. pxTopOfStack--;
  372. *pxTopOfStack = 0x2525252525252525ULL; /* R25 */
  373. pxTopOfStack--;
  374. *pxTopOfStack = 0x2424242424242424ULL; /* R24 */
  375. pxTopOfStack--;
  376. *pxTopOfStack = 0x2727272727272727ULL; /* R27 */
  377. pxTopOfStack--;
  378. *pxTopOfStack = 0x2626262626262626ULL; /* R26 */
  379. pxTopOfStack--;
  380. *pxTopOfStack = 0x2929292929292929ULL; /* R29 */
  381. pxTopOfStack--;
  382. *pxTopOfStack = 0x2828282828282828ULL; /* R28 */
  383. pxTopOfStack--;
  384. *pxTopOfStack = ( StackType_t ) 0x00; /* XZR - has no effect, used so there are an even number of registers. */
  385. pxTopOfStack--;
  386. *pxTopOfStack = ( StackType_t ) 0x00; /* R30 - procedure call link register. */
  387. pxTopOfStack--;
  388. *pxTopOfStack = portINITIAL_PSTATE_EL0;
  389. pxTopOfStack--;
  390. *pxTopOfStack = ( StackType_t ) pxCode; /* Exception return address. */
  391. #if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT )
  392. {
  393. /* The task will start with a critical nesting count of 0 as interrupts are
  394. * enabled. */
  395. pxTopOfStack--;
  396. *pxTopOfStack = portNO_CRITICAL_NESTING;
  397. /* The task will start without a floating point context. A task that
  398. * uses the floating point hardware must call vPortTaskUsesFPU() before
  399. * executing any floating point instructions. */
  400. pxTopOfStack--;
  401. *pxTopOfStack = portNO_FLOATING_POINT_CONTEXT;
  402. }
  403. #elif ( configUSE_TASK_FPU_SUPPORT == portTASK_HAVE_FPU_CONTEXT_BY_DEFAULT )
  404. {
  405. /* The task will start with a floating point context. Leave enough
  406. * space for the registers - and ensure they are initialised to 0. */
  407. pxTopOfStack -= portFPU_REGISTER_WORDS;
  408. memset( pxTopOfStack, 0x00, portFPU_REGISTER_WORDS * sizeof( StackType_t ) );
  409. /* The task will start with a critical nesting count of 0 as interrupts are
  410. * enabled. */
  411. pxTopOfStack--;
  412. *pxTopOfStack = portNO_CRITICAL_NESTING;
  413. pxTopOfStack--;
  414. *pxTopOfStack = pdTRUE;
  415. #if ( configNUMBER_OF_CORES == 1 )
  416. ullPortTaskHasFPUContext = pdTRUE;
  417. #else
  418. ullPortTaskHasFPUContext[ portGET_CORE_ID() ] = pdTRUE;
  419. #endif
  420. }
  421. #else /* if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT ) */
  422. {
  423. #error "Invalid configUSE_TASK_FPU_SUPPORT setting - configUSE_TASK_FPU_SUPPORT must be set to 1, 2, or left undefined."
  424. }
  425. #endif /* if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT ) */
  426. return pxTopOfStack;
  427. }
  428. #endif /* #if ( configENABLE_MPU == 1 ) */
  429. /*-----------------------------------------------------------*/
  430. #if ( configENABLE_MPU == 1 )
  431. /**
  432. * @brief Store a task's MPU settings in its TCB.
  433. *
  434. * @ingroup Task Context
  435. * @ingroup MPU Control
  436. *
  437. * @param xMPUSettings The MPU settings in TCB.
  438. * @param xRegions The MPU regions requested by the task.
  439. * @param pxBottomOfStack The base address of the task's Stack.
  440. * @param xStackDepth The length of the task's stack.
  441. */
  442. void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
  443. const struct xMEMORY_REGION * const xRegions,
  444. StackType_t * pxBottomOfStack,
  445. StackType_t xStackDepth ) /* PRIVILEGED_FUNCTION */
  446. {
  447. uint64_t ullRegionStartAddress, ullRegionEndAddress;
  448. uint8_t ucIndex = 0, ucRegionNumber;
  449. #if defined( __ARMCC_VERSION )
  450. /* Declaration when these variable are defined in code instead of being
  451. * exported from linker scripts. */
  452. extern uint64_t * __privileged_sram_start__;
  453. extern uint64_t * __privileged_sram_end__;
  454. #else
  455. /* Declaration when these variable are exported from linker scripts. */
  456. extern uint64_t __privileged_sram_start__[];
  457. extern uint64_t __privileged_sram_end__[];
  458. #endif /* defined( __ARMCC_VERSION ) */
  459. /* Setup MAIR_EL1. */
  460. xMPUSettings->ullMairEl1 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_EL1_ATTR0_POS ) & portMPU_MAIR_EL1_ATTR0_MASK );
  461. xMPUSettings->ullMairEl1 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_EL1_ATTR1_POS ) & portMPU_MAIR_EL1_ATTR1_MASK );
  462. /* This function is called automatically when the task is created - in
  463. * which case the stack region parameters will be valid. At all other
  464. * times the stack parameters will not be valid and it is assumed that
  465. * the stack region has already been configured. */
  466. if( xStackDepth > 0 )
  467. {
  468. ullRegionStartAddress = ( uint64_t ) pxBottomOfStack;
  469. ullRegionEndAddress = ( uint64_t ) pxBottomOfStack + ( xStackDepth * ( configSTACK_DEPTH_TYPE ) sizeof( StackType_t ) ) - 1;
  470. /* If the stack is within the privileged SRAM, do not protect it
  471. * using a separate MPU region. This is needed because this
  472. * region is already protected using an MPU region and ARMv8-R does
  473. * not allow overlapping MPU regions.
  474. */
  475. if( ( ullRegionStartAddress >= ( uint64_t ) __privileged_sram_start__ ) &&
  476. ( ullRegionEndAddress <= ( uint64_t ) __privileged_sram_end__ ) )
  477. {
  478. xMPUSettings->xRegionsSettings[ portSTACK_REGION_INDEX ].ullPrbarEl1 = 0;
  479. xMPUSettings->xRegionsSettings[ portSTACK_REGION_INDEX ].ullPrlarEl1 = 0;
  480. }
  481. else
  482. {
  483. /* Define the region that allows access to the stack. */
  484. ullRegionStartAddress &= portMPU_PRBAR_EL1_ADDRESS_MASK;
  485. ullRegionEndAddress &= portMPU_PRLAR_EL1_ADDRESS_MASK;
  486. xMPUSettings->xRegionsSettings[ portSTACK_REGION_INDEX ].ullPrbarEl1 = ( ullRegionStartAddress ) |
  487. ( portMPU_REGION_INNER_SHAREABLE ) |
  488. ( portMPU_REGION_READ_WRITE ) |
  489. ( portMPU_REGION_EXECUTE_NEVER );
  490. xMPUSettings->xRegionsSettings[ portSTACK_REGION_INDEX ].ullPrlarEl1 = ( ullRegionEndAddress ) |
  491. ( portMPU_PRLAR_EL1_ATTR_INDEX0 ) |
  492. ( portMPU_PRLAR_EL1_REGION_ENABLE );
  493. }
  494. }
  495. /* User supplied configurable regions. */
  496. for( ucRegionNumber = 1; ucRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ucRegionNumber++ )
  497. {
  498. /* If xRegions is NULL i.e. the task has not specified any MPU
  499. * region, the else part ensures that all the configurable MPU
  500. * regions are invalidated.
  501. * The minimum region size is 64 Bytes.
  502. */
  503. if( xRegions != NULL )
  504. {
  505. /* Configure the region only if the base address is non-NULL.
  506. * The user may choose to use only a subset of the available MPU regions.
  507. * This check prevents undefined regions (i.e. regions with a NULL base
  508. * address) from being configured and from triggering the size-check
  509. * assertion below.
  510. */
  511. if ( xRegions[ ucIndex ].pvBaseAddress != NULL )
  512. {
  513. configASSERT( xRegions[ ucIndex ].ulLengthInBytes >= 64UL );
  514. uint64_t ullPrbarEl1RegValue, ullPrlarEl1RegValue;
  515. /* Translate the generic region definition contained in xRegions
  516. * into the ARMv8-R specific MPU settings that are then stored in
  517. * xMPUSettings.
  518. */
  519. ullRegionStartAddress = ( ( uint64_t ) xRegions[ ucIndex ].pvBaseAddress ) & portMPU_PRBAR_EL1_ADDRESS_MASK;
  520. ullRegionEndAddress = ( uint64_t ) xRegions[ ucIndex ].pvBaseAddress + xRegions[ ucIndex ].ulLengthInBytes - 1;
  521. ullRegionEndAddress &= portMPU_PRLAR_EL1_ADDRESS_MASK;
  522. /* Checks for overlaps with another user defined regions and stack region, which are already configured. */
  523. for( uint8_t ucUserRegionNumber = 0; ucUserRegionNumber < portNUM_CONFIGURABLE_REGIONS; ucUserRegionNumber++ )
  524. {
  525. /* Check for overlap. */
  526. if( ( portIS_ADDRESS_WITHIN_RANGE( ullRegionStartAddress,
  527. ( xMPUSettings->xRegionsSettings[ ucUserRegionNumber ].ullPrbarEl1 & portMPU_PRBAR_EL1_ADDRESS_MASK ),
  528. ( xMPUSettings->xRegionsSettings[ ucUserRegionNumber ].ullPrlarEl1 & portMPU_PRLAR_EL1_ADDRESS_MASK ) ) ||
  529. ( portIS_ADDRESS_WITHIN_RANGE( ullRegionEndAddress,
  530. ( xMPUSettings->xRegionsSettings[ ucUserRegionNumber ].ullPrbarEl1 & portMPU_PRBAR_EL1_ADDRESS_MASK ),
  531. ( xMPUSettings->xRegionsSettings[ ucUserRegionNumber ].ullPrlarEl1 & portMPU_PRLAR_EL1_ADDRESS_MASK ) ) ) ) )
  532. {
  533. /* Overlap detected - assert. */
  534. configASSERT( NULL );
  535. }
  536. }
  537. /* Checks for overlaps with kernel programmed regions which are already programmed as part of vSetupMPU. */
  538. for (uint8_t ucProgrammedRegionIndex = 0; ucProgrammedRegionIndex < 4; ucProgrammedRegionIndex++)
  539. {
  540. __asm volatile ( "msr PRSELR_EL1, %0" : : "r" ( ( uint64_t ) ucProgrammedRegionIndex ) );
  541. __asm volatile ( "mrs %0, PRBAR_EL1" : "=r" ( ullPrbarEl1RegValue ) );
  542. ullPrbarEl1RegValue &= portMPU_PRBAR_EL1_ADDRESS_MASK;
  543. __asm volatile ( "mrs %0, PRLAR_EL1" : "=r" ( ullPrlarEl1RegValue ) );
  544. ullPrlarEl1RegValue &= portMPU_PRLAR_EL1_ADDRESS_MASK;
  545. /* Check for overlap. */
  546. if( ( portIS_ADDRESS_WITHIN_RANGE( ullRegionStartAddress,
  547. ullPrbarEl1RegValue,
  548. ullPrlarEl1RegValue ) ) ||
  549. ( portIS_ADDRESS_WITHIN_RANGE( ullRegionEndAddress,
  550. ullPrbarEl1RegValue,
  551. ullPrlarEl1RegValue ) ) )
  552. {
  553. /* Overlap detected - assert. */
  554. configASSERT( NULL );
  555. }
  556. }
  557. /* Start address. */
  558. xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 = ( ullRegionStartAddress );
  559. /* RO/RW. */
  560. if( ( xRegions[ ucIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
  561. {
  562. xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 |= ( portMPU_REGION_READ_ONLY );
  563. }
  564. else
  565. {
  566. xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 |= ( portMPU_REGION_READ_WRITE );
  567. }
  568. /* XN. */
  569. if( ( xRegions[ ucIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
  570. {
  571. xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 |= ( portMPU_REGION_EXECUTE_NEVER );
  572. }
  573. /* SH. */
  574. if( ( xRegions[ ucIndex ].ulParameters & tskMPU_REGION_INNER_SHAREABLE ) != 0 )
  575. {
  576. xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 |= ( portMPU_REGION_INNER_SHAREABLE );
  577. }
  578. else if( ( xRegions[ ucIndex ].ulParameters & tskMPU_REGION_OUTER_SHAREABLE ) != 0 )
  579. {
  580. xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 |= ( portMPU_REGION_OUTER_SHAREABLE );
  581. }
  582. else
  583. {
  584. xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 |= ( portMPU_REGION_NON_SHAREABLE );
  585. }
  586. /* End Address. */
  587. xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrlarEl1 = ( ullRegionEndAddress ) |
  588. ( portMPU_PRLAR_EL1_REGION_ENABLE );
  589. /* Normal memory/ Device memory. */
  590. if( ( xRegions[ ucIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
  591. {
  592. /* Attr1 in MAIR_EL1 is configured as device memory. */
  593. xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrlarEl1 |= portMPU_PRLAR_EL1_ATTR_INDEX1;
  594. }
  595. else
  596. {
  597. /* Attr0 in MAIR_EL1 is configured as normal memory. */
  598. xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrlarEl1 |= portMPU_PRLAR_EL1_ATTR_INDEX0;
  599. }
  600. }
  601. }
  602. else
  603. {
  604. /* Invalidate the region. */
  605. xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrbarEl1 = 0UL;
  606. xMPUSettings->xRegionsSettings[ ucRegionNumber ].ullPrlarEl1 = 0UL;
  607. }
  608. ucIndex++;
  609. }
  610. }
  611. /*-----------------------------------------------------------*/
/**
 * @brief Program the fixed (kernel) MPU regions used by the port.
 *
 * Configures MAIR_EL1 attribute indices 0 (normal memory) and 1 (device
 * memory), then programs four EL1 MPU regions via PRSELR_EL1/PRBAR_EL1/
 * PRLAR_EL1:
 *   - privileged flash:   privileged read-only,
 *   - unprivileged flash: read-only for everyone,
 *   - syscalls flash:     read-only for everyone,
 *   - privileged SRAM:    privileged read-write, execute-never.
 *
 * The MPU itself is not enabled here - see vEnableMPU(). Must be called
 * from EL1 (every MSR below targets an EL1 system register).
 */
void vSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
    #if defined( __ARMCC_VERSION )
        /* Declaration when these variable are defined in code instead of being
         * exported from linker scripts.
         */
        extern uint64_t * __privileged_functions_start__;
        extern uint64_t * __privileged_functions_end__;
        extern uint64_t * __syscalls_flash_start__;
        extern uint64_t * __syscalls_flash_end__;
        extern uint64_t * __unprivileged_flash_start__;
        extern uint64_t * __unprivileged_flash_end__;
        extern uint64_t * __privileged_sram_start__;
        extern uint64_t * __privileged_sram_end__;
    #else /* if defined( __ARMCC_VERSION ) */
        /* Declaration when these variable are exported from linker scripts. */
        extern uint64_t __privileged_functions_start__[];
        extern uint64_t __privileged_functions_end__[];
        extern uint64_t __syscalls_flash_start__[];
        extern uint64_t __syscalls_flash_end__[];
        extern uint64_t __unprivileged_flash_start__[];
        extern uint64_t __unprivileged_flash_end__[];
        extern uint64_t __privileged_sram_start__[];
        extern uint64_t __privileged_sram_end__[];
    #endif /* defined( __ARMCC_VERSION ) */

    /* The only permitted number of regions are 16 or 32. */
    configASSERT( ( configTOTAL_MPU_REGIONS == 16 ) || ( configTOTAL_MPU_REGIONS == 32 ) );

    /* MAIR_EL1 - Index 0: normal, bufferable and cacheable memory. */
    uint64_t ullMairEl1RegValue = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_EL1_ATTR0_POS ) & portMPU_MAIR_EL1_ATTR0_MASK );

    /* MAIR_EL1 - Index 1: device memory (nGnRE). */
    ullMairEl1RegValue |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_EL1_ATTR1_POS ) & portMPU_MAIR_EL1_ATTR1_MASK );
    __asm volatile ( "msr MAIR_EL1, %0" : : "r" ( ullMairEl1RegValue ) );

    /* Setup privileged flash as Read Only so that privileged tasks can
     * read it but not modify.
     */
    uint64_t ullPrselrEl1RegValue = portPRIVILEGED_FLASH_REGION;
    __asm volatile ( "msr PRSELR_EL1, %0" : : "r" ( ullPrselrEl1RegValue ) );
    uint64_t ullPrbarEl1RegValue = ( ( ( uint64_t ) __privileged_functions_start__ ) & portMPU_PRBAR_EL1_ADDRESS_MASK ) |
                                   ( portMPU_REGION_NON_SHAREABLE ) |
                                   ( portMPU_REGION_PRIVILEGED_READ_ONLY );
    __asm volatile ( "msr PRBAR_EL1, %0" : : "r" ( ullPrbarEl1RegValue ) );
    uint64_t ullPrlarEl1RegValue = ( ( ( uint64_t ) __privileged_functions_end__ ) & portMPU_PRLAR_EL1_ADDRESS_MASK ) |
                                   ( portMPU_PRLAR_EL1_ATTR_INDEX0 ) |
                                   ( portMPU_PRLAR_EL1_REGION_ENABLE );
    __asm volatile ( "msr PRLAR_EL1, %0" : : "r" ( ullPrlarEl1RegValue ) );

    /* Setup unprivileged flash as Read Only by both privileged and
     * unprivileged tasks. All tasks can read it but no-one can modify.
     */
    ullPrselrEl1RegValue = portUNPRIVILEGED_FLASH_REGION;
    __asm volatile ( "msr PRSELR_EL1, %0" : : "r" ( ullPrselrEl1RegValue ) );
    ullPrbarEl1RegValue = ( ( ( uint64_t ) __unprivileged_flash_start__ ) & portMPU_PRBAR_EL1_ADDRESS_MASK ) |
                          ( portMPU_REGION_NON_SHAREABLE ) |
                          ( portMPU_REGION_READ_ONLY );
    __asm volatile ( "msr PRBAR_EL1, %0" : : "r" ( ullPrbarEl1RegValue ) );
    ullPrlarEl1RegValue = ( ( ( uint64_t ) __unprivileged_flash_end__ ) & portMPU_PRLAR_EL1_ADDRESS_MASK ) |
                          ( portMPU_PRLAR_EL1_ATTR_INDEX0 ) |
                          ( portMPU_PRLAR_EL1_REGION_ENABLE );
    __asm volatile ( "msr PRLAR_EL1, %0" : : "r" ( ullPrlarEl1RegValue ) );

    /* Setup unprivileged syscalls flash as Read Only by both privileged
     * and unprivileged tasks. All tasks can read it but no-one can modify.
     */
    ullPrselrEl1RegValue = portUNPRIVILEGED_SYSCALLS_REGION;
    __asm volatile ( "msr PRSELR_EL1, %0" : : "r" ( ullPrselrEl1RegValue ) );
    ullPrbarEl1RegValue = ( ( ( uint64_t ) __syscalls_flash_start__ ) & portMPU_PRBAR_EL1_ADDRESS_MASK ) |
                          ( portMPU_REGION_NON_SHAREABLE ) |
                          ( portMPU_REGION_READ_ONLY );
    __asm volatile ( "msr PRBAR_EL1, %0" : : "r" ( ullPrbarEl1RegValue ) );
    ullPrlarEl1RegValue = ( ( ( uint64_t ) __syscalls_flash_end__ ) & portMPU_PRLAR_EL1_ADDRESS_MASK ) |
                          ( portMPU_PRLAR_EL1_ATTR_INDEX0 ) |
                          ( portMPU_PRLAR_EL1_REGION_ENABLE );
    __asm volatile ( "msr PRLAR_EL1, %0" : : "r" ( ullPrlarEl1RegValue ) );

    /* Setup RAM containing kernel data for privileged access only.
     * Inner-shareable and execute-never, unlike the flash regions. */
    ullPrselrEl1RegValue = portPRIVILEGED_RAM_REGION;
    __asm volatile ( "msr PRSELR_EL1, %0" : : "r" ( ullPrselrEl1RegValue ) );
    ullPrbarEl1RegValue = ( ( ( uint64_t ) __privileged_sram_start__ ) & portMPU_PRBAR_EL1_ADDRESS_MASK ) |
                          ( portMPU_REGION_INNER_SHAREABLE ) |
                          ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
                          ( portMPU_REGION_EXECUTE_NEVER );
    __asm volatile ( "msr PRBAR_EL1, %0" : : "r" ( ullPrbarEl1RegValue ) );
    ullPrlarEl1RegValue = ( ( ( uint64_t ) __privileged_sram_end__ ) & portMPU_PRLAR_EL1_ADDRESS_MASK ) |
                          ( portMPU_PRLAR_EL1_ATTR_INDEX0 ) |
                          ( portMPU_PRLAR_EL1_REGION_ENABLE );
    __asm volatile ( "msr PRLAR_EL1, %0" : : "r" ( ullPrlarEl1RegValue ) );
}
  696. /*-----------------------------------------------------------*/
/**
 * @brief Turn on the MPU.
 *
 * Read-modify-writes SCTLR_EL1 to set the MPU enable bit and the bit that
 * enables privileged access to the background region, then issues an ISB
 * so the new memory map takes effect before the function returns.
 */
void vEnableMPU( void ) /* PRIVILEGED_FUNCTION */
{
    uint64_t ullSctlrEl1RegValue;

    __asm volatile ( "mrs %0, SCTLR_EL1" : "=r" ( ullSctlrEl1RegValue ) );

    /* Enable the MPU. Also enable privileged access to the
     * background region.
     */
    ullSctlrEl1RegValue |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
    __asm volatile ( "msr SCTLR_EL1, %0" : : "r" ( ullSctlrEl1RegValue ) );

    /* Ensure the write to SCTLR_EL1 is committed before
     * returning.
     */
    __asm volatile ( "isb" );
}
  711. /*-----------------------------------------------------------*/
  712. BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
  713. {
  714. BaseType_t xTaskIsPrivileged = pdFALSE;
  715. #if ( configNUMBER_OF_CORES == 1 )
  716. extern TaskHandle_t pxCurrentTCB;
  717. xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
  718. #else
  719. extern TaskHandle_t pxCurrentTCBs[ configNUMBER_OF_CORES ];
  720. xMPU_SETTINGS * pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCBs[ portGET_CORE_ID_FROM_ISR() ] );
  721. #endif
  722. if( ( pxMpuSettings->ullTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
  723. {
  724. xTaskIsPrivileged = pdTRUE;
  725. }
  726. return xTaskIsPrivileged;
  727. }
  728. /*-----------------------------------------------------------*/
  729. static uint32_t prvGetRegionAccessPermissions( uint64_t ullPrbarEl1Value ) /* PRIVILEGED_FUNCTION */
  730. {
  731. uint32_t ulAccessPermissions = 0;
  732. if( ( ullPrbarEl1Value & portMPU_PRBAR_EL1_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
  733. {
  734. ulAccessPermissions = tskMPU_READ_PERMISSION;
  735. }
  736. if( ( ullPrbarEl1Value & portMPU_PRBAR_EL1_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
  737. {
  738. ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
  739. }
  740. return ulAccessPermissions;
  741. }
  742. /*-----------------------------------------------------------*/
/**
 * @brief Check whether the calling task may access a buffer.
 *
 * Access is granted unconditionally before the scheduler starts and for
 * privileged tasks. Otherwise the buffer must fall entirely inside one
 * enabled MPU region of the calling task whose permissions satisfy the
 * requested access.
 *
 * @param pvBuffer          Start of the buffer.
 * @param ulBufferLength    Buffer length in bytes.
 * @param ulAccessRequested Requested tskMPU_* permission bits.
 *
 * @return pdTRUE if access is permitted, pdFALSE otherwise.
 */
BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
                                            uint32_t ulBufferLength,
                                            uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
{
    uint32_t i;
    uint64_t ullBufferStartAddress, ullBufferEndAddress;
    BaseType_t xAccessGranted = pdFALSE;
    const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */

    if( xSchedulerRunning == pdFALSE )
    {
        /* Grant access to all the kernel objects before the scheduler
         * is started. It is necessary because there is no task running
         * yet and therefore, we cannot use the permissions of any
         * task. */
        xAccessGranted = pdTRUE;
    }
    else if( ( xTaskMpuSettings->ullTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
    {
        /* Privileged tasks bypass the per-region checks entirely. */
        xAccessGranted = pdTRUE;
    }
    else
    {
        /* NOTE(review): ulBufferLength == 0 makes ( ulBufferLength - 1UL )
         * wrap to 0xFFFFFFFF before the widening to 64 bits - confirm
         * callers never pass a zero length. */
        if( portADD_UINT64_WILL_OVERFLOW( ( ( uint64_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
        {
            /* Inclusive [start, end] range of the buffer. */
            ullBufferStartAddress = ( uint64_t ) pvBuffer;
            ullBufferEndAddress = ( ( ( uint64_t ) pvBuffer ) + ulBufferLength - 1UL );

            for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
            {
                /* Is the MPU region enabled? */
                if( ( xTaskMpuSettings->xRegionsSettings[ i ].ullPrlarEl1 & portMPU_PRLAR_EL1_REGION_ENABLE ) == portMPU_PRLAR_EL1_REGION_ENABLE )
                {
                    /* The whole buffer must lie within this region AND the
                     * region's permissions must cover the requested access. */
                    if( portIS_ADDRESS_WITHIN_RANGE( ullBufferStartAddress,
                                                     portEXTRACT_FIRST_ADDRESS_FROM_PRBAR_EL1( xTaskMpuSettings->xRegionsSettings[ i ].ullPrbarEl1 ),
                                                     portEXTRACT_LAST_ADDRESS_FROM_PRLAR_EL1( xTaskMpuSettings->xRegionsSettings[ i ].ullPrlarEl1 ) ) &&
                        portIS_ADDRESS_WITHIN_RANGE( ullBufferEndAddress,
                                                     portEXTRACT_FIRST_ADDRESS_FROM_PRBAR_EL1( xTaskMpuSettings->xRegionsSettings[ i ].ullPrbarEl1 ),
                                                     portEXTRACT_LAST_ADDRESS_FROM_PRLAR_EL1( xTaskMpuSettings->xRegionsSettings[ i ].ullPrlarEl1 ) ) &&
                        portIS_AUTHORIZED( ulAccessRequested,
                                           prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ullPrbarEl1 ) ) )
                    {
                        xAccessGranted = pdTRUE;
                        break;
                    }
                }
            }
        }
    }

    return xAccessGranted;
}
  792. /*-----------------------------------------------------------*/
/**
 * @brief Enter a system call raised from an unprivileged task.
 *
 * Called from the SVC handler. Validates that the SVC was raised from the
 * system-call flash section, saves the caller's LR and user-mode SP in the
 * task's MPU settings, then redirects the exception return (ELR_EL1,
 * SP_EL0, SPSR_EL1) so that eret lands in the privileged MPU_<API>
 * implementation running on the task's dedicated system-call stack.
 *
 * @param pullPrivilegedOnlyTaskStack Saved register frame of the task that
 *        raised the SVC (accessed via the portOFFSET_TO_* indices).
 * @param ucSystemCallNumber          Index into
 *        uxSystemCallImplementations; range-checked by the assembly SVC
 *        handler before this function is called.
 */
void vSystemCallEnter( uint64_t * pullPrivilegedOnlyTaskStack,
                       uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
    #if ( configNUMBER_OF_CORES == 1 )
        extern TaskHandle_t pxCurrentTCB;
    #else
        extern TaskHandle_t pxCurrentTCBs[ configNUMBER_OF_CORES ];
    #endif
    extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
    xMPU_SETTINGS * pxMpuSettings;
    uint64_t ullSystemCallLocation; /* Address where SVC was raised. */

    __asm volatile ( "MRS %0, ELR_EL1" : "=r" ( ullSystemCallLocation ) );

    #if defined( __ARMCC_VERSION )
        /* Declaration when these variable are defined in code instead of being
         * exported from linker scripts.
         */
        extern uint64_t * __syscalls_flash_start__;
        extern uint64_t * __syscalls_flash_end__;
    #else
        /* Declaration when these variable are exported from linker scripts. */
        extern uint64_t __syscalls_flash_start__[];
        extern uint64_t __syscalls_flash_end__[];
    #endif /* #if defined( __ARMCC_VERSION ) */

    #if ( configNUMBER_OF_CORES == 1 )
        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
    #else
        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCBs[ portGET_CORE_ID_FROM_ISR() ] );
    #endif

    /* Checks:
     * 1. SVC is raised from the system call section (i.e. application is
     *    not raising SVC directly).
     * 2. We do not need to check that ucSystemCallNumber is within range
     *    because the assembly SVC handler checks that before calling
     *    this function.
     */
    if( ( ullSystemCallLocation >= ( uint64_t ) __syscalls_flash_start__ ) &&
        ( ullSystemCallLocation <= ( uint64_t ) __syscalls_flash_end__ ) &&
        ( uxSystemCallImplementations[ ucSystemCallNumber ] != 0 ) )
    {
        /* Store the value of the Link Register before the SVC was raised.
         * It contains the address of the caller of the System Call entry
         * point (i.e. the caller of the MPU_<API>). We need to restore it
         * when we exit from the system call.
         */
        pxMpuSettings->xSystemCallInfo.ullLinkRegisterAtSystemCallEntry = pullPrivilegedOnlyTaskStack[ portOFFSET_TO_LR ];

        /* Capture user-mode SP at system call entry so vSystemCallExit can
         * restore it. */
        uint64_t ullUserSpAtEntry;
        __asm volatile ( "MRS %0, SP_EL0" : "=r" ( ullUserSpAtEntry ) );
        pxMpuSettings->xSystemCallInfo.ullUserSPAtSystemCallEntry = ullUserSpAtEntry;

        /* Setup the MPU_<API> inputs (X0-X3 from the saved frame), the
         * system call stack, and SPSR, so that the eret from the SVC
         * handler enters the privileged implementation. */
        __asm volatile (
            "MOV X0, %0 \n"
            "MOV X1, %1 \n"
            "MOV X2, %2 \n"
            "MOV X3, %3 \n"
            "MSR ELR_EL1, %4 \n"
            "MSR SP_EL0, %5 \n"
            "MSR SPSR_EL1, %6 \n"
            :
            : "r" ( pullPrivilegedOnlyTaskStack[ portOFFSET_TO_X0 ] ),
              "r" ( pullPrivilegedOnlyTaskStack[ portOFFSET_TO_X1 ] ),
              "r" ( pullPrivilegedOnlyTaskStack[ portOFFSET_TO_X2 ] ),
              "r" ( pullPrivilegedOnlyTaskStack[ portOFFSET_TO_X3 ] ),
              "r" ( ( uint64_t ) uxSystemCallImplementations[ ucSystemCallNumber ] ),
              "r" ( &( pxMpuSettings->ullContext[ MAX_CONTEXT_SIZE + configSYSTEM_CALL_STACK_SIZE ] ) ),
              "r" ( portINITIAL_PSTATE_EL1 )
            : "memory", "x0", "x1", "x2", "x3"
        );
    }
}
  863. /*-----------------------------------------------------------*/
/**
 * @brief Raise the SVC that ends a system call.
 *
 * Called by the privileged MPU_<API> wrapper when it finishes; the SVC is
 * handled by vSystemCallExit(), which returns execution to the
 * unprivileged caller.
 */
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
    __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
  868. /*-----------------------------------------------------------*/
/**
 * @brief Exit a system call and return to the unprivileged caller.
 *
 * Called from the SVC handler for portSVC_SYSTEM_CALL_EXIT. Validates
 * that the SVC came from the privileged-functions section (it is only
 * raised by vRequestSystemCallExit), then restores the LR and user SP
 * saved by vSystemCallEnter and arranges the exception return so that
 * eret drops back to EL0 with the system call's return value in X0.
 *
 * @param ullSystemCallReturnValue Value the MPU_<API> implementation
 *        returned; delivered to the caller in X0.
 */
void vSystemCallExit( uint64_t ullSystemCallReturnValue ) /* PRIVILEGED_FUNCTION */
{
    #if ( configNUMBER_OF_CORES == 1 )
        extern TaskHandle_t pxCurrentTCB;
    #else
        extern TaskHandle_t pxCurrentTCBs[ configNUMBER_OF_CORES ];
    #endif
    xMPU_SETTINGS * pxMpuSettings;
    uint64_t ullSystemCallLocation; /* Address where SVC was raised. */

    __asm volatile ( "MRS %0, ELR_EL1" : "=r" ( ullSystemCallLocation ) );

    #if defined( __ARMCC_VERSION )
        /* Declaration when these variable are defined in code instead of being
         * exported from linker scripts. */
        extern uint64_t * __privileged_functions_start__;
        extern uint64_t * __privileged_functions_end__;
    #else
        /* Declaration when these variable are exported from linker scripts. */
        extern uint64_t __privileged_functions_start__[];
        extern uint64_t __privileged_functions_end__[];
    #endif /* #if defined( __ARMCC_VERSION ) */

    #if ( configNUMBER_OF_CORES == 1 )
        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
    #else
        pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCBs[ portGET_CORE_ID_FROM_ISR() ] );
    #endif

    /* Check:
     * SVC is raised from the privileged code (i.e. application is not
     * raising SVC directly). This SVC is only raised from
     * vRequestSystemCallExit which is in the privileged code section.
     */
    if( ( ullSystemCallLocation >= ( uint64_t ) __privileged_functions_start__ ) &&
        ( ullSystemCallLocation <= ( uint64_t ) __privileged_functions_end__ ) )
    {
        __asm volatile (
            "MSR ELR_EL1, %0 \n" /* Return to the MPU_<API> caller. */
            "MSR SP_EL0, %1 \n"  /* Restore user SP saved at syscall entry. */
            "MSR SPSR_EL1, %3 \n" /* Ensure return to EL0. */
            "MOV X0, %2 \n"      /* Move the system call return value to X0. */
            :
            : "r" ( pxMpuSettings->xSystemCallInfo.ullLinkRegisterAtSystemCallEntry ),
              "r" ( pxMpuSettings->xSystemCallInfo.ullUserSPAtSystemCallEntry ),
              "r" ( ullSystemCallReturnValue ),
              "r" ( ( uint64_t ) portINITIAL_PSTATE_EL0 )
            : "memory"
        );
    }
}
  916. /*-----------------------------------------------------------*/
  917. #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
  918. void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
  919. int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
  920. {
  921. uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
  922. xMPU_SETTINGS * xTaskMpuSettings;
  923. /* Calculate the Access Control List entry index and bit position
  924. * within that entry. */
  925. ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
  926. ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
  927. xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
  928. /* Set the bit corresponding to the kernel object to grant access. */
  929. xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
  930. }
  931. /*-----------------------------------------------------------*/
  932. void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
  933. int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
  934. {
  935. uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
  936. xMPU_SETTINGS * xTaskMpuSettings;
  937. /* Calculate the Access Control List entry index and bit position
  938. * within that entry. */
  939. ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
  940. ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
  941. xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
  942. /* Clear the bit corresponding to the kernel object to revoke access. */
  943. xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
  944. }
  945. /*-----------------------------------------------------------*/
  946. BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
  947. {
  948. uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
  949. BaseType_t xAccessGranted = pdFALSE;
  950. const xMPU_SETTINGS * xTaskMpuSettings;
  951. if( xSchedulerRunning == pdFALSE )
  952. {
  953. /* Grant access to all the kernel objects before the scheduler
  954. * is started. It is necessary because there is no task running
  955. * yet and therefore, we cannot use the permissions of any
  956. * task. */
  957. xAccessGranted = pdTRUE;
  958. }
  959. else
  960. {
  961. xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
  962. ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
  963. ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
  964. if( ( xTaskMpuSettings->ullTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
  965. {
  966. xAccessGranted = pdTRUE;
  967. }
  968. else
  969. {
  970. if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
  971. {
  972. xAccessGranted = pdTRUE;
  973. }
  974. }
  975. }
  976. return xAccessGranted;
  977. }
  978. /*-----------------------------------------------------------*/
  979. #else /* configENABLE_ACCESS_CONTROL_LIST == 1 */
  980. BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
  981. {
  982. ( void ) lInternalIndexOfKernelObject;
  983. /* If Access Control List feature is not used, all the tasks have
  984. * access to all the kernel objects. */
  985. return pdTRUE;
  986. }
  987. /*-----------------------------------------------------------*/
  988. #endif /* configENABLE_ACCESS_CONTROL_LIST == 1 */
  989. #endif /* #if ( configENABLE_MPU == 1 ) */
/**
 * @brief Start the FreeRTOS scheduler.
 *
 * Sanity-checks the GIC priority-bit implementation and the current
 * exception level, optionally configures and enables the MPU, starts the
 * tick timer, performs the multicore start-up handshake when
 * configNUMBER_OF_CORES > 1, and finally restores the first task's
 * context. Does not return in normal operation.
 *
 * @return 0 (only reached if vPortRestoreTaskContext() ever returned).
 */
BaseType_t xPortStartScheduler( void )
{
    uint64_t ullAPSR;

    #if ( configASSERT_DEFINED == 1 )
    {
        volatile uint8_t ucOriginalPriority;
        volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + portINTERRUPT_PRIORITY_REGISTER_OFFSET );
        volatile uint8_t ucMaxPriorityValue;

        /* Determine how many priority bits are implemented in the GIC.
         *
         * Save the interrupt priority value that is about to be clobbered. */
        ucOriginalPriority = *pucFirstUserPriorityRegister;

        /* Determine the number of priority bits available. First write to
         * all possible bits. */
        *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE;

        /* Read the value back to see how many bits stuck. */
        ucMaxPriorityValue = *pucFirstUserPriorityRegister;

        /* Shift to the least significant bits. */
        while( ( ucMaxPriorityValue & portBIT_0_SET ) != portBIT_0_SET )
        {
            ucMaxPriorityValue >>= ( uint8_t ) 0x01;
        }

        /* Sanity check configUNIQUE_INTERRUPT_PRIORITIES matches the read
         * value. */
        configASSERT( ucMaxPriorityValue >= portLOWEST_INTERRUPT_PRIORITY );

        /* Restore the clobbered interrupt priority register to its original
         * value. */
        *pucFirstUserPriorityRegister = ucOriginalPriority;
    }
    #endif /* configASSERT_DEFINED */

    /* The scheduler must be started from EL1. */
    __asm volatile ( "MRS %0, CurrentEL" : "=r" ( ullAPSR ) );
    ullAPSR &= portAPSR_MODE_BITS_MASK;
    configASSERT( ullAPSR == portEL1 );

    #if ( configENABLE_MPU == 1 )
    {
        /* Setup the Memory Protection Unit (MPU). */
        vSetupMPU();
    }
    #endif /* #if ( configENABLE_MPU == 1 ) */

    /* Interrupts are turned off in the CPU itself to ensure a tick does
     * not execute while the scheduler is being started. Interrupts are
     * automatically turned back on in the CPU when the first task starts
     * executing. */
    __asm volatile ( "MSR DAIFSET, #2\n"
                     "DSB SY\n"
                     "ISB SY\n" ::: "memory" );

    #if ( configNUMBER_OF_CORES > 1 )
        /* Start the timer that generates the tick ISR. */
        configSETUP_TICK_INTERRUPT();

        /* Signal the secondary cores that the primary core's init is done. */
        ucPrimaryCoreInitDoneFlag = 1;
        __asm volatile ( "SEV \n"
                         "DSB SY \n"
                         "ISB SY \n"
                         ::: "memory" );

        /* Hold the primary core here until all the secondary cores are ready, this would be achieved only when
         * all elements of ucSecondaryCoresReadyFlags are set.
         */
        while( 1 )
        {
            BaseType_t xAllCoresReady = pdTRUE;

            for( uint8_t ucCoreID = 0; ucCoreID < ( configNUMBER_OF_CORES - 1 ); ucCoreID++ )
            {
                if( ucSecondaryCoresReadyFlags[ ucCoreID ] != pdTRUE )
                {
                    xAllCoresReady = pdFALSE;
                    break;
                }
            }

            if( xAllCoresReady == pdTRUE )
            {
                break;
            }
        }
    #else /* if ( configNUMBER_OF_CORES > 1 ) */
        /* Start the timer that generates the tick ISR. */
        configSETUP_TICK_INTERRUPT();
    #endif /* if ( configNUMBER_OF_CORES > 1 ) */

    #if ( configENABLE_MPU == 1 )
        xSchedulerRunning = pdTRUE;

        /* Enable the Memory Protection Unit (MPU)
         * MPU is only enabled after the primary and secondary handshakes
         * are done as to prevent inconsistent MPU regions attributes across
         * different cores resulting in unupdated values of the handshake
         * flags.
         */
        vEnableMPU();
    #endif /* #if ( configENABLE_MPU == 1 ) */

    /* Start the first task executing. */
    vPortRestoreTaskContext();

    return 0;
}
  1081. /*-----------------------------------------------------------*/
  1082. void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
  1083. {
  1084. /* Stub implementation for ports where there is nothing to return to
  1085. * Artificially force an assert. */
  1086. configASSERT( NULL );
  1087. }
  1088. /*-----------------------------------------------------------*/
  1089. #if ( configNUMBER_OF_CORES == 1 )
  1090. PRIVILEGED_FUNCTION void vPortEnterCritical( void )
  1091. {
  1092. /* Mask interrupts up to the max syscall interrupt priority. */
  1093. uxPortSetInterruptMask();
  1094. /* Now interrupts are disabled ullCriticalNesting can be accessed
  1095. * directly. Increment ullCriticalNesting to keep a count of how many times
  1096. * portENTER_CRITICAL() has been called. */
  1097. ullCriticalNesting++;
  1098. /* This is not the interrupt safe version of the enter critical function so
  1099. * assert() if it is being called from an interrupt context. Only API
  1100. * functions that end in "FromISR" can be used in an interrupt. Only assert if
  1101. * the critical nesting count is 1 to protect against recursive calls if the
  1102. * assert function also uses a critical section. */
  1103. if( ullCriticalNesting == 1ULL )
  1104. {
  1105. configASSERT( ullPortInterruptNesting == 0 );
  1106. }
  1107. }
  1108. /*-----------------------------------------------------------*/
  1109. PRIVILEGED_FUNCTION void vPortExitCritical( void )
  1110. {
  1111. if( ullCriticalNesting > portNO_CRITICAL_NESTING )
  1112. {
  1113. /* Decrement the nesting count as the critical section is being
  1114. * exited. */
  1115. ullCriticalNesting--;
  1116. /* If the nesting level has reached zero then all interrupt
  1117. * priorities must be re-enabled. */
  1118. if( ullCriticalNesting == portNO_CRITICAL_NESTING )
  1119. {
  1120. /* Critical nesting has reached zero so all interrupt priorities
  1121. * should be unmasked. */
  1122. portCLEAR_INTERRUPT_PRIORITIES_MASK();
  1123. }
  1124. }
  1125. }
  1126. #endif /* if ( configNUMBER_OF_CORES == 1 ) */
  1127. /*-----------------------------------------------------------*/
/**
 * @brief The RTOS tick interrupt handler.
 *
 * Asserts that the tick runs at the lowest usable interrupt priority and
 * with IRQs masked at the CPU, raises the interrupt priority mask while
 * the scheduler structures are updated, increments the tick (requesting a
 * yield when needed), then restores the mask and clears the tick source.
 */
void FreeRTOS_Tick_Handler( void )
{
    /* Must be the lowest possible priority. */
    uint64_t ullRunningInterruptPriority;

    __asm volatile ( "MRS %0, ICC_RPR_EL1" : "=r" ( ullRunningInterruptPriority ) );
    configASSERT( ullRunningInterruptPriority == ( portLOWEST_USABLE_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) );

    /* Interrupts should not be enabled before this point. */
    #if ( configASSERT_DEFINED == 1 )
    {
        uint64_t ullMaskBits;

        __asm volatile ( "MRS %0, DAIF" : "=r" ( ullMaskBits )::"memory" );
        configASSERT( ( ullMaskBits & portDAIF_I ) != 0 );
    }
    #endif /* configASSERT_DEFINED */

    /* Set interrupt mask before altering scheduler structures. The tick
     * interrupt runs at the lowest priority, so interrupts cannot already be masked,
     * so there is no need to save and restore the current mask value. It is
     * necessary to turn off interrupts in the CPU itself while the ICCPMR is being
     * updated.
     */
    UBaseType_t uxInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();

    #if ( configNUMBER_OF_CORES > 1 )
        /* Multicore builds additionally need the kernel's ISR critical
         * section around the tick update. */
        UBaseType_t x = portENTER_CRITICAL_FROM_ISR();
    #endif /* if ( configNUMBER_OF_CORES > 1 ) */

    /* Increment the RTOS tick; a pdTRUE result means a context switch is
     * required, which is signalled via the yield-required flag(s). */
    if( xTaskIncrementTick() != pdFALSE )
    {
        #if ( configNUMBER_OF_CORES == 1 )
            ullPortYieldRequired = pdTRUE;
        #else
            ullPortYieldRequired[ portGET_CORE_ID_FROM_ISR() ] = pdTRUE;
        #endif
    }

    #if ( configNUMBER_OF_CORES > 1 )
        portEXIT_CRITICAL_FROM_ISR( x );
    #endif /* if ( configNUMBER_OF_CORES > 1 ) */

    /* Ensure all interrupt priorities are active again. */
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxInterruptStatus );

    /* Ok to enable interrupts after the interrupt source has been cleared. */
    configCLEAR_TICK_INTERRUPT();
}
  1169. /*-----------------------------------------------------------*/
#if ( configUSE_TASK_FPU_SUPPORT == portTASK_NO_FPU_CONTEXT_BY_DEFAULT )
    void vPortTaskUsesFPU( void )
    {
        /* A task is registering the fact that it needs an FPU context. Set the
         * FPU flag (which is saved as part of the task context). */
        #if ( configNUMBER_OF_CORES == 1 )
            ullPortTaskHasFPUContext = pdTRUE;
        #else
            /* SMP: the flag is tracked per core, indexed by the calling core. */
            ullPortTaskHasFPUContext[ portGET_CORE_ID() ] = pdTRUE;
        #endif

        /* Consider initialising the FPSR here - but probably not necessary in
         * AArch64. */
    }
#endif /* configUSE_TASK_FPU_SUPPORT */
  1184. /*-----------------------------------------------------------*/
void vPortClearInterruptMask( UBaseType_t uxNewMaskValue )
{
    /* Restore the interrupt priority mask to a value previously returned by
     * uxPortSetInterruptMask().  Safe to call from EL0 tasks - the
     * non-trivial path is routed through an SVC so the PMR write happens at
     * EL1. */
    if( uxNewMaskValue == portUNMASK_VALUE )
    {
        /* Unmask all interrupt priorities. */
        portCLEAR_INTERRUPT_PRIORITIES_MASK();
    }
    else
    {
        /* NOTE(review): uxNewMaskValue is not passed to the SVC - presumably
         * the portSVC_UNMASK_INTERRUPTS handler restores the correct mask
         * value itself; confirm against the SVC handler in portASM.S. */
        __asm volatile (
            "SVC %0 \n"
            :
            : "i" ( portSVC_UNMASK_INTERRUPTS )
            : "memory"
        );
    }
}
  1202. void vPortClearInterruptMaskFromISR( UBaseType_t uxNewMaskValue )
  1203. {
  1204. __asm volatile (
  1205. "MSR DAIFSET, #2 \n"
  1206. "DSB SY \n"
  1207. "ISB SY \n"
  1208. "MSR ICC_PMR_EL1, %0 \n"
  1209. "DSB SY \n"
  1210. "ISB SY \n"
  1211. "MSR DAIFCLR, #2 \n"
  1212. "DSB SY \n"
  1213. "ISB SY \n"
  1214. :
  1215. : "r" ( uxNewMaskValue )
  1216. );
  1217. }
  1218. /*-----------------------------------------------------------*/
UBaseType_t uxPortSetInterruptMask( void )
{
    /* Mask all API-callable interrupt priorities and return the previous
     * mask value so the caller can later restore it with
     * vPortClearInterruptMask(). */
    UBaseType_t ullPMRValue;

    /* Use SVC so this can be called safely from EL0 tasks. */
    /* The SVC handler is expected to leave the previous ICC_PMR_EL1 value in
     * x0 (see the portSVC_MASK_ALL_INTERRUPTS handler in portASM.S); it is
     * copied out here as the return value. */
    __asm volatile (
        "svc %1 \n"
        "mov %0, x0 \n"
        : "=r" ( ullPMRValue )
        : "i" ( portSVC_MASK_ALL_INTERRUPTS )
        : "x0", "memory"
    );
    return ullPMRValue;
}
/* EL1/ISR variant to avoid SVC from interrupt context. */
UBaseType_t uxPortSetInterruptMaskFromISR( void )
{
    UBaseType_t ullPMRValue;

    /* Read the current priority mask so it can be returned to the caller and
     * later restored. */
    __asm volatile ( "MRS %0, ICC_PMR_EL1" : "=r" ( ullPMRValue ) );

    if( ullPMRValue != ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) )
    {
        /* Raise the mask to the maximum API-call priority.  IRQs are masked
         * at the CPU (DAIFSET #2) around the PMR write so the update cannot
         * be preempted. */
        __asm volatile ( "MSR DAIFSET, #2 \n"
                         "DSB SY \n"
                         "ISB SY \n"
                         "MSR ICC_PMR_EL1, %0 \n"
                         "DSB SY \n"
                         "ISB SY \n"
                         "MSR DAIFCLR, #2 \n"
                         "DSB SY \n"
                         "ISB SY \n"
                         ::"r" ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) : "memory" );
    }

    return ullPMRValue;
}
  1252. /*-----------------------------------------------------------*/
#if ( configASSERT_DEFINED == 1 )
    void vPortValidateInterruptPriority( void )
    {
        /* The following assertion will fail if a service routine (ISR) for
         * an interrupt that has been assigned a priority above
         * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
         * function. ISR safe FreeRTOS API functions must *only* be called
         * from interrupts that have been assigned a priority at or below
         * configMAX_SYSCALL_INTERRUPT_PRIORITY.
         *
         * Numerically low interrupt priority numbers represent logically high
         * interrupt priorities, therefore the priority of the interrupt must
         * be set to a value equal to or numerically *higher* than
         * configMAX_SYSCALL_INTERRUPT_PRIORITY.
         *
         * FreeRTOS maintains separate thread and ISR API functions to ensure
         * interrupt entry is as fast and simple as possible. */
        uint64_t ullRunningInterruptPriority;

        /* ICC_RPR_EL1 holds the running (active) interrupt priority. */
        __asm volatile ( "MRS %0, ICC_RPR_EL1" : "=r" ( ullRunningInterruptPriority ) );
        configASSERT( ullRunningInterruptPriority >= ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) );
    }
#endif /* configASSERT_DEFINED */
  1275. /*-----------------------------------------------------------*/
  1276. /*
  1277. * If the application provides an implementation of vApplicationIRQHandler(),
  1278. * then it will get called directly without saving the FPU registers on
  1279. * interrupt entry, and this weak implementation of
  1280. * vApplicationFPUSafeIRQHandler() is just provided to remove linkage errors -
  1281. * it should never actually get called so its implementation contains a
  1282. * call to configASSERT() that will always fail.
  1283. *
  1284. * If the application provides its own implementation of
  1285. * vApplicationFPUSafeIRQHandler() then the implementation of
  1286. * vApplicationIRQHandler() provided in portASM.S will save the FPU registers
  1287. * before calling it.
  1288. *
  1289. * Therefore, if the application writer wants FPU registers to be saved on
  1290. * interrupt entry their IRQ handler must be called
  1291. * vApplicationFPUSafeIRQHandler(), and if the application writer does not want
  1292. * FPU registers to be saved on interrupt entry their IRQ handler must be
  1293. * called vApplicationIRQHandler().
  1294. */
__attribute__( ( weak ) ) void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR )
{
    /* Default stub: this weak definition exists only to satisfy the linker
     * when the application does not provide its own FPU-safe handler.  It
     * must never actually execute, so it asserts unconditionally. */
    ( void ) ulICCIAR;
    configASSERT( ( volatile void * ) NULL );
}
  1300. /*-----------------------------------------------------------*/
  1301. #if ( configNUMBER_OF_CORES > 1 )
/* Which core owns the lock? Keep in privileged, shareable RAM. */
/* One bitmask per core: bit n set means that core currently holds lock n
 * (n indexes ePortRTOSLock). */
PRIVILEGED_DATA volatile uint64_t ullOwnedByCore[ portMAX_CORE_COUNT ];

/* Lock count a core owns. */
/* Recursion depth per lock; only meaningful while the lock is held. */
PRIVILEGED_DATA volatile uint64_t ullRecursionCountByLock[ eLockCount ];

/* Index 0 is used for ISR lock and Index 1 is used for task lock. */
/* Gate words operated on with LDXR/STXR by prvSpinTrylock()/prvSpinUnlock():
 * 0 = free, 1 = taken. */
PRIVILEGED_DATA uint32_t ulGateWord[ eLockCount ];
void vInterruptCore( uint32_t ulInterruptID, uint8_t ucCoreID )
{
    /* Raise a software generated interrupt (SGI) on core ucCoreID via an
     * SVC, so this can be issued from EL0.
     *
     * NOTE(review): the encoding below (target core mask in the low 16 bits,
     * interrupt ID in bits [27:24]) looks like the ICC_SGI1R_EL1 layout -
     * confirm against the portSVC_INTERRUPT_CORE handler in portASM.S. */
    uint64_t ulRegVal = 0;
    uint32_t ulCoreMask = ( 1UL << ucCoreID );

    ulRegVal |= ( (ulCoreMask & 0xFFFF) | ( ( ulInterruptID & 0xF ) << 24U ) );

    /* x0 carries the argument to the SVC handler; the caller's x0 is
     * preserved on the stack around the call. */
    __asm volatile (
        "str x0, [ sp, #-0x10 ]! \n"
        "mov x0, %0 \n"
        "svc %1 \n"
        "ldr x0, [ sp ], # 0x10 \n"
        :
        : "r" ( ulRegVal ), "i" ( portSVC_INTERRUPT_CORE )
        : "memory", "w1"
    );
}
  1323. /*-----------------------------------------------------------*/
static inline void prvSpinUnlock( uint32_t * ulLock )
{
    /* Release a gate word taken with prvSpinTrylock() and wake any cores
     * sleeping in WFE while waiting for it. */
    /* Conservative unlock: preserve original barriers for broad HW/FVP. */
    __asm volatile (
        "dmb sy \n"        /* Order critical-section accesses before the release. */
        "mov w1, #0 \n"
        "str w1, [%x0] \n" /* Clear the gate word - lock is now free. */
        "sev \n"           /* Wake cores waiting in WFE. */
        "dsb sy \n"
        "isb sy \n"
        :
        : "r" ( ulLock )
        : "memory", "w1"
    );
}
  1339. /*-----------------------------------------------------------*/
static inline uint32_t prvSpinTrylock( uint32_t * ulLock )
{
    /*
     * Conservative LDXR/STXR trylock:
     * - Return 1 immediately if busy, clearing exclusive state (CLREX).
     * - Retry STXR only on spurious failure when observed free.
     * - DMB on success to preserve expected acquire semantics.
     *
     * Returns 0 when the lock was acquired, 1 when it was already held.
     */
    register uint32_t ulRet;

    __asm volatile (
        "1: \n"
        "ldxr w1, [%x1] \n"      /* Exclusive load of the gate word. */
        "cbnz w1, 2f \n" /* Busy -> return 1 */
        "mov w2, #1 \n"
        "stxr w3, w2, [%x1] \n" /* w3 = status */
        "cbnz w3, 1b \n" /* Retry on STXR failure */
        "dmb sy \n" /* Acquire barrier on success */
        "mov %w0, #0 \n" /* Success */
        "b 3f \n"
        "2: \n"
        "clrex \n" /* Clear monitor when busy */
        "mov %w0, #1 \n" /* Busy */
        "3: \n"
        : "=r" ( ulRet )
        : "r" ( ulLock )
        : "memory", "w1", "w2", "w3"
    );
    return ulRet;
}
  1369. /*-----------------------------------------------------------*/
  1370. /* Read 64b value shared between cores. */
  1371. static inline uint64_t prvGet64( volatile uint64_t * x )
  1372. {
  1373. __asm( "dsb sy" );
  1374. return *x;
  1375. }
  1376. /*-----------------------------------------------------------*/
  1377. /* Write 64b value shared between cores. */
  1378. static inline void prvSet64( volatile uint64_t * x,
  1379. uint64_t value )
  1380. {
  1381. *x = value;
  1382. __asm( "dsb sy" );
  1383. }
  1384. /*-----------------------------------------------------------*/
void vPortRecursiveLock( uint8_t ucCoreID,
                         ePortRTOSLock eLockNum,
                         BaseType_t uxAcquire )
{
    /* Acquire ( uxAcquire != 0 ) or release ( uxAcquire == 0 ) one of the
     * kernel's recursive spinlocks (ISR lock / task lock) on behalf of core
     * ucCoreID.  Re-acquiring a lock already owned by this core only bumps a
     * recursion count; the gate word is only released when the count returns
     * to zero. */

    /* Validate the core ID and lock number. */
    configASSERT( ucCoreID < portMAX_CORE_COUNT );
    configASSERT( eLockNum < eLockCount );

    /* Bit recording ownership of this lock in ullOwnedByCore[]. */
    uint32_t ulLockBit = 1u << eLockNum;

    /* Lock acquire */
    if( uxAcquire )
    {
        /* Check if spinlock is available.
         * If spinlock is not available check if the core owns the lock.
         * If the core owns the lock wait increment the lock count by the core.
         * If core does not own the lock wait for the spinlock.
         */
        if( prvSpinTrylock( &ulGateWord[ eLockNum ] ) != 0 )
        {
            /* Check if the core owns the spinlock. */
            if( prvGet64( &ullOwnedByCore[ ucCoreID ] ) & ulLockBit )
            {
                /* Recursive acquisition - just increment the depth count. */
                configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) != 255u );
                prvSet64( &ullRecursionCountByLock[ eLockNum ], ( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) + 1 ) );
                return;
            }

            /* Preload the gate word into the cache. */
            uint32_t dummy = ulGateWord[ eLockNum ];
            dummy++;

            /* Another core holds the lock - spin (sleeping in WFE) until the
             * gate word is observed free, then race to take it again. */
            while( prvSpinTrylock( &ulGateWord[ eLockNum ] ) != 0 )
            {
                /* Follow Arm's recommended way of sleeping
                 * sevl is used to prime the wait loop.
                 * The first wfe wakes immediately because sevl has set the flag.
                 * Check the lock, if it's not available, issue a second wfe to sleep.
                 * This guarantees the core actually goes to sleep.
                 */
                __asm volatile (
                    "sevl \n"
                    "1: wfe \n"
                    "ldr w2, [%x0] \n"
                    "cbnz w2, 1b \n"
                    :
                    : "r" ( &ulGateWord[ eLockNum ] )
                    : "memory", "w2"
                );
            }
        }

        /* Add barrier to ensure lock is taken before we proceed. */
        __asm__ __volatile__ ( "dmb sy" ::: "memory" );

        /* Assert the lock count is 0 when the spinlock is free and is acquired. */
        configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) == 0 );

        /* Set lock count as 1. */
        prvSet64( &ullRecursionCountByLock[ eLockNum ], 1 );

        /* Set ullOwnedByCore. */
        prvSet64( &ullOwnedByCore[ ucCoreID ], ( prvGet64( &ullOwnedByCore[ ucCoreID ] ) | ulLockBit ) );
    }
    /* Lock release. */
    else
    {
        /* Assert the lock is not free already. */
        configASSERT( ( prvGet64( &ullOwnedByCore[ ucCoreID ] ) & ulLockBit ) != 0 );
        configASSERT( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) != 0 );

        /* Reduce ullRecursionCountByLock by 1. */
        prvSet64( &ullRecursionCountByLock[ eLockNum ], ( prvGet64( &ullRecursionCountByLock[ eLockNum ] ) - 1 ) );

        if( !prvGet64( &ullRecursionCountByLock[ eLockNum ] ) )
        {
            /* Outermost release - drop ownership first, then open the gate. */
            prvSet64( &ullOwnedByCore[ ucCoreID ], ( prvGet64( &ullOwnedByCore[ ucCoreID ] ) & ~ulLockBit ) );
            prvSpinUnlock( &ulGateWord[ eLockNum ] );

            /* Add barrier to ensure lock status is reflected before we proceed. */
            __asm__ __volatile__ ( "dmb sy" ::: "memory" );
        }
    }
}
  1458. /*-----------------------------------------------------------*/
uint8_t ucPortGetCoreID( void )
{
    /* Use SVC to obtain the core ID in a way that is safe when called
     * from EL0 tasks. ISRs and EL1 code should use
     * ucPortGetCoreIDFromIsr()/portGET_CORE_ID_FROM_ISR().
     */
    uint8_t ucCoreID;

    /* The SVC handler is expected to leave the core ID in w0 (see the
     * portSVC_GET_CORE_ID handler in portASM.S); it is copied out here. */
    __asm volatile (
        "svc %1 \n"
        "mov %w0, w0 \n"
        : "=r" ( ucCoreID )
        : "i" ( portSVC_GET_CORE_ID )
        : "x0", "memory"
    );
    return ucCoreID;
}
  1475. /*-----------------------------------------------------------*/
uint8_t ucPortGetCoreIDFromIsr ( void ) /* PRIVILEGED_FUNCTION */
{
    /* EL1-only variant: read the core ID directly from MPIDR_EL1 instead of
     * issuing an SVC.  NOTE(review): returns affinity level 0 (bits [7:0]),
     * which assumes a single-cluster system where Aff0 alone identifies the
     * core - confirm for multi-cluster targets. */
    uint64_t ullMpidrEl1;

    __asm volatile ( "MRS %0, MPIDR_EL1" : "=r" ( ullMpidrEl1 ) );
    return ( uint8_t ) ( ullMpidrEl1 & 0xff );
}
  1482. /*------------------------------------------------------------*/
void FreeRTOS_SGI_Handler( void )
{
    /* Handler for the software generated interrupt used to request a yield
     * on this core (raised from another core via vInterruptCore()).
     *
     * Must be the lowest possible priority. */
    uint64_t ullRunningInterruptPriority;

    /* ICC_RPR_EL1 holds the priority of the interrupt currently being
     * serviced - the assert fails if the SGI was not installed at the lowest
     * usable priority. */
    __asm volatile ( "MRS %0, ICC_RPR_EL1" : "=r" ( ullRunningInterruptPriority ) );
    configASSERT( ullRunningInterruptPriority == ( portLOWEST_USABLE_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) );

    /* Interrupts should not be enabled before this point. */
    #if ( configASSERT_DEFINED == 1 )
    {
        uint64_t ullMaskBits;

        /* DAIF.I must still be set on entry (IRQs masked at the CPU). */
        __asm volatile ( "mrs %0, DAIF" : "=r" ( ullMaskBits )::"memory" );
        configASSERT( ( ullMaskBits & portDAIF_I ) != 0 );
    }
    #endif /* configASSERT_DEFINED */

    /* Set interrupt mask before altering scheduler structures. The SGI
     * interrupt runs at the lowest priority, so interrupts cannot already be masked,
     * so there is no need to save and restore the current mask value. It is
     * necessary to turn off interrupts in the CPU itself while the ICCPMR is being
     * updated.
     */
    UBaseType_t uxInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    UBaseType_t uxSavedInterruptStatus = portENTER_CRITICAL_FROM_ISR();

    /* Flag that the interrupt exit code must perform a context switch. */
    #if ( configNUMBER_OF_CORES == 1 )
        ullPortYieldRequired = pdTRUE;
    #else
        ullPortYieldRequired[ portGET_CORE_ID_FROM_ISR() ] = pdTRUE;
    #endif

    portEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
    portCLEAR_INTERRUPT_MASK_FROM_ISR( uxInterruptStatus );
}
  1513. /*-----------------------------------------------------------*/
  1514. #endif /* if( configNUMBER_OF_CORES > 1 ) */