port.c 72 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673
  1. /*
  2. * FreeRTOS Kernel <DEVELOPMENT BRANCH>
  3. * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
  4. * Copyright 2025 Arm Limited and/or its affiliates
  5. * <open-source-office@arm.com>
  6. *
  7. * SPDX-License-Identifier: MIT
  8. *
  9. * Permission is hereby granted, free of charge, to any person obtaining a copy of
  10. * this software and associated documentation files (the "Software"), to deal in
  11. * the Software without restriction, including without limitation the rights to
  12. * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
  13. * the Software, and to permit persons to whom the Software is furnished to do so,
  14. * subject to the following conditions:
  15. *
  16. * The above copyright notice and this permission notice shall be included in all
  17. * copies or substantial portions of the Software.
  18. *
  19. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
  21. * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
  22. * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
  23. * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  24. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. * https://www.FreeRTOS.org
  27. * https://github.com/FreeRTOS
  28. *
  29. */
  30. /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
  31. * all the API functions to use the MPU wrappers. That should only be done when
  32. * task.h is included from an application file. */
  33. #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
  34. /* Scheduler includes. */
  35. #include "FreeRTOS.h"
  36. #include "task.h"
  37. /* MPU includes. */
  38. #include "mpu_wrappers.h"
  39. #include "mpu_syscall_numbers.h"
  40. /* Portasm includes. */
  41. #include "portasm.h"
  42. #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
  43. /*-----------------------------------------------------------*/
/**
 * @brief Prototype of all Interrupt Service Routines (ISRs).
 */
typedef void ( * portISR_t )( void );
/*-----------------------------------------------------------*/

/**
 * @brief Constants required to manipulate the NVIC.
 */
#define portNVIC_SYSTICK_CTRL_REG             ( *( ( volatile uint32_t * ) 0xe000e010 ) ) /* SysTick Control and Status Register. */
#define portNVIC_SYSTICK_LOAD_REG             ( *( ( volatile uint32_t * ) 0xe000e014 ) ) /* SysTick Reload Value Register. */
#define portNVIC_SYSTICK_CURRENT_VALUE_REG    ( *( ( volatile uint32_t * ) 0xe000e018 ) ) /* SysTick Current Value Register. */
#define portNVIC_SHPR3_REG                    ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) /* System Handler Priority Register 3 - PendSV and SysTick priorities. */
#define portNVIC_SHPR2_REG                    ( *( ( volatile uint32_t * ) 0xe000ed1c ) ) /* System Handler Priority Register 2 - SVCall priority. */
#define portNVIC_SYSTICK_ENABLE_BIT           ( 1UL << 0UL )  /* SysTick counter enable. */
#define portNVIC_SYSTICK_INT_BIT              ( 1UL << 1UL )  /* SysTick interrupt enable. */
#define portNVIC_SYSTICK_CLK_BIT              ( 1UL << 2UL )  /* Clock source: 1 = processor clock. */
#define portNVIC_SYSTICK_COUNT_FLAG_BIT       ( 1UL << 16UL ) /* Set when the counter has reached zero since last read. */
#define portNVIC_PEND_SYSTICK_CLEAR_BIT       ( 1UL << 25UL ) /* ICSR PENDSTCLR - clears a pending SysTick. */
#define portNVIC_PEND_SYSTICK_SET_BIT         ( 1UL << 26UL ) /* ICSR PENDSTSET - reads as 1 when SysTick is pending. */
#define portMIN_INTERRUPT_PRIORITY            ( 255UL )       /* Lowest possible interrupt priority. */
#define portNVIC_PENDSV_PRI                   ( portMIN_INTERRUPT_PRIORITY << 16UL ) /* PendSV priority field within SHPR3. */
#define portNVIC_SYSTICK_PRI                  ( portMIN_INTERRUPT_PRIORITY << 24UL ) /* SysTick priority field within SHPR3. */
/*-----------------------------------------------------------*/

/**
 * @brief Constants required to manipulate the SCB.
 */
#define portSCB_VTOR_REG                      ( *( ( portISR_t ** ) 0xe000ed08 ) )             /* Vector Table Offset Register. */
#define portSCB_SYS_HANDLER_CTRL_STATE_REG    ( *( ( volatile uint32_t * ) 0xe000ed24 ) )     /* System Handler Control and State Register. */
#define portSCB_MEM_FAULT_ENABLE_BIT          ( 1UL << 16UL )                                 /* MEMFAULTENA bit - enables the MemManage fault handler. */
/*-----------------------------------------------------------*/

/**
 * @brief Constants used to check the installation of the FreeRTOS interrupt handlers.
 *
 * Exception numbers 11 and 14 are the SVCall and PendSV entries in the
 * vector table respectively.
 */
#define portVECTOR_INDEX_SVC       ( 11 )
#define portVECTOR_INDEX_PENDSV    ( 14 )
/*-----------------------------------------------------------*/

/**
 * @brief Constants used during system call enter and exit.
 */
#define portPSR_STACK_PADDING_MASK              ( 1UL << 9UL ) /* PSR bit indicating the exception stack frame was aligner-padded. */
#define portEXC_RETURN_STACK_FRAME_TYPE_MASK    ( 1UL << 4UL ) /* EXC_RETURN bit distinguishing standard vs extended (FP) stack frames. */
/*-----------------------------------------------------------*/

/**
 * @brief Offsets in the stack to the parameters when inside the SVC handler.
 *
 * These are word offsets into the hardware-stacked exception frame
 * ( r0, r1, r2, r3, r12, LR, PC, xPSR ).
 */
#define portOFFSET_TO_LR     ( 5 )
#define portOFFSET_TO_PC     ( 6 )
#define portOFFSET_TO_PSR    ( 7 )
  93. /**
  94. * @brief Constants required to manipulate the MPU.
  95. */
  96. #define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
  97. #define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
  98. #define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
  99. #define portMPU_RASR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
  100. /* MPU Region Attribute and Size Register (RASR) bitmasks. */
  101. #define portMPU_RASR_AP_BITMASK ( 0x7UL << 24UL )
  102. #define portMPU_RASR_S_C_B_BITMASK ( 0x7UL )
  103. #define portMPU_RASR_S_C_B_LOCATION ( 16UL )
  104. #define portMPU_RASR_SIZE_BITMASK ( 0x1FUL << 1UL )
  105. #define portMPU_RASR_REGION_ENABLE_BITMASK ( 0x1UL )
  106. /* MPU Region Base Address Register (RBAR) bitmasks. */
  107. #define portMPU_RBAR_ADDRESS_BITMASK ( 0xFFFFFF00UL )
  108. #define portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ( 0x1UL << 4UL )
  109. #define portMPU_RBAR_REGION_NUMBER_BITMASK ( 0x0000000FUL )
  110. /* MPU Control Register (MPU_CTRL) bitmasks. */
  111. #define portMPU_CTRL_ENABLE_BITMASK ( 0x1UL )
  112. #define portMPU_CTRL_PRIV_BACKGROUND_ENABLE_BITMASK ( 0x1UL << 2UL ) /* PRIVDEFENA bit. */
  113. /* Expected value of the portMPU_TYPE register. */
  114. #define portEXPECTED_MPU_TYPE_VALUE ( 0x8UL << 8UL ) /* 8 DREGION unified. */
  115. /* Extract first address of the MPU region as encoded in the
  116. * RBAR (Region Base Address Register) value. */
  117. #define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
  118. ( ( rbar ) & portMPU_RBAR_ADDRESS_BITMASK )
  119. /* Extract size of the MPU region as encoded in the
  120. * RASR (Region Attribute and Size Register) value. */
  121. #define portEXTRACT_REGION_SIZE_FROM_RASR( rasr ) \
  122. ( 1 << ( ( ( ( rasr ) & portMPU_RASR_SIZE_BITMASK ) >> 1 )+ 1 ) )
  123. /* Does addr lies within [start, end] address range? */
  124. #define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
  125. ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
  126. /* Is the access request satisfied by the available permissions? */
  127. #define portIS_AUTHORIZED( accessRequest, permissions ) \
  128. ( ( ( permissions ) & ( accessRequest ) ) == accessRequest )
  129. /* Max value that fits in a uint32_t type. */
  130. #define portUINT32_MAX ( ~( ( uint32_t ) 0 ) )
  131. /* Check if adding a and b will result in overflow. */
  132. #define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) )
  133. /*-----------------------------------------------------------*/
/**
 * @brief The maximum 24-bit number.
 *
 * It is needed because the systick is a 24-bit counter.
 */
#define portMAX_24_BIT_NUMBER    ( 0xffffffUL )

/**
 * @brief A fiddle factor to estimate the number of SysTick counts that would
 * have occurred while the SysTick counter is stopped during tickless idle
 * calculations.
 */
#define portMISSED_COUNTS_FACTOR    ( 94UL )
/*-----------------------------------------------------------*/

/**
 * @brief Constants required to set up the initial stack.
 *
 * Only the T bit (bit 24) is set in the initial xPSR so the task starts
 * executing in Thumb state.
 */
#define portINITIAL_XPSR    ( 0x01000000 )

/**
 * @brief Initial EXC_RETURN value.
 *
 * FF FF FF FD
 * 1111 1111 1111 1111 1111 1111 1111 1101
 *
 * Bit[3] - 1 --> Return to the Thread mode.
 * Bit[2] - 1 --> Restore registers from the process stack.
 * Bit[1] - 0 --> Reserved, 0.
 * Bit[0] - 1 --> Reserved, 1.
 */
#define portINITIAL_EXC_RETURN    ( 0xfffffffdUL )

/**
 * @brief CONTROL register privileged bit mask.
 *
 * Bit[0] in CONTROL register tells the privilege:
 * Bit[0] = 0 ==> The task is privileged.
 * Bit[0] = 1 ==> The task is not privileged.
 */
#define portCONTROL_PRIVILEGED_MASK    ( 1UL << 0UL )

/**
 * @brief Initial CONTROL register values.
 *
 * 0x3 = SPSEL (use PSP) + nPRIV set; 0x2 = SPSEL only, so the task
 * remains privileged.
 */
#define portINITIAL_CONTROL_UNPRIVILEGED    ( 0x3 )
#define portINITIAL_CONTROL_PRIVILEGED      ( 0x2 )

/**
 * @brief Let the user override the default SysTick clock rate. If defined by the
 * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
 * configuration register.
 */
#ifndef configSYSTICK_CLOCK_HZ
    #define configSYSTICK_CLOCK_HZ             ( configCPU_CLOCK_HZ )
    /* Ensure the SysTick is clocked at the same frequency as the core. */
    #define portNVIC_SYSTICK_CLK_BIT_CONFIG    ( portNVIC_SYSTICK_CLK_BIT )
#else
    /* Select the option to clock SysTick not at the same frequency as the core. */
    #define portNVIC_SYSTICK_CLK_BIT_CONFIG    ( 0 )
#endif

/**
 * @brief Let the user override the pre-loading of the initial LR with the
 * address of prvTaskExitError() in case it messes up unwinding of the stack
 * in the debugger.
 */
#ifdef configTASK_RETURN_ADDRESS
    #define portTASK_RETURN_ADDRESS    configTASK_RETURN_ADDRESS
#else
    #define portTASK_RETURN_ADDRESS    prvTaskExitError
#endif

/**
 * @brief If portPRELOAD_REGISTERS then registers will be given an initial value
 * when a task is created. This helps in debugging at the cost of code size.
 */
#define portPRELOAD_REGISTERS    1
/*-----------------------------------------------------------*/
/**
 * @brief Used to catch tasks that attempt to return from their implementing
 * function.
 */
static void prvTaskExitError( void );

#if ( configENABLE_MPU == 1 )

/**
 * @brief Setup the Memory Protection Unit (MPU).
 */
    static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;

#endif /* configENABLE_MPU */

/**
 * @brief Setup the timer to generate the tick interrupts.
 *
 * The implementation in this file is weak to allow application writers to
 * change the timer used to generate the tick interrupt.
 */
void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;

/**
 * @brief Checks whether the current execution context is interrupt.
 *
 * @return pdTRUE if the current execution context is interrupt, pdFALSE
 * otherwise.
 */
BaseType_t xPortIsInsideInterrupt( void );

/**
 * @brief Yield the processor.
 */
void vPortYield( void ) PRIVILEGED_FUNCTION;

/**
 * @brief Enter critical section.
 */
void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;

/**
 * @brief Exit from critical section.
 */
void vPortExitCritical( void ) PRIVILEGED_FUNCTION;

/**
 * @brief SysTick handler.
 */
void SysTick_Handler( void ) PRIVILEGED_FUNCTION;

/**
 * @brief C part of SVC handler.
 *
 * @param pulCallerStackAddress The stack in use when the SVC was raised.
 */
portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;

#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

/**
 * @brief Sets up the system call stack so that upon returning from
 * SVC, the system call stack is used.
 *
 * @param pulTaskStack The current SP when the SVC was raised.
 * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
 * @param ucSystemCallNumber The system call number of the system call.
 */
    void vSystemCallEnter( uint32_t * pulTaskStack,
                           uint32_t ulLR,
                           uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;

#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */

#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

/**
 * @brief Raise SVC for exiting from a system call.
 */
    void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;

#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */

#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

/**
 * @brief Sets up the task stack so that upon returning from
 * SVC, the task stack is used again.
 *
 * @param pulSystemCallStack The current SP when the SVC was raised.
 * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
 */
    void vSystemCallExit( uint32_t * pulSystemCallStack,
                          uint32_t ulLR ) PRIVILEGED_FUNCTION;

#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */

#if ( configENABLE_MPU == 1 )

/**
 * @brief Checks whether or not the calling task is privileged.
 *
 * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
 */
    BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;

#endif /* configENABLE_MPU == 1 */
/*-----------------------------------------------------------*/
#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )

/**
 * @brief This variable is set to pdTRUE when the scheduler is started.
 */
    PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;

#endif

/**
 * @brief Each task maintains its own interrupt status in the critical nesting
 * variable.
 *
 * NOTE(review): initialised to 0xaaaaaaaa, presumably a deliberately invalid
 * value so misuse before the scheduler starts is detectable - confirm against
 * the scheduler-start code outside this chunk.
 */
PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;

#if ( configUSE_TICKLESS_IDLE == 1 )

/**
 * @brief The number of SysTick increments that make up one tick period.
 */
    PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;

/**
 * @brief The maximum number of tick periods that can be suppressed is
 * limited by the 24 bit resolution of the SysTick timer.
 */
    PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;

/**
 * @brief Compensate for the CPU cycles that pass while the SysTick is
 * stopped (low power functionality only).
 */
    PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;

#endif /* configUSE_TICKLESS_IDLE */
/*-----------------------------------------------------------*/
#if ( configUSE_TICKLESS_IDLE == 1 )

    /**
     * @brief Low power tickless idle implementation.
     *
     * Stops the SysTick, sleeps (WFI) for up to xExpectedIdleTime tick
     * periods, then reprograms the SysTick and steps the kernel's tick count
     * by however many whole tick periods elapsed. Weak so the application
     * can supply its own implementation.
     *
     * @param xExpectedIdleTime Number of tick periods the kernel expects to
     * remain idle; clamped to what the 24-bit SysTick can represent.
     */
    __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
    {
        uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
        TickType_t xModifiableIdleTime;

        /* Make sure the SysTick reload value does not overflow the counter. */
        if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
        {
            xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
        }

        /* Enter a critical section but don't use the taskENTER_CRITICAL()
         * method as that will mask interrupts that should exit sleep mode. */
        __asm volatile ( "cpsid i" ::: "memory" );
        __asm volatile ( "dsb" );
        __asm volatile ( "isb" );

        /* If a context switch is pending or a task is waiting for the scheduler
         * to be unsuspended then abandon the low power entry. */
        if( eTaskConfirmSleepModeStatus() == eAbortSleep )
        {
            /* Re-enable interrupts - see comments above the cpsid instruction
             * above. */
            __asm volatile ( "cpsie i" ::: "memory" );
        }
        else
        {
            /* Stop the SysTick momentarily. The time the SysTick is stopped for
             * is accounted for as best it can be, but using the tickless mode will
             * inevitably result in some tiny drift of the time maintained by the
             * kernel with respect to calendar time. */
            portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );

            /* Use the SysTick current-value register to determine the number of
             * SysTick decrements remaining until the next tick interrupt. If the
             * current-value register is zero, then there are actually
             * ulTimerCountsForOneTick decrements remaining, not zero, because the
             * SysTick requests the interrupt when decrementing from 1 to 0. */
            ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;

            if( ulSysTickDecrementsLeft == 0 )
            {
                ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
            }

            /* Calculate the reload value required to wait xExpectedIdleTime
             * tick periods. -1 is used because this code normally executes part
             * way through the first tick period. But if the SysTick IRQ is now
             * pending, then clear the IRQ, suppressing the first tick, and correct
             * the reload value to reflect that the second tick period is already
             * underway. The expected idle time is always at least two ticks. */
            ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );

            if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
            {
                portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
                ulReloadValue -= ulTimerCountsForOneTick;
            }

            if( ulReloadValue > ulStoppedTimerCompensation )
            {
                ulReloadValue -= ulStoppedTimerCompensation;
            }

            /* Set the new reload value. */
            portNVIC_SYSTICK_LOAD_REG = ulReloadValue;

            /* Clear the SysTick count flag and set the count value back to
             * zero. */
            portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;

            /* Restart SysTick. */
            portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;

            /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
             * set its parameter to 0 to indicate that its implementation contains
             * its own wait for interrupt or wait for event instruction, and so wfi
             * should not be executed again. However, the original expected idle
             * time variable must remain unmodified, so a copy is taken. */
            xModifiableIdleTime = xExpectedIdleTime;
            configPRE_SLEEP_PROCESSING( xModifiableIdleTime );

            if( xModifiableIdleTime > 0 )
            {
                __asm volatile ( "dsb" ::: "memory" );
                __asm volatile ( "wfi" );
                __asm volatile ( "isb" );
            }

            configPOST_SLEEP_PROCESSING( xExpectedIdleTime );

            /* Re-enable interrupts to allow the interrupt that brought the MCU
             * out of sleep mode to execute immediately. See comments above
             * the cpsid instruction above. */
            __asm volatile ( "cpsie i" ::: "memory" );
            __asm volatile ( "dsb" );
            __asm volatile ( "isb" );

            /* Disable interrupts again because the clock is about to be stopped
             * and interrupts that execute while the clock is stopped will increase
             * any slippage between the time maintained by the RTOS and calendar
             * time. */
            __asm volatile ( "cpsid i" ::: "memory" );
            __asm volatile ( "dsb" );
            __asm volatile ( "isb" );

            /* Disable the SysTick clock without reading the
             * portNVIC_SYSTICK_CTRL_REG register to ensure the
             * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
             * the time the SysTick is stopped for is accounted for as best it can
             * be, but using the tickless mode will inevitably result in some tiny
             * drift of the time maintained by the kernel with respect to calendar
             * time*/
            portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );

            /* Determine whether the SysTick has already counted to zero. */
            if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
            {
                uint32_t ulCalculatedLoadValue;

                /* The tick interrupt ended the sleep (or is now pending), and
                 * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG
                 * with whatever remains of the new tick period. */
                ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );

                /* Don't allow a tiny value, or values that have somehow
                 * underflowed because the post sleep hook did something
                 * that took too long or because the SysTick current-value register
                 * is zero. */
                if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
                {
                    ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
                }

                portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;

                /* As the pending tick will be processed as soon as this
                 * function exits, the tick value maintained by the tick is stepped
                 * forward by one less than the time spent waiting. */
                ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
            }
            else
            {
                /* Something other than the tick interrupt ended the sleep. */

                /* Use the SysTick current-value register to determine the
                 * number of SysTick decrements remaining until the expected idle
                 * time would have ended. */
                ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
                #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
                {
                    /* If the SysTick is not using the core clock, the current-
                     * value register might still be zero here. In that case, the
                     * SysTick didn't load from the reload register, and there are
                     * ulReloadValue decrements remaining in the expected idle
                     * time, not zero. */
                    if( ulSysTickDecrementsLeft == 0 )
                    {
                        ulSysTickDecrementsLeft = ulReloadValue;
                    }
                }
                #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */

                /* Work out how long the sleep lasted rounded to complete tick
                 * periods (not the ulReload value which accounted for part
                 * ticks). */
                ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;

                /* How many complete tick periods passed while the processor
                 * was waiting? */
                ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;

                /* The reload value is set to whatever fraction of a single tick
                 * period remains. */
                portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
            }

            /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
             * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If
             * the SysTick is not using the core clock, temporarily configure it to
             * use the core clock. This configuration forces the SysTick to load
             * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
             * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready
             * to receive the standard value immediately. */
            portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
            portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
            #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
            {
                portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
            }
            #else
            {
                /* The temporary usage of the core clock has served its purpose,
                 * as described above. Resume usage of the other clock. */
                portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;

                if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
                {
                    /* The partial tick period already ended. Be sure the SysTick
                     * counts it only once. */
                    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
                }

                portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
                portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
            }
            #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */

            /* Step the tick to account for any tick periods that elapsed. */
            vTaskStepTick( ulCompleteTickPeriods );

            /* Exit with interrupts enabled. */
            __asm volatile ( "cpsie i" ::: "memory" );
        }
    }

#endif /* configUSE_TICKLESS_IDLE */
/*-----------------------------------------------------------*/
  503. /*-----------------------------------------------------------*/
/**
 * @brief Configure the SysTick to generate the RTOS tick interrupt.
 *
 * Weak so the application can supply a different tick source. The register
 * write order below is deliberate - do not reorder.
 */
__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
{
    /* Calculate the constants required to configure the tick interrupt. */
    #if ( configUSE_TICKLESS_IDLE == 1 )
    {
        ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
        xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
        ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
    }
    #endif /* configUSE_TICKLESS_IDLE */

    /* Stop and reset SysTick.
     *
     * QEMU versions older than 7.0.0 contain a bug which causes an error if we
     * enable SysTick without first selecting a valid clock source. We trigger
     * the bug if we change clock sources from a clock with a zero clock period
     * to one with a nonzero clock period and enable Systick at the same time.
     * So we configure the CLKSOURCE bit here, prior to setting the ENABLE bit.
     * This workaround avoids the bug in QEMU versions older than 7.0.0. */
    portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG;
    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;

    /* Configure SysTick to interrupt at the requested rate. */
    portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
    portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
}
  528. /*-----------------------------------------------------------*/
  529. static void prvTaskExitError( void )
  530. {
  531. volatile uint32_t ulDummy = 0UL;
  532. /* A function that implements a task must not exit or attempt to return to
  533. * its caller as there is nothing to return to. If a task wants to exit it
  534. * should instead call vTaskDelete( NULL ). Artificially force an assert()
  535. * to be triggered if configASSERT() is defined, then stop here so
  536. * application writers can catch the error. */
  537. configASSERT( ulCriticalNesting == ~0UL );
  538. portDISABLE_INTERRUPTS();
  539. while( ulDummy == 0 )
  540. {
  541. /* This file calls prvTaskExitError() after the scheduler has been
  542. * started to remove a compiler warning about the function being
  543. * defined but never called. ulDummy is used purely to quieten other
  544. * warnings about code appearing after this function is called - making
  545. * ulDummy volatile makes the compiler think the function could return
  546. * and therefore not output an 'unreachable code' warning for code that
  547. * appears after it. */
  548. }
  549. }
  550. /*-----------------------------------------------------------*/
  551. #if ( configENABLE_MPU == 1 )
  552. static uint32_t prvGetMPURegionSizeSetting( uint32_t ulActualSizeInBytes )
  553. {
  554. uint32_t ulRegionSize, ulReturnValue = 7UL;
  555. /* 256 is the smallest region size, 31 is the largest valid value for
  556. * ulReturnValue. */
  557. for( ulRegionSize = 256UL; ulReturnValue < 31UL; ( ulRegionSize <<= 1UL ) )
  558. {
  559. if( ulActualSizeInBytes <= ulRegionSize )
  560. {
  561. break;
  562. }
  563. else
  564. {
  565. ulReturnValue++;
  566. }
  567. }
  568. /* Shift the code by one before returning so it can be written directly
  569. * into the the correct bit position of the attribute register. */
  570. return( ulReturnValue << 1UL );
  571. }
  572. #endif /* configENABLE_MPU */
  573. /*-----------------------------------------------------------*/
  574. #if ( configENABLE_MPU == 1 )
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
    /* Program the fixed kernel MPU regions - privileged flash, unprivileged
     * flash and privileged SRAM - whose boundaries come from linker script
     * symbols, then enable the MPU with a privileged-only background region. */
    #if defined( __ARMCC_VERSION )
        /* Declaration when these variable are defined in code instead of being
         * exported from linker scripts. */
        extern uint32_t * __privileged_functions_start__;
        extern uint32_t * __privileged_functions_end__;
        extern uint32_t * __FLASH_segment_start__;
        extern uint32_t * __FLASH_segment_end__;
        extern uint32_t * __privileged_sram_start__;
        extern uint32_t * __privileged_sram_end__;
    #else /* if defined( __ARMCC_VERSION ) */
        /* Declaration when these variable are exported from linker scripts. */
        extern uint32_t __privileged_functions_start__[];
        extern uint32_t __privileged_functions_end__[];
        extern uint32_t __FLASH_segment_start__[];
        extern uint32_t __FLASH_segment_end__[];
        extern uint32_t __privileged_sram_start__[];
        extern uint32_t __privileged_sram_end__[];
    #endif /* defined( __ARMCC_VERSION ) */

    /* Ensure that the MPU is present. */
    configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );

    /* Check that the MPU is present.  The runtime check guards the register
     * writes even when configASSERT() is compiled out. */
    if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
    {
        /* Setup privileged flash as Read Only so that privileged tasks can
         * read it but not modify. */
        portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) | /* Base address. */
                             ( portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ) |
                             ( portPRIVILEGED_FLASH_REGION ) );
        portMPU_RASR_REG = ( ( portMPU_REGION_PRIV_RO_UNPRIV_NA ) |
                             ( ( configS_C_B_FLASH & portMPU_RASR_S_C_B_BITMASK ) << portMPU_RASR_S_C_B_LOCATION ) |
                             ( prvGetMPURegionSizeSetting( ( uint32_t ) __privileged_functions_end__ - ( uint32_t ) __privileged_functions_start__ ) ) |
                             ( portMPU_RASR_REGION_ENABLE_BITMASK ) );

        /* Setup unprivileged flash as Read Only by both privileged and
         * unprivileged tasks. All tasks can read it but no-one can modify. */
        portMPU_RBAR_REG = ( ( ( uint32_t ) __FLASH_segment_start__ ) | /* Base address. */
                             ( portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ) |
                             ( portUNPRIVILEGED_FLASH_REGION ) );
        portMPU_RASR_REG = ( ( portMPU_REGION_PRIV_RO_UNPRIV_RO ) |
                             ( ( configS_C_B_FLASH & portMPU_RASR_S_C_B_BITMASK ) << portMPU_RASR_S_C_B_LOCATION ) |
                             ( prvGetMPURegionSizeSetting( ( uint32_t ) __FLASH_segment_end__ - ( uint32_t ) __FLASH_segment_start__ ) ) |
                             ( portMPU_RASR_REGION_ENABLE_BITMASK ) );

        /* Setup RAM containing kernel data for privileged access only.  The
         * region is additionally marked execute-never. */
        portMPU_RBAR_REG = ( ( uint32_t ) __privileged_sram_start__ ) | /* Base address. */
                           ( portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ) |
                           ( portPRIVILEGED_RAM_REGION );
        portMPU_RASR_REG = ( ( portMPU_REGION_PRIV_RW_UNPRIV_NA ) |
                             ( portMPU_REGION_EXECUTE_NEVER ) |
                             ( ( configS_C_B_SRAM & portMPU_RASR_S_C_B_BITMASK ) << portMPU_RASR_S_C_B_LOCATION ) |
                             prvGetMPURegionSizeSetting( ( uint32_t ) __privileged_sram_end__ - ( uint32_t ) __privileged_sram_start__ ) |
                             ( portMPU_RASR_REGION_ENABLE_BITMASK ) );

        /* Enable MPU with privileged background access i.e. unmapped
         * regions have privileged access. */
        portMPU_CTRL_REG |= ( portMPU_CTRL_PRIV_BACKGROUND_ENABLE_BITMASK |
                              portMPU_CTRL_ENABLE_BITMASK );
    }
}
  633. #endif /* configENABLE_MPU */
  634. /*-----------------------------------------------------------*/
  635. void vPortYield( void ) /* PRIVILEGED_FUNCTION */
  636. {
  637. /* Set a PendSV to request a context switch. */
  638. portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
  639. /* Barriers are normally not required but do ensure the code is
  640. * completely within the specified behaviour for the architecture. */
  641. __asm volatile ( "dsb" ::: "memory" );
  642. __asm volatile ( "isb" );
  643. }
  644. /*-----------------------------------------------------------*/
  645. void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
  646. {
  647. portDISABLE_INTERRUPTS();
  648. ulCriticalNesting++;
  649. /* Barriers are normally not required but do ensure the code is
  650. * completely within the specified behaviour for the architecture. */
  651. __asm volatile ( "dsb" ::: "memory" );
  652. __asm volatile ( "isb" );
  653. }
  654. /*-----------------------------------------------------------*/
  655. void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
  656. {
  657. configASSERT( ulCriticalNesting );
  658. ulCriticalNesting--;
  659. if( ulCriticalNesting == 0 )
  660. {
  661. portENABLE_INTERRUPTS();
  662. }
  663. }
  664. /*-----------------------------------------------------------*/
  665. void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
  666. {
  667. uint32_t ulPreviousMask;
  668. ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();
  669. traceISR_ENTER();
  670. {
  671. /* Increment the RTOS tick. */
  672. if( xTaskIncrementTick() != pdFALSE )
  673. {
  674. traceISR_EXIT_TO_SCHEDULER();
  675. /* Pend a context switch. */
  676. portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
  677. }
  678. else
  679. {
  680. traceISR_EXIT();
  681. }
  682. }
  683. portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
  684. }
  685. /*-----------------------------------------------------------*/
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
    /* C portion of the SVCall handler.  Decodes the immediate operand of the
     * SVC instruction that raised this exception and dispatches on it.
     * pulCallerStackAddress points to the hardware-saved exception frame of
     * the code that executed the SVC. */
    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
        #if defined( __ARMCC_VERSION )
            /* Declaration when these variable are defined in code instead of being
             * exported from linker scripts. */
            extern uint32_t * __syscalls_flash_start__;
            extern uint32_t * __syscalls_flash_end__;
        #else
            /* Declaration when these variable are exported from linker scripts. */
            extern uint32_t __syscalls_flash_start__[];
            extern uint32_t __syscalls_flash_end__[];
        #endif /* defined( __ARMCC_VERSION ) */
    #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */

    uint32_t ulPC;
    uint8_t ucSVCNumber;

    /* Register are stored on the stack in the following order - R0, R1, R2, R3,
     * R12, LR, PC, xPSR. */
    ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];

    /* The SVC number is the immediate encoded in the SVC instruction itself,
     * located two bytes before the stacked return address. */
    ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];

    switch( ucSVCNumber )
    {
        case portSVC_START_SCHEDULER:
            /* Setup the context of the first task so that the first task starts
             * executing. */
            vRestoreContextOfFirstTask();
            break;

        #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
            case portSVC_RAISE_PRIVILEGE:

                /* Only raise the privilege, if the svc was raised from any of
                 * the system calls - this stops the application raising the SVC
                 * directly to gain privilege. */
                if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
                    ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
                {
                    vRaisePrivilege();
                }
                break;
        #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */

        #if ( configENABLE_MPU == 1 )
            case portSVC_YIELD:
                vPortYield();
                break;
        #endif /* configENABLE_MPU == 1 */

        default:
            /* Incorrect SVC call. */
            configASSERT( pdFALSE );
    }
}
  734. /*-----------------------------------------------------------*/
  735. #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallEnter( uint32_t * pulTaskStack,
                       uint32_t ulLR,
                       uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
    /* Called from the SVC handler when a task enters a kernel system call
     * (MPU wrappers v2).  After validating the request, this switches the
     * task onto its dedicated system call stack, redirects the return PC to
     * the privileged implementation of the requested API, and raises
     * privilege for the duration of the call.
     *
     * pulTaskStack       - task stack holding the hardware-saved frame.
     * ulLR               - EXC_RETURN value (not used in this function).
     * ucSystemCallNumber - index into uxSystemCallImplementations, already
     *                      range-checked by the assembly SVC handler. */
    extern TaskHandle_t pxCurrentTCB;
    extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
    xMPU_SETTINGS * pxMpuSettings;
    uint32_t * pulSystemCallStack;
    uint32_t ulSystemCallLocation, i;

    /* Hardware Saved Stack Frame Size upon Exception entry:
     * Basic frame (R0-R3, R12, LR, PC, and xPSR) = 8 words.
     */
    const uint32_t ulHardwareSavedExceptionFrameSize = 8;

    #if defined( __ARMCC_VERSION )
        /* Declaration when these variable are defined in code instead of being
         * exported from linker scripts. */
        extern uint32_t * __syscalls_flash_start__;
        extern uint32_t * __syscalls_flash_end__;
    #else
        /* Declaration when these variable are exported from linker scripts. */
        extern uint32_t __syscalls_flash_start__[];
        extern uint32_t __syscalls_flash_end__[];
    #endif /* #if defined( __ARMCC_VERSION ) */

    /* PC at the time the SVC was raised. */
    ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
    pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );

    /* Checks:
     * 1. SVC is raised from the system call section (i.e. application is
     *    not raising SVC directly).
     * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
     *    it is non-NULL only during the execution of a system call (i.e.
     *    between system call enter and exit).
     * 3. System call is not for a kernel API disabled by the configuration
     *    in FreeRTOSConfig.h.
     * 4. We do not need to check that ucSystemCallNumber is within range
     *    because the assembly SVC handler checks that before calling
     *    this function.
     */
    if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
        ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
        ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
        ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
    {
        pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;

        /* Make space on the system call stack for the stack frame. */
        pulSystemCallStack = pulSystemCallStack - ulHardwareSavedExceptionFrameSize;

        /* Copy the stack frame. */
        for( i = 0; i < ulHardwareSavedExceptionFrameSize; i++ )
        {
            pulSystemCallStack[ i ] = pulTaskStack[ i ];
        }

        /* Store the value of the Link Register before the SVC was raised.
         * It contains the address of the caller of the System Call entry
         * point (i.e. the caller of the MPU_<API>). We need to restore it
         * when we exit from the system call. */
        pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];

        /* Use the pulSystemCallStack in thread mode. */
        __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );

        /* Start executing the system call upon returning from this handler. */
        pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];

        /* Raise a request to exit from the system call upon finishing the
         * system call. */
        pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;

        /* Remember the location where we should copy the stack frame when we exit from
         * the system call. */
        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulHardwareSavedExceptionFrameSize;

        /* Record if the hardware used padding to force the stack pointer
         * to be double word aligned. */
        if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
        {
            pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
        }
        else
        {
            pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
        }

        /* We ensure in pxPortInitialiseStack that the system call stack is
         * double word aligned and therefore, there is no need of padding.
         * Clear the bit[9] of stacked xPSR. */
        pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );

        /* Raise the privilege for the duration of the system call. */
        __asm volatile
        (
            " .syntax unified \n"
            " mrs r0, control \n" /* Obtain current control value. */
            " movs r1, #1     \n" /* r1 = 1. */
            " bics r0, r1     \n" /* Clear nPRIV bit. */
            " msr control, r0 \n" /* Write back new control value. */
            ::: "r0", "r1", "memory"
        );
    }
}
  827. #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
  828. /*-----------------------------------------------------------*/
  829. #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
    /* Raise the SVC that exits a system call.  vSystemCallEnter() plants this
     * function's address in the LR slot of the system call stack frame, so it
     * executes when the privileged MPU_<API> implementation returns.  Declared
     * naked (per the attribute comment above) - presumably so no
     * prologue/epilogue code disturbs the registers holding the system call's
     * return value; confirm against the function's declaration. */
    __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
  834. #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
  835. /*-----------------------------------------------------------*/
  836. #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
void vSystemCallExit( uint32_t * pulSystemCallStack,
                      uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
    /* Called from the SVC handler when a system call finishes (MPU wrappers
     * v2).  Moves the task back onto its own stack, restores the return
     * address saved by vSystemCallEnter(), and drops privilege before
     * returning to thread mode.
     *
     * pulSystemCallStack - system call stack holding the hardware-saved frame.
     * ulLR               - EXC_RETURN value (not used in this function). */
    extern TaskHandle_t pxCurrentTCB;
    xMPU_SETTINGS * pxMpuSettings;
    uint32_t * pulTaskStack;
    uint32_t ulSystemCallLocation, i;

    /* Hardware Saved Stack Frame Size upon Exception entry:
     * Basic frame (R0-R3, R12, LR, PC, and xPSR) = 8 words.
     */
    const uint32_t ulHardwareSavedExceptionFrameSize = 8;

    #if defined( __ARMCC_VERSION )
        /* Declaration when these variable are defined in code instead of being
         * exported from linker scripts. */
        extern uint32_t * __privileged_functions_start__;
        extern uint32_t * __privileged_functions_end__;
    #else
        /* Declaration when these variable are exported from linker scripts. */
        extern uint32_t __privileged_functions_start__[];
        extern uint32_t __privileged_functions_end__[];
    #endif /* #if defined( __ARMCC_VERSION ) */

    /* PC at the time the exit SVC was raised. */
    ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
    pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );

    /* Checks:
     * 1. SVC is raised from the privileged code (i.e. application is not
     *    raising SVC directly). This SVC is only raised from
     *    vRequestSystemCallExit which is in the privileged code section.
     * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
     *    this means that we previously entered a system call and the
     *    application is not attempting to exit without entering a system
     *    call.
     */
    if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
        ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
        ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
    {
        pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;

        /* Make space on the task stack for the stack frame. */
        pulTaskStack = pulTaskStack - ulHardwareSavedExceptionFrameSize;

        /* Copy the stack frame. */
        for( i = 0; i < ulHardwareSavedExceptionFrameSize; i++ )
        {
            pulTaskStack[ i ] = pulSystemCallStack[ i ];
        }

        /* Use the pulTaskStack in thread mode. */
        __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );

        /* Return to the caller of the System Call entry point (i.e. the
         * caller of the MPU_<API>). */
        pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;

        /* Ensure that LR has a valid value.*/
        pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;

        /* If the hardware used padding to force the stack pointer
         * to be double word aligned, set the stacked xPSR bit[9],
         * otherwise clear it.  The padding state was recorded by
         * vSystemCallEnter(). */
        if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
        {
            pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
        }
        else
        {
            pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
        }

        /* This is not NULL only for the duration of the system call. */
        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;

        /* Drop the privilege before returning to the thread mode. */
        __asm volatile
        (
            " .syntax unified \n"
            " mrs r0, control \n" /* Obtain current control value. */
            " movs r1, #1     \n" /* r1 = 1. */
            " orrs r0, r1     \n" /* Set nPRIV bit. */
            " msr control, r0 \n" /* Write back new control value. */
            ::: "r0", "r1", "memory"
        );
    }
}
  913. #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
  914. /*-----------------------------------------------------------*/
  915. #if ( configENABLE_MPU == 1 )
  916. BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
  917. {
  918. BaseType_t xTaskIsPrivileged = pdFALSE;
  919. const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
  920. if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
  921. {
  922. xTaskIsPrivileged = pdTRUE;
  923. }
  924. return xTaskIsPrivileged;
  925. }
  926. #endif /* configENABLE_MPU == 1 */
  927. /*-----------------------------------------------------------*/
  928. #if ( configENABLE_MPU == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
                                     TaskFunction_t pxCode,
                                     void * pvParameters,
                                     BaseType_t xRunPrivileged,
                                     xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
    /* Seed the initial task context.  In the MPU build the context lives in
     * the task's MPU settings rather than on the task stack.  ulContext slots:
     *   [ 0..7 ]  - r4-r11.
     *   [ 8..15 ] - r0-r3, r12, LR, PC, xPSR (the hardware-saved frame).
     *   [ 16 ]    - PSP, pointing at where the hardware-saved frame would sit.
     *   [ 17 ]    - CONTROL, selecting privileged or unprivileged thread mode.
     *   [ 18 ]    - EXC_RETURN value.
     * Registers are preloaded with recognisable values (r4 = 0x04040404 etc.)
     * to aid debugging. */
    xMPUSettings->ulContext[ 0 ] = 0x04040404;                            /* r4. */
    xMPUSettings->ulContext[ 1 ] = 0x05050505;                            /* r5. */
    xMPUSettings->ulContext[ 2 ] = 0x06060606;                            /* r6. */
    xMPUSettings->ulContext[ 3 ] = 0x07070707;                            /* r7. */
    xMPUSettings->ulContext[ 4 ] = 0x08080808;                            /* r8. */
    xMPUSettings->ulContext[ 5 ] = 0x09090909;                            /* r9. */
    xMPUSettings->ulContext[ 6 ] = 0x10101010;                            /* r10. */
    xMPUSettings->ulContext[ 7 ] = 0x11111111;                            /* r11. */
    xMPUSettings->ulContext[ 8 ] = ( uint32_t ) pvParameters;             /* r0 - the task's argument. */
    xMPUSettings->ulContext[ 9 ] = 0x01010101;                            /* r1. */
    xMPUSettings->ulContext[ 10 ] = 0x02020202;                           /* r2. */
    xMPUSettings->ulContext[ 11 ] = 0x03030303;                           /* r3. */
    xMPUSettings->ulContext[ 12 ] = 0x12121212;                           /* r12. */
    xMPUSettings->ulContext[ 13 ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR - where the task function "returns" to. */
    xMPUSettings->ulContext[ 14 ] = ( uint32_t ) pxCode;                  /* PC - the task entry point. */
    xMPUSettings->ulContext[ 15 ] = portINITIAL_XPSR;                     /* xPSR. */
    xMPUSettings->ulContext[ 16 ] = ( uint32_t ) ( pxTopOfStack - 8 );    /* PSP with the hardware saved stack. */

    /* CONTROL selects the task's initial privilege level; the flag is also
     * mirrored in ulTaskFlags for later queries. */
    if( xRunPrivileged == pdTRUE )
    {
        xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
        xMPUSettings->ulContext[ 17 ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED;   /* CONTROL. */
    }
    else
    {
        xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
        xMPUSettings->ulContext[ 17 ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
    }

    xMPUSettings->ulContext[ 18 ] = portINITIAL_EXC_RETURN;               /* LR (EXC_RETURN). */

    #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
    {
        /* Ensure that the system call stack is double word aligned. */
        xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
        xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
                                                                                ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );

        /* This is not NULL only for the duration of a system call. */
        xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
    }
    #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */

    /* Address one past the saved context - presumably consumed by the context
     * restore assembly as the top of the saved context; confirm against the
     * port's assembly routines. */
    return &( xMPUSettings->ulContext[ 19 ] );
}
  975. #else /* configENABLE_MPU */
  976. StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
  977. TaskFunction_t pxCode,
  978. void * pvParameters ) /* PRIVILEGED_FUNCTION */
  979. {
  980. /* Simulate the stack frame as it would be created by a context switch
  981. * interrupt. */
  982. #if ( portPRELOAD_REGISTERS == 0 )
  983. {
  984. pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
  985. *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
  986. pxTopOfStack--;
  987. *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
  988. pxTopOfStack--;
  989. *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
  990. pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
  991. *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
  992. pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */
  993. *pxTopOfStack = portINITIAL_EXC_RETURN;
  994. }
  995. #else /* portPRELOAD_REGISTERS */
  996. {
  997. pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
  998. *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */
  999. pxTopOfStack--;
  1000. *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */
  1001. pxTopOfStack--;
  1002. *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
  1003. pxTopOfStack--;
  1004. *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */
  1005. pxTopOfStack--;
  1006. *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */
  1007. pxTopOfStack--;
  1008. *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */
  1009. pxTopOfStack--;
  1010. *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */
  1011. pxTopOfStack--;
  1012. *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */
  1013. pxTopOfStack--;
  1014. *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */
  1015. pxTopOfStack--;
  1016. *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. */
  1017. pxTopOfStack--;
  1018. *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */
  1019. pxTopOfStack--;
  1020. *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */
  1021. pxTopOfStack--;
  1022. *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */
  1023. pxTopOfStack--;
  1024. *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */
  1025. pxTopOfStack--;
  1026. *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */
  1027. pxTopOfStack--;
  1028. *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */
  1029. pxTopOfStack--;
  1030. *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */
  1031. }
  1032. #endif /* portPRELOAD_REGISTERS */
  1033. return pxTopOfStack;
  1034. }
  1035. #endif /* configENABLE_MPU */
  1036. /*-----------------------------------------------------------*/
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
{
    /* Configure interrupt priorities, the MPU (if enabled) and the tick
     * timer, then start the first task.  This function does not return on
     * success. */

    /* An application can install FreeRTOS interrupt handlers in one of the
     * following ways:
     * 1. Direct Routing - Install the functions SVC_Handler and PendSV_Handler
     *    for SVCall and PendSV interrupts respectively.
     * 2. Indirect Routing - Install separate handlers for SVCall and PendSV
     *    interrupts and route program control from those handlers to
     *    SVC_Handler and PendSV_Handler functions.
     *
     * Applications that use Indirect Routing must set
     * configCHECK_HANDLER_INSTALLATION to 0 in their FreeRTOSConfig.h. Direct
     * routing, which is validated here when configCHECK_HANDLER_INSTALLATION
     * is 1, should be preferred when possible. */
    #if ( configCHECK_HANDLER_INSTALLATION == 1 )
    {
        const portISR_t * const pxVectorTable = portSCB_VTOR_REG;

        /* Validate that the application has correctly installed the FreeRTOS
         * handlers for SVCall and PendSV interrupts. We do not check the
         * installation of the SysTick handler because the application may
         * choose to drive the RTOS tick using a timer other than the SysTick
         * timer by overriding the weak function vPortSetupTimerInterrupt().
         *
         * Assertion failures here indicate incorrect installation of the
         * FreeRTOS handlers. For help installing the FreeRTOS handlers, see
         * https://www.freertos.org/Why-FreeRTOS/FAQs.
         *
         * Systems with a configurable address for the interrupt vector table
         * can also encounter assertion failures or even system faults here if
         * VTOR is not set correctly to point to the application's vector table. */
        configASSERT( pxVectorTable[ portVECTOR_INDEX_SVC ] == SVC_Handler );
        configASSERT( pxVectorTable[ portVECTOR_INDEX_PENDSV ] == PendSV_Handler );
    }
    #endif /* configCHECK_HANDLER_INSTALLATION */

    /* Make PendSV and SysTick the lowest priority interrupts, and make SVCall
     * the highest priority (SHPR2 = 0 gives SVCall priority 0). */
    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
    portNVIC_SHPR2_REG = 0;

    #if ( configENABLE_MPU == 1 )
    {
        /* Setup the Memory Protection Unit (MPU). */
        prvSetupMPU();
    }
    #endif /* configENABLE_MPU */

    /* Start the timer that generates the tick ISR. Interrupts are disabled
     * here already. */
    vPortSetupTimerInterrupt();

    /* Initialize the critical nesting count ready for the first task. */
    ulCriticalNesting = 0;

    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
    {
        /* Access-control checks consult this flag - see
         * xPortIsAuthorizedToAccessBuffer(). */
        xSchedulerRunning = pdTRUE;
    }
    #endif

    /* Start the first task. */
    vStartFirstTask();

    /* Should never get here as the tasks will now be executing. Call the task
     * exit error function to prevent compiler warnings about a static function
     * not being called in the case that the application writer overrides this
     * functionality by defining configTASK_RETURN_ADDRESS. Call
     * vTaskSwitchContext() so link time optimization does not remove the
     * symbol. */
    vTaskSwitchContext();
    prvTaskExitError();

    /* Should not get here. */
    return 0;
}
  1105. /*-----------------------------------------------------------*/
void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
{
    /* Not implemented in ports where there is nothing to return to.
     * Artificially force an assert - ulCriticalNesting is never 1000UL, so
     * this always fires when configASSERT() is defined. */
    configASSERT( ulCriticalNesting == 1000UL );
}
  1112. /*-----------------------------------------------------------*/
  1113. #if ( configENABLE_MPU == 1 )
void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
                                const struct xMEMORY_REGION * const xRegions,
                                StackType_t * pxBottomOfStack,
                                configSTACK_DEPTH_TYPE uxStackDepth )
{
    /* Translate the generic memory region descriptions in xRegions into the
     * MPU RBAR/RASR values stored in the task's MPU settings.  Slot 0 of
     * xRegionsSettings always describes the stack region; slots 1..N hold the
     * user-configurable regions. */
    #if defined( __ARMCC_VERSION )
        /* Declaration when these variable are defined in code instead of being
         * exported from linker scripts. */
        extern uint32_t * __SRAM_segment_start__;
        extern uint32_t * __SRAM_segment_end__;
        extern uint32_t * __privileged_sram_start__;
        extern uint32_t * __privileged_sram_end__;
    #else
        /* Declaration when these variable are exported from linker scripts. */
        extern uint32_t __SRAM_segment_start__[];
        extern uint32_t __SRAM_segment_end__[];
        extern uint32_t __privileged_sram_start__[];
        extern uint32_t __privileged_sram_end__[];
    #endif /* defined( __ARMCC_VERSION ) */
    int32_t lIndex;
    uint32_t ul;

    if( xRegions == NULL )
    {
        /* No MPU regions are specified so allow access to all RAM.  The
         * region is execute-never. */
        xMPUSettings->xRegionsSettings[ 0 ].ulRBAR =
            ( ( ( uint32_t ) __SRAM_segment_start__ ) | /* Base address. */
              ( portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ) |
              ( portSTACK_REGION ) );                   /* Region number. */

        xMPUSettings->xRegionsSettings[ 0 ].ulRASR =
            ( ( portMPU_REGION_PRIV_RW_UNPRIV_RW ) |
              ( portMPU_REGION_EXECUTE_NEVER ) |
              ( ( configS_C_B_SRAM & portMPU_RASR_S_C_B_BITMASK ) << portMPU_RASR_S_C_B_LOCATION ) |
              ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) |
              ( portMPU_RASR_REGION_ENABLE_BITMASK ) );

        /* Invalidate user configurable regions - RASR of 0 leaves the region
         * disabled. */
        for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ )
        {
            xMPUSettings->xRegionsSettings[ ul ].ulRBAR = ( ( ul - 1UL ) | portMPU_RBAR_REGION_NUMBER_VALID_BITMASK );
            xMPUSettings->xRegionsSettings[ ul ].ulRASR = 0UL;
        }
    }
    else
    {
        /* This function is called automatically when the task is created - in
         * which case the stack region parameters will be valid. At all other
         * times the stack parameters will not be valid and it is assumed that the
         * stack region has already been configured. */
        if( uxStackDepth > 0 )
        {
            /* Define the region that allows access to the stack. */
            xMPUSettings->xRegionsSettings[ 0 ].ulRBAR =
                ( ( ( uint32_t ) pxBottomOfStack ) |
                  ( portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ) |
                  ( portSTACK_REGION ) ); /* Region number. */

            xMPUSettings->xRegionsSettings[ 0 ].ulRASR =
                ( ( portMPU_REGION_PRIV_RW_UNPRIV_RW ) |
                  ( portMPU_REGION_EXECUTE_NEVER ) |
                  ( prvGetMPURegionSizeSetting( uxStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) |
                  ( ( configS_C_B_SRAM & portMPU_RASR_S_C_B_BITMASK ) << portMPU_RASR_S_C_B_LOCATION ) |
                  ( portMPU_RASR_REGION_ENABLE_BITMASK ) );
        }

        lIndex = 0;

        for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ )
        {
            if( ( xRegions[ lIndex ] ).ulLengthInBytes > 0UL )
            {
                /* Translate the generic region definition contained in
                 * xRegions into the CM0+ specific MPU settings that are then
                 * stored in xMPUSettings. */
                xMPUSettings->xRegionsSettings[ ul ].ulRBAR =
                    ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) |
                    ( portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ) |
                    ( ul - 1UL ); /* Region number. */

                xMPUSettings->xRegionsSettings[ ul ].ulRASR =
                    ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) |
                    ( xRegions[ lIndex ].ulParameters ) |
                    ( portMPU_RASR_REGION_ENABLE_BITMASK );
            }
            else
            {
                /* Invalidate the region. */
                xMPUSettings->xRegionsSettings[ ul ].ulRBAR = ( ( ul - 1UL ) | portMPU_RBAR_REGION_NUMBER_VALID_BITMASK );
                xMPUSettings->xRegionsSettings[ ul ].ulRASR = 0UL;
            }

            lIndex++;
        }
    }
}
  1202. #endif /* configENABLE_MPU */
  1203. /*-----------------------------------------------------------*/
  1204. #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
  1205. BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
  1206. uint32_t ulBufferLength,
  1207. uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
  1208. {
  1209. uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
  1210. uint32_t ulRegionStart, ulRegionSize, ulRegionEnd;
  1211. uint32_t ulMPURegionAccessPermissions;
  1212. BaseType_t xAccessGranted = pdFALSE;
  1213. const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
  1214. if( xSchedulerRunning == pdFALSE )
  1215. {
  1216. /* Grant access to all the kernel objects before the scheduler
  1217. * is started. It is necessary because there is no task running
  1218. * yet and therefore, we cannot use the permissions of any
  1219. * task. */
  1220. xAccessGranted = pdTRUE;
  1221. }
  1222. else if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
  1223. {
  1224. xAccessGranted = pdTRUE;
  1225. }
  1226. else
  1227. {
  1228. if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
  1229. {
  1230. ulBufferStartAddress = ( uint32_t ) pvBuffer;
  1231. ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
  1232. for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
  1233. {
  1234. /* Is the MPU region enabled? */
  1235. if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRASR &
  1236. portMPU_RASR_REGION_ENABLE_BITMASK ) == portMPU_RASR_REGION_ENABLE_BITMASK )
  1237. {
  1238. ulRegionStart = portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR );
  1239. ulRegionSize = portEXTRACT_REGION_SIZE_FROM_RASR( xTaskMpuSettings->xRegionsSettings[ i ].ulRASR );
  1240. ulRegionEnd = ulRegionStart + ulRegionSize;
  1241. if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
  1242. ulRegionStart,
  1243. ulRegionEnd ) &&
  1244. portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
  1245. ulRegionStart,
  1246. ulRegionEnd ) )
  1247. {
  1248. ulMPURegionAccessPermissions = xTaskMpuSettings->xRegionsSettings[ i ].ulRASR &
  1249. portMPU_RASR_AP_BITMASK;
  1250. if( ulAccessRequested == tskMPU_READ_PERMISSION ) /* RO. */
  1251. {
  1252. if( ( ulMPURegionAccessPermissions == portMPU_REGION_PRIV_RW_UNPRIV_RO ) ||
  1253. ( ulMPURegionAccessPermissions == portMPU_REGION_PRIV_RO_UNPRIV_RO ) ||
  1254. ( ulMPURegionAccessPermissions == portMPU_REGION_PRIV_RW_UNPRIV_RW ) )
  1255. {
  1256. xAccessGranted = pdTRUE;
  1257. break;
  1258. }
  1259. }
  1260. else if( ( ulAccessRequested & tskMPU_WRITE_PERMISSION ) != 0UL ) /* W or RW. */
  1261. {
  1262. if( ulMPURegionAccessPermissions == portMPU_REGION_PRIV_RW_UNPRIV_RW )
  1263. {
  1264. xAccessGranted = pdTRUE;
  1265. break;
  1266. }
  1267. }
  1268. }
  1269. }
  1270. }
  1271. }
  1272. }
  1273. return xAccessGranted;
  1274. }
  1275. #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
  1276. /*-----------------------------------------------------------*/
  1277. BaseType_t xPortIsInsideInterrupt( void )
  1278. {
  1279. uint32_t ulCurrentInterrupt;
  1280. BaseType_t xReturn;
  1281. /* Obtain the number of the currently executing interrupt. Interrupt Program
  1282. * Status Register (IPSR) holds the exception number of the currently-executing
  1283. * exception or zero for Thread mode.*/
  1284. __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
  1285. if( ulCurrentInterrupt == 0 )
  1286. {
  1287. xReturn = pdFALSE;
  1288. }
  1289. else
  1290. {
  1291. xReturn = pdTRUE;
  1292. }
  1293. return xReturn;
  1294. }
  1295. /*-----------------------------------------------------------*/
  1296. #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
  1297. void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
  1298. int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
  1299. {
  1300. uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
  1301. xMPU_SETTINGS * xTaskMpuSettings;
  1302. ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
  1303. ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
  1304. xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
  1305. xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
  1306. }
  1307. #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
  1308. /*-----------------------------------------------------------*/
  1309. #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
  1310. void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
  1311. int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
  1312. {
  1313. uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
  1314. xMPU_SETTINGS * xTaskMpuSettings;
  1315. ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
  1316. ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
  1317. xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
  1318. xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
  1319. }
  1320. #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
  1321. /*-----------------------------------------------------------*/
  1322. #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
  1323. #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
  1324. BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
  1325. {
  1326. uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
  1327. BaseType_t xAccessGranted = pdFALSE;
  1328. const xMPU_SETTINGS * xTaskMpuSettings;
  1329. if( xSchedulerRunning == pdFALSE )
  1330. {
  1331. /* Grant access to all the kernel objects before the scheduler
  1332. * is started. It is necessary because there is no task running
  1333. * yet and therefore, we cannot use the permissions of any
  1334. * task. */
  1335. xAccessGranted = pdTRUE;
  1336. }
  1337. else
  1338. {
  1339. xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
  1340. ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
  1341. ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
  1342. if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
  1343. {
  1344. xAccessGranted = pdTRUE;
  1345. }
  1346. else
  1347. {
  1348. if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
  1349. {
  1350. xAccessGranted = pdTRUE;
  1351. }
  1352. }
  1353. }
  1354. return xAccessGranted;
  1355. }
  1356. #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
  1357. BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
  1358. {
  1359. ( void ) lInternalIndexOfKernelObject;
  1360. /* If Access Control List feature is not used, all the tasks have
  1361. * access to all the kernel objects. */
  1362. return pdTRUE;
  1363. }
  1364. #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
  1365. #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
  1366. /*-----------------------------------------------------------*/