/* portASM.S */
/*
 * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * Copyright 2025-2026 Arm Limited and/or its affiliates
 * <open-source-office@arm.com>
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */

/*
 * This file is tailored for ARM Cortex-R82 with SMP enabled.
 * It includes macros and functions for saving/restoring task context,
 * handling interrupts, and supporting multi-core operations.
 */
#include "FreeRTOSConfig.h"
#include "portmacro.h"

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION
 * is defined correctly and privileged functions are placed in correct sections. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

/* System call numbers includes. */
#include "mpu_syscall_numbers.h"

/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the
 * header files. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE

.text

/* Kernel variables and functions referenced by this port layer. */
#if ( configNUMBER_OF_CORES == 1 )
.extern pxCurrentTCB
.extern ullCriticalNesting
.extern ullPortInterruptNesting
#else /* #if ( configNUMBER_OF_CORES == 1 ) */
/* SMP builds keep one entry per core, indexed below by MPIDR_EL1.Aff0. */
.extern pxCurrentTCBs
.extern ullCriticalNestings
.extern ullPortInterruptNestings
#endif
.extern vTaskSwitchContext
.extern vApplicationIRQHandler
.extern ullPortTaskHasFPUContext
.extern ullPortYieldRequired
.extern _freertos_vector_table
#if ( configENABLE_MPU == 1 )
.extern xPortIsTaskPrivileged
.extern vSystemCallEnter
.extern vSystemCallExit
.extern vRequestSystemCallExit
.extern uxSystemCallImplementations
#endif /* #if ( configENABLE_MPU == 1 ) */

/* Entry points exported by this file. */
.global FreeRTOS_IRQ_Handler
.global FreeRTOS_SWI_Handler
.global vPortSaveTaskContext
.global vPortRestoreTaskContext
.macro portLOAD_MPU_REGIONS_ADDRESSES
/* Reprogram the EL1 MPU regions [ portSTACK_REGION .. configTOTAL_MPU_REGIONS - 1 ]
 * from the table pointed to by X0 (pairs of saved PRBAR_EL1/PRLAR_EL1 values).
 * In:       X0 = pointer to the task's saved region table.
 * Clobbers: X0 (advanced past the table), X1 - X4, PRSELR_EL1, flags.
 * The loop body never runs if portSTACK_REGION > configTOTAL_MPU_REGIONS - 1. */
MOV X3, # portSTACK_REGION /* i = portSTACK_REGION. Task's first programmed region is its stack region as the first four MPU regions are already programmed. */
MOV X4, # configTOTAL_MPU_REGIONS - 1 /* Upper limit = configTOTAL_MPU_REGIONS - 1 */
1 :
CMP X3, X4 /* Compare i with ( configTOTAL_MPU_REGIONS - 1 ) */
B.GT 2f /* if i > ( configTOTAL_MPU_REGIONS - 1 ), exit loop */
MSR PRSELR_EL1, X3 /* Select MPU region i. */
ISB /* Ensure PRSELR selection takes effect before PRBAR/PRLAR accesses. */
LDP X1, X2, [ X0 ], # 0x10 /* Retrieve saved ullPrbarEl1 and ullPrlarEl1; advance the table pointer. */
MSR PRBAR_EL1, X1 /* Program PRBAR_EL1 (region base + attributes). */
MSR PRLAR_EL1, X2 /* Program PRLAR_EL1 (region limit + enable). */
ADD X3, X3, # 1 /* i++ */
B 1b
2 :
DSB SY /* Ensure all region writes have completed ... */
ISB /* ... and are visible to subsequent instruction fetch/execution. */
.endm
.macro portSTORE_MPU_REGIONS_ADDRESSES
/* Read back the EL1 MPU regions [ portSTACK_REGION .. configTOTAL_MPU_REGIONS - 1 ]
 * into the table pointed to by X0 (pairs of PRBAR_EL1/PRLAR_EL1 values),
 * i.e. the inverse of portLOAD_MPU_REGIONS_ADDRESSES.
 * In:       X0 = pointer to the task's region save area.
 * Clobbers: X0 (advanced past the table), X1 - X4, PRSELR_EL1, flags. */
MOV X3, # portSTACK_REGION /* i = portSTACK_REGION. Task's first programmed region is its stack region as the first four MPU regions are already programmed. */
MOV X4, # configTOTAL_MPU_REGIONS - 1 /* Upper limit = configTOTAL_MPU_REGIONS - 1 */
1 :
CMP X3, X4 /* Compare i with ( configTOTAL_MPU_REGIONS - 1 ) */
B.GT 2f /* if i > ( configTOTAL_MPU_REGIONS - 1 ), exit loop */
MSR PRSELR_EL1, X3 /* Select MPU region i. */
ISB /* Ensure PRSELR selection takes effect before PRBAR/PRLAR accesses. */
MRS X1, PRBAR_EL1 /* Retrieve PRBAR_EL1. */
MRS X2, PRLAR_EL1 /* Retrieve PRLAR_EL1. */
STP X1, X2, [ X0 ], # 0x10 /* Store the pair in the save area; advance the table pointer. */
ADD X3, X3, # 1 /* i++ */
B 1b
2 :
/* No additional barrier required after reading PR* registers. */
.endm
  106. #endif /* #if ( configENABLE_MPU == 1 ) */
  107. /*-----------------------------------------------------------*/
.macro savefuncontextgpregs
/* Push the AAPCS64 caller-saved function-call context: X0 - X18, the frame
 * pointer (X29) and the link register (X30).  X19 - X28 are callee-saved under
 * AAPCS64 and are therefore preserved across any C function (e.g.
 * xPortIsTaskPrivileged) called while this frame is live.
 * Frame size: 11 x 16 = 176 bytes; the layout must exactly mirror
 * restorefuncontextgpregs / restorefuncontextgpregexceptx0. */
STP X0, X1, [ SP, # - 0x10 ] !
STP X2, X3, [ SP, # - 0x10 ] !
STP X4, X5, [ SP, # - 0x10 ] !
STP X6, X7, [ SP, # - 0x10 ] !
STP X8, X9, [ SP, # - 0x10 ] !
STP X10, X11, [ SP, # - 0x10 ] !
STP X12, X13, [ SP, # - 0x10 ] !
STP X14, X15, [ SP, # - 0x10 ] !
STP X16, X17, [ SP, # - 0x10 ] !
STP X18, X29, [ SP, # - 0x10 ] !
STR X30, [ SP, # - 0x10 ] ! /* Only 8 bytes used; 16-byte decrement keeps SP 16-byte aligned. */
.endm
  122. /*-----------------------------------------------------------*/
.macro savesyscallcontextgpregs
/* Push the general-purpose context preserved around a system call:
 * X4 - X18 and the frame pointer (X29).  X0 - X3 and X30 are deliberately NOT
 * saved by this macro (NOTE(review): presumably they carry system-call
 * arguments/return values and are preserved separately by the SVC paths -
 * confirm against the system-call enter/exit handlers).
 * Frame size: 8 x 16 = 128 bytes; the layout must exactly mirror
 * restoresyscallcontextgpregs. */
STP X4, X5, [ SP, # - 0x10 ] !
STP X6, X7, [ SP, # - 0x10 ] !
STP X8, X9, [ SP, # - 0x10 ] !
STP X10, X11, [ SP, # - 0x10 ] !
STP X12, X13, [ SP, # - 0x10 ] !
STP X14, X15, [ SP, # - 0x10 ] !
STP X16, X17, [ SP, # - 0x10 ] !
STP X18, X29, [ SP, # - 0x10 ] !
.endm
  134. /*-----------------------------------------------------------*/
.macro restorefuncontextgpregs
/* Pop the frame pushed by savefuncontextgpregs, restoring X30, X29 and
 * X0 - X18 in exact reverse order of the stores. */
LDR X30, [ SP ], # 0x10 /* X30 was stored alone in a padded 16-byte slot. */
LDP X18, X29, [ SP ], # 0x10
LDP X16, X17, [ SP ], # 0x10
LDP X14, X15, [ SP ], # 0x10
LDP X12, X13, [ SP ], # 0x10
LDP X10, X11, [ SP ], # 0x10
LDP X8, X9, [ SP ], # 0x10
LDP X6, X7, [ SP ], # 0x10
LDP X4, X5, [ SP ], # 0x10
LDP X2, X3, [ SP ], # 0x10
LDP X0, X1, [ SP ], # 0x10
.endm
  149. /*-----------------------------------------------------------*/
.macro restorefuncontextgpregexceptx0
/* Pop the frame pushed by savefuncontextgpregs, but discard the saved X0 by
 * loading it into XZR.  Used when the current X0 holds a return value (e.g.
 * from xPortIsTaskPrivileged) that must survive the restore. */
LDR X30, [ SP ], # 0x10 /* X30 was stored alone in a padded 16-byte slot. */
LDP X18, X29, [ SP ], # 0x10
LDP X16, X17, [ SP ], # 0x10
LDP X14, X15, [ SP ], # 0x10
LDP X12, X13, [ SP ], # 0x10
LDP X10, X11, [ SP ], # 0x10
LDP X8, X9, [ SP ], # 0x10
LDP X6, X7, [ SP ], # 0x10
LDP X4, X5, [ SP ], # 0x10
LDP X2, X3, [ SP ], # 0x10
LDP XZR, X1, [ SP ], # 0x10 /* Saved X0 is loaded into XZR (discarded); X1 restored. */
.endm
  164. /*-----------------------------------------------------------*/
.macro restoresyscallcontextgpregs
/* Pop the frame pushed by savesyscallcontextgpregs, restoring X29 and
 * X4 - X18 in exact reverse order of the stores.  X0 - X3 and X30 are
 * intentionally untouched (see savesyscallcontextgpregs). */
LDP X18, X29, [ SP ], # 0x10
LDP X16, X17, [ SP ], # 0x10
LDP X14, X15, [ SP ], # 0x10
LDP X12, X13, [ SP ], # 0x10
LDP X10, X11, [ SP ], # 0x10
LDP X8, X9, [ SP ], # 0x10
LDP X6, X7, [ SP ], # 0x10
LDP X4, X5, [ SP ], # 0x10
.endm
  176. /*-----------------------------------------------------------*/
.macro saveallgpregisters
/* Push the complete general-purpose register file X0 - X30 onto the stack.
 * X30 is paired with XZR so every store is a 16-byte pair and SP stays
 * 16-byte aligned; total frame size is 16 x 16 = 256 bytes.
 * Layout must exactly mirror restoreallgpregisters. */
STP X0, X1, [ SP, # - 0x10 ] !
STP X2, X3, [ SP, # - 0x10 ] !
STP X4, X5, [ SP, # - 0x10 ] !
STP X6, X7, [ SP, # - 0x10 ] !
STP X8, X9, [ SP, # - 0x10 ] !
STP X10, X11, [ SP, # - 0x10 ] !
STP X12, X13, [ SP, # - 0x10 ] !
STP X14, X15, [ SP, # - 0x10 ] !
STP X16, X17, [ SP, # - 0x10 ] !
STP X18, X19, [ SP, # - 0x10 ] !
STP X20, X21, [ SP, # - 0x10 ] !
STP X22, X23, [ SP, # - 0x10 ] !
STP X24, X25, [ SP, # - 0x10 ] !
STP X26, X27, [ SP, # - 0x10 ] !
STP X28, X29, [ SP, # - 0x10 ] !
STP X30, XZR, [ SP, # - 0x10 ] ! /* XZR stores zero as 8 bytes of alignment padding. */
.endm
  196. /*-----------------------------------------------------------*/
.macro restoreallgpregisters
/* Pop the frame pushed by saveallgpregisters, restoring X0 - X30 in exact
 * reverse order of the stores. */
LDP X30, XZR, [ SP ], # 0x10 /* Second load targets XZR: the padding word is discarded. */
LDP X28, X29, [ SP ], # 0x10
LDP X26, X27, [ SP ], # 0x10
LDP X24, X25, [ SP ], # 0x10
LDP X22, X23, [ SP ], # 0x10
LDP X20, X21, [ SP ], # 0x10
LDP X18, X19, [ SP ], # 0x10
LDP X16, X17, [ SP ], # 0x10
LDP X14, X15, [ SP ], # 0x10
LDP X12, X13, [ SP ], # 0x10
LDP X10, X11, [ SP ], # 0x10
LDP X8, X9, [ SP ], # 0x10
LDP X6, X7, [ SP ], # 0x10
LDP X4, X5, [ SP ], # 0x10
LDP X2, X3, [ SP ], # 0x10
LDP X0, X1, [ SP ], # 0x10
.endm
  216. /*-----------------------------------------------------------*/
.macro savefloatregisters
/* Push the full SIMD/FP state: Q0 - Q31 (512 bytes) followed by FPSR and FPCR.
 * Only the low 32 bits of FPSR/FPCR are stored (as W9/W10, 8 bytes) in a
 * 16-byte slot to keep SP 16-byte aligned; total frame size is 528 bytes.
 * Clobbers X9/X10 - in this file the macro is invoked only after the
 * general-purpose registers have already been saved.
 * Layout must exactly mirror restorefloatregisters. */
STP Q0, Q1, [ SP, # - 0x20 ] !
STP Q2, Q3, [ SP, # - 0x20 ] !
STP Q4, Q5, [ SP, # - 0x20 ] !
STP Q6, Q7, [ SP, # - 0x20 ] !
STP Q8, Q9, [ SP, # - 0x20 ] !
STP Q10, Q11, [ SP, # - 0x20 ] !
STP Q12, Q13, [ SP, # - 0x20 ] !
STP Q14, Q15, [ SP, # - 0x20 ] !
STP Q16, Q17, [ SP, # - 0x20 ] !
STP Q18, Q19, [ SP, # - 0x20 ] !
STP Q20, Q21, [ SP, # - 0x20 ] !
STP Q22, Q23, [ SP, # - 0x20 ] !
STP Q24, Q25, [ SP, # - 0x20 ] !
STP Q26, Q27, [ SP, # - 0x20 ] !
STP Q28, Q29, [ SP, # - 0x20 ] !
STP Q30, Q31, [ SP, # - 0x20 ] !
MRS X9, FPSR /* Floating-point status register. */
MRS X10, FPCR /* Floating-point control register. */
STP W9, W10, [ SP, # - 0x10 ] ! /* 8 bytes stored; remaining 8 are alignment padding. */
.endm
  239. /*-----------------------------------------------------------*/
.macro restorefloatregisters
/* Pop the frame pushed by savefloatregisters: FPSR/FPCR first (the 32-bit
 * LDP into W9/W10 zero-extends into X9/X10), then Q31 down to Q0 in exact
 * reverse order of the stores.  Clobbers X9/X10. */
LDP W9, W10, [ SP ], # 0x10
MSR FPSR, X9
MSR FPCR, X10
LDP Q30, Q31, [ SP ], # 0x20
LDP Q28, Q29, [ SP ], # 0x20
LDP Q26, Q27, [ SP ], # 0x20
LDP Q24, Q25, [ SP ], # 0x20
LDP Q22, Q23, [ SP ], # 0x20
LDP Q20, Q21, [ SP ], # 0x20
LDP Q18, Q19, [ SP ], # 0x20
LDP Q16, Q17, [ SP ], # 0x20
LDP Q14, Q15, [ SP ], # 0x20
LDP Q12, Q13, [ SP ], # 0x20
LDP Q10, Q11, [ SP ], # 0x20
LDP Q8, Q9, [ SP ], # 0x20
LDP Q6, Q7, [ SP ], # 0x20
LDP Q4, Q5, [ SP ], # 0x20
LDP Q2, Q3, [ SP ], # 0x20
LDP Q0, Q1, [ SP ], # 0x20
.endm
  262. /*-----------------------------------------------------------*/
.macro portSAVE_CONTEXT
/* Save the interrupted task's complete execution context.
 *
 * Entry: running at EL1 in an exception path, with ELR_EL1/SPSR_EL1 still
 *        holding the task's resume PC and PSTATE.
 * Exit:  SPSEL = 1 (EL1 stack pointer selected); the task's TCB updated.
 *
 * TCB layout assumed throughout this file:
 *   +0   pxTopOfStack            (saved-context pointer, "ullContext")
 *   +8   ullTaskUnprivilegedSP   (MPU builds)
 *   +16  saved MAIR_EL1 value, then PRBAR/PRLAR pairs (MPU builds)
 *
 * Context frame built on the task's context stack (top to bottom):
 *   [ FPU-flag + critical-nesting ] [ FPU registers, if FPU-flag != 0 ]
 *   [ ELR_EL1 + SPSR_EL1 ] [ X0 - X30 ]. */
#if ( configENABLE_MPU == 1 )
/* Switch to use the EL1 stack pointer. */
MSR SPSEL, # 1
/* Store X0-X4 as they are being used to save the user allocated task stack and to program the MPU. */
STP X0, X1, [ SP, # - 0x10 ] !
STP X2, X3, [ SP, # - 0x10 ] !
STR X4, [ SP, # - 0x10 ] !
/* Switch to use the EL0 stack pointer. */
MSR SPSEL, # 0
/* Store user allocated task stack and use ullContext as the SP. */
#if ( configNUMBER_OF_CORES == 1 )
adrp X0, pxCurrentTCB
add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */
#else
adrp X0, pxCurrentTCBs
add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */
/* Get the core ID to index the TCB correctly. */
MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */
AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID. */
LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system). */
ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer. */
#endif
LDR X1, [ X0 ]
ADD X1, X1, #8 /* X1 = X1 + 8, X1 now points to ullTaskUnprivilegedSP in TCB. */
MOV X0, SP
STR X0, [ X1 ] /* Save ullTaskUnprivilegedSP on task's TCB. */
SUB X1, X1, #8 /* X1 = X1 - 8, X1 now points to pxTopOfStack in TCB. */
LDR X1, [ X1 ]
MOV SP, X1 /* Use pxTopOfStack ( ullContext ) as the SP. */
savefuncontextgpregs
#if ( configNUMBER_OF_CORES > 1 )
MRS X1, ELR_EL1 /* Save ELR_EL1 before calling xPortIsTaskPrivileged which would change its value in case of multicore. */
STR X1, [ SP, # - 0x10 ] !
#endif
BL xPortIsTaskPrivileged /* Returns non-zero in X0 for a privileged task. */
#if ( configNUMBER_OF_CORES > 1 )
LDR X1, [ SP ], # 0x10
MSR ELR_EL1, X1 /* Restore the saved exception return address. */
#endif
CBNZ X0, 3f /* If task is privileged, skip saving MPU context. */
#if ( configNUMBER_OF_CORES == 1 )
adrp X0, pxCurrentTCB
add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */
#else
adrp X0, pxCurrentTCBs
add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */
/* Get the core ID to index the TCB correctly. */
MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */
AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID. */
LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system). */
ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer. */
#endif
LDR X0, [ X0 ]
ADD X0, X0, #16 /* X0 = X0 + 16. X0 now points to MAIR_EL1 in TCB. */
MRS X1, MAIR_EL1 /* X1 = MAIR_EL1. */
STR X1, [ X0 ], # 0x8 /* Store MAIR_EL1 in TCB, X0 = X0 + 8. */
portSTORE_MPU_REGIONS_ADDRESSES /* Store MPU region addresses onto TCB (table pointer in X0). */
3 :
restorefuncontextgpregs
MSR SPSEL, # 1
/* Restore X0-X4 saved on the EL1 stack at macro entry. */
LDR X4, [ SP ], # 0x10
LDP X2, X3, [ SP ], # 0x10
LDP X0, X1, [ SP ], # 0x10
#endif /* #if ( configENABLE_MPU == 1 ) */
MSR SPSEL, # 0
/* Save the entire general-purpose context onto the task's context stack. */
saveallgpregisters
/* Save the SPSR and ELR values (resume PSTATE and PC). */
MRS X3, SPSR_EL1
MRS X2, ELR_EL1
STP X2, X3, [ SP, # - 0x10 ] !
/* Read this core's critical section nesting depth into X3. */
#if ( configNUMBER_OF_CORES == 1 )
adrp X0, ullCriticalNesting
add X0, X0, :lo12:ullCriticalNesting /* X0 = &ullCriticalNesting */
#else
adrp X0, ullCriticalNestings
add X0, X0, :lo12:ullCriticalNestings /* X0 = &ullCriticalNestings */
/* Calculate per-core index using MPIDR_EL1 for SMP support. */
MRS X1, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */
AND X1, X1, # 0xff /* Extract Aff0 (core ID). */
LSL X1, X1, # 3 /* Multiply core ID by pointer size (8 bytes); X1 is reused below. */
ADD X0, X0, X1 /* Add offset to base address. */
#endif
LDR X3, [ X0 ]
/* Read the FPU context indicator into X2. */
adrp X0, ullPortTaskHasFPUContext
add X0, X0, :lo12:ullPortTaskHasFPUContext /* X0 = &ullPortTaskHasFPUContext */
#if ( configNUMBER_OF_CORES > 1 )
ADD X0, X0, X1 /* Same per-core offset (X1) into the FPU array. */
#endif
LDR X2, [ X0 ]
/* Save the FPU context, if any (32 128-bit registers).  Clobbers only
 * X9/X10, which are already saved; X2/X3 remain live. */
CBZ X2, 4f /* FPU context not present, skip saving FPU registers. */
savefloatregisters
4 :
/* Store the FPU context indicator and critical nesting count on top. */
STP X2, X3, [ SP, # - 0x10 ] !
/* Publish the final context pointer as the task's pxTopOfStack. */
#if ( configNUMBER_OF_CORES == 1 )
adrp X0, pxCurrentTCB
add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */
#else
adrp X0, pxCurrentTCBs
add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */
MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register. */
AND X1, X1, # 0xff /* Extract core ID. */
LSL X1, X1, # 3 /* Multiply core ID by pointer size. */
ADD X0, X0, X1 /* Offset for current core's TCB pointer. */
#endif
LDR X1, [ X0 ]
MOV X0, SP
STR X0, [ X1 ] /* Save pxTopOfStack on the TCB. */
/* Switch to use the EL1 stack pointer. */
MSR SPSEL, # 1
.endm
  380. /*-----------------------------------------------------------*/
.macro portRESTORE_CONTEXT
/* Restore the context saved by portSAVE_CONTEXT for this core's current TCB
 * (the exact inverse: pops the frame described there and re-arms ELR_EL1 /
 * SPSR_EL1 for the subsequent ERET).
 * Exit: SPSEL = 1; in MPU builds SP_EL0 holds the task's unprivileged SP. */
#if ( configENABLE_MPU == 1 )
/* Switch to use the EL1 stack pointer. */
MSR SPSEL, # 1
savefuncontextgpregs
BL xPortIsTaskPrivileged /* Returns non-zero in X0 for a privileged task. */
CBNZ X0, 3f /* If task is privileged, skip restoring MPU context. */
/* Switch to use the EL0 stack pointer. */
MSR SPSEL, # 0
#if ( configNUMBER_OF_CORES == 1 )
adrp X0, pxCurrentTCB
add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */
#else
adrp X0, pxCurrentTCBs
add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */
/* Get the core ID to index the TCB correctly. */
MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */
AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID. */
LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system). */
ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer. */
#endif
LDR X0, [ X0 ]
DMB SY /* Complete outstanding transfers before disabling MPU. */
MRS X1, SCTLR_EL1 /* X1 = SCTLR_EL1 */
BIC X1, X1, # (1 << 0) /* Clear SCTLR_EL1.M (bit 0). */
MSR SCTLR_EL1, X1 /* Disable MPU while regions are reprogrammed. */
ADD X0, X0, #16 /* X0 = X0 + 16. X0 now points to MAIR_EL1 in TCB. */
LDR X1, [ X0 ], # 0x8 /* X1 = *X0 i.e. X1 = saved MAIR_EL1, X0 = X0 + 8. */
MSR MAIR_EL1, X1 /* Program MAIR_EL1. */
portLOAD_MPU_REGIONS_ADDRESSES /* Load MPU region addresses from TCB (table pointer in X0). */
MRS X1, SCTLR_EL1 /* X1 = SCTLR_EL1 */
ORR X1, X1, # (1 << 0) /* Set SCTLR_EL1.M (bit 0). */
MSR SCTLR_EL1, X1 /* Re-enable MPU. */
DSB SY /* Force memory writes before continuing. */
3 :
MSR SPSEL, # 1
restorefuncontextgpregs
#endif /* #if ( configENABLE_MPU == 1 ) */
/* Switch to use the EL0 stack pointer. */
MSR SPSEL, # 0
#if ( configNUMBER_OF_CORES == 1 )
adrp X0, pxCurrentTCB
add X0, X0, :lo12:pxCurrentTCB /* X0 = &pxCurrentTCB */
#else
adrp X0, pxCurrentTCBs
add X0, X0, :lo12:pxCurrentTCBs /* X0 = &pxCurrentTCBs */
/* Get the core ID to index the TCB correctly. */
MRS X2, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */
AND X2, X2, # 0xff /* Extract Aff0 which contains the core ID. */
LSL X2, X2, # 3 /* Scale the core ID to the size of a pointer (64-bit system). */
ADD X0, X0, X2 /* Add the offset for the current core's TCB pointer. */
#endif
LDR X1, [ X0 ]
LDR X0, [ X1 ] /* X0 = Location of saved context in TCB (pxTopOfStack). */
MOV SP, X0
LDP X2, X3, [ SP ], # 0x10 /* X2 = FPU indicator, X3 = critical nesting count. */
#if ( configNUMBER_OF_CORES == 1 )
adrp X0, ullCriticalNesting
add X0, X0, :lo12:ullCriticalNesting /* X0 = &ullCriticalNesting */
#else
adrp X0, ullCriticalNestings
add X0, X0, :lo12:ullCriticalNestings /* X0 = &ullCriticalNestings */
/* Calculate offset for current core's ullCriticalNesting. */
MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register. */
AND X1, X1, # 0xff /* Extract Aff0, which contains the core ID. */
LSL X1, X1, # 3 /* Scale core ID to the size of a pointer (64-bit system). */
ADD X0, X0, X1 /* Add offset for the current core's ullCriticalNesting. */
#endif
/* Select the interrupt priority mask: 0xFF (all priorities unmasked) when
 * the task is outside a critical section, otherwise the API mask. */
MOV X1, # 255 /* Default mask: no interrupt priorities masked. */
CBZ X3, 4f /* Nesting == 0: keep the unmask value. */
MOV X1, # portMAX_API_PRIORITY_MASK /* Inside a critical section: re-apply the API mask. */
4:
MSR ICC_PMR_EL1, X1 /* Set GIC interrupt priority mask. */
DSB SY
ISB SY
STR X3, [ X0 ] /* Restore this core's critical nesting count. */
/* Restore the FPU context indicator. */
adrp X0, ullPortTaskHasFPUContext
add X0, X0, :lo12:ullPortTaskHasFPUContext /* X0 = &ullPortTaskHasFPUContext */
#if ( configNUMBER_OF_CORES > 1 )
MRS X1, MPIDR_EL1 /* Read Multiprocessor Affinity Register. */
AND X1, X1, # 0xff /* Extract Aff0, which contains the core ID. */
LSL X1, X1, # 3 /* Scale core ID to the size of a pointer (64-bit system). */
ADD X0, X0, X1 /* Add to the base of the FPU array. */
#endif
STR X2, [ X0 ]
/* Restore the FPU context, if any. */
CBZ X2, 5f
restorefloatregisters
5:
LDP X2, X3, [ SP ], # 0x10 /* X2 = saved ELR_EL1, X3 = saved SPSR_EL1. */
MSR SPSR_EL1, X3
MSR ELR_EL1, X2
restoreallgpregisters
#if ( configENABLE_MPU == 1 )
/* Save pxTopOfStack ( ullContext ) on the task's TCB and set SP_EL0 to ullTaskUnprivilegedSP. */
MSR SPSEL, # 1
STP X8, X9, [ SP, # - 0x10 ] ! /* Scratch registers for the TCB update below. */
STR X10, [ SP, # - 0x10 ] !
#if ( configNUMBER_OF_CORES == 1 )
adrp X8, pxCurrentTCB
add X8, X8, :lo12:pxCurrentTCB /* X8 = &pxCurrentTCB */
#else
adrp X8, pxCurrentTCBs
add X8, X8, :lo12:pxCurrentTCBs /* X8 = &pxCurrentTCBs */
/* Get the core ID to index the TCB correctly. */
MRS X10, MPIDR_EL1 /* Read the Multiprocessor Affinity Register. */
AND X10, X10, # 0xff /* Extract Aff0 which contains the core ID. */
LSL X10, X10, # 3 /* Scale the core ID to the size of a pointer (64-bit system). */
ADD X8, X8, X10 /* Add the offset for the current core's TCB pointer. */
#endif
LDR X9, [ X8 ]
MRS X8, SP_EL0
STR X8, [ X9 ] /* Store pxTopOfStack on task's TCB. */
ADD X9, X9, #8 /* X9 = X9 + 8. X9 now points to ullTaskUnprivilegedSP in TCB. */
LDR X9, [ X9 ]
MSR SP_EL0, X9 /* Use ullTaskUnprivilegedSP as SP_EL0. */
LDR X10, [ SP ], # 0x10
LDP X8, X9, [ SP ], # 0x10
#endif /* #if ( configENABLE_MPU == 1 ) */
/* Switch to use the EL1 stack pointer. */
MSR SPSEL, # 1
.endm
  504. /*-----------------------------------------------------------*/
  505. /******************************************************************************
  506. * FreeRTOS_SWI_Handler handler is used to perform a context switch.
  507. *****************************************************************************/
  508. .align 8
  509. .type FreeRTOS_SWI_Handler, % function
  510. FreeRTOS_SWI_Handler:
  511. /* Save X0-X5 temporarily as they are used in the handler. */
  512. STP X0, X1, [SP, #-0x10]!
  513. STP X2, X3, [SP, #-0x10]!
  514. STP X4, X5, [SP, #-0x10]!
  515. MRS X4, ELR_EL1 /* Save exception return address. */
  516. MRS X5, SPSR_EL1 /* Save program status register address. */
  517. /* Decide action based on SVC immediate without corrupting any task context. */
  518. MRS X0, ESR_EL1
  519. /* Extract exception class. */
  520. LSR X1, X0, # 26
  521. CMP X1, # 0x15 /* 0x15 = SVC instruction. */
  522. B.NE FreeRTOS_Abort
  523. /* Extract SVC immediate from ISS[15:0]. */
  524. AND X2, X0, # 0xFFFF
  525. /* portSVC_YIELD: yield from a running task. */
  526. CMP X2, # portSVC_YIELD
  527. B.EQ FreeRTOS_Yield
  528. /* portSVC_START_FIRST_TASK: start first task on this core without saving any prior context. */
  529. CMP X2, # portSVC_START_FIRST_TASK
  530. B.EQ Start_First_Task
  531. 1:
  532. /* portSVC_DISABLE_INTERRUPTS: disable IRQs (DAIF.I) in SPSR_EL1 without touching task context. */
  533. CMP X2, # portSVC_DISABLE_INTERRUPTS
  534. B.NE 2f
  535. ORR X5, X5, # (1 << portPSTATE_I_BIT) /* Set I bit in SPSR_EL1 */
  536. MSR ELR_EL1, X4
  537. MSR SPSR_EL1, X5
  538. LDP X4, X5, [SP], #0x10
  539. LDP X2, X3, [SP], #0x10
  540. LDP X0, X1, [SP], #0x10
  541. DSB SY
  542. ISB SY
  543. ERET
  544. 2:
  545. /* portSVC_ENABLE_INTERRUPTS: enable IRQs (DAIF.I clear) in SPSR_EL1 without touching task context. */
  546. CMP X2, # portSVC_ENABLE_INTERRUPTS
  547. B.NE 3f
  548. BIC X5, X5, # (1 << portPSTATE_I_BIT) /* Clear I bit in SPSR_EL1 */
  549. MSR ELR_EL1, X4
  550. MSR SPSR_EL1, X5
  551. LDP X4, X5, [SP], #0x10
  552. LDP X2, X3, [SP], #0x10
  553. LDP X0, X1, [SP], #0x10
  554. ERET
  555. 3:
  556. /* portSVC_GET_CORE_ID: return core ID in X0 (Aff0 of MPIDR_EL1). */
  557. CMP X2, # portSVC_GET_CORE_ID
  558. B.NE 4f
  559. MRS X0, MPIDR_EL1
  560. AND X0, X0, # 0xff
  561. MSR SPSR_EL1, X5
  562. /* Restore X5-X1 while discarding old X0. */
  563. LDP X4, X5, [SP], #0x10
  564. LDP X2, X3, [ SP ], # 0x10
  565. LDP XZR, X1, [ SP ], # 0x10
  566. ERET
  567. 4:
  568. /* portSVC_MASK_ALL_INTERRUPTS: set ICC_PMR_EL1 to max API mask and return previous-mask-equal flag in X0. */
  569. CMP X2, # portSVC_MASK_ALL_INTERRUPTS
  570. B.NE 5f
  571. /* Read current PMR and compare. */
  572. MRS X0, ICC_PMR_EL1
  573. CMP X0, # portMAX_API_PRIORITY_MASK
  574. B.EQ 41f
  575. /* Disable IRQs while updating PMR. */
  576. MSR DAIFSET, # 2
  577. DSB SY
  578. ISB SY
  579. /* Write new PMR value. */
  580. MOV X1, # portMAX_API_PRIORITY_MASK
  581. MSR ICC_PMR_EL1, X1
  582. DSB SY
  583. ISB SY
  584. /* Re-enable IRQs. */
  585. MSR DAIFCLR, # 2
  586. DSB SY
  587. ISB SY
  588. MSR ELR_EL1, X4
  589. MSR SPSR_EL1, X5
  590. 41:
  591. /* Restore X5-X1 while discarding old X0. */
  592. LDP X4, X5, [ SP ], # 0x10
  593. LDP X2, X3, [ SP ], # 0x10
  594. LDP XZR, X1, [ SP ], # 0x10
  595. ERET
  596. 5:
  597. /* portSVC_UNMASK_ALL_INTERRUPTS: set ICC_PMR_EL1 to portUNMASK_VALUE to unmask all interrupts. */
  598. CMP X2, # portSVC_UNMASK_ALL_INTERRUPTS
  599. B.NE 6f
  600. /* Disable IRQs while updating PMR. */
  601. MSR DAIFSET, # 2
  602. DSB SY
  603. ISB SY
  604. MOV X0, #portUNMASK_VALUE /* Unmask all interrupts. */
  605. MSR ICC_PMR_EL1, X0
  606. DSB SY
  607. ISB SY
  608. /* Re-enable IRQs. */
  609. MSR DAIFCLR, # 2
  610. DSB SY
  611. ISB SY
  612. MSR ELR_EL1, X4
  613. MSR SPSR_EL1, X5
  614. LDP X4, X5, [SP], #0x10
  615. LDP X2, X3, [SP], #0x10
  616. LDP X0, X1, [SP], #0x10
  617. ERET
  618. 6:
  619. /* portSVC_UNMASK_INTERRUPTS: set ICC_PMR_EL1 to uxNewMaskValue stored in X0. */
  620. CMP X2, # portSVC_UNMASK_INTERRUPTS
  621. B.NE 7f
  622. /* Disable IRQs while updating PMR. */
  623. MSR DAIFSET, # 2
  624. DSB SY
  625. ISB SY
  626. LDR X0, [ SP, # 0x20 ] /* Original X0 */
  627. MSR ICC_PMR_EL1, X0
  628. DSB SY
  629. ISB SY
  630. /* Re-enable IRQs. */
  631. MSR DAIFCLR, # 2
  632. DSB SY
  633. ISB SY
  634. MSR ELR_EL1, X4
  635. MSR SPSR_EL1, X5
  636. LDP X4, X5, [SP], #0x10
  637. LDP X2, X3, [SP], #0x10
  638. LDP X0, X1, [SP], #0x10
  639. ERET
  640. 7:
  641. #if ( configENABLE_MPU == 1 )
  642. /* portSVC_CHECK_PRIVILEGE: Check if the task is a privileged task */
  643. CMP X2, # portSVC_CHECK_PRIVILEGE
  644. B.NE 8f
  645. savefuncontextgpregs
  646. BL xPortIsTaskPrivileged
  647. restorefuncontextgpregexceptx0 /* xPortIsTaskPrivileged() return value is stored in X0. */
  648. MSR ELR_EL1, X4
  649. MSR SPSR_EL1, X5
  650. /* Restore X5-X1 while discarding old X0. */
  651. LDP X4, X5, [ SP ], # 0x10
  652. LDP X2, X3, [ SP ], # 0x10
  653. LDP XZR, X1, [ SP ], # 0x10
  654. ERET
  655. #endif /* #if ( configENABLE_MPU == 1 ) */
  656. 8:
  657. /* portSVC_SAVE_TASK_CONTEXT: Save task's context */
  658. CMP X2, # portSVC_SAVE_TASK_CONTEXT
  659. B.NE 9f
  660. MSR ELR_EL1, X4
  661. MSR SPSR_EL1, X5
  662. /* Restore X5-X0. */
  663. LDP X4, X5, [ SP ], # 0x10
  664. LDP X2, X3, [ SP ], # 0x10
  665. LDP X0, X1, [ SP ], # 0x10
  666. portSAVE_CONTEXT
  667. ERET
  668. 9:
  669. /* portSVC_RESTORE_CONTEXT: Restore task's context */
  670. CMP X2, # portSVC_RESTORE_CONTEXT
  671. B.NE 10f
  672. MSR ELR_EL1, X4
  673. MSR SPSR_EL1, X5
  674. /* Restore X5-X0. */
  675. LDP X4, X5, [ SP ], # 0x10
  676. LDP X2, X3, [ SP ], # 0x10
  677. LDP X0, X1, [ SP ], # 0x10
  678. portRESTORE_CONTEXT
  679. ERET
10:
    /* portSVC_DELETE_CURRENT_TASK: Delete the current task. ELR/SPSR are not
     * rewritten and there is no ERET because vTaskDelete() never returns
     * control to the deleted task. */
    CMP X2, # portSVC_DELETE_CURRENT_TASK
    B.NE 11f
    /* Restore X5-X0. */
    LDP X4, X5, [ SP ], #0x10
    LDP X2, X3, [ SP ], # 0x10
    LDP X0, X1, [ SP ], # 0x10
#if ( configNUMBER_OF_CORES == 1 )
    adrp X0, pxCurrentTCB
    add X0, X0, :lo12:pxCurrentTCB      /* X0 = &pxCurrentTCB */
#else
    adrp X0, pxCurrentTCBs
    add X0, X0, :lo12:pxCurrentTCBs     /* X0 = &pxCurrentTCBs */
    /* Get the core ID to index the TCB correctly. */
    MRS X1, MPIDR_EL1                   /* Read the Multiprocessor Affinity Register. */
    AND X1, X1, # 0xff                  /* Extract Aff0 which contains the core ID. */
    LSL X1, X1, # 3                     /* Scale the core ID to the size of a pointer (64-bit system). */
    ADD X0, X0, X1                      /* Add the offset for the current core's TCB pointer. */
#endif
    LDR X0, [ X0 ]                      /* X0 = pxCurrentTCB — the handle argument for vTaskDelete(). */
    B vTaskDelete                       /* Tail-call at EL1; does not come back here. */
11:
    /* portSVC_INTERRUPT_CORE: Raise a Software Generated Interrupt. The
     * caller's original X0 (still on the EL1 stack at SP+0x20) holds the
     * value to program into ICC_SGI1R_EL1. */
    CMP X2, # portSVC_INTERRUPT_CORE
    B.NE 12f
    LDR X0, [ SP, # 0x20 ]              /* Original X0. */
    MSR ICC_SGI1R_EL1, X0               /* X0 contains the value to write to ICC_SGI1R_EL1. */
    MSR ELR_EL1, X4
    MSR SPSR_EL1, X5
    /* Restore X5-X0. */
    LDP X4, X5, [SP], #0x10
    LDP X2, X3, [ SP ], # 0x10
    LDP X0, X1, [ SP ], # 0x10
    ERET
12:
#if ( configENABLE_MPU == 1 )
    /* ---------- SystemCallEnter? --------------------------------- */
    /* SVC immediates 0 … NUM_SYSTEM_CALLS-1 are MPU system calls. */
    LDR X3, =NUM_SYSTEM_CALLS
    CMP X2, X3
    BLO 121f                            /* Unsigned compare: imm 0 … NUM_SYSCALLS-1. */
    /* ---------- SystemCallExit? ---------------------------------- */
    LDR X3, =portSVC_SYSTEM_CALL_EXIT
    CMP X2, X3
    BEQ 122f
    /* NOTE(review): a number that is neither a system call nor the exit
     * marker falls through into the SystemCallEnter path at 121 below —
     * confirm this is intended rather than a branch to FreeRTOS_Abort. */
    /* ---------- SystemCallEnter ------------------------------------- */
121:
    /* If the calling task is privileged, directly tail-call the
     * implementation at EL1 (see priv_path). */
    savefuncontextgpregs
    BL xPortIsTaskPrivileged
    restorefuncontextgpregexceptx0      /* X0 holds pdTRUE if privileged. */
    CBNZ X0, priv_path
    /* Unprivileged task path: marshal the call through vSystemCallEnter(). */
#if ( configNUMBER_OF_CORES == 1 )
    adrp X0, pxCurrentTCB
    add X0, X0, :lo12:pxCurrentTCB      /* X0 = &pxCurrentTCB */
#else
    adrp X0, pxCurrentTCBs
    add X0, X0, :lo12:pxCurrentTCBs     /* X0 = &pxCurrentTCBs */
    /* Get the core ID to index the TCB correctly. */
    MRS X1, MPIDR_EL1                   /* Read the Multiprocessor Affinity Register. */
    AND X1, X1, # 0xff                  /* Extract Aff0 which contains the core ID. */
    LSL X1, X1, # 3                     /* Scale the core ID to the size of a pointer (64-bit system). */
    ADD X0, X0, X1                      /* Add the offset for the current core's TCB pointer. */
#endif
    LDR X0, [ X0 ]
    LDR X0, [ X0 ]                      /* X0 = location of saved context in TCB (first TCB member). */
    /* Save inputs (X0-X3) and LR (X30) onto the current task's context to be
     * used by the system call implementation. */
    STR X30, [ X0, # ( portOFFSET_TO_LR * 8 ) ]
    /* Read original X0, X1, X2, and X3 from the EL1 stack without modifying
     * SP, and store them into the task context:
     * [SP+0x20] -> X0, [SP+0x28] -> X1, [SP+0x10] -> X2, [SP+0x18] -> X3. */
    LDR X1, [ SP, # 0x20 ]              /* Original X0. */
    STR X1, [ X0, # ( portOFFSET_TO_X0 * 8 ) ]
    LDR X1, [ SP, # 0x28 ]              /* Original X1. */
    STR X1, [ X0, # ( portOFFSET_TO_X1 * 8 ) ]
    LDR X1, [ SP, # 0x10 ]              /* Original X2. */
    STR X1, [ X0, # ( portOFFSET_TO_X2 * 8 ) ]
    LDR X1, [ SP, # 0x18 ]              /* Original X3. */
    STR X1, [ X0, # ( portOFFSET_TO_X3 * 8 ) ]
    /* Restore X2-X5 to their original values; discard the stacked X1 and X0,
     * as X0/X1 now carry the saved-context location and the system call
     * number for vSystemCallEnter( context, number ). */
    MOV X1, X2                          /* X2 still holds the SVC number; pass it as argument 2. */
    LDP X4, X5, [ SP ], #0x10
    LDP X2, X3, [ SP ], #0x10
    ADD SP, SP, #0x10                   /* Discard stacked X0 and X1. */
    savesyscallcontextgpregs
    BL vSystemCallEnter                 /* Returns after programming ELR/SPSR/SP_EL0 and args. */
    /* Set LR so the syscall implementation "returns" into
     * vRequestSystemCallExit, which raises portSVC_SYSTEM_CALL_EXIT. */
    adrp X30, vRequestSystemCallExit
    add X30, X30, :lo12:vRequestSystemCallExit
    restoresyscallcontextgpregs
    ERET                                /* Enter the system call implementation. */
priv_path:
    /* Privileged task: skip the vSystemCallEnter() marshalling and ERET
     * straight into the implementation. X30 and all argument registers are
     * restored to the caller's values, so the implementation returns
     * directly to the original call site. */
    /* Load implementation address: uxSystemCallImplementations[X2]
     * (64-bit entries). */
    adrp X3, uxSystemCallImplementations
    add X3, X3, :lo12:uxSystemCallImplementations
    LSL X2, X2, #3                      /* Multiply index by size of pointer (8 bytes). */
    ADD X3, X3, X2                      /* X3 = &uxSystemCallImplementations[X2] */
    LDR X3, [ X3 ]                      /* X3 = uxSystemCallImplementations[X2] */
    /* Return from the exception directly to the implementation; X5 still
     * holds the SPSR captured on SVC entry. */
    MSR ELR_EL1, X3
    MSR SPSR_EL1, X5
    /* Restore X5-X0. */
    LDP X4, X5, [ SP ], #0x10
    LDP X2, X3, [ SP ], #0x10
    LDP X0, X1, [ SP ], #0x10
    ERET
    /* ---------- SystemCallExit ----------------------------------- */
122:
    /* Unwind a completed system call. X0 is reloaded from the stack without
     * changing SP because it carries the system call's return value, which
     * must survive back to the unprivileged caller. */
    LDR X0, [ SP, # 0x20 ]              /* Original X0 = system call return value. */
    savefuncontextgpregs
    BL vSystemCallExit                  /* Reprograms ELR/SPSR/SP_EL0 for the return to the task. */
    restorefuncontextgpregexceptx0
    /* Restore X5-X1 while discarding old X0. */
    LDP X4, X5, [ SP ], #0x10
    LDP X2, X3, [ SP ], #0x10
    LDP XZR, X1, [ SP ], #0x10          /* XZR discards the stacked X0 so the return value survives. */
    ERET
#endif /* #if ( configENABLE_MPU == 1 ) */
/* ---------- Unexpected EC – just hang in place --------------------------- */
/* Terminal spin for exception classes / SVC numbers this port does not
 * handle; parks the core so the fault is observable in a debugger. */
FreeRTOS_Abort:
    B FreeRTOS_Abort
FreeRTOS_Yield:
    /* Reached from the SVC dispatcher to perform a cooperative context
     * switch. X5 holds the SPSR captured on exception entry; it is restored
     * so portSAVE_CONTEXT below records the correct value.
     * NOTE(review): ELR_EL1 is not rewritten here — assumed unmodified since
     * exception entry; confirm against the dispatcher prologue above. */
    MSR SPSR_EL1, X5
    /* Check if the task is in a critical section by inspecting
     * ullCriticalNesting; switching inside a critical section is deferred. */
#if ( configNUMBER_OF_CORES > 1 )
    adrp X0, ullCriticalNestings
    add X0, X0, :lo12:ullCriticalNestings /* X0 = &ullCriticalNestings */
    MRS X1, MPIDR_EL1                   /* Read the Multiprocessor Affinity Register. */
    AND X1, X1, # 0xff                  /* Extract Aff0 (core ID). */
    LSL X1, X1, # 3                     /* Multiply core ID by pointer size (8 bytes). */
    ADD X0, X0, X1                      /* Add offset to base address. */
    LDR X1, [ X0 ]                      /* Load ullCriticalNesting for this core. */
    CBNZ X1, Skip_Context_Switch        /* Skip context switch if in a critical section. */
#endif
    /* Restore X5-X0 to their original values before saving the full
     * context, so the task's true register state is recorded. */
    LDP X4, X5, [SP], #0x10
    LDP X2, X3, [SP], #0x10
    LDP X0, X1, [SP], #0x10
    portSAVE_CONTEXT
    savefuncontextgpregs
#if ( configNUMBER_OF_CORES > 1 )
    MRS x0, mpidr_el1                   /* Pass the core ID (Aff0) to vTaskSwitchContext(). */
    AND x0, x0, 255
#endif
    BL vTaskSwitchContext
    restorefuncontextgpregs
    portRESTORE_CONTEXT                 /* Resume whichever task pxCurrentTCB now names. */
    ERET
Skip_Context_Switch:
    /* Yield requested from inside a critical section: undo the entry stacking
     * and return to the interrupted task without switching. */
    /* Restore X5-X0 to their original values. */
    LDP X4, X5, [SP], #0x10
    LDP X2, X3, [SP], #0x10
    LDP X0, X1, [SP], #0x10
    ERET
Start_First_Task:
    /* Discard the registers stacked on SVC entry (there is no prior task
     * context to preserve), then launch the first task. */
    /* Restore X5-X0 to their original values. */
    LDP X4, X5, [SP], #0x10
    LDP X2, X3, [SP], #0x10
    LDP X0, X1, [SP], #0x10
    portRESTORE_CONTEXT
    ERET
/******************************************************************************
 * vPortSaveTaskContext is used to save the task's context into its stack.
 * Callable from C; expands portSAVE_CONTEXT and returns to the caller via
 * the link register.
 *****************************************************************************/
.align 8
.type vPortSaveTaskContext, % function
vPortSaveTaskContext:
    portSAVE_CONTEXT
    RET
/******************************************************************************
 * vPortRestoreTaskContext is used to start the scheduler: it installs the
 * FreeRTOS exception vector table and then ERETs into the first task's
 * saved context. Does not return.
 *****************************************************************************/
.align 8
.type vPortRestoreTaskContext, % function
vPortRestoreTaskContext:
.set freertos_vector_base, _freertos_vector_table
    /* Install the FreeRTOS interrupt handlers. */
    LDR X1, = freertos_vector_base
    MSR VBAR_EL1, X1
    DSB SY                              /* Ensure the VBAR_EL1 write is visible before any exception. */
    ISB SY
    /* Start the first task. */
    portRESTORE_CONTEXT
    ERET
/******************************************************************************
 * FreeRTOS_IRQ_Handler handles IRQ entry and exit.
 *
 * This handler is supposed to be used only for IRQs and never for FIQs. Per ARM
 * GIC documentation [1], Group 0 interrupts are always signaled as FIQs. Since
 * this handler is only for IRQs, We can safely assume Group 1 while accessing
 * Interrupt Acknowledge and End Of Interrupt registers and therefore, use
 * ICC_IAR1_EL1 and ICC_EOIR1_EL1.
 *
 * [1] https://developer.arm.com/documentation/198123/0300/Arm-CoreLink-GIC-fundamentals
 *****************************************************************************/
.align 8
.type FreeRTOS_IRQ_Handler, % function
FreeRTOS_IRQ_Handler:
    /* Save volatile registers. */
    saveallgpregisters
    savefloatregisters
    /* Save the SPSR and ELR (pushed as the pair ELR, SPSR). */
    MRS X3, SPSR_EL1
    MRS X2, ELR_EL1
    STP X2, X3, [ SP, # - 0x10 ] !
    /* Increment the interrupt nesting counter. */
#if ( configNUMBER_OF_CORES == 1 )
    adrp X5, ullPortInterruptNesting
    add X5, X5, :lo12:ullPortInterruptNesting /* X5 = &ullPortInterruptNesting */
#else
    adrp X5, ullPortInterruptNestings
    add X5, X5, :lo12:ullPortInterruptNestings /* X5 = &ullPortInterruptNestings */
    MRS X2, MPIDR_EL1                   /* Read Multiprocessor Affinity Register. */
    AND X2, X2, # 0xff                  /* Extract Aff0, which contains the core ID. */
    LSL X2, X2, # 3                     /* Scale core ID to the size of a pointer (64-bit system). */
    /* Calculate the offset for the current core's interrupt nesting counter. */
    ADD X5, X5, X2                      /* X5 = &ullPortInterruptNestings[ core ]. */
#endif
    LDR X1, [ X5 ]                      /* Old nesting count in X1. */
    ADD X6, X1, # 1
    STR X6, [ X5 ]                      /* Address of nesting count variable in X5. */
    /* Maintain the interrupt nesting information across the function call. */
    STP X1, X5, [ SP, # - 0x10 ] !
    /* Read interrupt ID from the interrupt acknowledge register and store it
     * in X0 for future parameter and interrupt clearing use. */
    MRS X0, ICC_IAR1_EL1
    /* Maintain the interrupt ID value across the function call; X1 is stored
     * only to keep SP 16-byte aligned. */
    STP X0, X1, [ SP, # - 0x10 ] !
    savefuncontextgpregs
    /* Call the C handler. */
    BL vApplicationIRQHandler
    restorefuncontextgpregs
    /* Disable interrupts. */
    MSR DAIFSET, # 2
    DSB SY
    ISB SY
    /* Restore the interrupt ID value. */
    LDP X0, X1, [ SP ], # 0x10
    /* End IRQ processing by writing interrupt ID value to the EOI register. */
    MSR ICC_EOIR1_EL1, X0
    /* Restore the interrupt nesting count. */
    LDP X1, X5, [ SP ], # 0x10
    STR X1, [ X5 ]
    /* Has interrupt nesting unwound (i.e. this is the outermost interrupt)? */
    CMP X1, # 0
    B.NE Exit_IRQ_No_Context_Switch
    /* Is a context switch required? */
    adrp X0, ullPortYieldRequired
    add X0, X0, :lo12:ullPortYieldRequired /* X0 = &ullPortYieldRequired */
#if ( configNUMBER_OF_CORES > 1 )
    /* NOTE(review): the SMP build indexes the singular ullPortYieldRequired
     * symbol per core, unlike the plural ullPortInterruptNestings above —
     * confirm the symbol is declared as a per-core array. */
    MRS X2, MPIDR_EL1                   /* Read Multiprocessor Affinity Register. */
    AND X2, X2, # 0xff                  /* Extract Aff0, which contains the core ID. */
    LSL X2, X2, # 3                     /* Scale core ID to the size of a pointer (64-bit system). */
    /* Calculate offset for the current core's ullPortYieldRequired and load its address. */
    ADD X0, X0, X2                      /* Add offset for the current core's ullPortYieldRequired. */
#endif
    LDR X1, [ X0 ]
    CMP X1, # 0
    B.EQ Exit_IRQ_No_Context_Switch
    /* Check if the task is in a critical section by inspecting ullCriticalNesting. */
#if ( configNUMBER_OF_CORES > 1 )
    adrp X0, ullCriticalNestings
    add X0, X0, :lo12:ullCriticalNestings /* X0 = &ullCriticalNestings */
    MRS X1, MPIDR_EL1                   /* Read the Multiprocessor Affinity Register. */
    AND X1, X1, # 0xff                  /* Extract Aff0 (core ID). */
    LSL X1, X1, # 3                     /* Multiply core ID by pointer size (8 bytes). */
    ADD X0, X0, X1                      /* Add offset to base address. */
    LDR X1, [ X0 ]                      /* Load ullCriticalNesting for this core. */
    CBNZ X1, Exit_IRQ_No_Context_Switch /* Skip context switch if in a critical section. */
#endif
    /* Reset ullPortYieldRequired to 0. */
    MOV X2, # 0
    STR X2, [ X0 ]
    /* Restore volatile registers. */
    LDP X4, X5, [ SP ], # 0x10          /* X4 = saved ELR, X5 = saved SPSR. */
    MSR SPSR_EL1, X5
    MSR ELR_EL1, X4
    DSB SY
    ISB SY
    restorefloatregisters
    restoreallgpregisters
    /* Save the context of the current task and select a new task to run. */
    portSAVE_CONTEXT
#if configNUMBER_OF_CORES > 1
    MRS x0, mpidr_el1                   /* Pass the core ID (Aff0) to vTaskSwitchContext(). */
    AND x0, x0, 255
#endif
    savefuncontextgpregs
    BL vTaskSwitchContext
    restorefuncontextgpregs
    portRESTORE_CONTEXT
    ERET
Exit_IRQ_No_Context_Switch:
    /* No switch needed (nested interrupt, no yield pending, or inside a
     * critical section): restore state and return to the interrupted code. */
    LDP X4, X5, [ SP ], # 0x10          /* X4 = saved ELR, X5 = saved SPSR. */
    MSR SPSR_EL1, X5
    MSR ELR_EL1, X4
    DSB SY
    ISB SY
    restorefloatregisters
    restoreallgpregisters
    ERET
/******************************************************************************
 * If the application provides an implementation of vApplicationIRQHandler(),
 * then it will get called directly without saving the FPU registers on
 * interrupt entry, and this weak implementation of
 * vApplicationIRQHandler() will not get called.
 *
 * If the application provides its own implementation of
 * vApplicationFPUSafeIRQHandler() then this implementation of
 * vApplicationIRQHandler() will be called, save the FPU registers, and then
 * call vApplicationFPUSafeIRQHandler().
 *
 * Therefore, if the application writer wants FPU registers to be saved on
 * interrupt entry their IRQ handler must be called
 * vApplicationFPUSafeIRQHandler(), and if the application writer does not want
 * FPU registers to be saved on interrupt entry their IRQ handler must be
 * called vApplicationIRQHandler().
 *****************************************************************************/
.align 8
.weak vApplicationIRQHandler
.type vApplicationIRQHandler, % function
vApplicationIRQHandler:
    /* Save FPU registers (32 128-bit registers plus the 2 64-bit FP
     * configuration and status registers). */
    savefloatregisters
    savefuncontextgpregs
    /* Call the C handler. */
    BL vApplicationFPUSafeIRQHandler
    restorefuncontextgpregs
    /* Restore FPU registers. */
    restorefloatregisters
    RET
.end