/*
 * xtensa_vectors.S
 * (Web-scrape artifacts removed here: page title/size line and runs of
 * concatenated source line numbers left over from HTML extraction.)
 */
  1. /*
  2. * SPDX-FileCopyrightText: 2015-2019 Cadence Design Systems, Inc.
  3. *
  4. * SPDX-License-Identifier: MIT
  5. *
  6. * SPDX-FileContributor: 2016-2023 Espressif Systems (Shanghai) CO LTD
  7. */
  8. /*
  9. * Copyright (c) 2015-2019 Cadence Design Systems, Inc.
  10. *
  11. * Permission is hereby granted, free of charge, to any person obtaining
  12. * a copy of this software and associated documentation files (the
  13. * "Software"), to deal in the Software without restriction, including
  14. * without limitation the rights to use, copy, modify, merge, publish,
  15. * distribute, sublicense, and/or sell copies of the Software, and to
  16. * permit persons to whom the Software is furnished to do so, subject to
  17. * the following conditions:
  18. *
  19. * The above copyright notice and this permission notice shall be included
  20. * in all copies or substantial portions of the Software.
  21. *
  22. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  23. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  24. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  25. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
  26. * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  27. * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  28. * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  29. */
  30. /*
  31. XTENSA VECTORS AND LOW LEVEL HANDLERS FOR AN RTOS
  32. Xtensa low level exception and interrupt vectors and handlers for an RTOS.
  33. Interrupt handlers and user exception handlers support interaction with
  34. the RTOS by calling XT_RTOS_INT_ENTER and XT_RTOS_INT_EXIT before and
  35. after user's specific interrupt handlers. These macros are defined in
  36. xtensa_<rtos>.h to call suitable functions in a specific RTOS.
  37. Users can install application-specific interrupt handlers for low and
  38. medium level interrupts, by calling xt_set_interrupt_handler(). These
  39. handlers can be written in C, and must obey C calling convention. The
  40. handler table is indexed by the interrupt number. Each handler may be
  41. provided with an argument.
  42. Note that the system timer interrupt is handled specially, and is
  43. dispatched to the RTOS-specific handler. This timer cannot be hooked
  44. by application code.
  45. Optional hooks are also provided to install a handler per level at
  46. run-time, made available by compiling this source file with
  47. '-DXT_INTEXC_HOOKS' (useful for automated testing).
  48. !! This file is a template that usually needs to be modified to handle !!
  49. !! application specific interrupts. Search USER_EDIT for helpful comments !!
  50. !! on where to insert handlers and how to write them. !!
  51. Users can also install application-specific exception handlers in the
  52. same way, by calling xt_set_exception_handler(). One handler slot is
  53. provided for each exception type. Note that some exceptions are handled
  54. by the porting layer itself, and cannot be taken over by application
  55. code in this manner. These are the alloca, syscall, and coprocessor
  56. exceptions.
  57. The exception handlers can be written in C, and must follow C calling
  58. convention. Each handler is passed a pointer to an exception frame as
  59. its single argument. The exception frame is created on the stack, and
  60. holds the saved context of the thread that took the exception. If the
  61. handler returns, the context will be restored and the instruction that
  62. caused the exception will be retried. If the handler makes any changes
  63. to the saved state in the exception frame, the changes will be applied
  64. when restoring the context.
  65. Because Xtensa is a configurable architecture, this port supports all user
  66. generated configurations (except restrictions stated in the release notes).
  67. This is accomplished by conditional compilation using macros and functions
  68. defined in the Xtensa HAL (hardware adaptation layer) for your configuration.
  69. Only the relevant parts of this file will be included in your RTOS build.
  70. For example, this file provides interrupt vector templates for all types and
  71. all priority levels, but only the ones in your configuration are built.
  72. NOTES on the use of 'call0' for long jumps instead of 'j':
  73. 1. This file should be assembled with the -mlongcalls option to xt-xcc.
  74. 2. The -mlongcalls compiler option causes 'call0 dest' to be expanded to
  75. a sequence 'l32r a0, dest' 'callx0 a0' which works regardless of the
  76. distance from the call to the destination. The linker then relaxes
  77. it back to 'call0 dest' if it determines that dest is within range.
  78. This allows more flexibility in locating code without the performance
  79. overhead of the 'l32r' literal data load in cases where the destination
  80. is in range of 'call0'. There is an additional benefit in that 'call0'
  81. has a longer range than 'j' due to the target being word-aligned, so
  82. the 'l32r' sequence is less likely needed.
  83. 3. The use of 'call0' with -mlongcalls requires that register a0 not be
  84. live at the time of the call, which is always the case for a function
  85. call but needs to be ensured if 'call0' is used as a jump in lieu of 'j'.
  86. 4. This use of 'call0' is independent of the C function call ABI.
  87. */
  88. #include "xtensa_rtos.h"
  89. #include "esp_private/panic_reason.h"
  90. #include "sdkconfig.h"
  91. #include "soc/soc.h"
  92. #include "xt_asm_utils.h"
  93. /*
  94. --------------------------------------------------------------------------------
  95. In order for backtracing to be able to trace from the pre-exception stack
  96. across to the exception stack (including nested interrupts), we need to create
  97. a pseudo base-save area to make it appear like the exception dispatcher was
  98. triggered by a CALL4 from the pre-exception code. In reality, the exception
  99. dispatcher uses the same window as pre-exception code, and only CALL0s are
  100. used within the exception dispatcher.
  101. To create the pseudo base-save area, we need to store a copy of the pre-exception's
  102. base save area (a0 to a4) below the exception dispatcher's SP. EXCSAVE_x will
  103. be used to store a copy of the SP that points to the interrupted code's exception
  104. frame just in case the exception dispatcher's SP does not point to the exception
  105. frame (which is the case when switching from task to interrupt stack).
  106. Clearing the pseudo base-save area is uncessary as the interrupt dispatcher
  107. will restore the current SP to that of the pre-exception SP.
  108. --------------------------------------------------------------------------------
  109. */
  110. #ifdef CONFIG_FREERTOS_INTERRUPT_BACKTRACE
  111. #define XT_DEBUG_BACKTRACE 1
  112. #endif
  113. /*
  114. --------------------------------------------------------------------------------
  115. Defines used to access _xtos_interrupt_table.
  116. --------------------------------------------------------------------------------
  117. */
  118. #define XIE_HANDLER 0
  119. #define XIE_ARG 4
  120. #define XIE_SIZE 8
  121. /*
  122. Macro get_percpu_entry_for - convert a per-core ID into a multicore entry.
  123. Basically does reg=reg*portNUM_PROCESSORS+current_core_id
  124. Multiple versions here to optimize for specific portNUM_PROCESSORS values.
  125. */
/*
 * Macro get_percpu_entry_for - convert a per-interrupt table index into a
 * per-core entry index: reg = reg * portNUM_PROCESSORS + current_core_id.
 * \reg     : in/out - interrupt number in, per-core table index out
 * \scratch : clobbered as a temporary
 * Specialized variants below avoid the multiply for common core counts.
 */
.macro get_percpu_entry_for reg scratch
#if (portNUM_PROCESSORS == 1)
    /* Single core: entry index equals the interrupt number; nothing to do. */
#elif (portNUM_PROCESSORS == 2)
    /* Optimized 2-core code: reg = reg*2 + core_id in two instructions. */
    getcoreid \scratch
    addx2 \reg,\reg,\scratch
#else
    /* Generalized n-core code. Untested! */
    movi \scratch,portNUM_PROCESSORS
    mull \scratch,\reg,\scratch
    getcoreid \reg
    add \reg,\scratch,\reg
#endif
.endm
  141. /*
  142. --------------------------------------------------------------------------------
  143. Macro extract_msb - return the input with only the highest bit set.
  144. Input : "ain" - Input value, clobbered.
  145. Output : "aout" - Output value, has only one bit set, MSB of "ain".
  146. The two arguments must be different AR registers.
  147. --------------------------------------------------------------------------------
  148. */
/*
 * Macro extract_msb - return, in \aout, the input with ONLY its highest set
 * bit remaining. \ain is clobbered. The two arguments must be different ARs.
 * Method: repeatedly clear the lowest set bit (ain &= ain-1) until one bit
 * (the MSB) remains; at that point aout = MSB-1 (all lower bits set), so
 * aout+1 == MSB alone.
 */
.macro extract_msb aout ain
1:
    addi \aout, \ain, -1 /* aout = ain - 1 */
    and \ain, \ain, \aout /* ain = ain & aout (clears lowest set bit) */
    bnez \ain, 1b /* repeat until ain == 0 */
    addi \aout, \aout, 1 /* return aout + 1 (== original MSB) */
.endm
  156. /*
  157. --------------------------------------------------------------------------------
  158. Macro dispatch_c_isr - dispatch interrupts to user ISRs.
  159. This will dispatch to user handlers (if any) that are registered in the
  160. XTOS dispatch table (_xtos_interrupt_table). These handlers would have
  161. been registered by calling _xtos_set_interrupt_handler(). There is one
  162. exception - the timer interrupt used by the OS will not be dispatched
  163. to a user handler - this must be handled by the caller of this macro.
  164. Level triggered and software interrupts are automatically deasserted by
  165. this code.
  166. ASSUMPTIONS:
  167. -- PS.INTLEVEL is set to "level" at entry
  168. -- PS.EXCM = 0, C calling enabled
  169. NOTE: For CALL0 ABI, a12-a15 have not yet been saved.
  170. NOTE: This macro will use registers a0 and a2-a6. The arguments are:
  171. level -- interrupt level
  172. mask -- interrupt bitmask for this level
  173. --------------------------------------------------------------------------------
  174. */
/*
 * Macro dispatch_c_isr - dispatch pending interrupts at \level to user ISRs
 * registered in _xt_interrupt_table. Loops until no enabled interrupt at
 * this level is pending. Uses a0 and a2-a6 (a12 additionally for CALL0 ABI
 * and multicore indexing). See the block comment above for assumptions.
 */
.macro dispatch_c_isr level mask

#ifdef CONFIG_PM_TRACE
    movi a6, 0 /* = ESP_PM_TRACE_IDLE */
    getcoreid a7
    call4 esp_pm_trace_exit
#endif // CONFIG_PM_TRACE

    /* Get mask of pending, enabled interrupts at this level into a2. */
.L_xt_user_int_\level :
    rsr a2, INTENABLE /* a2 = enabled interrupt mask */
    rsr a3, INTERRUPT /* a3 = pending interrupt mask */
    movi a4, \mask /* a4 = interrupts belonging to this level */
    and a2, a2, a3
    and a2, a2, a4 /* a2 = pending & enabled & this-level */
    beqz a2, 9f /* nothing to do */

    /* This bit of code provides a nice debug backtrace in the debugger.
       It does take a few more instructions, so undef XT_DEBUG_BACKTRACE
       if you want to save the cycles.
       At this point, the exception frame should have been allocated and
       filled, and current sp points to the interrupt stack (for non-nested
       interrupts) or below the allocated exception frame (for nested
       interrupts). Copy the pre-exception's base save area below the
       current SP so the debugger can unwind across the interrupt.
    */
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
    rsr a0, EXCSAVE_1 + \level - 1 /* Get exception frame pointer stored in EXCSAVE_x */
    l32i a3, a0, XT_STK_A0 /* Copy pre-exception a0 (return address) */
    s32e a3, a1, -16
    l32i a3, a0, XT_STK_A1 /* Copy pre-exception a1 (stack pointer) */
    s32e a3, a1, -12
    /* Backtracing only needs a0 and a1, no need to create full base save
       area. Also change current frame's return address to point to the
       pre-exception's last run instruction.
    */
    rsr a0, EPC_1 + \level - 1 /* return address */
    movi a4, 0xC0000000 /* constant with top 2 bits set (call size) */
    or a0, a0, a4 /* set top 2 bits */
    addx2 a0, a4, a0 /* clear top bit -- simulating call4 size */
#endif
#endif

#ifdef CONFIG_PM_ENABLE
    call4 esp_pm_impl_isr_hook
#endif

#ifdef XT_INTEXC_HOOKS
    /* Call interrupt hook if present to (pre)handle interrupts. */
    movi a4, _xt_intexc_hooks
    l32i a4, a4, \level << 2 /* hook table is indexed by level */
    beqz a4, 2f /* no hook installed at this level */
#ifdef __XTENSA_CALL0_ABI__
    callx0 a4
    beqz a2, 9f /* hook handled everything */
#else
    mov a6, a2
    callx4 a4
    beqz a6, 9f /* hook handled everything */
    mov a2, a6
#endif
2:
#endif

    /* Now look up in the dispatch table and call user ISR if any. */
    /* If multiple bits are set then MSB has highest priority. */
    extract_msb a4, a2 /* a4 = MSB of a2, a2 trashed */

#ifdef XT_USE_SWPRI
    /* Enable all interrupts at this level that are numerically higher
       than the one we just selected, since they are treated as higher
       priority (software prioritization).
    */
    movi a3, \mask /* a3 = all interrupts at this level */
    add a2, a4, a4 /* a2 = a4 << 1 */
    addi a2, a2, -1 /* a2 = mask of 1's <= a4 bit */
    and a2, a2, a3 /* a2 = mask of all bits <= a4 at this level */
    movi a3, _xt_intdata
    l32i a6, a3, 4 /* a6 = _xt_vpri_mask */
    neg a2, a2
    addi a2, a2, -1 /* a2 = mask to apply */
    and a5, a6, a2 /* mask off all bits <= a4 bit */
    s32i a5, a3, 4 /* update _xt_vpri_mask */
    rsr a3, INTENABLE
    and a3, a3, a2 /* mask off all bits <= a4 bit */
    wsr a3, INTENABLE
    rsil a3, \level - 1 /* lower interrupt level by 1 */
#endif

#ifdef XT_RTOS_TIMER_INT
    movi a3, XT_TIMER_INTEN /* a3 = timer interrupt bit */
    wsr a4, INTCLEAR /* clear sw or edge-triggered interrupt */
    beq a3, a4, 7f /* if timer interrupt then skip table */
#else
    wsr a4, INTCLEAR /* clear sw or edge-triggered interrupt */
#endif // XT_RTOS_TIMER_INT

    find_ms_setbit a3, a4, a3, 0 /* a3 = interrupt number */
    get_percpu_entry_for a3, a12 /* a3 = per-core table index */
    movi a4, _xt_interrupt_table
    addx8 a3, a3, a4 /* a3 = address of interrupt table entry (XIE_SIZE==8) */
    l32i a4, a3, XIE_HANDLER /* a4 = handler address */

#ifdef __XTENSA_CALL0_ABI__
    mov a12, a6 /* save in callee-saved reg */
    l32i a2, a3, XIE_ARG /* a2 = handler arg */
    callx0 a4 /* call handler */
    mov a2, a12
#else
    mov a2, a6 /* save in windowed reg */
    l32i a6, a3, XIE_ARG /* a6 = handler arg */
    callx4 a4 /* call handler */
#endif

#ifdef XT_USE_SWPRI
    j 8f /* restore virtual priority state */
#else
    j .L_xt_user_int_\level /* check for more interrupts */
#endif

#ifdef XT_RTOS_TIMER_INT
7:
    .ifeq XT_TIMER_INTPRI - \level
.L_xt_user_int_timer_\level :
    /*
    Interrupt handler for the RTOS tick timer if at this level.
    We'll be reading the interrupt state again after this call
    so no need to preserve any registers except a6 (vpri_mask).
    */
#ifdef __XTENSA_CALL0_ABI__
    mov a12, a6
    call0 XT_RTOS_TIMER_INT
    mov a2, a12
#else
    mov a2, a6
    call4 XT_RTOS_TIMER_INT
#endif
    .endif
#endif // XT_RTOS_TIMER_INT

#ifdef XT_USE_SWPRI
    j 8f /* restore virtual priority state */
#else
    j .L_xt_user_int_\level /* check for more interrupts */
#endif

#ifdef XT_USE_SWPRI
8:
    /* Restore old value of _xt_vpri_mask from a2. Also update INTENABLE from
       virtual _xt_intenable which _could_ have changed during interrupt
       processing. */
    movi a3, _xt_intdata
    l32i a4, a3, 0 /* a4 = _xt_intenable */
    s32i a2, a3, 4 /* update _xt_vpri_mask */
    and a4, a4, a2 /* a4 = masked intenable */
    wsr a4, INTENABLE /* update INTENABLE */
#endif

9:
    /* done */
.endm
    .section .rodata, "a"
    .align 4

/*
--------------------------------------------------------------------------------
  Hooks to dynamically install handlers for exceptions and interrupts.
  Allows automated regression frameworks to install handlers per test.
  Consists of an array of function pointers indexed by interrupt level,
  with index 0 containing the entry for user exceptions.
  Initialized with all 0s, meaning no handler is installed at each level.
  See comment in xtensa_rtos.h for more details.
  *WARNING* This array is shared by all CPUs: installing a hook for one CPU
  installs it for all others as well!
--------------------------------------------------------------------------------
*/
#ifdef XT_INTEXC_HOOKS
    .data
    .global _xt_intexc_hooks
    .type _xt_intexc_hooks,@object
    .align 4
_xt_intexc_hooks:
    .fill XT_INTEXC_HOOK_NUM, 4, 0 /* one 4-byte pointer slot per level, zero = no hook */
#endif
  343. /*
  344. --------------------------------------------------------------------------------
  345. EXCEPTION AND LEVEL 1 INTERRUPT VECTORS AND LOW LEVEL HANDLERS
  346. (except window exception vectors).
  347. Each vector goes at a predetermined location according to the Xtensa
  348. hardware configuration, which is ensured by its placement in a special
  349. section known to the Xtensa linker support package (LSP). It performs
  350. the minimum necessary before jumping to the handler in the .text section.
  351. The corresponding handler goes in the normal .text section. It sets up
  352. the appropriate stack frame, saves a few vector-specific registers and
  353. calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
  354. and enter the RTOS, then sets up a C environment. It then calls the
  355. user's interrupt handler code (which may be coded in C) and finally
  356. calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.
  357. While XT_RTOS_INT_EXIT does not return directly to the interruptee,
  358. eventually the RTOS scheduler will want to dispatch the interrupted
  359. task or handler. The scheduler will return to the exit point that was
  360. saved in the interrupt stack frame at XT_STK_EXIT.
  361. --------------------------------------------------------------------------------
  362. */
  363. /*
  364. --------------------------------------------------------------------------------
  365. Debug Exception.
  366. --------------------------------------------------------------------------------
  367. */
  368. #if XCHAL_HAVE_DEBUG
  369. .begin literal_prefix .DebugExceptionVector
  370. .section .DebugExceptionVector.text, "ax"
  371. .global _DebugExceptionVector
  372. .align 4
  373. .global xt_debugexception
  374. _DebugExceptionVector:
  375. wsr a0, EXCSAVE+XCHAL_DEBUGLEVEL /* preserve a0 */
  376. J xt_debugexception /* load exception handler */
  377. .end literal_prefix
  378. .global xt_debugexception
  379. .weak xt_debugexception
  380. .set xt_debugexception, _xt_debugexception
  381. .section .iram1,"ax"
  382. .type _xt_debugexception,@function
  383. .align 4
  384. _xt_debugexception:
  385. #if (CONFIG_ESP32_ECO3_CACHE_LOCK_FIX && CONFIG_BTDM_CTRL_HLI)
  386. s32i a0, sp, XT_STK_EXIT
  387. #define XT_DEBUGCAUSE_DI (5)
  388. getcoreid a0
  389. #if (CONFIG_BTDM_CTRL_PINNED_TO_CORE == PRO_CPU_NUM)
  390. beqz a0, 1f
  391. #else
  392. bnez a0, 1f
  393. #endif
  394. rsr a0, DEBUGCAUSE
  395. extui a0, a0, XT_DEBUGCAUSE_DI, 1
  396. bnez a0, _xt_debug_di_exc
  397. 1:
  398. #endif //(CONFIG_ESP32_ECO3_CACHE_LOCK_FIX && CONFIG_BTDM_CTRL_HLI)
  399. movi a0,PANIC_RSN_DEBUGEXCEPTION
  400. wsr a0,EXCCAUSE
  401. /* _xt_panic assumes a level 1 exception. As we're
  402. crashing anyhow, copy EPC & EXCSAVE from DEBUGLEVEL
  403. to level 1. */
  404. rsr a0,(EPC + XCHAL_DEBUGLEVEL)
  405. wsr a0,EPC_1
  406. rsr a0,(EXCSAVE + XCHAL_DEBUGLEVEL)
  407. wsr a0,EXCSAVE_1
  408. #if CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
  409. J _xt_panic_gdbstub /* For gdbstub we make jump */
  410. #else
  411. call0 _xt_panic /* does not return */
  412. #endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
  413. rfi XCHAL_DEBUGLEVEL
  414. #if (CONFIG_ESP32_ECO3_CACHE_LOCK_FIX && CONFIG_BTDM_CTRL_HLI)
  415. .align 4
  416. _xt_debug_di_exc:
  417. /*
  418. The delay time can be calculated by the following formula:
  419. T = ceil(0.25 + max(t1, t2)) us
  420. t1 = 80 / f1, t2 = (1 + 14/N) * 20 / f2
  421. f1: PSRAM access frequency, unit: MHz.
  422. f2: Flash access frequency, unit: MHz.
  423. When flash is slow/fast read, N = 1.
  424. When flash is DOUT/DIO read, N = 2.
  425. When flash is QOUT/QIO read, N = 4.
  426. And after testing, when CPU frequency is 240 MHz, it will take 1us to loop 27 times.
  427. */
  428. #if defined(CONFIG_ESPTOOLPY_FLASHMODE_QIO) || defined(CONFIG_ESPTOOLPY_FLASHMODE_QOUT)
  429. # if defined(CONFIG_ESPTOOLPY_FLASHFREQ_80M) && defined(CONFIG_SPIRAM_SPEED_80M)
  430. movi a0, 54
  431. # elif defined(CONFIG_ESPTOOLPY_FLASHFREQ_80M) && defined(CONFIG_SPIRAM_SPEED_40M)
  432. movi a0, 81
  433. # elif defined(CONFIG_ESPTOOLPY_FLASHFREQ_40M) && defined(CONFIG_SPIRAM_SPEED_40M)
  434. movi a0, 81
  435. # elif defined(CONFIG_ESPTOOLPY_FLASHFREQ_26M) && defined(CONFIG_SPIRAM_SPEED_40M)
  436. movi a0, 108
  437. # else
  438. movi a0, 135
  439. # endif
  440. #elif defined(CONFIG_ESPTOOLPY_FLASHMODE_DIO) || defined(CONFIG_ESPTOOLPY_FLASHMODE_DOUT)
  441. # if defined(CONFIG_ESPTOOLPY_FLASHFREQ_80M) && defined(CONFIG_SPIRAM_SPEED_80M)
  442. movi a0, 81
  443. # elif defined(CONFIG_ESPTOOLPY_FLASHFREQ_80M) && defined(CONFIG_SPIRAM_SPEED_40M)
  444. movi a0, 81
  445. # elif defined(CONFIG_ESPTOOLPY_FLASHFREQ_40M) && defined(CONFIG_SPIRAM_SPEED_40M)
  446. movi a0, 135
  447. # elif defined(CONFIG_ESPTOOLPY_FLASHFREQ_26M) && defined(CONFIG_SPIRAM_SPEED_40M)
  448. movi a0, 189
  449. # else
  450. movi a0, 243
  451. # endif
  452. #else
  453. movi a0, 243
  454. #endif
  455. 1: addi a0, a0, -1 /* delay_us(N) */
  456. .rept 4
  457. nop
  458. .endr
  459. bnez a0, 1b
  460. rsr a0, EXCSAVE+XCHAL_DEBUGLEVEL
  461. rfi XCHAL_DEBUGLEVEL
  462. #endif //(CONFIG_ESP32_ECO3_CACHE_LOCK_FIX && CONFIG_BTDM_CTRL_HLI)
  463. #endif // XCHAL_HAVE_DEBUG
  464. /*
  465. --------------------------------------------------------------------------------
  466. Double Exception.
  467. Double exceptions are not a normal occurrence. They indicate a bug of some kind.
  468. --------------------------------------------------------------------------------
  469. */
#ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR

    .begin literal_prefix .DoubleExceptionVector
    .section .DoubleExceptionVector.text, "ax"
    .global _DoubleExceptionVector
    .align 4

/* Double exceptions indicate a bug (exception taken while EXCM set);
   break into the debugger if one is attached, then panic. */
_DoubleExceptionVector:
#if XCHAL_HAVE_DEBUG
    break 1, 4 /* unhandled double exception */
#endif
    movi a0,PANIC_RSN_DOUBLEEXCEPTION
    wsr a0,EXCCAUSE
    call0 _xt_panic /* does not return */
    rfde /* make a0 point here not later */
    .end literal_prefix

#endif /* XCHAL_DOUBLEEXC_VECTOR_VADDR */
  485. /*
  486. --------------------------------------------------------------------------------
  487. Kernel Exception (including Level 1 Interrupt from kernel mode).
  488. --------------------------------------------------------------------------------
  489. */
    .begin literal_prefix .KernelExceptionVector
    .section .KernelExceptionVector.text, "ax"
    .global _KernelExceptionVector
    .align 4

/* Kernel-mode exception (including level 1 interrupt taken from kernel
   mode). Not expected in this port; treated as fatal. */
_KernelExceptionVector:
    wsr a0, EXCSAVE_1 /* preserve a0 */
    call0 _xt_kernel_exc /* kernel exception handler */
    /* never returns here - call0 is used as a jump (see note at top) */
    .end literal_prefix

    .section .iram1,"ax"
    .align 4

_xt_kernel_exc:
#if XCHAL_HAVE_DEBUG
    break 1, 0 /* unhandled kernel exception */
#endif
    movi a0,PANIC_RSN_KERNELEXCEPTION
    wsr a0,EXCCAUSE
    call0 _xt_panic /* does not return */
    rfe /* make a0 point here not there */
  509. /*
  510. --------------------------------------------------------------------------------
  511. User Exception (including Level 1 Interrupt from user mode).
  512. --------------------------------------------------------------------------------
  513. */
    .begin literal_prefix .UserExceptionVector
    .section .UserExceptionVector.text, "ax"
    .global _UserExceptionVector
    .type _UserExceptionVector,@function
    .align 4

/* User-mode exception (including level 1 interrupt from user mode): save
   a0 and long-jump to the full handler in .iram1. */
_UserExceptionVector:
    wsr a0, EXCSAVE_1 /* preserve a0 */
    call0 _xt_user_exc /* user exception handler */
    /* never returns here - call0 is used as a jump (see note at top) */
    .end literal_prefix
  524. /*
  525. --------------------------------------------------------------------------------
  526. Insert some waypoints for jumping beyond the signed 8-bit range of
conditional branch instructions, so the conditional branches to specific
  528. exception handlers are not taken in the mainline. Saves some cycles in the
  529. mainline.
  530. --------------------------------------------------------------------------------
  531. */
#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
.global LoadStoreErrorHandler
.global AlignmentErrorHandler
#endif
.section .iram1,"ax"
#if XCHAL_HAVE_WINDOWED
.align 4
/* Waypoint: long-range jump to the alloca (MOVSP) exception handler. */
_xt_to_alloca_exc:
call0 _xt_alloca_exc /* in window vectors section */
/* never returns here - call0 is used as a jump (see note at top) */
#endif
.align 4
/* Waypoint: long-range jump to the syscall exception handler. */
_xt_to_syscall_exc:
call0 _xt_syscall_exc
/* never returns here - call0 is used as a jump (see note at top) */
#if XCHAL_CP_NUM > 0
.align 4
/* Waypoint: long-range jump to the coprocessor-disabled exception handler. */
_xt_to_coproc_exc:
call0 _xt_coproc_exc
/* never returns here - call0 is used as a jump (see note at top) */
#endif
#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
.align 4
/* Waypoint: try the IRAM byte-access load/store emulation handler. */
_call_loadstore_handler:
call0 LoadStoreErrorHandler
/* This will return only if wrong opcode or address out of range */
j .LS_exit
.align 4
/* Waypoint: try the IRAM byte-access alignment emulation handler. */
_call_alignment_handler:
call0 AlignmentErrorHandler
/* This will return only if wrong opcode or address out of range */
addi a0, a0, 1 /* undo the -1 applied to a0 (EXCCAUSE) before this dispatch */
j .LS_exit
#endif
  566. /*
  567. --------------------------------------------------------------------------------
  568. User exception handler.
  569. --------------------------------------------------------------------------------
  570. */
.type _xt_user_exc,@function
.align 4
/*
User exception dispatcher. On entry only a0 has been saved (in EXCSAVE_1).
Routes level-1 interrupts to _xt_lowint1, special causes (coprocessor,
alloca, syscall, IRAM byte-access emulation) to their dedicated handlers,
and everything else to the table-driven C exception handlers.
*/
_xt_user_exc:
/* If level 1 interrupt then jump to the dispatcher */
rsr a0, EXCCAUSE
bnei a0, EXCCAUSE_LEVEL1INTERRUPT, _xt_handle_exc
j _xt_lowint1
_xt_handle_exc:
/* Handle any coprocessor exceptions. Rely on the fact that exception
numbers above EXCCAUSE_CP0_DISABLED all relate to the coprocessors.
*/
#if XCHAL_CP_NUM > 0
bgeui a0, EXCCAUSE_CP0_DISABLED, _xt_to_coproc_exc
#endif
/* Handle alloca and syscall exceptions */
#if XCHAL_HAVE_WINDOWED
beqi a0, EXCCAUSE_ALLOCA, _xt_to_alloca_exc
#endif
beqi a0, EXCCAUSE_SYSCALL, _xt_to_syscall_exc
#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
beqi a0, EXCCAUSE_LOAD_STORE_ERROR, _call_loadstore_handler
/* beqi only encodes small constants: test (EXCCAUSE - 1) == 8, i.e.
   cause 9 (presumably load/store alignment - confirm against core ISA). */
addi a0, a0, -1
beqi a0, 8, _call_alignment_handler
addi a0, a0, 1 /* restore a0 = EXCCAUSE */
.LS_exit:
#endif
/* Handle all other exceptions. All can have user-defined handlers. */
/* NOTE: we'll stay on the user stack for exception handling. */
/* Allocate exception frame and save minimal context. */
mov a0, sp
addi sp, sp, -XT_STK_FRMSZ
s32i a0, sp, XT_STK_A1
#if XCHAL_HAVE_WINDOWED
s32e a0, sp, -12 /* for debug backtrace */
#endif
rsr a0, PS /* save interruptee's PS */
s32i a0, sp, XT_STK_PS
rsr a0, EPC_1 /* save interruptee's PC */
s32i a0, sp, XT_STK_PC
rsr a0, EXCSAVE_1 /* save interruptee's a0 */
s32i a0, sp, XT_STK_A0
#if XCHAL_HAVE_WINDOWED
s32e a0, sp, -16 /* for debug backtrace */
#endif
s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
call0 _xt_context_save
/* Save exc cause and vaddr into exception frame */
rsr a0, EXCCAUSE
s32i a0, sp, XT_STK_EXCCAUSE
rsr a0, EXCVADDR
s32i a0, sp, XT_STK_EXCVADDR
/* Set up PS for C, reenable debug and NMI interrupts, and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__
movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 2) | PS_UM
#else
movi a0, PS_INTLEVEL(XCHAL_DEBUGLEVEL - 2) | PS_UM | PS_WOE
#endif
wsr a0, PS
/*
Create pseudo base save area. At this point, sp is still pointing to the
allocated and filled exception stack frame.
*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
l32i a3, sp, XT_STK_A0 /* Copy pre-exception a0 (return address) */
s32e a3, sp, -16
l32i a3, sp, XT_STK_A1 /* Copy pre-exception a1 (stack pointer) */
s32e a3, sp, -12
rsr a0, EPC_1 /* return address for debug backtrace */
movi a5, 0xC0000000 /* constant with top 2 bits set (call size) */
rsync /* wait for WSR.PS to complete */
or a0, a0, a5 /* set top 2 bits */
addx2 a0, a5, a0 /* clear top bit -- thus simulating call4 size */
#else
rsync /* wait for WSR.PS to complete */
#endif
#endif
rsr a2, EXCCAUSE /* recover exc cause */
#ifdef XT_INTEXC_HOOKS
/*
Call exception hook to pre-handle exceptions (if installed).
Pass EXCCAUSE in a2, and check result in a2 (if -1, skip default handling).
*/
movi a4, _xt_intexc_hooks
l32i a4, a4, 0 /* user exception hook index 0 */
beqz a4, 1f
.Ln_xt_user_exc_call_hook:
#ifdef __XTENSA_CALL0_ABI__
callx0 a4
beqi a2, -1, .L_xt_user_done
#else
mov a6, a2
callx4 a4
beqi a6, -1, .L_xt_user_done
mov a2, a6
#endif
1:
#endif
rsr a2, EXCCAUSE /* recover exc cause */
/* Look up the registered handler for this cause and call it with the
   exception frame pointer as its argument. */
movi a3, _xt_exception_table
get_percpu_entry_for a2, a4
addx4 a4, a2, a3 /* a4 = address of exception table entry */
l32i a4, a4, 0 /* a4 = handler address */
#ifdef __XTENSA_CALL0_ABI__
mov a2, sp /* a2 = pointer to exc frame */
callx0 a4 /* call handler */
#else
mov a6, sp /* a6 = pointer to exc frame */
callx4 a4 /* call handler */
#endif
.L_xt_user_done:
/* Restore context and return */
call0 _xt_context_restore
l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
wsr a0, PS
l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
wsr a0, EPC_1
l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
l32i sp, sp, XT_STK_A1 /* remove exception frame */
rsync /* ensure PS and EPC written */
rfe /* PS.EXCM is cleared */
  693. /*
  694. --------------------------------------------------------------------------------
  695. Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
  696. on entry and used to return to a thread or interrupted interrupt handler.
  697. --------------------------------------------------------------------------------
  698. */
.global _xt_user_exit
.type _xt_user_exit,@function
.align 4
/*
Common exit for level-1 dispatch: restore the PS/PC/a0/sp values saved in
the interrupt stack frame and return to the interrupted context with RFE.
The RTOS jumps here via the address stored at XT_STK_EXIT.
*/
_xt_user_exit:
l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
wsr a0, PS
l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
wsr a0, EPC_1
l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
rsync /* ensure PS and EPC written */
rfe /* PS.EXCM is cleared */
  711. /*
  712. --------------------------------------------------------------------------------
  713. Syscall Exception Handler (jumped to from User Exception Handler).
  714. Syscall 0 is required to spill the register windows (no-op in Call 0 ABI).
  715. Only syscall 0 is handled here. Other syscalls return -1 to caller in a2.
  716. --------------------------------------------------------------------------------
  717. */
.section .iram1,"ax"
.type _xt_syscall_exc,@function
.align 4
/*
Syscall exception handler (reached from _xt_user_exc via waypoint).
Syscall 0 spills the register windows (a no-op under Call0 ABI); any other
syscall number returns -1 to the caller in a2. EPC_1 is advanced past the
3-byte 'syscall' instruction, honoring zero-overhead loop-back semantics.
*/
_xt_syscall_exc:
#ifdef __XTENSA_CALL0_ABI__
/*
Save minimal regs for scratch. Syscall 0 does nothing in Call0 ABI.
Use a minimal stack frame (16B) to save A2 & A3 for scratch.
PS.EXCM could be cleared here, but unlikely to improve worst-case latency.
rsr a0, PS
addi a0, a0, -PS_EXCM_MASK
wsr a0, PS
*/
addi sp, sp, -16
s32i a2, sp, 8
s32i a3, sp, 12
#else /* Windowed ABI */
/*
Save necessary context and spill the register windows.
PS.EXCM is still set and must remain set until after the spill.
Reuse context save function though it saves more than necessary.
For this reason, a full interrupt stack frame is allocated.
*/
addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
call0 _xt_context_save
#endif
/*
Grab the interruptee's PC and skip over the 'syscall' instruction.
If it's at the end of a zero-overhead loop and it's not on the last
iteration, decrement loop counter and skip to beginning of loop.
*/
rsr a2, EPC_1 /* a2 = PC of 'syscall' */
addi a3, a2, 3 /* ++PC */
#if XCHAL_HAVE_LOOPS
rsr a0, LEND /* if (PC == LEND */
bne a3, a0, 1f
rsr a0, LCOUNT /* && LCOUNT != 0) */
beqz a0, 1f /* { */
addi a0, a0, -1 /* --LCOUNT */
rsr a3, LBEG /* PC = LBEG */
wsr a0, LCOUNT /* } */
#endif
1: wsr a3, EPC_1 /* update PC */
/* Restore interruptee's context and return from exception. */
#ifdef __XTENSA_CALL0_ABI__
l32i a2, sp, 8
l32i a3, sp, 12
addi sp, sp, 16
#else
call0 _xt_context_restore
addi sp, sp, XT_STK_FRMSZ
#endif
/* a2 still holds the pre-increment PC... actually the original a2 was
   restored above; here a2 is the syscall number register per the windowed
   convention - NOTE(review): a2 is the restored caller a2 (syscall arg). */
movi a0, -1
movnez a2, a0, a2 /* return -1 if not syscall 0 */
rsr a0, EXCSAVE_1
rfe
  776. /*
  777. --------------------------------------------------------------------------------
  778. Co-Processor Exception Handler (jumped to from User Exception Handler).
  779. These exceptions are generated by co-processor instructions, which are only
  780. allowed in thread code (not in interrupts or kernel code). This restriction is
  781. deliberately imposed to reduce the burden of state-save/restore in interrupts.
  782. --------------------------------------------------------------------------------
  783. */
#if XCHAL_CP_NUM > 0
.section .rodata, "a"
/* Offset to CP n save area in thread's CP save area. */
.global _xt_coproc_sa_offset
.type _xt_coproc_sa_offset,@object
.align 16 /* minimize crossing cache boundaries */
_xt_coproc_sa_offset:
.word XT_CP0_SA, XT_CP1_SA, XT_CP2_SA, XT_CP3_SA
.word XT_CP4_SA, XT_CP5_SA, XT_CP6_SA, XT_CP7_SA
/* Bitmask for CP n's CPENABLE bit. */
.type _xt_coproc_mask,@object
.align 16,,8 /* try to keep it all in one cache line */
.set i, 0
_xt_coproc_mask:
.rept XCHAL_CP_MAX
.long (i<<16) | (1<<i) // upper 16-bits = i, lower = bitmask
.set i, i+1
.endr
.data
/* Owner thread of CP n, identified by thread's CP save area (0 = unowned).
One array of XCHAL_CP_MAX words per core (portNUM_PROCESSORS arrays total). */
.global _xt_coproc_owner_sa
.type _xt_coproc_owner_sa,@object
.align 16,,XCHAL_CP_MAX<<2 /* minimize crossing cache boundaries */
_xt_coproc_owner_sa:
.space (XCHAL_CP_MAX * portNUM_PROCESSORS) << 2
/* Spinlock per core for accessing _xt_coproc_owner_sa array
 *
 * 0 = Spinlock available
 * PRID = Spinlock taken
 *
 * The lock provides mutual exclusion for accessing the _xt_coproc_owner_sa array.
 * The array can be modified by multiple cores simultaneously (via _xt_coproc_exc
 * and _xt_coproc_release). Therefore, this spinlock is defined to ensure thread
 * safe access of the _xt_coproc_owner_sa array.
 */
#if portNUM_PROCESSORS > 1
.global _xt_coproc_owner_sa_lock
.type _xt_coproc_owner_sa_lock,@object
.align 16 /* minimize crossing cache boundaries */
_xt_coproc_owner_sa_lock:
.space 4
#endif /* portNUM_PROCESSORS > 1 */
.section .iram1,"ax"
.align 4
/* Long-range branch waypoints for the conditional branches in
   _xt_coproc_exc below (beyond the signed 8-bit branch range). */
.L_goto_invalid:
j .L_xt_coproc_invalid /* not in a thread (invalid) */
.align 4
.L_goto_done:
j .L_xt_coproc_done
  833. /*
  834. --------------------------------------------------------------------------------
  835. Coprocessor exception handler.
  836. At entry, only a0 has been saved (in EXCSAVE_1).
  837. --------------------------------------------------------------------------------
  838. */
.type _xt_coproc_exc,@function
.align 4
/*
Coprocessor-disabled exception handler (reached from _xt_user_exc).
Lazily switches CP n state between threads: enables the CP's bit in
CPENABLE, saves the old owner's CP state (if it was actively using the CP),
restores the new owner's state (if previously saved), and updates the
per-core _xt_coproc_owner_sa bookkeeping under a spinlock on multicore.
At entry, only a0 has been saved (in EXCSAVE_1).
*/
_xt_coproc_exc:
/* Allocate interrupt stack frame and save minimal context. */
mov a0, sp /* sp == a1 */
addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
#if XCHAL_HAVE_WINDOWED
s32e a0, sp, -12 /* for debug backtrace */
#endif
rsr a0, PS /* save interruptee's PS */
s32i a0, sp, XT_STK_PS
rsr a0, EPC_1 /* save interruptee's PC */
s32i a0, sp, XT_STK_PC
rsr a0, EXCSAVE_1 /* save interruptee's a0 */
s32i a0, sp, XT_STK_A0
#if XCHAL_HAVE_WINDOWED
s32e a0, sp, -16 /* for debug backtrace */
#endif
movi a0, _xt_user_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
rsr a0, EXCCAUSE
s32i a5, sp, XT_STK_A5 /* save a5 */
addi a5, a0, -EXCCAUSE_CP0_DISABLED /* a5 = CP index */
/* Save a few more of interruptee's registers (a5 was already saved). */
s32i a2, sp, XT_STK_A2
s32i a3, sp, XT_STK_A3
s32i a4, sp, XT_STK_A4
#if portNUM_PROCESSORS > 1
/* If multicore, we must save two more interruptee's register to use as
 * scratch when taking/releasing the _xt_coproc_owner_sa_lock spinlock. */
s32i a6, sp, XT_STK_A6
s32i a7, sp, XT_STK_A7
#endif /* portNUM_PROCESSORS > 1 */
s32i a15, sp, XT_STK_A15
/* Call the RTOS coprocessor exception hook */
call0 XT_RTOS_CP_EXC_HOOK
/* Get co-processor state save area of new owner thread. */
call0 XT_RTOS_CP_STATE /* a15 = new owner's save area */
#if !CONFIG_FREERTOS_FPU_IN_ISR
beqz a15, .L_goto_invalid /* not in a thread (invalid) */
#endif
/* Enable the co-processor's bit in CPENABLE. */
movi a0, _xt_coproc_mask
rsr a4, CPENABLE /* a4 = CPENABLE */
addx4 a0, a5, a0 /* a0 = &_xt_coproc_mask[n] */
l32i a0, a0, 0 /* a0 = (n << 16) | (1 << n) */
extui a2, a0, 0, 16 /* coprocessor bitmask portion */
or a4, a4, a2 /* a4 = CPENABLE | (1 << n) */
wsr a4, CPENABLE
/* Grab the xt_coproc_owner_sa owner array for current core */
getcoreid a3 /* a3 = current core ID */
movi a2, XCHAL_CP_MAX << 2 /* a2 = size of an owner array */
mull a2, a2, a3 /* a2 = offset to the owner array of the current core*/
movi a3, _xt_coproc_owner_sa /* a3 = base of all owner arrays */
add a3, a3, a2 /* a3 = base of owner array of the current core */
#if portNUM_PROCESSORS > 1
/* If multicore, we must also acquire the _xt_coproc_owner_sa_lock spinlock
 * to ensure thread safe access of _xt_coproc_owner_sa between cores. */
spinlock_take a6 a7 _xt_coproc_owner_sa_lock
#endif /* portNUM_PROCESSORS > 1 */
/* Get old coprocessor owner thread (save area ptr) and assign new one. */
addx4 a3, a5, a3 /* a3 = &_xt_coproc_owner_sa[n] */
l32i a2, a3, 0 /* a2 = old owner's save area */
s32i a15, a3, 0 /* _xt_coproc_owner_sa[n] = new */
rsync /* ensure wsr.CPENABLE is complete */
#if portNUM_PROCESSORS > 1
/* Release previously taken spinlock */
spinlock_release a6 a7 _xt_coproc_owner_sa_lock
#endif /* portNUM_PROCESSORS > 1 */
/* Only need to context switch if new owner != old owner. */
/* If float is necessary on ISR, we need to remove this check */
/* below, because on restoring from ISR we may have new == old condition used
 * to force cp restore to next thread
 * Todo: IDF-6418
 */
#if !CONFIG_FREERTOS_FPU_IN_ISR
bne a15, a2, .L_switch_context
j .L_goto_done /* new owner == old, we're done */
.L_switch_context:
#endif
/* If no old owner then nothing to save. */
beqz a2, .L_check_new
/* If old owner not actively using CP then nothing to save. */
l16ui a4, a2, XT_CPENABLE /* a4 = old owner's CPENABLE */
bnone a4, a0, .L_check_new /* old owner not using CP */
.L_save_old:
/* Save old owner's coprocessor state. */
movi a5, _xt_coproc_sa_offset
/* Mark old owner state as no longer active (CPENABLE bit n clear). */
xor a4, a4, a0 /* clear CP bit in CPENABLE */
s16i a4, a2, XT_CPENABLE /* update old owner's CPENABLE */
extui a4, a0, 16, 5 /* a4 = CP index = n */
addx4 a5, a4, a5 /* a5 = &_xt_coproc_sa_offset[n] */
/* Mark old owner state as saved (CPSTORED bit n set). */
l16ui a4, a2, XT_CPSTORED /* a4 = old owner's CPSTORED */
l32i a5, a5, 0 /* a5 = XT_CP[n]_SA offset */
or a4, a4, a0 /* set CP in old owner's CPSTORED */
s16i a4, a2, XT_CPSTORED /* update old owner's CPSTORED */
l32i a2, a2, XT_CP_ASA /* ptr to actual (aligned) save area */
extui a3, a0, 16, 5 /* a3 = CP index = n */
add a2, a2, a5 /* a2 = old owner's area for CP n */
/*
The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
It is theoretically possible for Xtensa processor designers to write TIE
that causes more address registers to be affected, but it is generally
unlikely. If that ever happens, more registers needs to be saved/restored
around this macro invocation, and the value in a15 needs to be recomputed.
*/
xchal_cpi_store_funcbody
.L_check_new:
/* Check if any state has to be restored for new owner. */
/* NOTE: a15 = new owner's save area, cannot be zero when we get here. */
beqz a15, .L_xt_coproc_done
l16ui a3, a15, XT_CPSTORED /* a3 = new owner's CPSTORED */
movi a4, _xt_coproc_sa_offset
bnone a3, a0, .L_check_cs /* full CP not saved, check callee-saved */
xor a3, a3, a0 /* CPSTORED bit is set, clear it */
s16i a3, a15, XT_CPSTORED /* update new owner's CPSTORED */
/* Adjust new owner's save area pointers to area for CP n. */
extui a3, a0, 16, 5 /* a3 = CP index = n */
addx4 a4, a3, a4 /* a4 = &_xt_coproc_sa_offset[n] */
l32i a4, a4, 0 /* a4 = XT_CP[n]_SA */
l32i a5, a15, XT_CP_ASA /* ptr to actual (aligned) save area */
add a2, a4, a5 /* a2 = new owner's area for CP */
/*
The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
It is theoretically possible for Xtensa processor designers to write TIE
that causes more address registers to be affected, but it is generally
unlikely. If that ever happens, more registers needs to be saved/restored
around this macro invocation.
*/
xchal_cpi_load_funcbody
/* Restore interruptee's saved registers. */
/* Can omit rsync for wsr.CPENABLE here because _xt_user_exit does it. */
.L_xt_coproc_done:
l32i a15, sp, XT_STK_A15
#if portNUM_PROCESSORS > 1
l32i a6, sp, XT_STK_A6
l32i a7, sp, XT_STK_A7
#endif /* portNUM_PROCESSORS > 1 */
l32i a5, sp, XT_STK_A5
l32i a4, sp, XT_STK_A4
l32i a3, sp, XT_STK_A3
l32i a2, sp, XT_STK_A2
call0 _xt_user_exit /* return via exit dispatcher */
/* Never returns here - call0 is used as a jump (see note at top) */
.L_check_cs:
/* a0 = CP mask in low bits, a15 = new owner's save area */
l16ui a2, a15, XT_CP_CS_ST /* a2 = mask of CPs saved */
bnone a2, a0, .L_xt_coproc_done /* if no match then done */
and a2, a2, a0 /* a2 = which CPs to restore */
extui a2, a2, 0, 8 /* extract low 8 bits */
#if portNUM_PROCESSORS == 1
s32i a6, sp, XT_STK_A6 /* save extra needed regs */
s32i a7, sp, XT_STK_A7
#endif /* portNUM_PROCESSORS == 1 */
s32i a13, sp, XT_STK_A13
s32i a14, sp, XT_STK_A14
call0 _xt_coproc_restorecs /* restore CP registers */
#if portNUM_PROCESSORS == 1
l32i a6, sp, XT_STK_A6 /* restore saved registers */
l32i a7, sp, XT_STK_A7
#endif /* portNUM_PROCESSORS == 1 */
l32i a13, sp, XT_STK_A13
l32i a14, sp, XT_STK_A14
j .L_xt_coproc_done
/* Co-processor exception occurred outside a thread (not supported). */
.L_xt_coproc_invalid:
movi a0,PANIC_RSN_COPROCEXCEPTION
wsr a0,EXCCAUSE
call0 _xt_panic /* not in a thread (invalid) */
/* never returns */
#endif /* XCHAL_CP_NUM */
  1013. /*
  1014. -------------------------------------------------------------------------------
  1015. Level 1 interrupt dispatch. Assumes stack frame has not been allocated yet.
  1016. -------------------------------------------------------------------------------
  1017. */
.section .iram1,"ax"
.type _xt_lowint1,@function
.align 4
/*
Level-1 interrupt dispatcher. Builds the interrupt stack frame (none has
been allocated yet), enters the RTOS via XT_RTOS_INT_ENTER, dispatches the
pending level-1 ISRs in a C-callable environment, then hands control to
XT_RTOS_INT_EXIT. Returns to the interruptee via _xt_user_exit (stored at
XT_STK_EXIT).
*/
_xt_lowint1:
mov a0, sp /* sp == a1 */
addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
rsr a0, PS /* save interruptee's PS */
s32i a0, sp, XT_STK_PS
rsr a0, EPC_1 /* save interruptee's PC */
s32i a0, sp, XT_STK_PC
rsr a0, EXCSAVE_1 /* save interruptee's a0 */
s32i a0, sp, XT_STK_A0
movi a0, _xt_user_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
/* EXCSAVE_1 should now be free to use. Use it to keep a copy of the
current stack pointer that points to the exception frame (XT_STK_FRAME).*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
mov a0, sp
wsr a0, EXCSAVE_1
#endif
#endif
/* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
/* !! We are now on the RTOS system stack !! */
/* Set up PS for C, enable interrupts above this level and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__
movi a0, PS_INTLEVEL(1) | PS_UM
#else
movi a0, PS_INTLEVEL(1) | PS_UM | PS_WOE
#endif
wsr a0, PS
rsync
/* OK to call C code at this point, dispatch user ISRs */
dispatch_c_isr 1 XCHAL_INTLEVEL1_MASK
/* Done handling interrupts, transfer control to OS */
call0 XT_RTOS_INT_EXIT /* does not return directly here */
  1056. /*
  1057. -------------------------------------------------------------------------------
  1058. MEDIUM PRIORITY (LEVEL 2+) INTERRUPT VECTORS AND LOW LEVEL HANDLERS.
  1059. Medium priority interrupts are by definition those with priority greater
  1060. than 1 and not greater than XCHAL_EXCM_LEVEL. These are disabled by
  1061. setting PS.EXCM and therefore can easily support a C environment for
  1062. handlers in C, and interact safely with an RTOS.
  1063. Each vector goes at a predetermined location according to the Xtensa
  1064. hardware configuration, which is ensured by its placement in a special
  1065. section known to the Xtensa linker support package (LSP). It performs
  1066. the minimum necessary before jumping to the handler in the .text section.
  1067. The corresponding handler goes in the normal .text section. It sets up
  1068. the appropriate stack frame, saves a few vector-specific registers and
  1069. calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
  1070. and enter the RTOS, then sets up a C environment. It then calls the
  1071. user's interrupt handler code (which may be coded in C) and finally
  1072. calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.
  1073. While XT_RTOS_INT_EXIT does not return directly to the interruptee,
  1074. eventually the RTOS scheduler will want to dispatch the interrupted
  1075. task or handler. The scheduler will return to the exit point that was
  1076. saved in the interrupt stack frame at XT_STK_EXIT.
  1077. -------------------------------------------------------------------------------
  1078. */
#if XCHAL_EXCM_LEVEL >= 2
.begin literal_prefix .Level2InterruptVector
.section .Level2InterruptVector.text, "ax"
.global _Level2Vector
.type _Level2Vector,@function
.align 4
/* Vector stub only: save a0 in the level-specific EXCSAVE, then jump
   (via call0) out of the size-limited vector area to the IRAM handler. */
_Level2Vector:
wsr a0, EXCSAVE_2 /* preserve a0 */
call0 _xt_medint2 /* load interrupt handler */
/* never returns here - call0 is used as a jump (see note at top) */
.end literal_prefix
.section .iram1,"ax"
.type _xt_medint2,@function
.align 4
/*
Level-2 (medium priority) interrupt dispatcher. Same structure as
_xt_lowint1, but uses the level-banked EPS_2/EPC_2/EXCSAVE_2 registers and
returns through _xt_medint2_exit with RFI 2.
*/
_xt_medint2:
mov a0, sp /* sp == a1 */
addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
rsr a0, EPS_2 /* save interruptee's PS */
s32i a0, sp, XT_STK_PS
rsr a0, EPC_2 /* save interruptee's PC */
s32i a0, sp, XT_STK_PC
rsr a0, EXCSAVE_2 /* save interruptee's a0 */
s32i a0, sp, XT_STK_A0
movi a0, _xt_medint2_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
/* EXCSAVE_2 should now be free to use. Use it to keep a copy of the
current stack pointer that points to the exception frame (XT_STK_FRAME).*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
mov a0, sp
wsr a0, EXCSAVE_2
#endif
#endif
/* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
/* !! We are now on the RTOS system stack !! */
/* Set up PS for C, enable interrupts above this level and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__
movi a0, PS_INTLEVEL(2) | PS_UM
#else
movi a0, PS_INTLEVEL(2) | PS_UM | PS_WOE
#endif
wsr a0, PS
rsync
/* OK to call C code at this point, dispatch user ISRs */
dispatch_c_isr 2 XCHAL_INTLEVEL2_MASK
/* Done handling interrupts, transfer control to OS */
call0 XT_RTOS_INT_EXIT /* does not return directly here */
/*
Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
on entry and used to return to a thread or interrupted interrupt handler.
*/
.global _xt_medint2_exit
.type _xt_medint2_exit,@function
.align 4
_xt_medint2_exit:
/* Restore only level-specific regs (the rest were already restored) */
l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
wsr a0, EPS_2
l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
wsr a0, EPC_2
l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
rsync /* ensure EPS and EPC written */
rfi 2
#endif /* Level 2 */
#if XCHAL_EXCM_LEVEL >= 3
.begin literal_prefix .Level3InterruptVector
.section .Level3InterruptVector.text, "ax"
.global _Level3Vector
.type _Level3Vector,@function
.align 4
/* Vector stub only: save a0 in the level-specific EXCSAVE, then jump
   (via call0) out of the size-limited vector area to the IRAM handler. */
_Level3Vector:
wsr a0, EXCSAVE_3 /* preserve a0 */
call0 _xt_medint3 /* load interrupt handler */
/* never returns here - call0 is used as a jump (see note at top) */
.end literal_prefix
.section .iram1,"ax"
.type _xt_medint3,@function
.align 4
/*
Level-3 (medium priority) interrupt dispatcher. Same structure as
_xt_lowint1, but uses the level-banked EPS_3/EPC_3/EXCSAVE_3 registers and
returns through _xt_medint3_exit with RFI 3.
*/
_xt_medint3:
mov a0, sp /* sp == a1 */
addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
rsr a0, EPS_3 /* save interruptee's PS */
s32i a0, sp, XT_STK_PS
rsr a0, EPC_3 /* save interruptee's PC */
s32i a0, sp, XT_STK_PC
rsr a0, EXCSAVE_3 /* save interruptee's a0 */
s32i a0, sp, XT_STK_A0
movi a0, _xt_medint3_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
/* EXCSAVE_3 should now be free to use. Use it to keep a copy of the
current stack pointer that points to the exception frame (XT_STK_FRAME).*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
mov a0, sp
wsr a0, EXCSAVE_3
#endif
#endif
/* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
/* !! We are now on the RTOS system stack !! */
/* Set up PS for C, enable interrupts above this level and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__
movi a0, PS_INTLEVEL(3) | PS_UM
#else
movi a0, PS_INTLEVEL(3) | PS_UM | PS_WOE
#endif
wsr a0, PS
rsync
/* OK to call C code at this point, dispatch user ISRs */
dispatch_c_isr 3 XCHAL_INTLEVEL3_MASK
/* Done handling interrupts, transfer control to OS */
call0 XT_RTOS_INT_EXIT /* does not return directly here */
/*
Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
on entry and used to return to a thread or interrupted interrupt handler.
*/
.global _xt_medint3_exit
.type _xt_medint3_exit,@function
.align 4
_xt_medint3_exit:
/* Restore only level-specific regs (the rest were already restored) */
l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
wsr a0, EPS_3
l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
wsr a0, EPC_3
l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
rsync /* ensure EPS and EPC written */
rfi 3
#endif /* Level 3 */
#if XCHAL_EXCM_LEVEL >= 4
.begin literal_prefix .Level4InterruptVector
.section .Level4InterruptVector.text, "ax"
.global _Level4Vector
.type _Level4Vector,@function
.align 4
/* Vector stub only: save a0 in the level-specific EXCSAVE, then jump
   (via call0) out of the size-limited vector area to the IRAM handler. */
_Level4Vector:
wsr a0, EXCSAVE_4 /* preserve a0 */
call0 _xt_medint4 /* load interrupt handler */
/* never returns here - call0 is used as a jump (see note at top) */
.end literal_prefix
.section .iram1,"ax"
.type _xt_medint4,@function
.align 4
/*
Level-4 (medium priority) interrupt dispatcher. Same structure as
_xt_lowint1, but uses the level-banked EPS_4/EPC_4/EXCSAVE_4 registers and
returns through _xt_medint4_exit with RFI 4.
*/
_xt_medint4:
mov a0, sp /* sp == a1 */
addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
rsr a0, EPS_4 /* save interruptee's PS */
s32i a0, sp, XT_STK_PS
rsr a0, EPC_4 /* save interruptee's PC */
s32i a0, sp, XT_STK_PC
rsr a0, EXCSAVE_4 /* save interruptee's a0 */
s32i a0, sp, XT_STK_A0
movi a0, _xt_medint4_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
/* EXCSAVE_4 should now be free to use. Use it to keep a copy of the
current stack pointer that points to the exception frame (XT_STK_FRAME).*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
mov a0, sp
wsr a0, EXCSAVE_4
#endif
#endif
/* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
/* !! We are now on the RTOS system stack !! */
/* Set up PS for C, enable interrupts above this level and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__
movi a0, PS_INTLEVEL(4) | PS_UM
#else
movi a0, PS_INTLEVEL(4) | PS_UM | PS_WOE
#endif
wsr a0, PS
rsync
/* OK to call C code at this point, dispatch user ISRs */
dispatch_c_isr 4 XCHAL_INTLEVEL4_MASK
/* Done handling interrupts, transfer control to OS */
call0 XT_RTOS_INT_EXIT /* does not return directly here */
/*
Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
on entry and used to return to a thread or interrupted interrupt handler.
*/
.global _xt_medint4_exit
.type _xt_medint4_exit,@function
.align 4
_xt_medint4_exit:
/* Restore only level-specific regs (the rest were already restored) */
l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
wsr a0, EPS_4
l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
wsr a0, EPC_4
l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
rsync /* ensure EPS and EPC written */
rfi 4
#endif /* Level 4 */
#if XCHAL_EXCM_LEVEL >= 5

/*
--------------------------------------------------------------------------------
Level 5 medium-priority interrupt: vector and dispatch handler.
Identical structure to the level 4 handler, using EPS_5/EPC_5/EXCSAVE_5 and
returning with rfi 5.
--------------------------------------------------------------------------------
*/
.begin literal_prefix .Level5InterruptVector
.section .Level5InterruptVector.text, "ax"
.global _Level5Vector
.type _Level5Vector,@function
.align 4
_Level5Vector:
wsr a0, EXCSAVE_5 /* preserve a0 */
call0 _xt_medint5 /* load interrupt handler */
/* never returns here - call0 is used as a jump (see note at top) */
.end literal_prefix

/* Handler body is placed in IRAM (.iram1) rather than in the vector area. */
.section .iram1,"ax"
.type _xt_medint5,@function
.align 4
_xt_medint5:
mov a0, sp /* sp == a1 */
addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
rsr a0, EPS_5 /* save interruptee's PS */
s32i a0, sp, XT_STK_PS
rsr a0, EPC_5 /* save interruptee's PC */
s32i a0, sp, XT_STK_PC
rsr a0, EXCSAVE_5 /* save interruptee's a0 */
s32i a0, sp, XT_STK_A0
movi a0, _xt_medint5_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
/* EXCSAVE_5 should now be free to use. Use it to keep a copy of the
current stack pointer that points to the exception frame (XT_STK_FRAME).*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
mov a0, sp
wsr a0, EXCSAVE_5
#endif
#endif
/* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
/* !! We are now on the RTOS system stack !! */
/* Set up PS for C, enable interrupts above this level and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__
movi a0, PS_INTLEVEL(5) | PS_UM
#else
movi a0, PS_INTLEVEL(5) | PS_UM | PS_WOE /* windowed ABI also needs WOE */
#endif
wsr a0, PS
rsync
/* OK to call C code at this point, dispatch user ISRs */
dispatch_c_isr 5 XCHAL_INTLEVEL5_MASK
/* Done handling interrupts, transfer control to OS */
call0 XT_RTOS_INT_EXIT /* does not return directly here */
/*
Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
on entry and used to return to a thread or interrupted interrupt handler.
*/
.global _xt_medint5_exit
.type _xt_medint5_exit,@function
.align 4
_xt_medint5_exit:
/* Restore only level-specific regs (the rest were already restored) */
l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
wsr a0, EPS_5
l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
wsr a0, EPC_5
l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
rsync /* ensure EPS and EPC written */
rfi 5
#endif /* Level 5 */
#if XCHAL_EXCM_LEVEL >= 6

/*
--------------------------------------------------------------------------------
Level 6 medium-priority interrupt: vector and dispatch handler.
Identical structure to the level 4 handler, using EPS_6/EPC_6/EXCSAVE_6 and
returning with rfi 6.
--------------------------------------------------------------------------------
*/
.begin literal_prefix .Level6InterruptVector
.section .Level6InterruptVector.text, "ax"
.global _Level6Vector
.type _Level6Vector,@function
.align 4
_Level6Vector:
wsr a0, EXCSAVE_6 /* preserve a0 */
call0 _xt_medint6 /* load interrupt handler */
/* never returns here - call0 is used as a jump (see note at top) */
.end literal_prefix

/* Handler body is placed in IRAM (.iram1) rather than in the vector area. */
.section .iram1,"ax"
.type _xt_medint6,@function
.align 4
_xt_medint6:
mov a0, sp /* sp == a1 */
addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
rsr a0, EPS_6 /* save interruptee's PS */
s32i a0, sp, XT_STK_PS
rsr a0, EPC_6 /* save interruptee's PC */
s32i a0, sp, XT_STK_PC
rsr a0, EXCSAVE_6 /* save interruptee's a0 */
s32i a0, sp, XT_STK_A0
movi a0, _xt_medint6_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_EXIT
/* EXCSAVE_6 should now be free to use. Use it to keep a copy of the
current stack pointer that points to the exception frame (XT_STK_FRAME).*/
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
mov a0, sp
wsr a0, EXCSAVE_6
#endif
#endif
/* Save rest of interrupt context and enter RTOS. */
call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
/* !! We are now on the RTOS system stack !! */
/* Set up PS for C, enable interrupts above this level and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__
movi a0, PS_INTLEVEL(6) | PS_UM
#else
movi a0, PS_INTLEVEL(6) | PS_UM | PS_WOE /* windowed ABI also needs WOE */
#endif
wsr a0, PS
rsync
/* OK to call C code at this point, dispatch user ISRs */
dispatch_c_isr 6 XCHAL_INTLEVEL6_MASK
/* Done handling interrupts, transfer control to OS */
call0 XT_RTOS_INT_EXIT /* does not return directly here */
/*
Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
on entry and used to return to a thread or interrupted interrupt handler.
*/
.global _xt_medint6_exit
.type _xt_medint6_exit,@function
.align 4
_xt_medint6_exit:
/* Restore only level-specific regs (the rest were already restored) */
l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
wsr a0, EPS_6
l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
wsr a0, EPC_6
l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
rsync /* ensure EPS and EPC written */
rfi 6
#endif /* Level 6 */
  1411. /*******************************************************************************
  1412. HIGH PRIORITY (LEVEL > XCHAL_EXCM_LEVEL) INTERRUPT VECTORS AND HANDLERS
  1413. High priority interrupts are by definition those with priorities greater
  1414. than XCHAL_EXCM_LEVEL. This includes non-maskable (NMI). High priority
  1415. interrupts cannot interact with the RTOS, that is they must save all regs
  1416. they use and not call any RTOS function.
  1417. A further restriction imposed by the Xtensa windowed architecture is that
  1418. high priority interrupts must not modify the stack area even logically
  1419. "above" the top of the interrupted stack (they need to provide their
  1420. own stack or static save area).
  1421. Cadence Design Systems recommends high priority interrupt handlers be coded in assembly
  1422. and used for purposes requiring very short service times.
  1423. Here are templates for high priority (level 2+) interrupt vectors.
  1424. They assume only one interrupt per level to avoid the burden of identifying
  1425. which interrupts at this level are pending and enabled. This allows for
  1426. minimum latency and avoids having to save/restore a2 in addition to a0.
  1427. If more than one interrupt per high priority level is configured, this burden
  1428. is on the handler which in any case must provide a way to save and restore
  1429. registers it uses without touching the interrupted stack.
  1430. Each vector goes at a predetermined location according to the Xtensa
  1431. hardware configuration, which is ensured by its placement in a special
  1432. section known to the Xtensa linker support package (LSP). It performs
  1433. the minimum necessary before jumping to the handler in the .text section.
  1434. *******************************************************************************/
  1435. /*
  1436. Currently only shells for high priority interrupt handlers are provided
  1437. here. However a template and example can be found in the Cadence Design Systems tools
  1438. documentation: "Microprocessor Programmer's Guide".
  1439. */
#if XCHAL_NUM_INTLEVELS >=2 && XCHAL_EXCM_LEVEL <2 && XCHAL_DEBUGLEVEL !=2

/*
--------------------------------------------------------------------------------
High-priority level 2 interrupt vector and default handler shell.
The vector saves a0 in EXCSAVE_2 and transfers (call0 used as a jump) to
xt_highint2. The default _xt_highint2 below is a template: it optionally calls
the XT_INTEXC_HOOKS pre-handler, then simply restores a0 and returns (rfi 2).
--------------------------------------------------------------------------------
*/
.begin literal_prefix .Level2InterruptVector
.section .Level2InterruptVector.text, "ax"
.global _Level2Vector
.type _Level2Vector,@function
.global xt_highint2
.align 4
_Level2Vector:
wsr a0, EXCSAVE_2 /* preserve a0 */
call0 xt_highint2 /* load interrupt handler */
/* never returns here - call0 is used as a jump (see note at top) */
.end literal_prefix

/* xt_highint2 is weak so an application can override it with its own handler. */
.global xt_highint2
.weak xt_highint2
.set xt_highint2, _xt_highint2
.section .iram1, "ax"
.type _xt_highint2,@function
.align 4
_xt_highint2:
#ifdef XT_INTEXC_HOOKS
/* Call interrupt hook if present to (pre)handle interrupts. */
movi a0, _xt_intexc_hooks
l32i a0, a0, 2<<2 /* hook table entry for level 2 */
beqz a0, 1f
.Ln_xt_highint2_call_hook:
callx0 a0 /* must NOT disturb stack! */
1:
#endif
/* USER_EDIT:
ADD HIGH PRIORITY LEVEL 2 INTERRUPT HANDLER CODE HERE.
*/
.align 4
.L_xt_highint2_exit:
rsr a0, EXCSAVE_2 /* restore a0 */
rfi 2
#endif /* Level 2 */
#if XCHAL_NUM_INTLEVELS >=3 && XCHAL_EXCM_LEVEL <3 && XCHAL_DEBUGLEVEL !=3

/*
--------------------------------------------------------------------------------
High-priority level 3 interrupt vector and default handler shell.
Same template as the level 2 shell, using EXCSAVE_3 and rfi 3.
--------------------------------------------------------------------------------
*/
.begin literal_prefix .Level3InterruptVector
.section .Level3InterruptVector.text, "ax"
.global _Level3Vector
.type _Level3Vector,@function
.global xt_highint3
.align 4
_Level3Vector:
wsr a0, EXCSAVE_3 /* preserve a0 */
call0 xt_highint3 /* load interrupt handler */
/* never returns here - call0 is used as a jump (see note at top) */
.end literal_prefix

/* xt_highint3 is weak so an application can override it with its own handler. */
.global xt_highint3
.weak xt_highint3
.set xt_highint3, _xt_highint3
.section .iram1, "ax"
.type _xt_highint3,@function
.align 4
_xt_highint3:
#ifdef XT_INTEXC_HOOKS
/* Call interrupt hook if present to (pre)handle interrupts. */
movi a0, _xt_intexc_hooks
l32i a0, a0, 3<<2 /* hook table entry for level 3 */
beqz a0, 1f
.Ln_xt_highint3_call_hook:
callx0 a0 /* must NOT disturb stack! */
1:
#endif
/* USER_EDIT:
ADD HIGH PRIORITY LEVEL 3 INTERRUPT HANDLER CODE HERE.
*/
.align 4
.L_xt_highint3_exit:
rsr a0, EXCSAVE_3 /* restore a0 */
rfi 3
#endif /* Level 3 */
#if XCHAL_NUM_INTLEVELS >=4 && XCHAL_EXCM_LEVEL <4 && XCHAL_DEBUGLEVEL !=4

/*
--------------------------------------------------------------------------------
High-priority level 4 interrupt vector and default handler shell.
Same template as the level 2 shell, using EXCSAVE_4 and rfi 4.
--------------------------------------------------------------------------------
*/
.begin literal_prefix .Level4InterruptVector
.section .Level4InterruptVector.text, "ax"
.global _Level4Vector
.type _Level4Vector,@function
.global xt_highint4
.align 4
_Level4Vector:
wsr a0, EXCSAVE_4 /* preserve a0 */
call0 xt_highint4 /* load interrupt handler */
/* never returns here - call0 is used as a jump (see note at top) */
.end literal_prefix

/* xt_highint4 is weak so an application can override it with its own handler. */
.global xt_highint4
.weak xt_highint4
.set xt_highint4, _xt_highint4
.section .iram1, "ax"
.type _xt_highint4,@function
.align 4
_xt_highint4:
#ifdef XT_INTEXC_HOOKS
/* Call interrupt hook if present to (pre)handle interrupts. */
movi a0, _xt_intexc_hooks
l32i a0, a0, 4<<2 /* hook table entry for level 4 */
beqz a0, 1f
.Ln_xt_highint4_call_hook:
callx0 a0 /* must NOT disturb stack! */
1:
#endif
/* USER_EDIT:
ADD HIGH PRIORITY LEVEL 4 INTERRUPT HANDLER CODE HERE.
*/
.align 4
.L_xt_highint4_exit:
rsr a0, EXCSAVE_4 /* restore a0 */
rfi 4
#endif /* Level 4 */
#if XCHAL_NUM_INTLEVELS >=5 && XCHAL_EXCM_LEVEL <5 && XCHAL_DEBUGLEVEL !=5

/*
--------------------------------------------------------------------------------
High-priority level 5 interrupt vector and default handler shell.
Same template as the level 2 shell, using EXCSAVE_5 and rfi 5.
--------------------------------------------------------------------------------
*/
.begin literal_prefix .Level5InterruptVector
.section .Level5InterruptVector.text, "ax"
.global _Level5Vector
.type _Level5Vector,@function
.global xt_highint5
.align 4
_Level5Vector:
wsr a0, EXCSAVE_5 /* preserve a0 */
call0 xt_highint5 /* load interrupt handler */
/* never returns here - call0 is used as a jump (see note at top) */
.end literal_prefix

/* xt_highint5 is weak so an application can override it with its own handler. */
.global xt_highint5
.weak xt_highint5
.set xt_highint5, _xt_highint5
.section .iram1, "ax"
.type _xt_highint5,@function
.align 4
_xt_highint5:
#ifdef XT_INTEXC_HOOKS
/* Call interrupt hook if present to (pre)handle interrupts. */
movi a0, _xt_intexc_hooks
l32i a0, a0, 5<<2 /* hook table entry for level 5 */
beqz a0, 1f
.Ln_xt_highint5_call_hook:
callx0 a0 /* must NOT disturb stack! */
1:
#endif
/* USER_EDIT:
ADD HIGH PRIORITY LEVEL 5 INTERRUPT HANDLER CODE HERE.
*/
.align 4
.L_xt_highint5_exit:
rsr a0, EXCSAVE_5 /* restore a0 */
rfi 5
#endif /* Level 5 */
#if XCHAL_NUM_INTLEVELS >=6 && XCHAL_EXCM_LEVEL <6 && XCHAL_DEBUGLEVEL !=6

/*
--------------------------------------------------------------------------------
High-priority level 6 interrupt vector and default handler shell.
Same template as the level 2 shell, using EXCSAVE_6 and rfi 6.
--------------------------------------------------------------------------------
*/
.begin literal_prefix .Level6InterruptVector
.section .Level6InterruptVector.text, "ax"
.global _Level6Vector
.type _Level6Vector,@function
.global xt_highint6
.align 4
_Level6Vector:
wsr a0, EXCSAVE_6 /* preserve a0 */
call0 xt_highint6 /* load interrupt handler */
/* never returns here - call0 is used as a jump (see note at top) */
.end literal_prefix

/* xt_highint6 is weak so an application can override it with its own handler. */
.global xt_highint6
.weak xt_highint6
.set xt_highint6, _xt_highint6
.section .iram1, "ax"
.type _xt_highint6,@function
.align 4
_xt_highint6:
#ifdef XT_INTEXC_HOOKS
/* Call interrupt hook if present to (pre)handle interrupts. */
movi a0, _xt_intexc_hooks
l32i a0, a0, 6<<2 /* hook table entry for level 6 */
beqz a0, 1f
.Ln_xt_highint6_call_hook:
callx0 a0 /* must NOT disturb stack! */
1:
#endif
/* USER_EDIT:
ADD HIGH PRIORITY LEVEL 6 INTERRUPT HANDLER CODE HERE.
*/
.align 4
.L_xt_highint6_exit:
rsr a0, EXCSAVE_6 /* restore a0 */
rfi 6
#endif /* Level 6 */
#if XCHAL_HAVE_NMI

/*
--------------------------------------------------------------------------------
Non-maskable interrupt (NMI) vector and default handler shell.
The vector saves a0 in the EXCSAVE register for the NMI level and transfers
(call0 used as a jump) to xt_nmi. The default _xt_nmi below is a template:
it optionally calls the XT_INTEXC_HOOKS pre-handler, then restores a0 and
returns with rfi at XCHAL_NMILEVEL.
--------------------------------------------------------------------------------
*/
.begin literal_prefix .NMIExceptionVector
.section .NMIExceptionVector.text, "ax"
.global _NMIExceptionVector
.type _NMIExceptionVector,@function
.global xt_nmi
.align 4
_NMIExceptionVector:
wsr a0, EXCSAVE + XCHAL_NMILEVEL /* preserve a0 */
call0 xt_nmi /* load interrupt handler */
/* never returns here - call0 is used as a jump (see note at top) */
.end literal_prefix

/* xt_nmi is weak so an application can override it with its own handler. */
.global xt_nmi
.weak xt_nmi
.set xt_nmi, _xt_nmi
.section .iram1, "ax"
.type _xt_nmi,@function
.align 4
_xt_nmi:
#ifdef XT_INTEXC_HOOKS
/* Call interrupt hook if present to (pre)handle interrupts. */
movi a0, _xt_intexc_hooks
l32i a0, a0, XCHAL_NMILEVEL<<2 /* hook table entry for the NMI level */
beqz a0, 1f
.Ln_xt_nmi_call_hook:
callx0 a0 /* must NOT disturb stack! */
1:
#endif
/* USER_EDIT:
ADD HIGH PRIORITY NON-MASKABLE INTERRUPT (NMI) HANDLER CODE HERE.
*/
.align 4
.L_xt_nmi_exit:
rsr a0, EXCSAVE + XCHAL_NMILEVEL /* restore a0 */
rfi XCHAL_NMILEVEL
#endif /* NMI */
  1655. /*******************************************************************************
  1656. WINDOW OVERFLOW AND UNDERFLOW EXCEPTION VECTORS AND ALLOCA EXCEPTION HANDLER
  1657. Here is the code for each window overflow/underflow exception vector and
  1658. (interspersed) efficient code for handling the alloca exception cause.
  1659. Window exceptions are handled entirely in the vector area and are very
  1660. tight for performance. The alloca exception is also handled entirely in
  1661. the window vector area so comes at essentially no cost in code size.
  1662. Users should never need to modify them and Cadence Design Systems recommends
  1663. they do not.
  1664. Window handlers go at predetermined vector locations according to the
  1665. Xtensa hardware configuration, which is ensured by their placement in a
  1666. special section known to the Xtensa linker support package (LSP). Since
  1667. their offsets in that section are always the same, the LSPs do not define
  1668. a section per vector.
  1669. These things are coded for XEA2 only (XEA1 is not supported).
  1670. Note on Underflow Handlers:
  1671. The underflow handler for returning from call[i+1] to call[i]
  1672. must preserve all the registers from call[i+1]'s window.
  1673. In particular, a0 and a1 must be preserved because the RETW instruction
  1674. will be reexecuted (and may even underflow if an intervening exception
  1675. has flushed call[i]'s registers).
  1676. Registers a2 and up may contain return values.
  1677. *******************************************************************************/
  1678. #if XCHAL_HAVE_WINDOWED
  1679. .section .WindowVectors.text, "ax"
  1680. /*
  1681. --------------------------------------------------------------------------------
  1682. Window Overflow Exception for Call4.
  1683. Invoked if a call[i] referenced a register (a4-a15)
  1684. that contains data from ancestor call[j];
  1685. call[j] had done a call4 to call[j+1].
  1686. On entry here:
  1687. window rotated to call[j] start point;
  1688. a0-a3 are registers to be saved;
  1689. a4-a15 must be preserved;
  1690. a5 is call[j+1]'s stack pointer.
  1691. --------------------------------------------------------------------------------
  1692. */
.org 0x0 /* window vector offsets within .WindowVectors.text are fixed by hardware */
.global _WindowOverflow4
_WindowOverflow4:
/* Spill call[j]'s a0-a3 into the base-save area just below call[j+1]'s SP (a5). */
s32e a0, a5, -16 /* save a0 to call[j+1]'s stack frame */
s32e a1, a5, -12 /* save a1 to call[j+1]'s stack frame */
s32e a2, a5, -8 /* save a2 to call[j+1]'s stack frame */
s32e a3, a5, -4 /* save a3 to call[j+1]'s stack frame */
rfwo /* rotates back to call[i] position */
  1701. /*
  1702. --------------------------------------------------------------------------------
  1703. Window Underflow Exception for Call4
  1704. Invoked by RETW returning from call[i+1] to call[i]
  1705. where call[i]'s registers must be reloaded (not live in ARs);
  1706. where call[i] had done a call4 to call[i+1].
  1707. On entry here:
  1708. window rotated to call[i] start point;
  1709. a0-a3 are undefined, must be reloaded with call[i].reg[0..3];
  1710. a4-a15 must be preserved (they are call[i+1].reg[0..11]);
  1711. a5 is call[i+1]'s stack pointer.
  1712. --------------------------------------------------------------------------------
  1713. */
.org 0x40 /* window vector offsets within .WindowVectors.text are fixed by hardware */
.global _WindowUnderflow4
_WindowUnderflow4:
/* Reload call[i]'s a0-a3 from the base-save area below call[i+1]'s SP (a5). */
l32e a0, a5, -16 /* restore a0 from call[i+1]'s stack frame */
l32e a1, a5, -12 /* restore a1 from call[i+1]'s stack frame */
l32e a2, a5, -8 /* restore a2 from call[i+1]'s stack frame */
l32e a3, a5, -4 /* restore a3 from call[i+1]'s stack frame */
rfwu
  1722. /*
  1723. --------------------------------------------------------------------------------
  1724. Handle alloca exception generated by interruptee executing 'movsp'.
  1725. This uses space between the window vectors, so is essentially "free".
  1726. All interruptee's regs are intact except a0 which is saved in EXCSAVE_1,
  1727. and PS.EXCM has been set by the exception hardware (can't be interrupted).
  1728. The fact the alloca exception was taken means the registers associated with
  1729. the base-save area have been spilled and will be restored by the underflow
  1730. handler, so those 4 registers are available for scratch.
  1731. The code is optimized to avoid unaligned branches and minimize cache misses.
  1732. --------------------------------------------------------------------------------
  1733. */
.align 4
.global _xt_alloca_exc
_xt_alloca_exc:
/* Fix up PS.OWB after rotating the window back one frame, then jump into the
matching underflow handler to reload the spilled base-save registers. */
rsr a0, WINDOWBASE /* grab WINDOWBASE before rotw changes it */
rotw -1 /* WINDOWBASE goes to a4, new a0-a3 are scratch */
rsr a2, PS
extui a3, a2, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS
xor a3, a3, a4 /* bits changed from old to current windowbase */
rsr a4, EXCSAVE_1 /* restore original a0 (now in a4) */
slli a3, a3, XCHAL_PS_OWB_SHIFT
xor a2, a2, a3 /* flip changed bits in old window base */
wsr a2, PS /* update PS.OWB to new window base */
rsync
/* Select underflow handler from the top two bits of the saved return
address (window-call size encoding). */
bbci.l a4, 31, _WindowUnderflow4 /* bit 31 clear -> call4 frame */
rotw -1 /* original a0 goes to a8 */
bbci.l a8, 30, _WindowUnderflow8 /* bit 30 clear -> call8 frame */
rotw -1
j _WindowUnderflow12 /* otherwise a call12 frame */
  1752. /*
  1753. --------------------------------------------------------------------------------
  1754. Window Overflow Exception for Call8
  1755. Invoked if a call[i] referenced a register (a4-a15)
  1756. that contains data from ancestor call[j];
  1757. call[j] had done a call8 to call[j+1].
  1758. On entry here:
  1759. window rotated to call[j] start point;
  1760. a0-a7 are registers to be saved;
  1761. a8-a15 must be preserved;
  1762. a9 is call[j+1]'s stack pointer.
  1763. --------------------------------------------------------------------------------
  1764. */
.org 0x80 /* window vector offsets within .WindowVectors.text are fixed by hardware */
.global _WindowOverflow8
_WindowOverflow8:
/* Spill a0-a3 below call[j+1]'s SP (a9), and a4-a7 below call[j]'s own frame
end, which is located via call[j-1]'s saved SP. */
s32e a0, a9, -16 /* save a0 to call[j+1]'s stack frame */
l32e a0, a1, -12 /* a0 <- call[j-1]'s sp
(used to find end of call[j]'s frame) */
s32e a1, a9, -12 /* save a1 to call[j+1]'s stack frame */
s32e a2, a9, -8 /* save a2 to call[j+1]'s stack frame */
s32e a3, a9, -4 /* save a3 to call[j+1]'s stack frame */
s32e a4, a0, -32 /* save a4 to call[j]'s stack frame */
s32e a5, a0, -28 /* save a5 to call[j]'s stack frame */
s32e a6, a0, -24 /* save a6 to call[j]'s stack frame */
s32e a7, a0, -20 /* save a7 to call[j]'s stack frame */
rfwo /* rotates back to call[i] position */
  1779. /*
  1780. --------------------------------------------------------------------------------
  1781. Window Underflow Exception for Call8
  1782. Invoked by RETW returning from call[i+1] to call[i]
  1783. where call[i]'s registers must be reloaded (not live in ARs);
  1784. where call[i] had done a call8 to call[i+1].
  1785. On entry here:
  1786. window rotated to call[i] start point;
  1787. a0-a7 are undefined, must be reloaded with call[i].reg[0..7];
  1788. a8-a15 must be preserved (they are call[i+1].reg[0..7]);
  1789. a9 is call[i+1]'s stack pointer.
  1790. --------------------------------------------------------------------------------
  1791. */
.org 0xC0 /* window vector offsets within .WindowVectors.text are fixed by hardware */
.global _WindowUnderflow8
_WindowUnderflow8:
/* Reload a0-a3 from below call[i+1]'s SP (a9), and a4-a7 from below the end
of call[i]'s frame, located via call[i-1]'s saved SP. */
l32e a0, a9, -16 /* restore a0 from call[i+1]'s stack frame */
l32e a1, a9, -12 /* restore a1 from call[i+1]'s stack frame */
l32e a2, a9, -8 /* restore a2 from call[i+1]'s stack frame */
l32e a7, a1, -12 /* a7 <- call[i-1]'s sp
(used to find end of call[i]'s frame) */
l32e a3, a9, -4 /* restore a3 from call[i+1]'s stack frame */
l32e a4, a7, -32 /* restore a4 from call[i]'s stack frame */
l32e a5, a7, -28 /* restore a5 from call[i]'s stack frame */
l32e a6, a7, -24 /* restore a6 from call[i]'s stack frame */
l32e a7, a7, -20 /* restore a7 from call[i]'s stack frame */
rfwu
  1806. /*
  1807. --------------------------------------------------------------------------------
  1808. Window Overflow Exception for Call12
  1809. Invoked if a call[i] referenced a register (a4-a15)
  1810. that contains data from ancestor call[j];
  1811. call[j] had done a call12 to call[j+1].
  1812. On entry here:
  1813. window rotated to call[j] start point;
  1814. a0-a11 are registers to be saved;
  1815. a12-a15 must be preserved;
  1816. a13 is call[j+1]'s stack pointer.
  1817. --------------------------------------------------------------------------------
  1818. */
.org 0x100 /* window vector offsets within .WindowVectors.text are fixed by hardware */
.global _WindowOverflow12
_WindowOverflow12:
/* Spill a0-a3 below call[j+1]'s SP (a13), and a4-a11 below call[j]'s own
frame end, which is located via call[j-1]'s saved SP. */
s32e a0, a13, -16 /* save a0 to call[j+1]'s stack frame */
l32e a0, a1, -12 /* a0 <- call[j-1]'s sp
(used to find end of call[j]'s frame) */
s32e a1, a13, -12 /* save a1 to call[j+1]'s stack frame */
s32e a2, a13, -8 /* save a2 to call[j+1]'s stack frame */
s32e a3, a13, -4 /* save a3 to call[j+1]'s stack frame */
s32e a4, a0, -48 /* save a4 to end of call[j]'s stack frame */
s32e a5, a0, -44 /* save a5 to end of call[j]'s stack frame */
s32e a6, a0, -40 /* save a6 to end of call[j]'s stack frame */
s32e a7, a0, -36 /* save a7 to end of call[j]'s stack frame */
s32e a8, a0, -32 /* save a8 to end of call[j]'s stack frame */
s32e a9, a0, -28 /* save a9 to end of call[j]'s stack frame */
s32e a10, a0, -24 /* save a10 to end of call[j]'s stack frame */
s32e a11, a0, -20 /* save a11 to end of call[j]'s stack frame */
rfwo /* rotates back to call[i] position */
  1837. /*
  1838. --------------------------------------------------------------------------------
  1839. Window Underflow Exception for Call12
  1840. Invoked by RETW returning from call[i+1] to call[i]
  1841. where call[i]'s registers must be reloaded (not live in ARs);
  1842. where call[i] had done a call12 to call[i+1].
  1843. On entry here:
  1844. window rotated to call[i] start point;
  1845. a0-a11 are undefined, must be reloaded with call[i].reg[0..11];
  1846. a12-a15 must be preserved (they are call[i+1].reg[0..3]);
  1847. a13 is call[i+1]'s stack pointer.
  1848. --------------------------------------------------------------------------------
  1849. */
.org 0x140 /* window vector offsets within .WindowVectors.text are fixed by hardware */
.global _WindowUnderflow12
_WindowUnderflow12:
/* Reload a0-a3 from below call[i+1]'s SP (a13), and a4-a11 from below the
end of call[i]'s frame, located via call[i-1]'s saved SP. */
l32e a0, a13, -16 /* restore a0 from call[i+1]'s stack frame */
l32e a1, a13, -12 /* restore a1 from call[i+1]'s stack frame */
l32e a2, a13, -8 /* restore a2 from call[i+1]'s stack frame */
l32e a11, a1, -12 /* a11 <- call[i-1]'s sp
(used to find end of call[i]'s frame) */
l32e a3, a13, -4 /* restore a3 from call[i+1]'s stack frame */
l32e a4, a11, -48 /* restore a4 from end of call[i]'s stack frame */
l32e a5, a11, -44 /* restore a5 from end of call[i]'s stack frame */
l32e a6, a11, -40 /* restore a6 from end of call[i]'s stack frame */
l32e a7, a11, -36 /* restore a7 from end of call[i]'s stack frame */
l32e a8, a11, -32 /* restore a8 from end of call[i]'s stack frame */
l32e a9, a11, -28 /* restore a9 from end of call[i]'s stack frame */
l32e a10, a11, -24 /* restore a10 from end of call[i]'s stack frame */
l32e a11, a11, -20 /* restore a11 from end of call[i]'s stack frame */
rfwu
  1868. #endif /* XCHAL_HAVE_WINDOWED */
  1869. .section .UserEnter.text, "ax"
  1870. .global call_user_start
  1871. .type call_user_start,@function
  1872. .align 4
  1873. .literal_position