xtensa_vectors.S

  1. /*******************************************************************************
  2. Copyright (c) 2006-2015 Cadence Design Systems Inc.
  3. Permission is hereby granted, free of charge, to any person obtaining
  4. a copy of this software and associated documentation files (the
  5. "Software"), to deal in the Software without restriction, including
  6. without limitation the rights to use, copy, modify, merge, publish,
  7. distribute, sublicense, and/or sell copies of the Software, and to
  8. permit persons to whom the Software is furnished to do so, subject to
  9. the following conditions:
  10. The above copyright notice and this permission notice shall be included
  11. in all copies or substantial portions of the Software.
  12. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  13. EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  14. MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  15. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
  16. CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  17. TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  18. SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  19. --------------------------------------------------------------------------------
  20. XTENSA VECTORS AND LOW LEVEL HANDLERS FOR AN RTOS
  21. Xtensa low level exception and interrupt vectors and handlers for an RTOS.
  22. Interrupt handlers and user exception handlers support interaction with
  23. the RTOS by calling XT_RTOS_INT_ENTER and XT_RTOS_INT_EXIT before and
  24. after user's specific interrupt handlers. These macros are defined in
  25. xtensa_<rtos>.h to call suitable functions in a specific RTOS.
  26. Users can install application-specific interrupt handlers for low and
  27. medium level interrupts, by calling xt_set_interrupt_handler(). These
  28. handlers can be written in C, and must obey C calling convention. The
  29. handler table is indexed by the interrupt number. Each handler may be
  30. provided with an argument.
  31. Note that the system timer interrupt is handled specially, and is
  32. dispatched to the RTOS-specific handler. This timer cannot be hooked
  33. by application code.
  34. Optional hooks are also provided to install a handler per level at
  35. run-time, made available by compiling this source file with
  36. '-DXT_INTEXC_HOOKS' (useful for automated testing).
  37. !! This file is a template that usually needs to be modified to handle !!
  38. !! application specific interrupts. Search USER_EDIT for helpful comments !!
  39. !! on where to insert handlers and how to write them. !!
  40. Users can also install application-specific exception handlers in the
  41. same way, by calling xt_set_exception_handler(). One handler slot is
  42. provided for each exception type. Note that some exceptions are handled
  43. by the porting layer itself, and cannot be taken over by application
  44. code in this manner. These are the alloca, syscall, and coprocessor
  45. exceptions.
  46. The exception handlers can be written in C, and must follow C calling
  47. convention. Each handler is passed a pointer to an exception frame as
  48. its single argument. The exception frame is created on the stack, and
  49. holds the saved context of the thread that took the exception. If the
  50. handler returns, the context will be restored and the instruction that
  51. caused the exception will be retried. If the handler makes any changes
  52. to the saved state in the exception frame, the changes will be applied
  53. when restoring the context.
  54. Because Xtensa is a configurable architecture, this port supports all user
  55. generated configurations (except restrictions stated in the release notes).
  56. This is accomplished by conditional compilation using macros and functions
  57. defined in the Xtensa HAL (hardware adaptation layer) for your configuration.
  58. Only the relevant parts of this file will be included in your RTOS build.
  59. For example, this file provides interrupt vector templates for all types and
  60. all priority levels, but only the ones in your configuration are built.
  61. NOTES on the use of 'call0' for long jumps instead of 'j':
  62. 1. This file should be assembled with the -mlongcalls option to xt-xcc.
  63. 2. The -mlongcalls compiler option causes 'call0 dest' to be expanded to
  64. a sequence 'l32r a0, dest' 'callx0 a0' which works regardless of the
  65. distance from the call to the destination. The linker then relaxes
  66. it back to 'call0 dest' if it determines that dest is within range.
  67. This allows more flexibility in locating code without the performance
  68. overhead of the 'l32r' literal data load in cases where the destination
  69. is in range of 'call0'. There is an additional benefit in that 'call0'
  70. has a longer range than 'j' due to the target being word-aligned, so
  71. the 'l32r' sequence is less likely needed.
  72. 3. The use of 'call0' with -mlongcalls requires that register a0 not be
  73. live at the time of the call, which is always the case for a function
  74. call but needs to be ensured if 'call0' is used as a jump in lieu of 'j'.
  75. 4. This use of 'call0' is independent of the C function call ABI.
  76. *******************************************************************************/
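/*
Illustrative sketch (not part of this file's build): how application code might
hook an interrupt and an exception as described above. The prototypes are
assumed to match the xt_set_interrupt_handler()/xt_set_exception_handler()
declarations of this port (e.g. in xtensa_api.h); MY_INT_NUM, my_dev and the
3-byte instruction skip are placeholders/assumptions for illustration only.

    #include "xtensa_api.h"

    // Interrupt handler: plain C function, gets the argument registered with it.
    static void my_peripheral_isr(void *arg)
    {
        // ... acknowledge and service the peripheral described by 'arg' ...
    }

    // Exception handler: gets a pointer to the exception frame built below.
    // Changes made to the frame are applied when the context is restored.
    static void my_exc_handler(XtExcFrame *frame)
    {
        frame->pc += 3;   // e.g. skip the faulting instruction (length assumed)
    }

    void install_handlers(void)
    {
        xt_set_interrupt_handler(MY_INT_NUM, my_peripheral_isr, &my_dev);
        xt_set_exception_handler(EXCCAUSE_ILLEGAL, my_exc_handler);
    }
*/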
  77. #include "xtensa_rtos.h"
  78. #include "esp_private/panic_reason.h"
  79. #include "sdkconfig.h"
  80. #include "soc/soc.h"
  81. /*
  82. Define for workaround: pin no-cpu-affinity tasks to a cpu when fpu is used.
  83. Please change this when the tcb structure is changed
  84. */
  85. #define TASKTCB_XCOREID_OFFSET (0x38+configMAX_TASK_NAME_LEN+3)&~3
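/*
Worked example of the offset computation above (illustrative only; the actual
value depends on the FreeRTOS TCB layout and configMAX_TASK_NAME_LEN): with
configMAX_TASK_NAME_LEN == 16, (0x38 + 16 + 3) & ~3 == 0x48, i.e. the xCoreID
field is assumed to live at byte offset 0x48, the first 4-byte-aligned offset
past the task name string in the TCB.
*/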
  86. .extern pxCurrentTCB
  87. /*
  88. --------------------------------------------------------------------------------
  89. In order for backtracing to be able to trace from the pre-exception stack
  90. across to the exception stack (including nested interrupts), we need to create
  91. a pseudo base-save area to make it appear like the exception dispatcher was
  92. triggered by a CALL4 from the pre-exception code. In reality, the exception
  93. dispatcher uses the same window as pre-exception code, and only CALL0s are
  94. used within the exception dispatcher.
  95. To create the pseudo base-save area, we need to store a copy of the pre-exception's
  96. base save area (a0 to a3) below the exception dispatcher's SP. EXCSAVE_x will
  97. be used to store a copy of the SP that points to the interrupted code's exception
  98. frame just in case the exception dispatcher's SP does not point to the exception
  99. frame (which is the case when switching from task to interrupt stack).
  100. Clearing the pseudo base-save area is unnecessary as the interrupt dispatcher
  101. will restore the current SP to that of the pre-exception SP.
  102. --------------------------------------------------------------------------------
  103. */
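/*
Layout sketch of the pseudo base-save area (derived from the s32e stores in
_xt_user_exc and dispatch_c_isr below; offsets are relative to the dispatcher's
current SP):

    SP - 16 : copy of the pre-exception a0 (return address)
    SP - 12 : copy of the pre-exception a1 (stack pointer)

Only a0/a1 are copied because the backtracer needs just the return address and
the caller's SP to walk the stack.
*/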
  104. #ifdef CONFIG_FREERTOS_INTERRUPT_BACKTRACE
  105. #define XT_DEBUG_BACKTRACE 1
  106. #endif
  107. /*
  108. --------------------------------------------------------------------------------
  109. Defines used to access _xtos_interrupt_table.
  110. --------------------------------------------------------------------------------
  111. */
  112. #define XIE_HANDLER 0
  113. #define XIE_ARG 4
  114. #define XIE_SIZE 8
  115. /*
  116. Macro get_percpu_entry_for - convert a per-core ID into a multicore entry.
  117. Basically does reg=reg*portNUM_PROCESSORS+current_core_id
  118. Multiple versions here to optimize for specific portNUM_PROCESSORS values.
  119. */
  120. .macro get_percpu_entry_for reg scratch
  121. #if (portNUM_PROCESSORS == 1)
  122. /* No need to do anything */
  123. #elif (portNUM_PROCESSORS == 2)
  124. /* Optimized 2-core code. */
  125. getcoreid \scratch
  126. addx2 \reg,\reg,\scratch
  127. #else
  128. /* Generalized n-core code. Untested! */
  129. movi \scratch,portNUM_PROCESSORS
  130. mull \scratch,\reg,\scratch
  131. getcoreid \reg
  132. add \reg,\scratch,\reg
  133. #endif
  134. .endm
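/*
Worked example of get_percpu_entry_for on a dual-core build
(portNUM_PROCESSORS == 2): for interrupt number 5 the macro computes
reg = 5 * 2 + core_id, i.e. table entry 10 on core 0 and entry 11 on core 1,
matching the reg*portNUM_PROCESSORS + current_core_id formula above.
*/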
  135. /*
  136. --------------------------------------------------------------------------------
  137. Macro extract_msb - return the input with only the highest bit set.
  138. Input : "ain" - Input value, clobbered.
  139. Output : "aout" - Output value, has only one bit set, MSB of "ain".
  140. The two arguments must be different AR registers.
  141. --------------------------------------------------------------------------------
  142. */
  143. .macro extract_msb aout ain
  144. 1:
  145. addi \aout, \ain, -1 /* aout = ain - 1 */
  146. and \ain, \ain, \aout /* ain = ain & aout */
  147. bnez \ain, 1b /* repeat until ain == 0 */
  148. addi \aout, \aout, 1 /* return aout + 1 */
  149. .endm
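/*
Worked example of extract_msb: for ain = 0b10110 the loop clears the lowest set
bit on each pass (0b10110 -> 0b10100 -> 0b10000 -> 0), after which aout + 1
yields 0b10000, i.e. only the most significant set bit of the input survives.
*/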
  150. /*
  151. --------------------------------------------------------------------------------
  152. Macro dispatch_c_isr - dispatch interrupts to user ISRs.
  153. This will dispatch to user handlers (if any) that are registered in the
  154. XTOS dispatch table (_xtos_interrupt_table). These handlers would have
  155. been registered by calling _xtos_set_interrupt_handler(). There is one
  156. exception - the timer interrupt used by the OS will not be dispatched
  157. to a user handler - this must be handled by the caller of this macro.
  158. Level triggered and software interrupts are automatically deasserted by
  159. this code.
  160. ASSUMPTIONS:
  161. -- PS.INTLEVEL is set to "level" at entry
  162. -- PS.EXCM = 0, C calling enabled
  163. NOTE: For CALL0 ABI, a12-a15 have not yet been saved.
  164. NOTE: This macro will use registers a0 and a2-a7. The arguments are:
  165. level -- interrupt level
  166. mask -- interrupt bitmask for this level
  167. --------------------------------------------------------------------------------
  168. */
  169. .macro dispatch_c_isr level mask
  170. #ifdef CONFIG_PM_TRACE
  171. movi a6, 0 /* = ESP_PM_TRACE_IDLE */
  172. getcoreid a7
  173. call4 esp_pm_trace_exit
  174. #endif // CONFIG_PM_TRACE
  175. /* Get mask of pending, enabled interrupts at this level into a2. */
  176. .L_xt_user_int_&level&:
  177. rsr a2, INTENABLE
  178. rsr a3, INTERRUPT
  179. movi a4, \mask
  180. and a2, a2, a3
  181. and a2, a2, a4
  182. beqz a2, 9f /* nothing to do */
  183. /* This bit of code provides a nice debug backtrace in the debugger.
  184. It does take a few more instructions, so undef XT_DEBUG_BACKTRACE
  185. if you want to save the cycles.
  186. At this point, the exception frame should have been allocated and filled,
  187. and current sp points to the interrupt stack (for non-nested interrupt)
  188. or below the allocated exception frame (for nested interrupts). Copy the
  189. pre-exception's base save area below the current SP.
  190. */
  191. #ifdef XT_DEBUG_BACKTRACE
  192. #ifndef __XTENSA_CALL0_ABI__
  193. rsr a0, EXCSAVE_1 + \level - 1 /* Get exception frame pointer stored in EXCSAVE_x */
  194. l32i a3, a0, XT_STK_A0 /* Copy pre-exception a0 (return address) */
  195. s32e a3, a1, -16
  196. l32i a3, a0, XT_STK_A1 /* Copy pre-exception a1 (stack pointer) */
  197. s32e a3, a1, -12
  198. /* Backtracing only needs a0 and a1, no need to create full base save area.
  199. Also need to change current frame's return address to point to pre-exception's
  200. last run instruction.
  201. */
  202. rsr a0, EPC_1 + \level - 1 /* return address */
  203. movi a4, 0xC0000000 /* constant with top 2 bits set (call size) */
  204. or a0, a0, a4 /* set top 2 bits */
  205. addx2 a0, a4, a0 /* clear top bit -- simulating call4 size */
  206. #endif
  207. #endif
  208. #ifdef CONFIG_PM_ENABLE
  209. call4 esp_pm_impl_isr_hook
  210. #endif
  211. #ifdef XT_INTEXC_HOOKS
  212. /* Call interrupt hook if present to (pre)handle interrupts. */
  213. movi a4, _xt_intexc_hooks
  214. l32i a4, a4, \level << 2
  215. beqz a4, 2f
  216. #ifdef __XTENSA_CALL0_ABI__
  217. callx0 a4
  218. beqz a2, 9f
  219. #else
  220. mov a6, a2
  221. callx4 a4
  222. beqz a6, 9f
  223. mov a2, a6
  224. #endif
  225. 2:
  226. #endif
  227. /* Now look up in the dispatch table and call user ISR if any. */
  228. /* If multiple bits are set then MSB has highest priority. */
  229. extract_msb a4, a2 /* a4 = MSB of a2, a2 trashed */
  230. #ifdef XT_USE_SWPRI
  231. /* Enable all interrupts at this level that are numerically higher
  232. than the one we just selected, since they are treated as higher
  233. priority.
  234. */
  235. movi a3, \mask /* a3 = all interrupts at this level */
  236. add a2, a4, a4 /* a2 = a4 << 1 */
  237. addi a2, a2, -1 /* a2 = mask of 1's <= a4 bit */
  238. and a2, a2, a3 /* a2 = mask of all bits <= a4 at this level */
  239. movi a3, _xt_intdata
  240. l32i a6, a3, 4 /* a6 = _xt_vpri_mask */
  241. neg a2, a2
  242. addi a2, a2, -1 /* a2 = mask to apply */
  243. and a5, a6, a2 /* mask off all bits <= a4 bit */
  244. s32i a5, a3, 4 /* update _xt_vpri_mask */
  245. rsr a3, INTENABLE
  246. and a3, a3, a2 /* mask off all bits <= a4 bit */
  247. wsr a3, INTENABLE
  248. rsil a3, \level - 1 /* lower interrupt level by 1 */
  249. #endif
  250. movi a3, XT_TIMER_INTEN /* a3 = timer interrupt bit */
  251. wsr a4, INTCLEAR /* clear sw or edge-triggered interrupt */
  252. beq a3, a4, 7f /* if timer interrupt then skip table */
  253. find_ms_setbit a3, a4, a3, 0 /* a3 = interrupt number */
  254. get_percpu_entry_for a3, a12
  255. movi a4, _xt_interrupt_table
  256. addx8 a3, a3, a4 /* a3 = address of interrupt table entry */
  257. l32i a4, a3, XIE_HANDLER /* a4 = handler address */
  258. #ifdef __XTENSA_CALL0_ABI__
  259. mov a12, a6 /* save in callee-saved reg */
  260. l32i a2, a3, XIE_ARG /* a2 = handler arg */
  261. callx0 a4 /* call handler */
  262. mov a2, a12
  263. #else
  264. mov a2, a6 /* save in windowed reg */
  265. l32i a6, a3, XIE_ARG /* a6 = handler arg */
  266. callx4 a4 /* call handler */
  267. #endif
  268. #ifdef XT_USE_SWPRI
  269. j 8f
  270. #else
  271. j .L_xt_user_int_&level& /* check for more interrupts */
  272. #endif
  273. 7:
  274. .ifeq XT_TIMER_INTPRI - \level
  275. .L_xt_user_int_timer_&level&:
  276. /*
  277. Interrupt handler for the RTOS tick timer if at this level.
  278. We'll be reading the interrupt state again after this call
  279. so no need to preserve any registers except a6 (vpri_mask).
  280. */
  281. #ifdef __XTENSA_CALL0_ABI__
  282. mov a12, a6
  283. call0 XT_RTOS_TIMER_INT
  284. mov a2, a12
  285. #else
  286. mov a2, a6
  287. call4 XT_RTOS_TIMER_INT
  288. #endif
  289. .endif
  290. #ifdef XT_USE_SWPRI
  291. j 8f
  292. #else
  293. j .L_xt_user_int_&level& /* check for more interrupts */
  294. #endif
  295. #ifdef XT_USE_SWPRI
  296. 8:
  297. /* Restore old value of _xt_vpri_mask from a2. Also update INTENABLE from
  298. virtual _xt_intenable which _could_ have changed during interrupt
  299. processing. */
  300. movi a3, _xt_intdata
  301. l32i a4, a3, 0 /* a4 = _xt_intenable */
  302. s32i a2, a3, 4 /* update _xt_vpri_mask */
  303. and a4, a4, a2 /* a4 = masked intenable */
  304. wsr a4, INTENABLE /* update INTENABLE */
  305. #endif
  306. 9:
  307. /* done */
  308. .endm
  309. .section .rodata, "a"
  310. .align 4
  311. /*
  312. --------------------------------------------------------------------------------
  313. Hooks to dynamically install handlers for exceptions and interrupts.
  314. Allows automated regression frameworks to install handlers per test.
  315. Consists of an array of function pointers indexed by interrupt level,
  316. with index 0 containing the entry for user exceptions.
  317. Initialized with all 0s, meaning no handler is installed at each level.
  318. See comment in xtensa_rtos.h for more details.
  319. *WARNING* This array is for all CPUs, that is, installing a hook for
  320. one CPU will install it for all others as well!
  321. --------------------------------------------------------------------------------
  322. */
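/*
Illustrative sketch (an assumption, not a documented API): a test framework can
install a hook by writing a function pointer into this array, e.g.

    extern void *_xt_intexc_hooks[];             // element type is an assumption
    _xt_intexc_hooks[0] = (void *)&my_exc_hook;  // index 0: user exceptions
    _xt_intexc_hooks[2] = (void *)&my_l2_hook;   // index == interrupt level

Judging from the dispatch code below, an exception hook receives EXCCAUSE and
returns -1 to suppress default handling, while an interrupt hook receives and
returns the pending-interrupt mask for its level; check xtensa_rtos.h for the
exact hook typedef before relying on this.
*/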
  323. #ifdef XT_INTEXC_HOOKS
  324. .data
  325. .global _xt_intexc_hooks
  326. .type _xt_intexc_hooks,@object
  327. .align 4
  328. _xt_intexc_hooks:
  329. .fill XT_INTEXC_HOOK_NUM, 4, 0
  330. #endif
  331. /*
  332. --------------------------------------------------------------------------------
  333. EXCEPTION AND LEVEL 1 INTERRUPT VECTORS AND LOW LEVEL HANDLERS
  334. (except window exception vectors).
  335. Each vector goes at a predetermined location according to the Xtensa
  336. hardware configuration, which is ensured by its placement in a special
  337. section known to the Xtensa linker support package (LSP). It performs
  338. the minimum necessary before jumping to the handler in the .text section.
  339. The corresponding handler goes in the normal .text section. It sets up
  340. the appropriate stack frame, saves a few vector-specific registers and
  341. calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
  342. and enter the RTOS, then sets up a C environment. It then calls the
  343. user's interrupt handler code (which may be coded in C) and finally
  344. calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.
  345. While XT_RTOS_INT_EXIT does not return directly to the interruptee,
  346. eventually the RTOS scheduler will want to dispatch the interrupted
  347. task or handler. The scheduler will return to the exit point that was
  348. saved in the interrupt stack frame at XT_STK_EXIT.
  349. --------------------------------------------------------------------------------
  350. */
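/*
Condensed view of the dispatch flow described above (a summary of the code that
follows, not additional behaviour):

    vector (fixed address)   : save a0 to EXCSAVE_x, call0 to the handler
    handler (.iram1)         : allocate the interrupt stack frame, save PS/PC/a0,
                               store the exit point at XT_STK_EXIT,
                               call0 XT_RTOS_INT_ENTER, run dispatch_c_isr,
                               call0 XT_RTOS_INT_EXIT (no direct return)
    later, via the scheduler : the interrupted context is resumed through the
                               exit point saved at XT_STK_EXIT
*/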
  351. /*
  352. --------------------------------------------------------------------------------
  353. Debug Exception.
  354. --------------------------------------------------------------------------------
  355. */
  356. #if XCHAL_HAVE_DEBUG
  357. .begin literal_prefix .DebugExceptionVector
  358. .section .DebugExceptionVector.text, "ax"
  359. .global _DebugExceptionVector
  360. .align 4
  361. .global xt_debugexception
  362. _DebugExceptionVector:
  363. wsr a0, EXCSAVE+XCHAL_DEBUGLEVEL /* preserve a0 */
  364. call0 xt_debugexception /* load exception handler */
  365. .end literal_prefix
  366. #endif
  367. /*
  368. --------------------------------------------------------------------------------
  369. Double Exception.
  370. Double exceptions are not a normal occurrence. They indicate a bug of some kind.
  371. --------------------------------------------------------------------------------
  372. */
  373. #ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR
  374. .begin literal_prefix .DoubleExceptionVector
  375. .section .DoubleExceptionVector.text, "ax"
  376. .global _DoubleExceptionVector
  377. .align 4
  378. _DoubleExceptionVector:
  379. #if XCHAL_HAVE_DEBUG
  380. break 1, 4 /* unhandled double exception */
  381. #endif
  382. movi a0,PANIC_RSN_DOUBLEEXCEPTION
  383. wsr a0,EXCCAUSE
  384. call0 _xt_panic /* does not return */
  385. rfde /* make a0 point here not later */
  386. .end literal_prefix
  387. #endif /* XCHAL_DOUBLEEXC_VECTOR_VADDR */
  388. /*
  389. --------------------------------------------------------------------------------
  390. Kernel Exception (including Level 1 Interrupt from kernel mode).
  391. --------------------------------------------------------------------------------
  392. */
  393. .begin literal_prefix .KernelExceptionVector
  394. .section .KernelExceptionVector.text, "ax"
  395. .global _KernelExceptionVector
  396. .align 4
  397. _KernelExceptionVector:
  398. wsr a0, EXCSAVE_1 /* preserve a0 */
  399. call0 _xt_kernel_exc /* kernel exception handler */
  400. /* never returns here - call0 is used as a jump (see note at top) */
  401. .end literal_prefix
  402. .section .iram1,"ax"
  403. .align 4
  404. _xt_kernel_exc:
  405. #if XCHAL_HAVE_DEBUG
  406. break 1, 0 /* unhandled kernel exception */
  407. #endif
  408. movi a0,PANIC_RSN_KERNELEXCEPTION
  409. wsr a0,EXCCAUSE
  410. call0 _xt_panic /* does not return */
  411. rfe /* make a0 point here not there */
  412. /*
  413. --------------------------------------------------------------------------------
  414. User Exception (including Level 1 Interrupt from user mode).
  415. --------------------------------------------------------------------------------
  416. */
  417. .begin literal_prefix .UserExceptionVector
  418. .section .UserExceptionVector.text, "ax"
  419. .global _UserExceptionVector
  420. .type _UserExceptionVector,@function
  421. .align 4
  422. _UserExceptionVector:
  423. wsr a0, EXCSAVE_1 /* preserve a0 */
  424. call0 _xt_user_exc /* user exception handler */
  425. /* never returns here - call0 is used as a jump (see note at top) */
  426. .end literal_prefix
  427. /*
  428. --------------------------------------------------------------------------------
  429. Insert some waypoints for jumping beyond the signed 8-bit range of
  430. conditional branch instructions, so the conditional branches to specific
  431. exception handlers are not taken in the mainline. Saves some cycles in the
  432. mainline.
  433. --------------------------------------------------------------------------------
  434. */
  435. #ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
  436. .global LoadStoreErrorHandler
  437. .global AlignmentErrorHandler
  438. #endif
  439. .section .iram1,"ax"
  440. #if XCHAL_HAVE_WINDOWED
  441. .align 4
  442. _xt_to_alloca_exc:
  443. call0 _xt_alloca_exc /* in window vectors section */
  444. /* never returns here - call0 is used as a jump (see note at top) */
  445. #endif
  446. .align 4
  447. _xt_to_syscall_exc:
  448. call0 _xt_syscall_exc
  449. /* never returns here - call0 is used as a jump (see note at top) */
  450. #if XCHAL_CP_NUM > 0
  451. .align 4
  452. _xt_to_coproc_exc:
  453. call0 _xt_coproc_exc
  454. /* never returns here - call0 is used as a jump (see note at top) */
  455. #endif
  456. #ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
  457. .align 4
  458. _call_loadstore_handler:
  459. call0 LoadStoreErrorHandler
  460. /* This will return only if the opcode is wrong or the address is out of range */
  461. j .LS_exit
  462. .align 4
  463. _call_alignment_handler:
  464. call0 AlignmentErrorHandler
  465. /* This will return only if the opcode is wrong or the address is out of range */
  466. addi a0, a0, 1
  467. j .LS_exit
  468. #endif
  469. /*
  470. --------------------------------------------------------------------------------
  471. User exception handler.
  472. --------------------------------------------------------------------------------
  473. */
  474. .type _xt_user_exc,@function
  475. .align 4
  476. _xt_user_exc:
  477. /* If level 1 interrupt then jump to the dispatcher */
  478. rsr a0, EXCCAUSE
  479. beqi a0, EXCCAUSE_LEVEL1INTERRUPT, _xt_lowint1
  480. /* Handle any coprocessor exceptions. Rely on the fact that exception
  481. numbers above EXCCAUSE_CP0_DISABLED all relate to the coprocessors.
  482. */
  483. #if XCHAL_CP_NUM > 0
  484. bgeui a0, EXCCAUSE_CP0_DISABLED, _xt_to_coproc_exc
  485. #endif
  486. /* Handle alloca and syscall exceptions */
  487. #if XCHAL_HAVE_WINDOWED
  488. beqi a0, EXCCAUSE_ALLOCA, _xt_to_alloca_exc
  489. #endif
  490. beqi a0, EXCCAUSE_SYSCALL, _xt_to_syscall_exc
  491. #ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
  492. beqi a0, EXCCAUSE_LOAD_STORE_ERROR, _call_loadstore_handler
  493. addi a0, a0, -1
  494. beqi a0, 8, _call_alignment_handler
  495. addi a0, a0, 1
  496. .LS_exit:
  497. #endif
  498. /* Handle all other exceptions. All can have user-defined handlers. */
  499. /* NOTE: we'll stay on the user stack for exception handling. */
  500. /* Allocate exception frame and save minimal context. */
  501. mov a0, sp
  502. addi sp, sp, -XT_STK_FRMSZ
  503. s32i a0, sp, XT_STK_A1
  504. #if XCHAL_HAVE_WINDOWED
  505. s32e a0, sp, -12 /* for debug backtrace */
  506. #endif
  507. rsr a0, PS /* save interruptee's PS */
  508. s32i a0, sp, XT_STK_PS
  509. rsr a0, EPC_1 /* save interruptee's PC */
  510. s32i a0, sp, XT_STK_PC
  511. #if XCHAL_HAVE_WINDOWED
  512. s32e a0, sp, -16 /* for debug backtrace */
  513. #endif
  514. s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
  515. s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
  516. call0 _xt_context_save
  517. /* Save exc cause and vaddr into exception frame */
  518. rsr a0, EXCCAUSE
  519. s32i a0, sp, XT_STK_EXCCAUSE
  520. rsr a0, EXCVADDR
  521. s32i a0, sp, XT_STK_EXCVADDR
  522. /* _xt_context_save seems to save the current a0, but we need the interruptee's a0. Fix this. */
  523. rsr a0, EXCSAVE_1 /* save interruptee's a0 */
  524. s32i a0, sp, XT_STK_A0
  525. /* Set up PS for C, reenable hi-pri interrupts, and clear EXCM. */
  526. #ifdef __XTENSA_CALL0_ABI__
  527. movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM
  528. #else
  529. movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE
  530. #endif
  531. wsr a0, PS
  532. /*
  533. Create pseudo base save area. At this point, sp is still pointing to the
  534. allocated and filled exception stack frame.
  535. */
  536. #ifdef XT_DEBUG_BACKTRACE
  537. #ifndef __XTENSA_CALL0_ABI__
  538. l32i a3, sp, XT_STK_A0 /* Copy pre-exception a0 (return address) */
  539. s32e a3, sp, -16
  540. l32i a3, sp, XT_STK_A1 /* Copy pre-exception a1 (stack pointer) */
  541. s32e a3, sp, -12
  542. rsr a0, EPC_1 /* return address for debug backtrace */
  543. movi a5, 0xC0000000 /* constant with top 2 bits set (call size) */
  544. rsync /* wait for WSR.PS to complete */
  545. or a0, a0, a5 /* set top 2 bits */
  546. addx2 a0, a5, a0 /* clear top bit -- thus simulating call4 size */
  547. #else
  548. rsync /* wait for WSR.PS to complete */
  549. #endif
  550. #endif
  551. rsr a2, EXCCAUSE /* recover exc cause */
  552. #ifdef XT_INTEXC_HOOKS
  553. /*
  554. Call exception hook to pre-handle exceptions (if installed).
  555. Pass EXCCAUSE in a2, and check result in a2 (if -1, skip default handling).
  556. */
  557. movi a4, _xt_intexc_hooks
  558. l32i a4, a4, 0 /* user exception hook index 0 */
  559. beqz a4, 1f
  560. .Ln_xt_user_exc_call_hook:
  561. #ifdef __XTENSA_CALL0_ABI__
  562. callx0 a4
  563. beqi a2, -1, .L_xt_user_done
  564. #else
  565. mov a6, a2
  566. callx4 a4
  567. beqi a6, -1, .L_xt_user_done
  568. mov a2, a6
  569. #endif
  570. 1:
  571. #endif
  572. rsr a2, EXCCAUSE /* recover exc cause */
  573. movi a3, _xt_exception_table
  574. get_percpu_entry_for a2, a4
  575. addx4 a4, a2, a3 /* a4 = address of exception table entry */
  576. l32i a4, a4, 0 /* a4 = handler address */
  577. #ifdef __XTENSA_CALL0_ABI__
  578. mov a2, sp /* a2 = pointer to exc frame */
  579. callx0 a4 /* call handler */
  580. #else
  581. mov a6, sp /* a6 = pointer to exc frame */
  582. callx4 a4 /* call handler */
  583. #endif
  584. .L_xt_user_done:
  585. /* Restore context and return */
  586. call0 _xt_context_restore
  587. l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
  588. wsr a0, PS
  589. l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
  590. wsr a0, EPC_1
  591. l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
  592. l32i sp, sp, XT_STK_A1 /* remove exception frame */
  593. rsync /* ensure PS and EPC written */
  594. rfe /* PS.EXCM is cleared */
  595. /*
  596. --------------------------------------------------------------------------------
  597. Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
  598. on entry and used to return to a thread or interrupted interrupt handler.
  599. --------------------------------------------------------------------------------
  600. */
  601. .global _xt_user_exit
  602. .type _xt_user_exit,@function
  603. .align 4
  604. _xt_user_exit:
  605. l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
  606. wsr a0, PS
  607. l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
  608. wsr a0, EPC_1
  609. l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
  610. l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
  611. rsync /* ensure PS and EPC written */
  612. rfe /* PS.EXCM is cleared */
  613. /*
  614. --------------------------------------------------------------------------------
  615. Syscall Exception Handler (jumped to from User Exception Handler).
  616. Syscall 0 is required to spill the register windows (a no-op in the Call0 ABI).
  617. Only syscall 0 is handled here. Other syscalls return -1 to caller in a2.
  618. --------------------------------------------------------------------------------
  619. */
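/*
Illustrative use of the handler below (a hedged sketch, not part of this file's
build): on the windowed ABI a 'syscall' issued with a2 == 0 spills the live
register windows to the stack, e.g. from C:

    register int sc __asm__("a2") = 0;                     // syscall number 0
    __asm__ volatile ("syscall" : "+r" (sc) :: "memory");

Any other value in a2 is not handled here and -1 is returned in a2. Real code
would normally use the HAL's window-spill helper rather than open-coding this.
*/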
  620. .section .iram1,"ax"
  621. .type _xt_syscall_exc,@function
  622. .align 4
  623. _xt_syscall_exc:
  624. #ifdef __XTENSA_CALL0_ABI__
  625. /*
  626. Save minimal regs for scratch. Syscall 0 does nothing in Call0 ABI.
  627. Use a minimal stack frame (16B) to save A2 & A3 for scratch.
  628. PS.EXCM could be cleared here, but unlikely to improve worst-case latency.
  629. rsr a0, PS
  630. addi a0, a0, -PS_EXCM_MASK
  631. wsr a0, PS
  632. */
  633. addi sp, sp, -16
  634. s32i a2, sp, 8
  635. s32i a3, sp, 12
  636. #else /* Windowed ABI */
  637. /*
  638. Save necessary context and spill the register windows.
  639. PS.EXCM is still set and must remain set until after the spill.
  640. Reuse context save function though it saves more than necessary.
  641. For this reason, a full interrupt stack frame is allocated.
  642. */
  643. addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
  644. s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
  645. s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
  646. call0 _xt_context_save
  647. #endif
  648. /*
  649. Grab the interruptee's PC and skip over the 'syscall' instruction.
  650. If it's at the end of a zero-overhead loop and it's not on the last
  651. iteration, decrement loop counter and skip to beginning of loop.
  652. */
  653. rsr a2, EPC_1 /* a2 = PC of 'syscall' */
  654. addi a3, a2, 3 /* ++PC */
  655. #if XCHAL_HAVE_LOOPS
  656. rsr a0, LEND /* if (PC == LEND */
  657. bne a3, a0, 1f
  658. rsr a0, LCOUNT /* && LCOUNT != 0) */
  659. beqz a0, 1f /* { */
  660. addi a0, a0, -1 /* --LCOUNT */
  661. rsr a3, LBEG /* PC = LBEG */
  662. wsr a0, LCOUNT /* } */
  663. #endif
  664. 1: wsr a3, EPC_1 /* update PC */
  665. /* Restore interruptee's context and return from exception. */
  666. #ifdef __XTENSA_CALL0_ABI__
  667. l32i a2, sp, 8
  668. l32i a3, sp, 12
  669. addi sp, sp, 16
  670. #else
  671. call0 _xt_context_restore
  672. addi sp, sp, XT_STK_FRMSZ
  673. #endif
  674. movi a0, -1
  675. movnez a2, a0, a2 /* return -1 if not syscall 0 */
  676. rsr a0, EXCSAVE_1
  677. rfe
  678. /*
  679. --------------------------------------------------------------------------------
  680. Co-Processor Exception Handler (jumped to from User Exception Handler).
  681. These exceptions are generated by co-processor instructions, which are only
  682. allowed in thread code (not in interrupts or kernel code). This restriction is
  683. deliberately imposed to reduce the burden of state-save/restore in interrupts.
  684. --------------------------------------------------------------------------------
  685. */
  686. #if XCHAL_CP_NUM > 0
  687. .section .rodata, "a"
  688. /* Offset to CP n save area in thread's CP save area. */
  689. .global _xt_coproc_sa_offset
  690. .type _xt_coproc_sa_offset,@object
  691. .align 16 /* minimize crossing cache boundaries */
  692. _xt_coproc_sa_offset:
  693. .word XT_CP0_SA, XT_CP1_SA, XT_CP2_SA, XT_CP3_SA
  694. .word XT_CP4_SA, XT_CP5_SA, XT_CP6_SA, XT_CP7_SA
  695. /* Bitmask for CP n's CPENABLE bit. */
  696. .type _xt_coproc_mask,@object
  697. .align 16,,8 /* try to keep it all in one cache line */
  698. .set i, 0
  699. _xt_coproc_mask:
  700. .rept XCHAL_CP_MAX
  701. .long (i<<16) | (1<<i) // upper 16-bits = i, lower = bitmask
  702. .set i, i+1
  703. .endr
  704. .data
  705. /* Owner thread of CP n, identified by thread's CP save area (0 = unowned). */
  706. .global _xt_coproc_owner_sa
  707. .type _xt_coproc_owner_sa,@object
  708. .align 16,,XCHAL_CP_MAX<<2 /* minimize crossing cache boundaries */
  709. _xt_coproc_owner_sa:
  710. .space (XCHAL_CP_MAX * portNUM_PROCESSORS) << 2
  711. .section .iram1,"ax"
  712. .align 4
  713. .L_goto_invalid:
  714. j .L_xt_coproc_invalid /* not in a thread (invalid) */
  715. .align 4
  716. .L_goto_done:
  717. j .L_xt_coproc_done
  718. /*
  719. --------------------------------------------------------------------------------
  720. Coprocessor exception handler.
  721. At entry, only a0 has been saved (in EXCSAVE_1).
  722. --------------------------------------------------------------------------------
  723. */
  724. .type _xt_coproc_exc,@function
  725. .align 4
  726. _xt_coproc_exc:
  727. /* Allocate interrupt stack frame and save minimal context. */
  728. mov a0, sp /* sp == a1 */
  729. addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
  730. s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
  731. #if XCHAL_HAVE_WINDOWED
  732. s32e a0, sp, -12 /* for debug backtrace */
  733. #endif
  734. rsr a0, PS /* save interruptee's PS */
  735. s32i a0, sp, XT_STK_PS
  736. rsr a0, EPC_1 /* save interruptee's PC */
  737. s32i a0, sp, XT_STK_PC
  738. rsr a0, EXCSAVE_1 /* save interruptee's a0 */
  739. s32i a0, sp, XT_STK_A0
  740. #if XCHAL_HAVE_WINDOWED
  741. s32e a0, sp, -16 /* for debug backtrace */
  742. #endif
  743. movi a0, _xt_user_exit /* save exit point for dispatch */
  744. s32i a0, sp, XT_STK_EXIT
  745. rsr a0, EXCCAUSE
  746. s32i a5, sp, XT_STK_A5 /* save a5 */
  747. addi a5, a0, -EXCCAUSE_CP0_DISABLED /* a5 = CP index */
  748. /* Save a few more of interruptee's registers (a5 was already saved). */
  749. s32i a2, sp, XT_STK_A2
  750. s32i a3, sp, XT_STK_A3
  751. s32i a4, sp, XT_STK_A4
  752. s32i a15, sp, XT_STK_A15
  753. /* Get co-processor state save area of new owner thread. */
  754. call0 XT_RTOS_CP_STATE /* a15 = new owner's save area */
  755. #ifndef CONFIG_FREERTOS_FPU_IN_ISR
  756. beqz a15, .L_goto_invalid
  757. #endif
  758. /* When FPU in ISR is enabled, a zeroed a15 is allowed and handled below. */
  759. /* Enable the co-processor's bit in CPENABLE. */
  760. movi a0, _xt_coproc_mask
  761. rsr a4, CPENABLE /* a4 = CPENABLE */
  762. addx4 a0, a5, a0 /* a0 = &_xt_coproc_mask[n] */
  763. l32i a0, a0, 0 /* a0 = (n << 16) | (1 << n) */
  764. /* FPU operations are incompatible with non-pinned tasks. If we have an FPU operation
  765. here, to keep the entire thing from crashing, it's better to pin the task to whatever
  766. core we're running on now. */
  767. movi a2, pxCurrentTCB
  768. getcoreid a3
  769. addx4 a2, a3, a2
  770. l32i a2, a2, 0 /* a2 = start of pxCurrentTCB[cpuid] */
  771. addi a2, a2, TASKTCB_XCOREID_OFFSET /* offset to xCoreID in tcb struct */
  772. s32i a3, a2, 0 /* store current cpuid */
  773. /* Grab correct xt_coproc_owner_sa for this core */
  774. movi a2, XCHAL_CP_MAX << 2
  775. mull a2, a2, a3 /* multiply by current processor id */
  776. movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
  777. add a3, a3, a2 /* a3 = owner area needed for this processor */
  778. extui a2, a0, 0, 16 /* coprocessor bitmask portion */
  779. or a4, a4, a2 /* a4 = CPENABLE | (1 << n) */
  780. wsr a4, CPENABLE
  781. /*
  782. Keep the load of _xt_coproc_owner_sa[n] atomic (load it once, then use that value
  783. everywhere): _xt_coproc_release relies on this behaviour so that it does not need
  784. locking.
  785. */
  786. /* Get old coprocessor owner thread (save area ptr) and assign new one. */
  787. addx4 a3, a5, a3 /* a3 = &_xt_coproc_owner_sa[n] */
  788. l32i a2, a3, 0 /* a2 = old owner's save area */
  789. s32i a15, a3, 0 /* _xt_coproc_owner_sa[n] = new */
  790. rsync /* ensure wsr.CPENABLE is complete */
  791. /* Only need to context switch if new owner != old owner. */
  792. /* If FPU use in ISRs is enabled, the check below must be removed, because
  793. when restoring from an ISR we may have new == old, and that condition is
  794. used to force the CP restore for the next thread.
  795. */
  796. #ifndef CONFIG_FREERTOS_FPU_IN_ISR
  797. beq a15, a2, .L_goto_done /* new owner == old, we're done */
  798. #endif
  799. /* If no old owner then nothing to save. */
  800. beqz a2, .L_check_new
  801. /* If old owner not actively using CP then nothing to save. */
  802. l16ui a4, a2, XT_CPENABLE /* a4 = old owner's CPENABLE */
  803. bnone a4, a0, .L_check_new /* old owner not using CP */
  804. .L_save_old:
  805. /* Save old owner's coprocessor state. */
  806. movi a5, _xt_coproc_sa_offset
  807. /* Mark old owner state as no longer active (CPENABLE bit n clear). */
  808. xor a4, a4, a0 /* clear CP bit in CPENABLE */
  809. s16i a4, a2, XT_CPENABLE /* update old owner's CPENABLE */
  810. extui a4, a0, 16, 5 /* a4 = CP index = n */
  811. addx4 a5, a4, a5 /* a5 = &_xt_coproc_sa_offset[n] */
  812. /* Mark old owner state as saved (CPSTORED bit n set). */
  813. l16ui a4, a2, XT_CPSTORED /* a4 = old owner's CPSTORED */
  814. l32i a5, a5, 0 /* a5 = XT_CP[n]_SA offset */
  815. or a4, a4, a0 /* set CP in old owner's CPSTORED */
  816. s16i a4, a2, XT_CPSTORED /* update old owner's CPSTORED */
  817. l32i a2, a2, XT_CP_ASA /* ptr to actual (aligned) save area */
  818. extui a3, a0, 16, 5 /* a3 = CP index = n */
  819. add a2, a2, a5 /* a2 = old owner's area for CP n */
  820. /*
  821. The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
  822. It is theoretically possible for Xtensa processor designers to write TIE
  823. that causes more address registers to be affected, but it is generally
  824. unlikely. If that ever happens, more registers need to be saved/restored
  825. around this macro invocation, and the value in a15 needs to be recomputed.
  826. */
  827. xchal_cpi_store_funcbody
  828. .L_check_new:
  829. /* Check if any state has to be restored for new owner. */
  830. /* NOTE: a15 = new owner's save area, cannot be zero when we get here. */
  831. beqz a15, .L_xt_coproc_done
  832. l16ui a3, a15, XT_CPSTORED /* a3 = new owner's CPSTORED */
  833. movi a4, _xt_coproc_sa_offset
  834. bnone a3, a0, .L_check_cs /* full CP not saved, check callee-saved */
  835. xor a3, a3, a0 /* CPSTORED bit is set, clear it */
  836. s16i a3, a15, XT_CPSTORED /* update new owner's CPSTORED */
  837. /* Adjust new owner's save area pointers to area for CP n. */
  838. extui a3, a0, 16, 5 /* a3 = CP index = n */
  839. addx4 a4, a3, a4 /* a4 = &_xt_coproc_sa_offset[n] */
  840. l32i a4, a4, 0 /* a4 = XT_CP[n]_SA */
  841. l32i a5, a15, XT_CP_ASA /* ptr to actual (aligned) save area */
  842. add a2, a4, a5 /* a2 = new owner's area for CP */
  843. /*
  844. The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
  845. It is theoretically possible for Xtensa processor designers to write TIE
  846. that causes more address registers to be affected, but it is generally
  847. unlikely. If that ever happens, more registers need to be saved/restored
  848. around this macro invocation.
  849. */
  850. xchal_cpi_load_funcbody
  851. /* Restore interruptee's saved registers. */
  852. /* Can omit rsync for wsr.CPENABLE here because _xt_user_exit does it. */
  853. .L_xt_coproc_done:
  854. l32i a15, sp, XT_STK_A15
  855. l32i a5, sp, XT_STK_A5
  856. l32i a4, sp, XT_STK_A4
  857. l32i a3, sp, XT_STK_A3
  858. l32i a2, sp, XT_STK_A2
  859. call0 _xt_user_exit /* return via exit dispatcher */
  860. /* Never returns here - call0 is used as a jump (see note at top) */
  861. .L_check_cs:
  862. /* a0 = CP mask in low bits, a15 = new owner's save area */
  863. l16ui a2, a15, XT_CP_CS_ST /* a2 = mask of CPs saved */
  864. bnone a2, a0, .L_xt_coproc_done /* if no match then done */
  865. and a2, a2, a0 /* a2 = which CPs to restore */
  866. extui a2, a2, 0, 8 /* extract low 8 bits */
  867. s32i a6, sp, XT_STK_A6 /* save extra needed regs */
  868. s32i a7, sp, XT_STK_A7
  869. s32i a13, sp, XT_STK_A13
  870. s32i a14, sp, XT_STK_A14
  871. call0 _xt_coproc_restorecs /* restore CP registers */
  872. l32i a6, sp, XT_STK_A6 /* restore saved registers */
  873. l32i a7, sp, XT_STK_A7
  874. l32i a13, sp, XT_STK_A13
  875. l32i a14, sp, XT_STK_A14
  876. j .L_xt_coproc_done
  877. /* Co-processor exception occurred outside a thread (not supported). */
  878. .L_xt_coproc_invalid:
  879. movi a0,PANIC_RSN_COPROCEXCEPTION
  880. wsr a0,EXCCAUSE
  881. call0 _xt_panic /* not in a thread (invalid) */
  882. /* never returns */
  883. #endif /* XCHAL_CP_NUM */
  884. /*
  885. -------------------------------------------------------------------------------
  886. Level 1 interrupt dispatch. Assumes stack frame has not been allocated yet.
  887. -------------------------------------------------------------------------------
  888. */
  889. .section .iram1,"ax"
  890. .type _xt_lowint1,@function
  891. .align 4
  892. _xt_lowint1:
  893. mov a0, sp /* sp == a1 */
  894. addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
  895. s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
  896. rsr a0, PS /* save interruptee's PS */
  897. s32i a0, sp, XT_STK_PS
  898. rsr a0, EPC_1 /* save interruptee's PC */
  899. s32i a0, sp, XT_STK_PC
  900. rsr a0, EXCSAVE_1 /* save interruptee's a0 */
  901. s32i a0, sp, XT_STK_A0
  902. movi a0, _xt_user_exit /* save exit point for dispatch */
  903. s32i a0, sp, XT_STK_EXIT
  904. /* EXCSAVE_1 should now be free to use. Use it to keep a copy of the
  905. current stack pointer that points to the exception frame (XT_STK_FRAME).*/
  906. #ifdef XT_DEBUG_BACKTRACE
  907. #ifndef __XTENSA_CALL0_ABI__
  908. mov a0, sp
  909. wsr a0, EXCSAVE_1
  910. #endif
  911. #endif
  912. /* Save rest of interrupt context and enter RTOS. */
  913. call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
  914. /* !! We are now on the RTOS system stack !! */
  915. /* Set up PS for C, enable interrupts above this level and clear EXCM. */
  916. #ifdef __XTENSA_CALL0_ABI__
  917. movi a0, PS_INTLEVEL(1) | PS_UM
  918. #else
  919. movi a0, PS_INTLEVEL(1) | PS_UM | PS_WOE
  920. #endif
  921. wsr a0, PS
  922. rsync
  923. /* OK to call C code at this point, dispatch user ISRs */
  924. dispatch_c_isr 1 XCHAL_INTLEVEL1_MASK
  925. /* Done handling interrupts, transfer control to OS */
  926. call0 XT_RTOS_INT_EXIT /* does not return directly here */
  927. /*
  928. -------------------------------------------------------------------------------
  929. MEDIUM PRIORITY (LEVEL 2+) INTERRUPT VECTORS AND LOW LEVEL HANDLERS.
  930. Medium priority interrupts are by definition those with priority greater
  931. than 1 and not greater than XCHAL_EXCM_LEVEL. These are disabled by
  932. setting PS.EXCM and therefore can easily support a C environment for
  933. handlers in C, and interact safely with an RTOS.
  934. Each vector goes at a predetermined location according to the Xtensa
  935. hardware configuration, which is ensured by its placement in a special
  936. section known to the Xtensa linker support package (LSP). It performs
  937. the minimum necessary before jumping to the handler in the .text section.
  938. The corresponding handler goes in the normal .text section. It sets up
  939. the appropriate stack frame, saves a few vector-specific registers and
  940. calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
  941. and enter the RTOS, then sets up a C environment. It then calls the
  942. user's interrupt handler code (which may be coded in C) and finally
  943. calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.
  944. While XT_RTOS_INT_EXIT does not return directly to the interruptee,
  945. eventually the RTOS scheduler will want to dispatch the interrupted
  946. task or handler. The scheduler will return to the exit point that was
  947. saved in the interrupt stack frame at XT_STK_EXIT.
  948. -------------------------------------------------------------------------------
  949. */
  950. #if XCHAL_EXCM_LEVEL >= 2
  951. .begin literal_prefix .Level2InterruptVector
  952. .section .Level2InterruptVector.text, "ax"
  953. .global _Level2Vector
  954. .type _Level2Vector,@function
  955. .align 4
  956. _Level2Vector:
  957. wsr a0, EXCSAVE_2 /* preserve a0 */
  958. call0 _xt_medint2 /* load interrupt handler */
  959. /* never returns here - call0 is used as a jump (see note at top) */
  960. .end literal_prefix
  961. .section .iram1,"ax"
  962. .type _xt_medint2,@function
  963. .align 4
  964. _xt_medint2:
  965. mov a0, sp /* sp == a1 */
  966. addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
  967. s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
  968. rsr a0, EPS_2 /* save interruptee's PS */
  969. s32i a0, sp, XT_STK_PS
  970. rsr a0, EPC_2 /* save interruptee's PC */
  971. s32i a0, sp, XT_STK_PC
  972. rsr a0, EXCSAVE_2 /* save interruptee's a0 */
  973. s32i a0, sp, XT_STK_A0
  974. movi a0, _xt_medint2_exit /* save exit point for dispatch */
  975. s32i a0, sp, XT_STK_EXIT
  976. /* EXCSAVE_2 should now be free to use. Use it to keep a copy of the
  977. current stack pointer that points to the exception frame (XT_STK_FRAME).*/
  978. #ifdef XT_DEBUG_BACKTRACE
  979. #ifndef __XTENSA_CALL0_ABI__
  980. mov a0, sp
  981. wsr a0, EXCSAVE_2
  982. #endif
  983. #endif
  984. /* Save rest of interrupt context and enter RTOS. */
  985. call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
  986. /* !! We are now on the RTOS system stack !! */
  987. /* Set up PS for C, enable interrupts above this level and clear EXCM. */
  988. #ifdef __XTENSA_CALL0_ABI__
  989. movi a0, PS_INTLEVEL(2) | PS_UM
  990. #else
  991. movi a0, PS_INTLEVEL(2) | PS_UM | PS_WOE
  992. #endif
  993. wsr a0, PS
  994. rsync
  995. /* OK to call C code at this point, dispatch user ISRs */
  996. dispatch_c_isr 2 XCHAL_INTLEVEL2_MASK
  997. /* Done handling interrupts, transfer control to OS */
  998. call0 XT_RTOS_INT_EXIT /* does not return directly here */
  999. /*
  1000. Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
  1001. on entry and used to return to a thread or interrupted interrupt handler.
  1002. */
  1003. .global _xt_medint2_exit
  1004. .type _xt_medint2_exit,@function
  1005. .align 4
  1006. _xt_medint2_exit:
  1007. /* Restore only level-specific regs (the rest were already restored) */
  1008. l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
  1009. wsr a0, EPS_2
  1010. l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
  1011. wsr a0, EPC_2
  1012. l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
  1013. l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
  1014. rsync /* ensure EPS and EPC written */
  1015. rfi 2
  1016. #endif /* Level 2 */
  1017. #if XCHAL_EXCM_LEVEL >= 3
  1018. .begin literal_prefix .Level3InterruptVector
  1019. .section .Level3InterruptVector.text, "ax"
  1020. .global _Level3Vector
  1021. .type _Level3Vector,@function
  1022. .align 4
  1023. _Level3Vector:
  1024. wsr a0, EXCSAVE_3 /* preserve a0 */
  1025. call0 _xt_medint3 /* load interrupt handler */
  1026. /* never returns here - call0 is used as a jump (see note at top) */
  1027. .end literal_prefix
  1028. .section .iram1,"ax"
  1029. .type _xt_medint3,@function
  1030. .align 4
  1031. _xt_medint3:
  1032. mov a0, sp /* sp == a1 */
  1033. addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
  1034. s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
  1035. rsr a0, EPS_3 /* save interruptee's PS */
  1036. s32i a0, sp, XT_STK_PS
  1037. rsr a0, EPC_3 /* save interruptee's PC */
  1038. s32i a0, sp, XT_STK_PC
  1039. rsr a0, EXCSAVE_3 /* save interruptee's a0 */
  1040. s32i a0, sp, XT_STK_A0
  1041. movi a0, _xt_medint3_exit /* save exit point for dispatch */
  1042. s32i a0, sp, XT_STK_EXIT
  1043. /* EXCSAVE_3 should now be free to use. Use it to keep a copy of the
  1044. current stack pointer that points to the exception frame (XT_STK_FRAME).*/
  1045. #ifdef XT_DEBUG_BACKTRACE
  1046. #ifndef __XTENSA_CALL0_ABI__
  1047. mov a0, sp
  1048. wsr a0, EXCSAVE_3
  1049. #endif
  1050. #endif
  1051. /* Save rest of interrupt context and enter RTOS. */
  1052. call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */
  1053. /* !! We are now on the RTOS system stack !! */
  1054. /* Set up PS for C, enable interrupts above this level and clear EXCM. */
  1055. #ifdef __XTENSA_CALL0_ABI__
  1056. movi a0, PS_INTLEVEL(3) | PS_UM
  1057. #else
  1058. movi a0, PS_INTLEVEL(3) | PS_UM | PS_WOE
  1059. #endif
  1060. wsr a0, PS
  1061. rsync
  1062. /* OK to call C code at this point, dispatch user ISRs */
  1063. dispatch_c_isr 3 XCHAL_INTLEVEL3_MASK
  1064. /* Done handling interrupts, transfer control to OS */
  1065. call0 XT_RTOS_INT_EXIT /* does not return directly here */
  1066. /*
  1067. Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
  1068. on entry and used to return to a thread or interrupted interrupt handler.
  1069. */
  1070. .global _xt_medint3_exit
  1071. .type _xt_medint3_exit,@function
  1072. .align 4
  1073. _xt_medint3_exit:
  1074. /* Restore only level-specific regs (the rest were already restored) */
  1075. l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
  1076. wsr a0, EPS_3
  1077. l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
  1078. wsr a0, EPC_3
  1079. l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
  1080. l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
  1081. rsync /* ensure EPS and EPC written */
  1082. rfi 3
  1083. #endif /* Level 3 */
  1084. #if XCHAL_EXCM_LEVEL >= 4
  1085. .begin literal_prefix .Level4InterruptVector
  1086. .section .Level4InterruptVector.text, "ax"
  1087. .global _Level4Vector
  1088. .type _Level4Vector,@function
  1089. .align 4
  1090. _Level4Vector:
  1091. wsr a0, EXCSAVE_4 /* preserve a0 */
  1092. call0 _xt_medint4 /* load interrupt handler */
    .end        literal_prefix

    .section    .iram1, "ax"
    .type       _xt_medint4,@function
    .align      4
_xt_medint4:
    mov     a0, sp                      /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ       /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1           /* save pre-interrupt SP */
    rsr     a0, EPS_4                   /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_4                   /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_4               /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_medint4_exit        /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_4 should now be free to use. Use it to keep a copy of the
       current stack pointer that points to the exception frame (XT_STK_FRAME). */
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_4
#endif
#endif

    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER           /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(4) | PS_UM
#else
    movi    a0, PS_INTLEVEL(4) | PS_UM | PS_WOE
#endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */
    dispatch_c_isr 4 XCHAL_INTLEVEL4_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT            /* does not return directly here */

    /*
    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
    on entry and used to return to a thread or interrupted interrupt handler.
    */
    .global     _xt_medint4_exit
    .type       _xt_medint4_exit,@function
    .align      4
_xt_medint4_exit:
    /* Restore only level-specific regs (the rest were already restored) */
    l32i    a0, sp, XT_STK_PS           /* retrieve interruptee's PS */
    wsr     a0, EPS_4
    l32i    a0, sp, XT_STK_PC           /* retrieve interruptee's PC */
    wsr     a0, EPC_4
    l32i    a0, sp, XT_STK_A0           /* retrieve interruptee's A0 */
    l32i    sp, sp, XT_STK_A1           /* remove interrupt stack frame */
    rsync                               /* ensure EPS and EPC written */
    rfi     4

#endif  /* Level 4 */

#if XCHAL_EXCM_LEVEL >= 5

    .begin      literal_prefix .Level5InterruptVector
    .section    .Level5InterruptVector.text, "ax"
    .global     _Level5Vector
    .type       _Level5Vector,@function
    .align      4
_Level5Vector:
    wsr     a0, EXCSAVE_5               /* preserve a0 */
    call0   _xt_medint5                 /* load interrupt handler */
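    /* never returns here - call0 is used as a jump (see note at top) */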
    .end        literal_prefix

    .section    .iram1, "ax"
    .type       _xt_medint5,@function
    .align      4
_xt_medint5:
    mov     a0, sp                      /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ       /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1           /* save pre-interrupt SP */
    rsr     a0, EPS_5                   /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_5                   /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_5               /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_medint5_exit        /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_5 should now be free to use. Use it to keep a copy of the
       current stack pointer that points to the exception frame (XT_STK_FRAME). */
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_5
#endif
#endif

    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER           /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(5) | PS_UM
#else
    movi    a0, PS_INTLEVEL(5) | PS_UM | PS_WOE
#endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */
    dispatch_c_isr 5 XCHAL_INTLEVEL5_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT            /* does not return directly here */

    /*
    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
    on entry and used to return to a thread or interrupted interrupt handler.
    */
    .global     _xt_medint5_exit
    .type       _xt_medint5_exit,@function
    .align      4
_xt_medint5_exit:
    /* Restore only level-specific regs (the rest were already restored) */
    l32i    a0, sp, XT_STK_PS           /* retrieve interruptee's PS */
    wsr     a0, EPS_5
    l32i    a0, sp, XT_STK_PC           /* retrieve interruptee's PC */
    wsr     a0, EPC_5
    l32i    a0, sp, XT_STK_A0           /* retrieve interruptee's A0 */
    l32i    sp, sp, XT_STK_A1           /* remove interrupt stack frame */
    rsync                               /* ensure EPS and EPC written */
    rfi     5

#endif  /* Level 5 */

#if XCHAL_EXCM_LEVEL >= 6

    .begin      literal_prefix .Level6InterruptVector
    .section    .Level6InterruptVector.text, "ax"
    .global     _Level6Vector
    .type       _Level6Vector,@function
    .align      4
_Level6Vector:
    wsr     a0, EXCSAVE_6               /* preserve a0 */
    call0   _xt_medint6                 /* load interrupt handler */
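    /* never returns here - call0 is used as a jump (see note at top) */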
    .end        literal_prefix

    .section    .iram1, "ax"
    .type       _xt_medint6,@function
    .align      4
_xt_medint6:
    mov     a0, sp                      /* sp == a1 */
    addi    sp, sp, -XT_STK_FRMSZ       /* allocate interrupt stack frame */
    s32i    a0, sp, XT_STK_A1           /* save pre-interrupt SP */
    rsr     a0, EPS_6                   /* save interruptee's PS */
    s32i    a0, sp, XT_STK_PS
    rsr     a0, EPC_6                   /* save interruptee's PC */
    s32i    a0, sp, XT_STK_PC
    rsr     a0, EXCSAVE_6               /* save interruptee's a0 */
    s32i    a0, sp, XT_STK_A0
    movi    a0, _xt_medint6_exit        /* save exit point for dispatch */
    s32i    a0, sp, XT_STK_EXIT

    /* EXCSAVE_6 should now be free to use. Use it to keep a copy of the
       current stack pointer that points to the exception frame (XT_STK_FRAME). */
#ifdef XT_DEBUG_BACKTRACE
#ifndef __XTENSA_CALL0_ABI__
    mov     a0, sp
    wsr     a0, EXCSAVE_6
#endif
#endif

    /* Save rest of interrupt context and enter RTOS. */
    call0   XT_RTOS_INT_ENTER           /* common RTOS interrupt entry */

    /* !! We are now on the RTOS system stack !! */

    /* Set up PS for C, enable interrupts above this level and clear EXCM. */
#ifdef __XTENSA_CALL0_ABI__
    movi    a0, PS_INTLEVEL(6) | PS_UM
#else
    movi    a0, PS_INTLEVEL(6) | PS_UM | PS_WOE
#endif
    wsr     a0, PS
    rsync

    /* OK to call C code at this point, dispatch user ISRs */
    dispatch_c_isr 6 XCHAL_INTLEVEL6_MASK

    /* Done handling interrupts, transfer control to OS */
    call0   XT_RTOS_INT_EXIT            /* does not return directly here */

    /*
    Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
    on entry and used to return to a thread or interrupted interrupt handler.
    */
    .global     _xt_medint6_exit
    .type       _xt_medint6_exit,@function
    .align      4
_xt_medint6_exit:
    /* Restore only level-specific regs (the rest were already restored) */
    l32i    a0, sp, XT_STK_PS           /* retrieve interruptee's PS */
    wsr     a0, EPS_6
    l32i    a0, sp, XT_STK_PC           /* retrieve interruptee's PC */
    wsr     a0, EPC_6
    l32i    a0, sp, XT_STK_A0           /* retrieve interruptee's A0 */
    l32i    sp, sp, XT_STK_A1           /* remove interrupt stack frame */
    rsync                               /* ensure EPS and EPC written */
    rfi     6

#endif  /* Level 6 */

/*******************************************************************************
HIGH PRIORITY (LEVEL > XCHAL_EXCM_LEVEL) INTERRUPT VECTORS AND HANDLERS

High priority interrupts are by definition those with priorities greater
than XCHAL_EXCM_LEVEL. This includes non-maskable (NMI). High priority
interrupts cannot interact with the RTOS, that is they must save all regs
they use and not call any RTOS function.

A further restriction imposed by the Xtensa windowed architecture is that
high priority interrupts must not modify the stack area even logically
"above" the top of the interrupted stack (they need to provide their
own stack or static save area).

Cadence Design Systems recommends high priority interrupt handlers be coded
in assembly and used for purposes requiring very short service times.

Here are templates for high priority (level 2+) interrupt vectors.
They assume only one interrupt per level to avoid the burden of identifying
which interrupts at this level are pending and enabled. This allows for
minimum latency and avoids having to save/restore a2 in addition to a0.
If more than one interrupt per high priority level is configured, this burden
is on the handler which in any case must provide a way to save and restore
registers it uses without touching the interrupted stack.

Each vector goes at a predetermined location according to the Xtensa
hardware configuration, which is ensured by its placement in a special
section known to the Xtensa linker support package (LSP). It performs
the minimum necessary before jumping to the handler in the .text section.
*******************************************************************************/

/*
These stubs just call xt_highintX/xt_nmi to handle the real interrupt. Please
define these in an external assembly source file. If these symbols are not
defined anywhere else, the defaults in xtensa_vector_defaults.S are used.
*/
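
/*
Illustrative sketch only (kept disabled with "#if 0", so it is never
assembled): one way an external source file could define xt_highint5,
assuming level 5 is above XCHAL_EXCM_LEVEL on the target configuration.
The save-area symbol name is hypothetical. Following the rules above, the
handler keeps its working registers in static memory rather than on the
interrupted stack, calls no RTOS services, and returns with RFI at its own
level.
*/
#if 0
    .data
    .align      4
_xt_highint5_save_a2:
    .word       0

    .section    .iram1, "ax"
    .global     xt_highint5
    .type       xt_highint5,@function
    .align      4
xt_highint5:
    /* a0 was already saved to EXCSAVE_5 by _Level5Vector */
    movi    a0, _xt_highint5_save_a2
    s32i    a2, a0, 0                   /* save a2 in the static save area */

    /* ... acknowledge the interrupt source and do the minimal work here ... */

    movi    a0, _xt_highint5_save_a2
    l32i    a2, a0, 0                   /* restore a2 */
    rsr     a0, EXCSAVE_5               /* restore interruptee's a0 */
    rfi     5                           /* return from level-5 interrupt */
#endif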
#if XCHAL_NUM_INTLEVELS >=2 && XCHAL_EXCM_LEVEL <2 && XCHAL_DEBUGLEVEL !=2

    .begin      literal_prefix .Level2InterruptVector
    .section    .Level2InterruptVector.text, "ax"
    .global     _Level2Vector
    .type       _Level2Vector,@function
    .global     xt_highint2
    .align      4
_Level2Vector:
    wsr     a0, EXCSAVE_2               /* preserve a0 */
    call0   xt_highint2                 /* load interrupt handler */
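    /* never returns here - call0 is used as a jump (see note at top) */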
    .end        literal_prefix

#endif  /* Level 2 */

#if XCHAL_NUM_INTLEVELS >=3 && XCHAL_EXCM_LEVEL <3 && XCHAL_DEBUGLEVEL !=3

    .begin      literal_prefix .Level3InterruptVector
    .section    .Level3InterruptVector.text, "ax"
    .global     _Level3Vector
    .type       _Level3Vector,@function
    .global     xt_highint3
    .align      4
_Level3Vector:
    wsr     a0, EXCSAVE_3               /* preserve a0 */
    call0   xt_highint3                 /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

#endif  /* Level 3 */

#if XCHAL_NUM_INTLEVELS >=4 && XCHAL_EXCM_LEVEL <4 && XCHAL_DEBUGLEVEL !=4

    .begin      literal_prefix .Level4InterruptVector
    .section    .Level4InterruptVector.text, "ax"
    .global     _Level4Vector
    .type       _Level4Vector,@function
    .global     xt_highint4
    .align      4
_Level4Vector:
    wsr     a0, EXCSAVE_4               /* preserve a0 */
    call0   xt_highint4                 /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

#endif  /* Level 4 */

#if XCHAL_NUM_INTLEVELS >=5 && XCHAL_EXCM_LEVEL <5 && XCHAL_DEBUGLEVEL !=5

    .begin      literal_prefix .Level5InterruptVector
    .section    .Level5InterruptVector.text, "ax"
    .global     _Level5Vector
    .type       _Level5Vector,@function
    .global     xt_highint5
    .align      4
_Level5Vector:
    wsr     a0, EXCSAVE_5               /* preserve a0 */
    call0   xt_highint5                 /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

#endif  /* Level 5 */

#if XCHAL_NUM_INTLEVELS >=6 && XCHAL_EXCM_LEVEL <6 && XCHAL_DEBUGLEVEL !=6

    .begin      literal_prefix .Level6InterruptVector
    .section    .Level6InterruptVector.text, "ax"
    .global     _Level6Vector
    .type       _Level6Vector,@function
    .global     xt_highint6
    .align      4
_Level6Vector:
    wsr     a0, EXCSAVE_6               /* preserve a0 */
    call0   xt_highint6                 /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

#endif  /* Level 6 */

#if XCHAL_HAVE_NMI

    .begin      literal_prefix .NMIExceptionVector
    .section    .NMIExceptionVector.text, "ax"
    .global     _NMIExceptionVector
    .type       _NMIExceptionVector,@function
    .global     xt_nmi
    .align      4
_NMIExceptionVector:
    wsr     a0, EXCSAVE + XCHAL_NMILEVEL _  /* preserve a0 */
    call0   xt_nmi                      /* load interrupt handler */
    /* never returns here - call0 is used as a jump (see note at top) */

    .end        literal_prefix

#endif  /* NMI */

/*******************************************************************************
WINDOW OVERFLOW AND UNDERFLOW EXCEPTION VECTORS AND ALLOCA EXCEPTION HANDLER

Here is the code for each window overflow/underflow exception vector and
(interspersed) efficient code for handling the alloca exception cause.
Window exceptions are handled entirely in the vector area and are very
tight for performance. The alloca exception is also handled entirely in
the window vector area so comes at essentially no cost in code size.
Users should never need to modify them and Cadence Design Systems recommends
they do not.

Window handlers go at predetermined vector locations according to the
Xtensa hardware configuration, which is ensured by their placement in a
special section known to the Xtensa linker support package (LSP). Since
their offsets in that section are always the same, the LSPs do not define
a section per vector.

These things are coded for XEA2 only (XEA1 is not supported).

Note on Underflow Handlers:
The underflow handler for returning from call[i+1] to call[i]
must preserve all the registers from call[i+1]'s window.
In particular, a0 and a1 must be preserved because the RETW instruction
will be reexecuted (and may even underflow if an intervening exception
has flushed call[i]'s registers).
Registers a2 and up may contain return values.
*******************************************************************************/
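
/*
Illustrative sketch only (kept disabled with "#if 0", so it is never
assembled): a minimal windowed-ABI function, showing the ENTRY/RETW pair
whose window rotation is what the overflow/underflow vectors below service.
The function name is hypothetical. ENTRY must reserve a frame of at least
16 bytes; on overflow a caller's a0-a3 are spilled to the 16 bytes directly
below its callee's stack pointer (the -16..-4 offsets used by the handlers
below), and underflow reloads them from the same place.
*/
#if 0
    .text
    .align      4
    .global     example_windowed_leaf
    .type       example_windowed_leaf,@function
example_windowed_leaf:
    entry   a1, 32                      /* rotate window, allocate 32-byte frame */
    addi    a2, a2, 1                   /* first argument and return value are in a2 */
    retw                                /* may take a window underflow on return */
#endif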
#if XCHAL_HAVE_WINDOWED

    .section    .WindowVectors.text, "ax"

/*
--------------------------------------------------------------------------------
Window Overflow Exception for Call4.

Invoked if a call[i] referenced a register (a4-a15)
that contains data from ancestor call[j];
call[j] had done a call4 to call[j+1].
On entry here:
    window rotated to call[j] start point;
    a0-a3 are registers to be saved;
    a4-a15 must be preserved;
    a5 is call[j+1]'s stack pointer.
--------------------------------------------------------------------------------
*/

    .org        0x0
    .global     _WindowOverflow4
_WindowOverflow4:
    s32e    a0, a5, -16                 /* save a0 to call[j+1]'s stack frame */
    s32e    a1, a5, -12                 /* save a1 to call[j+1]'s stack frame */
    s32e    a2, a5,  -8                 /* save a2 to call[j+1]'s stack frame */
    s32e    a3, a5,  -4                 /* save a3 to call[j+1]'s stack frame */
    rfwo                                /* rotates back to call[i] position */

/*
--------------------------------------------------------------------------------
Window Underflow Exception for Call4

Invoked by RETW returning from call[i+1] to call[i]
where call[i]'s registers must be reloaded (not live in ARs);
where call[i] had done a call4 to call[i+1].
On entry here:
    window rotated to call[i] start point;
    a0-a3 are undefined, must be reloaded with call[i].reg[0..3];
    a4-a15 must be preserved (they are call[i+1].reg[0..11]);
    a5 is call[i+1]'s stack pointer.
--------------------------------------------------------------------------------
*/

    .org        0x40
    .global     _WindowUnderflow4
_WindowUnderflow4:
    l32e    a0, a5, -16                 /* restore a0 from call[i+1]'s stack frame */
    l32e    a1, a5, -12                 /* restore a1 from call[i+1]'s stack frame */
    l32e    a2, a5,  -8                 /* restore a2 from call[i+1]'s stack frame */
    l32e    a3, a5,  -4                 /* restore a3 from call[i+1]'s stack frame */
    rfwu

/*
--------------------------------------------------------------------------------
Handle alloca exception generated by interruptee executing 'movsp'.
This uses space between the window vectors, so is essentially "free".
All interruptee's regs are intact except a0 which is saved in EXCSAVE_1,
and PS.EXCM has been set by the exception hardware (can't be interrupted).
The fact the alloca exception was taken means the registers associated with
the base-save area have been spilled and will be restored by the underflow
handler, so those 4 registers are available for scratch.
The code is optimized to avoid unaligned branches and minimize cache misses.
--------------------------------------------------------------------------------
*/

    .align      4
    .global     _xt_alloca_exc
_xt_alloca_exc:
    rsr     a0, WINDOWBASE              /* grab WINDOWBASE before rotw changes it */
    rotw    -1                          /* WINDOWBASE goes to a4, new a0-a3 are scratch */
    rsr     a2, PS
    extui   a3, a2, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS
    xor     a3, a3, a4                  /* bits changed from old to current windowbase */
    rsr     a4, EXCSAVE_1               /* restore original a0 (now in a4) */
    slli    a3, a3, XCHAL_PS_OWB_SHIFT
    xor     a2, a2, a3                  /* flip changed bits in old window base */
    wsr     a2, PS                      /* update PS.OWB to new window base */
    rsync

    _bbci.l a4, 31, _WindowUnderflow4
    rotw    -1                          /* original a0 goes to a8 */
    _bbci.l a8, 30, _WindowUnderflow8
    rotw    -1
    j       _WindowUnderflow12
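
/*
Illustrative sketch only (kept disabled with "#if 0", so it is never
assembled): the kind of sequence that raises the alloca exception handled
above. A windowed function that grows its frame at run time (alloca(),
variable-length arrays) must adjust SP with MOVSP rather than a plain move;
if the registers belonging to the base-save area below the old SP have been
spilled, MOVSP takes the alloca exception and _xt_alloca_exc reloads them via
the underflow code before the MOVSP is re-executed. The function name and the
fixed 256-byte growth are hypothetical.
*/
#if 0
    .text
    .align      4
    .global     example_movsp_user
    .type       example_movsp_user,@function
example_movsp_user:
    entry   a1, 48                      /* normal windowed prologue */
    movi    a3, 256                     /* grow the frame by 256 bytes (16-byte aligned) */
    sub     a3, a1, a3                  /* candidate new stack pointer */
    movsp   a1, a3                      /* may raise the alloca exception */
    /* ... use the new stack area at a1 ... */
    retw
#endif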
/*
--------------------------------------------------------------------------------
Window Overflow Exception for Call8

Invoked if a call[i] referenced a register (a4-a15)
that contains data from ancestor call[j];
call[j] had done a call8 to call[j+1].
On entry here:
    window rotated to call[j] start point;
    a0-a7 are registers to be saved;
    a8-a15 must be preserved;
    a9 is call[j+1]'s stack pointer.
--------------------------------------------------------------------------------
*/

    .org        0x80
    .global     _WindowOverflow8
_WindowOverflow8:
    s32e    a0, a9, -16                 /* save a0 to call[j+1]'s stack frame */
    l32e    a0, a1, -12                 /* a0 <- call[j-1]'s sp
                                           (used to find end of call[j]'s frame) */
    s32e    a1, a9, -12                 /* save a1 to call[j+1]'s stack frame */
    s32e    a2, a9,  -8                 /* save a2 to call[j+1]'s stack frame */
    s32e    a3, a9,  -4                 /* save a3 to call[j+1]'s stack frame */
    s32e    a4, a0, -32                 /* save a4 to call[j]'s stack frame */
    s32e    a5, a0, -28                 /* save a5 to call[j]'s stack frame */
    s32e    a6, a0, -24                 /* save a6 to call[j]'s stack frame */
    s32e    a7, a0, -20                 /* save a7 to call[j]'s stack frame */
    rfwo                                /* rotates back to call[i] position */

/*
--------------------------------------------------------------------------------
Window Underflow Exception for Call8

Invoked by RETW returning from call[i+1] to call[i]
where call[i]'s registers must be reloaded (not live in ARs);
where call[i] had done a call8 to call[i+1].
On entry here:
    window rotated to call[i] start point;
    a0-a7 are undefined, must be reloaded with call[i].reg[0..7];
    a8-a15 must be preserved (they are call[i+1].reg[0..7]);
    a9 is call[i+1]'s stack pointer.
--------------------------------------------------------------------------------
*/

    .org        0xC0
    .global     _WindowUnderflow8
_WindowUnderflow8:
    l32e    a0, a9, -16                 /* restore a0 from call[i+1]'s stack frame */
    l32e    a1, a9, -12                 /* restore a1 from call[i+1]'s stack frame */
    l32e    a2, a9,  -8                 /* restore a2 from call[i+1]'s stack frame */
    l32e    a7, a1, -12                 /* a7 <- call[i-1]'s sp
                                           (used to find end of call[i]'s frame) */
    l32e    a3, a9,  -4                 /* restore a3 from call[i+1]'s stack frame */
    l32e    a4, a7, -32                 /* restore a4 from call[i]'s stack frame */
    l32e    a5, a7, -28                 /* restore a5 from call[i]'s stack frame */
    l32e    a6, a7, -24                 /* restore a6 from call[i]'s stack frame */
    l32e    a7, a7, -20                 /* restore a7 from call[i]'s stack frame */
    rfwu

/*
--------------------------------------------------------------------------------
Window Overflow Exception for Call12

Invoked if a call[i] referenced a register (a4-a15)
that contains data from ancestor call[j];
call[j] had done a call12 to call[j+1].
On entry here:
    window rotated to call[j] start point;
    a0-a11 are registers to be saved;
    a12-a15 must be preserved;
    a13 is call[j+1]'s stack pointer.
--------------------------------------------------------------------------------
*/

    .org        0x100
    .global     _WindowOverflow12
_WindowOverflow12:
    s32e    a0,  a13, -16               /* save a0 to call[j+1]'s stack frame */
    l32e    a0,  a1,  -12               /* a0 <- call[j-1]'s sp
                                           (used to find end of call[j]'s frame) */
    s32e    a1,  a13, -12               /* save a1 to call[j+1]'s stack frame */
    s32e    a2,  a13,  -8               /* save a2 to call[j+1]'s stack frame */
    s32e    a3,  a13,  -4               /* save a3 to call[j+1]'s stack frame */
    s32e    a4,  a0,  -48               /* save a4 to end of call[j]'s stack frame */
    s32e    a5,  a0,  -44               /* save a5 to end of call[j]'s stack frame */
    s32e    a6,  a0,  -40               /* save a6 to end of call[j]'s stack frame */
    s32e    a7,  a0,  -36               /* save a7 to end of call[j]'s stack frame */
    s32e    a8,  a0,  -32               /* save a8 to end of call[j]'s stack frame */
    s32e    a9,  a0,  -28               /* save a9 to end of call[j]'s stack frame */
    s32e    a10, a0,  -24               /* save a10 to end of call[j]'s stack frame */
    s32e    a11, a0,  -20               /* save a11 to end of call[j]'s stack frame */
    rfwo                                /* rotates back to call[i] position */

/*
--------------------------------------------------------------------------------
Window Underflow Exception for Call12

Invoked by RETW returning from call[i+1] to call[i]
where call[i]'s registers must be reloaded (not live in ARs);
where call[i] had done a call12 to call[i+1].
On entry here:
    window rotated to call[i] start point;
    a0-a11 are undefined, must be reloaded with call[i].reg[0..11];
    a12-a15 must be preserved (they are call[i+1].reg[0..3]);
    a13 is call[i+1]'s stack pointer.
--------------------------------------------------------------------------------
*/

    .org        0x140
    .global     _WindowUnderflow12
_WindowUnderflow12:
    l32e    a0,  a13, -16               /* restore a0 from call[i+1]'s stack frame */
    l32e    a1,  a13, -12               /* restore a1 from call[i+1]'s stack frame */
    l32e    a2,  a13,  -8               /* restore a2 from call[i+1]'s stack frame */
    l32e    a11, a1,  -12               /* a11 <- call[i-1]'s sp
                                           (used to find end of call[i]'s frame) */
    l32e    a3,  a13,  -4               /* restore a3 from call[i+1]'s stack frame */
    l32e    a4,  a11, -48               /* restore a4 from end of call[i]'s stack frame */
    l32e    a5,  a11, -44               /* restore a5 from end of call[i]'s stack frame */
    l32e    a6,  a11, -40               /* restore a6 from end of call[i]'s stack frame */
    l32e    a7,  a11, -36               /* restore a7 from end of call[i]'s stack frame */
    l32e    a8,  a11, -32               /* restore a8 from end of call[i]'s stack frame */
    l32e    a9,  a11, -28               /* restore a9 from end of call[i]'s stack frame */
    l32e    a10, a11, -24               /* restore a10 from end of call[i]'s stack frame */
    l32e    a11, a11, -20               /* restore a11 from end of call[i]'s stack frame */
    rfwu

#endif  /* XCHAL_HAVE_WINDOWED */

    .section    .UserEnter.text, "ax"
    .global     call_user_start
    .type       call_user_start,@function
    .align      4
    .literal_position