/*
 * SPDX-FileCopyrightText: 2015-2019 Cadence Design Systems, Inc.
 *
 * SPDX-License-Identifier: MIT
 *
 * SPDX-FileContributor: 2016-2022 Espressif Systems (Shanghai) CO LTD
 */
/*
 * Copyright (c) 2015-2019 Cadence Design Systems, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * XTENSA CONTEXT SAVE AND RESTORE ROUTINES
 *
 * Low-level Call0 functions for handling generic context save and restore of
 * registers not specifically addressed by the interrupt vectors and handlers.
 * Those registers (not handled by these functions) are PC, PS, A0, A1 (SP).
 * Except for the calls to RTOS functions, this code is generic to Xtensa.
 *
 * Note that in Call0 ABI, interrupt handlers are expected to preserve the
 * callee-saved regs (A12-A15), which is always the case if the handlers are
 * coded in C. However, A12 and A13 are made available as scratch registers
 * to the interrupt dispatch code, so they are presumed saved anyway, and are
 * always restored even in Call0 ABI. Only A14 and A15 are truly handled as
 * callee-saved regs.
 *
 * Because Xtensa is a configurable architecture, this port supports all
 * user-generated configurations (except for restrictions stated in the
 * release notes). This is accomplished by conditional compilation using
 * macros and functions defined in the Xtensa HAL (hardware abstraction
 * layer) for your configuration. Only the processor state included in your
 * configuration is saved and restored, including any processor state added
 * by user configuration options or TIE.
 */
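/*
 * For orientation: the "interrupt stack frame" referred to throughout is the
 * XT_STK_* layout defined in xtensa_context.h. A minimal C sketch of that
 * idea follows (illustrative only -- the field names and ordering here are
 * assumptions; the XT_STK_* offsets are authoritative):
 *
 *     #include <stdint.h>
 *
 *     typedef struct {                  // hypothetical mirror of the frame
 *         uint32_t pc, ps, a0, a1;      // saved by the vector/dispatch code
 *         uint32_t a2_to_a15[14];       // a2-a11 saved below; a12/a13 by caller
 *         uint32_t sar;                 // shift-amount register
 *         uint32_t lbeg, lend, lcount;  // only if XCHAL_HAVE_LOOPS
 *         uint32_t tmp0, tmp1, tmp2;    // XT_STK_TMP0..2 scratch slots
 *         // ...optional VPRI/OVLY words and the XCHAL_EXTRA_SA region
 *     } xt_stk_frame_t;
 */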
/* Warn nicely if this file gets named with a lowercase .s instead of .S: */
#define NOERROR #
NOERROR: .error "C preprocessor needed for this file: make sure its filename\
ends in uppercase .S, or use xt-xcc's -x assembler-with-cpp option."
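/*
 * (How the guard above works: once the C preprocessor has run, NOERROR
 * expands to '#', turning the line into an assembler comment that is
 * silently dropped. Without preprocessing, "NOERROR:" is an ordinary label
 * and the .error directive fires with the message above.)
 */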
#include "xtensa_rtos.h"
#include "xtensa_context.h"
#include "xt_asm_utils.h"

#ifdef XT_USE_OVLY
#include <xtensa/overlay_os_asm.h>
#endif
    .text

/*******************************************************************************

_xt_context_save

    !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!

Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the
interrupt stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_restore (which also restores A12, A13).

Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame.
This function preserves A12 and A13 in order to provide the caller with two
scratch regs that need not be saved over the call to this function. The
choice of which two regs to provide is governed by xthal_window_spill_nw and
xthal_save_extra_nw, to avoid moving data more than necessary. The caller can
assign regs accordingly.

Entry Conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    Original A12, A13 have already been saved in the interrupt stack frame.
    Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
    point of interruption.
    If windowed ABI, PS.EXCM = 1 (exceptions disabled).

Exit conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    A12, A13 as at entry (preserved).
    If windowed ABI, PS.EXCM = 1 (exceptions disabled).

*******************************************************************************/
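/*
 * Typical call sequence, restated from the conditions above (the actual
 * callers are the interrupt dispatchers in xtensa_vectors.S): the vector
 * code stores PC, PS, A0, A1 (SP), A12, A13 into the frame, then executes
 * "call0 _xt_context_save". On return, A12/A13 still hold the caller's
 * values, so the dispatcher may continue using them as scratch.
 */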
    .global _xt_context_save
    .type   _xt_context_save,@function
    .align  4
    .literal_position
    .align  4

_xt_context_save:
    s32i    a2,  sp, XT_STK_A2
    s32i    a3,  sp, XT_STK_A3
    s32i    a4,  sp, XT_STK_A4
    s32i    a5,  sp, XT_STK_A5
    s32i    a6,  sp, XT_STK_A6
    s32i    a7,  sp, XT_STK_A7
    s32i    a8,  sp, XT_STK_A8
    s32i    a9,  sp, XT_STK_A9
    s32i    a10, sp, XT_STK_A10
    s32i    a11, sp, XT_STK_A11

    /*
    Call0 ABI callee-saved regs a12-15 do not need to be saved here.
    a12-13 are the caller's responsibility so it can use them as scratch.
    So only need to save a14-a15 here for Windowed ABI (not Call0).
    */
    #ifndef __XTENSA_CALL0_ABI__
    s32i    a14, sp, XT_STK_A14
    s32i    a15, sp, XT_STK_A15
    #endif

    rsr     a3,  SAR
    s32i    a3,  sp, XT_STK_SAR

    #if XCHAL_HAVE_LOOPS
    rsr     a3,  LBEG
    s32i    a3,  sp, XT_STK_LBEG
    rsr     a3,  LEND
    s32i    a3,  sp, XT_STK_LEND
    rsr     a3,  LCOUNT
    s32i    a3,  sp, XT_STK_LCOUNT
    #endif

    #ifdef XT_USE_SWPRI
    /* Save virtual priority mask */
    movi    a3,  _xt_vpri_mask
    l32i    a3,  a3, 0
    s32i    a3,  sp, XT_STK_VPRI
    #endif

    #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    mov     a9,  a0                     /* preserve ret addr */
    #endif

    s32i    a12, sp, XT_STK_TMP0        /* temp. save stuff in stack frame */
    s32i    a13, sp, XT_STK_TMP1
    s32i    a9,  sp, XT_STK_TMP2

    l32i    a12, sp, XT_STK_A12         /* recover original a9,12,13 */
    l32i    a13, sp, XT_STK_A13
    l32i    a9,  sp, XT_STK_A9

    #if XCHAL_EXTRA_SA_SIZE > 0
    addi    a2,  sp, XT_STK_EXTRA       /* where to save it */
    # if XCHAL_EXTRA_SA_ALIGN > 16
    movi    a3,  -XCHAL_EXTRA_SA_ALIGN
    and     a2,  a2, a3                 /* align dynamically >16 bytes */
    # endif
    call0   xthal_save_extra_nw         /* destroys a0,2,3 */
    #endif

    #ifndef __XTENSA_CALL0_ABI__
    #ifdef XT_USE_OVLY
    l32i    a9,  sp, XT_STK_PC          /* recover saved PC */
    _xt_overlay_get_state    a9, a12, a13
    s32i    a9,  sp, XT_STK_OVLY        /* save overlay state */
    #endif
    /* The SPILL_ALL_WINDOWS macro requires window overflow exceptions to be
     * enabled, i.e. PS.EXCM cleared and PS.WOE set.
     * Since we are going to clear PS.EXCM, we also need to raise INTLEVEL
     * at least to XCHAL_EXCM_LEVEL. This matches the effective INTLEVEL at
     * entry (CINTLEVEL = max(PS.INTLEVEL, XCHAL_EXCM_LEVEL) when PS.EXCM is
     * set).
     * Since WindowOverflow exceptions will trigger inside SPILL_ALL_WINDOWS,
     * we need to save/restore EPC1 as well.
     * Note: even though a4-a15 are saved into the exception frame, we should
     * not clobber them until after SPILL_ALL_WINDOWS. This is because these
     * registers may contain live windows belonging to previous frames in the
     * call stack. These frames will be spilled by SPILL_ALL_WINDOWS, and if a
     * register was used as a temporary by this code, the temporary value
     * would get stored onto the stack, instead of the real value.
     */
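/* In C terms, the PS arithmetic below is roughly (a sketch; RSR and WSR
 * stand in for the special-register accesses, and the PS_ and XCHAL_
 * constants come from the HAL headers):
 *
 *     uint32_t ps  = RSR(PS);                  // saved, restored after spill
 *     uint32_t lvl = ps & PS_INTLEVEL_MASK;
 *     if (lvl < XCHAL_EXCM_LEVEL)
 *         lvl = XCHAL_EXCM_LEVEL;              // max(INTLEVEL, EXCM_LEVEL)
 *     WSR(PS, PS_UM | PS_WOE | lvl);           // EXCM cleared, WOE set
 *     // ...SPILL_ALL_WINDOWS..., then restore PS and EPC1
 */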
    rsr     a2,  PS                     /* to be restored after SPILL_ALL_WINDOWS */
    movi    a0,  PS_INTLEVEL_MASK
    and     a3,  a2, a0                 /* get the current INTLEVEL */
    bgeui   a3,  XCHAL_EXCM_LEVEL, 1f   /* calculate max(INTLEVEL, XCHAL_EXCM_LEVEL) */
    movi    a3,  XCHAL_EXCM_LEVEL
1:
    movi    a0,  PS_UM | PS_WOE         /* clear EXCM, enable window overflow, set new INTLEVEL */
    or      a3,  a3, a0
    wsr     a3,  ps

    rsr     a0,  EPC1                   /* to be restored after SPILL_ALL_WINDOWS */
    addi    sp,  sp, XT_STK_FRMSZ       /* go back to spill register region */
    SPILL_ALL_WINDOWS                   /* place the live register windows there */
    addi    sp,  sp, -XT_STK_FRMSZ      /* return the current stack pointer and proceed with context save */
    wsr     a2,  PS                     /* restore to the value at entry */
    rsync
    wsr     a0,  EPC1                   /* likewise */
    #endif /* __XTENSA_CALL0_ABI__ */

    l32i    a12, sp, XT_STK_TMP0        /* restore the temp saved registers */
    l32i    a13, sp, XT_STK_TMP1        /* our return address is there */
    l32i    a9,  sp, XT_STK_TMP2

    #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    mov     a0,  a9                     /* retrieve ret addr */
    #endif

    ret
/*******************************************************************************

_xt_context_restore

    !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!

Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
ABI, A14, A15, which are preserved by all interrupt handlers) from an
interrupt stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_save (whose caller saved A12, A13).

The caller is responsible for restoring PC, PS, A0, A1 (SP).

Entry Conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").

Exit conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    Other processor state except PC, PS, A0, A1 (SP), is as at the point
    of interruption.

*******************************************************************************/
    .global _xt_context_restore
    .type   _xt_context_restore,@function
    .align  4
    .literal_position
    .align  4

_xt_context_restore:

    #if XCHAL_EXTRA_SA_SIZE > 0
    /*
    NOTE: Normally the xthal_restore_extra_nw macro only affects address
    registers a2-a5. It is theoretically possible for Xtensa processor
    designers to write TIE that causes more address registers to be
    affected, but it is generally unlikely. If that ever happens,
    more registers need to be saved/restored around this macro invocation.
    Here we only assume a13 is preserved.
    Future Xtensa tools releases might limit the regs that can be affected.
    */
    mov     a13, a0                     /* preserve ret addr */
    addi    a2,  sp, XT_STK_EXTRA       /* where to find it */
    # if XCHAL_EXTRA_SA_ALIGN > 16
    movi    a3,  -XCHAL_EXTRA_SA_ALIGN
    and     a2,  a2, a3                 /* align dynamically >16 bytes */
    # endif
    call0   xthal_restore_extra_nw      /* destroys a0,2,3,4,5 */
    mov     a0,  a13                    /* retrieve ret addr */
    #endif

    #if XCHAL_HAVE_LOOPS
    l32i    a2,  sp, XT_STK_LBEG
    l32i    a3,  sp, XT_STK_LEND
    wsr     a2,  LBEG
    l32i    a2,  sp, XT_STK_LCOUNT
    wsr     a3,  LEND
    wsr     a2,  LCOUNT
    #endif

    #ifdef XT_USE_OVLY
    /*
    If we are using overlays, this is a good spot to check if we need
    to restore an overlay for the incoming task. Here we have a bunch
    of registers to spare. Note that this step is going to use a few
    bytes of storage below SP (SP-20 to SP-32) if an overlay is going
    to be restored.
    */
    l32i    a2,  sp, XT_STK_PC          /* retrieve PC */
    l32i    a3,  sp, XT_STK_PS          /* retrieve PS */
    l32i    a4,  sp, XT_STK_OVLY        /* retrieve overlay state */
    l32i    a5,  sp, XT_STK_A1          /* retrieve stack ptr */
    _xt_overlay_check_map    a2, a3, a4, a5, a6
    s32i    a2,  sp, XT_STK_PC          /* save updated PC */
    s32i    a3,  sp, XT_STK_PS          /* save updated PS */
    #endif

    #ifdef XT_USE_SWPRI
    /* Restore virtual interrupt priority and interrupt enable */
    movi    a3,  _xt_intdata
    l32i    a4,  a3, 0                  /* a4 = _xt_intenable */
    l32i    a5,  sp, XT_STK_VPRI        /* a5 = saved _xt_vpri_mask */
    and     a4,  a4, a5
    wsr     a4,  INTENABLE              /* update INTENABLE */
    s32i    a5,  a3, 4                  /* restore _xt_vpri_mask */
    #endif
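/* Equivalent C for the XT_USE_SWPRI block above (a sketch; the 0/4 offsets
 * used above imply that _xt_intdata overlays _xt_intenable at offset 0 and
 * _xt_vpri_mask at offset 4; WSR stands in for the special-register write):
 *
 *     uint32_t vpri = frame->vpri;             // saved _xt_vpri_mask
 *     WSR(INTENABLE, _xt_intenable & vpri);    // mask virtually-disabled ints
 *     _xt_vpri_mask = vpri;
 */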
    l32i    a3,  sp, XT_STK_SAR
    l32i    a2,  sp, XT_STK_A2
    wsr     a3,  SAR
    l32i    a3,  sp, XT_STK_A3
    l32i    a4,  sp, XT_STK_A4
    l32i    a5,  sp, XT_STK_A5
    l32i    a6,  sp, XT_STK_A6
    l32i    a7,  sp, XT_STK_A7
    l32i    a8,  sp, XT_STK_A8
    l32i    a9,  sp, XT_STK_A9
    l32i    a10, sp, XT_STK_A10
    l32i    a11, sp, XT_STK_A11

    /*
    Call0 ABI callee-saved regs a12-15 do not need to be restored here.
    However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
    so need to be restored anyway, despite being callee-saved in Call0.
    */
    l32i    a12, sp, XT_STK_A12
    l32i    a13, sp, XT_STK_A13

    #ifndef __XTENSA_CALL0_ABI__
    l32i    a14, sp, XT_STK_A14
    l32i    a15, sp, XT_STK_A15
    #endif

    ret
/*******************************************************************************

_xt_coproc_init

Initializes global co-processor management data, setting all co-processors
to "unowned". Leaves CPENABLE as it found it (does NOT clear it).

Called during initialization of the RTOS, before any threads run.

This may be called from normal Xtensa single-threaded application code which
might use co-processors. The Xtensa run-time initialization enables all
co-processors. They must remain enabled here, else a co-processor exception
might occur outside of a thread, which the exception handler doesn't expect.

Entry Conditions:
    Xtensa single-threaded run-time environment is in effect.
    No thread is yet running.

Exit conditions:
    None.

Obeys ABI conventions per prototype:
    void _xt_coproc_init(void)

*******************************************************************************/
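/* Behaviorally equivalent C for the routine below (a sketch; the array
 * element type is an assumption, its size comes from the macros used in
 * the code):
 *
 *     extern void *_xt_coproc_owner_sa[XCHAL_CP_MAX * portNUM_PROCESSORS];
 *
 *     void _xt_coproc_init(void)
 *     {
 *         for (unsigned i = 0; i < XCHAL_CP_MAX * portNUM_PROCESSORS; i++)
 *             _xt_coproc_owner_sa[i] = NULL;   // 0 = unowned
 *     }
 */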
#if XCHAL_CP_NUM > 0

    .global _xt_coproc_init
    .type   _xt_coproc_init,@function
    .align  4
    .literal_position
    .align  4

_xt_coproc_init:
    ENTRY0

    /* Initialize thread co-processor ownerships to 0 (unowned). */
    movi    a2,  _xt_coproc_owner_sa    /* a2 = base of owner array */
    addi    a3,  a2, (XCHAL_CP_MAX*portNUM_PROCESSORS) << 2  /* a3 = top+1 of owner array */
    movi    a4,  0                      /* a4 = 0 (unowned) */
1:  s32i    a4,  a2, 0
    addi    a2,  a2, 4
    bltu    a2,  a3, 1b

    RET0

#endif
/*******************************************************************************

_xt_coproc_release

Releases any and all co-processors owned by a given thread. The thread is
identified by its co-processor state save area defined in xtensa_context.h.

Must be called before a thread's co-proc save area is deleted to avoid
memory corruption when the exception handler tries to save the state.
May be called when a thread terminates or completes but does not delete
the co-proc save area, to avoid the exception handler having to save the
thread's co-proc state before another thread can use it (optimization).

Needs to be called on the processor the thread was running on. Unpinned
threads won't have an entry here because they get pinned as soon as they use
a co-processor.

Entry Conditions:
    A2  = Pointer to base of co-processor state save area.
    A3  = Core ID of the pinned task.

Exit conditions:
    None.

Obeys ABI conventions per prototype:
    void _xt_coproc_release(void * coproc_sa_base, BaseType_t xCoreID)

*******************************************************************************/
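/* Behaviorally equivalent C for the ownership scan below (a sketch; the
 * spinlock and interrupt masking that wrap it are shown in the assembly):
 *
 *     void _xt_coproc_release(void *coproc_sa_base, BaseType_t xCoreID)
 *     {
 *         void **owner = &_xt_coproc_owner_sa[xCoreID * XCHAL_CP_MAX];
 *         for (unsigned cp = 0; cp < XCHAL_CP_MAX; cp++)
 *             if (owner[cp] == coproc_sa_base)
 *                 owner[cp] = NULL;            // mark unowned
 *     }
 */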
#if XCHAL_CP_NUM > 0

    .global _xt_coproc_release
    .type   _xt_coproc_release,@function
    .align  4
    .literal_position
    .align  4

_xt_coproc_release:
    ENTRY0                                  /* a2 = base of save area */
                                            /* a3 = core ID */

    rsil    a7,  XCHAL_EXCM_LEVEL           /* lock interrupts */

    /* Acquire the spinlock before proceeding with the routine.
     * Refer to _xt_coproc_exc for details on the purpose of
     * the _xt_coproc_owner_sa_lock lock and its intended use.
     */
.L_spinlock_loop:
    mov     a8,  a3                         /* save a copy of the core ID in a8 */
    movi    a10, _xt_coproc_owner_sa_lock   /* a10 = base address of lock variable */
    addx4   a10, a8, a10                    /* use core ID in a8 to calculate the offset to the lock variable for this core */
    movi    a11, 0                          /* a11 = 0 */
    wsr     a11, scompare1                  /* scompare1 = a11 : expect the spinlock to be free (value = 0) */
    movi    a11, 1                          /* a11 = 1 : write 1 to take the spinlock */
    s32c1i  a11, a10, 0                     /* if (lock == scompare1) {tmp = lock; lock = a11; a11 = tmp} else {a11 = lock} */
    bnez    a11, .L_spinlock_loop           /* if (a11 != 0) loop : keep spinning until the spinlock is available */
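/* The S32C1I sequence above is a compare-and-swap spin. In C11 atomics it
 * would read roughly (a sketch, not the port's actual lock implementation):
 *
 *     #include <stdatomic.h>
 *
 *     extern atomic_uint _xt_coproc_owner_sa_lock[portNUM_PROCESSORS];
 *
 *     static void lock_acquire(unsigned core)
 *     {
 *         unsigned expected;
 *         do {
 *             expected = 0;        // expect the spinlock to be free
 *         } while (!atomic_compare_exchange_weak(
 *                      &_xt_coproc_owner_sa_lock[core], &expected, 1));
 *     }
 */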
    movi    a4,  XCHAL_CP_MAX << 2
    mull    a3,  a3, a4
    movi    a4,  _xt_coproc_owner_sa        /* a4 = base of owner array */
    add     a4,  a4, a3

    addi    a5,  a4, XCHAL_CP_MAX << 2      /* a5 = top+1 of owner array */
    movi    a6,  0                          /* a6 = 0 (unowned) */

1:  l32i    a8,  a4, 0                      /* a8 = owner at a4 */
    bne     a2,  a8, 2f                     /* if (coproc_sa_base == owner) */
    s32i    a6,  a4, 0                      /*   owner = unowned */
2:  addi    a4,  a4, 1<<2                   /* a4 = next entry in owner array */
    bltu    a4,  a5, 1b                     /* repeat until end of array */

3:  wsr     a7,  PS                         /* restore interrupts */

    /* Release the spinlock */
    movi    a11, 0                          /* a11 = 0 */
    s32ri   a11, a10, 0                     /* a10 = base address of lock variable; write 0 to release the lock */

    RET0

#endif
/*******************************************************************************

_xt_coproc_savecs

If there is a current thread and it has a coprocessor state save area, then
save all callee-saved state into this area. This function is called from the
solicited context switch handler. It calls a system-specific function to get
the coprocessor save area base address.

Entry conditions:
    - The thread being switched out is still the current thread.
    - CPENABLE state reflects which coprocessors are active.
    - Registers have been saved/spilled already.

Exit conditions:
    - All necessary CP callee-saved state has been saved.
    - Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.

*******************************************************************************/
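/* The per-coprocessor blocks in the routine below all follow one pattern;
 * in C it would read roughly as follows (a sketch -- the xchal_cpN_store
 * macros are expanded per configuration at build time, so the real code is
 * unrolled rather than looped; RSR and cp_store[] are hypothetical
 * stand-ins):
 *
 *     uint32_t ena = RSR(CPENABLE);
 *     if (!ena || !sa) return;                 // nothing enabled or no area
 *     sa->cs_st = ena;                         // mask of CPs being stored
 *     for (unsigned cp = 0; cp < XCHAL_CP_NUM; cp++)
 *         if (ena & (1u << cp))
 *             cp_store[cp](sa->asa + _xt_coproc_sa_offset[cp]);
 */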
#if XCHAL_CP_NUM > 0

    .extern _xt_coproc_sa_offset        /* external reference */

    .global _xt_coproc_savecs
    .type   _xt_coproc_savecs,@function
    .align  4
    .literal_position
    .align  4

_xt_coproc_savecs:

    /* At entry, CPENABLE should be showing which CPs are enabled. */

    rsr     a2,  CPENABLE               /* a2 = which CPs are enabled */
    beqz    a2,  .Ldone                 /* quick exit if none */
    mov     a14, a0                     /* save return address */
    call0   XT_RTOS_CP_STATE            /* get address of CP save area */
    mov     a0,  a14                    /* restore return address */
    beqz    a15, .Ldone                 /* if none then nothing to do */
    s16i    a2,  a15, XT_CP_CS_ST       /* save mask of CPs being stored */
    movi    a13, _xt_coproc_sa_offset   /* array of CP save offsets */
    l32i    a15, a15, XT_CP_ASA         /* a15 = base of aligned save area */

    #if XCHAL_CP0_SA_SIZE
    bbci.l  a2,  0, 2f                  /* CP 0 not enabled */
    l32i    a14, a13, 0                 /* a14 = _xt_coproc_sa_offset[0] */
    add     a3,  a14, a15               /* a3 = save area for CP 0 */
    xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP1_SA_SIZE
    bbci.l  a2,  1, 2f                  /* CP 1 not enabled */
    l32i    a14, a13, 4                 /* a14 = _xt_coproc_sa_offset[1] */
    add     a3,  a14, a15               /* a3 = save area for CP 1 */
    xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP2_SA_SIZE
    bbci.l  a2,  2, 2f
    l32i    a14, a13, 8
    add     a3,  a14, a15
    xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP3_SA_SIZE
    bbci.l  a2,  3, 2f
    l32i    a14, a13, 12
    add     a3,  a14, a15
    xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP4_SA_SIZE
    bbci.l  a2,  4, 2f
    l32i    a14, a13, 16
    add     a3,  a14, a15
    xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP5_SA_SIZE
    bbci.l  a2,  5, 2f
    l32i    a14, a13, 20
    add     a3,  a14, a15
    xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP6_SA_SIZE
    bbci.l  a2,  6, 2f
    l32i    a14, a13, 24
    add     a3,  a14, a15
    xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP7_SA_SIZE
    bbci.l  a2,  7, 2f
    l32i    a14, a13, 28
    add     a3,  a14, a15
    xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

.Ldone:
    ret
#endif
/*******************************************************************************

_xt_coproc_restorecs

Restore any callee-saved coprocessor state for the incoming thread.
This function is called from coprocessor exception handling, when giving
ownership to a thread that solicited a context switch earlier. It calls a
system-specific function to get the coprocessor save area base address.

Entry conditions:
    - The incoming thread is set as the current thread.
    - CPENABLE is set up correctly for all required coprocessors.
    - a2 = mask of coprocessors to be restored.

Exit conditions:
    - All necessary CP callee-saved state has been restored.
    - CPENABLE - unchanged.
    - Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.

*******************************************************************************/
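/* Mirror of _xt_coproc_savecs. The mask bookkeeping below is, in C (a
 * sketch; a2_mask is the register argument and cp_load[] a hypothetical
 * stand-in for the unrolled xchal_cpN_load macros):
 *
 *     uint16_t saved = sa->cs_st;      // CPs whose state is in the save area
 *     sa->cs_st = saved ^ a2_mask;     // clear the bits being restored now
 *     for (unsigned cp = 0; cp < XCHAL_CP_NUM; cp++)
 *         if (a2_mask & (1u << cp))
 *             cp_load[cp](sa->asa + _xt_coproc_sa_offset[cp]);
 */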
#if XCHAL_CP_NUM > 0

    .global _xt_coproc_restorecs
    .type   _xt_coproc_restorecs,@function
    .align  4
    .literal_position
    .align  4

_xt_coproc_restorecs:

    mov     a14, a0                     /* save return address */
    call0   XT_RTOS_CP_STATE            /* get address of CP save area */
    mov     a0,  a14                    /* restore return address */
    beqz    a15, .Ldone2                /* if none then nothing to do */
    l16ui   a3,  a15, XT_CP_CS_ST       /* a3 = which CPs have been saved */
    xor     a3,  a3, a2                 /* clear the ones being restored */
    s32i    a3,  a15, XT_CP_CS_ST       /* update saved CP mask */
    movi    a13, _xt_coproc_sa_offset   /* array of CP save offsets */
    l32i    a15, a15, XT_CP_ASA         /* a15 = base of aligned save area */

    #if XCHAL_CP0_SA_SIZE
    bbci.l  a2,  0, 2f                  /* CP 0 not enabled */
    l32i    a14, a13, 0                 /* a14 = _xt_coproc_sa_offset[0] */
    add     a3,  a14, a15               /* a3 = save area for CP 0 */
    xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP1_SA_SIZE
    bbci.l  a2,  1, 2f                  /* CP 1 not enabled */
    l32i    a14, a13, 4                 /* a14 = _xt_coproc_sa_offset[1] */
    add     a3,  a14, a15               /* a3 = save area for CP 1 */
    xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP2_SA_SIZE
    bbci.l  a2,  2, 2f
    l32i    a14, a13, 8
    add     a3,  a14, a15
    xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP3_SA_SIZE
    bbci.l  a2,  3, 2f
    l32i    a14, a13, 12
    add     a3,  a14, a15
    xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP4_SA_SIZE
    bbci.l  a2,  4, 2f
    l32i    a14, a13, 16
    add     a3,  a14, a15
    xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP5_SA_SIZE
    bbci.l  a2,  5, 2f
    l32i    a14, a13, 20
    add     a3,  a14, a15
    xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP6_SA_SIZE
    bbci.l  a2,  6, 2f
    l32i    a14, a13, 24
    add     a3,  a14, a15
    xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

    #if XCHAL_CP7_SA_SIZE
    bbci.l  a2,  7, 2f
    l32i    a14, a13, 28
    add     a3,  a14, a15
    xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
    #endif

.Ldone2:
    ret
#endif