1 /*******************************************************************************
\r
2 Copyright (c) 2006-2015 Cadence Design Systems Inc.
\r
4 Permission is hereby granted, free of charge, to any person obtaining
\r
5 a copy of this software and associated documentation files (the
\r
6 "Software"), to deal in the Software without restriction, including
\r
7 without limitation the rights to use, copy, modify, merge, publish,
\r
8 distribute, sublicense, and/or sell copies of the Software, and to
\r
9 permit persons to whom the Software is furnished to do so, subject to
\r
10 the following conditions:
\r
12 The above copyright notice and this permission notice shall be included
\r
13 in all copies or substantial portions of the Software.
\r
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
16 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
17 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
18 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
\r
19 CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
\r
20 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
\r
21 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
22 --------------------------------------------------------------------------------
\r
24 XTENSA VECTORS AND LOW LEVEL HANDLERS FOR AN RTOS
\r
26 Xtensa low level exception and interrupt vectors and handlers for an RTOS.
\r
28 Interrupt handlers and user exception handlers support interaction with
\r
29 the RTOS by calling XT_RTOS_INT_ENTER and XT_RTOS_INT_EXIT before and
\r
30 after user's specific interrupt handlers. These macros are defined in
\r
31 xtensa_<rtos>.h to call suitable functions in a specific RTOS.
\r
33 Users can install application-specific interrupt handlers for low and
\r
34 medium level interrupts, by calling xt_set_interrupt_handler(). These
\r
35 handlers can be written in C, and must obey C calling convention. The
\r
36 handler table is indexed by the interrupt number. Each handler may be
\r
37 provided with an argument.
\r
39 Note that the system timer interrupt is handled specially, and is
\r
40 dispatched to the RTOS-specific handler. This timer cannot be hooked
\r
41 by application code.
\r
43 Optional hooks are also provided to install a handler per level at
\r
44 run-time, made available by compiling this source file with
\r
45 '-DXT_INTEXC_HOOKS' (useful for automated testing).
\r
47 !! This file is a template that usually needs to be modified to handle !!
\r
48 !! application specific interrupts. Search USER_EDIT for helpful comments !!
\r
49 !! on where to insert handlers and how to write them. !!
\r
51 Users can also install application-specific exception handlers in the
\r
52 same way, by calling xt_set_exception_handler(). One handler slot is
\r
53 provided for each exception type. Note that some exceptions are handled
\r
54 by the porting layer itself, and cannot be taken over by application
\r
55 code in this manner. These are the alloca, syscall, and coprocessor
\r
58 The exception handlers can be written in C, and must follow C calling
\r
59 convention. Each handler is passed a pointer to an exception frame as
\r
60 its single argument. The exception frame is created on the stack, and
\r
61 holds the saved context of the thread that took the exception. If the
\r
62 handler returns, the context will be restored and the instruction that
\r
63 caused the exception will be retried. If the handler makes any changes
\r
64 to the saved state in the exception frame, the changes will be applied
\r
65 when restoring the context.
\r
67 Because Xtensa is a configurable architecture, this port supports all user
\r
68 generated configurations (except restrictions stated in the release notes).
\r
69 This is accomplished by conditional compilation using macros and functions
\r
70 defined in the Xtensa HAL (hardware adaptation layer) for your configuration.
\r
71 Only the relevant parts of this file will be included in your RTOS build.
\r
72 For example, this file provides interrupt vector templates for all types and
\r
73 all priority levels, but only the ones in your configuration are built.
\r
75 NOTES on the use of 'call0' for long jumps instead of 'j':
\r
76 1. This file should be assembled with the -mlongcalls option to xt-xcc.
\r
77 2. The -mlongcalls compiler option causes 'call0 dest' to be expanded to
\r
78 a sequence 'l32r a0, dest' 'callx0 a0' which works regardless of the
\r
79 distance from the call to the destination. The linker then relaxes
\r
80 it back to 'call0 dest' if it determines that dest is within range.
\r
81 This allows more flexibility in locating code without the performance
\r
82 overhead of the 'l32r' literal data load in cases where the destination
\r
83 is in range of 'call0'. There is an additional benefit in that 'call0'
\r
84 has a longer range than 'j' due to the target being word-aligned, so
\r
85 the 'l32r' sequence is less likely needed.
\r
86 3. The use of 'call0' with -mlongcalls requires that register a0 not be
\r
87 live at the time of the call, which is always the case for a function
\r
88 call but needs to be ensured if 'call0' is used as a jump in lieu of 'j'.
\r
89 4. This use of 'call0' is independent of the C function call ABI.
\r
91 *******************************************************************************/
\r
93 #include "xtensa_rtos.h"
\r
94 #include "esp_panic.h"
\r
95 #include "sdkconfig.h"
\r
96 #include "soc/soc.h"
\r
97 #include "soc/dport_reg.h"
\r
100 Define for workaround: pin no-cpu-affinity tasks to a cpu when fpu is used.
\r
101 Please change this when the tcb structure is changed
\r
103 #define TASKTCB_XCOREID_OFFSET (0x38+configMAX_TASK_NAME_LEN+3)&~3
\r
104 .extern pxCurrentTCB
\r
106 /* Enable stack backtrace across exception/interrupt - see below */
\r
107 #ifdef CONFIG_FREERTOS_INTERRUPT_BACKTRACE
\r
108 #define XT_DEBUG_BACKTRACE 1
\r
113 --------------------------------------------------------------------------------
\r
114 Defines used to access _xtos_interrupt_table.
\r
115 --------------------------------------------------------------------------------
\r
117 #define XIE_HANDLER 0
\r
123 Macro get_percpu_entry_for - convert a per-core ID into a multicore entry.
\r
124 Basically does reg=reg*portNUM_PROCESSORS+current_core_id
\r
125 Multiple versions here to optimize for specific portNUM_PROCESSORS values.
\r
127 .macro get_percpu_entry_for reg scratch
\r
128 #if (portNUM_PROCESSORS == 1)
\r
129 /* No need to do anything */
\r
130 #elif (portNUM_PROCESSORS == 2)
\r
131 /* Optimized 2-core code. */
\r
133 addx2 \reg,\reg,\scratch
\r
135 /* Generalized n-core code. Untested! */
\r
136 movi \scratch,portNUM_PROCESSORS
\r
137 mull \scratch,\reg,\scratch
\r
139 add \reg,\scratch,\reg
\r
143 --------------------------------------------------------------------------------
\r
144 Macro extract_msb - return the input with only the highest bit set.
\r
146 Input : "ain" - Input value, clobbered.
\r
147 Output : "aout" - Output value, has only one bit set, MSB of "ain".
\r
148 The two arguments must be different AR registers.
\r
149 --------------------------------------------------------------------------------
\r
152 .macro extract_msb aout ain
\r
154 addi \aout, \ain, -1 /* aout = ain - 1 */
\r
155 and \ain, \ain, \aout /* ain = ain & aout */
\r
156 bnez \ain, 1b /* repeat until ain == 0 */
\r
157 addi \aout, \aout, 1 /* return aout + 1 */
\r
161 --------------------------------------------------------------------------------
\r
162 Macro dispatch_c_isr - dispatch interrupts to user ISRs.
\r
163 This will dispatch to user handlers (if any) that are registered in the
\r
164 XTOS dispatch table (_xtos_interrupt_table). These handlers would have
\r
165 been registered by calling _xtos_set_interrupt_handler(). There is one
\r
166 exception - the timer interrupt used by the OS will not be dispatched
\r
167 to a user handler - this must be handled by the caller of this macro.
\r
169 Level triggered and software interrupts are automatically deasserted by
\r
173 -- PS.INTLEVEL is set to "level" at entry
\r
174 -- PS.EXCM = 0, C calling enabled
\r
176 NOTE: For CALL0 ABI, a12-a15 have not yet been saved.
\r
178 NOTE: This macro will use registers a0 and a2-a7. The arguments are:
\r
179 level -- interrupt level
\r
180 mask -- interrupt bitmask for this level
\r
181 --------------------------------------------------------------------------------
\r
184 .macro dispatch_c_isr level mask
\r
186 #ifdef CONFIG_PM_TRACE
\r
187 movi a6, 0 /* = ESP_PM_TRACE_IDLE */
\r
189 call4 esp_pm_trace_exit
\r
190 #endif // CONFIG_PM_TRACE
\r
192 /* Get mask of pending, enabled interrupts at this level into a2. */
\r
194 .L_xt_user_int_&level&:
\r
200 beqz a2, 9f /* nothing to do */
\r
202 /* This bit of code provides a nice debug backtrace in the debugger.
\r
203 It does take a few more instructions, so undef XT_DEBUG_BACKTRACE
\r
204 if you want to save the cycles.
\r
206 #ifdef XT_DEBUG_BACKTRACE
\r
207 #ifndef __XTENSA_CALL0_ABI__
\r
208 rsr a0, EPC_1 + \level - 1 /* return address */
\r
209 movi a4, 0xC0000000 /* constant with top 2 bits set (call size) */
\r
210 or a0, a0, a4 /* set top 2 bits */
\r
211 addx2 a0, a4, a0 /* clear top bit -- simulating call4 size */
\r
215 #ifdef CONFIG_PM_ENABLE
\r
216 call4 esp_pm_impl_isr_hook
\r
219 #ifdef XT_INTEXC_HOOKS
\r
220 /* Call interrupt hook if present to (pre)handle interrupts. */
\r
221 movi a4, _xt_intexc_hooks
\r
222 l32i a4, a4, \level << 2
\r
224 #ifdef __XTENSA_CALL0_ABI__
\r
236 /* Now look up in the dispatch table and call user ISR if any. */
\r
237 /* If multiple bits are set then MSB has highest priority. */
\r
239 extract_msb a4, a2 /* a4 = MSB of a2, a2 trashed */
\r
241 #ifdef XT_USE_SWPRI
\r
242 /* Enable all interrupts at this level that are numerically higher
\r
243 than the one we just selected, since they are treated as higher
\r
246 movi a3, \mask /* a3 = all interrupts at this level */
\r
247 add a2, a4, a4 /* a2 = a4 << 1 */
\r
248 addi a2, a2, -1 /* a2 = mask of 1's <= a4 bit */
\r
249 and a2, a2, a3 /* a2 = mask of all bits <= a4 at this level */
\r
250 movi a3, _xt_intdata
\r
251 l32i a6, a3, 4 /* a6 = _xt_vpri_mask */
\r
253 addi a2, a2, -1 /* a2 = mask to apply */
\r
254 and a5, a6, a2 /* mask off all bits <= a4 bit */
\r
255 s32i a5, a3, 4 /* update _xt_vpri_mask */
\r
257 and a3, a3, a2 /* mask off all bits <= a4 bit */
\r
259 rsil a3, \level - 1 /* lower interrupt level by 1 */
\r
262 movi a3, XT_TIMER_INTEN /* a3 = timer interrupt bit */
\r
263 wsr a4, INTCLEAR /* clear sw or edge-triggered interrupt */
\r
264 beq a3, a4, 7f /* if timer interrupt then skip table */
\r
266 find_ms_setbit a3, a4, a3, 0 /* a3 = interrupt number */
\r
268 get_percpu_entry_for a3, a12
\r
269 movi a4, _xt_interrupt_table
\r
270 addx8 a3, a3, a4 /* a3 = address of interrupt table entry */
\r
271 l32i a4, a3, XIE_HANDLER /* a4 = handler address */
\r
272 #ifdef __XTENSA_CALL0_ABI__
\r
273 mov a12, a6 /* save in callee-saved reg */
\r
274 l32i a2, a3, XIE_ARG /* a2 = handler arg */
\r
275 callx0 a4 /* call handler */
\r
278 mov a2, a6 /* save in windowed reg */
\r
279 l32i a6, a3, XIE_ARG /* a6 = handler arg */
\r
280 callx4 a4 /* call handler */
\r
283 #ifdef XT_USE_SWPRI
\r
286 j .L_xt_user_int_&level& /* check for more interrupts */
\r
291 .ifeq XT_TIMER_INTPRI - \level
\r
292 .L_xt_user_int_timer_&level&:
\r
294 Interrupt handler for the RTOS tick timer if at this level.
\r
295 We'll be reading the interrupt state again after this call
\r
296 so no need to preserve any registers except a6 (vpri_mask).
\r
299 #ifdef __XTENSA_CALL0_ABI__
\r
301 call0 XT_RTOS_TIMER_INT
\r
305 call4 XT_RTOS_TIMER_INT
\r
309 #ifdef XT_USE_SWPRI
\r
312 j .L_xt_user_int_&level& /* check for more interrupts */
\r
315 #ifdef XT_USE_SWPRI
\r
317 /* Restore old value of _xt_vpri_mask from a2. Also update INTENABLE from
\r
318 virtual _xt_intenable which _could_ have changed during interrupt
\r
321 movi a3, _xt_intdata
\r
322 l32i a4, a3, 0 /* a4 = _xt_intenable */
\r
323 s32i a2, a3, 4 /* update _xt_vpri_mask */
\r
324 and a4, a4, a2 /* a4 = masked intenable */
\r
325 wsr a4, INTENABLE /* update INTENABLE */
\r
335 --------------------------------------------------------------------------------
\r
337 Should be reached by call0 (preferable) or jump only. If call0, a0 says where
\r
338 from. If on simulator, display panic message and abort, else loop indefinitely.
\r
339 --------------------------------------------------------------------------------
\r
342 .section .iram1,"ax"
\r
343 .global panicHandler
\r
346 .type _xt_panic,@function
\r
352 /* Allocate exception frame and save minimal context. */
\r
354 addi sp, sp, -XT_STK_FRMSZ
\r
355 s32i a0, sp, XT_STK_A1
\r
356 #if XCHAL_HAVE_WINDOWED
\r
357 s32e a0, sp, -12 /* for debug backtrace */
\r
359 rsr a0, PS /* save interruptee's PS */
\r
360 s32i a0, sp, XT_STK_PS
\r
361 rsr a0, EPC_1 /* save interruptee's PC */
\r
362 s32i a0, sp, XT_STK_PC
\r
363 #if XCHAL_HAVE_WINDOWED
\r
364 s32e a0, sp, -16 /* for debug backtrace */
\r
366 s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
\r
367 s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
\r
368 call0 _xt_context_save
\r
370 /* Save exc cause and vaddr into exception frame */
\r
372 s32i a0, sp, XT_STK_EXCCAUSE
\r
374 s32i a0, sp, XT_STK_EXCVADDR
\r
376 /* _xt_context_save seems to save the current a0, but we need the interruptee's a0. Fix this. */
\r
377 rsr a0, EXCSAVE_1 /* save interruptee's a0 */
\r
379 s32i a0, sp, XT_STK_A0
\r
381 /* Set up PS for C, disable all interrupts except NMI and debug, and clear EXCM. */
\r
382 movi a0, PS_INTLEVEL(5) | PS_UM | PS_WOE
\r
385 //Call panic handler
\r
391 //Call using call0. Prints the hex char in a2. Kills a3, a4, a5
\r
395 panic_print_hex_loop:
\r
397 extui a5, a5, 16, 8
\r
398 bgei a5,64,panic_print_hex_loop
\r
401 bgei a5,10,panic_print_hex_a
\r
403 j panic_print_hex_ok
\r
406 panic_print_hex_ok:
\r
411 bnei a4,0,panic_print_hex_loop
\r
419 .section .rodata, "a"
\r
425 --------------------------------------------------------------------------------
\r
426 Hooks to dynamically install handlers for exceptions and interrupts.
\r
427 Allows automated regression frameworks to install handlers per test.
\r
428 Consists of an array of function pointers indexed by interrupt level,
\r
429 with index 0 containing the entry for user exceptions.
\r
430 Initialized with all 0s, meaning no handler is installed at each level.
\r
431 See comment in xtensa_rtos.h for more details.
\r
433 *WARNING* This array is for all CPUs, that is, installing a hook for
\r
434 one CPU will install it for all others as well!
\r
435 --------------------------------------------------------------------------------
\r
438 #ifdef XT_INTEXC_HOOKS
\r
440 .global _xt_intexc_hooks
\r
441 .type _xt_intexc_hooks,@object
\r
445 .fill XT_INTEXC_HOOK_NUM, 4, 0
\r
450 --------------------------------------------------------------------------------
\r
451 EXCEPTION AND LEVEL 1 INTERRUPT VECTORS AND LOW LEVEL HANDLERS
\r
452 (except window exception vectors).
\r
454 Each vector goes at a predetermined location according to the Xtensa
\r
455 hardware configuration, which is ensured by its placement in a special
\r
456 section known to the Xtensa linker support package (LSP). It performs
\r
457 the minimum necessary before jumping to the handler in the .text section.
\r
459 The corresponding handler goes in the normal .text section. It sets up
\r
460 the appropriate stack frame, saves a few vector-specific registers and
\r
461 calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
\r
462 and enter the RTOS, then sets up a C environment. It then calls the
\r
463 user's interrupt handler code (which may be coded in C) and finally
\r
464 calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.
\r
466 While XT_RTOS_INT_EXIT does not return directly to the interruptee,
\r
467 eventually the RTOS scheduler will want to dispatch the interrupted
\r
468 task or handler. The scheduler will return to the exit point that was
\r
469 saved in the interrupt stack frame at XT_STK_EXIT.
\r
470 --------------------------------------------------------------------------------
\r
475 --------------------------------------------------------------------------------
\r
477 --------------------------------------------------------------------------------
\r
480 #if XCHAL_HAVE_DEBUG
\r
482 .begin literal_prefix .DebugExceptionVector
\r
483 .section .DebugExceptionVector.text, "ax"
\r
484 .global _DebugExceptionVector
\r
486 .global xt_debugexception
\r
487 _DebugExceptionVector:
\r
488 wsr a0, EXCSAVE+XCHAL_DEBUGLEVEL /* preserve a0 */
\r
489 call0 xt_debugexception /* load exception handler */
\r
491 .end literal_prefix
\r
496 --------------------------------------------------------------------------------
\r
498 Double exceptions are not a normal occurrence. They indicate a bug of some kind.
\r
499 --------------------------------------------------------------------------------
\r
502 #ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR
\r
504 .begin literal_prefix .DoubleExceptionVector
\r
505 .section .DoubleExceptionVector.text, "ax"
\r
506 .global _DoubleExceptionVector
\r
509 _DoubleExceptionVector:
\r
511 #if XCHAL_HAVE_DEBUG
\r
512 break 1, 4 /* unhandled double exception */
\r
514 movi a0,PANIC_RSN_DOUBLEEXCEPTION
\r
516 call0 _xt_panic /* does not return */
\r
517 rfde /* make a0 point here not later */
\r
519 .end literal_prefix
\r
521 #endif /* XCHAL_DOUBLEEXC_VECTOR_VADDR */
\r
524 --------------------------------------------------------------------------------
\r
525 Kernel Exception (including Level 1 Interrupt from kernel mode).
\r
526 --------------------------------------------------------------------------------
\r
529 .begin literal_prefix .KernelExceptionVector
\r
530 .section .KernelExceptionVector.text, "ax"
\r
531 .global _KernelExceptionVector
\r
534 _KernelExceptionVector:
\r
536 wsr a0, EXCSAVE_1 /* preserve a0 */
\r
537 call0 _xt_kernel_exc /* kernel exception handler */
\r
538 /* never returns here - call0 is used as a jump (see note at top) */
\r
540 .end literal_prefix
\r
542 .section .iram1,"ax"
\r
546 #if XCHAL_HAVE_DEBUG
\r
547 break 1, 0 /* unhandled kernel exception */
\r
549 movi a0,PANIC_RSN_KERNELEXCEPTION
\r
551 call0 _xt_panic /* does not return */
\r
552 rfe /* make a0 point here not there */
\r
556 --------------------------------------------------------------------------------
\r
557 User Exception (including Level 1 Interrupt from user mode).
\r
558 --------------------------------------------------------------------------------
\r
561 .begin literal_prefix .UserExceptionVector
\r
562 .section .UserExceptionVector.text, "ax"
\r
563 .global _UserExceptionVector
\r
564 .type _UserExceptionVector,@function
\r
567 _UserExceptionVector:
\r
569 wsr a0, EXCSAVE_1 /* preserve a0 */
\r
570 call0 _xt_user_exc /* user exception handler */
\r
571 /* never returns here - call0 is used as a jump (see note at top) */
\r
573 .end literal_prefix
\r
576 --------------------------------------------------------------------------------
\r
577 Insert some waypoints for jumping beyond the signed 8-bit range of
\r
578 conditional branch instructions, so the conditional branches to specific
\r
579 exception handlers are not taken in the mainline. Saves some cycles in the
\r
581 --------------------------------------------------------------------------------
\r
584 #ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
\r
585 .global LoadStoreErrorHandler
\r
586 .global AlignmentErrorHandler
\r
589 .section .iram1,"ax"
\r
591 #if XCHAL_HAVE_WINDOWED
\r
594 call0 _xt_alloca_exc /* in window vectors section */
\r
595 /* never returns here - call0 is used as a jump (see note at top) */
\r
599 _xt_to_syscall_exc:
\r
600 call0 _xt_syscall_exc
\r
601 /* never returns here - call0 is used as a jump (see note at top) */
\r
603 #if XCHAL_CP_NUM > 0
\r
606 call0 _xt_coproc_exc
\r
607 /* never returns here - call0 is used as a jump (see note at top) */
\r
610 #ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
\r
612 _call_loadstore_handler:
\r
613 call0 LoadStoreErrorHandler
\r
614 /* This will return only if wrong opcode or address out of range*/
\r
618 _call_alignment_handler:
\r
619 call0 AlignmentErrorHandler
\r
620 /* This will return only if wrong opcode or address out of range*/
\r
626 --------------------------------------------------------------------------------
\r
627 User exception handler.
\r
628 --------------------------------------------------------------------------------
\r
631 .type _xt_user_exc,@function
\r
636 /* If level 1 interrupt then jump to the dispatcher */
\r
638 beqi a0, EXCCAUSE_LEVEL1INTERRUPT, _xt_lowint1
\r
640 /* Handle any coprocessor exceptions. Rely on the fact that exception
\r
641 numbers above EXCCAUSE_CP0_DISABLED all relate to the coprocessors.
\r
643 #if XCHAL_CP_NUM > 0
\r
644 bgeui a0, EXCCAUSE_CP0_DISABLED, _xt_to_coproc_exc
\r
647 /* Handle alloca and syscall exceptions */
\r
648 #if XCHAL_HAVE_WINDOWED
\r
649 beqi a0, EXCCAUSE_ALLOCA, _xt_to_alloca_exc
\r
651 beqi a0, EXCCAUSE_SYSCALL, _xt_to_syscall_exc
\r
653 #ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
\r
654 beqi a0, EXCCAUSE_LOAD_STORE_ERROR, _call_loadstore_handler
\r
657 beqi a0, 8, _call_alignment_handler
\r
662 /* Handle all other exceptions. All can have user-defined handlers. */
\r
663 /* NOTE: we'll stay on the user stack for exception handling. */
\r
665 /* Allocate exception frame and save minimal context. */
\r
667 addi sp, sp, -XT_STK_FRMSZ
\r
668 s32i a0, sp, XT_STK_A1
\r
669 #if XCHAL_HAVE_WINDOWED
\r
670 s32e a0, sp, -12 /* for debug backtrace */
\r
672 rsr a0, PS /* save interruptee's PS */
\r
673 s32i a0, sp, XT_STK_PS
\r
674 rsr a0, EPC_1 /* save interruptee's PC */
\r
675 s32i a0, sp, XT_STK_PC
\r
676 #if XCHAL_HAVE_WINDOWED
\r
677 s32e a0, sp, -16 /* for debug backtrace */
\r
679 s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
\r
680 s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
\r
681 call0 _xt_context_save
\r
683 /* Save exc cause and vaddr into exception frame */
\r
685 s32i a0, sp, XT_STK_EXCCAUSE
\r
687 s32i a0, sp, XT_STK_EXCVADDR
\r
689 /* _xt_context_save seems to save the current a0, but we need the interruptee's a0. Fix this. */
\r
690 rsr a0, EXCSAVE_1 /* save interruptee's a0 */
\r
691 s32i a0, sp, XT_STK_A0
\r
693 /* Set up PS for C, reenable hi-pri interrupts, and clear EXCM. */
\r
694 #ifdef __XTENSA_CALL0_ABI__
\r
695 movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM
\r
697 movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE
\r
701 #ifdef XT_DEBUG_BACKTRACE
\r
702 #ifndef __XTENSA_CALL0_ABI__
\r
703 rsr a0, EPC_1 /* return address for debug backtrace */
\r
704 movi a5, 0xC0000000 /* constant with top 2 bits set (call size) */
\r
705 rsync /* wait for WSR.PS to complete */
\r
706 or a0, a0, a5 /* set top 2 bits */
\r
707 addx2 a0, a5, a0 /* clear top bit -- thus simulating call4 size */
\r
709 rsync /* wait for WSR.PS to complete */
\r
713 rsr a2, EXCCAUSE /* recover exc cause */
\r
715 #ifdef XT_INTEXC_HOOKS
\r
717 Call exception hook to pre-handle exceptions (if installed).
\r
718 Pass EXCCAUSE in a2, and check result in a2 (if -1, skip default handling).
\r
720 movi a4, _xt_intexc_hooks
\r
721 l32i a4, a4, 0 /* user exception hook index 0 */
\r
723 .Ln_xt_user_exc_call_hook:
\r
724 #ifdef __XTENSA_CALL0_ABI__
\r
726 beqi a2, -1, .L_xt_user_done
\r
730 beqi a6, -1, .L_xt_user_done
\r
736 rsr a2, EXCCAUSE /* recover exc cause */
\r
737 movi a3, _xt_exception_table
\r
738 get_percpu_entry_for a2, a4
\r
739 addx4 a4, a2, a3 /* a4 = address of exception table entry */
\r
740 l32i a4, a4, 0 /* a4 = handler address */
\r
741 #ifdef __XTENSA_CALL0_ABI__
\r
742 mov a2, sp /* a2 = pointer to exc frame */
\r
743 callx0 a4 /* call handler */
\r
745 mov a6, sp /* a6 = pointer to exc frame */
\r
746 callx4 a4 /* call handler */
\r
751 /* Restore context and return */
\r
752 call0 _xt_context_restore
\r
753 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
\r
755 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
\r
757 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
\r
758 l32i sp, sp, XT_STK_A1 /* remove exception frame */
\r
759 rsync /* ensure PS and EPC written */
\r
760 rfe /* PS.EXCM is cleared */
\r
764 --------------------------------------------------------------------------------
\r
765 Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT
\r
766 on entry and used to return to a thread or interrupted interrupt handler.
\r
767 --------------------------------------------------------------------------------
\r
770 .global _xt_user_exit
\r
771 .type _xt_user_exit,@function
\r
774 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
\r
776 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
\r
778 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
\r
779 l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
\r
780 rsync /* ensure PS and EPC written */
\r
781 rfe /* PS.EXCM is cleared */
\r
786 --------------------------------------------------------------------------------
\r
787 Syscall Exception Handler (jumped to from User Exception Handler).
\r
788 Syscall 0 is required to spill the register windows (no-op in Call 0 ABI).
\r
789 Only syscall 0 is handled here. Other syscalls return -1 to caller in a2.
\r
790 --------------------------------------------------------------------------------
\r
793 .section .iram1,"ax"
\r
794 .type _xt_syscall_exc,@function
\r
798 #ifdef __XTENSA_CALL0_ABI__
\r
800 Save minimal regs for scratch. Syscall 0 does nothing in Call0 ABI.
\r
801 Use a minimal stack frame (16B) to save A2 & A3 for scratch.
\r
802 PS.EXCM could be cleared here, but unlikely to improve worst-case latency.
\r
804 addi a0, a0, -PS_EXCM_MASK
\r
810 #else /* Windowed ABI */
\r
812 Save necessary context and spill the register windows.
\r
813 PS.EXCM is still set and must remain set until after the spill.
\r
814 Reuse context save function though it saves more than necessary.
\r
815 For this reason, a full interrupt stack frame is allocated.
\r
817 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
\r
818 s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
\r
819 s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
\r
820 call0 _xt_context_save
\r
824 Grab the interruptee's PC and skip over the 'syscall' instruction.
\r
825 If it's at the end of a zero-overhead loop and it's not on the last
\r
826 iteration, decrement loop counter and skip to beginning of loop.
\r
828 rsr a2, EPC_1 /* a2 = PC of 'syscall' */
\r
829 addi a3, a2, 3 /* ++PC */
\r
830 #if XCHAL_HAVE_LOOPS
\r
831 rsr a0, LEND /* if (PC == LEND */
\r
833 rsr a0, LCOUNT /* && LCOUNT != 0) */
\r
834 beqz a0, 1f /* { */
\r
835 addi a0, a0, -1 /* --LCOUNT */
\r
836 rsr a3, LBEG /* PC = LBEG */
\r
837 wsr a0, LCOUNT /* } */
\r
839 1: wsr a3, EPC_1 /* update PC */
\r
841 /* Restore interruptee's context and return from exception. */
\r
842 #ifdef __XTENSA_CALL0_ABI__
\r
847 call0 _xt_context_restore
\r
848 addi sp, sp, XT_STK_FRMSZ
\r
851 movnez a2, a0, a2 /* return -1 if not syscall 0 */
\r
856 --------------------------------------------------------------------------------
\r
857 Co-Processor Exception Handler (jumped to from User Exception Handler).
\r
858 These exceptions are generated by co-processor instructions, which are only
\r
859 allowed in thread code (not in interrupts or kernel code). This restriction is
\r
860 deliberately imposed to reduce the burden of state-save/restore in interrupts.
\r
861 --------------------------------------------------------------------------------
\r
863 #if XCHAL_CP_NUM > 0
\r
865 .section .rodata, "a"
\r
867 /* Offset to CP n save area in thread's CP save area. */
\r
868 .global _xt_coproc_sa_offset
\r
869 .type _xt_coproc_sa_offset,@object
\r
870 .align 16 /* minimize crossing cache boundaries */
\r
871 _xt_coproc_sa_offset:
\r
872 .word XT_CP0_SA, XT_CP1_SA, XT_CP2_SA, XT_CP3_SA
\r
873 .word XT_CP4_SA, XT_CP5_SA, XT_CP6_SA, XT_CP7_SA
\r
875 /* Bitmask for CP n's CPENABLE bit. */
\r
876 .type _xt_coproc_mask,@object
\r
877 .align 16,,8 /* try to keep it all in one cache line */
\r
881 .long (i<<16) | (1<<i) // upper 16-bits = i, lower = bitmask
\r
887 /* Owner thread of CP n, identified by thread's CP save area (0 = unowned). */
\r
888 .global _xt_coproc_owner_sa
\r
889 .type _xt_coproc_owner_sa,@object
\r
890 .align 16,,XCHAL_CP_MAX<<2 /* minimize crossing cache boundaries */
\r
891 _xt_coproc_owner_sa:
\r
892 .space (XCHAL_CP_MAX * portNUM_PROCESSORS) << 2
\r
894 .section .iram1,"ax"
\r
899 j .L_xt_coproc_invalid /* not in a thread (invalid) */
\r
902 j .L_xt_coproc_done
\r
906 --------------------------------------------------------------------------------
\r
907 Coprocessor exception handler.
\r
908 At entry, only a0 has been saved (in EXCSAVE_1).
\r
909 --------------------------------------------------------------------------------
\r
912 .type _xt_coproc_exc,@function
\r
917 /* Allocate interrupt stack frame and save minimal context. */
\r
918 mov a0, sp /* sp == a1 */
\r
919 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
\r
920 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */
\r
921 #if XCHAL_HAVE_WINDOWED
\r
922 s32e a0, sp, -12 /* for debug backtrace */
\r
924 rsr a0, PS /* save interruptee's PS */
\r
925 s32i a0, sp, XT_STK_PS
\r
926 rsr a0, EPC_1 /* save interruptee's PC */
\r
927 s32i a0, sp, XT_STK_PC
\r
928 rsr a0, EXCSAVE_1 /* save interruptee's a0 */
\r
929 s32i a0, sp, XT_STK_A0
\r
930 #if XCHAL_HAVE_WINDOWED
\r
931 s32e a0, sp, -16 /* for debug backtrace */
\r
933 movi a0, _xt_user_exit /* save exit point for dispatch */
\r
934 s32i a0, sp, XT_STK_EXIT
\r
937 s32i a5, sp, XT_STK_A5 /* save a5 */
\r
938 addi a5, a0, -EXCCAUSE_CP0_DISABLED /* a5 = CP index */
\r
940 /* Save a few more of interruptee's registers (a5 was already saved). */
\r
941 s32i a2, sp, XT_STK_A2
\r
942 s32i a3, sp, XT_STK_A3
\r
943 s32i a4, sp, XT_STK_A4
\r
944 s32i a15, sp, XT_STK_A15
\r
946 /* Get co-processor state save area of new owner thread. */
\r
947 call0 XT_RTOS_CP_STATE /* a15 = new owner's save area */
\r
948 beqz a15, .L_goto_invalid /* not in a thread (invalid) */
\r
950 /* Enable the co-processor's bit in CPENABLE. */
\r
951 movi a0, _xt_coproc_mask
\r
952 rsr a4, CPENABLE /* a4 = CPENABLE */
\r
953 addx4 a0, a5, a0 /* a0 = &_xt_coproc_mask[n] */
\r
954 l32i a0, a0, 0 /* a0 = (n << 16) | (1 << n) */
\r
956 /* FPU operations are incompatible with non-pinned tasks. If we have a FPU operation
\r
957 here, to keep the entire thing from crashing, it's better to pin the task to whatever
\r
958 core we're running on now. */
\r
959 movi a2, pxCurrentTCB
\r
962 l32i a2, a2, 0 /* a2 = start of pxCurrentTCB[cpuid] */
\r
963 addi a2, a2, TASKTCB_XCOREID_OFFSET /* offset to xCoreID in tcb struct */
\r
964 s32i a3, a2, 0 /* store current cpuid */
\r
966 /* Grab correct xt_coproc_owner_sa for this core */
\r
967 movi a2, XCHAL_CP_MAX << 2
\r
968 mull a2, a2, a3 /* multiply by current processor id */
\r
969 movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
\r
970 add a3, a3, a2 /* a3 = owner area needed for this processor */
\r
972 extui a2, a0, 0, 16 /* coprocessor bitmask portion */
\r
973 or a4, a4, a2 /* a4 = CPENABLE | (1 << n) */
\r
977 Keep loading _xt_coproc_owner_sa[n] atomic (=load once, then use that value
\r
978 everywhere): _xt_coproc_release assumes it works like this in order not to need
\r
983 /* Get old coprocessor owner thread (save area ptr) and assign new one. */
\r
984 addx4 a3, a5, a3 /* a3 = &_xt_coproc_owner_sa[n] */
\r
985 l32i a2, a3, 0 /* a2 = old owner's save area */
\r
986 s32i a15, a3, 0 /* _xt_coproc_owner_sa[n] = new */
\r
987 rsync /* ensure wsr.CPENABLE is complete */
\r
989 /* Only need to context switch if new owner != old owner. */
\r
990 beq a15, a2, .L_goto_done /* new owner == old, we're done */
\r
992 /* If no old owner then nothing to save. */
\r
993 beqz a2, .L_check_new
\r
995 /* If old owner not actively using CP then nothing to save. */
\r
996 l16ui a4, a2, XT_CPENABLE /* a4 = old owner's CPENABLE */
\r
997 bnone a4, a0, .L_check_new /* old owner not using CP */
\r
1000 /* Save old owner's coprocessor state. */
\r
1002 movi a5, _xt_coproc_sa_offset
\r
1004 /* Mark old owner state as no longer active (CPENABLE bit n clear). */
\r
1005 xor a4, a4, a0 /* clear CP bit in CPENABLE */
\r
1006 s16i a4, a2, XT_CPENABLE /* update old owner's CPENABLE */
\r
1008 extui a4, a0, 16, 5 /* a4 = CP index = n */
\r
1009 addx4 a5, a4, a5 /* a5 = &_xt_coproc_sa_offset[n] */
\r
1011 /* Mark old owner state as saved (CPSTORED bit n set). */
\r
1012 l16ui a4, a2, XT_CPSTORED /* a4 = old owner's CPSTORED */
\r
1013 l32i a5, a5, 0 /* a5 = XT_CP[n]_SA offset */
\r
1014 or a4, a4, a0 /* set CP in old owner's CPSTORED */
\r
1015 s16i a4, a2, XT_CPSTORED /* update old owner's CPSTORED */
\r
1016 l32i a2, a2, XT_CP_ASA /* ptr to actual (aligned) save area */
\r
1017 extui a3, a0, 16, 5 /* a3 = CP index = n */
\r
1018 add a2, a2, a5 /* a2 = old owner's area for CP n */
\r
1021 The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
\r
1022 It is theoretically possible for Xtensa processor designers to write TIE
\r
1023 that causes more address registers to be affected, but it is generally
\r
1024 unlikely. If that ever happens, more registers needs to be saved/restored
\r
1025 around this macro invocation, and the value in a15 needs to be recomputed.
\r
1027 xchal_cpi_store_funcbody
\r
1030 /* Check if any state has to be restored for new owner. */
\r
1031 /* NOTE: a15 = new owner's save area, cannot be zero when we get here. */
\r
1033 l16ui a3, a15, XT_CPSTORED /* a3 = new owner's CPSTORED */
\r
1034 movi a4, _xt_coproc_sa_offset
\r
1035 bnone a3, a0, .L_check_cs /* full CP not saved, check callee-saved */
\r
1036 xor a3, a3, a0 /* CPSTORED bit is set, clear it */
\r
1037 s16i a3, a15, XT_CPSTORED /* update new owner's CPSTORED */
\r
1039 /* Adjust new owner's save area pointers to area for CP n. */
\r
1040 extui a3, a0, 16, 5 /* a3 = CP index = n */
\r
1041 addx4 a4, a3, a4 /* a4 = &_xt_coproc_sa_offset[n] */
\r
1042 l32i a4, a4, 0 /* a4 = XT_CP[n]_SA */
\r
1043 l32i a5, a15, XT_CP_ASA /* ptr to actual (aligned) save area */
\r
1044 add a2, a4, a5 /* a2 = new owner's area for CP */
\r
1047 The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.
\r
1048 It is theoretically possible for Xtensa processor designers to write TIE
\r
1049 that causes more address registers to be affected, but it is generally
\r
1050 unlikely. If that ever happens, more registers needs to be saved/restored
\r
1051 around this macro invocation.
\r
1053 xchal_cpi_load_funcbody
\r
1055 /* Restore interruptee's saved registers. */
\r
1056 /* Can omit rsync for wsr.CPENABLE here because _xt_user_exit does it. */
\r
1057 .L_xt_coproc_done:
\r
1058 l32i a15, sp, XT_STK_A15
\r
1059 l32i a5, sp, XT_STK_A5
\r
1060 l32i a4, sp, XT_STK_A4
\r
1061 l32i a3, sp, XT_STK_A3
\r
1062 l32i a2, sp, XT_STK_A2
\r
1063 call0 _xt_user_exit /* return via exit dispatcher */
\r
1064 /* Never returns here - call0 is used as a jump (see note at top) */
\r
1067 /* a0 = CP mask in low bits, a15 = new owner's save area */
\r
1068 l16ui a2, a15, XT_CP_CS_ST /* a2 = mask of CPs saved */
\r
1069 bnone a2, a0, .L_xt_coproc_done /* if no match then done */
\r
1070 and a2, a2, a0 /* a2 = which CPs to restore */
\r
1071 extui a2, a2, 0, 8 /* extract low 8 bits */
\r
1072 s32i a6, sp, XT_STK_A6 /* save extra needed regs */
\r
1073 s32i a7, sp, XT_STK_A7
\r
1074 s32i a13, sp, XT_STK_A13
\r
1075 s32i a14, sp, XT_STK_A14
\r
1076 call0 _xt_coproc_restorecs /* restore CP registers */
\r
1077 l32i a6, sp, XT_STK_A6 /* restore saved registers */
\r
1078 l32i a7, sp, XT_STK_A7
\r
1079 l32i a13, sp, XT_STK_A13
\r
1080 l32i a14, sp, XT_STK_A14
\r
1081 j .L_xt_coproc_done
\r
1083 /* Co-processor exception occurred outside a thread (not supported). */
\r
1084 .L_xt_coproc_invalid:
\r
1085 movi a0,PANIC_RSN_COPROCEXCEPTION
\r
1087 call0 _xt_panic /* not in a thread (invalid) */
\r
1088 /* never returns */
\r
1091 #endif /* XCHAL_CP_NUM */
\r
1095 -------------------------------------------------------------------------------
\r
1096 Level 1 interrupt dispatch. Assumes stack frame has not been allocated yet.
\r
1097 -------------------------------------------------------------------------------
\r
/* -----------------------------------------------------------------------------
   Level 1 interrupt low-level dispatcher (_xt_lowint1).
   Allocates an interrupt stack frame, saves the minimal interruptee context
   (pre-interrupt SP, PS, EPC_1, and a0 previously stashed in EXCSAVE_1),
   records _xt_user_exit as the dispatch exit point, enters the RTOS via
   XT_RTOS_INT_ENTER, prepares PS for C code, dispatches C-level ISRs, and
   finally transfers control to the RTOS via XT_RTOS_INT_EXIT (no return).
   NOTE(review): gaps in the embedded original line numbering (1102-1104,
   1125, 1127-1130) show this extract drops lines -- presumably the .global
   and .align directives, the _xt_lowint1: label, and the #else/#endif plus
   wsr/rsync of the PS setup. Verify against the complete source file.
----------------------------------------------------------------------------- */
1100 .section .iram1,"ax"

1101 .type _xt_lowint1,@function

1105 mov a0, sp /* sp == a1 */

1106 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */

1107 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */

1108 rsr a0, PS /* save interruptee's PS */

1109 s32i a0, sp, XT_STK_PS

1110 rsr a0, EPC_1 /* save interruptee's PC */

1111 s32i a0, sp, XT_STK_PC

1112 rsr a0, EXCSAVE_1 /* save interruptee's a0 */

1113 s32i a0, sp, XT_STK_A0

1114 movi a0, _xt_user_exit /* save exit point for dispatch */

1115 s32i a0, sp, XT_STK_EXIT

1117 /* Save rest of interrupt context and enter RTOS. */

1118 call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */

1120 /* !! We are now on the RTOS system stack !! */

1122 /* Set up PS for C, enable interrupts above this level and clear EXCM. */

1123 #ifdef __XTENSA_CALL0_ABI__

1124 movi a0, PS_INTLEVEL(1) | PS_UM

/* (windowed-ABI variant adds PS_WOE; intervening #else not visible here) */
1126 movi a0, PS_INTLEVEL(1) | PS_UM | PS_WOE

1131 /* OK to call C code at this point, dispatch user ISRs */

1133 dispatch_c_isr 1 XCHAL_INTLEVEL1_MASK

1135 /* Done handling interrupts, transfer control to OS */

1136 call0 XT_RTOS_INT_EXIT /* does not return directly here */
\r
1140 -------------------------------------------------------------------------------
\r
1141 MEDIUM PRIORITY (LEVEL 2+) INTERRUPT VECTORS AND LOW LEVEL HANDLERS.
\r
1143 Medium priority interrupts are by definition those with priority greater
\r
1144 than 1 and not greater than XCHAL_EXCM_LEVEL. These are disabled by
\r
1145 setting PS.EXCM and therefore can easily support a C environment for
\r
1146 handlers in C, and interact safely with an RTOS.
\r
1148 Each vector goes at a predetermined location according to the Xtensa
\r
1149 hardware configuration, which is ensured by its placement in a special
\r
1150 section known to the Xtensa linker support package (LSP). It performs
\r
1151 the minimum necessary before jumping to the handler in the .text section.
\r
1153 The corresponding handler goes in the normal .text section. It sets up
\r
1154 the appropriate stack frame, saves a few vector-specific registers and
\r
1155 calls XT_RTOS_INT_ENTER to save the rest of the interrupted context
\r
1156 and enter the RTOS, then sets up a C environment. It then calls the
\r
1157 user's interrupt handler code (which may be coded in C) and finally
\r
1158 calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.
\r
1160 While XT_RTOS_INT_EXIT does not return directly to the interruptee,
\r
1161 eventually the RTOS scheduler will want to dispatch the interrupted
\r
1162 task or handler. The scheduler will return to the exit point that was
\r
1163 saved in the interrupt stack frame at XT_STK_EXIT.
\r
1164 -------------------------------------------------------------------------------
\r
1167 #if XCHAL_EXCM_LEVEL >= 2
\r
/* -----------------------------------------------------------------------------
   Level 2 medium-priority interrupt: vector (_Level2Vector), low-level
   handler (_xt_medint2), and dispatch exit stub (_xt_medint2_exit).
   The vector preserves a0 in EXCSAVE_2 and jumps (call0 used as a jump) to
   _xt_medint2, which builds an interrupt stack frame (pre-interrupt SP,
   EPS_2, EPC_2, a0), stores _xt_medint2_exit at XT_STK_EXIT, calls
   XT_RTOS_INT_ENTER, sets up PS for C with interrupts above level 2
   enabled, runs dispatch_c_isr, and exits through XT_RTOS_INT_EXIT.
   NOTE(review): original lines 1205, 1207-1210, 1224-1225, 1228, 1230,
   1234-1235 are not visible in this extract -- presumably the #else/#endif
   and wsr/rsync of the PS setup, the _xt_medint2_exit: label, the
   wsr EPS_2 / wsr EPC_2 writes, and the final rfi. Verify against the
   complete source file.
----------------------------------------------------------------------------- */
1169 .begin literal_prefix .Level2InterruptVector

1170 .section .Level2InterruptVector.text, "ax"

1171 .global _Level2Vector

1172 .type _Level2Vector,@function

1175 wsr a0, EXCSAVE_2 /* preserve a0 */

1176 call0 _xt_medint2 /* load interrupt handler */

1177 /* never returns here - call0 is used as a jump (see note at top) */

1179 .end literal_prefix

1181 .section .iram1,"ax"

1182 .type _xt_medint2,@function

1185 mov a0, sp /* sp == a1 */

1186 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */

1187 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */

1188 rsr a0, EPS_2 /* save interruptee's PS */

1189 s32i a0, sp, XT_STK_PS

1190 rsr a0, EPC_2 /* save interruptee's PC */

1191 s32i a0, sp, XT_STK_PC

1192 rsr a0, EXCSAVE_2 /* save interruptee's a0 */

1193 s32i a0, sp, XT_STK_A0

1194 movi a0, _xt_medint2_exit /* save exit point for dispatch */

1195 s32i a0, sp, XT_STK_EXIT

1197 /* Save rest of interrupt context and enter RTOS. */

1198 call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */

1200 /* !! We are now on the RTOS system stack !! */

1202 /* Set up PS for C, enable interrupts above this level and clear EXCM. */

1203 #ifdef __XTENSA_CALL0_ABI__

1204 movi a0, PS_INTLEVEL(2) | PS_UM

/* (windowed-ABI variant adds PS_WOE; intervening #else not visible here) */
1206 movi a0, PS_INTLEVEL(2) | PS_UM | PS_WOE

1211 /* OK to call C code at this point, dispatch user ISRs */

1213 dispatch_c_isr 2 XCHAL_INTLEVEL2_MASK

1215 /* Done handling interrupts, transfer control to OS */

1216 call0 XT_RTOS_INT_EXIT /* does not return directly here */

1219 Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT

1220 on entry and used to return to a thread or interrupted interrupt handler.

1222 .global _xt_medint2_exit

1223 .type _xt_medint2_exit,@function

1226 /* Restore only level-specific regs (the rest were already restored) */

1227 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */

1229 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */

1231 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */

1232 l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */

1233 rsync /* ensure EPS and EPC written */
\r
1238 #if XCHAL_EXCM_LEVEL >= 3
\r
/* -----------------------------------------------------------------------------
   Level 3 medium-priority interrupt: vector (_Level3Vector), low-level
   handler (_xt_medint3), and dispatch exit stub (_xt_medint3_exit).
   Same structure as the level 2 handler but using EXCSAVE_3/EPS_3/EPC_3
   and PS_INTLEVEL(3).
   NOTE(review): gaps in the embedded original numbering (e.g. 1276,
   1278-1281, 1295-1296, 1299, 1301, 1305-1306) indicate dropped lines --
   presumably #else/#endif and wsr/rsync of the PS setup, the
   _xt_medint3_exit: label, the wsr EPS_3 / wsr EPC_3 writes, and the final
   rfi. Verify against the complete source file.
----------------------------------------------------------------------------- */
1240 .begin literal_prefix .Level3InterruptVector

1241 .section .Level3InterruptVector.text, "ax"

1242 .global _Level3Vector

1243 .type _Level3Vector,@function

1246 wsr a0, EXCSAVE_3 /* preserve a0 */

1247 call0 _xt_medint3 /* load interrupt handler */

1248 /* never returns here - call0 is used as a jump (see note at top) */

1250 .end literal_prefix

1252 .section .iram1,"ax"

1253 .type _xt_medint3,@function

1256 mov a0, sp /* sp == a1 */

1257 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */

1258 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */

1259 rsr a0, EPS_3 /* save interruptee's PS */

1260 s32i a0, sp, XT_STK_PS

1261 rsr a0, EPC_3 /* save interruptee's PC */

1262 s32i a0, sp, XT_STK_PC

1263 rsr a0, EXCSAVE_3 /* save interruptee's a0 */

1264 s32i a0, sp, XT_STK_A0

1265 movi a0, _xt_medint3_exit /* save exit point for dispatch */

1266 s32i a0, sp, XT_STK_EXIT

1268 /* Save rest of interrupt context and enter RTOS. */

1269 call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */

1271 /* !! We are now on the RTOS system stack !! */

1273 /* Set up PS for C, enable interrupts above this level and clear EXCM. */

1274 #ifdef __XTENSA_CALL0_ABI__

1275 movi a0, PS_INTLEVEL(3) | PS_UM

/* (windowed-ABI variant adds PS_WOE; intervening #else not visible here) */
1277 movi a0, PS_INTLEVEL(3) | PS_UM | PS_WOE

1282 /* OK to call C code at this point, dispatch user ISRs */

1284 dispatch_c_isr 3 XCHAL_INTLEVEL3_MASK

1286 /* Done handling interrupts, transfer control to OS */

1287 call0 XT_RTOS_INT_EXIT /* does not return directly here */

1290 Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT

1291 on entry and used to return to a thread or interrupted interrupt handler.

1293 .global _xt_medint3_exit

1294 .type _xt_medint3_exit,@function

1297 /* Restore only level-specific regs (the rest were already restored) */

1298 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */

1300 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */

1302 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */

1303 l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */

1304 rsync /* ensure EPS and EPC written */
\r
1309 #if XCHAL_EXCM_LEVEL >= 4
\r
/* -----------------------------------------------------------------------------
   Level 4 medium-priority interrupt: vector (_Level4Vector), low-level
   handler (_xt_medint4), and dispatch exit stub (_xt_medint4_exit).
   Same structure as the level 2/3 handlers but using EXCSAVE_4/EPS_4/EPC_4
   and PS_INTLEVEL(4).
   NOTE(review): gaps in the embedded original numbering (e.g. 1319, 1346,
   1348-1351, 1365-1366, 1369, 1371, 1375-1376) indicate dropped lines --
   presumably #else/#endif and wsr/rsync of the PS setup, the
   _xt_medint4_exit: label, the wsr EPS_4 / wsr EPC_4 writes, and the final
   rfi. Verify against the complete source file.
----------------------------------------------------------------------------- */
1311 .begin literal_prefix .Level4InterruptVector

1312 .section .Level4InterruptVector.text, "ax"

1313 .global _Level4Vector

1314 .type _Level4Vector,@function

1317 wsr a0, EXCSAVE_4 /* preserve a0 */

1318 call0 _xt_medint4 /* load interrupt handler */

1320 .end literal_prefix

1322 .section .iram1,"ax"

1323 .type _xt_medint4,@function

1326 mov a0, sp /* sp == a1 */

1327 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */

1328 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */

1329 rsr a0, EPS_4 /* save interruptee's PS */

1330 s32i a0, sp, XT_STK_PS

1331 rsr a0, EPC_4 /* save interruptee's PC */

1332 s32i a0, sp, XT_STK_PC

1333 rsr a0, EXCSAVE_4 /* save interruptee's a0 */

1334 s32i a0, sp, XT_STK_A0

1335 movi a0, _xt_medint4_exit /* save exit point for dispatch */

1336 s32i a0, sp, XT_STK_EXIT

1338 /* Save rest of interrupt context and enter RTOS. */

1339 call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */

1341 /* !! We are now on the RTOS system stack !! */

1343 /* Set up PS for C, enable interrupts above this level and clear EXCM. */

1344 #ifdef __XTENSA_CALL0_ABI__

1345 movi a0, PS_INTLEVEL(4) | PS_UM

/* (windowed-ABI variant adds PS_WOE; intervening #else not visible here) */
1347 movi a0, PS_INTLEVEL(4) | PS_UM | PS_WOE

1352 /* OK to call C code at this point, dispatch user ISRs */

1354 dispatch_c_isr 4 XCHAL_INTLEVEL4_MASK

1356 /* Done handling interrupts, transfer control to OS */

1357 call0 XT_RTOS_INT_EXIT /* does not return directly here */

1360 Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT

1361 on entry and used to return to a thread or interrupted interrupt handler.

1363 .global _xt_medint4_exit

1364 .type _xt_medint4_exit,@function

1367 /* Restore only level-specific regs (the rest were already restored) */

1368 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */

1370 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */

1372 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */

1373 l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */

1374 rsync /* ensure EPS and EPC written */
\r
1379 #if XCHAL_EXCM_LEVEL >= 5
\r
/* -----------------------------------------------------------------------------
   Level 5 medium-priority interrupt: vector (_Level5Vector), low-level
   handler (_xt_medint5), and dispatch exit stub (_xt_medint5_exit).
   Same structure as the lower-level handlers but using
   EXCSAVE_5/EPS_5/EPC_5 and PS_INTLEVEL(5).
   NOTE(review): gaps in the embedded original numbering (e.g. 1389, 1416,
   1418-1421, 1435-1436, 1439, 1441, 1445-1446) indicate dropped lines --
   presumably #else/#endif and wsr/rsync of the PS setup, the
   _xt_medint5_exit: label, the wsr EPS_5 / wsr EPC_5 writes, and the final
   rfi. Verify against the complete source file.
----------------------------------------------------------------------------- */
1381 .begin literal_prefix .Level5InterruptVector

1382 .section .Level5InterruptVector.text, "ax"

1383 .global _Level5Vector

1384 .type _Level5Vector,@function

1387 wsr a0, EXCSAVE_5 /* preserve a0 */

1388 call0 _xt_medint5 /* load interrupt handler */

1390 .end literal_prefix

1392 .section .iram1,"ax"

1393 .type _xt_medint5,@function

1396 mov a0, sp /* sp == a1 */

1397 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */

1398 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */

1399 rsr a0, EPS_5 /* save interruptee's PS */

1400 s32i a0, sp, XT_STK_PS

1401 rsr a0, EPC_5 /* save interruptee's PC */

1402 s32i a0, sp, XT_STK_PC

1403 rsr a0, EXCSAVE_5 /* save interruptee's a0 */

1404 s32i a0, sp, XT_STK_A0

1405 movi a0, _xt_medint5_exit /* save exit point for dispatch */

1406 s32i a0, sp, XT_STK_EXIT

1408 /* Save rest of interrupt context and enter RTOS. */

1409 call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */

1411 /* !! We are now on the RTOS system stack !! */

1413 /* Set up PS for C, enable interrupts above this level and clear EXCM. */

1414 #ifdef __XTENSA_CALL0_ABI__

1415 movi a0, PS_INTLEVEL(5) | PS_UM

/* (windowed-ABI variant adds PS_WOE; intervening #else not visible here) */
1417 movi a0, PS_INTLEVEL(5) | PS_UM | PS_WOE

1422 /* OK to call C code at this point, dispatch user ISRs */

1424 dispatch_c_isr 5 XCHAL_INTLEVEL5_MASK

1426 /* Done handling interrupts, transfer control to OS */

1427 call0 XT_RTOS_INT_EXIT /* does not return directly here */

1430 Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT

1431 on entry and used to return to a thread or interrupted interrupt handler.

1433 .global _xt_medint5_exit

1434 .type _xt_medint5_exit,@function

1437 /* Restore only level-specific regs (the rest were already restored) */

1438 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */

1440 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */

1442 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */

1443 l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */

1444 rsync /* ensure EPS and EPC written */
\r
1449 #if XCHAL_EXCM_LEVEL >= 6
\r
/* -----------------------------------------------------------------------------
   Level 6 medium-priority interrupt: vector (_Level6Vector), low-level
   handler (_xt_medint6), and dispatch exit stub (_xt_medint6_exit).
   Same structure as the lower-level handlers but using
   EXCSAVE_6/EPS_6/EPC_6 and PS_INTLEVEL(6).
   NOTE(review): gaps in the embedded original numbering (e.g. 1459, 1486,
   1488-1491, 1505-1506, 1509, 1511, 1515-1516) indicate dropped lines --
   presumably #else/#endif and wsr/rsync of the PS setup, the
   _xt_medint6_exit: label, the wsr EPS_6 / wsr EPC_6 writes, and the final
   rfi. Verify against the complete source file.
----------------------------------------------------------------------------- */
1451 .begin literal_prefix .Level6InterruptVector

1452 .section .Level6InterruptVector.text, "ax"

1453 .global _Level6Vector

1454 .type _Level6Vector,@function

1457 wsr a0, EXCSAVE_6 /* preserve a0 */

1458 call0 _xt_medint6 /* load interrupt handler */

1460 .end literal_prefix

1462 .section .iram1,"ax"

1463 .type _xt_medint6,@function

1466 mov a0, sp /* sp == a1 */

1467 addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */

1468 s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */

1469 rsr a0, EPS_6 /* save interruptee's PS */

1470 s32i a0, sp, XT_STK_PS

1471 rsr a0, EPC_6 /* save interruptee's PC */

1472 s32i a0, sp, XT_STK_PC

1473 rsr a0, EXCSAVE_6 /* save interruptee's a0 */

1474 s32i a0, sp, XT_STK_A0

1475 movi a0, _xt_medint6_exit /* save exit point for dispatch */

1476 s32i a0, sp, XT_STK_EXIT

1478 /* Save rest of interrupt context and enter RTOS. */

1479 call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */

1481 /* !! We are now on the RTOS system stack !! */

1483 /* Set up PS for C, enable interrupts above this level and clear EXCM. */

1484 #ifdef __XTENSA_CALL0_ABI__

1485 movi a0, PS_INTLEVEL(6) | PS_UM

/* (windowed-ABI variant adds PS_WOE; intervening #else not visible here) */
1487 movi a0, PS_INTLEVEL(6) | PS_UM | PS_WOE

1492 /* OK to call C code at this point, dispatch user ISRs */

1494 dispatch_c_isr 6 XCHAL_INTLEVEL6_MASK

1496 /* Done handling interrupts, transfer control to OS */

1497 call0 XT_RTOS_INT_EXIT /* does not return directly here */

1500 Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT

1501 on entry and used to return to a thread or interrupted interrupt handler.

1503 .global _xt_medint6_exit

1504 .type _xt_medint6_exit,@function

1507 /* Restore only level-specific regs (the rest were already restored) */

1508 l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */

1510 l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */

1512 l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */

1513 l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */

1514 rsync /* ensure EPS and EPC written */
\r
1520 /*******************************************************************************
\r
1522 HIGH PRIORITY (LEVEL > XCHAL_EXCM_LEVEL) INTERRUPT VECTORS AND HANDLERS
\r
1524 High priority interrupts are by definition those with priorities greater
\r
1525 than XCHAL_EXCM_LEVEL. This includes non-maskable (NMI). High priority
\r
1526 interrupts cannot interact with the RTOS, that is they must save all regs
\r
1527 they use and not call any RTOS function.
\r
1529 A further restriction imposed by the Xtensa windowed architecture is that
\r
1530 high priority interrupts must not modify the stack area even logically
\r
1531 "above" the top of the interrupted stack (they need to provide their
\r
1532 own stack or static save area).
\r
1534 Cadence Design Systems recommends high priority interrupt handlers be coded in assembly
\r
1535 and used for purposes requiring very short service times.
\r
1537 Here are templates for high priority (level 2+) interrupt vectors.
\r
1538 They assume only one interrupt per level to avoid the burden of identifying
\r
1539 which interrupts at this level are pending and enabled. This allows for
\r
1540 minimum latency and avoids having to save/restore a2 in addition to a0.
\r
1541 If more than one interrupt per high priority level is configured, this burden
\r
1542 is on the handler which in any case must provide a way to save and restore
\r
1543 registers it uses without touching the interrupted stack.
\r
1545 Each vector goes at a predetermined location according to the Xtensa
\r
1546 hardware configuration, which is ensured by its placement in a special
\r
1547 section known to the Xtensa linker support package (LSP). It performs
\r
1548 the minimum necessary before jumping to the handler in the .text section.
\r
1550 *******************************************************************************/
\r
1553 These stubs just call xt_highintX/xt_nmi to handle the real interrupt. Please define
\r
1554 these in an external assembly source file. If these symbols are not defined anywhere
\r
1555 else, the defaults in xtensa_vector_defaults.S are used.
\r
/* -----------------------------------------------------------------------------
   High-priority interrupt vector stubs (levels 2-6) and the NMI vector.
   Each vector is emitted only when its level is above XCHAL_EXCM_LEVEL and
   is not the debug level. Every stub does the minimum: preserve a0 in the
   level's EXCSAVE register, then jump (call0 used as a jump) to the
   externally-provided handler xt_highintN / xt_nmi. Per the file's comments
   above, defaults for these handlers live in xtensa_vector_defaults.S.
   NOTE(review): gaps in the embedded original numbering show dropped lines
   here (presumably .align directives and the _LevelNVector: label lines);
   verify against the complete source file.
----------------------------------------------------------------------------- */
1558 #if XCHAL_NUM_INTLEVELS >=2 && XCHAL_EXCM_LEVEL <2 && XCHAL_DEBUGLEVEL !=2

1560 .begin literal_prefix .Level2InterruptVector

1561 .section .Level2InterruptVector.text, "ax"

1562 .global _Level2Vector

1563 .type _Level2Vector,@function

1564 .global xt_highint2

1567 wsr a0, EXCSAVE_2 /* preserve a0 */

1568 call0 xt_highint2 /* load interrupt handler */

1570 .end literal_prefix

1572 #endif /* Level 2 */

1574 #if XCHAL_NUM_INTLEVELS >=3 && XCHAL_EXCM_LEVEL <3 && XCHAL_DEBUGLEVEL !=3

1576 .begin literal_prefix .Level3InterruptVector

1577 .section .Level3InterruptVector.text, "ax"

1578 .global _Level3Vector

1579 .type _Level3Vector,@function

1580 .global xt_highint3

1583 wsr a0, EXCSAVE_3 /* preserve a0 */

1584 call0 xt_highint3 /* load interrupt handler */

1585 /* never returns here - call0 is used as a jump (see note at top) */

1587 .end literal_prefix

1589 #endif /* Level 3 */

1591 #if XCHAL_NUM_INTLEVELS >=4 && XCHAL_EXCM_LEVEL <4 && XCHAL_DEBUGLEVEL !=4

1593 .begin literal_prefix .Level4InterruptVector

1594 .section .Level4InterruptVector.text, "ax"

1595 .global _Level4Vector

1596 .type _Level4Vector,@function

1597 .global xt_highint4

1600 wsr a0, EXCSAVE_4 /* preserve a0 */

1601 call0 xt_highint4 /* load interrupt handler */

1602 /* never returns here - call0 is used as a jump (see note at top) */

1604 .end literal_prefix

1606 #endif /* Level 4 */

1608 #if XCHAL_NUM_INTLEVELS >=5 && XCHAL_EXCM_LEVEL <5 && XCHAL_DEBUGLEVEL !=5

1610 .begin literal_prefix .Level5InterruptVector

1611 .section .Level5InterruptVector.text, "ax"

1612 .global _Level5Vector

1613 .type _Level5Vector,@function

1614 .global xt_highint5

1617 wsr a0, EXCSAVE_5 /* preserve a0 */

1618 call0 xt_highint5 /* load interrupt handler */

1619 /* never returns here - call0 is used as a jump (see note at top) */

1621 .end literal_prefix

1623 #endif /* Level 5 */

1625 #if XCHAL_NUM_INTLEVELS >=6 && XCHAL_EXCM_LEVEL <6 && XCHAL_DEBUGLEVEL !=6

1627 .begin literal_prefix .Level6InterruptVector

1628 .section .Level6InterruptVector.text, "ax"

1629 .global _Level6Vector

1630 .type _Level6Vector,@function

1631 .global xt_highint6

1634 wsr a0, EXCSAVE_6 /* preserve a0 */

1635 call0 xt_highint6 /* load interrupt handler */

1636 /* never returns here - call0 is used as a jump (see note at top) */

1638 .end literal_prefix

1640 #endif /* Level 6 */

1642 #if XCHAL_HAVE_NMI

1644 .begin literal_prefix .NMIExceptionVector

1645 .section .NMIExceptionVector.text, "ax"

1646 .global _NMIExceptionVector

1647 .type _NMIExceptionVector,@function

1650 _NMIExceptionVector:

/* NOTE(review): the trailing "_" token below looks like extraction damage;
   the operand should select the NMI level's EXCSAVE register. Confirm the
   exact original operand in the full source before assembling. */
1651 wsr a0, EXCSAVE + XCHAL_NMILEVEL _ /* preserve a0 */

1652 call0 xt_nmi /* load interrupt handler */

1653 /* never returns here - call0 is used as a jump (see note at top) */

1655 .end literal_prefix
\r
1660 /*******************************************************************************
\r
1662 WINDOW OVERFLOW AND UNDERFLOW EXCEPTION VECTORS AND ALLOCA EXCEPTION HANDLER
\r
1664 Here is the code for each window overflow/underflow exception vector and
\r
1665 (interspersed) efficient code for handling the alloca exception cause.
\r
1666 Window exceptions are handled entirely in the vector area and are very
\r
1667 tight for performance. The alloca exception is also handled entirely in
\r
1668 the window vector area so comes at essentially no cost in code size.
\r
1669 Users should never need to modify them and Cadence Design Systems recommends
\r
1672 Window handlers go at predetermined vector locations according to the
\r
1673 Xtensa hardware configuration, which is ensured by their placement in a
\r
1674 special section known to the Xtensa linker support package (LSP). Since
\r
1675 their offsets in that section are always the same, the LSPs do not define
\r
1676 a section per vector.
\r
1678 These things are coded for XEA2 only (XEA1 is not supported).
\r
1680 Note on Underflow Handlers:
\r
1681 The underflow handler for returning from call[i+1] to call[i]
\r
1682 must preserve all the registers from call[i+1]'s window.
\r
1683 In particular, a0 and a1 must be preserved because the RETW instruction
\r
1684 will be reexecuted (and may even underflow if an intervening exception
\r
1685 has flushed call[i]'s registers).
\r
1686 Registers a2 and up may contain return values.
\r
1688 *******************************************************************************/
\r
1690 #if XCHAL_HAVE_WINDOWED
\r
1692 .section .WindowVectors.text, "ax"
\r
1695 --------------------------------------------------------------------------------
\r
1696 Window Overflow Exception for Call4.
\r
1698 Invoked if a call[i] referenced a register (a4-a15)
\r
1699 that contains data from ancestor call[j];
\r
1700 call[j] had done a call4 to call[j+1].
\r
1702 window rotated to call[j] start point;
\r
1703 a0-a3 are registers to be saved;
\r
1704 a4-a15 must be preserved;
\r
1705 a5 is call[j+1]'s stack pointer.
\r
1706 --------------------------------------------------------------------------------
\r
/* Window Overflow for call4: spill call[j]'s a0-a3 into the base-save area
   of call[j+1]'s stack frame (a5 = call[j+1]'s SP per the comment block
   above), then rfwo rotates the window back to the faulting call[i].
   NOTE(review): the _WindowOverflow4: label line (original lines 1711-1712)
   is not visible in this extract -- verify against the full source. */
1710 .global _WindowOverflow4

1713 s32e a0, a5, -16 /* save a0 to call[j+1]'s stack frame */

1714 s32e a1, a5, -12 /* save a1 to call[j+1]'s stack frame */

1715 s32e a2, a5, -8 /* save a2 to call[j+1]'s stack frame */

1716 s32e a3, a5, -4 /* save a3 to call[j+1]'s stack frame */

1717 rfwo /* rotates back to call[i] position */
\r
1720 --------------------------------------------------------------------------------
\r
1721 Window Underflow Exception for Call4
\r
1723 Invoked by RETW returning from call[i+1] to call[i]
\r
1724 where call[i]'s registers must be reloaded (not live in ARs);
\r
1725 where call[i] had done a call4 to call[i+1].
\r
1727 window rotated to call[i] start point;
\r
1728 a0-a3 are undefined, must be reloaded with call[i].reg[0..3];
\r
1729 a4-a15 must be preserved (they are call[i+1].reg[0..11]);
\r
1730 a5 is call[i+1]'s stack pointer.
\r
1731 --------------------------------------------------------------------------------
\r
/* Window Underflow for call4: reload call[i]'s a0-a3 from the base-save
   area of call[i+1]'s stack frame (a5 = call[i+1]'s SP per the comment
   block above). Also the jump target of the alloca handler below.
   NOTE(review): the terminating rfwu (original line 1742) is not visible
   in this extract -- verify against the full source. */
1735 .global _WindowUnderflow4

1736 _WindowUnderflow4:

1738 l32e a0, a5, -16 /* restore a0 from call[i+1]'s stack frame */

1739 l32e a1, a5, -12 /* restore a1 from call[i+1]'s stack frame */

1740 l32e a2, a5, -8 /* restore a2 from call[i+1]'s stack frame */

1741 l32e a3, a5, -4 /* restore a3 from call[i+1]'s stack frame */
\r
1745 --------------------------------------------------------------------------------
\r
1746 Handle alloca exception generated by interruptee executing 'movsp'.
\r
1747 This uses space between the window vectors, so is essentially "free".
\r
1748 All interruptee's regs are intact except a0 which is saved in EXCSAVE_1,
\r
1749 and PS.EXCM has been set by the exception hardware (can't be interrupted).
\r
1750 The fact the alloca exception was taken means the registers associated with
\r
1751 the base-save area have been spilled and will be restored by the underflow
\r
1752 handler, so those 4 registers are available for scratch.
\r
1753 The code is optimized to avoid unaligned branches and minimize cache misses.
\r
1754 --------------------------------------------------------------------------------
\r
/* Alloca (movsp) exception handler. Rotates the window back one frame so
   WINDOWBASE lands in a4, patches PS.OWB to account for the rotation,
   recovers the interruptee's original a0 from EXCSAVE_1, then dispatches
   to the matching underflow handler based on the call-size bits of the
   saved return address (bit 31 -> not call4; bit 30 distinguishes
   call8/call12 after a second rotw).
   NOTE(review): the _xt_alloca_exc: label and the instruction at original
   line 1763 (presumably "rsr a2, PS", which the extui below consumes) are
   not visible in this extract -- verify against the full source. */
1758 .global _xt_alloca_exc

1761 rsr a0, WINDOWBASE /* grab WINDOWBASE before rotw changes it */

1762 rotw -1 /* WINDOWBASE goes to a4, new a0-a3 are scratch */

1764 extui a3, a2, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS

1765 xor a3, a3, a4 /* bits changed from old to current windowbase */

1766 rsr a4, EXCSAVE_1 /* restore original a0 (now in a4) */

1767 slli a3, a3, XCHAL_PS_OWB_SHIFT

1768 xor a2, a2, a3 /* flip changed bits in old window base */

1769 wsr a2, PS /* update PS.OWB to new window base */

1772 _bbci.l a4, 31, _WindowUnderflow4

1773 rotw -1 /* original a0 goes to a8 */

1774 _bbci.l a8, 30, _WindowUnderflow8

1776 j _WindowUnderflow12
\r
1779 --------------------------------------------------------------------------------
\r
1780 Window Overflow Exception for Call8
\r
1782 Invoked if a call[i] referenced a register (a4-a15)
\r
1783 that contains data from ancestor call[j];
\r
1784 call[j] had done a call8 to call[j+1].
\r
1786 window rotated to call[j] start point;
\r
1787 a0-a7 are registers to be saved;
\r
1788 a8-a15 must be preserved;
\r
1789 a9 is call[j+1]'s stack pointer.
\r
1790 --------------------------------------------------------------------------------
\r
/* Window Overflow for call8: spill call[j]'s a0-a3 into call[j+1]'s
   base-save area (a9 = call[j+1]'s SP) and a4-a7 into the extra-save area
   at the end of call[j]'s own frame, located via call[j-1]'s SP fetched
   from a1's base-save slot. rfwo rotates back to the faulting call[i].
   NOTE(review): the _WindowOverflow8: label line (original 1795-1796) is
   not visible in this extract -- verify against the full source. */
1794 .global _WindowOverflow8

1797 s32e a0, a9, -16 /* save a0 to call[j+1]'s stack frame */

1798 l32e a0, a1, -12 /* a0 <- call[j-1]'s sp

1799 (used to find end of call[j]'s frame) */

1800 s32e a1, a9, -12 /* save a1 to call[j+1]'s stack frame */

1801 s32e a2, a9, -8 /* save a2 to call[j+1]'s stack frame */

1802 s32e a3, a9, -4 /* save a3 to call[j+1]'s stack frame */

1803 s32e a4, a0, -32 /* save a4 to call[j]'s stack frame */

1804 s32e a5, a0, -28 /* save a5 to call[j]'s stack frame */

1805 s32e a6, a0, -24 /* save a6 to call[j]'s stack frame */

1806 s32e a7, a0, -20 /* save a7 to call[j]'s stack frame */

1807 rfwo /* rotates back to call[i] position */
\r
1810 --------------------------------------------------------------------------------
\r
1811 Window Underflow Exception for Call8
\r
1813 Invoked by RETW returning from call[i+1] to call[i]
\r
1814 where call[i]'s registers must be reloaded (not live in ARs);
\r
1815 where call[i] had done a call8 to call[i+1].
\r
1817 window rotated to call[i] start point;
\r
1818 a0-a7 are undefined, must be reloaded with call[i].reg[0..7];
\r
1819 a8-a15 must be preserved (they are call[i+1].reg[0..7]);
\r
1820 a9 is call[i+1]'s stack pointer.
\r
1821 --------------------------------------------------------------------------------
\r
/* Window Underflow for call8: reload call[i]'s a0-a3 from call[i+1]'s
   base-save area (a9 = call[i+1]'s SP) and a4-a7 from the end of call[i]'s
   frame, located via call[i-1]'s SP read from a1's base-save slot (a7 is
   used as the temporary frame pointer, then overwritten last).
   NOTE(review): the _WindowUnderflow8: label is present but the
   terminating rfwu (original lines 1838-1840) is not visible in this
   extract -- verify against the full source. */
1825 .global _WindowUnderflow8

1826 _WindowUnderflow8:

1828 l32e a0, a9, -16 /* restore a0 from call[i+1]'s stack frame */

1829 l32e a1, a9, -12 /* restore a1 from call[i+1]'s stack frame */

1830 l32e a2, a9, -8 /* restore a2 from call[i+1]'s stack frame */

1831 l32e a7, a1, -12 /* a7 <- call[i-1]'s sp

1832 (used to find end of call[i]'s frame) */

1833 l32e a3, a9, -4 /* restore a3 from call[i+1]'s stack frame */

1834 l32e a4, a7, -32 /* restore a4 from call[i]'s stack frame */

1835 l32e a5, a7, -28 /* restore a5 from call[i]'s stack frame */

1836 l32e a6, a7, -24 /* restore a6 from call[i]'s stack frame */

1837 l32e a7, a7, -20 /* restore a7 from call[i]'s stack frame */
\r
1841 --------------------------------------------------------------------------------
\r
1842 Window Overflow Exception for Call12
\r
1844 Invoked if a call[i] referenced a register (a4-a15)
\r
1845 that contains data from ancestor call[j];
\r
1846 call[j] had done a call12 to call[j+1].
\r
1848 window rotated to call[j] start point;
\r
1849 a0-a11 are registers to be saved;
\r
1850 a12-a15 must be preserved;
\r
1851 a13 is call[j+1]'s stack pointer.
\r
1852 --------------------------------------------------------------------------------
\r
/* Window Overflow for call12: spill call[j]'s a0-a3 into call[j+1]'s
   base-save area (a13 = call[j+1]'s SP) and a4-a11 into the extra-save
   area at the end of call[j]'s own frame, located via call[j-1]'s SP read
   from a1's base-save slot. rfwo rotates back to the faulting call[i].
   NOTE(review): the .align directive preceding the label (original line
   1858 region) is not visible in this extract -- verify against the full
   source. */
1856 .global _WindowOverflow12

1857 _WindowOverflow12:

1859 s32e a0, a13, -16 /* save a0 to call[j+1]'s stack frame */

1860 l32e a0, a1, -12 /* a0 <- call[j-1]'s sp

1861 (used to find end of call[j]'s frame) */

1862 s32e a1, a13, -12 /* save a1 to call[j+1]'s stack frame */

1863 s32e a2, a13, -8 /* save a2 to call[j+1]'s stack frame */

1864 s32e a3, a13, -4 /* save a3 to call[j+1]'s stack frame */

1865 s32e a4, a0, -48 /* save a4 to end of call[j]'s stack frame */

1866 s32e a5, a0, -44 /* save a5 to end of call[j]'s stack frame */

1867 s32e a6, a0, -40 /* save a6 to end of call[j]'s stack frame */

1868 s32e a7, a0, -36 /* save a7 to end of call[j]'s stack frame */

1869 s32e a8, a0, -32 /* save a8 to end of call[j]'s stack frame */

1870 s32e a9, a0, -28 /* save a9 to end of call[j]'s stack frame */

1871 s32e a10, a0, -24 /* save a10 to end of call[j]'s stack frame */

1872 s32e a11, a0, -20 /* save a11 to end of call[j]'s stack frame */

1873 rfwo /* rotates back to call[i] position */
\r
1876 --------------------------------------------------------------------------------
\r
1877 Window Underflow Exception for Call12
\r
1879 Invoked by RETW returning from call[i+1] to call[i]
\r
1880 where call[i]'s registers must be reloaded (not live in ARs);
\r
1881 where call[i] had done a call12 to call[i+1].
\r
1883 window rotated to call[i] start point;
\r
1884 a0-a11 are undefined, must be reloaded with call[i].reg[0..11];
\r
1885 a12-a15 must be preserved (they are call[i+1].reg[0..3]);
\r
1886 a13 is call[i+1]'s stack pointer.
\r
1887 --------------------------------------------------------------------------------
\r
1891 .global _WindowUnderflow12
\r
1892 _WindowUnderflow12:
\r
1894 l32e a0, a13, -16 /* restore a0 from call[i+1]'s stack frame */
\r
1895 l32e a1, a13, -12 /* restore a1 from call[i+1]'s stack frame */
\r
1896 l32e a2, a13, -8 /* restore a2 from call[i+1]'s stack frame */
\r
1897 l32e a11, a1, -12 /* a11 <- call[i-1]'s sp
\r
1898 (used to find end of call[i]'s frame) */
\r
1899 l32e a3, a13, -4 /* restore a3 from call[i+1]'s stack frame */
\r
1900 l32e a4, a11, -48 /* restore a4 from end of call[i]'s stack frame */
\r
1901 l32e a5, a11, -44 /* restore a5 from end of call[i]'s stack frame */
\r
1902 l32e a6, a11, -40 /* restore a6 from end of call[i]'s stack frame */
\r
1903 l32e a7, a11, -36 /* restore a7 from end of call[i]'s stack frame */
\r
1904 l32e a8, a11, -32 /* restore a8 from end of call[i]'s stack frame */
\r
1905 l32e a9, a11, -28 /* restore a9 from end of call[i]'s stack frame */
\r
1906 l32e a10, a11, -24 /* restore a10 from end of call[i]'s stack frame */
\r
1907 l32e a11, a11, -20 /* restore a11 from end of call[i]'s stack frame */
\r
1910 #endif /* XCHAL_HAVE_WINDOWED */
\r
1912 .section .UserEnter.text, "ax"
\r
1913 .global call_user_start
\r
1914 .type call_user_start,@function
\r