--- /dev/null
+/*\r
+ * Since at least FreeRTOS V7.5.3 uxTopUsedPriority is no longer\r
+ * present in the kernel, so it has to be supplied by other means for\r
+ * OpenOCD's threads awareness.\r
+ *\r
+ * Add this file to your project, and, if you're using --gc-sections,\r
+ * ``--undefined=uxTopUsedPriority'' (or\r
+ * ``-Wl,--undefined=uxTopUsedPriority'' when using gcc for final\r
+ * linking) to your LDFLAGS; same with all the other symbols you need.\r
+ */\r
+\r
+#include "FreeRTOS.h"\r
+#include "esp_attr.h"\r
+#include "sdkconfig.h"\r
+\r
+#ifdef __GNUC__\r
+#define USED __attribute__((used))\r
+#else\r
+#define USED\r
+#endif\r
+\r
+#ifdef CONFIG_ESP32_DEBUG_OCDAWARE\r
+/* Highest task priority in use, exported for OpenOCD's FreeRTOS thread\r
+ * awareness (see file header). USED prevents --gc-sections from dropping it;\r
+ * DRAM_ATTR places it in DRAM — presumably so the debugger can always read\r
+ * it without going through the flash cache (NOTE(review): confirm). */\r
+const int USED DRAM_ATTR uxTopUsedPriority = configMAX_PRIORITIES - 1;\r
+#endif\r
--- /dev/null
+/*******************************************************************************\r
+// Copyright (c) 2003-2015 Cadence Design Systems, Inc.\r
+//\r
+// Permission is hereby granted, free of charge, to any person obtaining\r
+// a copy of this software and associated documentation files (the\r
+// "Software"), to deal in the Software without restriction, including\r
+// without limitation the rights to use, copy, modify, merge, publish,\r
+// distribute, sublicense, and/or sell copies of the Software, and to\r
+// permit persons to whom the Software is furnished to do so, subject to\r
+// the following conditions:\r
+//\r
+// The above copyright notice and this permission notice shall be included\r
+// in all copies or substantial portions of the Software.\r
+//\r
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+/*\r
+ * This utility helps benchmarking interrupt latency and context switches.\r
+ * In order to enable it, set configBENCHMARK to 1 in FreeRTOSConfig.h.\r
+ * You will also need to download the FreeRTOS_trace patch that contains\r
+ * portbenchmark.c and the complete version of portbenchmark.h\r
+ */\r
+\r
+#ifndef PORTBENCHMARK_H\r
+#define PORTBENCHMARK_H\r
+\r
+#if configBENCHMARK\r
+ #error "You need to download the FreeRTOS_trace patch that overwrites this file"\r
+#endif\r
+\r
+#define portbenchmarkINTERRUPT_DISABLE()\r
+#define portbenchmarkINTERRUPT_RESTORE(newstate)\r
+#define portbenchmarkIntLatency()\r
+#define portbenchmarkIntWait()\r
+#define portbenchmarkReset()\r
+#define portbenchmarkPrint()\r
+\r
+#endif /* PORTBENCHMARK_H */\r
--- /dev/null
+/*\r
+ FreeRTOS V8.2.0 - Copyright (C) 2015 Real Time Engineers Ltd.\r
+ All rights reserved\r
+\r
+ VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.\r
+\r
+ ***************************************************************************\r
+ * *\r
+ * FreeRTOS provides completely free yet professionally developed, *\r
+ * robust, strictly quality controlled, supported, and cross *\r
+ * platform software that has become a de facto standard. *\r
+ * *\r
+ * Help yourself get started quickly and support the FreeRTOS *\r
+ * project by purchasing a FreeRTOS tutorial book, reference *\r
+ * manual, or both from: http://www.FreeRTOS.org/Documentation *\r
+ * *\r
+ * Thank you! *\r
+ * *\r
+ ***************************************************************************\r
+\r
+ This file is part of the FreeRTOS distribution.\r
+\r
+ FreeRTOS is free software; you can redistribute it and/or modify it under\r
+ the terms of the GNU General Public License (version 2) as published by the\r
+ Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.\r
+\r
+ >>! NOTE: The modification to the GPL is included to allow you to !<<\r
+ >>! distribute a combined work that includes FreeRTOS without being !<<\r
+ >>! obliged to provide the source code for proprietary components !<<\r
+ >>! outside of the FreeRTOS kernel. !<<\r
+\r
+ FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY\r
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\r
+ FOR A PARTICULAR PURPOSE. Full license text is available from the following\r
+ link: http://www.freertos.org/a00114.html\r
+\r
+ 1 tab == 4 spaces!\r
+\r
+ ***************************************************************************\r
+ * *\r
+ * Having a problem? Start by reading the FAQ "My application does *\r
+ * not run, what could be wrong?" *\r
+ * *\r
+ * http://www.FreeRTOS.org/FAQHelp.html *\r
+ * *\r
+ ***************************************************************************\r
+\r
+ http://www.FreeRTOS.org - Documentation, books, training, latest versions,\r
+ license and Real Time Engineers Ltd. contact details.\r
+\r
+ http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,\r
+ including FreeRTOS+Trace - an indispensable productivity tool, a DOS\r
+ compatible FAT file system, and our tiny thread aware UDP/IP stack.\r
+\r
+ http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High\r
+ Integrity Systems to sell under the OpenRTOS brand. Low cost OpenRTOS\r
+ licenses offer ticketed support, indemnification and middleware.\r
+\r
+ http://www.SafeRTOS.com - High Integrity Systems also provide a safety\r
+ engineered and independently SIL3 certified version for use in safety and\r
+ mission critical applications that require provable dependability.\r
+\r
+ 1 tab == 4 spaces!\r
+*/\r
+\r
+#ifndef PORTMACRO_H\r
+#define PORTMACRO_H\r
+\r
+#ifdef __cplusplus\r
+extern "C" {\r
+#endif\r
+\r
+#ifndef __ASSEMBLER__\r
+\r
+#include <stdint.h>\r
+\r
+#include <xtensa/hal.h>\r
+#include <xtensa/config/core.h>\r
+#include <xtensa/config/system.h> /* required for XSHAL_CLIB */\r
+#include <xtensa/xtruntime.h>\r
+#include "esp_timer.h" /* required for FreeRTOS run time stats */\r
+#include "esp_system.h"\r
+\r
+\r
+#include <esp_heap_caps.h>\r
+#include "soc/soc_memory_layout.h"\r
+\r
+//#include "xtensa_context.h"\r
+\r
+/*-----------------------------------------------------------\r
+ * Port specific definitions.\r
+ *\r
+ * The settings in this file configure FreeRTOS correctly for the\r
+ * given hardware and compiler.\r
+ *\r
+ * These settings should not be altered.\r
+ *-----------------------------------------------------------\r
+ */\r
+\r
+/* Type definitions. */\r
+\r
+#define portCHAR int8_t\r
+#define portFLOAT float\r
+#define portDOUBLE double\r
+#define portLONG int32_t\r
+#define portSHORT int16_t\r
+#define portSTACK_TYPE uint8_t\r
+#define portBASE_TYPE int\r
+\r
+typedef portSTACK_TYPE StackType_t;\r
+typedef portBASE_TYPE BaseType_t;\r
+typedef unsigned portBASE_TYPE UBaseType_t;\r
+\r
+#if( configUSE_16_BIT_TICKS == 1 )\r
+ typedef uint16_t TickType_t;\r
+ #define portMAX_DELAY ( TickType_t ) 0xffff\r
+#else\r
+ typedef uint32_t TickType_t;\r
+ #define portMAX_DELAY ( TickType_t ) 0xffffffffUL\r
+#endif\r
+/*-----------------------------------------------------------*/\r
+\r
+// portbenchmark\r
+#include "portbenchmark.h"\r
+\r
+#include "sdkconfig.h"\r
+#include "esp_attr.h"\r
+\r
+/* "mux" data structure (spinlock) */\r
+typedef struct {\r
+	/* owner field values:\r
+	 * 0				   - Uninitialized (invalid)\r
+	 * portMUX_FREE_VAL    - Mux is free, can be locked by either CPU\r
+	 * CORE_ID_PRO / CORE_ID_APP - Mux is locked to the particular core\r
+	 *\r
+	 * Any value other than portMUX_FREE_VAL, CORE_ID_PRO, CORE_ID_APP indicates corruption\r
+	 */\r
+	uint32_t owner;\r
+	/* count field:\r
+	 * If mux is unlocked, count should be zero.\r
+	 * If mux is locked, count is non-zero & represents the number of recursive locks on the mux.\r
+	 */\r
+	uint32_t count;\r
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG\r
+	const char *lastLockedFn;   /* debug only: function that last took the lock */\r
+	int lastLockedLine;         /* debug only: source line of that lock site */\r
+#endif\r
+} portMUX_TYPE;\r
+\r
+#define portMUX_FREE_VAL 0xB33FFFFF\r
+\r
+/* Special constants for vPortCPUAcquireMutexTimeout() */\r
+#define portMUX_NO_TIMEOUT (-1) /* When passed for 'timeout_cycles', spin forever if necessary */\r
+#define portMUX_TRY_LOCK 0 /* Try to acquire the spinlock a single time only */\r
+\r
+// Keep this in sync with the portMUX_TYPE struct definition please.\r
+#ifndef CONFIG_FREERTOS_PORTMUX_DEBUG\r
+#define portMUX_INITIALIZER_UNLOCKED { \\r
+ .owner = portMUX_FREE_VAL, \\r
+ .count = 0, \\r
+ }\r
+#else\r
+#define portMUX_INITIALIZER_UNLOCKED { \\r
+ .owner = portMUX_FREE_VAL, \\r
+ .count = 0, \\r
+ .lastLockedFn = "(never locked)", \\r
+ .lastLockedLine = -1 \\r
+ }\r
+#endif\r
+\r
+\r
+#define portASSERT_IF_IN_ISR()    vPortAssertIfInISR()\r
+/* Aborts (asserts) if called from ISR context. Declared (void) rather than\r
+ * with an empty parameter list: in C, "()" declares an unprototyped function\r
+ * and disables argument checking. */\r
+void vPortAssertIfInISR(void);\r
+\r
+#define portCRITICAL_NESTING_IN_TCB 1\r
+\r
+/*\r
+Modifications to portENTER_CRITICAL.\r
+\r
+For an introduction, see "Critical Sections & Disabling Interrupts" in docs/api-guides/freertos-smp.rst\r
+\r
+The original portENTER_CRITICAL only disabled the ISRs. This is enough for single-CPU operation: by\r
+disabling the interrupts, there is no task switch so no other tasks can meddle in the data, and because\r
+interrupts are disabled, ISRs can't corrupt data structures either.\r
+\r
+For multiprocessing, things get a bit more hairy. First of all, disabling the interrupts doesn't stop\r
+the tasks or ISRs on the other processors meddling with our CPU. For tasks, this is solved by adding\r
+a spinlock to the portENTER_CRITICAL macro. A task running on the other CPU accessing the same data will\r
+spinlock in the portENTER_CRITICAL code until the first CPU is done.\r
+\r
+For ISRs, we now also need muxes: while portENTER_CRITICAL disabling interrupts will stop ISRs on the same\r
+CPU from meddling with the data, it does not stop interrupts on the other cores from interfering with the\r
+data. For this, we also use a spinlock in the routines called by the ISR, but these spinlocks\r
+do not disable the interrupts (because they already are).\r
+\r
+This all assumes that interrupts are either entirely disabled or enabled. Interrupt priority levels\r
+will break this scheme.\r
+\r
+Remark: For the ESP32, portENTER_CRITICAL and portENTER_CRITICAL_ISR both alias vTaskEnterCritical, meaning\r
+that either function can be called both from ISR as well as task context. This is not standard FreeRTOS \r
+behaviour; please keep this in mind if you need any compatibility with other FreeRTOS implementations.\r
+*/\r
+void vPortCPUInitializeMutex(portMUX_TYPE *mux);\r
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG\r
+#error CONFIG_FREERTOS_PORTMUX_DEBUG not supported in Amazon FreeRTOS\r
+#endif\r
+\r
+/* FreeRTOS critical-section primitives (implemented in tasks.c). Declared\r
+ * (void) instead of "()" so the compiler rejects accidental arguments\r
+ * (an empty parameter list is an unprototyped declaration in C). */\r
+void vTaskExitCritical(void);\r
+void vTaskEnterCritical(void);\r
+\r
+/* Compile-time sink for the optional spinlock argument of the\r
+ * portENTER_CRITICAL()/portEXIT_CRITICAL() macros below: it lets those\r
+ * macros accept either no argument or a (mux) argument. Empty body — the\r
+ * spinlock itself is handled by vTaskEnterCritical()/vTaskExitCritical(). */\r
+static inline void vPortConsumeSpinlockArg(int unused, ...)\r
+{\r
+}\r
+\r
+/** @brief Acquire a portmux spinlock with a timeout\r
+ *\r
+ * @param mux Pointer to portmux to acquire.\r
+ * @param timeout_cycles Timeout to spin, in CPU cycles. Pass portMUX_NO_TIMEOUT to wait forever,\r
+ * portMUX_TRY_LOCK to try a single time to acquire the lock.\r
+ *\r
+ * @return true if mutex is successfully acquired, false on timeout.\r
+ */\r
+bool vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles);\r
+void vPortCPUReleaseMutex(portMUX_TYPE *mux);\r
+\r
+#define portENTER_CRITICAL(...) do { vTaskEnterCritical(); vPortConsumeSpinlockArg(0, ##__VA_ARGS__); } while(0)\r
+#define portEXIT_CRITICAL(...) do { vTaskExitCritical(); vPortConsumeSpinlockArg(0, ##__VA_ARGS__); } while(0)\r
+\r
+\r
+#define portENTER_CRITICAL_ISR(mux) vPortCPUAcquireMutexTimeout(mux, portMUX_NO_TIMEOUT)\r
+#define portEXIT_CRITICAL_ISR(mux) vPortCPUReleaseMutex(mux)\r
+\r
+#define portENTER_CRITICAL_SAFE(mux) do { \\r
+ if (xPortInIsrContext()) { \\r
+ portENTER_CRITICAL_ISR(mux); \\r
+ } else { \\r
+ portENTER_CRITICAL(mux); \\r
+ } \\r
+ } while(0)\r
+\r
+#define portEXIT_CRITICAL_SAFE(mux) do { \\r
+ if (xPortInIsrContext()) { \\r
+ portEXIT_CRITICAL_ISR(mux); \\r
+ } else { \\r
+ portEXIT_CRITICAL(mux); \\r
+ } \\r
+ } while(0)\r
+\r
+\r
+// Critical section management. NW-TODO: replace XTOS_SET_INTLEVEL with more efficient version, if any?\r
+// These cannot be nested. They should be used with a lot of care and cannot be called from interrupt level.\r
+//\r
+// Only applies to one CPU. See notes above & below for reasons not to use these.\r
+#define portDISABLE_INTERRUPTS() do { XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL); portbenchmarkINTERRUPT_DISABLE(); } while (0)\r
+#define portENABLE_INTERRUPTS() do { portbenchmarkINTERRUPT_RESTORE(0); XTOS_SET_INTLEVEL(0); } while (0)\r
+\r
+// Cleaner solution allows nested interrupts disabling and restoring via local registers or stack.\r
+// They can be called from interrupts too.\r
+// WARNING: Only applies to current CPU. See notes above.\r
+/* Raise the current CPU's interrupt level to XCHAL_EXCM_LEVEL and return the\r
+ * previous PS state, to be restored later with portEXIT_CRITICAL_NESTED().\r
+ * Unlike portDISABLE_INTERRUPTS(), these calls nest and are ISR-safe (see\r
+ * comment above). Declared (void) rather than "()" — an empty parameter list\r
+ * is an unprototyped declaration in C and disables argument checking. */\r
+static inline unsigned portENTER_CRITICAL_NESTED(void) {\r
+    unsigned state = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);\r
+    portbenchmarkINTERRUPT_DISABLE();\r
+    return state;\r
+}\r
+#define portEXIT_CRITICAL_NESTED(state) do { portbenchmarkINTERRUPT_RESTORE(state); XTOS_RESTORE_JUST_INTLEVEL(state); } while (0)\r
+\r
+// These FreeRTOS versions are similar to the nested versions above\r
+#define portSET_INTERRUPT_MASK_FROM_ISR() portENTER_CRITICAL_NESTED()\r
+#define portCLEAR_INTERRUPT_MASK_FROM_ISR(state) portEXIT_CRITICAL_NESTED(state)\r
+\r
+//Because the ROM routines don't necessarily handle a stack in external RAM correctly, we force\r
+//the stack memory to always be internal.\r
+#define pvPortMallocTcbMem(size) heap_caps_malloc(size, MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT)\r
+#define pvPortMallocStackMem(size) heap_caps_malloc(size, MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT)\r
+\r
+//xTaskCreateStatic uses these functions to check incoming memory.\r
+#define portVALID_TCB_MEM(ptr) (esp_ptr_internal(ptr) && esp_ptr_byte_accessible(ptr))\r
+#ifdef CONFIG_SPIRAM_ALLOW_STACK_EXTERNAL_MEMORY\r
+#define portVALID_STACK_MEM(ptr) esp_ptr_byte_accessible(ptr)\r
+#else\r
+#define portVALID_STACK_MEM(ptr) (esp_ptr_internal(ptr) && esp_ptr_byte_accessible(ptr))\r
+#endif\r
+\r
+/*\r
+ * Wrapper for the Xtensa compare-and-set instruction. This subroutine will atomically compare\r
+ * *addr to 'compare'. If *addr == compare, *addr is set to *set. *set is updated with the previous\r
+ * value of *addr (either 'compare' or some other value.)\r
+ *\r
+ * Warning: From the ISA docs: in some (unspecified) cases, the s32c1i instruction may return the\r
+ * *bitwise inverse* of the old mem if the mem wasn't written. This doesn't seem to happen on the\r
+ * ESP32 (portMUX assertions would fail).\r
+ */\r
+/* See contract in the comment block above: atomic compare-and-set via the\r
+ * Xtensa S32C1I instruction, with SCOMPARE1 holding the compare value.\r
+ * On return *set holds the previous value of *addr.\r
+ * NOTE(review): the asm has no "memory" clobber, so the compiler is free to\r
+ * cache other memory across this call; callers appear to rely on the\r
+ * volatile-qualified addr only — confirm this is intentional. */\r
+static inline void uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set) {\r
+    __asm__ __volatile__ (\r
+        "WSR 	    %2,SCOMPARE1 \n"   /* load compare value into SCOMPARE1 */\r
+        "S32C1I     %0, %1, 0	 \n"   /* conditional store; %0 gets old *addr */\r
+        :"=r"(*set)\r
+        :"r"(addr), "r"(compare), "0"(*set)\r
+        );\r
+}\r
+\r
+void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set);\r
+\r
+/*-----------------------------------------------------------*/\r
+\r
+/* Architecture specifics. */\r
+#define portSTACK_GROWTH ( -1 )\r
+#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )\r
+#define portBYTE_ALIGNMENT 4\r
+#define portNOP() XT_NOP()\r
+/*-----------------------------------------------------------*/\r
+\r
+/* Fine resolution time */\r
+#define portGET_RUN_TIME_COUNTER_VALUE() xthal_get_ccount()\r
+//ccount or esp_timer are initialized elsewhere\r
+#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS()\r
+\r
+#ifdef CONFIG_FREERTOS_RUN_TIME_STATS_USING_ESP_TIMER\r
+/* Coarse resolution time (us) */\r
+#define portALT_GET_RUN_TIME_COUNTER_VALUE(x) x = (uint32_t)esp_timer_get_time()\r
+#endif\r
+\r
+\r
+\r
+/* Kernel utilities. */\r
+void vPortYield( void );\r
+void _frxt_setup_switch( void );\r
+#define portYIELD() vPortYield()\r
+#define portYIELD_FROM_ISR() {traceISR_EXIT_TO_SCHEDULER(); _frxt_setup_switch();}\r
+\r
+static inline uint32_t xPortGetCoreID();\r
+\r
+/*-----------------------------------------------------------*/\r
+\r
+/* Task function macros as described on the FreeRTOS.org WEB site. */\r
+#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )\r
+#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )\r
+\r
+// When coprocessors are defined, we need to maintain a pointer to coprocessors area.\r
+// We currently use a hack: redefine field xMPU_SETTINGS in TCB block as a structure that can hold:\r
+// MPU wrappers, coprocessor area pointer, trace code structure, and more if needed.\r
+// The field is normally used for memory protection. FreeRTOS should create another general purpose field.\r
+/* Overloaded TCB field (see comment above): carries the coprocessor save\r
+ * area pointer and/or trace state, depending on configuration. */\r
+typedef struct {\r
+	#if XCHAL_CP_NUM > 0\r
+	volatile StackType_t* coproc_area; // Pointer to coprocessor save area; MUST BE FIRST\r
+	#endif\r
+\r
+	#if portUSING_MPU_WRAPPERS\r
+	// Define here mpu_settings, which is port dependent\r
+	int mpu_setting; // Just a dummy example here; MPU not ported to Xtensa yet\r
+	#endif\r
+\r
+	#if configUSE_TRACE_FACILITY_2\r
+	struct {\r
+		// Cf. porttraceStamp()\r
+		int taskstamp;        /* Stamp from inside task to see where we are */\r
+		int taskstampcount;   /* A counter usually incremented when we restart the task's loop */\r
+	} porttrace;\r
+	#endif\r
+} xMPU_SETTINGS;\r
+\r
+// Main hack to use MPU_wrappers even when no MPU is defined (warning: mpu_setting should not be accessed; otherwise move this above xMPU_SETTINGS)\r
+#if (XCHAL_CP_NUM > 0 || configUSE_TRACE_FACILITY_2) && !portUSING_MPU_WRAPPERS // If MPU wrappers not used, we still need to allocate coproc area\r
+ #undef portUSING_MPU_WRAPPERS\r
+ #define portUSING_MPU_WRAPPERS 1 // Enable it to allocate coproc area\r
+ #define MPU_WRAPPERS_H // Override mpu_wrapper.h to disable unwanted code\r
+ #define PRIVILEGED_FUNCTION\r
+ #define PRIVILEGED_DATA\r
+#endif\r
+\r
+bool vApplicationSleep( TickType_t xExpectedIdleTime );\r
+\r
+#define portSUPPRESS_TICKS_AND_SLEEP( idleTime ) vApplicationSleep( idleTime )\r
+\r
+\r
+\r
+void _xt_coproc_release(volatile void * coproc_sa_base);\r
+\r
+\r
+/*\r
+ * Map to the memory management routines required for the port.\r
+ *\r
+ * Note that libc standard malloc/free are also available for\r
+ * non-FreeRTOS-specific code, and behave the same as\r
+ * pvPortMalloc()/vPortFree().\r
+ */\r
+#define pvPortMalloc heap_caps_malloc_default\r
+#define vPortFree heap_caps_free\r
+#define xPortGetFreeHeapSize esp_get_free_heap_size\r
+#define xPortGetMinimumEverFreeHeapSize esp_get_minimum_free_heap_size\r
+\r
+/*\r
+ * Send an interrupt to another core in order to make the task running\r
+ * on it yield for a higher-priority task.\r
+ */\r
+\r
+void vPortYieldOtherCore( BaseType_t coreid) PRIVILEGED_FUNCTION;\r
+\r
+\r
+/*\r
+ Callback to set a watchpoint on the end of the stack. Called every context switch to change the stack\r
+ watchpoint around.\r
+ */\r
+void vPortSetStackWatchpoint( void* pxStackStart );\r
+\r
+/*\r
+ * Returns true if the current core is in ISR context; low prio ISR, med prio ISR or timer tick ISR. High prio ISRs\r
+ * aren't detected here, but they normally cannot call C code, so that should not be an issue anyway.\r
+ */\r
+BaseType_t xPortInIsrContext();\r
+\r
+/*\r
+ * This function will be called in High prio ISRs. Returns true if the current core was in ISR context\r
+ * before calling into high prio ISR context.\r
+ */\r
+BaseType_t xPortInterruptedFromISRContext();\r
+\r
+/*\r
+ * The structures and methods of manipulating the MPU are contained within the\r
+ * port layer.\r
+ *\r
+ * Fills the xMPUSettings structure with the memory region information\r
+ * contained in xRegions.\r
+ */\r
+#if( portUSING_MPU_WRAPPERS == 1 )\r
+ struct xMEMORY_REGION;\r
+ void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMORY_REGION * const xRegions, StackType_t *pxBottomOfStack, uint32_t usStackDepth ) PRIVILEGED_FUNCTION;\r
+ void vPortReleaseTaskMPUSettings( xMPU_SETTINGS *xMPUSettings );\r
+#endif\r
+\r
+/* Multi-core: get current core ID */\r
+/* Reads the Xtensa PRID special register and extracts bit 13, which on the\r
+ * ESP32 distinguishes the two cores (0 = PRO CPU, 1 = APP CPU — cf. the\r
+ * CORE_ID_PRO/CORE_ID_APP values mentioned in the portMUX_TYPE comment).\r
+ * IRAM_ATTR: presumably placed in IRAM so it stays callable while the flash\r
+ * cache is disabled — TODO confirm. */\r
+static inline uint32_t IRAM_ATTR xPortGetCoreID() {\r
+    int id;\r
+    asm (\r
+        "rsr.prid %0\n"           /* %0 = processor ID register */\r
+        " extui %0,%0,13,1"       /* keep only bit 13 (core number) */\r
+        :"=r"(id));\r
+    return id;\r
+}\r
+\r
+/* Get tick rate per second */\r
+uint32_t xPortGetTickRateHz(void);\r
+\r
+// porttrace\r
+#if configUSE_TRACE_FACILITY_2\r
+#include "porttrace.h"\r
+#endif\r
+\r
+// configASSERT_2 if requested\r
+#if configASSERT_2\r
+#include <stdio.h>\r
+void exit(int);\r
+#define configASSERT( x ) if (!(x)) { porttracePrint(-1); printf("\nAssertion failed in %s:%d\n", __FILE__, __LINE__); exit(-1); }\r
+#endif\r
+\r
+#endif // __ASSEMBLER__\r
+\r
+#ifdef __cplusplus\r
+}\r
+#endif\r
+\r
+#endif /* PORTMACRO_H */\r
+\r
--- /dev/null
+/*******************************************************************************\r
+Copyright (c) 2006-2015 Cadence Design Systems Inc.\r
+\r
+Permission is hereby granted, free of charge, to any person obtaining\r
+a copy of this software and associated documentation files (the\r
+"Software"), to deal in the Software without restriction, including\r
+without limitation the rights to use, copy, modify, merge, publish,\r
+distribute, sublicense, and/or sell copies of the Software, and to\r
+permit persons to whom the Software is furnished to do so, subject to\r
+the following conditions:\r
+\r
+The above copyright notice and this permission notice shall be included\r
+in all copies or substantial portions of the Software.\r
+\r
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+******************************************************************************/\r
+\r
+/******************************************************************************\r
+ Xtensa-specific API for RTOS ports.\r
+******************************************************************************/\r
+\r
+#ifndef __XTENSA_API_H__\r
+#define __XTENSA_API_H__\r
+\r
+#include <xtensa/hal.h>\r
+\r
+#include "xtensa_context.h"\r
+\r
+\r
+/* Typedef for C-callable interrupt handler function */\r
+typedef void (*xt_handler)(void *);\r
+\r
+/* Typedef for C-callable exception handler function */\r
+typedef void (*xt_exc_handler)(XtExcFrame *);\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ Call this function to set a handler for the specified exception. The handler\r
+ will be installed on the core that calls this function.\r
+\r
+ n - Exception number (type)\r
+ f - Handler function address, NULL to uninstall handler.\r
+\r
+ The handler will be passed a pointer to the exception frame, which is created\r
+ on the stack of the thread that caused the exception.\r
+\r
+ If the handler returns, the thread context will be restored and the faulting\r
+ instruction will be retried. Any values in the exception frame that are\r
+ modified by the handler will be restored as part of the context. For details\r
+ of the exception frame structure see xtensa_context.h.\r
+-------------------------------------------------------------------------------\r
+*/\r
+extern xt_exc_handler xt_set_exception_handler(int n, xt_exc_handler f);\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ Call this function to set a handler for the specified interrupt. The handler\r
+ will be installed on the core that calls this function.\r
+ \r
+ n - Interrupt number.\r
+ f - Handler function address, NULL to uninstall handler.\r
+ arg - Argument to be passed to handler.\r
+-------------------------------------------------------------------------------\r
+*/\r
+extern xt_handler xt_set_interrupt_handler(int n, xt_handler f, void * arg);\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ Call this function to enable the specified interrupts on the core that runs\r
+ this code.\r
+\r
+ mask - Bit mask of interrupts to be enabled.\r
+-------------------------------------------------------------------------------\r
+*/\r
+extern void xt_ints_on(unsigned int mask);\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ Call this function to disable the specified interrupts on the core that runs\r
+ this code.\r
+\r
+ mask - Bit mask of interrupts to be disabled.\r
+-------------------------------------------------------------------------------\r
+*/\r
+extern void xt_ints_off(unsigned int mask);\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+  Call this function to set (trigger) the specified (s/w) interrupt on the\r
+  core that runs this code.\r
+\r
+  arg - Bit mask of interrupts to set, passed through to xthal_set_intset().\r
+-------------------------------------------------------------------------------\r
+*/\r
+static inline void xt_set_intset(unsigned int arg)\r
+{\r
+    xthal_set_intset(arg);\r
+}\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+  Call this function to clear the specified (s/w or edge-triggered)\r
+  interrupt on the core that runs this code.\r
+\r
+  arg - Bit mask of interrupts to clear, passed through to xthal_set_intclear().\r
+-------------------------------------------------------------------------------\r
+*/\r
+static inline void xt_set_intclear(unsigned int arg)\r
+{\r
+    xthal_set_intclear(arg);\r
+}\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ Call this function to get handler's argument for the specified interrupt.\r
+ \r
+ n - Interrupt number.\r
+-------------------------------------------------------------------------------\r
+*/\r
+extern void * xt_get_interrupt_handler_arg(int n);\r
+\r
+#endif /* __XTENSA_API_H__ */\r
+\r
--- /dev/null
+/*******************************************************************************\r
+// Copyright (c) 2003-2015 Cadence Design Systems, Inc.\r
+//\r
+// Permission is hereby granted, free of charge, to any person obtaining\r
+// a copy of this software and associated documentation files (the\r
+// "Software"), to deal in the Software without restriction, including\r
+// without limitation the rights to use, copy, modify, merge, publish,\r
+// distribute, sublicense, and/or sell copies of the Software, and to\r
+// permit persons to whom the Software is furnished to do so, subject to\r
+// the following conditions:\r
+//\r
+// The above copyright notice and this permission notice shall be included\r
+// in all copies or substantial portions of the Software.\r
+//\r
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+--------------------------------------------------------------------------------\r
+\r
+ Configuration-specific information for Xtensa build. This file must be\r
+ included in FreeRTOSConfig.h to properly set up the config-dependent\r
+ parameters correctly.\r
+\r
+ NOTE: To enable thread-safe C library support, XT_USE_THREAD_SAFE_CLIB must\r
+ be defined to be > 0 somewhere above or on the command line.\r
+\r
+*******************************************************************************/\r
+\r
+#ifndef XTENSA_CONFIG_H\r
+#define XTENSA_CONFIG_H\r
+\r
+#ifdef __cplusplus\r
+extern "C" {\r
+#endif\r
+\r
+#include <xtensa/hal.h>\r
+#include <xtensa/config/core.h>\r
+#include <xtensa/config/system.h> /* required for XSHAL_CLIB */\r
+\r
+#include "xtensa_context.h"\r
+\r
+\r
+/*-----------------------------------------------------------------------------\r
+* STACK REQUIREMENTS\r
+*\r
+* This section defines the minimum stack size, and the extra space required to\r
+* be allocated for saving coprocessor state and/or C library state information\r
+* (if thread safety is enabled for the C library). The sizes are in bytes.\r
+*\r
+* Stack sizes for individual tasks should be derived from these minima based on\r
+* the maximum call depth of the task and the maximum level of interrupt nesting.\r
+* A minimum stack size is defined by XT_STACK_MIN_SIZE. This minimum is based\r
+* on the requirement for a task that calls nothing else but can be interrupted.\r
+* This assumes that interrupt handlers do not call more than a few levels deep.\r
+* If this is not true, i.e. one or more interrupt handlers make deep calls then\r
+* the minimum must be increased.\r
+*\r
+* If the Xtensa processor configuration includes coprocessors, then space is \r
+* allocated to save the coprocessor state on the stack.\r
+*\r
+* If thread safety is enabled for the C runtime library, (XT_USE_THREAD_SAFE_CLIB\r
+* is defined) then space is allocated to save the C library context in the TCB.\r
+* \r
+* Allocating insufficient stack space is a common source of hard-to-find errors.\r
+* During development, it is best to enable the FreeRTOS stack checking features.\r
+*\r
+* Usage:\r
+* \r
+* XT_USE_THREAD_SAFE_CLIB -- Define this to a nonzero value to enable thread-safe\r
+* use of the C library. This will require extra stack\r
+* space to be allocated for tasks that use the C library\r
+* reentrant functions. See below for more information.\r
+* \r
+* NOTE: The Xtensa toolchain supports multiple C libraries and not all of them\r
+* support thread safety. Check your core configuration to see which C library\r
+* was chosen for your system.\r
+* \r
+* XT_STACK_MIN_SIZE -- The minimum stack size for any task. It is recommended\r
+* that you do not use a stack smaller than this for any\r
+* task. In case you want to use stacks smaller than this\r
+* size, you must verify that the smaller size(s) will work\r
+* under all operating conditions.\r
+*\r
+* XT_STACK_EXTRA -- The amount of extra stack space to allocate for a task\r
+* that does not make C library reentrant calls. Add this\r
+* to the amount of stack space required by the task itself.\r
+*\r
+* XT_STACK_EXTRA_CLIB -- The amount of space to allocate for C library state.\r
+*\r
+-----------------------------------------------------------------------------*/\r
+\r
+/* Extra space required for interrupt/exception hooks. */\r
+#ifdef XT_INTEXC_HOOKS\r
+ #ifdef __XTENSA_CALL0_ABI__\r
+ #define STK_INTEXC_EXTRA 0x200\r
+ #else\r
+ #define STK_INTEXC_EXTRA 0x180\r
+ #endif\r
+#else\r
+ #define STK_INTEXC_EXTRA 0\r
+#endif\r
+\r
+#define XT_CLIB_CONTEXT_AREA_SIZE 0\r
+\r
+/*------------------------------------------------------------------------------\r
+ Extra size -- interrupt frame plus coprocessor save area plus hook space.\r
+ NOTE: Make sure XT_INTEXC_HOOKS is undefined unless you really need the hooks.\r
+------------------------------------------------------------------------------*/\r
+#ifdef __XTENSA_CALL0_ABI__\r
+ #define XT_XTRA_SIZE (XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x10 + XT_CP_SIZE)\r
+#else\r
+ #define XT_XTRA_SIZE (XT_STK_FRMSZ + STK_INTEXC_EXTRA + 0x20 + XT_CP_SIZE)\r
+#endif\r
+\r
+/*------------------------------------------------------------------------------\r
+ Space allocated for user code -- function calls and local variables.\r
+ NOTE: This number can be adjusted to suit your needs. You must verify that the\r
+ amount of space you reserve is adequate for the worst-case conditions in your\r
+ application.\r
+ NOTE: The windowed ABI requires more stack, since space has to be reserved\r
+ for spilling register windows.\r
+------------------------------------------------------------------------------*/\r
+#ifdef __XTENSA_CALL0_ABI__\r
+ #define XT_USER_SIZE 0x200\r
+#else\r
+ #define XT_USER_SIZE 0x400\r
+#endif\r
+\r
+/* Minimum recommended stack size. */\r
+#define XT_STACK_MIN_SIZE ((XT_XTRA_SIZE + XT_USER_SIZE) / sizeof(unsigned char))\r
+\r
+/* OS overhead with and without C library thread context. */\r
+#define XT_STACK_EXTRA (XT_XTRA_SIZE)\r
+#define XT_STACK_EXTRA_CLIB (XT_XTRA_SIZE + XT_CLIB_CONTEXT_AREA_SIZE)\r
+\r
+\r
+#ifdef __cplusplus\r
+}\r
+#endif\r
+\r
+#endif /* XTENSA_CONFIG_H */\r
+\r
--- /dev/null
+/*******************************************************************************\r
+Copyright (c) 2006-2015 Cadence Design Systems Inc.\r
+\r
+Permission is hereby granted, free of charge, to any person obtaining\r
+a copy of this software and associated documentation files (the\r
+"Software"), to deal in the Software without restriction, including\r
+without limitation the rights to use, copy, modify, merge, publish,\r
+distribute, sublicense, and/or sell copies of the Software, and to\r
+permit persons to whom the Software is furnished to do so, subject to\r
+the following conditions:\r
+\r
+The above copyright notice and this permission notice shall be included\r
+in all copies or substantial portions of the Software.\r
+\r
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+--------------------------------------------------------------------------------\r
+\r
+ XTENSA CONTEXT FRAMES AND MACROS FOR RTOS ASSEMBLER SOURCES\r
+\r
+This header contains definitions and macros for use primarily by Xtensa\r
+RTOS assembly coded source files. It includes and uses the Xtensa hardware\r
+abstraction layer (HAL) to deal with config specifics. It may also be\r
+included in C source files.\r
+\r
+!! Supports only Xtensa Exception Architecture 2 (XEA2). XEA1 not supported. !!\r
+\r
+NOTE: The Xtensa architecture requires stack pointer alignment to 16 bytes.\r
+\r
+*******************************************************************************/\r
+\r
+#ifndef XTENSA_CONTEXT_H\r
+#define XTENSA_CONTEXT_H\r
+\r
+#ifdef __ASSEMBLER__\r
+#include <xtensa/coreasm.h>\r
+#endif\r
+\r
+#include <xtensa/config/tie.h>\r
+#include <xtensa/corebits.h>\r
+#include <xtensa/config/system.h>\r
+#include <xtensa/xtruntime-frames.h>\r
+\r
+\r
+/* Align a value up to nearest n-byte boundary, where n is a power of 2. */\r
+#define ALIGNUP(n, val) (((val) + (n)-1) & -(n))\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ Macros that help define structures for both C and assembler.\r
+-------------------------------------------------------------------------------\r
+*/\r
+\r
+#ifdef STRUCT_BEGIN\r
+#undef STRUCT_BEGIN\r
+#undef STRUCT_FIELD\r
+#undef STRUCT_AFIELD\r
+#undef STRUCT_END\r
+#endif\r
+\r
+#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__)\r
+\r
+#define STRUCT_BEGIN .pushsection .text; .struct 0\r
+#define STRUCT_FIELD(ctype,size,asname,name) asname: .space size\r
+#define STRUCT_AFIELD(ctype,size,asname,name,n) asname: .space (size)*(n)\r
+#define STRUCT_END(sname) sname##Size:; .popsection\r
+\r
+#else\r
+\r
+#define STRUCT_BEGIN typedef struct {\r
+#define STRUCT_FIELD(ctype,size,asname,name) ctype name;\r
+#define STRUCT_AFIELD(ctype,size,asname,name,n) ctype name[n];\r
+#define STRUCT_END(sname) } sname;\r
+\r
+#endif //_ASMLANGUAGE || __ASSEMBLER__\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ INTERRUPT/EXCEPTION STACK FRAME FOR A THREAD OR NESTED INTERRUPT\r
+\r
+ A stack frame of this structure is allocated for any interrupt or exception.\r
+ It goes on the current stack. If the RTOS has a system stack for handling \r
+ interrupts, every thread stack must allow space for just one interrupt stack \r
+ frame, then nested interrupt stack frames go on the system stack.\r
+\r
+ The frame includes basic registers (explicit) and "extra" registers introduced \r
+ by user TIE or the use of the MAC16 option in the user's Xtensa config.\r
+ The frame size is minimized by omitting regs not applicable to user's config.\r
+\r
+ For Windowed ABI, this stack frame includes the interruptee's base save area,\r
+ another base save area to manage gcc nested functions, and a little temporary \r
+ space to help manage the spilling of the register windows.\r
+-------------------------------------------------------------------------------\r
+*/\r
+\r
+STRUCT_BEGIN\r
+STRUCT_FIELD (long, 4, XT_STK_EXIT, exit) /* exit point for dispatch */\r
+STRUCT_FIELD (long, 4, XT_STK_PC, pc) /* return PC */\r
+STRUCT_FIELD (long, 4, XT_STK_PS, ps) /* return PS */\r
+STRUCT_FIELD (long, 4, XT_STK_A0, a0)\r
+STRUCT_FIELD (long, 4, XT_STK_A1, a1) /* stack pointer before interrupt */\r
+STRUCT_FIELD (long, 4, XT_STK_A2, a2)\r
+STRUCT_FIELD (long, 4, XT_STK_A3, a3)\r
+STRUCT_FIELD (long, 4, XT_STK_A4, a4)\r
+STRUCT_FIELD (long, 4, XT_STK_A5, a5)\r
+STRUCT_FIELD (long, 4, XT_STK_A6, a6)\r
+STRUCT_FIELD (long, 4, XT_STK_A7, a7)\r
+STRUCT_FIELD (long, 4, XT_STK_A8, a8)\r
+STRUCT_FIELD (long, 4, XT_STK_A9, a9)\r
+STRUCT_FIELD (long, 4, XT_STK_A10, a10)\r
+STRUCT_FIELD (long, 4, XT_STK_A11, a11)\r
+STRUCT_FIELD (long, 4, XT_STK_A12, a12)\r
+STRUCT_FIELD (long, 4, XT_STK_A13, a13)\r
+STRUCT_FIELD (long, 4, XT_STK_A14, a14)\r
+STRUCT_FIELD (long, 4, XT_STK_A15, a15)\r
+STRUCT_FIELD (long, 4, XT_STK_SAR, sar)\r
+STRUCT_FIELD (long, 4, XT_STK_EXCCAUSE, exccause)\r
+STRUCT_FIELD (long, 4, XT_STK_EXCVADDR, excvaddr)\r
+#if XCHAL_HAVE_LOOPS\r
+STRUCT_FIELD (long, 4, XT_STK_LBEG, lbeg)\r
+STRUCT_FIELD (long, 4, XT_STK_LEND, lend)\r
+STRUCT_FIELD (long, 4, XT_STK_LCOUNT, lcount)\r
+#endif\r
+#ifndef __XTENSA_CALL0_ABI__\r
+/* Temporary space for saving stuff during window spill */\r
+STRUCT_FIELD (long, 4, XT_STK_TMP0, tmp0)\r
+STRUCT_FIELD (long, 4, XT_STK_TMP1, tmp1)\r
+STRUCT_FIELD (long, 4, XT_STK_TMP2, tmp2)\r
+#endif\r
+#ifdef XT_USE_SWPRI\r
+/* Storage for virtual priority mask */\r
+STRUCT_FIELD (long, 4, XT_STK_VPRI, vpri)\r
+#endif\r
+#ifdef XT_USE_OVLY\r
+/* Storage for overlay state */\r
+STRUCT_FIELD (long, 4, XT_STK_OVLY, ovly)\r
+#endif\r
+STRUCT_END(XtExcFrame)\r
+\r
+#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__)\r
+#define XT_STK_NEXT1 XtExcFrameSize\r
+#else\r
+#define XT_STK_NEXT1 sizeof(XtExcFrame)\r
+#endif\r
+\r
+/* Allocate extra storage if needed */\r
+#if XCHAL_EXTRA_SA_SIZE != 0\r
+\r
+#if XCHAL_EXTRA_SA_ALIGN <= 16\r
+#define XT_STK_EXTRA ALIGNUP(XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1)\r
+#else\r
+/* If need more alignment than stack, add space for dynamic alignment */\r
+#define XT_STK_EXTRA (ALIGNUP(XCHAL_EXTRA_SA_ALIGN, XT_STK_NEXT1) + XCHAL_EXTRA_SA_ALIGN)\r
+#endif\r
+#define XT_STK_NEXT2 (XT_STK_EXTRA + XCHAL_EXTRA_SA_SIZE)\r
+\r
+#else\r
+\r
+#define XT_STK_NEXT2 XT_STK_NEXT1 \r
+\r
+#endif\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ This is the frame size. Add space for 4 registers (interruptee's base save\r
+ area) and some space for gcc nested functions if any.\r
+-------------------------------------------------------------------------------\r
+*/\r
+#define XT_STK_FRMSZ (ALIGNUP(0x10, XT_STK_NEXT2) + 0x20)\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ SOLICITED STACK FRAME FOR A THREAD\r
+\r
+ A stack frame of this structure is allocated whenever a thread enters the \r
+ RTOS kernel intentionally (and synchronously) to submit to thread scheduling.\r
+ It goes on the current thread's stack.\r
+\r
+ The solicited frame only includes registers that are required to be preserved\r
+ by the callee according to the compiler's ABI conventions, some space to save \r
+ the return address for returning to the caller, and the caller's PS register.\r
+\r
+ For Windowed ABI, this stack frame includes the caller's base save area.\r
+\r
+ Note on XT_SOL_EXIT field:\r
+ It is necessary to distinguish a solicited from an interrupt stack frame.\r
+ This field corresponds to XT_STK_EXIT in the interrupt stack frame and is\r
+ always at the same offset (0). It can be written with a code (usually 0) \r
+ to distinguish a solicited frame from an interrupt frame. An RTOS port may\r
+ opt to ignore this field if it has another way of distinguishing frames.\r
+-------------------------------------------------------------------------------\r
+*/\r
+\r
+STRUCT_BEGIN\r
+#ifdef __XTENSA_CALL0_ABI__\r
+STRUCT_FIELD (long, 4, XT_SOL_EXIT, exit)\r
+STRUCT_FIELD (long, 4, XT_SOL_PC, pc)\r
+STRUCT_FIELD (long, 4, XT_SOL_PS, ps)\r
+STRUCT_FIELD (long, 4, XT_SOL_NEXT, next)\r
+STRUCT_FIELD (long, 4, XT_SOL_A12, a12) /* should be on 16-byte alignment */\r
+STRUCT_FIELD (long, 4, XT_SOL_A13, a13)\r
+STRUCT_FIELD (long, 4, XT_SOL_A14, a14)\r
+STRUCT_FIELD (long, 4, XT_SOL_A15, a15)\r
+#else\r
+STRUCT_FIELD (long, 4, XT_SOL_EXIT, exit)\r
+STRUCT_FIELD (long, 4, XT_SOL_PC, pc)\r
+STRUCT_FIELD (long, 4, XT_SOL_PS, ps)\r
+STRUCT_FIELD (long, 4, XT_SOL_NEXT, next)\r
+STRUCT_FIELD (long, 4, XT_SOL_A0, a0) /* should be on 16-byte alignment */\r
+STRUCT_FIELD (long, 4, XT_SOL_A1, a1)\r
+STRUCT_FIELD (long, 4, XT_SOL_A2, a2)\r
+STRUCT_FIELD (long, 4, XT_SOL_A3, a3)\r
+#endif\r
+STRUCT_END(XtSolFrame)\r
+\r
+/* Size of solicited stack frame */\r
+#define XT_SOL_FRMSZ ALIGNUP(0x10, XtSolFrameSize)\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ CO-PROCESSOR STATE SAVE AREA FOR A THREAD\r
+\r
+ The RTOS must provide an area per thread to save the state of co-processors\r
+ when that thread does not have control. Co-processors are context-switched\r
+ lazily (on demand) only when a new thread uses a co-processor instruction,\r
+ otherwise a thread retains ownership of the co-processor even when it loses\r
+ control of the processor. An Xtensa co-processor exception is triggered when\r
+ any co-processor instruction is executed by a thread that is not the owner,\r
+ and the context switch of that co-processor is then performed by the handler.\r
+ Ownership represents which thread's state is currently in the co-processor.\r
+\r
+ Co-processors may not be used by interrupt or exception handlers. If an \r
+ co-processor instruction is executed by an interrupt or exception handler,\r
+ the co-processor exception handler will trigger a kernel panic and freeze.\r
+ This restriction is introduced to reduce the overhead of saving and restoring\r
+ co-processor state (which can be quite large) and in particular remove that\r
+ overhead from interrupt handlers.\r
+\r
+ The co-processor state save area may be in any convenient per-thread location\r
+ such as in the thread control block or above the thread stack area. It need\r
+ not be in the interrupt stack frame since interrupts don't use co-processors.\r
+\r
+ Along with the save area for each co-processor, two bitmasks with flags per \r
+ co-processor (laid out as in the CPENABLE reg) help manage context-switching\r
+ co-processors as efficiently as possible:\r
+\r
+ XT_CPENABLE\r
+ The contents of a non-running thread's CPENABLE register.\r
+ It represents the co-processors owned (and whose state is still needed)\r
+ by the thread. When a thread is preempted, its CPENABLE is saved here.\r
+ When a thread solicits a context-switch, its CPENABLE is cleared - the\r
+ compiler has saved the (caller-saved) co-proc state if it needs to.\r
+ When a non-running thread loses ownership of a CP, its bit is cleared.\r
+ When a thread runs, its XT_CPENABLE is loaded into the CPENABLE reg.\r
+ Avoids co-processor exceptions when no change of ownership is needed.\r
+\r
+ XT_CPSTORED\r
+ A bitmask with the same layout as CPENABLE, a bit per co-processor.\r
+ Indicates whether the state of each co-processor is saved in the state \r
+ save area. When a thread enters the kernel, only the state of co-procs\r
+ still enabled in CPENABLE is saved. When the co-processor exception \r
+ handler assigns ownership of a co-processor to a thread, it restores \r
+ the saved state only if this bit is set, and clears this bit.\r
+\r
+ XT_CP_CS_ST\r
+ A bitmask with the same layout as CPENABLE, a bit per co-processor.\r
+ Indicates whether callee-saved state is saved in the state save area.\r
+ Callee-saved state is saved by itself on a solicited context switch,\r
+ and restored when needed by the coprocessor exception handler.\r
+ Unsolicited switches will cause the entire coprocessor to be saved\r
+ when necessary.\r
+\r
+ XT_CP_ASA\r
+ Pointer to the aligned save area. Allows it to be aligned more than\r
+ the overall save area (which might only be stack-aligned or TCB-aligned).\r
+ Especially relevant for Xtensa cores configured with a very large data\r
+ path that requires alignment greater than 16 bytes (ABI stack alignment).\r
+-------------------------------------------------------------------------------\r
+*/\r
+\r
+#if XCHAL_CP_NUM > 0\r
+\r
+/* Offsets of each coprocessor save area within the 'aligned save area': */\r
+#define XT_CP0_SA 0\r
+#define XT_CP1_SA ALIGNUP(XCHAL_CP1_SA_ALIGN, XT_CP0_SA + XCHAL_CP0_SA_SIZE)\r
+#define XT_CP2_SA ALIGNUP(XCHAL_CP2_SA_ALIGN, XT_CP1_SA + XCHAL_CP1_SA_SIZE)\r
+#define XT_CP3_SA ALIGNUP(XCHAL_CP3_SA_ALIGN, XT_CP2_SA + XCHAL_CP2_SA_SIZE)\r
+#define XT_CP4_SA ALIGNUP(XCHAL_CP4_SA_ALIGN, XT_CP3_SA + XCHAL_CP3_SA_SIZE)\r
+#define XT_CP5_SA ALIGNUP(XCHAL_CP5_SA_ALIGN, XT_CP4_SA + XCHAL_CP4_SA_SIZE)\r
+#define XT_CP6_SA ALIGNUP(XCHAL_CP6_SA_ALIGN, XT_CP5_SA + XCHAL_CP5_SA_SIZE)\r
+#define XT_CP7_SA ALIGNUP(XCHAL_CP7_SA_ALIGN, XT_CP6_SA + XCHAL_CP6_SA_SIZE)\r
+#define XT_CP_SA_SIZE ALIGNUP(16, XT_CP7_SA + XCHAL_CP7_SA_SIZE)\r
+\r
+/* Offsets within the overall save area: */\r
+#define XT_CPENABLE 0 /* (2 bytes) coprocessors active for this thread */\r
+#define XT_CPSTORED 2 /* (2 bytes) coprocessors saved for this thread */\r
+#define XT_CP_CS_ST 4 /* (2 bytes) coprocessor callee-saved regs stored for this thread */\r
+#define XT_CP_ASA 8 /* (4 bytes) ptr to aligned save area */\r
+/* Overall size allows for dynamic alignment: */\r
+#define XT_CP_SIZE (12 + XT_CP_SA_SIZE + XCHAL_TOTAL_SA_ALIGN)\r
+#else\r
+#define XT_CP_SIZE 0\r
+#endif\r
+\r
+\r
+/*\r
+ Macro to get the current core ID. Only uses the reg given as an argument.\r
+ Reading PRID on the ESP32 gives us 0xCDCD on the PRO processor (0)\r
+ and 0xABAB on the APP CPU (1). We can distinguish between the two by checking\r
+ bit 13: it's 1 on the APP and 0 on the PRO processor.\r
+*/\r
+#ifdef __ASSEMBLER__\r
+ .macro getcoreid reg\r
+ rsr.prid \reg\r
+ extui \reg,\reg,13,1\r
+ .endm\r
+#endif\r
+\r
+#define CORE_ID_PRO 0xCDCD\r
+#define CORE_ID_APP 0xABAB\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ MACROS TO HANDLE ABI SPECIFICS OF FUNCTION ENTRY AND RETURN\r
+\r
+ Convenient where the frame size requirements are the same for both ABIs.\r
+ ENTRY(sz), RET(sz) are for framed functions (have locals or make calls).\r
+ ENTRY0, RET0 are for frameless functions (no locals, no calls).\r
+\r
+ where sz = size of stack frame in bytes (must be >0 and aligned to 16).\r
+ For framed functions the frame is created and the return address saved at\r
+ base of frame (Call0 ABI) or as determined by hardware (Windowed ABI).\r
+ For frameless functions, there is no frame and return address remains in a0.\r
+ Note: Because CPP macros expand to a single line, macros requiring multi-line \r
+ expansions are implemented as assembler macros.\r
+-------------------------------------------------------------------------------\r
+*/\r
+\r
+#ifdef __ASSEMBLER__\r
+#ifdef __XTENSA_CALL0_ABI__\r
+ /* Call0 */\r
+ #define ENTRY(sz) entry1 sz\r
+ .macro entry1 size=0x10\r
+ addi sp, sp, -\size\r
+ s32i a0, sp, 0\r
+ .endm\r
+ #define ENTRY0 \r
+ #define RET(sz) ret1 sz\r
+ .macro ret1 size=0x10\r
+ l32i a0, sp, 0\r
+ addi sp, sp, \size\r
+ ret\r
+ .endm\r
+ #define RET0 ret\r
+#else\r
+ /* Windowed */\r
+ #define ENTRY(sz) entry sp, sz\r
+ #define ENTRY0 entry sp, 0x10\r
+ #define RET(sz) retw\r
+ #define RET0 retw\r
+#endif\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+#endif /* XTENSA_CONTEXT_H */\r
+\r
--- /dev/null
+/*******************************************************************************\r
+// Copyright (c) 2003-2015 Cadence Design Systems, Inc.\r
+//\r
+// Permission is hereby granted, free of charge, to any person obtaining\r
+// a copy of this software and associated documentation files (the\r
+// "Software"), to deal in the Software without restriction, including\r
+// without limitation the rights to use, copy, modify, merge, publish,\r
+// distribute, sublicense, and/or sell copies of the Software, and to\r
+// permit persons to whom the Software is furnished to do so, subject to\r
+// the following conditions:\r
+//\r
+// The above copyright notice and this permission notice shall be included\r
+// in all copies or substantial portions of the Software.\r
+//\r
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+--------------------------------------------------------------------------------\r
+\r
+ RTOS-SPECIFIC INFORMATION FOR XTENSA RTOS ASSEMBLER SOURCES\r
+ (FreeRTOS Port)\r
+\r
+This header is the primary glue between generic Xtensa RTOS support\r
+sources and a specific RTOS port for Xtensa. It contains definitions\r
+and macros for use primarily by Xtensa assembly coded source files.\r
+\r
+Macros in this header map callouts from generic Xtensa files to specific\r
+RTOS functions. It may also be included in C source files.\r
+\r
+Xtensa RTOS ports support all RTOS-compatible configurations of the Xtensa \r
+architecture, using the Xtensa hardware abstraction layer (HAL) to deal \r
+with configuration specifics.\r
+\r
+Should be included by all Xtensa generic and RTOS port-specific sources.\r
+\r
+*******************************************************************************/\r
+\r
+#ifndef XTENSA_RTOS_H\r
+#define XTENSA_RTOS_H\r
+\r
+#ifdef __ASSEMBLER__\r
+#include <xtensa/coreasm.h>\r
+#else\r
+#include <xtensa/config/core.h>\r
+#endif\r
+\r
+#include <xtensa/corebits.h>\r
+#include <xtensa/config/system.h>\r
+\r
+/*\r
+Include any RTOS specific definitions that are needed by this header.\r
+*/\r
+#include "FreeRTOSConfig.h"\r
+\r
+/*\r
+Convert FreeRTOSConfig definitions to XTENSA definitions.\r
+However these can still be overridden from the command line.\r
+*/\r
+\r
+#ifndef XT_SIMULATOR\r
+ #if configXT_SIMULATOR\r
+ #define XT_SIMULATOR 1 /* Simulator mode */\r
+ #endif\r
+#endif\r
+\r
+#ifndef XT_BOARD\r
+ #if configXT_BOARD\r
+ #define XT_BOARD 1 /* Board mode */\r
+ #endif\r
+#endif\r
+\r
+#ifndef XT_TIMER_INDEX\r
+ #if defined configXT_TIMER_INDEX\r
+ #define XT_TIMER_INDEX configXT_TIMER_INDEX /* Index of hardware timer to be used */\r
+ #endif\r
+#endif\r
+\r
+#ifndef XT_INTEXC_HOOKS\r
+ #if configXT_INTEXC_HOOKS\r
+ #define XT_INTEXC_HOOKS 1 /* Enables exception hooks */\r
+ #endif\r
+#endif\r
+\r
+#if !defined(XT_SIMULATOR) && !defined(XT_BOARD)\r
+ #error Either XT_SIMULATOR or XT_BOARD must be defined.\r
+#endif\r
+\r
+\r
+/*\r
+Name of RTOS (for messages).\r
+*/\r
+#define XT_RTOS_NAME FreeRTOS\r
+\r
+/*\r
+Check some Xtensa configuration requirements and report error if not met.\r
+Error messages can be customized for the RTOS port.\r
+*/\r
+\r
+#if !XCHAL_HAVE_XEA2\r
+#error "FreeRTOS/Xtensa requires XEA2 (exception architecture 2)."\r
+#endif\r
+\r
+\r
+/*******************************************************************************\r
+\r
+RTOS CALLOUT MACROS MAPPED TO RTOS PORT-SPECIFIC FUNCTIONS.\r
+\r
+Define callout macros used in generic Xtensa code to interact with the RTOS.\r
+The macros are simply the function names for use in calls from assembler code.\r
+Some of these functions may call back to generic functions in xtensa_context.h .\r
+\r
+*******************************************************************************/\r
+\r
+/*\r
+Inform RTOS of entry into an interrupt handler that will affect it. \r
+Allows RTOS to manage switch to any system stack and count nesting level.\r
+Called after minimal context has been saved, with interrupts disabled.\r
+RTOS port can call0 _xt_context_save to save the rest of the context.\r
+May only be called from assembly code by the 'call0' instruction.\r
+*/\r
+// void XT_RTOS_INT_ENTER(void)\r
+#define XT_RTOS_INT_ENTER _frxt_int_enter\r
+\r
+/*\r
+Inform RTOS of completion of an interrupt handler, and give control to\r
+RTOS to perform thread/task scheduling, switch back from any system stack\r
+and restore the context, and return to the exit dispatcher saved in the\r
+stack frame at XT_STK_EXIT. RTOS port can call0 _xt_context_restore\r
+to restore the context saved in XT_RTOS_INT_ENTER via _xt_context_save,\r
+leaving only a minimal part of the context to be restored by the exit\r
+dispatcher. This function does not return to the place it was called from.\r
+May only be called from assembly code by the 'call0' instruction.\r
+*/\r
+// void XT_RTOS_INT_EXIT(void)\r
+#define XT_RTOS_INT_EXIT _frxt_int_exit\r
+\r
+/*\r
+Inform RTOS of the occurrence of a tick timer interrupt.\r
+If RTOS has no tick timer, leave XT_RTOS_TIMER_INT undefined.\r
+May be coded in or called from C or assembly, per ABI conventions.\r
+RTOS may optionally define XT_TICK_PER_SEC in its own way (eg. macro).\r
+*/\r
+// void XT_RTOS_TIMER_INT(void)\r
+#define XT_RTOS_TIMER_INT _frxt_timer_int\r
+#define XT_TICK_PER_SEC configTICK_RATE_HZ\r
+\r
+/*\r
+Return in a15 the base address of the co-processor state save area for the \r
+thread that triggered a co-processor exception, or 0 if no thread was running.\r
+The state save area is structured as defined in xtensa_context.h and has size \r
+XT_CP_SIZE. Co-processor instructions should only be used in thread code, never\r
+in interrupt handlers or the RTOS kernel. May only be called from assembly code\r
+and by the 'call0' instruction. A result of 0 indicates an unrecoverable error. \r
+The implementation may use only a2-4, a15 (all other regs must be preserved).\r
+*/\r
+// void* XT_RTOS_CP_STATE(void)\r
+#define XT_RTOS_CP_STATE _frxt_task_coproc_state\r
+\r
+\r
+/*******************************************************************************\r
+\r
+HOOKS TO DYNAMICALLY INSTALL INTERRUPT AND EXCEPTION HANDLERS PER LEVEL.\r
+\r
+This Xtensa RTOS port provides hooks for dynamically installing exception\r
+and interrupt handlers to facilitate automated testing where each test\r
+case can install its own handler for user exceptions and each interrupt\r
+priority (level). This consists of an array of function pointers indexed\r
+by interrupt priority, with index 0 being the user exception handler hook.\r
+Each entry in the array is initially 0, and may be replaced by a function \r
+pointer of type XT_INTEXC_HOOK. A handler may be uninstalled by installing 0.\r
+\r
+The handler for low and medium priority obeys ABI conventions so may be coded\r
+in C. For the exception handler, the cause is the contents of the EXCCAUSE\r
+reg, and the result is -1 if handled, else the cause (still needs handling).\r
+For interrupt handlers, the cause is a mask of pending enabled interrupts at\r
+that level, and the result is the same mask with the bits for the handled\r
+interrupts cleared (those not cleared still need handling). This allows a test\r
+case to either pre-handle or override the default handling for the exception\r
+or interrupt level (see xtensa_vectors.S).\r
+\r
+High priority handlers (including NMI) must be coded in assembly, are always\r
+called by 'call0' regardless of ABI, must preserve all registers except a0,\r
+and must not use or modify the interrupted stack. The hook argument 'cause'\r
+is not passed and the result is ignored, so as not to burden the caller with\r
+saving and restoring a2 (it assumes only one interrupt per level - see the\r
+discussion in high priority interrupts in xtensa_vectors.S). The handler\r
+therefore should be coded to prototype 'void h(void)' even though it plugs\r
+into an array of handlers of prototype 'unsigned h(unsigned)'.\r
+\r
+To enable interrupt/exception hooks, compile the RTOS with '-DXT_INTEXC_HOOKS'.\r
+\r
+*******************************************************************************/\r
+\r
+#define XT_INTEXC_HOOK_NUM (1 + XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI)\r
+\r
+#ifndef __ASSEMBLER__\r
+typedef unsigned (*XT_INTEXC_HOOK)(unsigned cause);\r
+extern volatile XT_INTEXC_HOOK _xt_intexc_hooks[XT_INTEXC_HOOK_NUM];\r
+#endif\r
+\r
+\r
+/*******************************************************************************\r
+\r
+CONVENIENCE INCLUSIONS.\r
+\r
+Ensures RTOS specific files need only include this one Xtensa-generic header.\r
+These headers are included last so they can use the RTOS definitions above.\r
+\r
+*******************************************************************************/\r
+\r
+#include "xtensa_context.h"\r
+\r
+#ifdef XT_RTOS_TIMER_INT\r
+#include "xtensa_timer.h"\r
+#endif\r
+\r
+\r
+/*******************************************************************************\r
+\r
+Xtensa Port Version.\r
+\r
+*******************************************************************************/\r
+\r
+#define XTENSA_PORT_VERSION 1.4.2\r
+#define XTENSA_PORT_VERSION_STRING "1.4.2"\r
+\r
+#endif /* XTENSA_RTOS_H */\r
+\r
--- /dev/null
+/*******************************************************************************\r
+// Copyright (c) 2003-2015 Cadence Design Systems, Inc.\r
+//\r
+// Permission is hereby granted, free of charge, to any person obtaining\r
+// a copy of this software and associated documentation files (the\r
+// "Software"), to deal in the Software without restriction, including\r
+// without limitation the rights to use, copy, modify, merge, publish,\r
+// distribute, sublicense, and/or sell copies of the Software, and to\r
+// permit persons to whom the Software is furnished to do so, subject to\r
+// the following conditions:\r
+//\r
+// The above copyright notice and this permission notice shall be included\r
+// in all copies or substantial portions of the Software.\r
+//\r
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+--------------------------------------------------------------------------------\r
+\r
+ XTENSA INFORMATION FOR RTOS TICK TIMER AND CLOCK FREQUENCY\r
+\r
+This header contains definitions and macros for use primarily by Xtensa\r
+RTOS assembly coded source files. It includes and uses the Xtensa hardware\r
+abstraction layer (HAL) to deal with config specifics. It may also be\r
+included in C source files.\r
+\r
+User may edit to modify timer selection and to specify clock frequency and\r
+tick duration to match timer interrupt to the real-time tick duration.\r
+\r
+If the RTOS has no timer interrupt, then there is no tick timer and the\r
+clock frequency is irrelevant, so all of these macros are left undefined\r
+and the Xtensa core configuration need not have a timer.\r
+\r
+*******************************************************************************/\r
+\r
+#ifndef XTENSA_TIMER_H\r
+#define XTENSA_TIMER_H\r
+\r
+#ifdef __ASSEMBLER__\r
+#include <xtensa/coreasm.h>\r
+#endif\r
+\r
+#include <xtensa/corebits.h>\r
+#include <xtensa/config/system.h>\r
+\r
+#include "xtensa_rtos.h" /* in case this wasn't included directly */\r
+\r
+#include "FreeRTOSConfig.h"\r
+\r
+/*\r
+Select timer to use for periodic tick, and determine its interrupt number \r
+and priority. User may specify a timer by defining XT_TIMER_INDEX with -D,\r
+in which case its validity is checked (it must exist in this core and must \r
+not be on a high priority interrupt - an error will be reported if invalid).\r
+Otherwise select the first low or medium priority interrupt timer available.\r
+*/\r
+#if XCHAL_NUM_TIMERS == 0\r
+\r
+ #error "This Xtensa configuration is unsupported, it has no timers."\r
+\r
+#else\r
+\r
+#ifndef XT_TIMER_INDEX\r
+ #if XCHAL_TIMER3_INTERRUPT != XTHAL_TIMER_UNCONFIGURED\r
+ #if XCHAL_INT_LEVEL(XCHAL_TIMER3_INTERRUPT) <= XCHAL_EXCM_LEVEL\r
+ #undef XT_TIMER_INDEX\r
+ #define XT_TIMER_INDEX 3\r
+ #endif\r
+ #endif\r
+ #if XCHAL_TIMER2_INTERRUPT != XTHAL_TIMER_UNCONFIGURED\r
+ #if XCHAL_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL\r
+ #undef XT_TIMER_INDEX\r
+ #define XT_TIMER_INDEX 2\r
+ #endif\r
+ #endif\r
+ #if XCHAL_TIMER1_INTERRUPT != XTHAL_TIMER_UNCONFIGURED\r
+ #if XCHAL_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL\r
+ #undef XT_TIMER_INDEX\r
+ #define XT_TIMER_INDEX 1\r
+ #endif\r
+ #endif\r
+ #if XCHAL_TIMER0_INTERRUPT != XTHAL_TIMER_UNCONFIGURED\r
+ #if XCHAL_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL\r
+ #undef XT_TIMER_INDEX\r
+ #define XT_TIMER_INDEX 0\r
+ #endif\r
+ #endif\r
+#endif\r
+#ifndef XT_TIMER_INDEX\r
+ #error "There is no suitable timer in this Xtensa configuration."\r
+#endif\r
+\r
+#define XT_CCOMPARE (CCOMPARE + XT_TIMER_INDEX)\r
+#define XT_TIMER_INTNUM XCHAL_TIMER_INTERRUPT(XT_TIMER_INDEX)\r
+#define XT_TIMER_INTPRI XCHAL_INT_LEVEL(XT_TIMER_INTNUM)\r
+#define XT_TIMER_INTEN (1 << XT_TIMER_INTNUM)\r
+\r
+#if XT_TIMER_INTNUM == XTHAL_TIMER_UNCONFIGURED\r
+ #error "The timer selected by XT_TIMER_INDEX does not exist in this core."\r
+#elif XT_TIMER_INTPRI > XCHAL_EXCM_LEVEL\r
+ #error "The timer interrupt cannot be high priority (use medium or low)."\r
+#endif\r
+\r
+#endif /* XCHAL_NUM_TIMERS */\r
+\r
+/*\r
+Set processor clock frequency, used to determine clock divisor for timer tick.\r
+User should BE SURE TO ADJUST THIS for the Xtensa platform being used.\r
+If using a supported board via the board-independent API defined in xtbsp.h,\r
+this may be left undefined and frequency and tick divisor will be computed \r
+and cached during run-time initialization.\r
+\r
+NOTE ON SIMULATOR:\r
+Under the Xtensa instruction set simulator, the frequency can only be estimated \r
+because it depends on the speed of the host and the version of the simulator.\r
+Also because it runs much slower than hardware, it is not possible to achieve\r
+real-time performance for most applications under the simulator. A frequency\r
+too low does not allow enough time between timer interrupts, starving threads.\r
+To obtain a more convenient but non-real-time tick duration on the simulator, \r
+compile with xt-xcc option "-DXT_SIMULATOR".\r
+Adjust this frequency to taste (it's not real-time anyway!).\r
+*/\r
+#if defined(XT_SIMULATOR) && !defined(XT_CLOCK_FREQ)\r
+#define XT_CLOCK_FREQ       configCPU_CLOCK_HZ\r
+#endif\r
+\r
+#if !defined(XT_CLOCK_FREQ) && !defined(XT_BOARD)\r
+  #error "XT_CLOCK_FREQ must be defined for the target platform."\r
+#endif\r
+\r
+/*\r
+Default number of timer "ticks" per second (default 100 for 10ms tick).\r
+RTOS may define this in its own way (if applicable) in xtensa_rtos.h.\r
+User may redefine this to an optimal value for the application, either by\r
+editing this here or in xtensa_rtos.h, or compiling with xt-xcc option\r
+"-DXT_TICK_PER_SEC=<value>" where <value> is a suitable number.\r
+*/\r
+#ifndef XT_TICK_PER_SEC\r
+#define XT_TICK_PER_SEC     configTICK_RATE_HZ        /* 10 ms tick = 100 ticks per second */\r
+#endif\r
+\r
+/*\r
+Derivation of clock divisor for timer tick and interrupt (one per tick).\r
+Only available at compile time when XT_CLOCK_FREQ is known here; otherwise\r
+the run-time variable _xt_tick_divisor below is used instead.\r
+*/\r
+#ifdef XT_CLOCK_FREQ\r
+#define XT_TICK_DIVISOR     (XT_CLOCK_FREQ / XT_TICK_PER_SEC)\r
+#endif\r
+\r
+#ifndef __ASSEMBLER__\r
+/* Cycles per tick, computed at run time; presumably filled in by\r
+ * _xt_tick_divisor_init() (defined elsewhere) -- confirm against its source. */\r
+extern unsigned _xt_tick_divisor;\r
+extern void     _xt_tick_divisor_init(void);\r
+#endif\r
+\r
+#endif /* XTENSA_TIMER_H */\r
+\r
--- /dev/null
+/*\r
+ FreeRTOS V8.2.0 - Copyright (C) 2015 Real Time Engineers Ltd.\r
+ All rights reserved\r
+\r
+ VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.\r
+\r
+ This file is part of the FreeRTOS distribution.\r
+\r
+ FreeRTOS is free software; you can redistribute it and/or modify it under\r
+ the terms of the GNU General Public License (version 2) as published by the\r
+ Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.\r
+\r
+ ***************************************************************************\r
+ >>! NOTE: The modification to the GPL is included to allow you to !<<\r
+ >>! distribute a combined work that includes FreeRTOS without being !<<\r
+ >>! obliged to provide the source code for proprietary components !<<\r
+ >>! outside of the FreeRTOS kernel. !<<\r
+ ***************************************************************************\r
+\r
+ FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY\r
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\r
+ FOR A PARTICULAR PURPOSE. Full license text is available on the following\r
+ link: http://www.freertos.org/a00114.html\r
+\r
+ ***************************************************************************\r
+ * *\r
+ * FreeRTOS provides completely free yet professionally developed, *\r
+ * robust, strictly quality controlled, supported, and cross *\r
+ * platform software that is more than just the market leader, it *\r
+ * is the industry's de facto standard. *\r
+ * *\r
+ * Help yourself get started quickly while simultaneously helping *\r
+ * to support the FreeRTOS project by purchasing a FreeRTOS *\r
+ * tutorial book, reference manual, or both: *\r
+ * http://www.FreeRTOS.org/Documentation *\r
+ * *\r
+ ***************************************************************************\r
+\r
+ http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading\r
+ the FAQ page "My application does not run, what could be wrong?". Have you\r
+ defined configASSERT()?\r
+\r
+ http://www.FreeRTOS.org/support - In return for receiving this top quality\r
+ embedded software for free we request you assist our global community by\r
+ participating in the support forum.\r
+\r
+ http://www.FreeRTOS.org/training - Investing in training allows your team to\r
+ be as productive as possible as early as possible. Now you can receive\r
+ FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers\r
+ Ltd, and the world's leading authority on the world's leading RTOS.\r
+\r
+ http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,\r
+ including FreeRTOS+Trace - an indispensable productivity tool, a DOS\r
+ compatible FAT file system, and our tiny thread aware UDP/IP stack.\r
+\r
+ http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.\r
+ Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.\r
+\r
+ http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High\r
+ Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS\r
+ licenses offer ticketed support, indemnification and commercial middleware.\r
+\r
+ http://www.SafeRTOS.com - High Integrity Systems also provide a safety\r
+ engineered and independently SIL3 certified version for use in safety and\r
+ mission critical applications that require provable dependability.\r
+\r
+ 1 tab == 4 spaces!\r
+*/\r
+\r
+/*******************************************************************************\r
+// Copyright (c) 2003-2015 Cadence Design Systems, Inc.\r
+//\r
+// Permission is hereby granted, free of charge, to any person obtaining\r
+// a copy of this software and associated documentation files (the\r
+// "Software"), to deal in the Software without restriction, including\r
+// without limitation the rights to use, copy, modify, merge, publish,\r
+// distribute, sublicense, and/or sell copies of the Software, and to\r
+// permit persons to whom the Software is furnished to do so, subject to\r
+// the following conditions:\r
+//\r
+// The above copyright notice and this permission notice shall be included\r
+// in all copies or substantial portions of the Software.\r
+//\r
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+#include <stdlib.h>\r
+#include <xtensa/config/core.h>\r
+\r
+#include "xtensa_rtos.h"\r
+\r
+#include "rom/ets_sys.h"\r
+#include "soc/cpu.h"\r
+\r
+#include "FreeRTOS.h"\r
+#include "task.h"\r
+\r
+#include "esp_panic.h"\r
+#include "esp_heap_caps.h"\r
+#include "esp_crosscore_int.h"\r
+\r
+#include "esp_intr_alloc.h"\r
+\r
+/* Defined in portasm.h */\r
+extern void _frxt_tick_timer_init(void);\r
+\r
+/* Defined in xtensa_context.S */\r
+extern void _xt_coproc_init(void);\r
+\r
+\r
+/* Interrupt source ID of the tick timer, used only for the trace hooks\r
+ * (traceISR_ENTER) in xPortSysTickHandler(). Which core timer is used is\r
+ * selected via Kconfig. */\r
+#if CONFIG_FREERTOS_CORETIMER_0\r
+    #define SYSTICK_INTR_ID (ETS_INTERNAL_TIMER0_INTR_SOURCE+ETS_INTERNAL_INTR_SOURCE_OFF)\r
+#endif\r
+#if CONFIG_FREERTOS_CORETIMER_1\r
+    #define SYSTICK_INTR_ID (ETS_INTERNAL_TIMER1_INTR_SOURCE+ETS_INTERNAL_INTR_SOURCE_OFF)\r
+#endif\r
+\r
+/*-----------------------------------------------------------*/\r
+\r
+/* Per-CPU state, also read/written directly from assembly in portasm.S\r
+ * (_frxt_int_enter/_frxt_int_exit) -- do not change type or layout. */\r
+unsigned port_xSchedulerRunning[portNUM_PROCESSORS] = {0}; // Duplicate of inaccessible xSchedulerRunning; needed at startup to avoid counting nesting\r
+unsigned port_interruptNesting[portNUM_PROCESSORS] = {0};  // Interrupt nesting level. Increased/decreased in portasm.c, _frxt_int_enter/_frxt_int_exit\r
+\r
+/*-----------------------------------------------------------*/\r
+\r
+// User exception dispatcher when exiting\r
+void _xt_user_exit(void);\r
+\r
+/*\r
+ * Stack initialization\r
+ *\r
+ * Builds the initial "interrupt style" stack frame (XtExcFrame) for a new\r
+ * task so that the first context restore drops straight into pxCode with\r
+ * pvParameters as its argument. Returns the new top-of-stack pointer\r
+ * (the base of the frame), which FreeRTOS stores as pxTopOfStack.\r
+ */\r
+#if portUSING_MPU_WRAPPERS\r
+StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters, BaseType_t xRunPrivileged )\r
+#else\r
+StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters )\r
+#endif\r
+{\r
+	StackType_t *sp, *tp;\r
+	XtExcFrame  *frame;\r
+	#if XCHAL_CP_NUM > 0\r
+	uint32_t *p;\r
+	#endif\r
+\r
+	/* Create interrupt stack frame aligned to 16 byte boundary */\r
+	sp = (StackType_t *) (((UBaseType_t)(pxTopOfStack + 1) - XT_CP_SIZE - XT_STK_FRMSZ) & ~0xf);\r
+\r
+	/* Clear the entire frame (do not use memset() because we don't depend on C library) */\r
+	for (tp = sp; tp <= pxTopOfStack; ++tp)\r
+		*tp = 0;\r
+\r
+	frame = (XtExcFrame *) sp;\r
+\r
+	/* Explicitly initialize certain saved registers */\r
+	frame->pc   = (UBaseType_t) pxCode;             /* task entrypoint                */\r
+	frame->a0   = 0;                                /* to terminate GDB backtrace     */\r
+	frame->a1   = (UBaseType_t) sp + XT_STK_FRMSZ;  /* physical top of stack frame    */\r
+	frame->exit = (UBaseType_t) _xt_user_exit;      /* user exception exit dispatcher */\r
+\r
+	/* Set initial PS to int level 0, EXCM disabled ('rfe' will enable), user mode. */\r
+	/* Also set entry point argument parameter. */\r
+	#ifdef __XTENSA_CALL0_ABI__\r
+	frame->a2 = (UBaseType_t) pvParameters;\r
+	frame->ps = PS_UM | PS_EXCM;\r
+	#else\r
+	/* + for windowed ABI also set WOE and CALLINC (pretend task was 'call4'd). */\r
+	frame->a6 = (UBaseType_t) pvParameters;\r
+	frame->ps = PS_UM | PS_EXCM | PS_WOE | PS_CALLINC(1);\r
+	#endif\r
+\r
+	#ifdef XT_USE_SWPRI\r
+	/* Set the initial virtual priority mask value to all 1's. */\r
+	frame->vpri = 0xFFFFFFFF;\r
+	#endif\r
+\r
+	#if XCHAL_CP_NUM > 0\r
+	/* Init the coprocessor save area (see xtensa_context.h) */\r
+	/* No access to TCB here, so derive indirectly. Stack growth is top to bottom.\r
+	 * //p = (uint32_t *) xMPUSettings->coproc_area;\r
+	 * NOTE: this mirrors the coproc_area computation in vPortStoreTaskMPUSettings().\r
+	 */\r
+	p = (uint32_t *)(((uint32_t) pxTopOfStack - XT_CP_SIZE) & ~0xf);\r
+	p[0] = 0;\r
+	p[1] = 0;\r
+	p[2] = (((uint32_t) p) + 12 + XCHAL_TOTAL_SA_ALIGN - 1) & -XCHAL_TOTAL_SA_ALIGN;\r
+	#endif\r
+\r
+	return sp;\r
+}\r
+\r
+/*-----------------------------------------------------------*/\r
+\r
+/* Scheduler tear-down hook required by the FreeRTOS port interface; a no-op here. */\r
+void vPortEndScheduler( void )\r
+{\r
+	/* It is unlikely that the Xtensa port will get stopped.  If required simply\r
+	disable the tick interrupt here. */\r
+}\r
+\r
+/*-----------------------------------------------------------*/\r
+\r
+/*\r
+ * Start the scheduler on this core: initialize coprocessor tracking and the\r
+ * tick timer, then tail-jump into the assembly dispatcher. Does not return\r
+ * in practice (the trailing return only silences the compiler).\r
+ */\r
+BaseType_t xPortStartScheduler( void )\r
+{\r
+	// Interrupts are disabled at this point and stack contains PS with enabled interrupts when task context is restored\r
+\r
+	#if XCHAL_CP_NUM > 0\r
+	/* Initialize co-processor management for tasks. Leave CPENABLE alone. */\r
+	_xt_coproc_init();\r
+	#endif\r
+\r
+	/* Init the tick divisor value */\r
+	_xt_tick_divisor_init();\r
+\r
+	/* Setup the hardware to generate the tick. */\r
+	_frxt_tick_timer_init();\r
+\r
+	port_xSchedulerRunning[xPortGetCoreID()] = 1;\r
+\r
+	// Cannot be directly called from C; never returns\r
+	__asm__ volatile ("call0    _frxt_dispatch\n");\r
+\r
+	/* Should not get here. */\r
+	return pdTRUE;\r
+}\r
+/*-----------------------------------------------------------*/\r
+\r
+/*\r
+ * Tick hook, called once per tick from _frxt_timer_int (portasm.S).\r
+ * Returns non-pdFALSE when a context switch is required, in which case it\r
+ * also requests the switch via portYIELD_FROM_ISR().\r
+ */\r
+BaseType_t xPortSysTickHandler( void )\r
+{\r
+	BaseType_t ret;\r
+	unsigned interruptMask;\r
+\r
+	portbenchmarkIntLatency();\r
+	traceISR_ENTER(SYSTICK_INTR_ID);\r
+\r
+	/* Interrupts up to configMAX_SYSCALL_INTERRUPT_PRIORITY must be\r
+	 * disabled before calling xTaskIncrementTick as it accesses the\r
+	 * kernel lists. */\r
+	interruptMask = portSET_INTERRUPT_MASK_FROM_ISR();\r
+	{\r
+		ret = xTaskIncrementTick();\r
+	}\r
+	portCLEAR_INTERRUPT_MASK_FROM_ISR( interruptMask );\r
+\r
+	if( ret != pdFALSE )\r
+	{\r
+		portYIELD_FROM_ISR();\r
+	} else {\r
+		traceISR_EXIT();\r
+	}\r
+	return ret;\r
+}\r
+\r
+\r
+/* Request a yield on another core via a cross-core interrupt. */\r
+void vPortYieldOtherCore( BaseType_t coreid ) {\r
+	esp_crosscore_int_send_yield( coreid );\r
+}\r
+\r
+/*-----------------------------------------------------------*/\r
+\r
+/*\r
+ * Used to set coprocessor area in stack. Current hack is to reuse MPU pointer for coprocessor area.\r
+ *\r
+ * The computed address is the 16-byte-aligned region just below the top of the\r
+ * task stack; it matches the derivation in pxPortInitialiseStack().\r
+ */\r
+#if portUSING_MPU_WRAPPERS\r
+void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMORY_REGION * const xRegions, StackType_t *pxBottomOfStack, uint32_t usStackDepth )\r
+{\r
+	#if XCHAL_CP_NUM > 0\r
+	xMPUSettings->coproc_area = (StackType_t*)((((uint32_t)(pxBottomOfStack + usStackDepth - 1)) - XT_CP_SIZE ) & ~0xf);\r
+\r
+\r
+	/* NOTE: we cannot initialize the coprocessor save area here because FreeRTOS is going to\r
+	 * clear the stack area after we return. This is done in pxPortInitialiseStack().\r
+	 */\r
+	#endif\r
+}\r
+\r
+/* Called when a task is deleted: release any coprocessor state it still owns. */\r
+void vPortReleaseTaskMPUSettings( xMPU_SETTINGS *xMPUSettings )\r
+{\r
+	/* If task has live floating point registers somewhere, release them */\r
+	_xt_coproc_release( xMPUSettings->coproc_area );\r
+}\r
+\r
+#endif\r
+\r
+/*\r
+ * Returns true if the current core is in ISR context; low prio ISR, med prio ISR or timer tick ISR. High prio ISRs\r
+ * aren't detected here, but they normally cannot call C code, so that should not be an issue anyway.\r
+ */\r
+BaseType_t xPortInIsrContext()\r
+{\r
+	unsigned int irqStatus;\r
+	BaseType_t ret;\r
+	/* Disable interrupts while sampling the nesting counter so the read is\r
+	 * consistent with the core we are actually running on. */\r
+	irqStatus=portENTER_CRITICAL_NESTED();\r
+	ret=(port_interruptNesting[xPortGetCoreID()] != 0);\r
+	portEXIT_CRITICAL_NESTED(irqStatus);\r
+	return ret;\r
+}\r
+\r
+/*\r
+ * This function will be called in High prio ISRs. Returns true if the current core was in ISR context\r
+ * before calling into high prio ISR context.\r
+ *\r
+ * No critical section here: a high-priority ISR cannot be preempted by the\r
+ * code that updates port_interruptNesting, so a plain read suffices.\r
+ */\r
+BaseType_t IRAM_ATTR xPortInterruptedFromISRContext()\r
+{\r
+	return (port_interruptNesting[xPortGetCoreID()] != 0);\r
+}\r
+\r
+/* Debug aid: print diagnostics and assert if called from ISR context. */\r
+void vPortAssertIfInISR()\r
+{\r
+	if (xPortInIsrContext()) {\r
+		ets_printf("core=%d port_interruptNesting=%d\n\n", xPortGetCoreID(), port_interruptNesting[xPortGetCoreID()]);\r
+	}\r
+	configASSERT(!xPortInIsrContext());\r
+}\r
+\r
+/*\r
+ * For kernel use: Initialize a per-CPU mux. Mux will be initialized unlocked.\r
+ */\r
+void vPortCPUInitializeMutex(portMUX_TYPE *mux) {\r
+\r
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG\r
+	/* Debug builds also track where the mux was last taken. */\r
+	ets_printf("Initializing mux %p\n", mux);\r
+	mux->lastLockedFn="(never locked)";\r
+	mux->lastLockedLine=-1;\r
+#endif\r
+	mux->owner=portMUX_FREE_VAL;\r
+	mux->count=0;\r
+}\r
+\r
+#include "portmux_impl.h"\r
+\r
+/*\r
+ * For kernel use: Acquire a per-CPU mux. Spinlocks, so don't hold on to these muxes for too long.\r
+ *\r
+ * Both variants disable interrupts on this core for the duration of the\r
+ * (possibly spinning) acquire; the timeout variant returns false on timeout.\r
+ * The debug variants additionally record the caller for diagnostics.\r
+ */\r
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG\r
+void vPortCPUAcquireMutex(portMUX_TYPE *mux, const char *fnName, int line) {\r
+	unsigned int irqStatus = portENTER_CRITICAL_NESTED();\r
+	vPortCPUAcquireMutexIntsDisabled(mux, portMUX_NO_TIMEOUT, fnName, line);\r
+	portEXIT_CRITICAL_NESTED(irqStatus);\r
+}\r
+\r
+bool vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles, const char *fnName, int line) {\r
+	unsigned int irqStatus = portENTER_CRITICAL_NESTED();\r
+	bool result = vPortCPUAcquireMutexIntsDisabled(mux, timeout_cycles, fnName, line);\r
+	portEXIT_CRITICAL_NESTED(irqStatus);\r
+	return result;\r
+}\r
+\r
+#else\r
+void vPortCPUAcquireMutex(portMUX_TYPE *mux) {\r
+	unsigned int irqStatus = portENTER_CRITICAL_NESTED();\r
+	vPortCPUAcquireMutexIntsDisabled(mux, portMUX_NO_TIMEOUT);\r
+	portEXIT_CRITICAL_NESTED(irqStatus);\r
+}\r
+\r
+bool vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles) {\r
+	unsigned int irqStatus = portENTER_CRITICAL_NESTED();\r
+	bool result = vPortCPUAcquireMutexIntsDisabled(mux, timeout_cycles);\r
+	portEXIT_CRITICAL_NESTED(irqStatus);\r
+	return result;\r
+}\r
+#endif\r
+\r
+\r
+/*\r
+ * For kernel use: Release a per-CPU mux\r
+ *\r
+ * Mux must be already locked by this core\r
+ *\r
+ * Interrupts are disabled around the release so the owner/count update is\r
+ * atomic with respect to ISRs on this core.\r
+ */\r
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG\r
+void vPortCPUReleaseMutex(portMUX_TYPE *mux, const char *fnName, int line) {\r
+	unsigned int irqStatus = portENTER_CRITICAL_NESTED();\r
+	vPortCPUReleaseMutexIntsDisabled(mux, fnName, line);\r
+	portEXIT_CRITICAL_NESTED(irqStatus);\r
+}\r
+#else\r
+void vPortCPUReleaseMutex(portMUX_TYPE *mux) {\r
+	unsigned int irqStatus = portENTER_CRITICAL_NESTED();\r
+	vPortCPUReleaseMutexIntsDisabled(mux);\r
+	portEXIT_CRITICAL_NESTED(irqStatus);\r
+}\r
+#endif\r
+\r
+/* Arm hardware watchpoint 1 to detect stack overflow near pxStackStart. */\r
+void vPortSetStackWatchpoint( void* pxStackStart ) {\r
+	//Set watchpoint 1 to watch the last 32 bytes of the stack.\r
+	//Unfortunately, the Xtensa watchpoints can't set a watchpoint on a random [base - base+n] region because\r
+	//the size works by masking off the lowest address bits. For that reason, we futz a bit and watch the lowest 32\r
+	//bytes of the stack we can actually watch. In general, this can cause the watchpoint to be triggered at most\r
+	//28 bytes early. The value 32 is chosen because it's larger than the stack canary, which in FreeRTOS is 20 bytes.\r
+	//This way, we make sure we trigger before/when the stack canary is corrupted, not after.\r
+	int addr=(int)pxStackStart;\r
+	addr=(addr+31)&(~31);   /* round up to the next 32-byte boundary */\r
+	esp_set_watchpoint(1, (char*)addr, 32, ESP_WATCHPOINT_STORE);\r
+}\r
+\r
+#if defined(CONFIG_SPIRAM_SUPPORT)\r
+/*\r
+ * Compare & set (S32C1) does not work in external RAM. Instead, this routine uses a mux (in internal memory) to fake it.\r
+ *\r
+ * Semantics mirror the S32C1I instruction: if *addr == compare then *addr is\r
+ * set to *set; in all cases *set receives the previous value of *addr.\r
+ */\r
+static portMUX_TYPE extram_mux = portMUX_INITIALIZER_UNLOCKED;\r
+\r
+void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set) {\r
+	uint32_t prev;\r
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG\r
+	vPortCPUAcquireMutexIntsDisabled(&extram_mux, portMUX_NO_TIMEOUT, __FUNCTION__, __LINE__);\r
+#else\r
+	vPortCPUAcquireMutexIntsDisabled(&extram_mux, portMUX_NO_TIMEOUT); \r
+#endif\r
+	prev=*addr;\r
+	if (prev==compare) {\r
+		*addr=*set;\r
+	}\r
+	*set=prev;\r
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG\r
+	vPortCPUReleaseMutexIntsDisabled(&extram_mux, __FUNCTION__, __LINE__);\r
+#else\r
+	vPortCPUReleaseMutexIntsDisabled(&extram_mux);\r
+#endif\r
+}\r
+#endif //defined(CONFIG_SPIRAM_SUPPORT)\r
+\r
+\r
+\r
+/* Return the configured tick rate in Hz (configTICK_RATE_HZ). */\r
+uint32_t xPortGetTickRateHz(void) {\r
+	return (uint32_t)configTICK_RATE_HZ;\r
+}\r
--- /dev/null
+/*\r
+//-----------------------------------------------------------------------------\r
+// Copyright (c) 2003-2015 Cadence Design Systems, Inc.\r
+//\r
+// Permission is hereby granted, free of charge, to any person obtaining\r
+// a copy of this software and associated documentation files (the\r
+// "Software"), to deal in the Software without restriction, including\r
+// without limitation the rights to use, copy, modify, merge, publish,\r
+// distribute, sublicense, and/or sell copies of the Software, and to\r
+// permit persons to whom the Software is furnished to do so, subject to\r
+// the following conditions:\r
+//\r
+// The above copyright notice and this permission notice shall be included\r
+// in all copies or substantial portions of the Software.\r
+//\r
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+//-----------------------------------------------------------------------------\r
+*/\r
+\r
+#include "xtensa_rtos.h"\r
+#include "sdkconfig.h"\r
+\r
+#define TOPOFSTACK_OFFS 0x00 /* StackType_t *pxTopOfStack */\r
+#define CP_TOPOFSTACK_OFFS 0x04 /* xMPU_SETTINGS.coproc_area */\r
+\r
+.extern pxCurrentTCB\r
+\r
+/*\r
+*******************************************************************************\r
+* Interrupt stack. The size of the interrupt stack is determined by the config\r
+* parameter "configISR_STACK_SIZE" in FreeRTOSConfig.h\r
+*\r
+* NOTE(review): _frxt_int_enter derives each CPU's stack top directly from\r
+* port_IntStack + configISR_STACK_SIZE*(cpu+1); port_IntStackTop merely labels\r
+* the end of the whole region.\r
+*******************************************************************************\r
+*/\r
+\r
+    .data\r
+    .align     16\r
+    .global    port_IntStack\r
+    .global    port_IntStackTop\r
+    .global    port_switch_flag\r
+port_IntStack:\r
+    .space     configISR_STACK_SIZE*portNUM_PROCESSORS		/* This allocates stacks for each individual CPU. */\r
+port_IntStackTop:\r
+    .word     0\r
+port_switch_flag:\r
+    .space     portNUM_PROCESSORS*4 /* One flag for each individual CPU. */\r
+\r
+    .text\r
+\r
+/*\r
+*******************************************************************************\r
+* _frxt_setup_switch\r
+* void _frxt_setup_switch(void);\r
+* \r
+* Sets an internal flag indicating that a task switch is required on return\r
+* from interrupt handling.\r
+* \r
+*******************************************************************************\r
+*/\r
+    .global     _frxt_setup_switch\r
+    .type       _frxt_setup_switch,@function\r
+    .align      4\r
+_frxt_setup_switch:\r
+\r
+    ENTRY(16)\r
+\r
+    /* Set this CPU's entry in port_switch_flag[] to 1. */\r
+    getcoreid a3\r
+    movi    a2, port_switch_flag\r
+    addx4   a2,  a3, a2\r
+\r
+    movi    a3, 1\r
+    s32i    a3, a2, 0\r
+\r
+    RET(16)\r
+\r
+\r
+\r
+\r
+\r
+\r
+/*\r
+*******************************************************************************\r
+* _frxt_int_enter\r
+* void _frxt_int_enter(void)\r
+*\r
+* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_ENTER function for\r
+* FreeRTOS. Saves the rest of the interrupt context (not already saved).\r
+* May only be called from assembly code by the 'call0' instruction, with\r
+* interrupts disabled.\r
+* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.\r
+*\r
+* On the first (non-nested) entry it also switches SP to this CPU's\r
+* interrupt stack after saving the task SP in its TCB.\r
+*\r
+*******************************************************************************\r
+*/\r
+    .globl  _frxt_int_enter\r
+    .type   _frxt_int_enter,@function\r
+    .align  4\r
+_frxt_int_enter:\r
+\r
+    /* Save a12-13 in the stack frame as required by _xt_context_save. */\r
+    s32i    a12, a1, XT_STK_A12\r
+    s32i    a13, a1, XT_STK_A13\r
+\r
+    /* Save return address in a safe place (free a0). */\r
+    mov     a12, a0\r
+\r
+    /* Save the rest of the interrupted context (preserves A12-13). */\r
+    call0   _xt_context_save\r
+\r
+    /*\r
+    Save interrupted task's SP in TCB only if not nesting.\r
+    Manage nesting directly rather than call the generic IntEnter()\r
+    (in windowed ABI we can't call a C function here anyway because PS.EXCM is still set).\r
+    */\r
+    getcoreid a4\r
+    movi    a2,  port_xSchedulerRunning\r
+    addx4   a2,  a4, a2\r
+    movi    a3,  port_interruptNesting\r
+    addx4   a3,  a4, a3\r
+    l32i    a2,  a2, 0                  /* a2 = port_xSchedulerRunning     */\r
+    beqz    a2,  1f                     /* scheduler not running, no tasks */\r
+    l32i    a2,  a3, 0                  /* a2 = port_interruptNesting      */\r
+    addi    a2,  a2, 1                  /* increment nesting count         */\r
+    s32i    a2,  a3, 0                  /* save nesting count              */\r
+    bnei    a2,  1, .Lnested            /* !=0 before incr, so nested      */\r
+\r
+    movi    a2,  pxCurrentTCB\r
+    addx4   a2,  a4, a2\r
+    l32i    a2,  a2, 0                  /* a2 = current TCB                */\r
+    beqz    a2,  1f\r
+    s32i    a1,  a2, TOPOFSTACK_OFFS    /* pxCurrentTCB->pxTopOfStack = SP */\r
+    movi    a1,  port_IntStack+configISR_STACK_SIZE   /* a1 = top of intr stack for CPU 0  */\r
+    movi    a2,  configISR_STACK_SIZE   /* add configISR_STACK_SIZE * cpu_num to arrive at top of stack for cpu_num */\r
+    mull    a2,  a4, a2\r
+    add     a1,  a1, a2                 /* for current proc */\r
+\r
+.Lnested:\r
+1:\r
+    mov     a0,  a12                    /* restore return addr and return  */\r
+    ret\r
+\r
+/*\r
+*******************************************************************************\r
+* _frxt_int_exit\r
+* void _frxt_int_exit(void)\r
+*\r
+* Implements the Xtensa RTOS porting layer's XT_RTOS_INT_EXIT function for\r
+* FreeRTOS. If required, calls vPortYieldFromInt() to perform task context\r
+* switching, restore the (possibly) new task's context, and return to the\r
+* exit dispatcher saved in the task's stack frame at XT_STK_EXIT.\r
+* May only be called from assembly code by the 'call0' instruction. Does not\r
+* return to caller.\r
+* See the description of the XT_RTOS_ENTER macro in xtensa_rtos.h.\r
+*\r
+*******************************************************************************\r
+*/\r
+    .globl  _frxt_int_exit\r
+    .type   _frxt_int_exit,@function\r
+    .align  4\r
+_frxt_int_exit:\r
+\r
+    getcoreid a4\r
+    movi    a2,  port_xSchedulerRunning\r
+    addx4   a2,  a4, a2\r
+    movi    a3,  port_interruptNesting\r
+    addx4   a3,  a4, a3\r
+    rsil    a0,  XCHAL_EXCM_LEVEL       /* lock out interrupts             */\r
+    l32i    a2,  a2, 0                  /* a2 = port_xSchedulerRunning     */\r
+    beqz    a2,  .Lnoswitch             /* scheduler not running, no tasks */\r
+    l32i    a2,  a3, 0                  /* a2 = port_interruptNesting      */\r
+    addi    a2,  a2, -1                 /* decrement nesting count         */\r
+    s32i    a2,  a3, 0                  /* save nesting count              */\r
+    bnez    a2,  .Lnesting              /* !=0 after decr so still nested  */\r
+\r
+    /* Outermost interrupt: switch SP back from the interrupt stack to the\r
+     * current task's saved stack pointer. */\r
+    movi    a2,  pxCurrentTCB\r
+    addx4   a2,  a4, a2\r
+    l32i    a2,  a2, 0                  /* a2 = current TCB                */\r
+    beqz    a2,  1f                     /* no task ? go to dispatcher      */\r
+    l32i    a1,  a2, TOPOFSTACK_OFFS    /* SP = pxCurrentTCB->pxTopOfStack */\r
+\r
+    movi    a2,  port_switch_flag       /* address of switch flag          */\r
+    addx4   a2,  a4, a2                 /* point to flag for this cpu      */\r
+    l32i    a3,  a2, 0                  /* a3 = port_switch_flag           */\r
+    beqz    a3,  .Lnoswitch             /* flag = 0 means no switch reqd   */\r
+    movi    a3,  0\r
+    s32i    a3,  a2, 0                  /* zero out the flag for next time */\r
+\r
+1:\r
+    /*\r
+    Call0 ABI callee-saved regs a12-15 need to be saved before possible preemption.\r
+    However a12-13 were already saved by _frxt_int_enter().\r
+    */\r
+    #ifdef __XTENSA_CALL0_ABI__\r
+    s32i    a14, a1, XT_STK_A14\r
+    s32i    a15, a1, XT_STK_A15\r
+    #endif\r
+\r
+    #ifdef __XTENSA_CALL0_ABI__\r
+    call0   vPortYieldFromInt       /* call dispatch inside the function; never returns */\r
+    #else\r
+    call4   vPortYieldFromInt       /* this one returns */\r
+    call0   _frxt_dispatch          /* tail-call dispatcher */\r
+    /* Never returns here. */\r
+    #endif\r
+\r
+.Lnoswitch:\r
+    /*\r
+    If we came here then about to resume the interrupted task.\r
+    */\r
+\r
+.Lnesting:\r
+    /*\r
+    We come here only if there was no context switch, that is if this\r
+    is a nested interrupt, or the interrupted task was not preempted.\r
+    In either case there's no need to load the SP.\r
+    */\r
+\r
+    /* Restore full context from interrupt stack frame */\r
+    call0   _xt_context_restore\r
+\r
+    /*\r
+    Must return via the exit dispatcher corresponding to the entrypoint from which\r
+    this was called. Interruptee's A0, A1, PS, PC are restored and the interrupt\r
+    stack frame is deallocated in the exit dispatcher.\r
+    */\r
+    l32i    a0,  a1, XT_STK_EXIT\r
+    ret\r
+\r
+\r
+/*\r
+**********************************************************************************************************\r
+* _frxt_timer_int\r
+* void _frxt_timer_int(void)\r
+*\r
+* Implements the Xtensa RTOS porting layer's XT_RTOS_TIMER_INT function for FreeRTOS.\r
+* Called every timer interrupt.\r
+* Manages the tick timer and calls xPortSysTickHandler() every tick.\r
+* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.\r
+*\r
+* Callable from C (obeys ABI conventions). Implemented in assembly code for performance.\r
+*\r
+**********************************************************************************************************\r
+*/\r
+    .globl  _frxt_timer_int\r
+    .type   _frxt_timer_int,@function\r
+    .align  4\r
+_frxt_timer_int:\r
+\r
+    /*\r
+    Xtensa timers work by comparing a cycle counter with a preset value.  Once the match occurs\r
+    an interrupt is generated, and the handler has to set a new cycle count into the comparator.\r
+    To avoid clock drift due to interrupt latency, the new cycle count is computed from the old,\r
+    not the time the interrupt was serviced. However if a timer interrupt is ever serviced more\r
+    than one tick late, it is necessary to process multiple ticks until the new cycle count is\r
+    in the future, otherwise the next timer interrupt would not occur until after the cycle\r
+    counter had wrapped (2^32 cycles later).\r
+\r
+    do {\r
+        ticks++;\r
+        old_ccompare = read_ccompare_i();\r
+        write_ccompare_i( old_ccompare + divisor );\r
+        service one tick;\r
+        diff = read_ccount() - old_ccompare;\r
+    } while ( diff > divisor );\r
+    */\r
+\r
+    ENTRY(16)\r
+\r
+    #ifdef CONFIG_PM_TRACE\r
+    movi a6, 1 /* = ESP_PM_TRACE_TICK */\r
+    getcoreid a7\r
+    call4 esp_pm_trace_enter\r
+    #endif // CONFIG_PM_TRACE\r
+\r
+.L_xt_timer_int_catchup:\r
+\r
+    /* Update the timer comparator for the next tick. */\r
+    #ifdef XT_CLOCK_FREQ\r
+    movi    a2, XT_TICK_DIVISOR         /* a2 = comparator increment       */\r
+    #else\r
+    movi    a3, _xt_tick_divisor\r
+    l32i    a2, a3, 0                   /* a2 = comparator increment       */\r
+    #endif\r
+    rsr     a3, XT_CCOMPARE             /* a3 = old comparator value       */\r
+    add     a4, a3, a2                  /* a4 = new comparator value       */\r
+    wsr     a4, XT_CCOMPARE             /* update comp. and clear interrupt */\r
+    esync\r
+\r
+    #ifdef __XTENSA_CALL0_ABI__\r
+    /* Preserve a2 and a3 across C calls.  */\r
+    s32i    a2, sp, 4\r
+    s32i    a3, sp, 8\r
+    #endif\r
+\r
+    /* Call the FreeRTOS tick handler (see port.c). */\r
+    #ifdef __XTENSA_CALL0_ABI__\r
+    call0   xPortSysTickHandler\r
+    #else\r
+    call4   xPortSysTickHandler\r
+    #endif\r
+\r
+    #ifdef __XTENSA_CALL0_ABI__\r
+    /* Restore a2 and a3.  */\r
+    l32i    a2, sp, 4\r
+    l32i    a3, sp, 8\r
+    #endif\r
+\r
+    /* Check if we need to process more ticks to catch up. */\r
+    esync                               /* ensure comparator update complete */\r
+    rsr     a4, CCOUNT                  /* a4 = cycle count                */\r
+    sub     a4, a4, a3                  /* diff = ccount - old comparator  */\r
+    blt     a2, a4, .L_xt_timer_int_catchup  /* repeat while diff > divisor */\r
+\r
+#ifdef CONFIG_PM_TRACE\r
+    movi a6, 1 /* = ESP_PM_TRACE_TICK */\r
+    getcoreid a7\r
+    call4 esp_pm_trace_exit\r
+#endif // CONFIG_PM_TRACE\r
+\r
+    RET(16)\r
+\r
+ /*\r
+**********************************************************************************************************\r
+* _frxt_tick_timer_init\r
+* void _frxt_tick_timer_init(void)\r
+*\r
+* Initialize timer and timer interrrupt handler (_xt_tick_divisor_init() has already been been called).\r
+* Callable from C (obeys ABI conventions on entry).\r
+*\r
+**********************************************************************************************************\r
+*/\r
+ .globl _frxt_tick_timer_init\r
+ .type _frxt_tick_timer_init,@function\r
+ .align 4\r
+_frxt_tick_timer_init:\r
+\r
+ ENTRY(16)\r
+\r
+\r
+ /* Set up the periodic tick timer (assume enough time to complete init). */\r
+ #ifdef XT_CLOCK_FREQ\r
+ movi a3, XT_TICK_DIVISOR\r
+ #else\r
+ movi a2, _xt_tick_divisor\r
+ l32i a3, a2, 0\r
+ #endif\r
+ rsr a2, CCOUNT /* current cycle count */\r
+ add a2, a2, a3 /* time of first timer interrupt */\r
+ wsr a2, XT_CCOMPARE /* set the comparator */\r
+\r
+ /*\r
+ Enable the timer interrupt at the device level. Don't write directly\r
+ to the INTENABLE register because it may be virtualized.\r
+ */\r
+ #ifdef __XTENSA_CALL0_ABI__\r
+ movi a2, XT_TIMER_INTEN\r
+ call0 xt_ints_on\r
+ #else\r
+ movi a6, XT_TIMER_INTEN\r
+ call4 xt_ints_on\r
+ #endif\r
+\r
+ RET(16)\r
+\r
+/*\r
+**********************************************************************************************************\r
+* DISPATCH THE HIGH READY TASK\r
+* void _frxt_dispatch(void)\r
+*\r
+* Switch context to the highest priority ready task, restore its state and dispatch control to it.\r
+*\r
+* This is a common dispatcher that acts as a shared exit path for all the context switch functions\r
+* including vPortYield() and vPortYieldFromInt(), all of which tail-call this dispatcher\r
+* (for windowed ABI vPortYieldFromInt() calls it indirectly via _frxt_int_exit() ).\r
+*\r
+* The Xtensa port uses different stack frames for solicited and unsolicited task suspension (see\r
+* comments on stack frames in xtensa_context.h). This function restores the state accordingly.\r
+* If restoring a task that solicited entry, restores the minimal state and leaves CPENABLE clear.\r
+* If restoring a task that was preempted, restores all state including the task's CPENABLE.\r
+*\r
+* Entry:\r
+*   pxCurrentTCB  points to the TCB of the task to suspend,\r
+*   Because it is tail-called without a true function entrypoint, it needs no 'entry' instruction.\r
+*\r
+* Exit:\r
+*   If incoming task called vPortYield() (solicited), this function returns as if from vPortYield().\r
+*   If incoming task was preempted by an interrupt, this function jumps to exit dispatcher.\r
+*\r
+**********************************************************************************************************\r
+*/\r
+    .globl  _frxt_dispatch\r
+    .type   _frxt_dispatch,@function\r
+    .align  4\r
+_frxt_dispatch:\r
+\r
+    #ifdef __XTENSA_CALL0_ABI__\r
+    call0   vTaskSwitchContext  // Get next TCB to resume\r
+    movi    a2, pxCurrentTCB\r
+    getcoreid a3\r
+    addx4   a2,  a3, a2\r
+    #else\r
+    call4   vTaskSwitchContext  // Get next TCB to resume\r
+    movi    a2, pxCurrentTCB\r
+    getcoreid a3\r
+    addx4   a2,  a3, a2\r
+    #endif\r
+    l32i    a3,  a2, 0\r
+    l32i    sp,  a3, TOPOFSTACK_OFFS     /* SP = next_TCB->pxTopOfStack;  */\r
+    s32i    a3,  a2, 0\r
+\r
+    /* Determine the type of stack frame. */\r
+    l32i    a2,  sp, XT_STK_EXIT        /* exit dispatcher or solicited flag */\r
+    bnez    a2,  .L_frxt_dispatch_stk\r
+\r
+.L_frxt_dispatch_sol:\r
+\r
+    /* Solicited stack frame. Restore minimal context and return from vPortYield(). */\r
+    l32i    a3,  sp, XT_SOL_PS\r
+    #ifdef __XTENSA_CALL0_ABI__\r
+    l32i    a12, sp, XT_SOL_A12\r
+    l32i    a13, sp, XT_SOL_A13\r
+    l32i    a14, sp, XT_SOL_A14\r
+    l32i    a15, sp, XT_SOL_A15\r
+    #endif\r
+    l32i    a0,  sp, XT_SOL_PC\r
+    #if XCHAL_CP_NUM > 0\r
+    /* Ensure wsr.CPENABLE is complete (should be, it was cleared on entry). */\r
+    rsync\r
+    #endif\r
+    /* As soon as PS is restored, interrupts can happen. No need to sync PS. */\r
+    wsr     a3,  PS\r
+    #ifdef __XTENSA_CALL0_ABI__\r
+    addi    sp,  sp, XT_SOL_FRMSZ\r
+    ret\r
+    #else\r
+    retw\r
+    #endif\r
+\r
+.L_frxt_dispatch_stk:\r
+\r
+    #if XCHAL_CP_NUM > 0\r
+    /* Restore CPENABLE from task's co-processor save area. */\r
+    movi    a3,  pxCurrentTCB           /* cp_state =                       */\r
+    getcoreid a2\r
+    addx4   a3,  a2, a3\r
+    l32i    a3,  a3, 0\r
+    l32i    a2,  a3, CP_TOPOFSTACK_OFFS /* StackType_t *pxStack;            */\r
+    l16ui   a3,  a2, XT_CPENABLE        /* CPENABLE = cp_state->cpenable;   */\r
+    wsr     a3,  CPENABLE\r
+    #endif\r
+\r
+    /* Interrupt stack frame. Restore full context and return to exit dispatcher. */\r
+    call0   _xt_context_restore\r
+\r
+    /* In Call0 ABI, restore callee-saved regs (A12, A13 already restored). */\r
+    #ifdef __XTENSA_CALL0_ABI__\r
+    l32i    a14, sp, XT_STK_A14\r
+    l32i    a15, sp, XT_STK_A15\r
+    #endif\r
+\r
+    #if XCHAL_CP_NUM > 0\r
+    /* Ensure wsr.CPENABLE has completed. */\r
+    rsync\r
+    #endif\r
+\r
+    /*\r
+    Must return via the exit dispatcher corresponding to the entrypoint from which\r
+    this was called. Interruptee's A0, A1, PS, PC are restored and the interrupt\r
+    stack frame is deallocated in the exit dispatcher.\r
+    */\r
+    l32i    a0,  sp, XT_STK_EXIT\r
+    ret\r
+\r
+\r
+/*\r
+**********************************************************************************************************\r
+* PERFORM A SOLICITED CONTEXT SWITCH (from a task)
+* void vPortYield(void)\r
+*\r
+* This function saves the minimal state needed for a solicited task suspension, clears CPENABLE,\r
+* then tail-calls the dispatcher _frxt_dispatch() to perform the actual context switch\r
+*\r
+* At Entry:\r
+* pxCurrentTCB points to the TCB of the task to suspend\r
+* Callable from C (obeys ABI conventions on entry).\r
+*\r
+* Does not return to caller.\r
+*\r
+**********************************************************************************************************\r
+*/\r
+ .globl vPortYield
+ .type vPortYield,@function
+ .align 4
+vPortYield:
+
+ /* Allocate the (minimal) solicited stack frame. */
+ #ifdef __XTENSA_CALL0_ABI__
+ addi sp, sp, -XT_SOL_FRMSZ
+ #else
+ entry sp, XT_SOL_FRMSZ
+ #endif
+
+ rsr a2, PS /* save caller's PS and return address */
+ s32i a0, sp, XT_SOL_PC
+ s32i a2, sp, XT_SOL_PS
+ #ifdef __XTENSA_CALL0_ABI__
+ s32i a12, sp, XT_SOL_A12 /* save callee-saved registers */
+ s32i a13, sp, XT_SOL_A13
+ s32i a14, sp, XT_SOL_A14
+ s32i a15, sp, XT_SOL_A15
+ #else
+ /* Spill register windows. Calling xthal_window_spill() causes extra */
+ /* spills and reloads, so we will set things up to call the _nw version */
+ /* instead to save cycles. */
+ movi a6, ~(PS_WOE_MASK|PS_INTLEVEL_MASK) /* spills a4-a7 if needed */
+ and a2, a2, a6 /* clear WOE, INTLEVEL */
+ addi a2, a2, XCHAL_EXCM_LEVEL /* set INTLEVEL */
+ wsr a2, PS
+ rsync
+ call0 xthal_window_spill_nw
+ l32i a2, sp, XT_SOL_PS /* restore PS */
+ wsr a2, PS
+ #endif
+
+ rsil a2, XCHAL_EXCM_LEVEL /* disable low/med interrupts */
+
+ #if XCHAL_CP_NUM > 0
+ /* Save coprocessor callee-saved state (if any). At this point CPENABLE */
+ /* should still reflect which CPs were in use (enabled). */
+ call0 _xt_coproc_savecs
+ #endif
+
+ /* Record this SP as the suspended task's top of stack. */
+ movi a2, pxCurrentTCB
+ getcoreid a3
+ addx4 a2, a3, a2 /* a2 = &pxCurrentTCB[coreID] */
+ l32i a2, a2, 0 /* a2 = pxCurrentTCB */
+ movi a3, 0
+ s32i a3, sp, XT_SOL_EXIT /* 0 to flag as solicited frame */
+ s32i sp, a2, TOPOFSTACK_OFFS /* pxCurrentTCB->pxTopOfStack = SP */
+
+ #if XCHAL_CP_NUM > 0
+ /* Clear CPENABLE, also in task's co-processor state save area. */
+ l32i a2, a2, CP_TOPOFSTACK_OFFS /* a2 = pxCurrentTCB->cp_state */
+ movi a3, 0
+ wsr a3, CPENABLE
+ beqz a2, 1f /* skip if no CP save area */
+ s16i a3, a2, XT_CPENABLE /* clear saved cpenable */
+1:
+ #endif
+
+ /* Tail-call dispatcher. */
+ call0 _frxt_dispatch
+ /* Never reaches here. */
+\r
+\r
+/*\r
+**********************************************************************************************************\r
+* PERFORM AN UNSOLICITED CONTEXT SWITCH (from an interrupt)\r
+* void vPortYieldFromInt(void)\r
+*\r
+* This calls the context switch hook (removed), saves and clears CPENABLE, then tail-calls the dispatcher\r
+* _frxt_dispatch() to perform the actual context switch.\r
+*\r
+* At Entry:\r
+* Interrupted task context has been saved in an interrupt stack frame at pxCurrentTCB->pxTopOfStack.\r
+* pxCurrentTCB points to the TCB of the task to suspend,\r
+* Callable from C (obeys ABI conventions on entry).\r
+*\r
+* At Exit:\r
+* Windowed ABI defers the actual context switch until the stack is unwound to interrupt entry.\r
+* Call0 ABI tail-calls the dispatcher directly (no need to unwind) so does not return to caller.\r
+*\r
+**********************************************************************************************************\r
+*/\r
+ .globl vPortYieldFromInt
+ .type vPortYieldFromInt,@function
+ .align 4
+vPortYieldFromInt:
+
+ ENTRY(16) /* ABI-appropriate function entry (ENTRY macro) */
+
+ #if XCHAL_CP_NUM > 0
+ /* Save CPENABLE in task's co-processor save area, and clear CPENABLE. */
+ movi a3, pxCurrentTCB /* cp_state = */
+ getcoreid a2
+ addx4 a3, a2, a3 /* a3 = &pxCurrentTCB[coreID] */
+ l32i a3, a3, 0
+
+ l32i a2, a3, CP_TOPOFSTACK_OFFS
+
+ rsr a3, CPENABLE
+ s16i a3, a2, XT_CPENABLE /* cp_state->cpenable = CPENABLE; */
+ movi a3, 0
+ wsr a3, CPENABLE /* disable all co-processors */
+ #endif
+
+ #ifdef __XTENSA_CALL0_ABI__
+ /* Tail-call dispatcher. */
+ call0 _frxt_dispatch
+ /* Never reaches here. */
+ #else
+ RET(16) /* windowed: return; switch is deferred to interrupt exit */
+ #endif
+\r
+/*\r
+**********************************************************************************************************\r
+* _frxt_task_coproc_state\r
+* void _frxt_task_coproc_state(void)\r
+*\r
+* Implements the Xtensa RTOS porting layer's XT_RTOS_CP_STATE function for FreeRTOS.\r
+*\r
+* May only be called when a task is running, not within an interrupt handler (returns 0 in that case).\r
+* May only be called from assembly code by the 'call0' instruction. Does NOT obey ABI conventions.\r
+* Returns in A15 a pointer to the base of the co-processor state save area for the current task.\r
+* See the detailed description of the XT_RTOS_ENTER macro in xtensa_rtos.h.\r
+*\r
+**********************************************************************************************************\r
+*/\r
+#if XCHAL_CP_NUM > 0
+
+ .globl _frxt_task_coproc_state
+ .type _frxt_task_coproc_state,@function
+ .align 4
+_frxt_task_coproc_state:
+
+
+ /* We can use a3 as a scratchpad, the instances of code calling XT_RTOS_CP_STATE don't seem to need it saved. */
+ getcoreid a3
+ movi a15, port_xSchedulerRunning /* if (port_xSchedulerRunning */
+ addx4 a15, a3,a15
+ l32i a15, a15, 0
+ beqz a15, 1f /* scheduler not running -> return 0 */
+ movi a15, port_interruptNesting /* && port_interruptNesting == 0 */
+ addx4 a15, a3, a15
+ l32i a15, a15, 0
+ bnez a15, 1f /* inside an interrupt -> return 0 */
+
+ movi a15, pxCurrentTCB
+ addx4 a15, a3, a15
+ l32i a15, a15, 0 /* && pxCurrentTCB != 0) { */
+
+
+ beqz a15, 2f /* no current TCB -> a15 is already 0 */
+ l32i a15, a15, CP_TOPOFSTACK_OFFS /* a15 = base of CP state save area */
+ ret
+
+1: movi a15, 0 /* result 0: no task CP state available */
+2: ret
+
+#endif /* XCHAL_CP_NUM > 0 */
--- /dev/null
+/*\r
+ Copyright (C) 2016-2017 Espressif Shanghai PTE LTD\r
+ Copyright (C) 2015 Real Time Engineers Ltd.\r
+\r
+ All rights reserved\r
+\r
+ FreeRTOS is free software; you can redistribute it and/or modify it under\r
+ the terms of the GNU General Public License (version 2) as published by the\r
+ Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.\r
+\r
+ ***************************************************************************\r
+ >>! NOTE: The modification to the GPL is included to allow you to !<<\r
+ >>! distribute a combined work that includes FreeRTOS without being !<<\r
+ >>! obliged to provide the source code for proprietary components !<<\r
+ >>! outside of the FreeRTOS kernel. !<<\r
+ ***************************************************************************\r
+\r
+ FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY\r
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\r
+ FOR A PARTICULAR PURPOSE. Full license text is available on the following\r
+ link: http://www.freertos.org/a00114.html\r
+*/\r
+\r
+/* This header exists for performance reasons, in order to inline the\r
+ implementation of vPortCPUAcquireMutexIntsDisabled and\r
+ vPortCPUReleaseMutexIntsDisabled into the\r
+ vTaskEnterCritical/vTaskExitCritical functions in task.c as well as the\r
+ vPortCPUAcquireMutex/vPortCPUReleaseMutex implementations.\r
+\r
+ Normally this kind of performance hack is over the top, but\r
+ vTaskEnterCritical/vTaskExitCritical is called a great\r
+ deal by FreeRTOS internals.\r
+\r
+ It should be #included by freertos port.c or tasks.c, in esp-idf.\r
+\r
+ The way it works is that it essentially uses portmux_impl.inc.h as a\r
+ generator template of sorts. When no external memory is used, this \r
+ template is only used to generate the vPortCPUAcquireMutexIntsDisabledInternal\r
+ and vPortCPUReleaseMutexIntsDisabledInternal functions, which use S32C1 to\r
+ do an atomic compare & swap. When external memory is used the functions\r
+ vPortCPUAcquireMutexIntsDisabledExtram and vPortCPUReleaseMutexIntsDisabledExtram\r
+ are also generated, which use uxPortCompareSetExtram to fake the S32C1 instruction.\r
+ The wrapper functions vPortCPUAcquireMutexIntsDisabled and \r
+ vPortCPUReleaseMutexIntsDisabled will then use the appropriate function to do the\r
+ actual lock/unlock.\r
+*/\r
+#include "soc/cpu.h"\r
+#include "portable.h"\r
+\r
+/* XOR one core ID with this value to get the other core ID */\r
+#define CORE_ID_XOR_SWAP (CORE_ID_PRO ^ CORE_ID_APP)\r
+\r
+\r
+\r
+\r
+//Define the mux routines for use with muxes in internal RAM\r
+#define PORTMUX_AQUIRE_MUX_FN_NAME vPortCPUAcquireMutexIntsDisabledInternal\r
+#define PORTMUX_RELEASE_MUX_FN_NAME vPortCPUReleaseMutexIntsDisabledInternal\r
+#define PORTMUX_COMPARE_SET_FN_NAME uxPortCompareSet\r
+#include "portmux_impl.inc.h"\r
+#undef PORTMUX_AQUIRE_MUX_FN_NAME\r
+#undef PORTMUX_RELEASE_MUX_FN_NAME\r
+#undef PORTMUX_COMPARE_SET_FN_NAME\r
+\r
+\r
+#if defined(CONFIG_SPIRAM_SUPPORT)\r
+\r
+#define PORTMUX_AQUIRE_MUX_FN_NAME vPortCPUAcquireMutexIntsDisabledExtram\r
+#define PORTMUX_RELEASE_MUX_FN_NAME vPortCPUReleaseMutexIntsDisabledExtram\r
+#define PORTMUX_COMPARE_SET_FN_NAME uxPortCompareSetExtram\r
+#include "portmux_impl.inc.h"\r
+#undef PORTMUX_AQUIRE_MUX_FN_NAME\r
+#undef PORTMUX_RELEASE_MUX_FN_NAME\r
+#undef PORTMUX_COMPARE_SET_FN_NAME\r
+\r
+#endif\r
+\r
+\r
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG\r
+#define PORTMUX_AQUIRE_MUX_FN_ARGS portMUX_TYPE *mux, int timeout_cycles, const char *fnName, int line\r
+#define PORTMUX_RELEASE_MUX_FN_ARGS portMUX_TYPE *mux, const char *fnName, int line\r
+#define PORTMUX_AQUIRE_MUX_FN_CALL_ARGS(x) x, timeout_cycles, fnName, line\r
+#define PORTMUX_RELEASE_MUX_FN_CALL_ARGS(x) x, fnName, line\r
+#else\r
+#define PORTMUX_AQUIRE_MUX_FN_ARGS portMUX_TYPE *mux, int timeout_cycles\r
+#define PORTMUX_RELEASE_MUX_FN_ARGS portMUX_TYPE *mux\r
+#define PORTMUX_AQUIRE_MUX_FN_CALL_ARGS(x) x, timeout_cycles\r
+#define PORTMUX_RELEASE_MUX_FN_CALL_ARGS(x) x\r
+#endif\r
+\r
+\r
+/* Dispatch to the correct acquire implementation for the mux's memory
+ region. Muxes placed in external RAM cannot use the S32C1-based
+ compare&swap directly (see file header), so they take the Extram
+ path; all other muxes use the internal implementation. */
+static inline bool __attribute__((always_inline)) vPortCPUAcquireMutexIntsDisabled(PORTMUX_AQUIRE_MUX_FN_ARGS) {
+#if defined(CONFIG_SPIRAM_SUPPORT)
+ const bool mux_in_extram = esp_ptr_external_ram(mux);
+ if (mux_in_extram) {
+ return vPortCPUAcquireMutexIntsDisabledExtram(PORTMUX_AQUIRE_MUX_FN_CALL_ARGS(mux));
+ }
+#endif
+ return vPortCPUAcquireMutexIntsDisabledInternal(PORTMUX_AQUIRE_MUX_FN_CALL_ARGS(mux));
+}
+\r
+\r
+/* Dispatch to the correct release implementation for the mux's memory
+ region, mirroring vPortCPUAcquireMutexIntsDisabled: external-RAM muxes
+ go through the Extram variant, everything else through Internal. */
+static inline void vPortCPUReleaseMutexIntsDisabled(PORTMUX_RELEASE_MUX_FN_ARGS) {
+#if defined(CONFIG_SPIRAM_SUPPORT)
+ if (esp_ptr_external_ram(mux)) {
+ vPortCPUReleaseMutexIntsDisabledExtram(PORTMUX_RELEASE_MUX_FN_CALL_ARGS(mux));
+ } else {
+ vPortCPUReleaseMutexIntsDisabledInternal(PORTMUX_RELEASE_MUX_FN_CALL_ARGS(mux));
+ }
+#else
+ vPortCPUReleaseMutexIntsDisabledInternal(PORTMUX_RELEASE_MUX_FN_CALL_ARGS(mux));
+#endif
+}
+\r
--- /dev/null
+/*\r
+ Copyright (C) 2016-2017 Espressif Shanghai PTE LTD\r
+ Copyright (C) 2015 Real Time Engineers Ltd.\r
+\r
+ All rights reserved\r
+\r
+ FreeRTOS is free software; you can redistribute it and/or modify it under\r
+ the terms of the GNU General Public License (version 2) as published by the\r
+ Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.\r
+\r
+ ***************************************************************************\r
+ >>! NOTE: The modification to the GPL is included to allow you to !<<\r
+ >>! distribute a combined work that includes FreeRTOS without being !<<\r
+ >>! obliged to provide the source code for proprietary components !<<\r
+ >>! outside of the FreeRTOS kernel. !<<\r
+ ***************************************************************************\r
+\r
+ FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY\r
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\r
+ FOR A PARTICULAR PURPOSE. Full license text is available on the following\r
+ link: http://www.freertos.org/a00114.html\r
+*/\r
+\r
+\r
+/*\r
+ Warning: funky preprocessor hackery ahead. Including these headers will generate two\r
+ functions, whose names are defined by the preprocessor macros
+ PORTMUX_AQUIRE_MUX_FN_NAME and PORTMUX_RELEASE_MUX_FN_NAME. In order to do the compare\r
+ and exchange function, they will use whatever PORTMUX_COMPARE_SET_FN_NAME resolves to.\r
+\r
+ In some scenarios, this header is included *twice* in portmux_impl.h: one time \r
+ for the 'normal' mux code which uses a compare&exchange routine, another time \r
+ to generate code for a second set of these routines that use a second mux \r
+ (in internal ram) to fake a compare&exchange on a variable in external memory.\r
+*/\r
+\r
+\r
+\r
+/* Acquire a spinlock-style portMUX. Spins (optionally bounded by
+ timeout_cycles CPU cycles) until this core owns mux->owner, then
+ increments the recursion count. Returns true on success, false on
+ timeout. Caller is expected to have interrupts disabled already
+ (per the "IntsDisabled" naming — confirm against callers). On
+ unicore builds the body compiles away and it returns true. */
+static inline bool __attribute__((always_inline))
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
+PORTMUX_AQUIRE_MUX_FN_NAME(portMUX_TYPE *mux, int timeout_cycles, const char *fnName, int line) {
+#else
+PORTMUX_AQUIRE_MUX_FN_NAME(portMUX_TYPE *mux, int timeout_cycles) {
+#endif
+
+
+#if !CONFIG_FREERTOS_UNICORE
+ uint32_t res;
+ portBASE_TYPE coreID, otherCoreID;
+ uint32_t ccount_start;
+ bool set_timeout = timeout_cycles > portMUX_NO_TIMEOUT;
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
+ if (!set_timeout) {
+ timeout_cycles = 10000; // Always set a timeout in debug mode
+ set_timeout = true;
+ }
+#endif
+ if (set_timeout) { // Timeout
+ RSR(CCOUNT, ccount_start); // remember start cycle count
+ }
+
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
+ uint32_t owner = mux->owner;
+ if (owner != portMUX_FREE_VAL && owner != CORE_ID_PRO && owner != CORE_ID_APP) {
+ ets_printf("ERROR: vPortCPUAcquireMutex: mux %p is uninitialized (0x%X)! Called from %s line %d.\n", mux, owner, fnName, line);
+ mux->owner=portMUX_FREE_VAL;
+ }
+#endif
+
+ /* Spin until we own the core */
+
+ RSR(PRID, coreID);
+ /* Note: coreID is the full 32 bit core ID (CORE_ID_PRO/CORE_ID_APP),
+ not the 0/1 value returned by xPortGetCoreID()
+ */
+ otherCoreID = CORE_ID_XOR_SWAP ^ coreID;
+ do {
+ /* mux->owner should be one of portMUX_FREE_VAL, CORE_ID_PRO,
+ CORE_ID_APP:
+
+ - If portMUX_FREE_VAL, we want to atomically set to 'coreID'.
+ - If "our" coreID, we can drop through immediately.
+ - If "otherCoreID", we spin here.
+ */
+ res = coreID;
+ /* compare&set leaves the pre-existing owner value in res */
+ PORTMUX_COMPARE_SET_FN_NAME(&mux->owner, portMUX_FREE_VAL, &res);
+
+ if (res != otherCoreID) {
+ break; // mux->owner is "our" coreID
+ }
+
+ if (set_timeout) {
+ uint32_t ccount_now;
+ RSR(CCOUNT, ccount_now);
+ /* unsigned subtraction handles CCOUNT wraparound */
+ if (ccount_now - ccount_start > (unsigned)timeout_cycles) {
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
+ ets_printf("Timeout on mux! last non-recursive lock %s line %d, curr %s line %d\n", mux->lastLockedFn, mux->lastLockedLine, fnName, line);
+ ets_printf("Owner 0x%x count %d\n", mux->owner, mux->count);
+#endif
+ return false;
+ }
+ }
+ } while (1);
+
+ assert(res == coreID || res == portMUX_FREE_VAL); /* any other value implies memory corruption or uninitialized mux */
+ assert((res == portMUX_FREE_VAL) == (mux->count == 0)); /* we're first to lock iff count is zero */
+ assert(mux->count < 0xFF); /* Bad count value implies memory corruption */
+
+ /* now we own it, we can increment the refcount */
+ mux->count++;
+
+
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
+ if (res==portMUX_FREE_VAL) { //initial lock
+ mux->lastLockedFn=fnName;
+ mux->lastLockedLine=line;
+ } else {
+ ets_printf("Recursive lock: count=%d last non-recursive lock %s line %d, curr %s line %d\n", mux->count-1,
+ mux->lastLockedFn, mux->lastLockedLine, fnName, line);
+ }
+#endif /* CONFIG_FREERTOS_PORTMUX_DEBUG */
+#endif /* CONFIG_FREERTOS_UNICORE */
+ return true;
+}
+\r
+/* Release a previously acquired portMUX: decrement the recursion count
+ and, when it reaches zero, set the owner back to portMUX_FREE_VAL so
+ the other core can take it. The calling core must hold the mux
+ (asserted below). On unicore builds the body compiles away. */
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
+static inline void PORTMUX_RELEASE_MUX_FN_NAME(portMUX_TYPE *mux, const char *fnName, int line) {
+#else
+static inline void PORTMUX_RELEASE_MUX_FN_NAME(portMUX_TYPE *mux) {
+#endif
+
+
+#if !CONFIG_FREERTOS_UNICORE
+ portBASE_TYPE coreID;
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
+ const char *lastLockedFn=mux->lastLockedFn;
+ int lastLockedLine=mux->lastLockedLine;
+ mux->lastLockedFn=fnName;
+ mux->lastLockedLine=line;
+ uint32_t owner = mux->owner;
+ if (owner != portMUX_FREE_VAL && owner != CORE_ID_PRO && owner != CORE_ID_APP) {
+ ets_printf("ERROR: vPortCPUReleaseMutex: mux %p is invalid (0x%x)!\n", mux, mux->owner);
+ }
+#endif
+
+#if CONFIG_FREERTOS_PORTMUX_DEBUG || !defined(NDEBUG)
+ /* Full 32-bit core ID, same encoding as mux->owner (not 0/1). */
+ RSR(PRID, coreID);
+#endif
+
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
+ if (coreID != mux->owner) {
+ ets_printf("ERROR: vPortCPUReleaseMutex: mux %p was already unlocked!\n", mux);
+ ets_printf("Last non-recursive unlock %s line %d, curr unlock %s line %d\n", lastLockedFn, lastLockedLine, fnName, line);
+ }
+#endif
+
+ assert(coreID == mux->owner); // This is a mutex we didn't lock, or it's corrupt
+
+ mux->count--;
+ if(mux->count == 0) {
+ mux->owner = portMUX_FREE_VAL; /* final unlock: hand the mux back */
+ } else {
+ assert(mux->count < 0x100); // Indicates memory corruption
+#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG_RECURSIVE
+ ets_printf("Recursive unlock: count=%d last locked %s line %d, curr %s line %d\n", mux->count, lastLockedFn, lastLockedLine, fnName, line);
+#endif
+ }
+#endif //!CONFIG_FREERTOS_UNICORE
+}
--- /dev/null
+/*******************************************************************************\r
+Copyright (c) 2006-2015 Cadence Design Systems Inc.\r
+\r
+Permission is hereby granted, free of charge, to any person obtaining\r
+a copy of this software and associated documentation files (the\r
+"Software"), to deal in the Software without restriction, including\r
+without limitation the rights to use, copy, modify, merge, publish,\r
+distribute, sublicense, and/or sell copies of the Software, and to\r
+permit persons to whom the Software is furnished to do so, subject to\r
+the following conditions:\r
+\r
+The above copyright notice and this permission notice shall be included\r
+in all copies or substantial portions of the Software.\r
+\r
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+--------------------------------------------------------------------------------\r
+\r
+ XTENSA CONTEXT SAVE AND RESTORE ROUTINES\r
+\r
+Low-level Call0 functions for handling generic context save and restore of\r
+registers not specifically addressed by the interrupt vectors and handlers.\r
+Those registers (not handled by these functions) are PC, PS, A0, A1 (SP).\r
+Except for the calls to RTOS functions, this code is generic to Xtensa.\r
+\r
+Note that in Call0 ABI, interrupt handlers are expected to preserve the callee-\r
+save regs (A12-A15), which is always the case if the handlers are coded in C.\r
+However A12, A13 are made available as scratch registers for interrupt dispatch\r
+code, so are presumed saved anyway, and are always restored even in Call0 ABI.\r
+Only A14, A15 are truly handled as callee-save regs.\r
+\r
+Because Xtensa is a configurable architecture, this port supports all user\r
+generated configurations (except restrictions stated in the release notes).\r
+This is accomplished by conditional compilation using macros and functions\r
+defined in the Xtensa HAL (hardware adaptation layer) for your configuration.\r
+Only the processor state included in your configuration is saved and restored,\r
+including any processor state added by user configuration options or TIE.\r
+\r
+*******************************************************************************/\r
+\r
+/* Warn nicely if this file gets named with a lowercase .s instead of .S: */\r
+#define NOERROR #\r
+NOERROR: .error "C preprocessor needed for this file: make sure its filename\\r
+ ends in uppercase .S, or use xt-xcc's -x assembler-with-cpp option."\r
+\r
+\r
+#include "xtensa_rtos.h"\r
+#include "xtensa_context.h"\r
+\r
+#ifdef XT_USE_OVLY\r
+#include <xtensa/overlay_os_asm.h>\r
+#endif\r
+\r
+ .text\r
+\r
+\r
+\r
+/*******************************************************************************\r
+\r
+_xt_context_save\r
+\r
+ !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!\r
+\r
+Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the\r
+interrupt stack frame defined in xtensa_rtos.h.\r
+Its counterpart is _xt_context_restore (which also restores A12, A13).\r
+\r
+Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame.\r
+This function preserves A12 & A13 in order to provide the caller with 2 scratch \r
+regs that need not be saved over the call to this function. The choice of which\r
+2 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw,\r
+to avoid moving data more than necessary. Caller can assign regs accordingly.\r
+\r
+Entry Conditions:\r
+ A0 = Return address in caller.\r
+ A1 = Stack pointer of interrupted thread or handler ("interruptee").\r
+ Original A12, A13 have already been saved in the interrupt stack frame.\r
+ Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the \r
+ point of interruption.\r
+ If windowed ABI, PS.EXCM = 1 (exceptions disabled).\r
+\r
+Exit conditions:\r
+ A0 = Return address in caller.\r
+ A1 = Stack pointer of interrupted thread or handler ("interruptee").\r
+ A12, A13 as at entry (preserved).\r
+ If windowed ABI, PS.EXCM = 1 (exceptions disabled).\r
+\r
+*******************************************************************************/\r
+\r
+ .global _xt_context_save
+ .type _xt_context_save,@function
+ .align 4
+ .literal_position
+ .align 4
+_xt_context_save:
+
+ /* Save general-purpose regs a2-a11 into the interrupt stack frame. */
+ s32i a2, sp, XT_STK_A2
+ s32i a3, sp, XT_STK_A3
+ s32i a4, sp, XT_STK_A4
+ s32i a5, sp, XT_STK_A5
+ s32i a6, sp, XT_STK_A6
+ s32i a7, sp, XT_STK_A7
+ s32i a8, sp, XT_STK_A8
+ s32i a9, sp, XT_STK_A9
+ s32i a10, sp, XT_STK_A10
+ s32i a11, sp, XT_STK_A11
+
+ /*
+ Call0 ABI callee-saved regs a12-15 do not need to be saved here.
+ a12-13 are the caller's responsibility so it can use them as scratch.
+ So only need to save a14-a15 here for Windowed ABI (not Call0).
+ */
+ #ifndef __XTENSA_CALL0_ABI__
+ s32i a14, sp, XT_STK_A14
+ s32i a15, sp, XT_STK_A15
+ #endif
+
+ rsr a3, SAR /* save shift-amount register */
+ s32i a3, sp, XT_STK_SAR
+
+ #if XCHAL_HAVE_LOOPS
+ /* Save zero-overhead loop registers. */
+ rsr a3, LBEG
+ s32i a3, sp, XT_STK_LBEG
+ rsr a3, LEND
+ s32i a3, sp, XT_STK_LEND
+ rsr a3, LCOUNT
+ s32i a3, sp, XT_STK_LCOUNT
+ #endif
+
+ #ifdef XT_USE_SWPRI
+ /* Save virtual priority mask */
+ movi a3, _xt_vpri_mask
+ l32i a3, a3, 0
+ s32i a3, sp, XT_STK_VPRI
+ #endif
+
+ #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
+ mov a9, a0 /* preserve ret addr */
+ #endif
+
+ #ifndef __XTENSA_CALL0_ABI__
+ /*
+ To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15.
+ Need to save a9,12,13 temporarily (in frame temps) and recover originals.
+ Interrupts need to be disabled below XCHAL_EXCM_LEVEL and window overflow
+ and underflow exceptions disabled (assured by PS.EXCM == 1).
+ */
+ s32i a12, sp, XT_STK_TMP0 /* temp. save stuff in stack frame */
+ s32i a13, sp, XT_STK_TMP1
+ s32i a9, sp, XT_STK_TMP2
+
+ /*
+ Save the overlay state if we are supporting overlays. Since we just saved
+ three registers, we can conveniently use them here. Note that as of now,
+ overlays only work for windowed calling ABI.
+ */
+ #ifdef XT_USE_OVLY
+ l32i a9, sp, XT_STK_PC /* recover saved PC */
+ _xt_overlay_get_state a9, a12, a13
+ s32i a9, sp, XT_STK_OVLY /* save overlay state */
+ #endif
+
+ l32i a12, sp, XT_STK_A12 /* recover original a9,12,13 */
+ l32i a13, sp, XT_STK_A13
+ l32i a9, sp, XT_STK_A9
+ addi sp, sp, XT_STK_FRMSZ /* restore the interruptee's SP */
+ call0 xthal_window_spill_nw /* preserves only a4,5,8,9,12,13 */
+ addi sp, sp, -XT_STK_FRMSZ /* back to the interrupt frame */
+ l32i a12, sp, XT_STK_TMP0 /* recover stuff from stack frame */
+ l32i a13, sp, XT_STK_TMP1
+ l32i a9, sp, XT_STK_TMP2
+ #endif
+
+ #if XCHAL_EXTRA_SA_SIZE > 0
+ /*
+ NOTE: Normally the xthal_save_extra_nw macro only affects address
+ registers a2-a5. It is theoretically possible for Xtensa processor
+ designers to write TIE that causes more address registers to be
+ affected, but it is generally unlikely. If that ever happens,
+ more registers need to be saved/restored around this macro invocation.
+ Here we assume a9,12,13 are preserved.
+ Future Xtensa tools releases might limit the regs that can be affected.
+ */
+ addi a2, sp, XT_STK_EXTRA /* where to save it */
+ # if XCHAL_EXTRA_SA_ALIGN > 16
+ movi a3, -XCHAL_EXTRA_SA_ALIGN
+ and a2, a2, a3 /* align dynamically >16 bytes */
+ # endif
+ call0 xthal_save_extra_nw /* destroys a0,2,3,4,5 */
+ #endif
+
+ #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
+ mov a0, a9 /* retrieve ret addr */
+ #endif
+
+ ret
+\r
+/*******************************************************************************\r
+\r
+_xt_context_restore\r
+\r
+ !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!\r
+\r
+Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0\r
+ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt \r
+stack frame defined in xtensa_rtos.h .\r
+Its counterpart is _xt_context_save (whose caller saved A12, A13).\r
+\r
+Caller is responsible to restore PC, PS, A0, A1 (SP).\r
+\r
+Entry Conditions:\r
+ A0 = Return address in caller.\r
+ A1 = Stack pointer of interrupted thread or handler ("interruptee").\r
+\r
+Exit conditions:\r
+ A0 = Return address in caller.\r
+ A1 = Stack pointer of interrupted thread or handler ("interruptee").\r
+ Other processor state except PC, PS, A0, A1 (SP), is as at the point \r
+ of interruption.\r
+\r
+*******************************************************************************/\r
+\r
+ .global _xt_context_restore
+ .type _xt_context_restore,@function
+ .align 4
+ .literal_position
+ .align 4
+_xt_context_restore:
+
+ #if XCHAL_EXTRA_SA_SIZE > 0
+ /*
+ NOTE: Normally the xthal_restore_extra_nw macro only affects address
+ registers a2-a5. It is theoretically possible for Xtensa processor
+ designers to write TIE that causes more address registers to be
+ affected, but it is generally unlikely. If that ever happens,
+ more registers need to be saved/restored around this macro invocation.
+ Here we only assume a13 is preserved.
+ Future Xtensa tools releases might limit the regs that can be affected.
+ */
+ mov a13, a0 /* preserve ret addr */
+ addi a2, sp, XT_STK_EXTRA /* where to find it */
+ # if XCHAL_EXTRA_SA_ALIGN > 16
+ movi a3, -XCHAL_EXTRA_SA_ALIGN
+ and a2, a2, a3 /* align dynamically >16 bytes */
+ # endif
+ call0 xthal_restore_extra_nw /* destroys a0,2,3,4,5 */
+ mov a0, a13 /* retrieve ret addr */
+ #endif
+
+ #if XCHAL_HAVE_LOOPS
+ /* Restore zero-overhead loop registers (loads interleaved with wsr). */
+ l32i a2, sp, XT_STK_LBEG
+ l32i a3, sp, XT_STK_LEND
+ wsr a2, LBEG
+ l32i a2, sp, XT_STK_LCOUNT
+ wsr a3, LEND
+ wsr a2, LCOUNT
+ #endif
+
+ #ifdef XT_USE_OVLY
+ /*
+ If we are using overlays, this is a good spot to check if we need
+ to restore an overlay for the incoming task. Here we have a bunch
+ of registers to spare. Note that this step is going to use a few
+ bytes of storage below SP (SP-20 to SP-32) if an overlay is going
+ to be restored.
+ */
+ l32i a2, sp, XT_STK_PC /* retrieve PC */
+ l32i a3, sp, XT_STK_PS /* retrieve PS */
+ l32i a4, sp, XT_STK_OVLY /* retrieve overlay state */
+ l32i a5, sp, XT_STK_A1 /* retrieve stack ptr */
+ _xt_overlay_check_map a2, a3, a4, a5, a6
+ s32i a2, sp, XT_STK_PC /* save updated PC */
+ s32i a3, sp, XT_STK_PS /* save updated PS */
+ #endif
+
+ #ifdef XT_USE_SWPRI
+ /* Restore virtual interrupt priority and interrupt enable */
+ movi a3, _xt_intdata
+ l32i a4, a3, 0 /* a4 = _xt_intenable */
+ l32i a5, sp, XT_STK_VPRI /* a5 = saved _xt_vpri_mask */
+ and a4, a4, a5
+ wsr a4, INTENABLE /* update INTENABLE */
+ s32i a5, a3, 4 /* restore _xt_vpri_mask */
+ #endif
+
+ /* Restore SAR and general-purpose regs a2-a11. */
+ l32i a3, sp, XT_STK_SAR
+ l32i a2, sp, XT_STK_A2
+ wsr a3, SAR
+ l32i a3, sp, XT_STK_A3
+ l32i a4, sp, XT_STK_A4
+ l32i a5, sp, XT_STK_A5
+ l32i a6, sp, XT_STK_A6
+ l32i a7, sp, XT_STK_A7
+ l32i a8, sp, XT_STK_A8
+ l32i a9, sp, XT_STK_A9
+ l32i a10, sp, XT_STK_A10
+ l32i a11, sp, XT_STK_A11
+
+ /*
+ Call0 ABI callee-saved regs a12-15 do not need to be restored here.
+ However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
+ so need to be restored anyway, despite being callee-saved in Call0.
+ */
+ l32i a12, sp, XT_STK_A12
+ l32i a13, sp, XT_STK_A13
+ #ifndef __XTENSA_CALL0_ABI__
+ l32i a14, sp, XT_STK_A14
+ l32i a15, sp, XT_STK_A15
+ #endif
+
+ ret
+\r
+\r
+/*******************************************************************************\r
+\r
+_xt_coproc_init\r
+\r
+Initializes global co-processor management data, setting all co-processors\r
+to "unowned". Leaves CPENABLE as it found it (does NOT clear it).\r
+\r
+Called during initialization of the RTOS, before any threads run.\r
+\r
+This may be called from normal Xtensa single-threaded application code which\r
+might use co-processors. The Xtensa run-time initialization enables all \r
+co-processors. They must remain enabled here, else a co-processor exception\r
+might occur outside of a thread, which the exception handler doesn't expect.\r
+\r
+Entry Conditions:\r
+ Xtensa single-threaded run-time environment is in effect.\r
+ No thread is yet running.\r
+\r
+Exit conditions:\r
+ None.\r
+\r
+Obeys ABI conventions per prototype:\r
+ void _xt_coproc_init(void)\r
+\r
+*******************************************************************************/\r
+\r
+#if XCHAL_CP_NUM > 0
+
+ .global _xt_coproc_init
+ .type _xt_coproc_init,@function
+ .align 4
+ .literal_position
+ .align 4
+_xt_coproc_init:
+ ENTRY0
+
+ /* Initialize thread co-processor ownerships to 0 (unowned). */
+ /* The owner array has XCHAL_CP_MAX entries per core. */
+ movi a2, _xt_coproc_owner_sa /* a2 = base of owner array */
+ addi a3, a2, (XCHAL_CP_MAX*portNUM_PROCESSORS) << 2 /* a3 = top+1 of owner array */
+ movi a4, 0 /* a4 = 0 (unowned) */
+1: s32i a4, a2, 0 /* zero one entry ... */
+ addi a2, a2, 4 /* ... advance to the next ... */
+ bltu a2, a3, 1b /* ... until end of array */
+
+ RET0
+
+#endif
+\r
+\r
+/*******************************************************************************\r
+\r
+_xt_coproc_release\r
+\r
+Releases any and all co-processors owned by a given thread. The thread is \r
+identified by its co-processor state save area defined in xtensa_context.h.
+\r
+Must be called before a thread's co-proc save area is deleted to avoid\r
+memory corruption when the exception handler tries to save the state.\r
+May be called when a thread terminates or completes but does not delete\r
+the co-proc save area, to avoid the exception handler having to save the \r
+thread's co-proc state before another thread can use it (optimization).\r
+\r
+Needs to be called on the processor the thread was running on. Unpinned threads\r
+won't have an entry here because they get pinned as soon as they use a coprocessor.
+\r
+Entry Conditions:\r
+ A2 = Pointer to base of co-processor state save area.\r
+\r
+Exit conditions:\r
+ None.\r
+\r
+Obeys ABI conventions per prototype:\r
+ void _xt_coproc_release(void * coproc_sa_base)\r
+\r
+*******************************************************************************/\r
+\r
+#if XCHAL_CP_NUM > 0
+
+ .global _xt_coproc_release
+ .type _xt_coproc_release,@function
+ .align 4
+ .literal_position
+ .align 4
+_xt_coproc_release:
+ ENTRY0 /* a2 = base of save area */
+
+ /* The owner array is segmented per core: select this core's section. */
+ getcoreid a5 /* a5 = this core's id */
+ movi a3, XCHAL_CP_MAX << 2 /* a3 = bytes per core's section */
+ mull a5, a5, a3 /* a5 = byte offset of this core's section */
+ movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
+ add a3, a3, a5 /* a3 = this core's owner array */
+
+ addi a4, a3, XCHAL_CP_MAX << 2 /* a4 = top+1 of owner array */
+ movi a5, 0 /* a5 = 0 (unowned) */
+
+ rsil a6, XCHAL_EXCM_LEVEL /* lock interrupts */
+
+ /* For each co-processor owned by this thread's save area, release it. */
+1: l32i a7, a3, 0 /* a7 = owner at a3 */
+ bne a2, a7, 2f /* if (coproc_sa_base == owner) */
+ s32i a5, a3, 0 /* owner = unowned */
+2: addi a3, a3, 1<<2 /* a3 = next entry in owner array */
+ bltu a3, a4, 1b /* repeat until end of array */
+
+3: wsr a6, PS /* restore interrupts */
+
+ RET0
+
+#endif
+\r
+\r
+/*******************************************************************************\r
+_xt_coproc_savecs\r
+\r
+If there is a current thread and it has a coprocessor state save area, then\r
+save all callee-saved state into this area. This function is called from the\r
+solicited context switch handler. It calls a system-specific function to get\r
+the coprocessor save area base address.\r
+\r
+Entry conditions:\r
+ - The thread being switched out is still the current thread.\r
+ - CPENABLE state reflects which coprocessors are active.\r
+ - Registers have been saved/spilled already.\r
+\r
+Exit conditions:\r
+ - All necessary CP callee-saved state has been saved.\r
+ - Registers a2-a7, a13-a15 have been trashed.\r
+\r
+Must be called from assembly code only, using CALL0.\r
+*******************************************************************************/\r
+#if XCHAL_CP_NUM > 0
+
+ .extern _xt_coproc_sa_offset /* external reference */
+
+ .global _xt_coproc_savecs
+ .type _xt_coproc_savecs,@function
+ .align 4
+ .literal_position
+ .align 4
+_xt_coproc_savecs:
+
+ /* At entry, CPENABLE should be showing which CPs are enabled. */
+
+ /* Clobbers a2-a7, a13-a15 (see header above). a14 preserves the return
+ address across the call0 to XT_RTOS_CP_STATE, which returns the CP
+ save-area pointer in a15. */
+
+ rsr a2, CPENABLE /* a2 = which CPs are enabled */
+ beqz a2, .Ldone /* quick exit if none */
+ mov a14, a0 /* save return address */
+ call0 XT_RTOS_CP_STATE /* get address of CP save area */
+ mov a0, a14 /* restore return address */
+ beqz a15, .Ldone /* if none then nothing to do */
+ s16i a2, a15, XT_CP_CS_ST /* save mask of CPs being stored */
+ movi a13, _xt_coproc_sa_offset /* array of CP save offsets */
+ l32i a15, a15, XT_CP_ASA /* a15 = base of aligned save area */
+
+ /* One section per co-processor, all following the CP0 pattern:
+ skip if not enabled, else store its state at its save offset. */
+#if XCHAL_CP0_SA_SIZE
+ bbci.l a2, 0, 2f /* CP 0 not enabled */
+ l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */
+ add a3, a14, a15 /* a3 = save area for CP 0 */
+ xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP1_SA_SIZE
+ bbci.l a2, 1, 2f /* CP 1 not enabled */
+ l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */
+ add a3, a14, a15 /* a3 = save area for CP 1 */
+ xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP2_SA_SIZE
+ bbci.l a2, 2, 2f
+ l32i a14, a13, 8
+ add a3, a14, a15
+ xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP3_SA_SIZE
+ bbci.l a2, 3, 2f
+ l32i a14, a13, 12
+ add a3, a14, a15
+ xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP4_SA_SIZE
+ bbci.l a2, 4, 2f
+ l32i a14, a13, 16
+ add a3, a14, a15
+ xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP5_SA_SIZE
+ bbci.l a2, 5, 2f
+ l32i a14, a13, 20
+ add a3, a14, a15
+ xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP6_SA_SIZE
+ bbci.l a2, 6, 2f
+ l32i a14, a13, 24
+ add a3, a14, a15
+ xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP7_SA_SIZE
+ bbci.l a2, 7, 2f
+ l32i a14, a13, 28
+ add a3, a14, a15
+ xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+.Ldone:
+ ret
+#endif
+\r
+\r
+/*******************************************************************************\r
+_xt_coproc_restorecs\r
+\r
+Restore any callee-saved coprocessor state for the incoming thread.\r
+This function is called from coprocessor exception handling, when giving\r
+ownership to a thread that solicited a context switch earlier. It calls a\r
+system-specific function to get the coprocessor save area base address.\r
+\r
+Entry conditions:\r
+ - The incoming thread is set as the current thread.\r
+ - CPENABLE is set up correctly for all required coprocessors.\r
+ - a2 = mask of coprocessors to be restored.\r
+\r
+Exit conditions:\r
+ - All necessary CP callee-saved state has been restored.\r
+ - CPENABLE - unchanged.\r
+ - Registers a2-a7, a13-a15 have been trashed.\r
+\r
+Must be called from assembly code only, using CALL0.\r
+*******************************************************************************/\r
+#if XCHAL_CP_NUM > 0
+
+ .global _xt_coproc_restorecs
+ .type _xt_coproc_restorecs,@function
+ .align 4
+ .literal_position
+ .align 4
+_xt_coproc_restorecs:
+
+ /* Clobbers a2-a7, a13-a15 (see header above). a2 = mask of CPs to
+ restore; a14 preserves the return address across the call0 to
+ XT_RTOS_CP_STATE, which returns the CP save-area pointer in a15. */
+ mov a14, a0 /* save return address */
+ call0 XT_RTOS_CP_STATE /* get address of CP save area */
+ mov a0, a14 /* restore return address */
+ beqz a15, .Ldone2 /* if none then nothing to do */
+ l16ui a3, a15, XT_CP_CS_ST /* a3 = which CPs have been saved */
+ xor a3, a3, a2 /* clear the ones being restored */
+ /* (xor clears exactly the a2 bits — assumes every bit in a2 is set in
+ the saved mask; TODO confirm callers guarantee this) */
+ s32i a3, a15, XT_CP_CS_ST /* update saved CP mask */
+ /* NOTE(review): field is read with l16ui / written by _xt_coproc_savecs
+ with s16i, but rewritten here with s32i — confirm the adjacent 16 bits
+ at XT_CP_CS_ST+2 are padding. */
+ movi a13, _xt_coproc_sa_offset /* array of CP save offsets */
+ l32i a15, a15, XT_CP_ASA /* a15 = base of aligned save area */
+ 
+#if XCHAL_CP0_SA_SIZE
+ bbci.l a2, 0, 2f /* CP 0 not enabled */
+ l32i a14, a13, 0 /* a14 = _xt_coproc_sa_offset[0] */
+ add a3, a14, a15 /* a3 = save area for CP 0 */
+ xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2: 
+#endif
+
+#if XCHAL_CP1_SA_SIZE
+ bbci.l a2, 1, 2f /* CP 1 not enabled */
+ l32i a14, a13, 4 /* a14 = _xt_coproc_sa_offset[1] */
+ add a3, a14, a15 /* a3 = save area for CP 1 */
+ xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP2_SA_SIZE
+ bbci.l a2, 2, 2f
+ l32i a14, a13, 8
+ add a3, a14, a15
+ xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP3_SA_SIZE
+ bbci.l a2, 3, 2f
+ l32i a14, a13, 12
+ add a3, a14, a15
+ xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP4_SA_SIZE
+ bbci.l a2, 4, 2f
+ l32i a14, a13, 16
+ add a3, a14, a15
+ xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP5_SA_SIZE
+ bbci.l a2, 5, 2f
+ l32i a14, a13, 20
+ add a3, a14, a15
+ xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP6_SA_SIZE
+ bbci.l a2, 6, 2f
+ l32i a14, a13, 24
+ add a3, a14, a15
+ xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+#if XCHAL_CP7_SA_SIZE
+ bbci.l a2, 7, 2f
+ l32i a14, a13, 28
+ add a3, a14, a15
+ xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
+2:
+#endif
+
+.Ldone2:
+ ret
+
+#endif
+\r
+\r
--- /dev/null
+/*******************************************************************************\r
+// Copyright (c) 2003-2015 Cadence Design Systems, Inc.\r
+//\r
+// Permission is hereby granted, free of charge, to any person obtaining\r
+// a copy of this software and associated documentation files (the\r
+// "Software"), to deal in the Software without restriction, including\r
+// without limitation the rights to use, copy, modify, merge, publish,\r
+// distribute, sublicense, and/or sell copies of the Software, and to\r
+// permit persons to whom the Software is furnished to do so, subject to\r
+// the following conditions:\r
+//\r
+// The above copyright notice and this permission notice shall be included\r
+// in all copies or substantial portions of the Software.\r
+//\r
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+--------------------------------------------------------------------------------\r
+\r
+ XTENSA INITIALIZATION ROUTINES CODED IN C\r
+\r
+This file contains miscellaneous Xtensa RTOS-generic initialization functions\r
+that are implemented in C.\r
+\r
+*******************************************************************************/\r
+\r
+\r
+#ifdef XT_BOARD\r
+#include <xtensa/xtbsp.h>\r
+#endif\r
+\r
+#include "xtensa_rtos.h"\r
+#include "esp_clk.h"\r
+\r
+#ifdef XT_RTOS_TIMER_INT
+
+/* Set by _xt_tick_divisor_init(); remains 0 until that runs. */
+unsigned _xt_tick_divisor = 0; /* cached number of cycles per tick */
+\r
+/* Cache the number of CPU cycles per RTOS tick, derived from the current
+ * CPU frequency. Must be called before the tick timer is used. */
+void _xt_tick_divisor_init(void)
+{
+    unsigned cycles_per_tick = esp_clk_cpu_freq() / XT_TICK_PER_SEC;
+    _xt_tick_divisor = cycles_per_tick;
+}
+\r
+/* Deprecated, to be removed */
+int xt_clock_freq(void)
+{
+    /* Backward-compatibility wrapper; callers should use esp_clk_cpu_freq(). */
+    int freq_hz = esp_clk_cpu_freq();
+    return freq_hz;
+}
+
+#endif /* XT_RTOS_TIMER_INT */
+\r
--- /dev/null
+/*******************************************************************************\r
+Copyright (c) 2006-2015 Cadence Design Systems Inc.\r
+\r
+Permission is hereby granted, free of charge, to any person obtaining\r
+a copy of this software and associated documentation files (the\r
+"Software"), to deal in the Software without restriction, including\r
+without limitation the rights to use, copy, modify, merge, publish,\r
+distribute, sublicense, and/or sell copies of the Software, and to\r
+permit persons to whom the Software is furnished to do so, subject to\r
+the following conditions:\r
+\r
+The above copyright notice and this permission notice shall be included\r
+in all copies or substantial portions of the Software.\r
+\r
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+******************************************************************************/\r
+\r
+/******************************************************************************\r
+ Xtensa-specific interrupt and exception functions for RTOS ports.\r
+ Also see xtensa_intr_asm.S.\r
+******************************************************************************/\r
+\r
+#include <stdlib.h>\r
+\r
+#include <xtensa/config/core.h>\r
+\r
+#include "freertos/FreeRTOS.h"\r
+#include "freertos/xtensa_api.h"\r
+#include "freertos/portable.h"\r
+\r
+#include "rom/ets_sys.h"\r
+\r
+#if XCHAL_HAVE_EXCEPTIONS\r
+\r
+/* Handler table is in xtensa_intr_asm.S */\r
+\r
+extern xt_exc_handler _xt_exception_table[XCHAL_EXCCAUSE_NUM*portNUM_PROCESSORS];\r
+\r
+\r
+/*\r
+ Default handler for unhandled exceptions.\r
+ CHANGED: We do this in panic.c now\r
+*/\r
+\r
+//void xt_unhandled_exception(XtExcFrame *frame)\r
+//{\r
+ //exit(-1);\r
+//}\r
+extern void xt_unhandled_exception(XtExcFrame *frame);\r
+\r
+\r
+/*\r
+ This function registers a handler for the specified exception.\r
+ The function returns the address of the previous handler.\r
+ On error, it returns 0.\r
+*/\r
+/* Register handler f for exception cause n on the calling core.
+ * Returns the previously registered handler, or 0 if n is invalid or the
+ * previous handler was the default. Passing f == NULL reinstates the
+ * default unhandled-exception hook. */
+xt_exc_handler xt_set_exception_handler(int n, xt_exc_handler f)
+{
+    /* Guard clause: reject exception numbers outside the valid range. */
+    if (n < 0 || n >= XCHAL_EXCCAUSE_NUM) {
+        return 0; /* invalid exception number */
+    }
+
+    /* Entries are interleaved per core; compute this core's table index. */
+    const int slot = n * portNUM_PROCESSORS + xPortGetCoreID();
+    xt_exc_handler previous = _xt_exception_table[slot];
+
+    /* NULL handler reinstates the default unhandled-exception hook. */
+    _xt_exception_table[slot] = f ? f : &xt_unhandled_exception;
+
+    /* Never report the default hook as a "previous" handler. */
+    return (previous == &xt_unhandled_exception) ? 0 : previous;
+}
+\r
+#endif\r
+\r
+#if XCHAL_HAVE_INTERRUPTS\r
+\r
+/* Handler table is in xtensa_intr_asm.S */\r
+\r
+/* One entry of _xt_interrupt_table (layout must match xtensa_intr_asm.S). */
+typedef struct xt_handler_table_entry {
+ void * handler; /* C-callable handler, or xt_unhandled_interrupt by default */
+ void * arg; /* argument passed to the handler when invoked */
+} xt_handler_table_entry;
+\r
+extern xt_handler_table_entry _xt_interrupt_table[XCHAL_NUM_INTERRUPTS*portNUM_PROCESSORS];\r
+\r
+\r
+/*\r
+ Default handler for unhandled interrupts.\r
+*/\r
+/* Default catch-all interrupt handler: logs the stray interrupt number
+ * (passed via arg) and the core it fired on. */
+void xt_unhandled_interrupt(void * arg)
+{
+    const int int_num = (int)arg;
+    ets_printf("Unhandled interrupt %d on cpu %d!\n", int_num, xPortGetCoreID());
+}
+\r
+\r
+/*\r
+ This function registers a handler for the specified interrupt. The "arg"\r
+ parameter specifies the argument to be passed to the handler when it is\r
+ invoked. The function returns the address of the previous handler.\r
+ On error, it returns 0.\r
+*/\r
+/* Register handler f (with argument arg) for interrupt n on the calling
+ * core. Returns the previously registered handler, or 0 on error or if the
+ * previous handler was the default. Passing f == NULL reinstates the
+ * default unhandled-interrupt hook. */
+xt_handler xt_set_interrupt_handler(int n, xt_handler f, void * arg)
+{
+    /* Guard clauses: the interrupt must exist and be at or below
+     * EXCM_LEVEL, since higher-priority interrupts are not dispatched
+     * to C handlers. */
+    if (n < 0 || n >= XCHAL_NUM_INTERRUPTS) {
+        return 0; /* invalid interrupt number */
+    }
+    if (Xthal_intlevel[n] > XCHAL_EXCM_LEVEL) {
+        return 0; /* priority level too high to safely handle in C */
+    }
+
+    /* Entries are interleaved per core; compute this core's table index. */
+    n = n * portNUM_PROCESSORS + xPortGetCoreID();
+    xt_handler_table_entry *slot = _xt_interrupt_table + n;
+    xt_handler previous = slot->handler;
+
+    if (f) {
+        slot->handler = f;
+        slot->arg = arg;
+    } else {
+        /* Reinstall the default hook; it receives the table index as arg. */
+        slot->handler = &xt_unhandled_interrupt;
+        slot->arg = (void*)n;
+    }
+
+    /* Never report the default hook as a "previous" handler. */
+    return (previous == &xt_unhandled_interrupt) ? 0 : previous;
+}
+\r
+#if CONFIG_SYSVIEW_ENABLE\r
+/* Return the argument registered for interrupt n on the calling core,
+ * or 0 if n is not a valid interrupt number. */
+void * xt_get_interrupt_handler_arg(int n)
+{
+    if (n < 0 || n >= XCHAL_NUM_INTERRUPTS) {
+        return 0; /* invalid interrupt number */
+    }
+
+    /* Entries are interleaved per core; compute this core's table index. */
+    const int slot = n * portNUM_PROCESSORS + xPortGetCoreID();
+    return _xt_interrupt_table[slot].arg;
+}
+#endif\r
+\r
+#endif /* XCHAL_HAVE_INTERRUPTS */\r
+\r
--- /dev/null
+/*******************************************************************************\r
+Copyright (c) 2006-2015 Cadence Design Systems Inc.\r
+\r
+Permission is hereby granted, free of charge, to any person obtaining\r
+a copy of this software and associated documentation files (the\r
+"Software"), to deal in the Software without restriction, including\r
+without limitation the rights to use, copy, modify, merge, publish,\r
+distribute, sublicense, and/or sell copies of the Software, and to\r
+permit persons to whom the Software is furnished to do so, subject to\r
+the following conditions:\r
+\r
+The above copyright notice and this permission notice shall be included\r
+in all copies or substantial portions of the Software.\r
+\r
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+******************************************************************************/\r
+\r
+/******************************************************************************\r
+ Xtensa interrupt handling data and assembly routines.\r
+ Also see xtensa_intr.c and xtensa_vectors.S.\r
+******************************************************************************/\r
+\r
+#include <xtensa/hal.h>\r
+#include <xtensa/config/core.h>\r
+\r
+#include "xtensa_context.h"\r
+#include "FreeRTOSConfig.h"\r
+\r
+#if XCHAL_HAVE_INTERRUPTS\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ INTENABLE virtualization information.\r
+-------------------------------------------------------------------------------\r
+*/\r
+\r
+\r
+#if XT_USE_SWPRI
+/* Warning - this is not multicore-compatible. */
+/* Accessed as a pair at offsets 0 and 4 from _xt_intdata by
+ xt_ints_on/xt_ints_off below; keep the two words adjacent. */
+ .data
+ .global _xt_intdata
+ .align 8
+_xt_intdata:
+ .global _xt_intenable
+ .type _xt_intenable,@object
+ .size _xt_intenable,4
+ .global _xt_vpri_mask
+ .type _xt_vpri_mask,@object
+ .size _xt_vpri_mask,4
+
+_xt_intenable: .word 0 /* Virtual INTENABLE */
+_xt_vpri_mask: .word 0xFFFFFFFF /* Virtual priority mask */
+#endif
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ Table of C-callable interrupt handlers for each interrupt. Note that not all\r
+ slots can be filled, because interrupts at level > EXCM_LEVEL will not be\r
+ dispatched to a C handler by default.\r
+\r
+ Stored as:\r
+ int 0 cpu 0\r
+ int 0 cpu 1\r
+ ...\r
+ int 0 cpu n\r
+ int 1 cpu 0\r
+ int 1 cpu 1\r
+ etc\r
+-------------------------------------------------------------------------------\r
+*/\r
+\r
+ .data
+ .global _xt_interrupt_table
+ .align 8
+
+/* Each entry is two words {handler, arg}, matching the C struct
+ xt_handler_table_entry in xtensa_intr.c. The default arg is the
+ entry's own table index. */
+_xt_interrupt_table:
+
+ .set i, 0
+ .rept XCHAL_NUM_INTERRUPTS*portNUM_PROCESSORS
+ .word xt_unhandled_interrupt /* handler address */
+ .word i /* handler arg (default: intnum) */
+ .set i, i+1
+ .endr
+\r
+#endif /* XCHAL_HAVE_INTERRUPTS */\r
+\r
+\r
+#if XCHAL_HAVE_EXCEPTIONS\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ Table of C-callable exception handlers for each exception. Note that not all\r
+ slots will be active, because some exceptions (e.g. coprocessor exceptions)\r
+ are always handled by the OS and cannot be hooked by user handlers.\r
+\r
+ Stored as:\r
+ exc 0 cpu 0\r
+ exc 0 cpu 1\r
+ ...\r
+ exc 0 cpu n\r
+ exc 1 cpu 0\r
+ exc 1 cpu 1\r
+ etc\r
+-------------------------------------------------------------------------------\r
+*/\r
+\r
+ .data
+ .global _xt_exception_table
+ .align 4
+
+/* One word (handler address) per exception cause per core; all entries
+ start at the default xt_unhandled_exception hook. */
+_xt_exception_table:
+ .rept XCHAL_EXCCAUSE_NUM * portNUM_PROCESSORS
+ .word xt_unhandled_exception /* handler address */
+ .endr
+\r
+#endif\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ unsigned int xt_ints_on ( unsigned int mask )\r
+\r
+ Enables a set of interrupts. Does not simply set INTENABLE directly, but\r
+ computes it as a function of the current virtual priority if XT_USE_SWPRI is\r
+ enabled.\r
+ Can be called from interrupt handlers.\r
+-------------------------------------------------------------------------------\r
+*/\r
+\r
+ .text
+ .align 4
+ .global xt_ints_on
+ .type xt_ints_on,@function
+
+xt_ints_on:
+
+ ENTRY0
+
+ /* a2 = mask of interrupts to enable; returns previous mask in a2. */
+#if XCHAL_HAVE_INTERRUPTS
+#if XT_USE_SWPRI
+ movi a3, 0
+ movi a4, _xt_intdata
+ xsr a3, INTENABLE /* Disables all interrupts */
+ rsync
+ l32i a3, a4, 0 /* a3 = _xt_intenable */
+ l32i a6, a4, 4 /* a6 = _xt_vpri_mask */
+ or a5, a3, a2 /* a5 = _xt_intenable | mask */
+ s32i a5, a4, 0 /* _xt_intenable |= mask */
+ and a5, a5, a6 /* a5 = _xt_intenable & _xt_vpri_mask */
+ wsr a5, INTENABLE /* Reenable interrupts */
+ /* NOTE(review): unlike the non-SWPRI path there is no rsync after this
+ wsr — confirm that is intentional. */
+ mov a2, a3 /* Previous mask */
+#else
+ movi a3, 0
+ xsr a3, INTENABLE /* Disables all interrupts */
+ rsync
+ or a2, a3, a2 /* set bits in mask */
+ wsr a2, INTENABLE /* Re-enable ints */
+ rsync
+ mov a2, a3 /* return prev mask */
+#endif
+#else
+ movi a2, 0 /* Return zero */
+#endif
+ RET0
+
+ .size xt_ints_on, . - xt_ints_on
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ unsigned int xt_ints_off ( unsigned int mask )\r
+\r
+ Disables a set of interrupts. Does not simply set INTENABLE directly,\r
+ but computes it as a function of the current virtual priority if XT_USE_SWPRI is\r
+ enabled.\r
+ Can be called from interrupt handlers.\r
+-------------------------------------------------------------------------------\r
+*/\r
+\r
+ .text
+ .align 4
+ .global xt_ints_off
+ .type xt_ints_off,@function
+
+xt_ints_off:
+
+ ENTRY0
+ /* a2 = mask of interrupts to disable; returns previous mask in a2. */
+#if XCHAL_HAVE_INTERRUPTS
+#if XT_USE_SWPRI
+ movi a3, 0
+ movi a4, _xt_intdata
+ xsr a3, INTENABLE /* Disables all interrupts */
+ rsync
+ l32i a3, a4, 0 /* a3 = _xt_intenable */
+ l32i a6, a4, 4 /* a6 = _xt_vpri_mask */
+ or a5, a3, a2 /* a5 = _xt_intenable | mask */
+ xor a5, a5, a2 /* a5 = _xt_intenable & ~mask */
+ s32i a5, a4, 0 /* _xt_intenable &= ~mask */
+ and a5, a5, a6 /* a5 = _xt_intenable & _xt_vpri_mask */
+ wsr a5, INTENABLE /* Reenable interrupts */
+ /* NOTE(review): no rsync after this wsr, unlike the non-SWPRI path —
+ confirm that is intentional. */
+ mov a2, a3 /* Previous mask */
+#else
+ movi a4, 0
+ xsr a4, INTENABLE /* Disables all interrupts */
+ rsync
+ or a3, a4, a2 /* set bits in mask */
+ xor a3, a3, a2 /* invert bits in mask set in mask, essentially clearing them */
+ wsr a3, INTENABLE /* Re-enable ints */
+ rsync
+ mov a2, a4 /* return prev mask */
+#endif
+#else
+ movi a2, 0 /* return zero */
+#endif
+ RET0
+
+ .size xt_ints_off, . - xt_ints_off
+\r
+\r
--- /dev/null
+// xtensa_overlay_os_hook.c -- Overlay manager OS hooks for FreeRTOS.\r
+\r
+// Copyright (c) 2015-2015 Cadence Design Systems Inc.\r
+//\r
+// Permission is hereby granted, free of charge, to any person obtaining\r
+// a copy of this software and associated documentation files (the\r
+// "Software"), to deal in the Software without restriction, including\r
+// without limitation the rights to use, copy, modify, merge, publish,\r
+// distribute, sublicense, and/or sell copies of the Software, and to\r
+// permit persons to whom the Software is furnished to do so, subject to\r
+// the following conditions:\r
+//\r
+// The above copyright notice and this permission notice shall be included\r
+// in all copies or substantial portions of the Software.\r
+//\r
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+\r
+\r
+#include "FreeRTOS.h"\r
+#include "semphr.h"\r
+\r
+#if configUSE_MUTEX\r
+\r
+/* Mutex object that controls access to the overlay. Currently only one
+ * overlay region is supported so one mutex suffices.
+ *
+ * Created in xt_overlay_init_os(); presumably xt_overlay_lock/unlock must
+ * not be called before that runs — verify against the overlay manager's
+ * init ordering.
+ */
+static SemaphoreHandle_t xt_overlay_mutex;
+\r
+\r
+/* This function should be overridden to provide OS specific init such\r
+ * as the creation of a mutex lock that can be used for overlay locking.\r
+ * Typically this mutex would be set up with priority inheritance. See\r
+ * overlay manager documentation for more details.\r
+ */\r
+void xt_overlay_init_os(void)
+{
+    /* Overlay locking requires a priority-inheritance lock; FreeRTOS
+     * mutexes provide priority inheritance by default. */
+    SemaphoreHandle_t mutex = xSemaphoreCreateMutex();
+    xt_overlay_mutex = mutex;
+}
+\r
+\r
+/* This function locks access to shared overlay resources, typically
+ * by acquiring a mutex. Blocks until the mutex is available.
+ */
+void xt_overlay_lock(void)
+{
+    /* FIX: the block time was 0, which makes xSemaphoreTake return
+     * immediately without acquiring the mutex when it is already held,
+     * so two tasks could enter the overlay region concurrently. Wait
+     * indefinitely so this behaves as a real lock. */
+    xSemaphoreTake(xt_overlay_mutex, portMAX_DELAY);
+}
+\r
+\r
+/* This function releases access to shared overlay resources, typically
+ * by unlocking a mutex.
+ */
+void xt_overlay_unlock(void)
+{
+    /* Release the mutex taken by xt_overlay_lock(). */
+    (void)xSemaphoreGive(xt_overlay_mutex);
+}
+\r
+#endif\r
--- /dev/null
+// Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD\r
+//\r
+// Licensed under the Apache License, Version 2.0 (the "License");\r
+// you may not use this file except in compliance with the License.\r
+// You may obtain a copy of the License at\r
+\r
+// http://www.apache.org/licenses/LICENSE-2.0\r
+//\r
+// Unless required by applicable law or agreed to in writing, software\r
+// distributed under the License is distributed on an "AS IS" BASIS,\r
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+// See the License for the specific language governing permissions and\r
+// limitations under the License.\r
+\r
+#include "xtensa_rtos.h"\r
+#include "esp_panic.h"\r
+#include "sdkconfig.h"\r
+#include "soc/soc.h"\r
+\r
+/*\r
+This file contains the default handlers for the high interrupt levels as well as some specialized exceptions. \r
+The default behaviour is to just exit the interrupt or call the panic handler on the exceptions\r
+*/\r
+\r
+\r
+#if XCHAL_HAVE_DEBUG
+ .global xt_debugexception
+ .weak xt_debugexception
+ .set xt_debugexception, _xt_debugexception
+ .section .iram1,"ax"
+ .type _xt_debugexception,@function
+ .align 4
+
+/* Debug-exception entry: record the panic reason in EXCCAUSE, then hand
+ off to the panic handler. Weak symbol, so applications may override
+ xt_debugexception. */
+_xt_debugexception:
+ movi a0,PANIC_RSN_DEBUGEXCEPTION
+ wsr a0,EXCCAUSE
+ /* _xt_panic assumes a level 1 exception. As we're
+ crashing anyhow, copy EPC & EXCSAVE from DEBUGLEVEL
+ to level 1. */
+ rsr a0,(EPC + XCHAL_DEBUGLEVEL)
+ wsr a0,EPC_1
+ rsr a0,(EXCSAVE + XCHAL_DEBUGLEVEL)
+ wsr a0,EXCSAVE_1
+ call0 _xt_panic /* does not return */
+ rfi XCHAL_DEBUGLEVEL /* unreachable; safety net if _xt_panic returns */
+
+#endif /* Debug exception */
+\r
+\r
+#if XCHAL_NUM_INTLEVELS >=2 && XCHAL_EXCM_LEVEL <2 && XCHAL_DEBUGLEVEL !=2
+ .global xt_highint2
+ .weak xt_highint2
+ .set xt_highint2, _xt_highint2
+ .section .iram1,"ax"
+ .type _xt_highint2,@function
+ .align 4
+/* Weak default level-2 interrupt handler; override by defining xt_highint2. */
+_xt_highint2:
+
+ /* Default handler does nothing; just returns */
+ .align 4
+.L_xt_highint2_exit:
+ rsr a0, EXCSAVE_2 /* restore a0 (saved in EXCSAVE_2 before dispatch) */
+ rfi 2
+
+#endif /* Level 2 */
+\r
+#if XCHAL_NUM_INTLEVELS >=3 && XCHAL_EXCM_LEVEL <3 && XCHAL_DEBUGLEVEL !=3
+
+ .global xt_highint3
+ .weak xt_highint3
+ .set xt_highint3, _xt_highint3
+ .section .iram1,"ax"
+ .type _xt_highint3,@function
+ .align 4
+/* Weak default level-3 interrupt handler; override by defining xt_highint3. */
+_xt_highint3:
+
+ /* Default handler does nothing; just returns */
+
+ .align 4
+.L_xt_highint3_exit:
+ rsr a0, EXCSAVE_3 /* restore a0 (saved in EXCSAVE_3 before dispatch) */
+ rfi 3
+
+#endif /* Level 3 */
+\r
+#if XCHAL_NUM_INTLEVELS >=4 && XCHAL_EXCM_LEVEL <4 && XCHAL_DEBUGLEVEL !=4
+
+ .global xt_highint4
+ .weak xt_highint4
+ .set xt_highint4, _xt_highint4
+ .section .iram1,"ax"
+ .type _xt_highint4,@function
+ .align 4
+/* Weak default level-4 interrupt handler; override by defining xt_highint4. */
+_xt_highint4:
+
+ /* Default handler does nothing; just returns */
+
+ .align 4
+.L_xt_highint4_exit:
+ rsr a0, EXCSAVE_4 /* restore a0 (saved in EXCSAVE_4 before dispatch) */
+ rfi 4
+
+#endif /* Level 4 */
+\r
+#if XCHAL_NUM_INTLEVELS >=5 && XCHAL_EXCM_LEVEL <5 && XCHAL_DEBUGLEVEL !=5
+
+ .global xt_highint5
+ .weak xt_highint5
+ .set xt_highint5, _xt_highint5
+ .section .iram1,"ax"
+ .type _xt_highint5,@function
+ .align 4
+/* Weak default level-5 interrupt handler; override by defining xt_highint5. */
+_xt_highint5:
+
+ /* Default handler does nothing; just returns */
+
+ .align 4
+.L_xt_highint5_exit:
+ rsr a0, EXCSAVE_5 /* restore a0 (saved in EXCSAVE_5 before dispatch) */
+ rfi 5
+
+
+#endif /* Level 5 */
+\r
+#if XCHAL_NUM_INTLEVELS >=6 && XCHAL_EXCM_LEVEL <6 && XCHAL_DEBUGLEVEL !=6
+
+ .global _xt_highint6
+ .global xt_highint6
+ .weak xt_highint6
+ .set xt_highint6, _xt_highint6
+ .section .iram1,"ax"
+ .type _xt_highint6,@function
+ .align 4
+/* Weak default level-6 interrupt handler; override by defining xt_highint6. */
+_xt_highint6:
+
+ /* Default handler does nothing; just returns */
+
+ .align 4
+.L_xt_highint6_exit:
+ rsr a0, EXCSAVE_6 /* restore a0 (saved in EXCSAVE_6 before dispatch) */
+ rfi 6
+
+#endif /* Level 6 */
+\r
+#if XCHAL_HAVE_NMI
+
+ .global _xt_nmi
+ .global xt_nmi
+ .weak xt_nmi
+ .set xt_nmi, _xt_nmi
+ .section .iram1,"ax"
+ .type _xt_nmi,@function
+ .align 4
+/* Weak default NMI handler; override by defining xt_nmi. */
+_xt_nmi:
+
+ /* Default handler does nothing; just returns */
+
+ .align 4
+.L_xt_nmi_exit:
+ rsr a0, EXCSAVE + XCHAL_NMILEVEL /* restore a0 */
+ rfi XCHAL_NMILEVEL
+
+#endif /* NMI */
+\r
--- /dev/null
+/*******************************************************************************\r
+Copyright (c) 2006-2015 Cadence Design Systems Inc.\r
+\r
+Permission is hereby granted, free of charge, to any person obtaining\r
+a copy of this software and associated documentation files (the\r
+"Software"), to deal in the Software without restriction, including\r
+without limitation the rights to use, copy, modify, merge, publish,\r
+distribute, sublicense, and/or sell copies of the Software, and to\r
+permit persons to whom the Software is furnished to do so, subject to\r
+the following conditions:\r
+\r
+The above copyright notice and this permission notice shall be included\r
+in all copies or substantial portions of the Software.\r
+\r
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\r
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r
+--------------------------------------------------------------------------------\r
+\r
+ XTENSA VECTORS AND LOW LEVEL HANDLERS FOR AN RTOS\r
+\r
+ Xtensa low level exception and interrupt vectors and handlers for an RTOS.\r
+\r
+ Interrupt handlers and user exception handlers support interaction with\r
+ the RTOS by calling XT_RTOS_INT_ENTER and XT_RTOS_INT_EXIT before and\r
+ after user's specific interrupt handlers. These macros are defined in\r
+ xtensa_<rtos>.h to call suitable functions in a specific RTOS.\r
+\r
+ Users can install application-specific interrupt handlers for low and\r
+ medium level interrupts, by calling xt_set_interrupt_handler(). These\r
+ handlers can be written in C, and must obey C calling convention. The\r
+ handler table is indexed by the interrupt number. Each handler may be\r
+ provided with an argument. \r
+\r
+ Note that the system timer interrupt is handled specially, and is\r
+ dispatched to the RTOS-specific handler. This timer cannot be hooked\r
+ by application code.\r
+\r
+ Optional hooks are also provided to install a handler per level at \r
+ run-time, made available by compiling this source file with \r
+ '-DXT_INTEXC_HOOKS' (useful for automated testing).\r
+\r
+!! This file is a template that usually needs to be modified to handle !!\r
+!! application specific interrupts. Search USER_EDIT for helpful comments !!\r
+!! on where to insert handlers and how to write them. !!\r
+\r
+ Users can also install application-specific exception handlers in the\r
+ same way, by calling xt_set_exception_handler(). One handler slot is\r
+ provided for each exception type. Note that some exceptions are handled\r
+ by the porting layer itself, and cannot be taken over by application\r
+ code in this manner. These are the alloca, syscall, and coprocessor\r
+ exceptions.\r
+\r
+ The exception handlers can be written in C, and must follow C calling\r
+ convention. Each handler is passed a pointer to an exception frame as\r
+ its single argument. The exception frame is created on the stack, and\r
+ holds the saved context of the thread that took the exception. If the\r
+ handler returns, the context will be restored and the instruction that\r
+ caused the exception will be retried. If the handler makes any changes\r
+ to the saved state in the exception frame, the changes will be applied\r
+ when restoring the context.\r
+\r
+ Because Xtensa is a configurable architecture, this port supports all user\r
+ generated configurations (except restrictions stated in the release notes).\r
+ This is accomplished by conditional compilation using macros and functions\r
+ defined in the Xtensa HAL (hardware adaptation layer) for your configuration.\r
+ Only the relevant parts of this file will be included in your RTOS build.\r
+ For example, this file provides interrupt vector templates for all types and\r
+ all priority levels, but only the ones in your configuration are built.\r
+\r
+ NOTES on the use of 'call0' for long jumps instead of 'j':\r
+ 1. This file should be assembled with the -mlongcalls option to xt-xcc.\r
+ 2. The -mlongcalls compiler option causes 'call0 dest' to be expanded to\r
+ a sequence 'l32r a0, dest' 'callx0 a0' which works regardless of the\r
+ distance from the call to the destination. The linker then relaxes\r
+ it back to 'call0 dest' if it determines that dest is within range.\r
+ This allows more flexibility in locating code without the performance\r
+ overhead of the 'l32r' literal data load in cases where the destination\r
+ is in range of 'call0'. There is an additional benefit in that 'call0'\r
+ has a longer range than 'j' due to the target being word-aligned, so \r
+ the 'l32r' sequence is less likely needed.\r
+ 3. The use of 'call0' with -mlongcalls requires that register a0 not be \r
+ live at the time of the call, which is always the case for a function \r
+ call but needs to be ensured if 'call0' is used as a jump in lieu of 'j'.\r
+ 4. This use of 'call0' is independent of the C function call ABI.\r
+\r
+*******************************************************************************/\r
+\r
+#include "xtensa_rtos.h"\r
+#include "esp_panic.h"\r
+#include "sdkconfig.h"\r
+#include "soc/soc.h"\r
+#include "soc/dport_reg.h"\r
+\r
+/*\r
+ Define for workaround: pin no-cpu-affinity tasks to a cpu when fpu is used.\r
+ Please change this when the tcb structure is changed\r
+*/\r
+#define TASKTCB_XCOREID_OFFSET (0x38+configMAX_TASK_NAME_LEN+3)&~3\r
+.extern pxCurrentTCB\r
+\r
+/* Enable stack backtrace across exception/interrupt - see below */\r
+#ifdef CONFIG_FREERTOS_INTERRUPT_BACKTRACE\r
+#define XT_DEBUG_BACKTRACE 1\r
+#endif\r
+\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+ Defines used to access _xtos_interrupt_table.\r
+--------------------------------------------------------------------------------\r
+*/\r
+#define XIE_HANDLER 0\r
+#define XIE_ARG 4\r
+#define XIE_SIZE 8\r
+\r
+\r
+/*\r
+ Macro get_percpu_entry_for - convert a per-core ID into a multicore entry.\r
+ Basically does reg=reg*portNUM_PROCESSORS+current_core_id\r
+ Multiple versions here to optimize for specific portNUM_PROCESSORS values.\r
+*/\r
+ .macro get_percpu_entry_for reg scratch
+#if (portNUM_PROCESSORS == 1)
+ /* Single-core: the per-CPU index equals the plain index. No need to do anything */
+#elif (portNUM_PROCESSORS == 2)
+ /* Optimized 2-core code: reg = reg*2 + coreid, done in one addx2. */
+ getcoreid \scratch
+ addx2 \reg,\reg,\scratch
+#else
+ /* Generalized n-core code: reg = reg*portNUM_PROCESSORS + coreid. Untested! */
+ movi \scratch,portNUM_PROCESSORS
+ mull \scratch,\reg,\scratch
+ getcoreid \reg
+ add \reg,\scratch,\reg
+#endif
+ .endm
+/*\r
+--------------------------------------------------------------------------------\r
+ Macro extract_msb - return the input with only the highest bit set.\r
+\r
+ Input : "ain" - Input value, clobbered.\r
+ Output : "aout" - Output value, has only one bit set, MSB of "ain".\r
+ The two arguments must be different AR registers.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .macro extract_msb aout ain
+1:
+ addi \aout, \ain, -1 /* aout = ain - 1 */
+ and \ain, \ain, \aout /* ain = ain & aout (clears lowest set bit) */
+ bnez \ain, 1b /* repeat until ain == 0 (one bit left in aout+1) */
+ addi \aout, \aout, 1 /* return aout + 1 = MSB of the original ain */
+ .endm
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+ Macro dispatch_c_isr - dispatch interrupts to user ISRs.\r
+ This will dispatch to user handlers (if any) that are registered in the\r
+ XTOS dispatch table (_xtos_interrupt_table). These handlers would have\r
+ been registered by calling _xtos_set_interrupt_handler(). There is one\r
+ exception - the timer interrupt used by the OS will not be dispatched\r
+ to a user handler - this must be handled by the caller of this macro.\r
+\r
+ Level triggered and software interrupts are automatically deasserted by\r
+ this code.\r
+\r
+ ASSUMPTIONS:\r
+ -- PS.INTLEVEL is set to "level" at entry\r
+ -- PS.EXCM = 0, C calling enabled\r
+\r
+ NOTE: For CALL0 ABI, a12-a15 have not yet been saved.\r
+\r
+ NOTE: This macro will use registers a0 and a2-a7. The arguments are:\r
+ level -- interrupt level\r
+ mask -- interrupt bitmask for this level\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .macro dispatch_c_isr level mask
+
+ /* Notify the PM tracing facility that this core left idle (if enabled). */
+ #ifdef CONFIG_PM_TRACE
+ movi a6, 0 /* = ESP_PM_TRACE_IDLE */
+ getcoreid a7
+ call4 esp_pm_trace_exit
+ #endif // CONFIG_PM_TRACE
+
+ /* Get mask of pending, enabled interrupts at this level into a2. */
+
+.L_xt_user_int_&level&:
+ rsr a2, INTENABLE
+ rsr a3, INTERRUPT
+ movi a4, \mask
+ and a2, a2, a3 /* a2 = pending & enabled */
+ and a2, a2, a4 /* ... restricted to this level's bits */
+ beqz a2, 9f /* nothing to do */
+
+ /* This bit of code provides a nice debug backtrace in the debugger.
+ It does take a few more instructions, so undef XT_DEBUG_BACKTRACE
+ if you want to save the cycles.
+ */
+ #ifdef XT_DEBUG_BACKTRACE
+ #ifndef __XTENSA_CALL0_ABI__
+ rsr a0, EPC_1 + \level - 1 /* return address */
+ movi a4, 0xC0000000 /* constant with top 2 bits set (call size) */
+ or a0, a0, a4 /* set top 2 bits */
+ addx2 a0, a4, a0 /* clear top bit -- simulating call4 size */
+ #endif
+ #endif
+
+ /* Power-management hook: let the PM implementation see every ISR entry. */
+ #ifdef CONFIG_PM_ENABLE
+ call4 esp_pm_impl_isr_hook
+ #endif
+
+ #ifdef XT_INTEXC_HOOKS
+ /* Call interrupt hook if present to (pre)handle interrupts.
+ Hook may clear bits in a2 (a6 for windowed ABI); if all are cleared
+ we are done. */
+ movi a4, _xt_intexc_hooks
+ l32i a4, a4, \level << 2
+ beqz a4, 2f
+ #ifdef __XTENSA_CALL0_ABI__
+ callx0 a4
+ beqz a2, 9f
+ #else
+ mov a6, a2
+ callx4 a4
+ beqz a6, 9f
+ mov a2, a6
+ #endif
+2:
+ #endif
+
+ /* Now look up in the dispatch table and call user ISR if any. */
+ /* If multiple bits are set then MSB has highest priority. */
+
+ extract_msb a4, a2 /* a4 = MSB of a2, a2 trashed */
+
+ #ifdef XT_USE_SWPRI
+ /* Enable all interrupts at this level that are numerically higher
+ than the one we just selected, since they are treated as higher
+ priority.
+ */
+ movi a3, \mask /* a3 = all interrupts at this level */
+ add a2, a4, a4 /* a2 = a4 << 1 */
+ addi a2, a2, -1 /* a2 = mask of 1's <= a4 bit */
+ and a2, a2, a3 /* a2 = mask of all bits <= a4 at this level */
+ movi a3, _xt_intdata
+ l32i a6, a3, 4 /* a6 = _xt_vpri_mask */
+ neg a2, a2
+ addi a2, a2, -1 /* a2 = mask to apply */
+ and a5, a6, a2 /* mask off all bits <= a4 bit */
+ s32i a5, a3, 4 /* update _xt_vpri_mask */
+ rsr a3, INTENABLE
+ and a3, a3, a2 /* mask off all bits <= a4 bit */
+ wsr a3, INTENABLE
+ rsil a3, \level - 1 /* lower interrupt level by 1 */
+ #endif
+
+ movi a3, XT_TIMER_INTEN /* a3 = timer interrupt bit */
+ wsr a4, INTCLEAR /* clear sw or edge-triggered interrupt */
+ beq a3, a4, 7f /* if timer interrupt then skip table */
+
+ find_ms_setbit a3, a4, a3, 0 /* a3 = interrupt number */
+
+ get_percpu_entry_for a3, a12
+ movi a4, _xt_interrupt_table
+ addx8 a3, a3, a4 /* a3 = address of interrupt table entry */
+ l32i a4, a3, XIE_HANDLER /* a4 = handler address */
+ #ifdef __XTENSA_CALL0_ABI__
+ mov a12, a6 /* save in callee-saved reg */
+ l32i a2, a3, XIE_ARG /* a2 = handler arg */
+ callx0 a4 /* call handler */
+ mov a2, a12
+ #else
+ mov a2, a6 /* save in windowed reg */
+ l32i a6, a3, XIE_ARG /* a6 = handler arg */
+ callx4 a4 /* call handler */
+ #endif
+
+ #ifdef XT_USE_SWPRI
+ j 8f
+ #else
+ j .L_xt_user_int_&level& /* check for more interrupts */
+ #endif
+
+7:
+
+ .ifeq XT_TIMER_INTPRI - \level
+.L_xt_user_int_timer_&level&:
+ /*
+ Interrupt handler for the RTOS tick timer if at this level.
+ We'll be reading the interrupt state again after this call
+ so no need to preserve any registers except a6 (vpri_mask).
+ */
+
+ #ifdef __XTENSA_CALL0_ABI__
+ mov a12, a6
+ call0 XT_RTOS_TIMER_INT
+ mov a2, a12
+ #else
+ mov a2, a6
+ call4 XT_RTOS_TIMER_INT
+ #endif
+ .endif
+
+ #ifdef XT_USE_SWPRI
+ j 8f
+ #else
+ j .L_xt_user_int_&level& /* check for more interrupts */
+ #endif
+
+ #ifdef XT_USE_SWPRI
+8:
+ /* Restore old value of _xt_vpri_mask from a2. Also update INTENABLE from
+ virtual _xt_intenable which _could_ have changed during interrupt
+ processing. */
+
+ movi a3, _xt_intdata
+ l32i a4, a3, 0 /* a4 = _xt_intenable */
+ s32i a2, a3, 4 /* update _xt_vpri_mask */
+ and a4, a4, a2 /* a4 = masked intenable */
+ wsr a4, INTENABLE /* update INTENABLE */
+ #endif
+
+9:
+ /* done */
+
+ .endm
+\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+ Panic handler.\r
+ Should be reached by call0 (preferable) or jump only. If call0, a0 says where \r
+ from. If on simulator, display panic message and abort, else loop indefinitely.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .section .iram1,"ax"
+ .global panicHandler
+
+ .global _xt_panic
+ .type _xt_panic,@function
+ .align 4
+ .literal_position
+ .align 4
+
+_xt_panic:
+ /* Allocate exception frame and save minimal context. */
+ mov a0, sp
+ addi sp, sp, -XT_STK_FRMSZ
+ s32i a0, sp, XT_STK_A1
+ #if XCHAL_HAVE_WINDOWED
+ s32e a0, sp, -12 /* for debug backtrace */
+ #endif
+ rsr a0, PS /* save interruptee's PS */
+ s32i a0, sp, XT_STK_PS
+ rsr a0, EPC_1 /* save interruptee's PC */
+ s32i a0, sp, XT_STK_PC
+ #if XCHAL_HAVE_WINDOWED
+ s32e a0, sp, -16 /* for debug backtrace */
+ #endif
+ s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
+ s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
+ call0 _xt_context_save
+
+ /* Save exc cause and vaddr into exception frame */
+ rsr a0, EXCCAUSE
+ s32i a0, sp, XT_STK_EXCCAUSE
+ rsr a0, EXCVADDR
+ s32i a0, sp, XT_STK_EXCVADDR
+
+ /* _xt_context_save seems to save the current a0, but we need the interruptee's a0. Fix this. */
+ rsr a0, EXCSAVE_1 /* save interruptee's a0 */
+
+ s32i a0, sp, XT_STK_A0
+
+ /* Set up PS for C, disable all interrupts except NMI and debug, and clear EXCM. */
+ movi a0, PS_INTLEVEL(5) | PS_UM | PS_WOE
+ wsr a0, PS
+
+ /* Call the C panic handler with the exception frame as its argument.
+ It is expected not to return. */
+ mov a6,sp
+ call4 panicHandler
+\r
+\r
+ .align 4
+/* Call using call0. Prints the hex char in a2 (8 nibbles, MSB first, then a
+ space) directly to UART0 at 0x60000000. Kills a3, a4, a5. */
+panic_print_hex:
+ movi a3,0x60000000 /* a3 = UART0 base address */
+ movi a4,8 /* a4 = nibble counter (32 bits / 4) */
+panic_print_hex_loop:
+ l32i a5, a3, 0x1c /* read UART status register */
+ extui a5, a5, 16, 8 /* extract TX FIFO count */
+ bgei a5,64,panic_print_hex_loop /* busy-wait while TX FIFO is full */
+
+ srli a5,a2,28 /* a5 = top nibble of a2 */
+ bgei a5,10,panic_print_hex_a
+ addi a5,a5,'0' /* 0-9 -> '0'-'9' */
+ j panic_print_hex_ok
+panic_print_hex_a:
+ addi a5,a5,'A'-10 /* 10-15 -> 'A'-'F' */
+panic_print_hex_ok:
+ s32i a5,a3,0 /* write char to UART TX FIFO */
+ slli a2,a2,4 /* shift next nibble into the top */
+ 
+ addi a4,a4,-1
+ bnei a4,0,panic_print_hex_loop
+ movi a5,' ' /* trailing space separator */
+ s32i a5,a3,0
+
+ ret
+\r
+\r
+\r
+ .section .rodata, "a"\r
+ .align 4\r
+\r
+\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+ Hooks to dynamically install handlers for exceptions and interrupts.\r
+ Allows automated regression frameworks to install handlers per test.\r
+ Consists of an array of function pointers indexed by interrupt level, \r
+ with index 0 containing the entry for user exceptions.\r
+ Initialized with all 0s, meaning no handler is installed at each level.\r
+ See comment in xtensa_rtos.h for more details.\r
+\r
+ *WARNING* This array is for all CPUs, that is, installing a hook for \r
+ one CPU will install it for all others as well!\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ #ifdef XT_INTEXC_HOOKS
+ .data
+ .global _xt_intexc_hooks
+ .type _xt_intexc_hooks,@object
+ .align 4
+
+/* Array of hook function pointers, one 4-byte slot per entry, all
+ zero-initialized (no hook installed). Index 0 = user exceptions,
+ index N = interrupt level N. Shared by all CPUs (see warning above). */
+_xt_intexc_hooks:
+ .fill XT_INTEXC_HOOK_NUM, 4, 0
+ #endif
+\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+ EXCEPTION AND LEVEL 1 INTERRUPT VECTORS AND LOW LEVEL HANDLERS\r
+ (except window exception vectors).\r
+\r
+ Each vector goes at a predetermined location according to the Xtensa\r
+ hardware configuration, which is ensured by its placement in a special\r
+ section known to the Xtensa linker support package (LSP). It performs\r
+ the minimum necessary before jumping to the handler in the .text section.\r
+\r
+ The corresponding handler goes in the normal .text section. It sets up\r
+ the appropriate stack frame, saves a few vector-specific registers and\r
+ calls XT_RTOS_INT_ENTER to save the rest of the interrupted context\r
+ and enter the RTOS, then sets up a C environment. It then calls the\r
+ user's interrupt handler code (which may be coded in C) and finally \r
+ calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.\r
+\r
+ While XT_RTOS_INT_EXIT does not return directly to the interruptee,\r
+ eventually the RTOS scheduler will want to dispatch the interrupted\r
+ task or handler. The scheduler will return to the exit point that was\r
+ saved in the interrupt stack frame at XT_STK_EXIT.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+Debug Exception.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+#if XCHAL_HAVE_DEBUG
+
+ .begin literal_prefix .DebugExceptionVector
+ .section .DebugExceptionVector.text, "ax"
+ .global _DebugExceptionVector
+ .align 4
+ .global xt_debugexception
+_DebugExceptionVector:
+ wsr a0, EXCSAVE+XCHAL_DEBUGLEVEL /* preserve a0 in the debug-level EXCSAVE */
+ call0 xt_debugexception /* load exception handler */
+ /* never returns here - call0 is used as a jump (see note at top) */
+
+ .end literal_prefix
+
+#endif
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+Double Exception.\r
+Double exceptions are not a normal occurrence. They indicate a bug of some kind.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+#ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR
+
+ .begin literal_prefix .DoubleExceptionVector
+ .section .DoubleExceptionVector.text, "ax"
+ .global _DoubleExceptionVector
+ .align 4
+
+_DoubleExceptionVector:
+
+ #if XCHAL_HAVE_DEBUG
+ break 1, 4 /* unhandled double exception */
+ #endif
+ /* Record a synthetic cause so the panic handler can report it. */
+ movi a0,PANIC_RSN_DOUBLEEXCEPTION
+ wsr a0,EXCCAUSE
+ call0 _xt_panic /* does not return */
+ rfde /* make a0 point here not later */
+
+ .end literal_prefix
+
+#endif /* XCHAL_DOUBLEEXC_VECTOR_VADDR */
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+Kernel Exception (including Level 1 Interrupt from kernel mode).\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .begin literal_prefix .KernelExceptionVector
+ .section .KernelExceptionVector.text, "ax"
+ .global _KernelExceptionVector
+ .align 4
+
+_KernelExceptionVector:
+
+ wsr a0, EXCSAVE_1 /* preserve a0 */
+ call0 _xt_kernel_exc /* kernel exception handler */
+ /* never returns here - call0 is used as a jump (see note at top) */
+
+ .end literal_prefix
+
+ .section .iram1,"ax"
+ .align 4
+
+/* Kernel-mode exceptions are unexpected in this port: trap to the debugger
+ if available, then report via the panic handler. */
+_xt_kernel_exc:
+ #if XCHAL_HAVE_DEBUG
+ break 1, 0 /* unhandled kernel exception */
+ #endif
+ movi a0,PANIC_RSN_KERNELEXCEPTION
+ wsr a0,EXCCAUSE
+ call0 _xt_panic /* does not return */
+ rfe /* make a0 point here not there */
+\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+User Exception (including Level 1 Interrupt from user mode).\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .begin literal_prefix .UserExceptionVector
+ .section .UserExceptionVector.text, "ax"
+ .global _UserExceptionVector
+ .type _UserExceptionVector,@function
+ .align 4
+
+/* Entry point for all user-mode exceptions and level-1 interrupts. */
+_UserExceptionVector:
+
+ wsr a0, EXCSAVE_1 /* preserve a0 */
+ call0 _xt_user_exc /* user exception handler */
+ /* never returns here - call0 is used as a jump (see note at top) */
+
+ .end literal_prefix
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+ Insert some waypoints for jumping beyond the signed 8-bit range of\r
+ conditional branch instructions, so the conditional branches to specific
+ exception handlers are not taken in the mainline. Saves some cycles in the\r
+ mainline.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .section .iram1,"ax"
+
+ /* Waypoint: jump to the alloca handler in the window-vectors section. */
+ #if XCHAL_HAVE_WINDOWED
+ .align 4
+_xt_to_alloca_exc:
+ call0 _xt_alloca_exc /* in window vectors section */
+ /* never returns here - call0 is used as a jump (see note at top) */
+ #endif
+
+ /* Waypoint: jump to the syscall exception handler. */
+ .align 4
+_xt_to_syscall_exc:
+ call0 _xt_syscall_exc
+ /* never returns here - call0 is used as a jump (see note at top) */
+
+ /* Waypoint: jump to the coprocessor exception handler. */
+ #if XCHAL_CP_NUM > 0
+ .align 4
+_xt_to_coproc_exc:
+ call0 _xt_coproc_exc
+ /* never returns here - call0 is used as a jump (see note at top) */
+ #endif
+\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+ User exception handler.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .type _xt_user_exc,@function
+ .align 4
+
+/* Main user exception handler: dispatches level-1 interrupts, routes
+ alloca/syscall/coprocessor exceptions to their dedicated handlers, and
+ runs the per-cause handler from _xt_exception_table for everything else. */
+_xt_user_exc:
+
+ /* If level 1 interrupt then jump to the dispatcher */
+ rsr a0, EXCCAUSE
+ beqi a0, EXCCAUSE_LEVEL1INTERRUPT, _xt_lowint1
+
+ /* Handle any coprocessor exceptions. Rely on the fact that exception
+ numbers above EXCCAUSE_CP0_DISABLED all relate to the coprocessors.
+ */
+ #if XCHAL_CP_NUM > 0
+ bgeui a0, EXCCAUSE_CP0_DISABLED, _xt_to_coproc_exc
+ #endif
+
+ /* Handle alloca and syscall exceptions */
+ #if XCHAL_HAVE_WINDOWED
+ beqi a0, EXCCAUSE_ALLOCA, _xt_to_alloca_exc
+ #endif
+ beqi a0, EXCCAUSE_SYSCALL, _xt_to_syscall_exc
+
+ /* Handle all other exceptions. All can have user-defined handlers. */
+ /* NOTE: we'll stay on the user stack for exception handling. */
+
+ /* Allocate exception frame and save minimal context. */
+ mov a0, sp
+ addi sp, sp, -XT_STK_FRMSZ
+ s32i a0, sp, XT_STK_A1
+ #if XCHAL_HAVE_WINDOWED
+ s32e a0, sp, -12 /* for debug backtrace */
+ #endif
+ rsr a0, PS /* save interruptee's PS */
+ s32i a0, sp, XT_STK_PS
+ rsr a0, EPC_1 /* save interruptee's PC */
+ s32i a0, sp, XT_STK_PC
+ #if XCHAL_HAVE_WINDOWED
+ s32e a0, sp, -16 /* for debug backtrace */
+ #endif
+ s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
+ s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
+ call0 _xt_context_save
+
+ /* Save exc cause and vaddr into exception frame */
+ rsr a0, EXCCAUSE
+ s32i a0, sp, XT_STK_EXCCAUSE
+ rsr a0, EXCVADDR
+ s32i a0, sp, XT_STK_EXCVADDR
+
+ /* _xt_context_save seems to save the current a0, but we need the interruptee's a0. Fix this. */
+ rsr a0, EXCSAVE_1 /* save interruptee's a0 */
+ s32i a0, sp, XT_STK_A0
+
+ /* Set up PS for C, reenable hi-pri interrupts, and clear EXCM. */
+ #ifdef __XTENSA_CALL0_ABI__
+ movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM
+ #else
+ movi a0, PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE
+ #endif
+ wsr a0, PS
+
+ /* Fake a call4 return address from EPC_1 so the debugger can unwind
+ across the exception (windowed ABI only). */
+ #ifdef XT_DEBUG_BACKTRACE
+ #ifndef __XTENSA_CALL0_ABI__
+ rsr a0, EPC_1 /* return address for debug backtrace */
+ movi a5, 0xC0000000 /* constant with top 2 bits set (call size) */
+ rsync /* wait for WSR.PS to complete */
+ or a0, a0, a5 /* set top 2 bits */
+ addx2 a0, a5, a0 /* clear top bit -- thus simulating call4 size */
+ #else
+ rsync /* wait for WSR.PS to complete */
+ #endif
+ #endif
+
+ rsr a2, EXCCAUSE /* recover exc cause */
+
+ #ifdef XT_INTEXC_HOOKS
+ /*
+ Call exception hook to pre-handle exceptions (if installed).
+ Pass EXCCAUSE in a2, and check result in a2 (if -1, skip default handling).
+ */
+ movi a4, _xt_intexc_hooks
+ l32i a4, a4, 0 /* user exception hook index 0 */
+ beqz a4, 1f
+.Ln_xt_user_exc_call_hook:
+ #ifdef __XTENSA_CALL0_ABI__
+ callx0 a4
+ beqi a2, -1, .L_xt_user_done
+ #else
+ mov a6, a2
+ callx4 a4
+ beqi a6, -1, .L_xt_user_done
+ mov a2, a6
+ #endif
+1:
+ #endif
+
+ rsr a2, EXCCAUSE /* recover exc cause */
+ movi a3, _xt_exception_table
+ get_percpu_entry_for a2, a4
+ addx4 a4, a2, a3 /* a4 = address of exception table entry */
+ l32i a4, a4, 0 /* a4 = handler address */
+ #ifdef __XTENSA_CALL0_ABI__
+ mov a2, sp /* a2 = pointer to exc frame */
+ callx0 a4 /* call handler */
+ #else
+ mov a6, sp /* a6 = pointer to exc frame */
+ callx4 a4 /* call handler */
+ #endif
+
+.L_xt_user_done:
+
+ /* Restore context and return */
+ call0 _xt_context_restore
+ l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
+ wsr a0, PS
+ l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
+ wsr a0, EPC_1
+ l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
+ l32i sp, sp, XT_STK_A1 /* remove exception frame */
+ rsync /* ensure PS and EPC written */
+ rfe /* PS.EXCM is cleared */
+\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+ Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT\r
+ on entry and used to return to a thread or interrupted interrupt handler.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .global _xt_user_exit
+ .type _xt_user_exit,@function
+ .align 4
+/* Dispatch exit point (stored at XT_STK_EXIT): restores PS/PC/A0, pops the
+ interrupt stack frame, and returns to the interrupted code via rfe. */
+_xt_user_exit:
+ l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */
+ wsr a0, PS
+ l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */
+ wsr a0, EPC_1
+ l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */
+ l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */
+ rsync /* ensure PS and EPC written */
+ rfe /* PS.EXCM is cleared */
+\r
+\r
+/*\r
+\r
+--------------------------------------------------------------------------------\r
+Syscall Exception Handler (jumped to from User Exception Handler).\r
+Syscall 0 is required to spill the register windows (no-op in Call 0 ABI).\r
+Only syscall 0 is handled here. Other syscalls return -1 to caller in a2.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .section .iram1,"ax"
+ .type _xt_syscall_exc,@function
+ .align 4
+_xt_syscall_exc:
+
+ #ifdef __XTENSA_CALL0_ABI__
+ /*
+ Save minimal regs for scratch. Syscall 0 does nothing in Call0 ABI.
+ Use a minimal stack frame (16B) to save A2 & A3 for scratch.
+ PS.EXCM could be cleared here, but unlikely to improve worst-case latency.
+ rsr a0, PS
+ addi a0, a0, -PS_EXCM_MASK
+ wsr a0, PS
+ */
+ addi sp, sp, -16
+ s32i a2, sp, 8
+ s32i a3, sp, 12
+ #else /* Windowed ABI */
+ /*
+ Save necessary context and spill the register windows.
+ PS.EXCM is still set and must remain set until after the spill.
+ Reuse context save function though it saves more than necessary.
+ For this reason, a full interrupt stack frame is allocated.
+ */
+ addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */
+ s32i a12, sp, XT_STK_A12 /* _xt_context_save requires A12- */
+ s32i a13, sp, XT_STK_A13 /* A13 to have already been saved */
+ call0 _xt_context_save
+ #endif
+
+ /*
+ Grab the interruptee's PC and skip over the 'syscall' instruction.
+ If it's at the end of a zero-overhead loop and it's not on the last
+ iteration, decrement loop counter and skip to beginning of loop.
+ */
+ rsr a2, EPC_1 /* a2 = PC of 'syscall' */
+ addi a3, a2, 3 /* ++PC (syscall is a 3-byte instruction) */
+ #if XCHAL_HAVE_LOOPS
+ rsr a0, LEND /* if (PC == LEND */
+ bne a3, a0, 1f
+ rsr a0, LCOUNT /* && LCOUNT != 0) */
+ beqz a0, 1f /* { */
+ addi a0, a0, -1 /* --LCOUNT */
+ rsr a3, LBEG /* PC = LBEG */
+ wsr a0, LCOUNT /* } */
+ #endif
+1: wsr a3, EPC_1 /* update PC */
+
+ /* Restore interruptee's context and return from exception. */
+ #ifdef __XTENSA_CALL0_ABI__
+ l32i a2, sp, 8
+ l32i a3, sp, 12
+ addi sp, sp, 16
+ #else
+ call0 _xt_context_restore
+ addi sp, sp, XT_STK_FRMSZ
+ #endif
+ movi a0, -1
+ movnez a2, a0, a2 /* return -1 if not syscall 0 */
+ rsr a0, EXCSAVE_1 /* restore interruptee's a0 */
+ rfe
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+Co-Processor Exception Handler (jumped to from User Exception Handler).\r
+These exceptions are generated by co-processor instructions, which are only\r
+allowed in thread code (not in interrupts or kernel code). This restriction is \r
+deliberately imposed to reduce the burden of state-save/restore in interrupts.\r
+--------------------------------------------------------------------------------\r
+*/\r
+#if XCHAL_CP_NUM > 0\r
+\r
+ .section .rodata, "a"
+
+/* Offset to CP n save area in thread's CP save area. */
+ .global _xt_coproc_sa_offset
+ .type _xt_coproc_sa_offset,@object
+ .align 16 /* minimize crossing cache boundaries */
+_xt_coproc_sa_offset:
+ .word XT_CP0_SA, XT_CP1_SA, XT_CP2_SA, XT_CP3_SA
+ .word XT_CP4_SA, XT_CP5_SA, XT_CP6_SA, XT_CP7_SA
+
+/* Bitmask for CP n's CPENABLE bit. One 32-bit entry per coprocessor. */
+ .type _xt_coproc_mask,@object
+ .align 16,,8 /* try to keep it all in one cache line */
+ .set i, 0
+_xt_coproc_mask:
+ .rept XCHAL_CP_MAX
+ .long (i<<16) | (1<<i) // upper 16-bits = i, lower = bitmask
+ .set i, i+1
+ .endr
+
+ .data
+
+/* Owner thread of CP n, identified by thread's CP save area (0 = unowned).
+ One array of XCHAL_CP_MAX words per processor, laid out consecutively. */
+ .global _xt_coproc_owner_sa
+ .type _xt_coproc_owner_sa,@object
+ .align 16,,XCHAL_CP_MAX<<2 /* minimize crossing cache boundaries */
+_xt_coproc_owner_sa:
+ .space (XCHAL_CP_MAX * portNUM_PROCESSORS) << 2
+
+ .section .iram1,"ax"
+
+
+/* Long-branch waypoints: targets for conditional branches inside
+ _xt_coproc_exc that are out of their signed 8-bit range. */
+ .align 4
+.L_goto_invalid:
+ j .L_xt_coproc_invalid /* not in a thread (invalid) */
+ .align 4
+.L_goto_done:
+ j .L_xt_coproc_done
+\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+ Coprocessor exception handler.\r
+ At entry, only a0 has been saved (in EXCSAVE_1).\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .type _xt_coproc_exc,@function\r
+ .align 4\r
+\r
+_xt_coproc_exc:\r
+\r
+ /* Allocate interrupt stack frame and save minimal context. */\r
+ mov a0, sp /* sp == a1 */\r
+ addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */\r
+ s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */\r
+ #if XCHAL_HAVE_WINDOWED\r
+ s32e a0, sp, -12 /* for debug backtrace */\r
+ #endif\r
+ rsr a0, PS /* save interruptee's PS */\r
+ s32i a0, sp, XT_STK_PS\r
+ rsr a0, EPC_1 /* save interruptee's PC */\r
+ s32i a0, sp, XT_STK_PC\r
+ rsr a0, EXCSAVE_1 /* save interruptee's a0 */\r
+ s32i a0, sp, XT_STK_A0\r
+ #if XCHAL_HAVE_WINDOWED\r
+ s32e a0, sp, -16 /* for debug backtrace */\r
+ #endif\r
+ movi a0, _xt_user_exit /* save exit point for dispatch */\r
+ s32i a0, sp, XT_STK_EXIT\r
+\r
+ rsr a0, EXCCAUSE\r
+ s32i a5, sp, XT_STK_A5 /* save a5 */\r
+ addi a5, a0, -EXCCAUSE_CP0_DISABLED /* a5 = CP index */\r
+\r
+ /* Save a few more of interruptee's registers (a5 was already saved). */\r
+ s32i a2, sp, XT_STK_A2\r
+ s32i a3, sp, XT_STK_A3\r
+ s32i a4, sp, XT_STK_A4\r
+ s32i a15, sp, XT_STK_A15\r
+\r
+ /* Get co-processor state save area of new owner thread. */\r
+ call0 XT_RTOS_CP_STATE /* a15 = new owner's save area */\r
+ beqz a15, .L_goto_invalid /* not in a thread (invalid) */\r
+\r
+ /* Enable the co-processor's bit in CPENABLE. */\r
+ movi a0, _xt_coproc_mask\r
+ rsr a4, CPENABLE /* a4 = CPENABLE */\r
+ addx4 a0, a5, a0 /* a0 = &_xt_coproc_mask[n] */\r
+ l32i a0, a0, 0 /* a0 = (n << 16) | (1 << n) */\r
+\r
+ /* FPU operations are incompatible with non-pinned tasks. If we have a FPU operation\r
+ here, to keep the entire thing from crashing, it's better to pin the task to whatever\r
+ core we're running on now. */\r
+ movi a2, pxCurrentTCB\r
+ getcoreid a3\r
+ addx4 a2, a3, a2\r
+ l32i a2, a2, 0 /* a2 = start of pxCurrentTCB[cpuid] */\r
+ addi a2, a2, TASKTCB_XCOREID_OFFSET /* offset to xCoreID in tcb struct */\r
+ s32i a3, a2, 0 /* store current cpuid */\r
+\r
+ /* Grab correct xt_coproc_owner_sa for this core */\r
+ movi a2, XCHAL_CP_MAX << 2\r
+ mull a2, a2, a3 /* multiply by current processor id */\r
+ movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */\r
+ add a3, a3, a2 /* a3 = owner area needed for this processor */\r
+\r
+ extui a2, a0, 0, 16 /* coprocessor bitmask portion */\r
+ or a4, a4, a2 /* a4 = CPENABLE | (1 << n) */\r
+ wsr a4, CPENABLE\r
+\r
+/* \r
+Keep loading _xt_coproc_owner_sa[n] atomic (=load once, then use that value\r
+everywhere): _xt_coproc_release assumes it works like this in order not to need\r
+locking.\r
+*/\r
+\r
+\r
+ /* Get old coprocessor owner thread (save area ptr) and assign new one. */\r
+ addx4 a3, a5, a3 /* a3 = &_xt_coproc_owner_sa[n] */\r
+ l32i a2, a3, 0 /* a2 = old owner's save area */\r
+ s32i a15, a3, 0 /* _xt_coproc_owner_sa[n] = new */\r
+ rsync /* ensure wsr.CPENABLE is complete */\r
+\r
+ /* Only need to context switch if new owner != old owner. */\r
+ beq a15, a2, .L_goto_done /* new owner == old, we're done */\r
+\r
+ /* If no old owner then nothing to save. */\r
+ beqz a2, .L_check_new\r
+\r
+ /* If old owner not actively using CP then nothing to save. */\r
+ l16ui a4, a2, XT_CPENABLE /* a4 = old owner's CPENABLE */\r
+ bnone a4, a0, .L_check_new /* old owner not using CP */\r
+\r
+.L_save_old:\r
+ /* Save old owner's coprocessor state. */\r
+\r
+ movi a5, _xt_coproc_sa_offset\r
+\r
+ /* Mark old owner state as no longer active (CPENABLE bit n clear). */\r
+ xor a4, a4, a0 /* clear CP bit in CPENABLE */\r
+ s16i a4, a2, XT_CPENABLE /* update old owner's CPENABLE */\r
+\r
+ extui a4, a0, 16, 5 /* a4 = CP index = n */\r
+ addx4 a5, a4, a5 /* a5 = &_xt_coproc_sa_offset[n] */\r
+\r
+ /* Mark old owner state as saved (CPSTORED bit n set). */\r
+ l16ui a4, a2, XT_CPSTORED /* a4 = old owner's CPSTORED */\r
+ l32i a5, a5, 0 /* a5 = XT_CP[n]_SA offset */\r
+ or a4, a4, a0 /* set CP in old owner's CPSTORED */\r
+ s16i a4, a2, XT_CPSTORED /* update old owner's CPSTORED */\r
+ l32i a2, a2, XT_CP_ASA /* ptr to actual (aligned) save area */\r
+ extui a3, a0, 16, 5 /* a3 = CP index = n */\r
+ add a2, a2, a5 /* a2 = old owner's area for CP n */\r
+\r
+ /*\r
+ The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.\r
+ It is theoretically possible for Xtensa processor designers to write TIE \r
+ that causes more address registers to be affected, but it is generally \r
+ unlikely. If that ever happens, more registers need to be saved/restored\r
+ around this macro invocation, and the value in a15 needs to be recomputed.\r
+ */\r
+ xchal_cpi_store_funcbody\r
+\r
+.L_check_new:\r
+ /* Check if any state has to be restored for new owner. */\r
+ /* NOTE: a15 = new owner's save area, cannot be zero when we get here. */\r
+\r
+ l16ui a3, a15, XT_CPSTORED /* a3 = new owner's CPSTORED */\r
+ movi a4, _xt_coproc_sa_offset\r
+ bnone a3, a0, .L_check_cs /* full CP not saved, check callee-saved */\r
+ xor a3, a3, a0 /* CPSTORED bit is set, clear it */\r
+ s16i a3, a15, XT_CPSTORED /* update new owner's CPSTORED */\r
+\r
+ /* Adjust new owner's save area pointers to area for CP n. */\r
+ extui a3, a0, 16, 5 /* a3 = CP index = n */\r
+ addx4 a4, a3, a4 /* a4 = &_xt_coproc_sa_offset[n] */\r
+ l32i a4, a4, 0 /* a4 = XT_CP[n]_SA */\r
+ l32i a5, a15, XT_CP_ASA /* ptr to actual (aligned) save area */\r
+ add a2, a4, a5 /* a2 = new owner's area for CP */\r
+\r
+ /*\r
+ The config-specific HAL macro invoked below destroys a2-5, preserves a0-1.\r
+ It is theoretically possible for Xtensa processor designers to write TIE \r
+ that causes more address registers to be affected, but it is generally \r
+ unlikely. If that ever happens, more registers need to be saved/restored\r
+ around this macro invocation.\r
+ */\r
+ xchal_cpi_load_funcbody\r
+\r
+ /* Restore interruptee's saved registers. */\r
+ /* Can omit rsync for wsr.CPENABLE here because _xt_user_exit does it. */\r
+.L_xt_coproc_done:\r
+ l32i a15, sp, XT_STK_A15\r
+ l32i a5, sp, XT_STK_A5\r
+ l32i a4, sp, XT_STK_A4\r
+ l32i a3, sp, XT_STK_A3\r
+ l32i a2, sp, XT_STK_A2\r
+ call0 _xt_user_exit /* return via exit dispatcher */\r
+ /* Never returns here - call0 is used as a jump (see note at top) */\r
+\r
+.L_check_cs:\r
+ /* a0 = CP mask in low bits, a15 = new owner's save area */\r
+ l16ui a2, a15, XT_CP_CS_ST /* a2 = mask of CPs saved */\r
+ bnone a2, a0, .L_xt_coproc_done /* if no match then done */\r
+ and a2, a2, a0 /* a2 = which CPs to restore */\r
+ extui a2, a2, 0, 8 /* extract low 8 bits */\r
+ s32i a6, sp, XT_STK_A6 /* save extra needed regs */\r
+ s32i a7, sp, XT_STK_A7\r
+ s32i a13, sp, XT_STK_A13\r
+ s32i a14, sp, XT_STK_A14\r
+ call0 _xt_coproc_restorecs /* restore CP registers */\r
+ l32i a6, sp, XT_STK_A6 /* restore saved registers */\r
+ l32i a7, sp, XT_STK_A7\r
+ l32i a13, sp, XT_STK_A13\r
+ l32i a14, sp, XT_STK_A14\r
+ j .L_xt_coproc_done\r
+\r
+ /* Co-processor exception occurred outside a thread (not supported). */\r
+.L_xt_coproc_invalid:\r
+ movi a0,PANIC_RSN_COPROCEXCEPTION\r
+ wsr a0,EXCCAUSE\r
+ call0 _xt_panic /* not in a thread (invalid) */\r
+ /* never returns */\r
+\r
+\r
+#endif /* XCHAL_CP_NUM */\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ Level 1 interrupt dispatch. Assumes stack frame has not been allocated yet.\r
+-------------------------------------------------------------------------------\r
+*/\r
+\r
+ .section .iram1,"ax"\r
+ .type _xt_lowint1,@function\r
+ .align 4\r
+\r
+_xt_lowint1:\r
+ mov a0, sp /* sp == a1 */\r
+ addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */\r
+ s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */\r
+ rsr a0, PS /* save interruptee's PS */\r
+ s32i a0, sp, XT_STK_PS\r
+ rsr a0, EPC_1 /* save interruptee's PC */\r
+ s32i a0, sp, XT_STK_PC\r
+ rsr a0, EXCSAVE_1 /* save interruptee's a0 */\r
+ s32i a0, sp, XT_STK_A0\r
+ movi a0, _xt_user_exit /* save exit point for dispatch */\r
+ s32i a0, sp, XT_STK_EXIT\r
+\r
+ /* Save rest of interrupt context and enter RTOS. */\r
+ call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */\r
+\r
+ /* !! We are now on the RTOS system stack !! */ \r
+\r
+ /* Set up PS for C, enable interrupts above this level and clear EXCM. */\r
+ #ifdef __XTENSA_CALL0_ABI__\r
+ movi a0, PS_INTLEVEL(1) | PS_UM\r
+ #else \r
+ movi a0, PS_INTLEVEL(1) | PS_UM | PS_WOE\r
+ #endif\r
+ wsr a0, PS\r
+ rsync\r
+\r
+ /* OK to call C code at this point, dispatch user ISRs */\r
+\r
+ dispatch_c_isr 1 XCHAL_INTLEVEL1_MASK\r
+\r
+ /* Done handling interrupts, transfer control to OS */\r
+ call0 XT_RTOS_INT_EXIT /* does not return directly here */\r
+\r
+\r
+/*\r
+-------------------------------------------------------------------------------\r
+ MEDIUM PRIORITY (LEVEL 2+) INTERRUPT VECTORS AND LOW LEVEL HANDLERS.\r
+\r
+ Medium priority interrupts are by definition those with priority greater\r
+ than 1 and not greater than XCHAL_EXCM_LEVEL. These are disabled by\r
+ setting PS.EXCM and therefore can easily support a C environment for\r
+ handlers in C, and interact safely with an RTOS.\r
+\r
+ Each vector goes at a predetermined location according to the Xtensa\r
+ hardware configuration, which is ensured by its placement in a special\r
+ section known to the Xtensa linker support package (LSP). It performs\r
+ the minimum necessary before jumping to the handler in the .text section.\r
+\r
+ The corresponding handler goes in the normal .text section. It sets up\r
+ the appropriate stack frame, saves a few vector-specific registers and\r
+ calls XT_RTOS_INT_ENTER to save the rest of the interrupted context\r
+ and enter the RTOS, then sets up a C environment. It then calls the\r
+ user's interrupt handler code (which may be coded in C) and finally \r
+ calls XT_RTOS_INT_EXIT to transfer control to the RTOS for scheduling.\r
+\r
+ While XT_RTOS_INT_EXIT does not return directly to the interruptee,\r
+ eventually the RTOS scheduler will want to dispatch the interrupted\r
+ task or handler. The scheduler will return to the exit point that was\r
+ saved in the interrupt stack frame at XT_STK_EXIT.\r
+-------------------------------------------------------------------------------\r
+*/\r
+\r
+#if XCHAL_EXCM_LEVEL >= 2\r
+\r
+ .begin literal_prefix .Level2InterruptVector\r
+ .section .Level2InterruptVector.text, "ax"\r
+ .global _Level2Vector\r
+ .type _Level2Vector,@function\r
+ .align 4\r
+_Level2Vector:\r
+ wsr a0, EXCSAVE_2 /* preserve a0 */\r
+ call0 _xt_medint2 /* load interrupt handler */\r
+ /* never returns here - call0 is used as a jump (see note at top) */\r
+\r
+ .end literal_prefix\r
+\r
+ .section .iram1,"ax"\r
+ .type _xt_medint2,@function\r
+ .align 4\r
+_xt_medint2:\r
+ mov a0, sp /* sp == a1 */\r
+ addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */\r
+ s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */\r
+ rsr a0, EPS_2 /* save interruptee's PS */\r
+ s32i a0, sp, XT_STK_PS\r
+ rsr a0, EPC_2 /* save interruptee's PC */\r
+ s32i a0, sp, XT_STK_PC\r
+ rsr a0, EXCSAVE_2 /* save interruptee's a0 */\r
+ s32i a0, sp, XT_STK_A0\r
+ movi a0, _xt_medint2_exit /* save exit point for dispatch */\r
+ s32i a0, sp, XT_STK_EXIT\r
+\r
+ /* Save rest of interrupt context and enter RTOS. */\r
+ call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */\r
+\r
+ /* !! We are now on the RTOS system stack !! */\r
+\r
+ /* Set up PS for C, enable interrupts above this level and clear EXCM. */\r
+ #ifdef __XTENSA_CALL0_ABI__\r
+ movi a0, PS_INTLEVEL(2) | PS_UM\r
+ #else\r
+ movi a0, PS_INTLEVEL(2) | PS_UM | PS_WOE\r
+ #endif\r
+ wsr a0, PS\r
+ rsync\r
+\r
+ /* OK to call C code at this point, dispatch user ISRs */\r
+\r
+ dispatch_c_isr 2 XCHAL_INTLEVEL2_MASK\r
+\r
+ /* Done handling interrupts, transfer control to OS */\r
+ call0 XT_RTOS_INT_EXIT /* does not return directly here */\r
+\r
+ /*\r
+ Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT\r
+ on entry and used to return to a thread or interrupted interrupt handler.\r
+ */\r
+ .global _xt_medint2_exit\r
+ .type _xt_medint2_exit,@function\r
+ .align 4\r
+_xt_medint2_exit:\r
+ /* Restore only level-specific regs (the rest were already restored) */\r
+ l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */\r
+ wsr a0, EPS_2\r
+ l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */\r
+ wsr a0, EPC_2\r
+ l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */\r
+ l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */\r
+ rsync /* ensure EPS and EPC written */\r
+ rfi 2\r
+\r
+#endif /* Level 2 */\r
+\r
+#if XCHAL_EXCM_LEVEL >= 3\r
+\r
+ .begin literal_prefix .Level3InterruptVector\r
+ .section .Level3InterruptVector.text, "ax"\r
+ .global _Level3Vector\r
+ .type _Level3Vector,@function\r
+ .align 4\r
+_Level3Vector:\r
+ wsr a0, EXCSAVE_3 /* preserve a0 */\r
+ call0 _xt_medint3 /* load interrupt handler */\r
+ /* never returns here - call0 is used as a jump (see note at top) */\r
+\r
+ .end literal_prefix\r
+\r
+ .section .iram1,"ax"\r
+ .type _xt_medint3,@function\r
+ .align 4\r
+_xt_medint3:\r
+ mov a0, sp /* sp == a1 */\r
+ addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */\r
+ s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */\r
+ rsr a0, EPS_3 /* save interruptee's PS */\r
+ s32i a0, sp, XT_STK_PS\r
+ rsr a0, EPC_3 /* save interruptee's PC */\r
+ s32i a0, sp, XT_STK_PC\r
+ rsr a0, EXCSAVE_3 /* save interruptee's a0 */\r
+ s32i a0, sp, XT_STK_A0\r
+ movi a0, _xt_medint3_exit /* save exit point for dispatch */\r
+ s32i a0, sp, XT_STK_EXIT\r
+\r
+ /* Save rest of interrupt context and enter RTOS. */\r
+ call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */\r
+\r
+ /* !! We are now on the RTOS system stack !! */\r
+\r
+ /* Set up PS for C, enable interrupts above this level and clear EXCM. */\r
+ #ifdef __XTENSA_CALL0_ABI__\r
+ movi a0, PS_INTLEVEL(3) | PS_UM\r
+ #else\r
+ movi a0, PS_INTLEVEL(3) | PS_UM | PS_WOE\r
+ #endif\r
+ wsr a0, PS\r
+ rsync\r
+\r
+ /* OK to call C code at this point, dispatch user ISRs */\r
+\r
+ dispatch_c_isr 3 XCHAL_INTLEVEL3_MASK\r
+\r
+ /* Done handling interrupts, transfer control to OS */\r
+ call0 XT_RTOS_INT_EXIT /* does not return directly here */\r
+\r
+ /*\r
+ Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT\r
+ on entry and used to return to a thread or interrupted interrupt handler.\r
+ */\r
+ .global _xt_medint3_exit\r
+ .type _xt_medint3_exit,@function\r
+ .align 4\r
+_xt_medint3_exit:\r
+ /* Restore only level-specific regs (the rest were already restored) */\r
+ l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */\r
+ wsr a0, EPS_3\r
+ l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */\r
+ wsr a0, EPC_3\r
+ l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */\r
+ l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */\r
+ rsync /* ensure EPS and EPC written */\r
+ rfi 3\r
+\r
+#endif /* Level 3 */\r
+\r
+#if XCHAL_EXCM_LEVEL >= 4\r
+\r
+ .begin literal_prefix .Level4InterruptVector\r
+ .section .Level4InterruptVector.text, "ax"\r
+ .global _Level4Vector\r
+ .type _Level4Vector,@function\r
+ .align 4\r
+_Level4Vector:\r
+ wsr a0, EXCSAVE_4 /* preserve a0 */\r
+ call0 _xt_medint4 /* load interrupt handler */\r
+\r
+ .end literal_prefix\r
+\r
+ .section .iram1,"ax"\r
+ .type _xt_medint4,@function\r
+ .align 4\r
+_xt_medint4:\r
+ mov a0, sp /* sp == a1 */\r
+ addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */\r
+ s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */\r
+ rsr a0, EPS_4 /* save interruptee's PS */\r
+ s32i a0, sp, XT_STK_PS\r
+ rsr a0, EPC_4 /* save interruptee's PC */\r
+ s32i a0, sp, XT_STK_PC\r
+ rsr a0, EXCSAVE_4 /* save interruptee's a0 */\r
+ s32i a0, sp, XT_STK_A0\r
+ movi a0, _xt_medint4_exit /* save exit point for dispatch */\r
+ s32i a0, sp, XT_STK_EXIT\r
+\r
+ /* Save rest of interrupt context and enter RTOS. */\r
+ call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */\r
+\r
+ /* !! We are now on the RTOS system stack !! */\r
+\r
+ /* Set up PS for C, enable interrupts above this level and clear EXCM. */\r
+ #ifdef __XTENSA_CALL0_ABI__\r
+ movi a0, PS_INTLEVEL(4) | PS_UM\r
+ #else\r
+ movi a0, PS_INTLEVEL(4) | PS_UM | PS_WOE\r
+ #endif\r
+ wsr a0, PS\r
+ rsync\r
+\r
+ /* OK to call C code at this point, dispatch user ISRs */\r
+\r
+ dispatch_c_isr 4 XCHAL_INTLEVEL4_MASK\r
+\r
+ /* Done handling interrupts, transfer control to OS */\r
+ call0 XT_RTOS_INT_EXIT /* does not return directly here */\r
+\r
+ /*\r
+ Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT\r
+ on entry and used to return to a thread or interrupted interrupt handler.\r
+ */\r
+ .global _xt_medint4_exit\r
+ .type _xt_medint4_exit,@function\r
+ .align 4\r
+_xt_medint4_exit:\r
+ /* Restore only level-specific regs (the rest were already restored) */\r
+ l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */\r
+ wsr a0, EPS_4\r
+ l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */\r
+ wsr a0, EPC_4\r
+ l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */\r
+ l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */\r
+ rsync /* ensure EPS and EPC written */\r
+ rfi 4\r
+\r
+#endif /* Level 4 */\r
+\r
+#if XCHAL_EXCM_LEVEL >= 5\r
+\r
+ .begin literal_prefix .Level5InterruptVector\r
+ .section .Level5InterruptVector.text, "ax"\r
+ .global _Level5Vector\r
+ .type _Level5Vector,@function\r
+ .align 4\r
+_Level5Vector:\r
+ wsr a0, EXCSAVE_5 /* preserve a0 */\r
+ call0 _xt_medint5 /* load interrupt handler */\r
+\r
+ .end literal_prefix\r
+\r
+ .section .iram1,"ax"\r
+ .type _xt_medint5,@function\r
+ .align 4\r
+_xt_medint5:\r
+ mov a0, sp /* sp == a1 */\r
+ addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */\r
+ s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */\r
+ rsr a0, EPS_5 /* save interruptee's PS */\r
+ s32i a0, sp, XT_STK_PS\r
+ rsr a0, EPC_5 /* save interruptee's PC */\r
+ s32i a0, sp, XT_STK_PC\r
+ rsr a0, EXCSAVE_5 /* save interruptee's a0 */\r
+ s32i a0, sp, XT_STK_A0\r
+ movi a0, _xt_medint5_exit /* save exit point for dispatch */\r
+ s32i a0, sp, XT_STK_EXIT\r
+\r
+ /* Save rest of interrupt context and enter RTOS. */\r
+ call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */\r
+\r
+ /* !! We are now on the RTOS system stack !! */\r
+\r
+ /* Set up PS for C, enable interrupts above this level and clear EXCM. */\r
+ #ifdef __XTENSA_CALL0_ABI__\r
+ movi a0, PS_INTLEVEL(5) | PS_UM\r
+ #else\r
+ movi a0, PS_INTLEVEL(5) | PS_UM | PS_WOE\r
+ #endif\r
+ wsr a0, PS\r
+ rsync\r
+\r
+ /* OK to call C code at this point, dispatch user ISRs */\r
+\r
+ dispatch_c_isr 5 XCHAL_INTLEVEL5_MASK\r
+\r
+ /* Done handling interrupts, transfer control to OS */\r
+ call0 XT_RTOS_INT_EXIT /* does not return directly here */\r
+\r
+ /*\r
+ Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT\r
+ on entry and used to return to a thread or interrupted interrupt handler.\r
+ */\r
+ .global _xt_medint5_exit\r
+ .type _xt_medint5_exit,@function\r
+ .align 4\r
+_xt_medint5_exit:\r
+ /* Restore only level-specific regs (the rest were already restored) */\r
+ l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */\r
+ wsr a0, EPS_5\r
+ l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */\r
+ wsr a0, EPC_5\r
+ l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */\r
+ l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */\r
+ rsync /* ensure EPS and EPC written */\r
+ rfi 5\r
+\r
+#endif /* Level 5 */\r
+\r
+#if XCHAL_EXCM_LEVEL >= 6\r
+\r
+ .begin literal_prefix .Level6InterruptVector\r
+ .section .Level6InterruptVector.text, "ax"\r
+ .global _Level6Vector\r
+ .type _Level6Vector,@function\r
+ .align 4\r
+_Level6Vector:\r
+ wsr a0, EXCSAVE_6 /* preserve a0 */\r
+ call0 _xt_medint6 /* load interrupt handler */\r
+\r
+ .end literal_prefix\r
+\r
+ .section .iram1,"ax"\r
+ .type _xt_medint6,@function\r
+ .align 4\r
+_xt_medint6:\r
+ mov a0, sp /* sp == a1 */\r
+ addi sp, sp, -XT_STK_FRMSZ /* allocate interrupt stack frame */\r
+ s32i a0, sp, XT_STK_A1 /* save pre-interrupt SP */\r
+ rsr a0, EPS_6 /* save interruptee's PS */\r
+ s32i a0, sp, XT_STK_PS\r
+ rsr a0, EPC_6 /* save interruptee's PC */\r
+ s32i a0, sp, XT_STK_PC\r
+ rsr a0, EXCSAVE_6 /* save interruptee's a0 */\r
+ s32i a0, sp, XT_STK_A0\r
+ movi a0, _xt_medint6_exit /* save exit point for dispatch */\r
+ s32i a0, sp, XT_STK_EXIT\r
+\r
+ /* Save rest of interrupt context and enter RTOS. */\r
+ call0 XT_RTOS_INT_ENTER /* common RTOS interrupt entry */\r
+\r
+ /* !! We are now on the RTOS system stack !! */\r
+\r
+ /* Set up PS for C, enable interrupts above this level and clear EXCM. */\r
+ #ifdef __XTENSA_CALL0_ABI__\r
+ movi a0, PS_INTLEVEL(6) | PS_UM\r
+ #else\r
+ movi a0, PS_INTLEVEL(6) | PS_UM | PS_WOE\r
+ #endif\r
+ wsr a0, PS\r
+ rsync\r
+\r
+ /* OK to call C code at this point, dispatch user ISRs */\r
+\r
+ dispatch_c_isr 6 XCHAL_INTLEVEL6_MASK\r
+\r
+ /* Done handling interrupts, transfer control to OS */\r
+ call0 XT_RTOS_INT_EXIT /* does not return directly here */\r
+\r
+ /*\r
+ Exit point for dispatch. Saved in interrupt stack frame at XT_STK_EXIT\r
+ on entry and used to return to a thread or interrupted interrupt handler.\r
+ */\r
+ .global _xt_medint6_exit\r
+ .type _xt_medint6_exit,@function\r
+ .align 4\r
+_xt_medint6_exit:\r
+ /* Restore only level-specific regs (the rest were already restored) */\r
+ l32i a0, sp, XT_STK_PS /* retrieve interruptee's PS */\r
+ wsr a0, EPS_6\r
+ l32i a0, sp, XT_STK_PC /* retrieve interruptee's PC */\r
+ wsr a0, EPC_6\r
+ l32i a0, sp, XT_STK_A0 /* retrieve interruptee's A0 */\r
+ l32i sp, sp, XT_STK_A1 /* remove interrupt stack frame */\r
+ rsync /* ensure EPS and EPC written */\r
+ rfi 6\r
+\r
+#endif /* Level 6 */\r
+\r
+\r
+/*******************************************************************************\r
+\r
+HIGH PRIORITY (LEVEL > XCHAL_EXCM_LEVEL) INTERRUPT VECTORS AND HANDLERS\r
+\r
+High priority interrupts are by definition those with priorities greater\r
+than XCHAL_EXCM_LEVEL. This includes non-maskable (NMI). High priority\r
+interrupts cannot interact with the RTOS, that is they must save all regs\r
+they use and not call any RTOS function.\r
+\r
+A further restriction imposed by the Xtensa windowed architecture is that\r
+high priority interrupts must not modify the stack area even logically\r
+"above" the top of the interrupted stack (they need to provide their\r
+own stack or static save area).\r
+\r
+Cadence Design Systems recommends high priority interrupt handlers be coded in assembly\r
+and used for purposes requiring very short service times.\r
+\r
+Here are templates for high priority (level 2+) interrupt vectors.\r
+They assume only one interrupt per level to avoid the burden of identifying\r
+which interrupts at this level are pending and enabled. This allows for \r
+minimum latency and avoids having to save/restore a2 in addition to a0.\r
+If more than one interrupt per high priority level is configured, this burden\r
+is on the handler which in any case must provide a way to save and restore\r
+registers it uses without touching the interrupted stack.\r
+\r
+Each vector goes at a predetermined location according to the Xtensa\r
+hardware configuration, which is ensured by its placement in a special\r
+section known to the Xtensa linker support package (LSP). It performs\r
+the minimum necessary before jumping to the handler in the .text section.\r
+\r
+*******************************************************************************/\r
+\r
+/*\r
+These stubs just call xt_highintX/xt_nmi to handle the real interrupt. Please define\r
+these in an external assembly source file. If these symbols are not defined anywhere\r
+else, the defaults in xtensa_vector_defaults.S are used.\r
+*/\r
+\r
+#if XCHAL_NUM_INTLEVELS >=2 && XCHAL_EXCM_LEVEL <2 && XCHAL_DEBUGLEVEL !=2\r
+\r
+ .begin literal_prefix .Level2InterruptVector\r
+ .section .Level2InterruptVector.text, "ax"\r
+ .global _Level2Vector\r
+ .type _Level2Vector,@function\r
+ .global xt_highint2\r
+ .align 4\r
+_Level2Vector:\r
+ wsr a0, EXCSAVE_2 /* preserve a0 */\r
+ call0 xt_highint2 /* load interrupt handler */\r
+\r
+ .end literal_prefix\r
+\r
+#endif /* Level 2 */\r
+\r
+#if XCHAL_NUM_INTLEVELS >=3 && XCHAL_EXCM_LEVEL <3 && XCHAL_DEBUGLEVEL !=3\r
+\r
+ .begin literal_prefix .Level3InterruptVector\r
+ .section .Level3InterruptVector.text, "ax"\r
+ .global _Level3Vector\r
+ .type _Level3Vector,@function\r
+ .global xt_highint3\r
+ .align 4\r
+_Level3Vector:\r
+ wsr a0, EXCSAVE_3 /* preserve a0 */\r
+ call0 xt_highint3 /* load interrupt handler */\r
+ /* never returns here - call0 is used as a jump (see note at top) */\r
+\r
+ .end literal_prefix\r
+\r
+#endif /* Level 3 */\r
+\r
+#if XCHAL_NUM_INTLEVELS >=4 && XCHAL_EXCM_LEVEL <4 && XCHAL_DEBUGLEVEL !=4\r
+\r
+ .begin literal_prefix .Level4InterruptVector\r
+ .section .Level4InterruptVector.text, "ax"\r
+ .global _Level4Vector\r
+ .type _Level4Vector,@function\r
+ .global xt_highint4\r
+ .align 4\r
+_Level4Vector:\r
+ wsr a0, EXCSAVE_4 /* preserve a0 */\r
+ call0 xt_highint4 /* load interrupt handler */\r
+ /* never returns here - call0 is used as a jump (see note at top) */\r
+\r
+ .end literal_prefix\r
+\r
+#endif /* Level 4 */\r
+\r
+#if XCHAL_NUM_INTLEVELS >=5 && XCHAL_EXCM_LEVEL <5 && XCHAL_DEBUGLEVEL !=5\r
+\r
+ .begin literal_prefix .Level5InterruptVector\r
+ .section .Level5InterruptVector.text, "ax"\r
+ .global _Level5Vector\r
+ .type _Level5Vector,@function\r
+ .global xt_highint5\r
+ .align 4\r
+_Level5Vector:\r
+ wsr a0, EXCSAVE_5 /* preserve a0 */\r
+ call0 xt_highint5 /* load interrupt handler */\r
+ /* never returns here - call0 is used as a jump (see note at top) */\r
+\r
+ .end literal_prefix\r
+\r
+#endif /* Level 5 */\r
+\r
+#if XCHAL_NUM_INTLEVELS >=6 && XCHAL_EXCM_LEVEL <6 && XCHAL_DEBUGLEVEL !=6\r
+\r
+ .begin literal_prefix .Level6InterruptVector\r
+ .section .Level6InterruptVector.text, "ax"\r
+ .global _Level6Vector\r
+ .type _Level6Vector,@function\r
+ .global xt_highint6\r
+ .align 4\r
+_Level6Vector:\r
+ wsr a0, EXCSAVE_6 /* preserve a0 */\r
+ call0 xt_highint6 /* load interrupt handler */\r
+ /* never returns here - call0 is used as a jump (see note at top) */\r
+\r
+ .end literal_prefix\r
+\r
+#endif /* Level 6 */\r
+\r
+#if XCHAL_HAVE_NMI\r
+\r
+ .begin literal_prefix .NMIExceptionVector\r
+ .section .NMIExceptionVector.text, "ax"\r
+ .global _NMIExceptionVector\r
+ .type _NMIExceptionVector,@function\r
+ .global xt_nmi\r
+ .align 4\r
+_NMIExceptionVector:\r
+ wsr a0, EXCSAVE + XCHAL_NMILEVEL _ /* preserve a0 */\r
+ call0 xt_nmi /* load interrupt handler */\r
+ /* never returns here - call0 is used as a jump (see note at top) */\r
+\r
+ .end literal_prefix\r
+\r
+#endif /* NMI */\r
+\r
+\r
+/*******************************************************************************\r
+\r
+WINDOW OVERFLOW AND UNDERFLOW EXCEPTION VECTORS AND ALLOCA EXCEPTION HANDLER\r
+\r
+Here is the code for each window overflow/underflow exception vector and \r
+(interspersed) efficient code for handling the alloca exception cause.\r
+Window exceptions are handled entirely in the vector area and are very\r
+tight for performance. The alloca exception is also handled entirely in \r
+the window vector area so comes at essentially no cost in code size.\r
+Users should never need to modify them and Cadence Design Systems recommends \r
+they do not.\r
+\r
+Window handlers go at predetermined vector locations according to the\r
+Xtensa hardware configuration, which is ensured by their placement in a\r
+special section known to the Xtensa linker support package (LSP). Since\r
+their offsets in that section are always the same, the LSPs do not define\r
+a section per vector.\r
+\r
+These things are coded for XEA2 only (XEA1 is not supported).\r
+\r
+Note on Underflow Handlers:\r
+The underflow handler for returning from call[i+1] to call[i]\r
+must preserve all the registers from call[i+1]'s window.\r
+In particular, a0 and a1 must be preserved because the RETW instruction\r
+will be reexecuted (and may even underflow if an intervening exception\r
+has flushed call[i]'s registers).\r
+Registers a2 and up may contain return values.\r
+\r
+*******************************************************************************/\r
+\r
+#if XCHAL_HAVE_WINDOWED\r
+\r
+ .section .WindowVectors.text, "ax"\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+Window Overflow Exception for Call4.\r
+\r
+Invoked if a call[i] referenced a register (a4-a15)\r
+that contains data from ancestor call[j];\r
+call[j] had done a call4 to call[j+1].\r
+On entry here:\r
+ window rotated to call[j] start point;\r
+ a0-a3 are registers to be saved;\r
+ a4-a15 must be preserved;\r
+ a5 is call[j+1]'s stack pointer.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .org 0x0\r
+ .global _WindowOverflow4\r
+_WindowOverflow4:\r
+\r
+ s32e a0, a5, -16 /* save a0 to call[j+1]'s stack frame */\r
+ s32e a1, a5, -12 /* save a1 to call[j+1]'s stack frame */\r
+ s32e a2, a5, -8 /* save a2 to call[j+1]'s stack frame */\r
+ s32e a3, a5, -4 /* save a3 to call[j+1]'s stack frame */\r
+ rfwo /* rotates back to call[i] position */\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+Window Underflow Exception for Call4\r
+\r
+Invoked by RETW returning from call[i+1] to call[i]\r
+where call[i]'s registers must be reloaded (not live in ARs);\r
+where call[i] had done a call4 to call[i+1].\r
+On entry here:\r
+ window rotated to call[i] start point;\r
+ a0-a3 are undefined, must be reloaded with call[i].reg[0..3];\r
+ a4-a15 must be preserved (they are call[i+1].reg[0..11]);\r
+ a5 is call[i+1]'s stack pointer.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .org 0x40\r
+ .global _WindowUnderflow4\r
+_WindowUnderflow4:\r
+\r
+ l32e a0, a5, -16 /* restore a0 from call[i+1]'s stack frame */\r
+ l32e a1, a5, -12 /* restore a1 from call[i+1]'s stack frame */\r
+ l32e a2, a5, -8 /* restore a2 from call[i+1]'s stack frame */\r
+ l32e a3, a5, -4 /* restore a3 from call[i+1]'s stack frame */\r
+ rfwu\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+Handle alloca exception generated by interruptee executing 'movsp'.\r
+This uses space between the window vectors, so is essentially "free".\r
+All interruptee's regs are intact except a0 which is saved in EXCSAVE_1,\r
+and PS.EXCM has been set by the exception hardware (can't be interrupted).\r
+The fact the alloca exception was taken means the registers associated with\r
+the base-save area have been spilled and will be restored by the underflow\r
+handler, so those 4 registers are available for scratch.\r
+The code is optimized to avoid unaligned branches and minimize cache misses.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .align 4\r
+ .global _xt_alloca_exc\r
+_xt_alloca_exc:\r
+\r
+ rsr a0, WINDOWBASE /* grab WINDOWBASE before rotw changes it */\r
+ rotw -1 /* WINDOWBASE goes to a4, new a0-a3 are scratch */\r
+ rsr a2, PS\r
+ extui a3, a2, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS\r
+ xor a3, a3, a4 /* bits changed from old to current windowbase */\r
+ rsr a4, EXCSAVE_1 /* restore original a0 (now in a4) */\r
+ slli a3, a3, XCHAL_PS_OWB_SHIFT\r
+ xor a2, a2, a3 /* flip changed bits in old window base */\r
+ wsr a2, PS /* update PS.OWB to new window base */\r
+ rsync\r
+\r
+ _bbci.l a4, 31, _WindowUnderflow4\r
+ rotw -1 /* original a0 goes to a8 */\r
+ _bbci.l a8, 30, _WindowUnderflow8\r
+ rotw -1\r
+ j _WindowUnderflow12\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+Window Overflow Exception for Call8\r
+\r
+Invoked if a call[i] referenced a register (a4-a15)\r
+that contains data from ancestor call[j];\r
+call[j] had done a call8 to call[j+1].\r
+On entry here:\r
+ window rotated to call[j] start point;\r
+ a0-a7 are registers to be saved;\r
+ a8-a15 must be preserved;\r
+ a9 is call[j+1]'s stack pointer.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .org 0x80\r
+ .global _WindowOverflow8\r
+_WindowOverflow8:\r
+\r
+ s32e a0, a9, -16 /* save a0 to call[j+1]'s stack frame */\r
+ l32e a0, a1, -12 /* a0 <- call[j-1]'s sp\r
+ (used to find end of call[j]'s frame) */\r
+ s32e a1, a9, -12 /* save a1 to call[j+1]'s stack frame */\r
+ s32e a2, a9, -8 /* save a2 to call[j+1]'s stack frame */\r
+ s32e a3, a9, -4 /* save a3 to call[j+1]'s stack frame */\r
+ s32e a4, a0, -32 /* save a4 to call[j]'s stack frame */\r
+ s32e a5, a0, -28 /* save a5 to call[j]'s stack frame */\r
+ s32e a6, a0, -24 /* save a6 to call[j]'s stack frame */\r
+ s32e a7, a0, -20 /* save a7 to call[j]'s stack frame */\r
+ rfwo /* rotates back to call[i] position */\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+Window Underflow Exception for Call8\r
+\r
+Invoked by RETW returning from call[i+1] to call[i]\r
+where call[i]'s registers must be reloaded (not live in ARs);\r
+where call[i] had done a call8 to call[i+1].\r
+On entry here:\r
+ window rotated to call[i] start point;\r
+ a0-a7 are undefined, must be reloaded with call[i].reg[0..7];\r
+ a8-a15 must be preserved (they are call[i+1].reg[0..7]);\r
+ a9 is call[i+1]'s stack pointer.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .org 0xC0\r
+ .global _WindowUnderflow8\r
+_WindowUnderflow8:\r
+\r
+ l32e a0, a9, -16 /* restore a0 from call[i+1]'s stack frame */\r
+ l32e a1, a9, -12 /* restore a1 from call[i+1]'s stack frame */\r
+ l32e a2, a9, -8 /* restore a2 from call[i+1]'s stack frame */\r
+ l32e a7, a1, -12 /* a7 <- call[i-1]'s sp\r
+ (used to find end of call[i]'s frame) */\r
+ l32e a3, a9, -4 /* restore a3 from call[i+1]'s stack frame */\r
+ l32e a4, a7, -32 /* restore a4 from call[i]'s stack frame */\r
+ l32e a5, a7, -28 /* restore a5 from call[i]'s stack frame */\r
+ l32e a6, a7, -24 /* restore a6 from call[i]'s stack frame */\r
+ l32e a7, a7, -20 /* restore a7 from call[i]'s stack frame */\r
+ rfwu\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+Window Overflow Exception for Call12\r
+\r
+Invoked if a call[i] referenced a register (a4-a15)\r
+that contains data from ancestor call[j];\r
+call[j] had done a call12 to call[j+1].\r
+On entry here:\r
+ window rotated to call[j] start point;\r
+ a0-a11 are registers to be saved;\r
+ a12-a15 must be preserved;\r
+ a13 is call[j+1]'s stack pointer.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .org 0x100 /* handler sits at fixed vector offset 0x100 */\r
+ .global _WindowOverflow12\r
+_WindowOverflow12:\r
+\r
+ s32e a0, a13, -16 /* save a0 to call[j+1]'s stack frame */\r
+ l32e a0, a1, -12 /* a0 <- call[j-1]'s sp\r
+ (used to find end of call[j]'s frame) */\r
+ s32e a1, a13, -12 /* save a1 to call[j+1]'s stack frame */\r
+ s32e a2, a13, -8 /* save a2 to call[j+1]'s stack frame */\r
+ s32e a3, a13, -4 /* save a3 to call[j+1]'s stack frame */\r
+ s32e a4, a0, -48 /* save a4 to end of call[j]'s stack frame */\r
+ s32e a5, a0, -44 /* save a5 to end of call[j]'s stack frame */\r
+ s32e a6, a0, -40 /* save a6 to end of call[j]'s stack frame */\r
+ s32e a7, a0, -36 /* save a7 to end of call[j]'s stack frame */\r
+ s32e a8, a0, -32 /* save a8 to end of call[j]'s stack frame */\r
+ s32e a9, a0, -28 /* save a9 to end of call[j]'s stack frame */\r
+ s32e a10, a0, -24 /* save a10 to end of call[j]'s stack frame */\r
+ s32e a11, a0, -20 /* save a11 to end of call[j]'s stack frame */\r
+ rfwo /* rotates back to call[i] position */\r
+\r
+/*\r
+--------------------------------------------------------------------------------\r
+Window Underflow Exception for Call12\r
+\r
+Invoked by RETW returning from call[i+1] to call[i]\r
+where call[i]'s registers must be reloaded (not live in ARs);\r
+where call[i] had done a call12 to call[i+1].\r
+On entry here:\r
+ window rotated to call[i] start point;\r
+ a0-a11 are undefined, must be reloaded with call[i].reg[0..11];\r
+ a12-a15 must be preserved (they are call[i+1].reg[0..3]);\r
+ a13 is call[i+1]'s stack pointer.\r
+--------------------------------------------------------------------------------\r
+*/\r
+\r
+ .org 0x140 /* handler sits at fixed vector offset 0x140 */\r
+ .global _WindowUnderflow12\r
+_WindowUnderflow12:\r
+\r
+ l32e a0, a13, -16 /* restore a0 from call[i+1]'s stack frame */\r
+ l32e a1, a13, -12 /* restore a1 from call[i+1]'s stack frame */\r
+ l32e a2, a13, -8 /* restore a2 from call[i+1]'s stack frame */\r
+ l32e a11, a1, -12 /* a11 <- call[i-1]'s sp\r
+ (used to find end of call[i]'s frame) */\r
+ l32e a3, a13, -4 /* restore a3 from call[i+1]'s stack frame */\r
+ l32e a4, a11, -48 /* restore a4 from end of call[i]'s stack frame */\r
+ l32e a5, a11, -44 /* restore a5 from end of call[i]'s stack frame */\r
+ l32e a6, a11, -40 /* restore a6 from end of call[i]'s stack frame */\r
+ l32e a7, a11, -36 /* restore a7 from end of call[i]'s stack frame */\r
+ l32e a8, a11, -32 /* restore a8 from end of call[i]'s stack frame */\r
+ l32e a9, a11, -28 /* restore a9 from end of call[i]'s stack frame */\r
+ l32e a10, a11, -24 /* restore a10 from end of call[i]'s stack frame */\r
+ l32e a11, a11, -20 /* restore a11 (scratch sp) last, from end of call[i]'s stack frame */\r
+ rfwu /* return from window underflow; the RETW that trapped then completes */\r
+\r
+#endif /* XCHAL_HAVE_WINDOWED */\r
+\r
+ .section .UserEnter.text, "ax"\r
+ .global call_user_start\r
+ .type call_user_start,@function\r
+ .align 4\r
+ .literal_position\r
+\r
+\r
+\r