/******************************************************************************
*
* Copyright (C) 2010 - 2014 Xilinx, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* Use of the Software is limited solely to applications:
* (a) running on a Xilinx device, or
* (b) that interact with a Xilinx device through a bus or interconnect.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* XILINX BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Except as contained in this notice, the name of the Xilinx shall not be used
* in advertising or otherwise to promote the sale, use or other dealings in
* this Software without prior written authorization from Xilinx.
*
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_cache.c
*
* Contains required functions for the ARM cache functionality.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver   Who  Date     Changes
* ----- ---- -------- -----------------------------------------------
* 1.00a ecm  01/29/10 First release
* 1.00a ecm  06/24/10 Moved the L1 and L2 specific function prototypes
*                     to xil_cache_mach.h to give access to sophisticated
*                     users.
* 3.02a sdm  04/07/11 Updated Flush/InvalidateRange APIs to flush/invalidate
*                     L1 and L2 caches in a single loop and used dsb, L2 sync
*                     at the end of the loop.
* 3.04a sdm  01/02/12 Removed redundant dsb/dmb instructions in the cache
*                     maintenance APIs.
* 3.07a asa  07/16/12 Corrected the L1 and L2 cache invalidation order.
* 3.07a sgd  09/18/12 Corrected the L2 cache enable and disable sequence.
* 3.10a srt  04/18/13 Implemented ARM errata workarounds. Refer to the file
*                     'xil_errata.h' for the errata descriptions.
* 3.10a asa  05/13/13 Modified the cache disable APIs. The L2 cache disable
*                     operation was being done with the L1 data cache
*                     disabled. This is fixed so that the L2 cache disable
*                     operation is independent of the L1 cache disable
*                     operation. This fixes CR #706464. Changes were also
*                     made to do an L2 cache sync (poll reg7_cache_sync).
*                     This fixes CR #700542.
* 3.11a asa  09/23/13 Modified Xil_DCacheFlushRange and
*                     Xil_DCacheInvalidateRange to fix potential issues. Fixed
*                     other relevant cache APIs to disable and re-enable
*                     interrupts. This fixes CR #663885.
* 3.11a asa  09/28/13 Made changes for the L2 cache sync operation. L2 cache
*                     flush/clean/invalidate by cache line does not need a
*                     cache sync because these operations are atomic. For a
*                     complete L2 cache flush/invalidate by way, the code now
*                     polls in a loop until the status shows that the cache
*                     operation has completed.
* 4.00  pkp  24/01/14 Modified Xil_DCacheInvalidateRange to fix a bug where a
*                     few cache lines were not invalidated when an unaligned
*                     start address was handled. That fixes CR #766768.
*                     Also, in Xil_L1DCacheInvalidate, invalidating the entire
*                     L1 D-cache used to invalidate the stack memory holding
*                     the return address. The stack range is now flushed
*                     first and then the L1 D-cache is invalidated. This is
*                     done to fix CR #763829.
* 4.01  asa  05/09/14 Made changes in cortexa9/xil_cache.c to fix CR #798230.
* </pre>
*
******************************************************************************/
/***************************** Include Files *********************************/

#include "xil_cache.h"
#include "xil_cache_l.h"
#include "xil_io.h"
#include "xpseudo_asm.h"
#include "xparameters.h"
#include "xreg_cortexa9.h"
#include "xl2cc.h"
#include "xil_errata.h"
#include "xil_exception.h"

/************************** Function Prototypes ******************************/

/************************** Variable Definitions *****************************/

#define IRQ_FIQ_MASK 0xC0    /* Mask IRQ and FIQ interrupts in cpsr */

#ifdef __GNUC__
extern int _stack_end;
extern int _stack;
#endif
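
/*
 * The maintenance routines below bracket their work with the same
 * interrupt-masking pattern so a cache operation cannot be torn by an IRQ or
 * FIQ. A minimal sketch of that pattern, assuming only mfcpsr()/mtcpsr() from
 * xpseudo_asm.h and the IRQ_FIQ_MASK definition above (illustrative only; the
 * function name is hypothetical and the block is not compiled):
 */
#if 0
static void ExampleMaintenanceSkeleton(void)
{
    unsigned int currmask;

    currmask = mfcpsr();                /* save the current CPSR */
    mtcpsr(currmask | IRQ_FIQ_MASK);    /* mask IRQ and FIQ */

    /* ... cache maintenance operations go here ... */

    mtcpsr(currmask);                   /* restore the original mask state */
}
#endif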
/****************************************************************************
*
* Access the L2 Debug Control Register.
*
* @param    Value is the value to be written to the Debug Control Register.
*
****************************************************************************/
#ifdef __GNUC__
static inline void Xil_L2WriteDebugCtrl(u32 Value)
#else
static void Xil_L2WriteDebugCtrl(u32 Value)
#endif
{
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_DEBUG_CTRL_OFFSET, Value);
#else
    (void)(Value);
#endif
}
/****************************************************************************
*
* Perform an L2 cache sync operation.
*
****************************************************************************/
#ifdef __GNUC__
static inline void Xil_L2CacheSync(void)
#else
static void Xil_L2CacheSync(void)
#endif
{
#ifdef CONFIG_PL310_ERRATA_753970
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_DUMMY_CACHE_SYNC_OFFSET, 0x0);
#else
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_SYNC_OFFSET, 0x0);
#endif
}
/****************************************************************************
*
* Enable the Data cache.
*
****************************************************************************/
void Xil_DCacheEnable(void)
{
    Xil_L1DCacheEnable();
    Xil_L2CacheEnable();
}
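
/*
 * Illustrative usage sketch (not part of the driver): in a bare-metal
 * application whose boot code has not already enabled the caches, the caches
 * are typically enabled once, early on, before any DMA or interrupt-driven
 * I/O is started. The main() context below is only an example.
 */
#if 0
int main(void)
{
    Xil_ICacheEnable();   /* enables the L1 I-cache and the L2 cache */
    Xil_DCacheEnable();   /* enables the L1 D-cache and the L2 cache */

    /* ... application code ... */

    return 0;
}
#endif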
/****************************************************************************
*
* Disable the Data cache.
*
****************************************************************************/
void Xil_DCacheDisable(void)
{
    Xil_L2CacheDisable();
    Xil_L1DCacheDisable();
}
/****************************************************************************
*
* Invalidate the entire Data cache.
*
****************************************************************************/
void Xil_DCacheInvalidate(void)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L2CacheInvalidate();
    Xil_L1DCacheInvalidate();

    mtcpsr(currmask);
}
/****************************************************************************
*
* Invalidate a Data cache line. If the byte specified by the address (adr)
* is cached by the Data cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the modified contents
* are lost and are NOT written to system memory before the line is
* invalidated.
*
* @param    Address of the cache line to be invalidated.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_DCacheInvalidateLine(unsigned int adr)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L2CacheInvalidateLine(adr);
    Xil_L1DCacheInvalidateLine(adr);

    mtcpsr(currmask);
}
/****************************************************************************
*
* Invalidate the Data cache for the given address range.
* If the bytes specified by the address range are cached by the Data cache,
* the cachelines containing those bytes are invalidated. If a cacheline
* is modified (dirty), the modified contents are lost and are NOT
* written to system memory before the line is invalidated.
*
* @param    Start address of range to be invalidated.
* @param    Length of range to be invalidated in bytes.
*
* @note     If the start or the end of the range is not aligned to a
*           cacheline boundary, the partially covered cachelines are flushed
*           (cleaned and invalidated) instead, so data outside the range is
*           not lost.
*
****************************************************************************/
void Xil_DCacheInvalidateRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int tempadr = adr;
    unsigned int tempend;
    unsigned int currmask;
    volatile u32 *L2CCOffset = (volatile u32 *) (XPS_L2CC_BASEADDR +
                XPS_L2CC_CACHE_INVLD_PA_OFFSET);

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    if (len != 0) {
        tempend = adr + len;

        /* Select L1 Data cache in CSSR */
        mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);

        if (tempadr & (cacheline - 1)) {
            tempadr &= ~(cacheline - 1);

            Xil_L1DCacheFlushLine(tempadr);
            /* Disable Write-back and line fills */
            Xil_L2WriteDebugCtrl(0x3);
            Xil_L2CacheFlushLine(tempadr);
            /* Enable Write-back and line fills */
            Xil_L2WriteDebugCtrl(0x0);
            tempadr += cacheline;
        }
        if (tempend & (cacheline - 1)) {
            tempend &= ~(cacheline - 1);

            Xil_L1DCacheFlushLine(tempend);
            /* Disable Write-back and line fills */
            Xil_L2WriteDebugCtrl(0x3);
            Xil_L2CacheFlushLine(tempend);
            /* Enable Write-back and line fills */
            Xil_L2WriteDebugCtrl(0x0);
        }

        while (tempadr < tempend) {
            /* Invalidate L2 cache line */
            *L2CCOffset = tempadr;
            dsb();
#ifdef __GNUC__
            /* Invalidate L1 Data cache line */
            __asm__ __volatile__("mcr " \
            XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (tempadr));
#else
            { volatile register unsigned int Reg
                __asm(XREG_CP15_INVAL_DC_LINE_MVA_POC);
              Reg = tempadr; }
#endif
            tempadr += cacheline;
        }
    }

    dsb();
    mtcpsr(currmask);
}
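
/*
 * Illustrative usage sketch (not part of the driver), assuming a hypothetical
 * DMA engine that writes received data into RxBuffer. The range is
 * invalidated after the transfer completes and before the CPU reads it, so
 * stale cached copies do not mask what the DMA wrote. The buffer is kept
 * cacheline-aligned so the partial-line flush described above is not needed,
 * and it should hold no dirty CPU data while the DMA owns it.
 */
#if 0
#define RX_BUFFER_SIZE 1024
static u8 RxBuffer[RX_BUFFER_SIZE] __attribute__ ((aligned (32)));

static void ExampleDmaReceive(void)
{
    /* ... program the (hypothetical) DMA to write into RxBuffer ... */

    /* ... wait for the DMA completion interrupt or status flag ... */

    /* Discard any stale cached copies before reading the fresh data */
    Xil_DCacheInvalidateRange((unsigned int)RxBuffer, RX_BUFFER_SIZE);

    /* RxBuffer can now be read safely by the CPU */
}
#endif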
/****************************************************************************
*
* Flush the entire Data cache.
*
****************************************************************************/
void Xil_DCacheFlush(void)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L1DCacheFlush();
    Xil_L2CacheFlush();

    mtcpsr(currmask);
}
/****************************************************************************
*
* Flush a Data cache line. If the byte specified by the address (adr)
* is cached by the Data cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the entire
* contents of the cacheline are written to system memory before the
* line is invalidated.
*
* @param    Address to be flushed.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_DCacheFlushLine(unsigned int adr)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L1DCacheFlushLine(adr);

    /* Disable Write-back and line fills */
    Xil_L2WriteDebugCtrl(0x3);

    Xil_L2CacheFlushLine(adr);

    /* Enable Write-back and line fills */
    Xil_L2WriteDebugCtrl(0x0);

    mtcpsr(currmask);
}
/****************************************************************************
*
* Flush the Data cache for the given address range.
* If the bytes specified by the address range are cached by the Data cache,
* the cachelines containing those bytes are invalidated. If a cacheline
* is modified (dirty), it is written to system memory before the line is
* invalidated.
*
* @param    Start address of range to be flushed.
* @param    Length of range to be flushed in bytes.
*
****************************************************************************/
void Xil_DCacheFlushRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    unsigned int currmask;
    volatile u32 *L2CCOffset = (volatile u32 *) (XPS_L2CC_BASEADDR +
                XPS_L2CC_CACHE_INV_CLN_PA_OFFSET);

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    /* Back the starting address up to the start of a cache line and
     * perform cache operations until adr+len
     */
    end = adr + len;
    adr &= ~(cacheline - 1);

    while (adr < end) {
#ifdef __GNUC__
        /* Flush L1 Data cache line */
        __asm__ __volatile__("mcr " \
        XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC :: "r" (adr));
#else
        { volatile register unsigned int Reg
            __asm(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC);
          Reg = adr; }
#endif
        /* Flush L2 cache line */
        *L2CCOffset = adr;
        dsb();

        adr += cacheline;
    }

    dsb();
    mtcpsr(currmask);
}
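
/*
 * Illustrative usage sketch (not part of the driver), assuming a hypothetical
 * DMA engine that reads TxBuffer from memory: the range is flushed after the
 * CPU fills it, so the DMA sees the data that is still sitting in the caches.
 */
#if 0
#define TX_BUFFER_SIZE 1024
static u8 TxBuffer[TX_BUFFER_SIZE] __attribute__ ((aligned (32)));

static void ExampleDmaTransmit(void)
{
    unsigned int Index;

    /* CPU prepares the payload in the cached buffer */
    for (Index = 0; Index < TX_BUFFER_SIZE; Index++) {
        TxBuffer[Index] = (u8)Index;   /* example payload */
    }

    /* Push the cached data out to memory so the DMA reads up-to-date data */
    Xil_DCacheFlushRange((unsigned int)TxBuffer, TX_BUFFER_SIZE);

    /* ... now start the (hypothetical) DMA transfer from TxBuffer ... */
}
#endif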
/****************************************************************************
*
* Store a Data cache line. If the byte specified by the address (adr)
* is cached by the Data cache and the cacheline is modified (dirty),
* the entire contents of the cacheline are written to system memory.
* After the store completes, the cacheline is marked as unmodified
* (not dirty).
*
* @param    Address to be stored.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_DCacheStoreLine(unsigned int adr)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L1DCacheStoreLine(adr);
    Xil_L2CacheStoreLine(adr);

    mtcpsr(currmask);
}
/****************************************************************************
*
* Enable the instruction cache.
*
****************************************************************************/
void Xil_ICacheEnable(void)
{
    Xil_L1ICacheEnable();
    Xil_L2CacheEnable();
}
/****************************************************************************
*
* Disable the instruction cache.
*
****************************************************************************/
void Xil_ICacheDisable(void)
{
    Xil_L2CacheDisable();
    Xil_L1ICacheDisable();
}
/****************************************************************************
*
* Invalidate the entire instruction cache.
*
****************************************************************************/
void Xil_ICacheInvalidate(void)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L2CacheInvalidate();
    Xil_L1ICacheInvalidate();

    mtcpsr(currmask);
}
/****************************************************************************
*
* Invalidate an instruction cache line. If the instruction specified by the
* parameter adr is cached by the instruction cache, the cacheline containing
* that instruction is invalidated.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_ICacheInvalidateLine(unsigned int adr)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L2CacheInvalidateLine(adr);
    Xil_L1ICacheInvalidateLine(adr);

    mtcpsr(currmask);
}
/****************************************************************************
*
* Invalidate the instruction cache for the given address range.
* If the instructions in the given address range are cached by the
* instruction cache, the cachelines containing them are invalidated, so
* that subsequent fetches are made from system memory.
*
* @param    Start address of range to be invalidated.
* @param    Length of range to be invalidated in bytes.
*
****************************************************************************/
void Xil_ICacheInvalidateRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    volatile u32 *L2CCOffset = (volatile u32 *) (XPS_L2CC_BASEADDR +
                XPS_L2CC_CACHE_INVLD_PA_OFFSET);

    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    /* Back the starting address up to the start of a cache line and
     * perform cache operations until adr+len
     */
    end = adr + len;
    adr = adr & ~(cacheline - 1);

    /* Select cache L0 I-cache in CSSR */
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 1);

    while (adr < end) {
        /* Invalidate L2 cache line */
        *L2CCOffset = adr;
        dsb();
#ifdef __GNUC__
        /* Invalidate L1 I-cache line */
        __asm__ __volatile__("mcr " \
        XREG_CP15_INVAL_IC_LINE_MVA_POU :: "r" (adr));
#else
        { volatile register unsigned int Reg
            __asm(XREG_CP15_INVAL_IC_LINE_MVA_POU);
          Reg = adr; }
#endif
        adr += cacheline;
    }

    /* Wait for L1 and L2 invalidate to complete */
    dsb();

    mtcpsr(currmask);
}
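
/*
 * Illustrative usage sketch (not part of the driver): after the CPU writes
 * instructions into memory (for example, copying a code image into OCM or
 * DDR), the new data must first be flushed out of the D-cache and any stale
 * instruction-cache lines discarded before branching to it. CodeBuf and
 * CodeSize are hypothetical names.
 */
#if 0
static void ExampleLoadAndRunCode(void *CodeBuf, unsigned int CodeSize)
{
    /* Make the newly written instructions visible to system memory */
    Xil_DCacheFlushRange((unsigned int)CodeBuf, CodeSize);

    /* Drop any stale instruction-cache lines covering that range */
    Xil_ICacheInvalidateRange((unsigned int)CodeBuf, CodeSize);

    /* ... it is now safe to branch to the code at CodeBuf
     * (an instruction barrier before branching completes the picture) ... */
}
#endif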
/****************************************************************************
*
* Enable the level 1 Data cache.
*
****************************************************************************/
void Xil_L1DCacheEnable(void)
{
    register unsigned int CtrlReg;

    /* enable caches only if they are disabled */
#ifdef __GNUC__
    CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
#else
    { volatile register unsigned int Reg __asm(XREG_CP15_SYS_CONTROL);
      CtrlReg = Reg; }
#endif
    if (CtrlReg & XREG_CP15_CONTROL_C_BIT) {
        return;
    }

    /* clean and invalidate the Data cache */
    Xil_L1DCacheInvalidate();

    /* enable the Data cache */
    CtrlReg |= (XREG_CP15_CONTROL_C_BIT);

    mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
}
/****************************************************************************
*
* Disable the level 1 Data cache.
*
****************************************************************************/
void Xil_L1DCacheDisable(void)
{
    register unsigned int CtrlReg;

    /* clean and invalidate the Data cache */
    Xil_L1DCacheFlush();

    /* disable the Data cache */
#ifdef __GNUC__
    CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
#else
    { volatile register unsigned int Reg __asm(XREG_CP15_SYS_CONTROL);
      CtrlReg = Reg; }
#endif

    CtrlReg &= ~(XREG_CP15_CONTROL_C_BIT);

    mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
}
/****************************************************************************
*
* Invalidate the level 1 Data cache.
*
* @note     In Cortex-A9, there is no cp15 instruction for invalidating
*           the whole D-cache. This function invalidates each line by
*           set/way.
*
****************************************************************************/
void Xil_L1DCacheInvalidate(void)
{
    register unsigned int CsidReg, C7Reg;
    unsigned int CacheSize, LineSize, NumWays;
    unsigned int Way, WayIndex, Set, SetIndex, NumSet;
    unsigned int currmask;

#ifdef __GNUC__
    unsigned int stack_start, stack_end, stack_size;
#endif

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

#ifdef __GNUC__
    stack_end = (unsigned int )&_stack_end;
    stack_start = (unsigned int )&_stack;
    stack_size = stack_start - stack_end;

    /* Flush stack memory to save the return address */
    Xil_DCacheFlushRange(stack_end, stack_size);
#endif

    /* Select cache level 0 and D cache in CSSR */
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);

#ifdef __GNUC__
    CsidReg = mfcp(XREG_CP15_CACHE_SIZE_ID);
#else
    { volatile register unsigned int Reg __asm(XREG_CP15_CACHE_SIZE_ID);
      CsidReg = Reg; }
#endif

    /* Determine Cache Size */
    CacheSize = (CsidReg >> 13) & 0x1FF;
    CacheSize += 1;
    CacheSize *= 128;    /* to get number of bytes */

    /* Number of Ways */
    NumWays = (CsidReg & 0x3ff) >> 3;
    NumWays += 1;

    /* Get the cacheline size, way size, index size from csidr */
    LineSize = (CsidReg & 0x07) + 4;

    NumSet = CacheSize / NumWays;
    NumSet /= (1 << LineSize);

    Way = 0;
    Set = 0;

    /* Invalidate all the cachelines */
    for (WayIndex = 0; WayIndex < NumWays; WayIndex++) {
        for (SetIndex = 0; SetIndex < NumSet; SetIndex++) {
            C7Reg = Way | Set;
#ifdef __GNUC__
            /* Invalidate by Set/Way */
            __asm__ __volatile__("mcr " \
            XREG_CP15_INVAL_DC_LINE_SW :: "r" (C7Reg));
#else
            //mtcp(XREG_CP15_INVAL_DC_LINE_SW, C7Reg);
            { volatile register unsigned int Reg
                __asm(XREG_CP15_INVAL_DC_LINE_SW);
              Reg = C7Reg; }
#endif
            Set += (1 << LineSize);
        }
        Set = 0;
        Way += 0x40000000;
    }

    /* Wait for L1 invalidate to complete */
    dsb();

    mtcpsr(currmask);
}
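
/*
 * Illustrative sketch (not compiled): for the Zynq-7000 Cortex-A9 L1 D-cache
 * (4 ways, 32-byte lines, 256 sets), the set/way word written above places
 * the way index in bits [31:30] and the set index in bits [12:5], which is
 * why the loops advance Way by 0x40000000 and Set by 32. The helper name
 * below is hypothetical.
 */
#if 0
static unsigned int ExampleSetWayWord(unsigned int WayIndex,
                                      unsigned int SetIndex)
{
    /* equals the C7Reg value formed in the loops above */
    return (WayIndex << 30) | (SetIndex << 5);
}
#endif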
/****************************************************************************
*
* Invalidate a level 1 Data cache line. If the byte specified by the address
* (adr) is cached by the Data cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the modified contents
* are lost and are NOT written to system memory before the line is
* invalidated.
*
* @param    Address of the cache line to be invalidated.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L1DCacheInvalidateLine(unsigned int adr)
{
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
    mtcp(XREG_CP15_INVAL_DC_LINE_MVA_POC, (adr & (~0x1F)));

    /* Wait for L1 invalidate to complete */
    dsb();
}
/****************************************************************************
*
* Invalidate the level 1 Data cache for the given address range.
* If the bytes specified by the address range are cached by the Data cache,
* the cachelines containing those bytes are invalidated. If a cacheline
* is modified (dirty), the modified contents are lost and are NOT
* written to system memory before the line is invalidated.
*
* @param    Start address of range to be invalidated.
* @param    Length of range to be invalidated in bytes.
*
****************************************************************************/
void Xil_L1DCacheInvalidateRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    /* Back the starting address up to the start of a cache line and
     * perform cache operations until adr+len
     */
    end = adr + len;
    adr = adr & ~(cacheline - 1);

    /* Select cache L0 D-cache in CSSR */
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);

    while (adr < end) {
#ifdef __GNUC__
        /* Invalidate L1 Data cache line */
        __asm__ __volatile__("mcr " \
        XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (adr));
#else
        { volatile register unsigned int Reg
            __asm(XREG_CP15_INVAL_DC_LINE_MVA_POC);
          Reg = adr; }
#endif
        adr += cacheline;
    }

    /* Wait for L1 invalidate to complete */
    dsb();

    mtcpsr(currmask);
}
/****************************************************************************
*
* Flush the level 1 Data cache.
*
* @note     In Cortex-A9, there is no cp15 instruction for flushing
*           the whole D-cache. This function flushes each line by
*           set/way.
*
****************************************************************************/
void Xil_L1DCacheFlush(void)
{
    register unsigned int CsidReg, C7Reg;
    unsigned int CacheSize, LineSize, NumWays;
    unsigned int Way, WayIndex, Set, SetIndex, NumSet;
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    /* Select cache level 0 and D cache in CSSR */
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);

#ifdef __GNUC__
    CsidReg = mfcp(XREG_CP15_CACHE_SIZE_ID);
#else
    { volatile register unsigned int Reg __asm(XREG_CP15_CACHE_SIZE_ID);
      CsidReg = Reg; }
#endif

    /* Determine Cache Size */
    CacheSize = (CsidReg >> 13) & 0x1FF;
    CacheSize += 1;
    CacheSize *= 128;    /* to get number of bytes */

    /* Number of Ways */
    NumWays = (CsidReg & 0x3ff) >> 3;
    NumWays += 1;

    /* Get the cacheline size, way size, index size from csidr */
    LineSize = (CsidReg & 0x07) + 4;

    NumSet = CacheSize / NumWays;
    NumSet /= (1 << LineSize);

    Way = 0;
    Set = 0;

    /* Flush all the cachelines */
    for (WayIndex = 0; WayIndex < NumWays; WayIndex++) {
        for (SetIndex = 0; SetIndex < NumSet; SetIndex++) {
            C7Reg = Way | Set;
            /* Flush by Set/Way */
#ifdef __GNUC__
            __asm__ __volatile__("mcr " \
            XREG_CP15_CLEAN_INVAL_DC_LINE_SW :: "r" (C7Reg));
#else
            { volatile register unsigned int Reg
                __asm(XREG_CP15_CLEAN_INVAL_DC_LINE_SW);
              Reg = C7Reg; }
#endif
            Set += (1 << LineSize);
        }
        Set = 0;
        Way += 0x40000000;
    }

    /* Wait for L1 flush to complete */
    dsb();

    mtcpsr(currmask);
}
/****************************************************************************
*
* Flush a level 1 Data cache line. If the byte specified by the address (adr)
* is cached by the Data cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the entire
* contents of the cacheline are written to system memory before the
* line is invalidated.
*
* @param    Address to be flushed.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L1DCacheFlushLine(unsigned int adr)
{
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
    mtcp(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC, (adr & (~0x1F)));

    /* Wait for L1 flush to complete */
    dsb();
}
/****************************************************************************
*
* Flush the level 1 Data cache for the given address range.
* If the bytes specified by the address range are cached by the Data cache,
* the cachelines containing those bytes are invalidated. If a cacheline
* is modified (dirty), it is written to system memory before the line is
* invalidated.
*
* @param    Start address of range to be flushed.
* @param    Length of range to be flushed in bytes.
*
****************************************************************************/
void Xil_L1DCacheFlushRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    /* Back the starting address up to the start of a cache line and
     * perform cache operations until adr+len
     */
    end = adr + len;
    adr = adr & ~(cacheline - 1);

    /* Select cache L0 D-cache in CSSR */
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);

    while (adr < end) {
#ifdef __GNUC__
        /* Flush L1 Data cache line */
        __asm__ __volatile__("mcr " \
        XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC :: "r" (adr));
#else
        { volatile register unsigned int Reg
            __asm(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC);
          Reg = adr; }
#endif
        adr += cacheline;
    }

    /* Wait for L1 flush to complete */
    dsb();

    mtcpsr(currmask);
}
/****************************************************************************
*
* Store a level 1 Data cache line. If the byte specified by the address (adr)
* is cached by the Data cache and the cacheline is modified (dirty),
* the entire contents of the cacheline are written to system memory.
* After the store completes, the cacheline is marked as unmodified
* (not dirty).
*
* @param    Address to be stored.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L1DCacheStoreLine(unsigned int adr)
{
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
    mtcp(XREG_CP15_CLEAN_DC_LINE_MVA_POC, (adr & (~0x1F)));

    /* Wait for L1 store to complete */
    dsb();
}
/****************************************************************************
*
* Enable the level 1 instruction cache.
*
****************************************************************************/
void Xil_L1ICacheEnable(void)
{
    register unsigned int CtrlReg;

    /* enable caches only if they are disabled */
#ifdef __GNUC__
    CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
#else
    { volatile register unsigned int Reg __asm(XREG_CP15_SYS_CONTROL);
      CtrlReg = Reg; }
#endif
    if (CtrlReg & XREG_CP15_CONTROL_I_BIT) {
        return;
    }

    /* invalidate the instruction cache */
    mtcp(XREG_CP15_INVAL_IC_POU, 0);

    /* enable the instruction cache */
    CtrlReg |= (XREG_CP15_CONTROL_I_BIT);

    mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
}
/****************************************************************************
*
* Disable the level 1 instruction cache.
*
****************************************************************************/
void Xil_L1ICacheDisable(void)
{
    register unsigned int CtrlReg;

    dsb();

    /* invalidate the instruction cache */
    mtcp(XREG_CP15_INVAL_IC_POU, 0);

    /* disable the instruction cache */
#ifdef __GNUC__
    CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
#else
    { volatile register unsigned int Reg __asm(XREG_CP15_SYS_CONTROL);
      CtrlReg = Reg; }
#endif
    CtrlReg &= ~(XREG_CP15_CONTROL_I_BIT);

    mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
}
/****************************************************************************
*
* Invalidate the entire level 1 instruction cache.
*
****************************************************************************/
void Xil_L1ICacheInvalidate(void)
{
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 1);

    /* invalidate the instruction cache */
    mtcp(XREG_CP15_INVAL_IC_POU, 0);

    /* Wait for L1 invalidate to complete */
    dsb();
}
/****************************************************************************
*
* Invalidate a level 1 instruction cache line. If the instruction specified
* by the parameter adr is cached by the instruction cache, the cacheline
* containing that instruction is invalidated.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L1ICacheInvalidateLine(unsigned int adr)
{
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 1);
    mtcp(XREG_CP15_INVAL_IC_LINE_MVA_POU, (adr & (~0x1F)));

    /* Wait for L1 invalidate to complete */
    dsb();
}
/****************************************************************************
*
* Invalidate the level 1 instruction cache for the given address range.
* If the instructions in the given address range are cached by the
* instruction cache, the cachelines containing them are invalidated, so
* that subsequent fetches are made from system memory.
*
* @param    Start address of range to be invalidated.
* @param    Length of range to be invalidated in bytes.
*
****************************************************************************/
void Xil_L1ICacheInvalidateRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    /* Back the starting address up to the start of a cache line and
     * perform cache operations until adr+len
     */
    end = adr + len;
    adr = adr & ~(cacheline - 1);

    /* Select cache L0 I-cache in CSSR */
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 1);

    while (adr < end) {
#ifdef __GNUC__
        /* Invalidate L1 I-cache line */
        __asm__ __volatile__("mcr " \
        XREG_CP15_INVAL_IC_LINE_MVA_POU :: "r" (adr));
#else
        { volatile register unsigned int Reg
            __asm(XREG_CP15_INVAL_IC_LINE_MVA_POU);
          Reg = adr; }
#endif
        adr += cacheline;
    }

    /* Wait for L1 invalidate to complete */
    dsb();

    mtcpsr(currmask);
}
/****************************************************************************
*
* Enable the L2 cache.
*
****************************************************************************/
void Xil_L2CacheEnable(void)
{
    register unsigned int L2CCReg;

    L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);

    /* only enable if L2CC is currently disabled */
    if ((L2CCReg & 0x01) == 0) {
        /* set up the way size and latencies */
        L2CCReg = Xil_In32(XPS_L2CC_BASEADDR +
                           XPS_L2CC_AUX_CNTRL_OFFSET);
        L2CCReg &= XPS_L2CC_AUX_REG_ZERO_MASK;
        L2CCReg |= XPS_L2CC_AUX_REG_DEFAULT_MASK;
        Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_AUX_CNTRL_OFFSET,
                  L2CCReg);
        Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_TAG_RAM_CNTRL_OFFSET,
                  XPS_L2CC_TAG_RAM_DEFAULT_MASK);
        Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_DATA_RAM_CNTRL_OFFSET,
                  XPS_L2CC_DATA_RAM_DEFAULT_MASK);

        /* Clear the pending interrupts */
        L2CCReg = Xil_In32(XPS_L2CC_BASEADDR +
                           XPS_L2CC_ISR_OFFSET);
        Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_IAR_OFFSET, L2CCReg);

        Xil_L2CacheInvalidate();

        /* Enable the L2CC */
        L2CCReg = Xil_In32(XPS_L2CC_BASEADDR +
                           XPS_L2CC_CNTRL_OFFSET);
        Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET,
                  (L2CCReg | (0x01)));

        Xil_L2CacheSync();
        /* synchronize the processor */
        dsb();
    }
}
/****************************************************************************
*
* Disable the L2 cache.
*
****************************************************************************/
void Xil_L2CacheDisable(void)
{
    register unsigned int L2CCReg;

    L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);

    if (L2CCReg & 0x01) {

        /* Clean and Invalidate L2 Cache */
        Xil_L2CacheFlush();

        /* Disable the L2CC */
        L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);
        Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET,
                  (L2CCReg & (~0x01)));

        /* Wait for the cache operations to complete */
        dsb();
    }
}
/****************************************************************************
*
* Invalidate the entire L2 cache. If a cacheline is modified (dirty), the
* modified contents are lost and are NOT written to system memory before
* the line is invalidated.
*
****************************************************************************/
void Xil_L2CacheInvalidate(void)
{
    /* Invalidate the caches */
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_WAY_OFFSET,
              0x0000FFFF);
    while ((Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_WAY_OFFSET))
           & 0x0000FFFF);

    /* Wait for the invalidate to complete */
    Xil_L2CacheSync();

    /* synchronize the processor */
    dsb();
}
/****************************************************************************
*
* Invalidate a level 2 cache line. If the byte specified by the address (adr)
* is cached by the L2 cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the modified contents
* are lost and are NOT written to system memory before the line is
* invalidated.
*
* @param    Address of the cache line to be invalidated.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L2CacheInvalidateLine(unsigned int adr)
{
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_PA_OFFSET, adr);
    /* synchronize the processor */
    dsb();
}
/****************************************************************************
*
* Invalidate the level 2 cache for the given address range.
* If the bytes specified by the address range are cached by the L2 cache,
* the cachelines containing those bytes are invalidated. If a cacheline
* is modified (dirty), the modified contents are lost and are NOT
* written to system memory before the line is invalidated.
*
* @param    Start address of range to be invalidated.
* @param    Length of range to be invalidated in bytes.
*
****************************************************************************/
void Xil_L2CacheInvalidateRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    volatile u32 *L2CCOffset = (volatile u32 *) (XPS_L2CC_BASEADDR +
                XPS_L2CC_CACHE_INVLD_PA_OFFSET);

    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    /* Back the starting address up to the start of a cache line and
     * perform cache operations until adr+len
     */
    end = adr + len;
    adr = adr & ~(cacheline - 1);

    /* Disable Write-back and line fills */
    Xil_L2WriteDebugCtrl(0x3);

    while (adr < end) {
        /* Invalidate L2 cache line */
        *L2CCOffset = adr;
        adr += cacheline;
    }

    /* Enable Write-back and line fills */
    Xil_L2WriteDebugCtrl(0x0);

    /* synchronize the processor */
    dsb();

    mtcpsr(currmask);
}
/****************************************************************************
*
* Flush the entire L2 cache. Modified (dirty) cachelines are written to
* system memory before they are invalidated.
*
****************************************************************************/
void Xil_L2CacheFlush(void)
{
    /* Flush the caches */

    /* Disable Write-back and line fills */
    Xil_L2WriteDebugCtrl(0x3);

    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_WAY_OFFSET,
              0x0000FFFF);
    while ((Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_WAY_OFFSET))
           & 0x0000FFFF);

    /* Wait for the flush to complete */
    Xil_L2CacheSync();

    /* Enable Write-back and line fills */
    Xil_L2WriteDebugCtrl(0x0);

    /* synchronize the processor */
    dsb();
}
/****************************************************************************
*
* Flush a level 2 cache line. If the byte specified by the address (adr)
* is cached by the L2 cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the entire
* contents of the cacheline are written to system memory before the
* line is invalidated.
*
* @param    Address to be flushed.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L2CacheFlushLine(unsigned int adr)
{
#ifdef CONFIG_PL310_ERRATA_588369
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_CLEAN_PA_OFFSET, adr);
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_PA_OFFSET, adr);
#else
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_PA_OFFSET, adr);
#endif
    /* synchronize the processor */
    dsb();
}
/****************************************************************************
*
* Flush the level 2 cache for the given address range.
* If the bytes specified by the address range are cached by the L2 cache,
* the cachelines containing those bytes are invalidated. If a cacheline
* is modified (dirty), it is written to system memory before the line is
* invalidated.
*
* @param    Start address of range to be flushed.
* @param    Length of range to be flushed in bytes.
*
****************************************************************************/
void Xil_L2CacheFlushRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    volatile u32 *L2CCOffset = (volatile u32 *) (XPS_L2CC_BASEADDR +
                XPS_L2CC_CACHE_INV_CLN_PA_OFFSET);

    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    /* Back the starting address up to the start of a cache line and
     * perform cache operations until adr+len
     */
    end = adr + len;
    adr = adr & ~(cacheline - 1);

    /* Disable Write-back and line fills */
    Xil_L2WriteDebugCtrl(0x3);

    while (adr < end) {
        /* Flush L2 cache line */
        *L2CCOffset = adr;
        dsb();
        adr += cacheline;
    }

    /* Enable Write-back and line fills */
    Xil_L2WriteDebugCtrl(0x0);

    /* synchronize the processor */
    dsb();

    mtcpsr(currmask);
}
/****************************************************************************
*
* Store a level 2 cache line. If the byte specified by the address (adr)
* is cached by the L2 cache and the cacheline is modified (dirty),
* the entire contents of the cacheline are written to system memory.
* After the store completes, the cacheline is marked as unmodified
* (not dirty).
*
* @param    Address to be stored.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L2CacheStoreLine(unsigned int adr)
{
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_CLEAN_PA_OFFSET, adr);
    /* synchronize the processor */
    dsb();
}