/******************************************************************************
*
* (c) Copyright 2010-14 Xilinx, Inc. All rights reserved.
*
* This file contains confidential and proprietary information of Xilinx, Inc.
* and is protected under U.S. and international copyright and other
* intellectual property laws.
*
* DISCLAIMER
* This disclaimer is not a license and does not grant any rights to the
* materials distributed herewith. Except as otherwise provided in a valid
* license issued to you by Xilinx, and to the maximum extent permitted by
* applicable law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND WITH ALL
* FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS,
* IMPLIED, OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF
* MERCHANTABILITY, NON-INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE;
* and (2) Xilinx shall not be liable (whether in contract or tort, including
* negligence, or under any other theory of liability) for any loss or damage
* of any kind or nature related to, arising under or in connection with these
* materials, including for any direct, or any indirect, special, incidental,
* or consequential loss or damage (including loss of data, profits, goodwill,
* or any type of loss or damage suffered as a result of any action brought by
* a third party) even if such damage or loss was reasonably foreseeable or
* Xilinx had been advised of the possibility of the same.
*
* CRITICAL APPLICATIONS
* Xilinx products are not designed or intended to be fail-safe, or for use in
* any application requiring fail-safe performance, such as life-support or
* safety devices or systems, Class III medical devices, nuclear facilities,
* applications related to the deployment of airbags, or any other applications
* that could lead to death, personal injury, or severe property or
* environmental damage (individually and collectively, "Critical
* Applications"). Customer assumes the sole risk and liability of any use of
* Xilinx products in Critical Applications, subject only to applicable laws
* and regulations governing limitations on product liability.
*
* THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS PART OF THIS FILE
* AT ALL TIMES.
*
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_cache.c
*
* Contains required functions for the ARM cache functionality.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver   Who  Date     Changes
* ----- ---- -------- -----------------------------------------------
* 1.00a ecm  01/29/10 First release
* 1.00a ecm  06/24/10 Moved the L1 and L2 specific function prototypes
*                     to xil_cache_mach.h to give access to sophisticated users
* 3.02a sdm  04/07/11 Updated Flush/InvalidateRange APIs to flush/invalidate
*                     L1 and L2 caches in a single loop and used dsb, L2 sync
*                     at the end of the loop.
* 3.04a sdm  01/02/12 Removed redundant dsb/dmb instructions in cache
*                     maintenance APIs.
* 3.07a asa  07/16/12 Corrected the L1 and L2 cache invalidation order.
* 3.07a sgd  09/18/12 Corrected the L2 cache enable and disable sequence.
* 3.10a srt  04/18/13 Implemented ARM Erratas. Please refer to file
*                     'xil_errata.h' for errata description.
* 3.10a asa  05/13/13 Modified cache disable APIs. The L2 cache disable
*                     operation was being done with the L1 Data cache disabled.
*                     This is fixed so that the L2 cache disable operation
*                     happens independent of the L1 cache disable operation.
*                     This fixes CR #706464.
*                     Changes are done to do an L2 cache sync (poll
*                     reg7_cache_sync). This is done to fix CR #700542.
* 3.11a asa  09/23/13 Modified Xil_DCacheFlushRange and
*                     Xil_DCacheInvalidateRange to fix potential issues. Fixed
*                     other relevant cache APIs to disable and re-enable
*                     interrupts. This fixes CR #663885.
* 3.11a asa  09/28/13 Made changes for the L2 cache sync operation. It was
*                     found that L2 cache flush/clean/invalidate by cache line
*                     does not need a cache sync, as these operations are
*                     atomic in nature. Similarly, for a complete L2 cache
*                     flush/invalidate by way, the code must poll in a loop
*                     until the status shows that the cache operation has
*                     completed.
* 4.00  pkp  24/01/14 Modified Xil_DCacheInvalidateRange to fix a bug: a few
*                     cache lines were missed when an unaligned address range
*                     was invalidated. This fixes CR #766768.
*                     Also, in Xil_L1DCacheInvalidate, invalidating the entire
*                     L1D cache also invalidated the stack memory that holds
*                     the return address. The stack memory is now flushed
*                     first and then the L1D cache is invalidated. This fixes
*                     CR #763829.
* 4.01  asa  05/09/14 Made changes in cortexa9/xil_cache.c to fix CR# 798230.
* </pre>
*
******************************************************************************/
/***************************** Include Files *********************************/

#include "xil_cache.h"
#include "xil_cache_l.h"
#include "xpseudo_asm.h"
#include "xparameters.h"
#include "xreg_cortexa9.h"
#include "xl2cc.h"
#include "xil_errata.h"
#include "xil_exception.h"
/************************** Function Prototypes ******************************/

/************************** Variable Definitions *****************************/

#define IRQ_FIQ_MASK 0xC0    /* Mask IRQ and FIQ interrupts in cpsr */

#ifdef __GNUC__
    extern int _stack_end;
    extern int _stack;
#endif
/****************************************************************************
*
* Access the L2 Debug Control Register.
*
* @param    Value to be written to the Debug Control Register.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
#ifdef __GNUC__
static inline void Xil_L2WriteDebugCtrl(u32 Value)
#else
static void Xil_L2WriteDebugCtrl(u32 Value)
#endif
{
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_DEBUG_CTRL_OFFSET, Value);
#else
    (void)Value;
#endif
}
/****************************************************************************
*
* Perform an L2 cache sync operation.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
#ifdef __GNUC__
static inline void Xil_L2CacheSync(void)
#else
static void Xil_L2CacheSync(void)
#endif
{
#ifdef CONFIG_PL310_ERRATA_753970
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_DUMMY_CACHE_SYNC_OFFSET, 0x0);
#else
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_SYNC_OFFSET, 0x0);
#endif
}
/****************************************************************************
*
* Enable the Data cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_DCacheEnable(void)
{
    Xil_L1DCacheEnable();
    Xil_L2CacheEnable();
}
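/*
 * Usage sketch (illustrative only, not part of the driver): a bare-metal
 * application typically enables both caches once during start-up, before any
 * performance-critical code runs. Hypothetical application code:
 *
 *     Xil_ICacheEnable();
 *     Xil_DCacheEnable();
 */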
/****************************************************************************
*
* Disable the Data cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_DCacheDisable(void)
{
    Xil_L2CacheDisable();
    Xil_L1DCacheDisable();
}
/****************************************************************************
*
* Invalidate the entire Data cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_DCacheInvalidate(void)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L2CacheInvalidate();
    Xil_L1DCacheInvalidate();

    mtcpsr(currmask);
}
/****************************************************************************
*
* Invalidate a Data cache line. If the byte specified by the address (adr)
* is cached by the Data cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the modified contents
* are lost and are NOT written to system memory before the line is
* invalidated.
*
* @param    Address to be invalidated.
*
* @return   None.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_DCacheInvalidateLine(unsigned int adr)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L2CacheInvalidateLine(adr);
    Xil_L1DCacheInvalidateLine(adr);

    mtcpsr(currmask);
}
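/*
 * Illustrative note (assumption: 32-byte cache lines, as used throughout this
 * file): the address is truncated to a line boundary, so invalidating e.g.
 * address 0x00100A34 affects the whole line 0x00100A20-0x00100A3F. Any dirty
 * data sharing that line is lost, so line invalidation is only safe on buffers
 * that are cache-line aligned and padded.
 */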
/****************************************************************************
*
* Invalidate the Data cache for the given address range.
* If the bytes specified by the address range are cached by the Data cache,
* the cachelines containing those bytes are invalidated. If a cacheline
* is modified (dirty), the modified contents are lost and are NOT
* written to system memory before the line is invalidated.
*
* @param    Start address of range to be invalidated.
* @param    Length of range to be invalidated in bytes.
*
* @return   None.
*
* @note     If the start or the end of the range is not aligned to a
*           cacheline, the partial cachelines at the boundaries are flushed
*           (cleaned and invalidated) instead, so that adjacent data outside
*           the range is not lost.
*
****************************************************************************/
void Xil_DCacheInvalidateRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    unsigned int tempadr = adr;
    unsigned int tempend;
    unsigned int currmask;
    volatile u32 *L2CCOffset = (volatile u32 *) (XPS_L2CC_BASEADDR +
                XPS_L2CC_CACHE_INVLD_PA_OFFSET);

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    if (len != 0) {
        end = tempadr + len;
        tempend = end;
        /* Select L1 Data cache in CSSR */
        mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);

        if (tempadr & (cacheline-1)) {
            tempadr &= ~(cacheline - 1);

            Xil_L1DCacheFlushLine(tempadr);
            /* Disable Write-back and line fills */
            Xil_L2WriteDebugCtrl(0x3);
            Xil_L2CacheFlushLine(tempadr);
            /* Enable Write-back and line fills */
            Xil_L2WriteDebugCtrl(0x0);
            tempadr += cacheline;
        }
        if (tempend & (cacheline-1)) {
            tempend &= ~(cacheline - 1);

            Xil_L1DCacheFlushLine(tempend);
            /* Disable Write-back and line fills */
            Xil_L2WriteDebugCtrl(0x3);
            Xil_L2CacheFlushLine(tempend);
            /* Enable Write-back and line fills */
            Xil_L2WriteDebugCtrl(0x0);
        }

        while (tempadr < tempend) {
            /* Invalidate L2 cache line */
            *L2CCOffset = tempadr;
            dsb();
#ifdef __GNUC__
            /* Invalidate L1 Data cache line */
            __asm__ __volatile__("mcr " \
            XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (tempadr));
#else
            { volatile register unsigned int Reg
                __asm(XREG_CP15_INVAL_DC_LINE_MVA_POC);
              Reg = tempadr; }
#endif
            tempadr += cacheline;
        }
    }

    dsb();
    mtcpsr(currmask);
}
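/*
 * Usage sketch (illustrative only, not part of the driver): invalidating a
 * DMA receive buffer before the CPU reads data that a DMA engine wrote to
 * DDR. RxBuffer and RX_BUF_SIZE are hypothetical application-side names.
 *
 *     Xil_DCacheInvalidateRange((unsigned int) RxBuffer, RX_BUF_SIZE);
 *     // ... now read the freshly received data through the cache ...
 */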
/****************************************************************************
*
* Flush the entire Data cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_DCacheFlush(void)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L1DCacheFlush();
    Xil_L2CacheFlush();

    mtcpsr(currmask);
}
/****************************************************************************
*
* Flush a Data cache line. If the byte specified by the address (adr)
* is cached by the Data cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the entire
* contents of the cacheline are written to system memory before the
* line is invalidated.
*
* @param    Address to be flushed.
*
* @return   None.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_DCacheFlushLine(unsigned int adr)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L1DCacheFlushLine(adr);

    /* Disable Write-back and line fills */
    Xil_L2WriteDebugCtrl(0x3);

    Xil_L2CacheFlushLine(adr);

    /* Enable Write-back and line fills */
    Xil_L2WriteDebugCtrl(0x0);

    mtcpsr(currmask);
}
/****************************************************************************
*
* Flush the Data cache for the given address range.
* If the bytes specified by the address range are cached by the Data cache,
* the cachelines containing those bytes are invalidated. If a cacheline
* is modified (dirty), it is written to system memory before the line is
* invalidated.
*
* @param    Start address of range to be flushed.
* @param    Length of range to be flushed in bytes.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_DCacheFlushRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    unsigned int currmask;
    volatile u32 *L2CCOffset = (volatile u32 *) (XPS_L2CC_BASEADDR +
                XPS_L2CC_CACHE_INV_CLN_PA_OFFSET);

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    if (len != 0) {
        /* Back the starting address up to the start of a cache line
         * perform cache operations until adr+len
         */
        end = adr + len;
        adr &= ~(cacheline - 1);

        while (adr < end) {
#ifdef __GNUC__
            /* Flush L1 Data cache line */
            __asm__ __volatile__("mcr " \
            XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC :: "r" (adr));
#else
            { volatile register unsigned int Reg
                __asm(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC);
              Reg = adr; }
#endif
            /* Flush L2 cache line */
            *L2CCOffset = adr;
            dsb();
            adr += cacheline;
        }
    }

    dsb();
    mtcpsr(currmask);
}
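/*
 * Usage sketch (illustrative only, not part of the driver): flushing a DMA
 * transmit buffer so that data the CPU prepared in the cache is visible in
 * DDR before a DMA engine reads it. TxBuffer and TX_BUF_SIZE are hypothetical
 * application-side names.
 *
 *     // CPU fills TxBuffer ...
 *     Xil_DCacheFlushRange((unsigned int) TxBuffer, TX_BUF_SIZE);
 *     // ... now it is safe to start the DMA transfer from TxBuffer
 */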
/****************************************************************************
*
* Store a Data cache line. If the byte specified by the address (adr)
* is cached by the Data cache and the cacheline is modified (dirty),
* the entire contents of the cacheline are written to system memory.
* After the store completes, the cacheline is marked as unmodified
* (not dirty).
*
* @param    Address to be stored.
*
* @return   None.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_DCacheStoreLine(unsigned int adr)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L1DCacheStoreLine(adr);
    Xil_L2CacheStoreLine(adr);

    mtcpsr(currmask);
}
/****************************************************************************
*
* Enable the instruction cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_ICacheEnable(void)
{
    Xil_L1ICacheEnable();
    Xil_L2CacheEnable();
}
/****************************************************************************
*
* Disable the instruction cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_ICacheDisable(void)
{
    Xil_L2CacheDisable();
    Xil_L1ICacheDisable();
}
/****************************************************************************
*
* Invalidate the entire instruction cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_ICacheInvalidate(void)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L2CacheInvalidate();
    Xil_L1ICacheInvalidate();

    mtcpsr(currmask);
}
/****************************************************************************
*
* Invalidate an instruction cache line. If the instruction specified by the
* parameter adr is cached by the instruction cache, the cacheline containing
* that instruction is invalidated.
*
* @param    Address to be invalidated.
*
* @return   None.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_ICacheInvalidateLine(unsigned int adr)
{
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    Xil_L2CacheInvalidateLine(adr);
    Xil_L1ICacheInvalidateLine(adr);

    mtcpsr(currmask);
}
/****************************************************************************
*
* Invalidate the instruction cache for the given address range.
* If the bytes specified by the address range are cached by the instruction
* cache, the cachelines containing those bytes are invalidated.
*
* @param    Start address of range to be invalidated.
* @param    Length of range to be invalidated in bytes.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_ICacheInvalidateRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    volatile u32 *L2CCOffset = (volatile u32 *) (XPS_L2CC_BASEADDR +
                XPS_L2CC_CACHE_INVLD_PA_OFFSET);

    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    if (len != 0) {
        /* Back the starting address up to the start of a cache line
         * perform cache operations until adr+len
         */
        end = adr + len;
        adr = adr & ~(cacheline - 1);

        /* Select cache L0 I-cache in CSSR */
        mtcp(XREG_CP15_CACHE_SIZE_SEL, 1);

        while (adr < end) {
            /* Invalidate L2 cache line */
            *L2CCOffset = adr;
            dsb();
#ifdef __GNUC__
            /* Invalidate L1 I-cache line */
            __asm__ __volatile__("mcr " \
            XREG_CP15_INVAL_IC_LINE_MVA_POU :: "r" (adr));
#else
            { volatile register unsigned int Reg
                __asm(XREG_CP15_INVAL_IC_LINE_MVA_POU);
              Reg = adr; }
#endif
            adr += cacheline;
        }
    }

    /* Wait for L1 and L2 invalidate to complete */
    dsb();
    mtcpsr(currmask);
}
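/*
 * Usage sketch (illustrative only, not part of the driver): after copying or
 * loading executable code into memory (for example with memcpy), the new data
 * must be flushed from the data cache and the instruction cache must be
 * invalidated for that range before jumping to it. CodeBuffer, ImageSource
 * and CODE_SIZE are hypothetical application-side names.
 *
 *     memcpy(CodeBuffer, ImageSource, CODE_SIZE);
 *     Xil_DCacheFlushRange((unsigned int) CodeBuffer, CODE_SIZE);
 *     Xil_ICacheInvalidateRange((unsigned int) CodeBuffer, CODE_SIZE);
 */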
/****************************************************************************
*
* Enable the level 1 Data cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L1DCacheEnable(void)
{
    register unsigned int CtrlReg;

    /* enable caches only if they are disabled */
#ifdef __GNUC__
    CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
#else
    { volatile register unsigned int Reg __asm(XREG_CP15_SYS_CONTROL);
      CtrlReg = Reg; }
#endif
    if (CtrlReg & XREG_CP15_CONTROL_C_BIT) {
        return;
    }

    /* invalidate the Data cache */
    Xil_L1DCacheInvalidate();

    /* enable the Data cache */
    CtrlReg |= (XREG_CP15_CONTROL_C_BIT);

    mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
}
/****************************************************************************
*
* Disable the level 1 Data cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L1DCacheDisable(void)
{
    register unsigned int CtrlReg;

    /* clean and invalidate the Data cache */
    Xil_L1DCacheFlush();

    /* disable the Data cache */
#ifdef __GNUC__
    CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
#else
    { volatile register unsigned int Reg __asm(XREG_CP15_SYS_CONTROL);
      CtrlReg = Reg; }
#endif
    CtrlReg &= ~(XREG_CP15_CONTROL_C_BIT);

    mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
}
/****************************************************************************
*
* Invalidate the level 1 Data cache.
*
* @param    None.
*
* @return   None.
*
* @note     In Cortex-A9, there is no cp15 instruction for invalidating
*           the whole D-cache. This function invalidates each line by
*           set/way.
*
****************************************************************************/
void Xil_L1DCacheInvalidate(void)
{
    register unsigned int CsidReg, C7Reg;
    unsigned int CacheSize, LineSize, NumWays;
    unsigned int Way, WayIndex, Set, SetIndex, NumSet;
    unsigned int currmask;

#ifdef __GNUC__
    unsigned int stack_start, stack_end, stack_size;
#endif

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

#ifdef __GNUC__
    stack_end = (unsigned int) &_stack_end;
    stack_start = (unsigned int) &_stack;
    stack_size = stack_start - stack_end;

    /* Flush stack memory to save return address */
    Xil_DCacheFlushRange(stack_end, stack_size);
#endif

    /* Select cache level 0 and D cache in CSSR */
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);

#ifdef __GNUC__
    CsidReg = mfcp(XREG_CP15_CACHE_SIZE_ID);
#else
    { volatile register unsigned int Reg __asm(XREG_CP15_CACHE_SIZE_ID);
      CsidReg = Reg; }
#endif

    /* Determine Cache Size */
    CacheSize = (CsidReg >> 13) & 0x1FF;
    CacheSize += 1;
    CacheSize *= 128;    /* to get number of bytes */

    /* Number of Ways */
    NumWays = (CsidReg & 0x3ff) >> 3;
    NumWays += 1;

    /* Get the cacheline size, way size, index size from csidr */
    LineSize = (CsidReg & 0x07) + 4;

    NumSet = CacheSize / NumWays;
    NumSet /= (1 << LineSize);

    Way = 0UL;
    Set = 0UL;

    /* Invalidate all the cachelines */
    for (WayIndex = 0; WayIndex < NumWays; WayIndex++) {
        for (SetIndex = 0; SetIndex < NumSet; SetIndex++) {
            C7Reg = Way | Set;
#ifdef __GNUC__
            /* Invalidate by Set/Way */
            __asm__ __volatile__("mcr " \
            XREG_CP15_INVAL_DC_LINE_SW :: "r" (C7Reg));
#else
            //mtcp(XREG_CP15_INVAL_DC_LINE_SW, C7Reg);
            { volatile register unsigned int Reg
                __asm(XREG_CP15_INVAL_DC_LINE_SW);
              Reg = C7Reg; }
#endif
            Set += (1 << LineSize);
        }
        Set = 0UL;
        Way += 0x40000000;
    }

    /* Wait for L1 invalidate to complete */
    dsb();
    mtcpsr(currmask);
}
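/*
 * Worked example of the CCSIDR decoding above (assuming the typical Zynq-7000
 * configuration of a 32 KB, 4-way L1 D-cache with 32-byte lines; other parts
 * may differ):
 *     CsidReg[27:13] (sets - 1)  = 255 -> CacheSize = (255 + 1) * 128 = 32768 bytes
 *     CsidReg[12:3]  (ways - 1)  = 3   -> NumWays   = 4
 *     CsidReg[2:0]   (line size) = 1   -> LineSize  = 1 + 4 = 5, i.e. 32-byte lines
 *     NumSet = 32768 / 4 / 32 = 256
 * The set/way value written to the cp15 register packs the way index into
 * bits [31:30] (hence the 0x40000000 increment) and the set index into
 * bits [12:5] (hence the (1 << LineSize) increment).
 */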
/****************************************************************************
*
* Invalidate a level 1 Data cache line. If the byte specified by the address
* (adr) is cached by the Data cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the modified contents
* are lost and are NOT written to system memory before the line is
* invalidated.
*
* @param    Address to be invalidated.
*
* @return   None.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L1DCacheInvalidateLine(unsigned int adr)
{
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
    mtcp(XREG_CP15_INVAL_DC_LINE_MVA_POC, (adr & (~0x1F)));

    /* Wait for L1 invalidate to complete */
    dsb();
}
/****************************************************************************
*
* Invalidate the level 1 Data cache for the given address range.
* If the bytes specified by the address range are cached by the Data cache,
* the cachelines containing those bytes are invalidated. If a cacheline
* is modified (dirty), the modified contents are lost and are NOT
* written to system memory before the line is invalidated.
*
* @param    Start address of range to be invalidated.
* @param    Length of range to be invalidated in bytes.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L1DCacheInvalidateRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    if (len != 0) {
        /* Back the starting address up to the start of a cache line
         * perform cache operations until adr+len
         */
        end = adr + len;
        adr = adr & ~(cacheline - 1);

        /* Select cache L0 D-cache in CSSR */
        mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);

        while (adr < end) {
#ifdef __GNUC__
            /* Invalidate L1 Data cache line */
            __asm__ __volatile__("mcr " \
            XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (adr));
#else
            { volatile register unsigned int Reg
                __asm(XREG_CP15_INVAL_DC_LINE_MVA_POC);
              Reg = adr; }
#endif
            adr += cacheline;
        }
    }

    /* Wait for L1 invalidate to complete */
    dsb();
    mtcpsr(currmask);
}
/****************************************************************************
*
* Flush the level 1 Data cache.
*
* @param    None.
*
* @return   None.
*
* @note     In Cortex-A9, there is no cp15 instruction for flushing
*           the whole D-cache. This function flushes each line by
*           set/way.
*
****************************************************************************/
void Xil_L1DCacheFlush(void)
{
    register unsigned int CsidReg, C7Reg;
    unsigned int CacheSize, LineSize, NumWays;
    unsigned int Way, WayIndex, Set, SetIndex, NumSet;
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    /* Select cache level 0 and D cache in CSSR */
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);

#ifdef __GNUC__
    CsidReg = mfcp(XREG_CP15_CACHE_SIZE_ID);
#else
    { volatile register unsigned int Reg __asm(XREG_CP15_CACHE_SIZE_ID);
      CsidReg = Reg; }
#endif

    /* Determine Cache Size */
    CacheSize = (CsidReg >> 13) & 0x1FF;
    CacheSize += 1;
    CacheSize *= 128;    /* to get number of bytes */

    /* Number of Ways */
    NumWays = (CsidReg & 0x3ff) >> 3;
    NumWays += 1;

    /* Get the cacheline size, way size, index size from csidr */
    LineSize = (CsidReg & 0x07) + 4;

    NumSet = CacheSize / NumWays;
    NumSet /= (1 << LineSize);

    Way = 0UL;
    Set = 0UL;

    /* Flush all the cachelines */
    for (WayIndex = 0; WayIndex < NumWays; WayIndex++) {
        for (SetIndex = 0; SetIndex < NumSet; SetIndex++) {
            C7Reg = Way | Set;
#ifdef __GNUC__
            /* Flush by Set/Way */
            __asm__ __volatile__("mcr " \
            XREG_CP15_CLEAN_INVAL_DC_LINE_SW :: "r" (C7Reg));
#else
            { volatile register unsigned int Reg
                __asm(XREG_CP15_CLEAN_INVAL_DC_LINE_SW);
              Reg = C7Reg; }
#endif
            Set += (1 << LineSize);
        }
        Set = 0UL;
        Way += 0x40000000;
    }

    /* Wait for L1 flush to complete */
    dsb();
    mtcpsr(currmask);
}
/****************************************************************************
*
* Flush a level 1 Data cache line. If the byte specified by the address (adr)
* is cached by the Data cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the entire
* contents of the cacheline are written to system memory before the
* line is invalidated.
*
* @param    Address to be flushed.
*
* @return   None.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L1DCacheFlushLine(unsigned int adr)
{
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
    mtcp(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC, (adr & (~0x1F)));

    /* Wait for L1 flush to complete */
    dsb();
}
/****************************************************************************
*
* Flush the level 1 Data cache for the given address range.
* If the bytes specified by the address range are cached by the Data cache,
* the cachelines containing those bytes are invalidated. If a cacheline
* is modified (dirty), it is written to system memory before the line is
* invalidated.
*
* @param    Start address of range to be flushed.
* @param    Length of range to be flushed in bytes.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L1DCacheFlushRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    if (len != 0) {
        /* Back the starting address up to the start of a cache line
         * perform cache operations until adr+len
         */
        end = adr + len;
        adr = adr & ~(cacheline - 1);

        /* Select cache L0 D-cache in CSSR */
        mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);

        while (adr < end) {
#ifdef __GNUC__
            /* Flush L1 Data cache line */
            __asm__ __volatile__("mcr " \
            XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC :: "r" (adr));
#else
            { volatile register unsigned int Reg
                __asm(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC);
              Reg = adr; }
#endif
            adr += cacheline;
        }
    }

    /* Wait for L1 flush to complete */
    dsb();
    mtcpsr(currmask);
}
/****************************************************************************
*
* Store a level 1 Data cache line. If the byte specified by the address (adr)
* is cached by the Data cache and the cacheline is modified (dirty),
* the entire contents of the cacheline are written to system memory.
* After the store completes, the cacheline is marked as unmodified
* (not dirty).
*
* @param    Address to be stored.
*
* @return   None.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L1DCacheStoreLine(unsigned int adr)
{
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
    mtcp(XREG_CP15_CLEAN_DC_LINE_MVA_POC, (adr & (~0x1F)));

    /* Wait for L1 store to complete */
    dsb();
}
/****************************************************************************
*
* Enable the level 1 instruction cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L1ICacheEnable(void)
{
    register unsigned int CtrlReg;

    /* enable caches only if they are disabled */
#ifdef __GNUC__
    CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
#else
    { volatile register unsigned int Reg __asm(XREG_CP15_SYS_CONTROL);
      CtrlReg = Reg; }
#endif
    if (CtrlReg & XREG_CP15_CONTROL_I_BIT) {
        return;
    }

    /* invalidate the instruction cache */
    mtcp(XREG_CP15_INVAL_IC_POU, 0);

    /* enable the instruction cache */
    CtrlReg |= (XREG_CP15_CONTROL_I_BIT);

    mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
}
/****************************************************************************
*
* Disable the level 1 instruction cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L1ICacheDisable(void)
{
    register unsigned int CtrlReg;

    dsb();

    /* invalidate the instruction cache */
    mtcp(XREG_CP15_INVAL_IC_POU, 0);

    /* disable the instruction cache */
#ifdef __GNUC__
    CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
#else
    { volatile register unsigned int Reg __asm(XREG_CP15_SYS_CONTROL);
      CtrlReg = Reg; }
#endif
    CtrlReg &= ~(XREG_CP15_CONTROL_I_BIT);

    mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
}
/****************************************************************************
*
* Invalidate the entire level 1 instruction cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L1ICacheInvalidate(void)
{
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 1);

    /* invalidate the instruction cache */
    mtcp(XREG_CP15_INVAL_IC_POU, 0);

    /* Wait for L1 invalidate to complete */
    dsb();
}
/****************************************************************************
*
* Invalidate a level 1 instruction cache line. If the instruction specified
* by the parameter adr is cached by the instruction cache, the cacheline
* containing that instruction is invalidated.
*
* @param    Address to be invalidated.
*
* @return   None.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L1ICacheInvalidateLine(unsigned int adr)
{
    mtcp(XREG_CP15_CACHE_SIZE_SEL, 1);
    mtcp(XREG_CP15_INVAL_IC_LINE_MVA_POU, (adr & (~0x1F)));

    /* Wait for L1 invalidate to complete */
    dsb();
}
/****************************************************************************
*
* Invalidate the level 1 instruction cache for the given address range.
* If the bytes specified by the address range are cached by the instruction
* cache, the cachelines containing those bytes are invalidated.
*
* @param    Start address of range to be invalidated.
* @param    Length of range to be invalidated in bytes.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L1ICacheInvalidateRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    if (len != 0) {
        /* Back the starting address up to the start of a cache line
         * perform cache operations until adr+len
         */
        end = adr + len;
        adr = adr & ~(cacheline - 1);

        /* Select cache L0 I-cache in CSSR */
        mtcp(XREG_CP15_CACHE_SIZE_SEL, 1);

        while (adr < end) {
#ifdef __GNUC__
            /* Invalidate L1 I-cache line */
            __asm__ __volatile__("mcr " \
            XREG_CP15_INVAL_IC_LINE_MVA_POU :: "r" (adr));
#else
            { volatile register unsigned int Reg
                __asm(XREG_CP15_INVAL_IC_LINE_MVA_POU);
              Reg = adr; }
#endif
            adr += cacheline;
        }
    }

    /* Wait for L1 invalidate to complete */
    dsb();
    mtcpsr(currmask);
}
/****************************************************************************
*
* Enable the L2 cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L2CacheEnable(void)
{
    register unsigned int L2CCReg;

    L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);

    /* only enable if L2CC is currently disabled */
    if ((L2CCReg & 0x01) == 0) {
        /* set up the way size and latencies */
        L2CCReg = Xil_In32(XPS_L2CC_BASEADDR +
                           XPS_L2CC_AUX_CNTRL_OFFSET);
        L2CCReg &= XPS_L2CC_AUX_REG_ZERO_MASK;
        L2CCReg |= XPS_L2CC_AUX_REG_DEFAULT_MASK;
        Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_AUX_CNTRL_OFFSET,
                  L2CCReg);
        Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_TAG_RAM_CNTRL_OFFSET,
                  XPS_L2CC_TAG_RAM_DEFAULT_MASK);
        Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_DATA_RAM_CNTRL_OFFSET,
                  XPS_L2CC_DATA_RAM_DEFAULT_MASK);

        /* Clear the pending interrupts */
        L2CCReg = Xil_In32(XPS_L2CC_BASEADDR +
                           XPS_L2CC_ISR_OFFSET);
        Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_IAR_OFFSET, L2CCReg);

        Xil_L2CacheInvalidate();

        /* Enable the L2CC */
        L2CCReg = Xil_In32(XPS_L2CC_BASEADDR +
                           XPS_L2CC_CNTRL_OFFSET);
        Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET,
                  (L2CCReg | (0x01)));

        Xil_L2CacheSync();

        /* synchronize the processor */
        dsb();
    }
}
/****************************************************************************
*
* Disable the L2 cache.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L2CacheDisable(void)
{
    register unsigned int L2CCReg;

    L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);

    if (L2CCReg & 0x1) {
        /* Clean and Invalidate L2 Cache */
        Xil_L2CacheFlush();

        /* Disable the L2CC */
        L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);
        Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET,
                  (L2CCReg & (~0x01)));

        /* Wait for the cache operations to complete */
        dsb();
    }
}
/****************************************************************************
*
* Invalidate the entire L2 cache. All cachelines are invalidated; if a
* cacheline is modified (dirty), the modified contents are lost and are
* NOT written to system memory before the line is invalidated.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L2CacheInvalidate(void)
{
    /* Invalidate the caches */
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_WAY_OFFSET,
              0x0000FFFF);
    while ((Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_WAY_OFFSET))
           & 0x0000FFFF);

    /* Wait for the invalidate to complete */
    Xil_L2CacheSync();

    /* synchronize the processor */
    dsb();
}
/****************************************************************************
*
* Invalidate a level 2 cache line. If the byte specified by the address (adr)
* is cached by the L2 cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the modified contents
* are lost and are NOT written to system memory before the line is
* invalidated.
*
* @param    Address to be invalidated.
*
* @return   None.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L2CacheInvalidateLine(unsigned int adr)
{
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_PA_OFFSET, adr);

    /* synchronize the processor */
    dsb();
}
/****************************************************************************
*
* Invalidate the level 2 cache for the given address range.
* If the bytes specified by the address range are cached by the L2 cache,
* the cachelines containing those bytes are invalidated. If a cacheline
* is modified (dirty), the modified contents are lost and are NOT
* written to system memory before the line is invalidated.
*
* @param    Start address of range to be invalidated.
* @param    Length of range to be invalidated in bytes.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L2CacheInvalidateRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    volatile u32 *L2CCOffset = (volatile u32 *) (XPS_L2CC_BASEADDR +
                XPS_L2CC_CACHE_INVLD_PA_OFFSET);

    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    if (len != 0) {
        /* Back the starting address up to the start of a cache line
         * perform cache operations until adr+len
         */
        end = adr + len;
        adr = adr & ~(cacheline - 1);

        /* Disable Write-back and line fills */
        Xil_L2WriteDebugCtrl(0x3);

        while (adr < end) {
            /* Invalidate L2 cache line */
            *L2CCOffset = adr;
            adr += cacheline;
        }

        /* Enable Write-back and line fills */
        Xil_L2WriteDebugCtrl(0x0);
    }

    /* synchronize the processor */
    dsb();
    mtcpsr(currmask);
}
/****************************************************************************
*
* Flush the entire L2 cache. All cachelines are invalidated; if a cacheline
* is modified (dirty), the entire contents of the cacheline are written to
* system memory before the line is invalidated.
*
* @param    None.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L2CacheFlush(void)
{
    /* Flush the caches */

    /* Disable Write-back and line fills */
    Xil_L2WriteDebugCtrl(0x3);

    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_WAY_OFFSET,
              0x0000FFFF);
    while ((Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_WAY_OFFSET))
           & 0x0000FFFF);

    Xil_L2CacheSync();

    /* Enable Write-back and line fills */
    Xil_L2WriteDebugCtrl(0x0);

    /* synchronize the processor */
    dsb();
}
/****************************************************************************
*
* Flush a level 2 cache line. If the byte specified by the address (adr)
* is cached by the L2 cache, the cacheline containing that byte is
* invalidated. If the cacheline is modified (dirty), the entire
* contents of the cacheline are written to system memory before the
* line is invalidated.
*
* @param    Address to be flushed.
*
* @return   None.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*           When the PL310 erratum 588369 workaround is enabled (see
*           xil_errata.h), the clean and the invalidate are issued as two
*           separate register writes instead of one atomic clean-and-invalidate
*           operation.
*
****************************************************************************/
void Xil_L2CacheFlushLine(unsigned int adr)
{
#ifdef CONFIG_PL310_ERRATA_588369
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_CLEAN_PA_OFFSET, adr);
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_PA_OFFSET, adr);
#else
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_PA_OFFSET, adr);
#endif
    /* synchronize the processor */
    dsb();
}
/****************************************************************************
*
* Flush the level 2 cache for the given address range.
* If the bytes specified by the address range are cached by the L2 cache,
* the cachelines containing those bytes are invalidated. If a cacheline
* is modified (dirty), it is written to system memory before the line is
* invalidated.
*
* @param    Start address of range to be flushed.
* @param    Length of range to be flushed in bytes.
*
* @return   None.
*
* @note     None.
*
****************************************************************************/
void Xil_L2CacheFlushRange(unsigned int adr, unsigned len)
{
    const unsigned cacheline = 32;
    unsigned int end;
    volatile u32 *L2CCOffset = (volatile u32 *) (XPS_L2CC_BASEADDR +
                XPS_L2CC_CACHE_INV_CLN_PA_OFFSET);

    unsigned int currmask;

    currmask = mfcpsr();
    mtcpsr(currmask | IRQ_FIQ_MASK);

    if (len != 0) {
        /* Back the starting address up to the start of a cache line
         * perform cache operations until adr+len
         */
        end = adr + len;
        adr = adr & ~(cacheline - 1);

        /* Disable Write-back and line fills */
        Xil_L2WriteDebugCtrl(0x3);

        while (adr < end) {
            /* Flush L2 cache line */
            *L2CCOffset = adr;
            adr += cacheline;
        }

        /* Enable Write-back and line fills */
        Xil_L2WriteDebugCtrl(0x0);
    }

    /* synchronize the processor */
    dsb();
    mtcpsr(currmask);
}
/****************************************************************************
*
* Store a level 2 cache line. If the byte specified by the address (adr)
* is cached by the L2 cache and the cacheline is modified (dirty),
* the entire contents of the cacheline are written to system memory.
* After the store completes, the cacheline is marked as unmodified
* (not dirty).
*
* @param    Address to be stored.
*
* @return   None.
*
* @note     The bottom 5 bits are set to 0, forced by architecture.
*
****************************************************************************/
void Xil_L2CacheStoreLine(unsigned int adr)
{
    Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_CLEAN_PA_OFFSET, adr);

    /* synchronize the processor */
    dsb();
}