1 /******************************************************************************
3 * Copyright (C) 2010 - 2015 Xilinx, Inc. All rights reserved.
5 * Permission is hereby granted, free of charge, to any person obtaining a copy
6 * of this software and associated documentation files (the "Software"), to deal
7 * in the Software without restriction, including without limitation the rights
8 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 * copies of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * Use of the Software is limited solely to applications:
16 * (a) running on a Xilinx device, or
17 * (b) that interact with a Xilinx device through a bus or interconnect.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * XILINX BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
23 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
24 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27 * Except as contained in this notice, the name of the Xilinx shall not be used
28 * in advertising or otherwise to promote the sale, use or other dealings in
29 * this Software without prior written authorization from Xilinx.
31 ******************************************************************************/
32 /*****************************************************************************/
37 * Contains required functions for the ARM cache functionality.
40 * MODIFICATION HISTORY:
42 * Ver Who Date Changes
43 * ----- ---- -------- -----------------------------------------------
44 * 1.00a ecm 01/29/10 First release
45 * 1.00a ecm 06/24/10 Moved the L1 and L2 specific function prototypes
46 * to xil_cache_mach.h to give access to sophisticated users
47 * 3.02a sdm 04/07/11 Updated Flush/InvalidateRange APIs to flush/invalidate
48 * L1 and L2 caches in a single loop and used dsb, L2 sync
49 * at the end of the loop.
50 * 3.04a sdm 01/02/12 Remove redundant dsb/dmb instructions in cache maintenance
52 * 3.07a asa 07/16/12 Corrected the L1 and L2 cache invalidation order.
53 * 3.07a sgd 09/18/12 Corrected the L2 cache enable and disable sequence.
54 * 3.10a srt 04/18/13 Implemented ARM Erratas. Please refer to file
55 * 'xil_errata.h' for errata description
56 * 3.10a asa 05/13/13 Modified cache disable APIs. The L2 cache disable
57 * operation was being done with L1 Data cache disabled. This is
58 * fixed so that L2 cache disable operation happens independent of
59 * L1 cache disable operation. This fixes CR #706464.
60 * Changes are done to do a L2 cache sync (poll reg7_?cache_?sync).
61 * This is done to fix the CR #700542.
62 * 3.11a asa 09/23/13 Modified the Xil_DCacheFlushRange and
63 * Xil_DCacheInvalidateRange to fix potential issues. Fixed other
64 * relevant cache APIs to disable and enable back the interrupts.
65 * This fixes CR #663885.
66 * 3.11a asa 09/28/13 Made changes for L2 cache sync operation. It is found
67 * out that for L2 cache flush/clean/invalidation by cache lines
68 * does not need a cache sync as these are atomic nature. Similarly
69 * figured out that for complete L2 cache flush/invalidation by way
70 * we need to wait for some more time in a loop till the status
71 * shows that the cache operation is completed.
72 * 4.00 pkp 24/01/14 Modified Xil_DCacheInvalidateRange to fix the bug. Few
73 * cache lines were missed to invalidate when unaligned address
74 * invalidation was accommodated. That fixes CR #766768.
75 * Also in Xil_L1DCacheInvalidate, while invalidating all L1D cache
76 * stack memory which contains return address was invalidated. So
77 * stack memory was flushed first and then L1D cache is invalidated.
78 * This is done to fix CR #763829
79 * 4.01 asa 05/09/14 Made changes in cortexa9/xil_cache.c to fix CR# 798230.
80 * 4.02 pkp 06/27/14 Added notes to Xil_L1DCacheInvalidateRange function for
81 * explanation of CR#785243
82 * 5.00 kvn 12/15/14 Xil_L2CacheInvalidate was modified to fix CR# 838835. L2 Cache
83 * has stack memory which has return address. Before invalidating
84 * cache, stack memory was flushed first and L2 Cache is invalidated.
85 * 5.01 pkp 05/12/15 Xil_DCacheInvalidateRange and Xil_DCacheFlushRange is modified
86 * to remove unnecessary dsb in the APIs. Instead of using dsb
87 * for L2 Cache, L2CacheSync has been used for each L2 cache line
88 * and single dsb has been used for L1 cache. Also L2CacheSync is
89 * added into Xil_L2CacheInvalidateRange API. Xil_L1DCacheInvalidate
90 * and Xil_L2CacheInvalidate APIs are modified to flush the complete
91 * stack instead of just System Stack
92 * 5.03 pkp 10/07/15 L2 Cache functionalities are avoided for the OpenAMP slave
93 * application(when USE_AMP flag is defined for BSP) as master CPU
94 * would be utilizing L2 cache for its operation
98 ******************************************************************************/
100 /***************************** Include Files *********************************/
102 #include "xil_cache.h"
103 #include "xil_cache_l.h"
105 #include "xpseudo_asm.h"
106 #include "xparameters.h"
107 #include "xreg_cortexa9.h"
109 #include "xil_errata.h"
110 #include "xil_exception.h"
112 /************************** Function Prototypes ******************************/
114 /************************** Variable Definitions *****************************/
116 #define IRQ_FIQ_MASK 0xC0U /* Mask IRQ and FIQ interrupts in cpsr */
119 extern s32 _stack_end;
120 extern s32 __undef_stack;
124 /****************************************************************************
126 * Access L2 Debug Control Register.
128 * @param Value, value to be written to Debug Control Register.
134 ****************************************************************************/
136 static inline void Xil_L2WriteDebugCtrl(u32 Value)
138 static void Xil_L2WriteDebugCtrl(u32 Value)
141 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
142 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_DEBUG_CTRL_OFFSET, Value);
148 /****************************************************************************
150 * Perform L2 Cache Sync Operation.
158 ****************************************************************************/
160 static inline void Xil_L2CacheSync(void)
162 static void Xil_L2CacheSync(void)
165 #ifdef CONFIG_PL310_ERRATA_753970
166 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_DUMMY_CACHE_SYNC_OFFSET, 0x0U);
168 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_SYNC_OFFSET, 0x0U);
172 /****************************************************************************
174 * Enable the Data cache.
182 ****************************************************************************/
void Xil_DCacheEnable(void)
{
	Xil_L1DCacheEnable();
#ifndef USE_AMP
	/* Skip L2 for an AMP slave: the master CPU owns the L2 cache
	 * (see 5.03 changelog above). */
	Xil_L2CacheEnable();
#endif
}
191 /****************************************************************************
193 * Disable the Data cache.
201 ****************************************************************************/
void Xil_DCacheDisable(void)
{
#ifndef USE_AMP
	/* Disable L2 first, independently of the L1 disable (CR #706464) */
	Xil_L2CacheDisable();
#endif
	Xil_L1DCacheDisable();
}
210 /****************************************************************************
212 * Invalidate the entire Data cache.
220 ****************************************************************************/
221 void Xil_DCacheInvalidate(void)
226 mtcpsr(currmask | IRQ_FIQ_MASK);
228 Xil_L2CacheInvalidate();
230 Xil_L1DCacheInvalidate();
235 /****************************************************************************
237 * Invalidate a Data cache line. If the byte specified by the address (adr)
238 * is cached by the Data cache, the cacheline containing that byte is
239 * invalidated. If the cacheline is modified (dirty), the modified contents
240 * are lost and are NOT written to system memory before the line is
243 * @param Address to be flushed.
247 * @note The bottom 4 bits are set to 0, forced by architecture.
249 ****************************************************************************/
250 void Xil_DCacheInvalidateLine(u32 adr)
255 mtcpsr(currmask | IRQ_FIQ_MASK);
257 Xil_L2CacheInvalidateLine(adr);
259 Xil_L1DCacheInvalidateLine(adr);
264 /****************************************************************************
266 * Invalidate the Data cache for the given address range.
267 * If the bytes specified by the address (adr) are cached by the Data cache,
268 * the cacheline containing that byte is invalidated. If the cacheline
269 * is modified (dirty), the modified contents are lost and are NOT
270 * written to system memory before the line is invalidated.
272 * In this function, if start address or end address is not aligned to cache-line,
273 * particular cache-line containing unaligned start or end address is flush first
274 * and then invalidated the others as invalidating the same unaligned cache line
275 * may result into loss of data. This issue raises few possibilities.
278 * If the address to be invalidated is not cache-line aligned, the
279 * following choices are available:
280 * 1) Invalidate the cache line when required and do not bother much for the
281 * side effects. Though it sounds good, it can result in hard-to-debug issues.
282 * The problem is, if some other variable are allocated in the
283 * same cache line and had been recently updated (in cache), the invalidation
284 * would result in loss of data.
286 * 2) Flush the cache line first. This will ensure that if any other variable
287 * present in the same cache line and updated recently are flushed out to memory.
288 * Then it can safely be invalidated. Again it sounds good, but this can result
289 * in issues. For example, when the invalidation happens
290 * in a typical ISR (after a DMA transfer has updated the memory), then flushing
291 * the cache line means, losing data that were updated recently before the ISR
294 * Linux prefers the second one. To have uniform implementation (across standalone
295 * and Linux), the second option is implemented.
296 * This being the case, the following needs to be taken care of:
297 * 1) Whenever possible, the addresses must be cache line aligned. Please note that,
298 * not just start address, even the end address must be cache line aligned. If that
299 * is taken care of, this will always work.
300 * 2) Avoid situations where invalidation has to be done after the data is updated by
301 * peripheral/DMA directly into the memory. It is not tough to achieve (may be a bit
302 * risky). The common use case to do invalidation is when a DMA happens. Generally
303 * for such use cases, buffers can be allocated first and then start the DMA. The
304 * practice that needs to be followed here is, immediately after buffer allocation
305 * and before starting the DMA, do the invalidation. With this approach, invalidation
306 * need not to be done after the DMA transfer is over.
308 * This is going to always work if done carefully.
309 * However, the concern is, there is no guarantee that invalidate has not needed to be
310 * done after DMA is complete. For example, because of some reasons if the first cache
311 * line or last cache line (assuming the buffer in question comprises of multiple cache
312 * lines) are brought into cache (between the time it is invalidated and DMA completes)
313 * because of some speculative prefetching or reading data for a variable present
314 * in the same cache line, then we will have to invalidate the cache after DMA is complete.
317 * @param Start address of range to be invalidated.
318 * @param Length of range to be invalidated in bytes.
324 ****************************************************************************/
325 void Xil_DCacheInvalidateRange(INTPTR adr, u32 len)
327 const u32 cacheline = 32U;
332 volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
333 XPS_L2CC_CACHE_INVLD_PA_OFFSET);
336 mtcpsr(currmask | IRQ_FIQ_MASK);
341 /* Select L1 Data cache in CSSR */
342 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
344 if ((tempadr & (cacheline-1U)) != 0U) {
345 tempadr &= (~(cacheline - 1U));
347 Xil_L1DCacheFlushLine(tempadr);
349 /* Disable Write-back and line fills */
350 Xil_L2WriteDebugCtrl(0x3U);
351 Xil_L2CacheFlushLine(tempadr);
352 /* Enable Write-back and line fills */
353 Xil_L2WriteDebugCtrl(0x0U);
356 tempadr += cacheline;
358 if ((tempend & (cacheline-1U)) != 0U) {
359 tempend &= (~(cacheline - 1U));
361 Xil_L1DCacheFlushLine(tempend);
363 /* Disable Write-back and line fills */
364 Xil_L2WriteDebugCtrl(0x3U);
365 Xil_L2CacheFlushLine(tempend);
366 /* Enable Write-back and line fills */
367 Xil_L2WriteDebugCtrl(0x0U);
372 while (tempadr < tempend) {
374 /* Invalidate L2 cache line */
375 *L2CCOffset = tempadr;
379 /* Invalidate L1 Data cache line */
380 #if defined (__GNUC__) || defined (__ICCARM__)
381 asm_cp15_inval_dc_line_mva_poc(tempadr);
383 { volatile register u32 Reg
384 __asm(XREG_CP15_INVAL_DC_LINE_MVA_POC);
387 tempadr += cacheline;
395 /****************************************************************************
397 * Flush the entire Data cache.
405 ****************************************************************************/
406 void Xil_DCacheFlush(void)
411 mtcpsr(currmask | IRQ_FIQ_MASK);
419 /****************************************************************************
421 * Flush a Data cache line. If the byte specified by the address (adr)
422 * is cached by the Data cache, the cacheline containing that byte is
423 * invalidated. If the cacheline is modified (dirty), the entire
424 * contents of the cacheline are written to system memory before the
425 * line is invalidated.
427 * @param Address to be flushed.
431 * @note The bottom 4 bits are set to 0, forced by architecture.
433 ****************************************************************************/
434 void Xil_DCacheFlushLine(u32 adr)
439 mtcpsr(currmask | IRQ_FIQ_MASK);
440 Xil_L1DCacheFlushLine(adr);
442 /* Disable Write-back and line fills */
443 Xil_L2WriteDebugCtrl(0x3U);
445 Xil_L2CacheFlushLine(adr);
447 /* Enable Write-back and line fills */
448 Xil_L2WriteDebugCtrl(0x0U);
454 /****************************************************************************
455 * Flush the Data cache for the given address range.
456 * If the bytes specified by the address (adr) are cached by the Data cache,
457 * the cacheline containing that byte is invalidated. If the cacheline
458 * is modified (dirty), the written to system memory first before the
459 * before the line is invalidated.
461 * @param Start address of range to be flushed.
462 * @param Length of range to be flushed in bytes.
468 ****************************************************************************/
469 void Xil_DCacheFlushRange(INTPTR adr, u32 len)
472 const u32 cacheline = 32U;
475 volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
476 XPS_L2CC_CACHE_INV_CLN_PA_OFFSET);
479 mtcpsr(currmask | IRQ_FIQ_MASK);
482 /* Back the starting address up to the start of a cache line
483 * perform cache operations until adr+len
485 end = LocalAddr + len;
486 LocalAddr &= ~(cacheline - 1U);
488 while (LocalAddr < end) {
490 /* Flush L1 Data cache line */
491 #if defined (__GNUC__) || defined (__ICCARM__)
492 asm_cp15_clean_inval_dc_line_mva_poc(LocalAddr);
494 { volatile register u32 Reg
495 __asm(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC);
499 /* Flush L2 cache line */
500 *L2CCOffset = LocalAddr;
503 LocalAddr += cacheline;
509 /****************************************************************************
511 * Store a Data cache line. If the byte specified by the address (adr)
512 * is cached by the Data cache and the cacheline is modified (dirty),
513 * the entire contents of the cacheline are written to system memory.
514 * After the store completes, the cacheline is marked as unmodified
517 * @param Address to be stored.
521 * @note The bottom 4 bits are set to 0, forced by architecture.
523 ****************************************************************************/
524 void Xil_DCacheStoreLine(u32 adr)
529 mtcpsr(currmask | IRQ_FIQ_MASK);
531 Xil_L1DCacheStoreLine(adr);
533 Xil_L2CacheStoreLine(adr);
538 /****************************************************************************
540 * Enable the instruction cache.
548 ****************************************************************************/
void Xil_ICacheEnable(void)
{
	Xil_L1ICacheEnable();
#ifndef USE_AMP
	/* Skip L2 for an AMP slave: the master CPU owns the L2 cache */
	Xil_L2CacheEnable();
#endif
}
557 /****************************************************************************
559 * Disable the instruction cache.
567 ****************************************************************************/
void Xil_ICacheDisable(void)
{
	/* Ensure all outstanding memory accesses complete before the
	 * caches are disabled */
	dsb();
#ifndef USE_AMP
	Xil_L2CacheDisable();
#endif
	Xil_L1ICacheDisable();
}
576 /****************************************************************************
578 * Invalidate the entire instruction cache.
586 ****************************************************************************/
587 void Xil_ICacheInvalidate(void)
592 mtcpsr(currmask | IRQ_FIQ_MASK);
594 Xil_L2CacheInvalidate();
596 Xil_L1ICacheInvalidate();
601 /****************************************************************************
603 * Invalidate an instruction cache line. If the instruction specified by the
604 * parameter adr is cached by the instruction cache, the cacheline containing
605 * that instruction is invalidated.
611 * @note The bottom 4 bits are set to 0, forced by architecture.
613 ****************************************************************************/
614 void Xil_ICacheInvalidateLine(u32 adr)
619 mtcpsr(currmask | IRQ_FIQ_MASK);
621 Xil_L2CacheInvalidateLine(adr);
623 Xil_L1ICacheInvalidateLine(adr);
627 /****************************************************************************
629 * Invalidate the instruction cache for the given address range.
630 * If the bytes specified by the address (adr) are cached by the Data cache,
631 * the cacheline containing that byte is invalidated. If the cacheline
632 * is modified (dirty), the modified contents are lost and are NOT
633 * written to system memory before the line is invalidated.
635 * @param Start address of range to be invalidated.
636 * @param Length of range to be invalidated in bytes.
642 ****************************************************************************/
643 void Xil_ICacheInvalidateRange(INTPTR adr, u32 len)
646 const u32 cacheline = 32U;
648 volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
649 XPS_L2CC_CACHE_INVLD_PA_OFFSET);
654 mtcpsr(currmask | IRQ_FIQ_MASK);
656 /* Back the starting address up to the start of a cache line
657 * perform cache operations until adr+len
659 end = LocalAddr + len;
660 LocalAddr = LocalAddr & ~(cacheline - 1U);
662 /* Select cache L0 I-cache in CSSR */
663 mtcp(XREG_CP15_CACHE_SIZE_SEL, 1U);
665 while (LocalAddr < end) {
667 /* Invalidate L2 cache line */
668 *L2CCOffset = LocalAddr;
672 /* Invalidate L1 I-cache line */
673 #if defined (__GNUC__) || defined (__ICCARM__)
674 asm_cp15_inval_ic_line_mva_pou(LocalAddr);
676 { volatile register u32 Reg
677 __asm(XREG_CP15_INVAL_IC_LINE_MVA_POU);
681 LocalAddr += cacheline;
685 /* Wait for L1 and L2 invalidate to complete */
690 /****************************************************************************
692 * Enable the level 1 Data cache.
700 ****************************************************************************/
701 void Xil_L1DCacheEnable(void)
703 register u32 CtrlReg;
705 /* enable caches only if they are disabled */
707 CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
708 #elif defined (__ICCARM__)
709 mfcp(XREG_CP15_SYS_CONTROL, CtrlReg);
711 { volatile register u32 Reg __asm(XREG_CP15_SYS_CONTROL);
714 if ((CtrlReg & (XREG_CP15_CONTROL_C_BIT)) != 0U) {
718 /* clean and invalidate the Data cache */
719 Xil_L1DCacheInvalidate();
721 /* enable the Data cache */
722 CtrlReg |= (XREG_CP15_CONTROL_C_BIT);
724 mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
727 /****************************************************************************
729 * Disable the level 1 Data cache.
737 ****************************************************************************/
738 void Xil_L1DCacheDisable(void)
740 register u32 CtrlReg;
742 /* clean and invalidate the Data cache */
746 /* disable the Data cache */
747 CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
748 #elif defined (__ICCARM__)
749 mfcp(XREG_CP15_SYS_CONTROL, CtrlReg);
751 { volatile register u32 Reg __asm(XREG_CP15_SYS_CONTROL);
755 CtrlReg &= ~(XREG_CP15_CONTROL_C_BIT);
757 mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
760 /****************************************************************************
762 * Invalidate the level 1 Data cache.
768 * @note In Cortex A9, there is no cp instruction for invalidating
769 * the whole D-cache. This function invalidates each line by
772 ****************************************************************************/
773 void Xil_L1DCacheInvalidate(void)
775 register u32 CsidReg, C7Reg;
776 u32 CacheSize, LineSize, NumWays;
777 u32 Way, WayIndex, Set, SetIndex, NumSet;
781 u32 stack_start,stack_end,stack_size;
785 mtcpsr(currmask | IRQ_FIQ_MASK);
788 stack_end = (u32)&_stack_end;
789 stack_start = (u32)&__undef_stack;
790 stack_size=stack_start-stack_end;
792 /*Flush stack memory to save return address*/
793 Xil_DCacheFlushRange(stack_end, stack_size);
796 /* Select cache level 0 and D cache in CSSR */
797 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
800 CsidReg = mfcp(XREG_CP15_CACHE_SIZE_ID);
801 #elif defined (__ICCARM__)
802 mfcp(XREG_CP15_CACHE_SIZE_ID, CsidReg);
804 { volatile register u32 Reg __asm(XREG_CP15_CACHE_SIZE_ID);
807 /* Determine Cache Size */
808 CacheSize = (CsidReg >> 13U) & 0x1FFU;
810 CacheSize *=128U; /* to get number of bytes */
813 NumWays = (CsidReg & 0x3ffU) >> 3U;
816 /* Get the cacheline size, way size, index size from csidr */
817 LineSize = (CsidReg & 0x07U) + 4U;
819 NumSet = CacheSize/NumWays;
820 NumSet /= (0x00000001U << LineSize);
825 /* Invalidate all the cachelines */
826 for (WayIndex =0U; WayIndex < NumWays; WayIndex++) {
827 for (SetIndex =0U; SetIndex < NumSet; SetIndex++) {
830 /* Invalidate by Set/Way */
831 #if defined (__GNUC__) || defined (__ICCARM__)
832 asm_cp15_inval_dc_line_sw(C7Reg);
834 /*mtcp(XREG_CP15_INVAL_DC_LINE_SW, C7Reg), */
835 { volatile register u32 Reg
836 __asm(XREG_CP15_INVAL_DC_LINE_SW);
839 Set += (0x00000001U << LineSize);
845 /* Wait for L1 invalidate to complete */
850 /****************************************************************************
852 * Invalidate a level 1 Data cache line. If the byte specified by the address
853 * (Addr) is cached by the Data cache, the cacheline containing that byte is
854 * invalidated. If the cacheline is modified (dirty), the modified contents
855 * are lost and are NOT written to system memory before the line is
858 * @param Address to be flushed.
862 * @note The bottom 5 bits are set to 0, forced by architecture.
864 ****************************************************************************/
865 void Xil_L1DCacheInvalidateLine(u32 adr)
867 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
868 mtcp(XREG_CP15_INVAL_DC_LINE_MVA_POC, (adr & (~0x1FU)));
870 /* Wait for L1 invalidate to complete */
874 /****************************************************************************
876 * Invalidate the level 1 Data cache for the given address range.
877 * If the bytes specified by the address (adr) are cached by the Data cache,
878 * the cacheline containing that byte is invalidated. If the cacheline
879 * is modified (dirty), the modified contents are lost and are NOT
880 * written to system memory before the line is invalidated.
882 * @param Start address of range to be invalidated.
883 * @param Length of range to be invalidated in bytes.
889 ****************************************************************************/
890 void Xil_L1DCacheInvalidateRange(u32 adr, u32 len)
893 const u32 cacheline = 32U;
898 mtcpsr(currmask | IRQ_FIQ_MASK);
901 /* Back the starting address up to the start of a cache line
902 * perform cache operations until adr+len
904 end = LocalAddr + len;
905 LocalAddr = LocalAddr & ~(cacheline - 1U);
907 /* Select cache L0 D-cache in CSSR */
908 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
910 while (LocalAddr < end) {
912 #if defined (__GNUC__) || defined (__ICCARM__)
913 asm_cp15_inval_dc_line_mva_poc(LocalAddr);
915 { volatile register u32 Reg
916 __asm(XREG_CP15_INVAL_DC_LINE_MVA_POC);
919 LocalAddr += cacheline;
923 /* Wait for L1 invalidate to complete */
928 /****************************************************************************
930 * Flush the level 1 Data cache.
936 * @note In Cortex A9, there is no cp instruction for flushing
937 * the whole D-cache. Need to flush each line.
939 ****************************************************************************/
940 void Xil_L1DCacheFlush(void)
942 register u32 CsidReg, C7Reg;
943 u32 CacheSize, LineSize, NumWays;
945 u32 WayIndex, Set, SetIndex, NumSet;
949 mtcpsr(currmask | IRQ_FIQ_MASK);
951 /* Select cache level 0 and D cache in CSSR */
952 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
955 CsidReg = mfcp(XREG_CP15_CACHE_SIZE_ID);
956 #elif defined (__ICCARM__)
957 mfcp(XREG_CP15_CACHE_SIZE_ID, CsidReg);
959 { volatile register u32 Reg __asm(XREG_CP15_CACHE_SIZE_ID);
963 /* Determine Cache Size */
965 CacheSize = (CsidReg >> 13U) & 0x1FFU;
967 CacheSize *=128U; /* to get number of bytes */
970 NumWays = (CsidReg & 0x3ffU) >> 3U;
973 /* Get the cacheline size, way size, index size from csidr */
974 LineSize = (CsidReg & 0x07U) + 4U;
976 NumSet = CacheSize/NumWays;
977 NumSet /= (0x00000001U << LineSize);
982 /* Invalidate all the cachelines */
983 for (WayIndex =0U; WayIndex < NumWays; WayIndex++) {
984 for (SetIndex =0U; SetIndex < NumSet; SetIndex++) {
986 /* Flush by Set/Way */
988 #if defined (__GNUC__) || defined (__ICCARM__)
989 asm_cp15_clean_inval_dc_line_sw(C7Reg);
991 { volatile register u32 Reg
992 __asm(XREG_CP15_CLEAN_INVAL_DC_LINE_SW);
995 Set += (0x00000001U << LineSize);
1001 /* Wait for L1 flush to complete */
1006 /****************************************************************************
1008 * Flush a level 1 Data cache line. If the byte specified by the address (adr)
1009 * is cached by the Data cache, the cacheline containing that byte is
1010 * invalidated. If the cacheline is modified (dirty), the entire
1011 * contents of the cacheline are written to system memory before the
1012 * line is invalidated.
1014 * @param Address to be flushed.
1018 * @note The bottom 5 bits are set to 0, forced by architecture.
1020 ****************************************************************************/
1021 void Xil_L1DCacheFlushLine(u32 adr)
1023 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
1024 mtcp(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC, (adr & (~0x1FU)));
1026 /* Wait for L1 flush to complete */
1030 /****************************************************************************
1031 * Flush the level 1 Data cache for the given address range.
1032 * If the bytes specified by the address (adr) are cached by the Data cache,
1033 * the cacheline containing that byte is invalidated. If the cacheline
1034 * is modified (dirty), the written to system memory first before the
1035 * before the line is invalidated.
1037 * @param Start address of range to be flushed.
1038 * @param Length of range to be flushed in bytes.
1044 ****************************************************************************/
1045 void Xil_L1DCacheFlushRange(u32 adr, u32 len)
1047 u32 LocalAddr = adr;
1048 const u32 cacheline = 32U;
1052 currmask = mfcpsr();
1053 mtcpsr(currmask | IRQ_FIQ_MASK);
1056 /* Back the starting address up to the start of a cache line
1057 * perform cache operations until adr+len
1059 end = LocalAddr + len;
1060 LocalAddr = LocalAddr & ~(cacheline - 1U);
1062 /* Select cache L0 D-cache in CSSR */
1063 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
1065 while (LocalAddr < end) {
1067 #if defined (__GNUC__) || defined (__ICCARM__)
1068 asm_cp15_clean_inval_dc_line_mva_poc(LocalAddr);
1070 { volatile register u32 Reg
1071 __asm(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC);
1074 LocalAddr += cacheline;
1078 /* Wait for L1 flush to complete */
1083 /****************************************************************************
1085 * Store a level 1 Data cache line. If the byte specified by the address (adr)
1086 * is cached by the Data cache and the cacheline is modified (dirty),
1087 * the entire contents of the cacheline are written to system memory.
1088 * After the store completes, the cacheline is marked as unmodified
1091 * @param Address to be stored.
1095 * @note The bottom 5 bits are set to 0, forced by architecture.
1097 ****************************************************************************/
1098 void Xil_L1DCacheStoreLine(u32 adr)
1100 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
1101 mtcp(XREG_CP15_CLEAN_DC_LINE_MVA_POC, (adr & (~0x1FU)));
1103 /* Wait for L1 store to complete */
1107 /****************************************************************************
1109 * Enable the level 1 instruction cache.
1117 ****************************************************************************/
1118 void Xil_L1ICacheEnable(void)
1120 register u32 CtrlReg;
1122 /* enable caches only if they are disabled */
1124 CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
1125 #elif defined (__ICCARM__)
1126 mfcp(XREG_CP15_SYS_CONTROL, CtrlReg);
1128 { volatile register u32 Reg __asm(XREG_CP15_SYS_CONTROL);
1131 if ((CtrlReg & (XREG_CP15_CONTROL_I_BIT)) != 0U) {
1135 /* invalidate the instruction cache */
1136 mtcp(XREG_CP15_INVAL_IC_POU, 0U);
1138 /* enable the instruction cache */
1139 CtrlReg |= (XREG_CP15_CONTROL_I_BIT);
1141 mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
1144 /****************************************************************************
1146 * Disable level 1 the instruction cache.
1154 ****************************************************************************/
1155 void Xil_L1ICacheDisable(void)
1157 register u32 CtrlReg;
1161 /* invalidate the instruction cache */
1162 mtcp(XREG_CP15_INVAL_IC_POU, 0U);
1164 /* disable the instruction cache */
1166 CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
1167 #elif defined (__ICCARM__)
1168 mfcp(XREG_CP15_SYS_CONTROL, CtrlReg);
1170 { volatile register u32 Reg __asm(XREG_CP15_SYS_CONTROL);
1173 CtrlReg &= ~(XREG_CP15_CONTROL_I_BIT);
1175 mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
1178 /****************************************************************************
1180 * Invalidate the entire level 1 instruction cache.
1188 ****************************************************************************/
1189 void Xil_L1ICacheInvalidate(void)
1191 mtcp(XREG_CP15_CACHE_SIZE_SEL, 1U);
1192 /* invalidate the instruction cache */
1193 mtcp(XREG_CP15_INVAL_IC_POU, 0U);
1195 /* Wait for L1 invalidate to complete */
1199 /****************************************************************************
1201 * Invalidate a level 1 instruction cache line. If the instruction specified by
1202 * the parameter adr is cached by the instruction cache, the cacheline containing
1203 * that instruction is invalidated.
1209 * @note The bottom 5 bits are set to 0, forced by architecture.
1211 ****************************************************************************/
1212 void Xil_L1ICacheInvalidateLine(u32 adr)
1214 mtcp(XREG_CP15_CACHE_SIZE_SEL, 1U);
1215 mtcp(XREG_CP15_INVAL_IC_LINE_MVA_POU, (adr & (~0x1FU)));
1217 /* Wait for L1 invalidate to complete */
1221 /****************************************************************************
1223 * Invalidate the level 1 instruction cache for the given address range.
1224 * If the bytes specified by the address (adr) are cached by the Data cache,
1225 * the cacheline containing that byte is invalidated. If the cacheline
1226 * is modified (dirty), the modified contents are lost and are NOT
1227 * written to system memory before the line is invalidated.
1229 * @param Start address of range to be invalidated.
1230 * @param Length of range to be invalidated in bytes.
1236 ****************************************************************************/
1237 void Xil_L1ICacheInvalidateRange(u32 adr, u32 len)
/* Invalidates every L1 I-cache line in [adr, adr+len): IRQ/FIQ are
 * masked, the start address is rounded down to a 32-byte line boundary,
 * and one invalidate-by-MVA operation is issued per cache line.
 * NOTE(review): the declarations of 'end' and 'currmask', the #else/#endif
 * halves of the compiler conditional, the loop's closing brace, and the
 * final barrier / mtcpsr(currmask) restore are missing from this
 * extract. */
1239 u32 LocalAddr = adr;
1240 const u32 cacheline = 32U;
/* Mask interrupts so the walk over the range is not interleaved with
 * other cache maintenance. */
1244 currmask = mfcpsr();
1245 mtcpsr(currmask | IRQ_FIQ_MASK);
1248 /* Back the starting address up to the start of a cache line
1249 * perform cache operations until adr+len
1251 end = LocalAddr + len;
1252 LocalAddr = LocalAddr & ~(cacheline - 1U);
1254 /* Select cache L0 I-cache in CSSR */
1255 mtcp(XREG_CP15_CACHE_SIZE_SEL, 1U);
/* One invalidate-by-MVA per 32-byte line; access idiom is
 * compiler-specific. */
1257 while (LocalAddr < end) {
1259 #if defined (__GNUC__) || defined (__ICCARM__)
1260 asm_cp15_inval_ic_line_mva_pou(LocalAddr);
1262 { volatile register u32 Reg
1263 __asm(XREG_CP15_INVAL_IC_LINE_MVA_POU);
1266 LocalAddr += cacheline;
1270 /* Wait for L1 invalidate to complete */
1276 /****************************************************************************
1278 * Enable the L2 cache.
1286 ****************************************************************************/
1287 void Xil_L2CacheEnable(void)
/* Enables the PL310 L2 cache controller, only if it is currently
 * disabled: programs the auxiliary control register (mask defaults in),
 * sets the tag-RAM and data-RAM latencies, acknowledges any pending
 * interrupts, invalidates the whole L2, then sets the enable bit (bit 0)
 * of the control register.
 * NOTE(review): this extract is missing lines -- notably the value
 * argument of the Xil_Out32 at original line 1300 (line 1301), the
 * closing braces, and the final synchronization -- verify against the
 * complete file. */
1289 register u32 L2CCReg;
1291 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);
1293 /* only enable if L2CC is currently disabled */
1294 if ((L2CCReg & 0x01U) == 0U) {
1295 /* set up the way size and latencies */
1296 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR +
1297 XPS_L2CC_AUX_CNTRL_OFFSET);
1298 L2CCReg &= XPS_L2CC_AUX_REG_ZERO_MASK;
1299 L2CCReg |= XPS_L2CC_AUX_REG_DEFAULT_MASK;
1300 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_AUX_CNTRL_OFFSET,
1302 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_TAG_RAM_CNTRL_OFFSET,
1303 XPS_L2CC_TAG_RAM_DEFAULT_MASK);
1304 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_DATA_RAM_CNTRL_OFFSET,
1305 XPS_L2CC_DATA_RAM_DEFAULT_MASK);
1307 /* Clear the pending interrupts */
1308 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR +
1309 XPS_L2CC_ISR_OFFSET);
1310 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_IAR_OFFSET, L2CCReg);
/* Start from a clean slate: drop any stale L2 contents before enabling. */
1312 Xil_L2CacheInvalidate();
1313 /* Enable the L2CC */
1314 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR +
1315 XPS_L2CC_CNTRL_OFFSET);
1316 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET,
1317 (L2CCReg | (0x01U)));
1320 /* synchronize the processor */
1326 /****************************************************************************
1328 * Disable the L2 cache.
1336 ****************************************************************************/
1337 void Xil_L2CacheDisable(void)
/* Disables the PL310 L2 cache controller, only if it is currently
 * enabled: the L2 is cleaned and invalidated first (the flush call
 * between original lines 1345 and 1348 is missing from this extract),
 * then bit 0 of the control register is cleared.
 * NOTE(review): closing braces and the final barrier are also missing
 * from this extract. */
1339 register u32 L2CCReg;
1341 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);
1343 if((L2CCReg & 0x1U) != 0U) {
1345 /* Clean and Invalidate L2 Cache */
1348 /* Disable the L2CC */
1349 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);
1350 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET,
1351 (L2CCReg & (~0x01U)));
1352 /* Wait for the cache operations to complete */
1358 /****************************************************************************
1360 * Invalidate the L2 cache. If the byte specified by the address (adr)
1361 * is cached by the Data cache, the cacheline containing that byte is
1362 * invalidated. If the cacheline is modified (dirty), the modified contents
1363 * are lost and are NOT written to system memory before the line is
1366 * @param None. Operates on the entire L2 cache, not a single address.
1370 * @note The bottom 4 bits are set to 0, forced by architecture.
1372 ****************************************************************************/
1373 void Xil_L2CacheInvalidate(void)
/* Invalidates the entire L2 cache by way (all ways at once). Before the
 * whole-cache invalidate, the stack region [_stack_end, __undef_stack)
 * -- bounded by linker-script symbols -- is flushed from the data cache
 * so the saved return address and locals survive the invalidate. The
 * invalidate-by-way register is then written and polled until all way
 * bits read back as zero.
 * NOTE(review): the declaration of ResultDCache, the way-mask argument of
 * the Xil_Out32 at original line 1386 (line 1387), loop braces and the
 * trailing barrier are missing from this extract. */
1376 u32 stack_start,stack_end,stack_size;
1377 stack_end = (u32)&_stack_end;
1378 stack_start = (u32)&__undef_stack;
1379 stack_size=stack_start-stack_end;
1381 /*Flush stack memory to save return address*/
1382 Xil_DCacheFlushRange(stack_end, stack_size);
1385 /* Invalidate the caches */
1386 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_WAY_OFFSET,
/* Poll until the controller clears every way bit, i.e. the background
 * invalidate has finished. */
1388 ResultDCache = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_WAY_OFFSET)
1390 while(ResultDCache != (u32)0U) {
1391 ResultDCache = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_WAY_OFFSET)
1395 /* Wait for the invalidate to complete */
1398 /* synchronize the processor */
1402 /****************************************************************************
1404 * Invalidate a level 2 cache line. If the byte specified by the address (adr)
1405 * is cached by the Data cache, the cacheline containing that byte is
1406 * invalidated. If the cacheline is modified (dirty), the modified contents
1407 * are lost and are NOT written to system memory before the line is
1410 * @param Address to be flushed.
1414 * @note The bottom 4 bits are set to 0, forced by architecture.
1416 ****************************************************************************/
1417 void Xil_L2CacheInvalidateLine(u32 adr)
/* Invalidates the single L2 cache line containing physical address 'adr'
 * by writing it to the PL310 invalidate-by-PA register. Dirty contents of
 * the line are discarded, NOT written back to memory.
 * NOTE(review): braces and the trailing barrier are missing from this
 * extract. */
1419 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_PA_OFFSET, (u32)adr);
1420 /* synchronize the processor */
1424 /****************************************************************************
1426 * Invalidate the level 2 cache for the given address range.
1427 * If the bytes specified by the address (adr) are cached by the Data cache,
1428 * the cacheline containing that byte is invalidated. If the cacheline
1429 * is modified (dirty), the modified contents are lost and are NOT
1430 * written to system memory before the line is invalidated.
1432 * @param Start address of range to be invalidated.
1433 * @param Length of range to be invalidated in bytes.
1439 ****************************************************************************/
1440 void Xil_L2CacheInvalidateRange(u32 adr, u32 len)
/* Invalidates every L2 cache line in [adr, adr+len) through the PL310
 * invalidate-by-PA register: IRQ/FIQ are masked, the start address is
 * rounded down to a 32-byte line boundary, and write-back/line-fills are
 * disabled around the loop via the debug control register.
 * NOTE(review): the declarations of 'end' and 'currmask', the loop's
 * closing brace, the final barrier and the mtcpsr(currmask) restore are
 * missing from this extract. */
1442 u32 LocalAddr = adr;
1443 const u32 cacheline = 32U;
1445 volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
1446 XPS_L2CC_CACHE_INVLD_PA_OFFSET);
/* Mask interrupts so the walk over the range is not interleaved with
 * other cache maintenance. */
1450 currmask = mfcpsr();
1451 mtcpsr(currmask | IRQ_FIQ_MASK);
1454 /* Back the starting address up to the start of a cache line
1455 * perform cache operations until adr+len
1457 end = LocalAddr + len;
1458 LocalAddr = LocalAddr & ~(cacheline - 1U);
1460 /* Disable Write-back and line fills */
1461 Xil_L2WriteDebugCtrl(0x3U);
/* One invalidate-by-PA write per 32-byte line. */
1463 while (LocalAddr < end) {
1464 *L2CCOffset = LocalAddr;
1466 LocalAddr += cacheline;
1469 /* Enable Write-back and line fills */
1470 Xil_L2WriteDebugCtrl(0x0U);
1473 /* synchronize the processor */
1478 /****************************************************************************
1480 * Flush the L2 cache. If the byte specified by the address (adr)
1481 * is cached by the Data cache, the cacheline containing that byte is
1482 * invalidated. If the cacheline is modified (dirty), the entire
1483 * contents of the cacheline are written to system memory before the
1484 * line is invalidated.
1486 * @param None. Operates on the entire L2 cache, not a single address.
1490 * @note The bottom 4 bits are set to 0, forced by architecture.
1492 ****************************************************************************/
1493 void Xil_L2CacheFlush(void)
/* Cleans and invalidates (flushes) the entire L2 cache by way: dirty
 * lines are written to system memory, then all lines are invalidated.
 * Write-back/line-fills are disabled around the operation via the debug
 * control register, and the clean+invalidate-by-way register is polled
 * until all way bits read back as zero.
 * NOTE(review): the declaration of ResultL2Cache, the way-mask argument
 * of the Xil_Out32 at original line 1503 (line 1504), loop braces and the
 * trailing barrier are missing from this extract. */
1498 /* Flush the caches */
1500 /* Disable Write-back and line fills */
1501 Xil_L2WriteDebugCtrl(0x3U);
1503 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_WAY_OFFSET,
/* Poll until the controller clears every way bit, i.e. the background
 * clean+invalidate has finished. */
1505 ResultL2Cache = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_WAY_OFFSET)
1508 while(ResultL2Cache != (u32)0U) {
1509 ResultL2Cache = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_WAY_OFFSET)
1514 /* Enable Write-back and line fills */
1515 Xil_L2WriteDebugCtrl(0x0U);
1517 /* synchronize the processor */
1521 /****************************************************************************
1523 * Flush a level 2 cache line. If the byte specified by the address (adr)
1524 * is cached by the Data cache, the cacheline containing that byte is
1525 * invalidated. If the cacheline is modified (dirty), the entire
1526 * contents of the cacheline are written to system memory before the
1527 * line is invalidated.
1529 * @param Address to be flushed.
1533 * @note The bottom 4 bits are set to 0, forced by architecture.
1535 ****************************************************************************/
1536 void Xil_L2CacheFlushLine(u32 adr)
/* Cleans and invalidates the single L2 cache line containing 'adr'.
 * When the PL310 errata-588369 workaround is configured, the atomic
 * clean+invalidate-by-PA operation is avoided: the clean and the
 * invalidate are issued as two separate register writes. Otherwise a
 * single combined clean+invalidate-by-PA write is used.
 * NOTE(review): the #else/#endif of the errata conditional, braces and
 * the trailing barrier are missing from this extract. */
1538 #ifdef CONFIG_PL310_ERRATA_588369
1539 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_CLEAN_PA_OFFSET, adr);
1540 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_PA_OFFSET, adr);
1542 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_PA_OFFSET, adr);
1544 /* synchronize the processor */
1548 /****************************************************************************
1549 * Flush the level 2 cache for the given address range.
1550 * If the bytes specified by the address (adr) are cached by the Data cache,
1551 * the cacheline containing that byte is invalidated. If the cacheline
1552 * is modified (dirty), it is written to system memory first, before the
1553 * line is invalidated.
1555 * @param Start address of range to be flushed.
1556 * @param Length of range to be flushed in bytes.
1562 ****************************************************************************/
1563 void Xil_L2CacheFlushRange(u32 adr, u32 len)
/* Cleans and invalidates every L2 cache line in [adr, adr+len) through
 * the PL310 clean+invalidate-by-PA register: IRQ/FIQ are masked, the
 * start address is rounded down to a 32-byte line boundary, and
 * write-back/line-fills are disabled around the loop via the debug
 * control register. Dirty lines are written to memory before being
 * invalidated.
 * NOTE(review): the declarations of 'end' and 'currmask', the loop's
 * closing brace, the final barrier and the mtcpsr(currmask) restore are
 * missing from this extract. */
1565 u32 LocalAddr = adr;
1566 const u32 cacheline = 32U;
1568 volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
1569 XPS_L2CC_CACHE_INV_CLN_PA_OFFSET);
/* Mask interrupts so the walk over the range is not interleaved with
 * other cache maintenance. */
1573 currmask = mfcpsr();
1574 mtcpsr(currmask | IRQ_FIQ_MASK);
1576 /* Back the starting address up to the start of a cache line
1577 * perform cache operations until adr+len
1579 end = LocalAddr + len;
1580 LocalAddr = LocalAddr & ~(cacheline - 1U);
1582 /* Disable Write-back and line fills */
1583 Xil_L2WriteDebugCtrl(0x3U);
/* One clean+invalidate-by-PA write per 32-byte line. */
1585 while (LocalAddr < end) {
1586 *L2CCOffset = LocalAddr;
1588 LocalAddr += cacheline;
1591 /* Enable Write-back and line fills */
1592 Xil_L2WriteDebugCtrl(0x0U);
1594 /* synchronize the processor */
1599 /****************************************************************************
1601 * Store a level 2 cache line. If the byte specified by the address (adr)
1602 * is cached by the Data cache and the cacheline is modified (dirty),
1603 * the entire contents of the cacheline are written to system memory.
1604 * After the store completes, the cacheline is marked as unmodified
1607 * @param Address to be stored.
1611 * @note The bottom 4 bits are set to 0, forced by architecture.
1613 ****************************************************************************/
1614 void Xil_L2CacheStoreLine(u32 adr)
1616 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_CLEAN_PA_OFFSET, adr);
1617 /* synchronize the processor */