1 /******************************************************************************
3 * Copyright (C) 2010 - 2015 Xilinx, Inc. All rights reserved.
5 * Permission is hereby granted, free of charge, to any person obtaining a copy
6 * of this software and associated documentation files (the "Software"), to deal
7 * in the Software without restriction, including without limitation the rights
8 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 * copies of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * Use of the Software is limited solely to applications:
16 * (a) running on a Xilinx device, or
17 * (b) that interact with a Xilinx device through a bus or interconnect.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * XILINX BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
23 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
24 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27 * Except as contained in this notice, the name of the Xilinx shall not be used
28 * in advertising or otherwise to promote the sale, use or other dealings in
29 * this Software without prior written authorization from Xilinx.
31 ******************************************************************************/
32 /*****************************************************************************/
37 * Contains required functions for the ARM cache functionality.
40 * MODIFICATION HISTORY:
42 * Ver Who Date Changes
43 * ----- ---- -------- -----------------------------------------------
44 * 1.00a ecm 01/29/10 First release
45 * 1.00a ecm 06/24/10 Moved the L1 and L2 specific function prototypes
46 * to xil_cache_mach.h to give access to sophisticated users
47 * 3.02a sdm 04/07/11 Updated Flush/InvalidateRange APIs to flush/invalidate
48 * L1 and L2 caches in a single loop and used dsb, L2 sync
49 * at the end of the loop.
50 * 3.04a sdm 01/02/12 Remove redundant dsb/dmb instructions in cache maintenance
52 * 3.07a asa 07/16/12 Corrected the L1 and L2 cache invalidation order.
53 * 3.07a sgd 09/18/12 Corrected the L2 cache enable and disable sequence.
54 * 3.10a srt 04/18/13 Implemented ARM Erratas. Please refer to file
55 * 'xil_errata.h' for errata description
56 * 3.10a asa 05/13/13 Modified cache disable APIs. The L2 cache disable
57 * operation was being done with L1 Data cache disabled. This is
58 * fixed so that L2 cache disable operation happens independent of
59 * L1 cache disable operation. This fixes CR #706464.
60 * Changes are done to do a L2 cache sync (poll reg7_?cache_?sync).
61 * This is done to fix the CR #700542.
62 * 3.11a asa 09/23/13 Modified the Xil_DCacheFlushRange and
63 * Xil_DCacheInvalidateRange to fix potential issues. Fixed other
64 * relevant cache APIs to disable and enable back the interrupts.
65 * This fixes CR #663885.
66 * 3.11a asa 09/28/13 Made changes for L2 cache sync operation. It is found
67 * out that for L2 cache flush/clean/invalidation by cache lines
68 * does not need a cache sync as these are atomic nature. Similarly
69 * figured out that for complete L2 cache flush/invalidation by way
70 * we need to wait for some more time in a loop till the status
71 * shows that the cache operation is completed.
72 * 4.00 pkp 24/01/14 Modified Xil_DCacheInvalidateRange to fix the bug. Few
73 * cache lines were missed to invalidate when unaligned address
74 * invalidation was accommodated. That fixes CR #766768.
75 * Also in Xil_L1DCacheInvalidate, while invalidating all L1D cache
76 * stack memory which contains return address was invalidated. So
77 * stack memory was flushed first and then L1D cache is invalidated.
78 * This is done to fix CR #763829
79 * 4.01 asa 05/09/14 Made changes in cortexa9/xil_cache.c to fix CR# 798230.
80 * 4.02 pkp 06/27/14 Added notes to Xil_L1DCacheInvalidateRange function for
81 * explanation of CR#785243
82 * 5.00 kvn 12/15/14 Xil_L2CacheInvalidate was modified to fix CR# 838835. L2 Cache
83 * has stack memory which has return address. Before invalidating
84 * cache, stack memory was flushed first and L2 Cache is invalidated.
85 * 5.01 pkp 05/12/15 Xil_DCacheInvalidateRange and Xil_DCacheFlushRange is modified
86 * to remove unnecessary dsb in the APIs. Instead of using dsb
87 * for L2 Cache, L2CacheSync has been used for each L2 cache line
88 * and single dsb has been used for L1 cache. Also L2CacheSync is
89 * added into Xil_L2CacheInvalidateRange API. Xil_L1DCacheInvalidate
90 * and Xil_L2CacheInvalidate APIs are modified to flush the complete
91 * stack instead of just System Stack
92 * 5.03 pkp 10/07/15 L2 Cache functionalities are avoided for the OpenAMP slave
93 * application(when USE_AMP flag is defined for BSP) as master CPU
94 * would be utilizing L2 cache for its operation
95 * 6.6 mus 12/07/17 Errata 753970 is not applicable for the PL130 cache controller
96 * version r0p2, which is present in zynq. So,removed the handling
97 * related to same.It fixes CR#989132.
98 * 6.6 asa 16/01/18 Changes made in Xil_L1DCacheInvalidate and Xil_L2CacheInvalidate
99 * routines to ensure the stack data flushed only when the respective
100 * caches are enabled. This fixes CR-992023.
104 ******************************************************************************/
106 /***************************** Include Files *********************************/
108 #include "xil_cache.h"
109 #include "xil_cache_l.h"
111 #include "xpseudo_asm.h"
112 #include "xparameters.h"
113 #include "xreg_cortexa9.h"
115 #include "xil_errata.h"
116 #include "xil_exception.h"
118 /************************** Function Prototypes ******************************/
120 /************************** Variable Definitions *****************************/
122 #define IRQ_FIQ_MASK 0xC0U /* Mask IRQ and FIQ interrupts in cpsr */
125 extern s32 _stack_end;
126 extern s32 __undef_stack;
130 /****************************************************************************
132 * Access L2 Debug Control Register.
134 * @param Value, value to be written to Debug Control Register.
140 ****************************************************************************/
142 static inline void Xil_L2WriteDebugCtrl(u32 Value)
/* NOTE(review): the two signatures here are alternate toolchain-specific
 * definitions (inline vs. non-inline); the selecting #if/#else/#endif
 * directives are not visible in this extract -- confirm against the
 * pristine source. */
144 static void Xil_L2WriteDebugCtrl(u32 Value)
/* Write Value to the PL310 L2 Debug Control register. The write is compiled
 * in only when a PL310 errata workaround (588369 or 727915) is configured;
 * otherwise the function is effectively a no-op. */
147 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
148 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_DEBUG_CTRL_OFFSET, Value);
154 /****************************************************************************
156 * Perform L2 Cache Sync Operation.
164 ****************************************************************************/
166 static inline void Xil_L2CacheSync(void)
/* NOTE(review): alternate toolchain-specific definitions; the selecting
 * preprocessor directives are not visible in this extract. */
168 static void Xil_L2CacheSync(void)
/* Perform an L2 cache sync: writing 0 to the PL310 Cache Sync register
 * drains any pending L2 maintenance operations. */
171 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_SYNC_OFFSET, 0x0U);
174 /****************************************************************************/
176 * @brief Enable the Data cache.
184 ****************************************************************************/
185 void Xil_DCacheEnable(void)
/* Enable the data cache, starting with the L1 D-cache.
 * NOTE(review): braces and the remainder of the body (presumably an L2
 * enable call) are not visible in this extract -- confirm. */
187 Xil_L1DCacheEnable();
193 /****************************************************************************/
195 * @brief Disable the Data cache.
203 ****************************************************************************/
204 void Xil_DCacheDisable(void)
/* Disable the data cache: L2 first, then L1. Per the 3.10a history note the
 * two disables are intentionally independent of each other (CR #706464). */
207 Xil_L2CacheDisable();
209 Xil_L1DCacheDisable();
212 /****************************************************************************/
214 * @brief Invalidate the entire Data cache.
222 ****************************************************************************/
223 void Xil_DCacheInvalidate(void)
/* Invalidate the entire data cache with IRQ/FIQ masked so the sequence is
 * not interrupted. NOTE(review): the declaration of currmask and the
 * restoring mtcpsr(currmask) are not visible in this extract. */
228 mtcpsr(currmask | IRQ_FIQ_MASK);
/* L2 is invalidated before L1 (ordering correction from the 3.07a change). */
230 Xil_L2CacheInvalidate();
232 Xil_L1DCacheInvalidate();
237 /*****************************************************************************/
239 * @brief Invalidate a Data cache line. If the byte specified by the address
240 * (adr) is cached by the Data cache, the cacheline containing that
241 * byte is invalidated. If the cacheline is modified (dirty), the
242 * modified contents are lost and are NOT written to the system memory
243 * before the line is invalidated.
245 * @param adr: 32bit address of the data to be flushed.
249 * @note The bottom 4 bits are set to 0, forced by architecture.
251 ****************************************************************************/
252 void Xil_DCacheInvalidateLine(u32 adr)
/* Invalidate (without writeback) the cache line containing adr, with IRQ/FIQ
 * masked. L2 line first, then L1 (same ordering as the full invalidate).
 * NOTE(review): currmask declaration/restore not visible in this extract. */
257 mtcpsr(currmask | IRQ_FIQ_MASK);
259 Xil_L2CacheInvalidateLine(adr);
261 Xil_L1DCacheInvalidateLine(adr);
267 /*****************************************************************************/
269 * @brief Invalidate the Data cache for the given address range.
270 * If the bytes specified by the address range are cached by the Data
271 * cache, the cachelines containing those bytes are invalidated. If
272 * the cachelines are modified (dirty), the modified contents are lost
273 * and NOT written to the system memory before the lines are
276 * In this function, if start address or end address is not aligned to
277 * cache-line, particular cache-line containing unaligned start or end
278 address is flushed first and then the others are invalidated, as
279 * invalidating the same unaligned cache line may result into loss of
280 * data. This issue raises few possibilities.
282 * If the address to be invalidated is not cache-line aligned, the
283 * following choices are available:
284 * 1. Invalidate the cache line when required and do not bother much
285 * for the side effects. Though it sounds good, it can result in
286 * hard-to-debug issues. The problem is, if some other variable are
287 * allocated in the same cache line and had been recently updated
288 * (in cache), the invalidation would result in loss of data.
289 * 2. Flush the cache line first. This will ensure that if any other
290 * variable present in the same cache line and updated recently are
291 * flushed out to memory. Then it can safely be invalidated. Again it
292 * sounds good, but this can result in issues. For example, when the
293 * invalidation happens in a typical ISR (after a DMA transfer has
294 updated the memory), then flushing the cache line means, losing
295 * data that were updated recently before the ISR got invoked.
297 * Linux prefers the second one. To have uniform implementation
298 * (across standalone and Linux), the second option is implemented.
299 * This being the case, the following needs to be taken care of:
300 * 1. Whenever possible, the addresses must be cache line aligned.
301 * Please note that, not just start address, even the end address must
302 * be cache line aligned. If that is taken care of, this will always
304 * 2. Avoid situations where invalidation has to be done after the
305 * data is updated by peripheral/DMA directly into the memory. It is
306 * not tough to achieve (may be a bit risky). The common use case to
307 * do invalidation is when a DMA happens. Generally for such use
308 * cases, buffers can be allocated first and then start the DMA. The
309 * practice that needs to be followed here is, immediately after
310 * buffer allocation and before starting the DMA, do the invalidation.
311 * With this approach, invalidation need not to be done after the DMA
314 * This is going to always work if done carefully.
315 * However, the concern is, there is no guarantee that invalidate has
316 * not needed to be done after DMA is complete. For example, because
317 * of some reasons if the first cache line or last cache line
318 * (assuming the buffer in question comprises of multiple cache lines)
319 * are brought into cache (between the time it is invalidated and DMA
320 * completes) because of some speculative prefetching or reading data
321 * for a variable present in the same cache line, then we will have to
322 * invalidate the cache after DMA is complete.
325 * @param adr: 32bit start address of the range to be invalidated.
326 * @param len: Length of the range to be invalidated in bytes.
332 ****************************************************************************/
333 void Xil_DCacheInvalidateRange(INTPTR adr, u32 len)
/* Invalidate [adr, adr+len) in both cache levels. Unaligned first/last lines
 * are flushed (clean+invalidate) instead of just invalidated, so that dirty
 * data belonging to neighbouring variables in the same line is not lost (see
 * the long rationale in the function header above).
 * NOTE(review): declarations of tempadr/tempend/currmask and the restoring
 * mtcpsr are not visible in this extract. */
335 const u32 cacheline = 32U;
/* Direct pointer to the PL310 "invalidate line by PA" register; writing a
 * physical address to it invalidates that L2 line. */
340 volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
341 XPS_L2CC_CACHE_INVLD_PA_OFFSET);
344 mtcpsr(currmask | IRQ_FIQ_MASK);
349 /* Select L1 Data cache in CSSR */
350 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
/* If the start address is not line-aligned, align it down and FLUSH that
 * first line (L1 then L2) rather than invalidating it. */
352 if ((tempadr & (cacheline-1U)) != 0U) {
353 tempadr &= (~(cacheline - 1U));
355 Xil_L1DCacheFlushLine(tempadr);
357 /* Disable Write-back and line fills */
/* PL310 errata workaround: debug-ctrl 0x3 around the L2 line flush. */
358 Xil_L2WriteDebugCtrl(0x3U);
359 Xil_L2CacheFlushLine(tempadr);
360 /* Enable Write-back and line fills */
361 Xil_L2WriteDebugCtrl(0x0U);
364 tempadr += cacheline;
/* Same treatment for a partially-covered final line. */
366 if ((tempend & (cacheline-1U)) != 0U) {
367 tempend &= (~(cacheline - 1U));
369 Xil_L1DCacheFlushLine(tempend);
371 /* Disable Write-back and line fills */
372 Xil_L2WriteDebugCtrl(0x3U);
373 Xil_L2CacheFlushLine(tempend);
374 /* Enable Write-back and line fills */
375 Xil_L2WriteDebugCtrl(0x0U);
/* Main loop: fully-covered lines are invalidated outright, L2 then L1. */
380 while (tempadr < tempend) {
382 /* Invalidate L2 cache line */
383 *L2CCOffset = tempadr;
387 /* Invalidate L1 Data cache line */
388 #if defined (__GNUC__) || defined (__ICCARM__)
389 asm_cp15_inval_dc_line_mva_poc(tempadr);
/* NOTE(review): the #else branch for other toolchains and the closing
 * #endif are not fully visible in this extract. */
391 { volatile register u32 Reg
392 __asm(XREG_CP15_INVAL_DC_LINE_MVA_POC);
395 tempadr += cacheline;
403 /****************************************************************************/
405 * @brief Flush the entire Data cache.
413 ****************************************************************************/
414 void Xil_DCacheFlush(void)
/* Flush the entire data cache with IRQ/FIQ masked.
 * NOTE(review): the rest of the body (the L1/L2 flush calls and the mask
 * restore) is not visible in this extract -- confirm against the pristine
 * source. */
419 mtcpsr(currmask | IRQ_FIQ_MASK);
428 /****************************************************************************/
430 * @brief Flush a Data cache line. If the byte specified by the address (adr)
431 * is cached by the Data cache, the cacheline containing that byte is
432 * invalidated. If the cacheline is modified (dirty), the entire
433 * contents of the cacheline are written to system memory before the
434 * line is invalidated.
436 * @param adr: 32bit address of the data to be flushed.
440 * @note The bottom 4 bits are set to 0, forced by architecture.
442 ****************************************************************************/
443 void Xil_DCacheFlushLine(u32 adr)
/* Flush (clean + invalidate) the line containing adr: L1 first, then L2,
 * with IRQ/FIQ masked. NOTE(review): currmask declaration/restore not
 * visible in this extract. */
448 mtcpsr(currmask | IRQ_FIQ_MASK);
449 Xil_L1DCacheFlushLine(adr);
451 /* Disable Write-back and line fills */
/* PL310 errata workaround around the L2 line flush (debug-ctrl 0x3/0x0). */
452 Xil_L2WriteDebugCtrl(0x3U);
454 Xil_L2CacheFlushLine(adr);
456 /* Enable Write-back and line fills */
457 Xil_L2WriteDebugCtrl(0x0U);
463 /****************************************************************************/
465 * @brief Flush the Data cache for the given address range.
466 * If the bytes specified by the address range are cached by the
467 * data cache, the cachelines containing those bytes are invalidated.
468 * If the cachelines are modified (dirty), they are written to the
469 * system memory before the lines are invalidated.
471 * @param adr: 32bit start address of the range to be flushed.
472 * @param len: Length of the range to be flushed in bytes.
478 ****************************************************************************/
479 void Xil_DCacheFlushRange(INTPTR adr, u32 len)
/* Flush (clean + invalidate) [adr, adr+len) in both cache levels, line by
 * line: L1 line via CP15, then the matching L2 line via a direct write to
 * the PL310 clean+invalidate-by-PA register.
 * NOTE(review): declarations of LocalAddr/end/currmask and the mask restore
 * are not visible in this extract. */
482 const u32 cacheline = 32U;
485 volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
486 XPS_L2CC_CACHE_INV_CLN_PA_OFFSET);
489 mtcpsr(currmask | IRQ_FIQ_MASK);
492 /* Back the starting address up to the start of a cache line
493 * perform cache operations until adr+len
495 end = LocalAddr + len;
496 LocalAddr &= ~(cacheline - 1U);
498 while (LocalAddr < end) {
500 /* Flush L1 Data cache line */
501 #if defined (__GNUC__) || defined (__ICCARM__)
502 asm_cp15_clean_inval_dc_line_mva_poc(LocalAddr);
/* NOTE(review): alternate-toolchain #else branch; closing #endif not
 * visible in this extract. */
504 { volatile register u32 Reg
505 __asm(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC);
509 /* Flush L2 cache line */
510 *L2CCOffset = LocalAddr;
513 LocalAddr += cacheline;
519 /****************************************************************************/
521 * @brief Store a Data cache line. If the byte specified by the address (adr)
522 * is cached by the Data cache and the cacheline is modified (dirty),
523 * the entire contents of the cacheline are written to system memory.
524 * After the store completes, the cacheline is marked as unmodified
527 * @param adr: 32bit address of the data to be stored.
531 * @note The bottom 4 bits are set to 0, forced by architecture.
533 ****************************************************************************/
534 void Xil_DCacheStoreLine(u32 adr)
/* Store (clean without invalidating) the line containing adr: dirty contents
 * are written back and the line is marked clean. L1 first, then L2, with
 * IRQ/FIQ masked. NOTE(review): currmask declaration/restore not visible. */
539 mtcpsr(currmask | IRQ_FIQ_MASK);
541 Xil_L1DCacheStoreLine(adr);
543 Xil_L2CacheStoreLine(adr);
548 /***************************************************************************/
550 * @brief Enable the instruction cache.
558 ****************************************************************************/
559 void Xil_ICacheEnable(void)
/* Enable the instruction cache, starting with L1.
 * NOTE(review): braces and the remainder of the body (presumably an L2
 * enable call) are not visible in this extract -- confirm. */
561 Xil_L1ICacheEnable();
567 /***************************************************************************/
569 * @brief Disable the instruction cache.
577 ****************************************************************************/
578 void Xil_ICacheDisable(void)
/* Disable the instruction cache: the (shared) L2 cache is disabled first,
 * then the L1 I-cache. */
581 Xil_L2CacheDisable();
583 Xil_L1ICacheDisable();
587 /****************************************************************************/
589 * @brief Invalidate the entire instruction cache.
597 ****************************************************************************/
598 void Xil_ICacheInvalidate(void)
/* Invalidate the whole instruction cache path with IRQ/FIQ masked: the
 * (unified) L2 first, then the L1 I-cache. NOTE(review): currmask
 * declaration/restore not visible in this extract. */
603 mtcpsr(currmask | IRQ_FIQ_MASK);
605 Xil_L2CacheInvalidate();
607 Xil_L1ICacheInvalidate();
612 /****************************************************************************/
614 * @brief Invalidate an instruction cache line. If the instruction specified
615 * by the address is cached by the instruction cache, the cacheline
616 * containing that instruction is invalidated.
618 * @param adr: 32bit address of the instruction to be invalidated.
622 * @note The bottom 4 bits are set to 0, forced by architecture.
624 ****************************************************************************/
625 void Xil_ICacheInvalidateLine(u32 adr)
/* Invalidate the instruction-cache line containing adr, with IRQ/FIQ masked:
 * L2 line first, then the L1 I-cache line. NOTE(review): currmask
 * declaration/restore not visible in this extract. */
630 mtcpsr(currmask | IRQ_FIQ_MASK);
632 Xil_L2CacheInvalidateLine(adr);
634 Xil_L1ICacheInvalidateLine(adr);
638 /****************************************************************************/
640 * @brief Invalidate the instruction cache for the given address range.
641 * If the instructions specified by the address range are cached by
642 * the instruction cache, the cachelines containing those
643 * instructions are invalidated.
645 * @param adr: 32bit start address of the range to be invalidated.
646 * @param len: Length of the range to be invalidated in bytes.
652 ****************************************************************************/
653 void Xil_ICacheInvalidateRange(INTPTR adr, u32 len)
/* Invalidate instruction-cache lines covering [adr, adr+len): each L2 line
 * via the PL310 invalidate-by-PA register, then the L1 I-cache line via
 * CP15, with IRQ/FIQ masked.
 * NOTE(review): declarations of LocalAddr/end/currmask and the mask restore
 * are not visible in this extract. */
656 const u32 cacheline = 32U;
658 volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
659 XPS_L2CC_CACHE_INVLD_PA_OFFSET);
664 mtcpsr(currmask | IRQ_FIQ_MASK);
666 /* Back the starting address up to the start of a cache line
667 * perform cache operations until adr+len
669 end = LocalAddr + len;
670 LocalAddr = LocalAddr & ~(cacheline - 1U);
672 /* Select cache L0 I-cache in CSSR */
673 mtcp(XREG_CP15_CACHE_SIZE_SEL, 1U);
675 while (LocalAddr < end) {
677 /* Invalidate L2 cache line */
678 *L2CCOffset = LocalAddr;
682 /* Invalidate L1 I-cache line */
683 #if defined (__GNUC__) || defined (__ICCARM__)
684 asm_cp15_inval_ic_line_mva_pou(LocalAddr);
/* NOTE(review): alternate-toolchain #else branch; closing #endif not
 * visible in this extract. */
686 { volatile register u32 Reg
687 __asm(XREG_CP15_INVAL_IC_LINE_MVA_POU);
691 LocalAddr += cacheline;
695 /* Wait for L1 and L2 invalidate to complete */
700 /****************************************************************************/
702 * @brief Enable the level 1 Data cache.
710 ****************************************************************************/
711 void Xil_L1DCacheEnable(void)
/* Enable the L1 data cache: read SCTLR, bail out if the C bit is already
 * set, otherwise invalidate the D-cache and set the C bit. */
713 register u32 CtrlReg;
715 /* enable caches only if they are disabled */
/* NOTE(review): the opening "#if defined (__GNUC__)" matching the #elif
 * below is not visible in this extract. */
717 CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
718 #elif defined (__ICCARM__)
719 mfcp(XREG_CP15_SYS_CONTROL, CtrlReg);
721 { volatile register u32 Reg __asm(XREG_CP15_SYS_CONTROL);
/* Already enabled -- nothing to do (early-return body not visible here). */
724 if ((CtrlReg & (XREG_CP15_CONTROL_C_BIT)) != 0U) {
728 /* clean and invalidate the Data cache */
729 Xil_L1DCacheInvalidate();
731 /* enable the Data cache */
732 CtrlReg |= (XREG_CP15_CONTROL_C_BIT);
734 mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
737 /***************************************************************************/
739 * @brief Disable the level 1 Data cache.
747 ****************************************************************************/
748 void Xil_L1DCacheDisable(void)
/* Disable the L1 data cache: (clean and invalidate it first -- that call is
 * not visible in this extract), then clear the SCTLR C bit. */
750 register u32 CtrlReg;
752 /* clean and invalidate the Data cache */
756 /* disable the Data cache */
/* NOTE(review): the opening "#if defined (__GNUC__)" matching the #elif
 * below is not visible in this extract. */
757 CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
758 #elif defined (__ICCARM__)
759 mfcp(XREG_CP15_SYS_CONTROL, CtrlReg);
761 { volatile register u32 Reg __asm(XREG_CP15_SYS_CONTROL);
765 CtrlReg &= ~(XREG_CP15_CONTROL_C_BIT);
767 mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
770 /****************************************************************************/
772 * @brief Invalidate the level 1 Data cache.
778 * @note In Cortex A9, there is no cp instruction for invalidating
779 * the whole D-cache. This function invalidates each line by
782 ****************************************************************************/
783 void Xil_L1DCacheInvalidate(void)
/* Invalidate the whole L1 D-cache by set/way (Cortex-A9 has no single
 * "invalidate all D-cache" CP15 op). The stack region is flushed first so
 * the caller's return address is not lost (CR #763829), and only when the
 * D-cache is actually enabled (CR-992023). Runs with IRQ/FIQ masked. */
785 register u32 CsidReg, C7Reg;
786 u32 CacheSize, LineSize, NumWays;
787 u32 Way, WayIndex, Set, SetIndex, NumSet;
791 u32 stack_start,stack_end,stack_size;
792 register u32 CtrlReg;
796 mtcpsr(currmask | IRQ_FIQ_MASK);
/* Stack bounds come from linker symbols; note the stack grows downward so
 * __undef_stack is the high address and _stack_end the low one. */
799 stack_end = (u32)&_stack_end;
800 stack_start = (u32)&__undef_stack;
801 stack_size=stack_start-stack_end;
803 /* Check for the cache status. If cache is enabled, then only
804 * flush stack memory to save return address. If cache is disabled,
805 * don't flush anything as it might result in flushing stale data into
806 * memory which is undesirable.
808 CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
809 if ((CtrlReg & (XREG_CP15_CONTROL_C_BIT)) != 0U) {
810 Xil_DCacheFlushRange(stack_end, stack_size);
814 /* Select cache level 0 and D cache in CSSR */
815 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
/* NOTE(review): the opening "#if defined (__GNUC__)" matching the #elif
 * below is not visible in this extract. */
818 CsidReg = mfcp(XREG_CP15_CACHE_SIZE_ID)
819 #elif defined (__ICCARM__)
820 mfcp(XREG_CP15_CACHE_SIZE_ID, CsidReg);
822 { volatile register u32 Reg __asm(XREG_CP15_CACHE_SIZE_ID);
825 /* Determine Cache Size */
/* CCSIDR decode: NumSets field in bits [27:13]; *128 scales (sets+1 style
 * field arithmetic elsewhere, per the original implementation) to bytes. */
826 CacheSize = (CsidReg >> 13U) & 0x1FFU;
828 CacheSize *=128U; /* to get number of bytes */
/* Associativity field in bits [12:3]. */
831 NumWays = (CsidReg & 0x3ffU) >> 3U;
834 /* Get the cacheline size, way size, index size from csidr */
835 LineSize = (CsidReg & 0x07U) + 4U;
837 NumSet = CacheSize/NumWays;
838 NumSet /= (0x00000001U << LineSize);
843 /* Invalidate all the cachelines */
844 for (WayIndex =0U; WayIndex < NumWays; WayIndex++) {
845 for (SetIndex =0U; SetIndex < NumSet; SetIndex++) {
848 /* Invalidate by Set/Way */
849 #if defined (__GNUC__) || defined (__ICCARM__)
850 asm_cp15_inval_dc_line_sw(C7Reg);
852 /*mtcp(XREG_CP15_INVAL_DC_LINE_SW, C7Reg), */
853 { volatile register u32 Reg
854 __asm(XREG_CP15_INVAL_DC_LINE_SW);
857 Set += (0x00000001U << LineSize);
863 /* Wait for L1 invalidate to complete */
868 /****************************************************************************/
870 * @brief Invalidate a level 1 Data cache line. If the byte specified by the
871 * address (Addr) is cached by the Data cache, the cacheline
872 * containing that byte is invalidated. If the cacheline is modified
873 * (dirty), the modified contents are lost and are NOT written to
874 * system memory before the line is invalidated.
876 * @param adr: 32bit address of the data to be invalidated.
880 * @note The bottom 5 bits are set to 0, forced by architecture.
882 ****************************************************************************/
883 void Xil_L1DCacheInvalidateLine(u32 adr)
/* Select the L1 D-cache in CSSR, then invalidate by MVA; the low 5 address
 * bits are cleared (32-byte line, architecturally forced). */
885 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
886 mtcp(XREG_CP15_INVAL_DC_LINE_MVA_POC, (adr & (~0x1FU)));
888 /* Wait for L1 invalidate to complete */
892 /****************************************************************************/
894 * @brief Invalidate the level 1 Data cache for the given address range.
895 * If the bytes specified by the address range are cached by the Data
896 * cache, the cachelines containing those bytes are invalidated. If the
897 * cachelines are modified (dirty), the modified contents are lost and
898 * NOT written to the system memory before the lines are invalidated.
900 * @param adr: 32bit start address of the range to be invalidated.
901 * @param len: Length of the range to be invalidated in bytes.
907 ****************************************************************************/
908 void Xil_L1DCacheInvalidateRange(u32 adr, u32 len)
/* Invalidate L1 D-cache lines covering [adr, adr+len), with IRQ/FIQ masked.
 * Unlike the combined-level range API, unaligned edge lines are invalidated
 * outright here (see CR#785243 note in the original header).
 * NOTE(review): declarations of LocalAddr/end/currmask and the mask restore
 * are not visible in this extract. */
911 const u32 cacheline = 32U;
916 mtcpsr(currmask | IRQ_FIQ_MASK);
919 /* Back the starting address up to the start of a cache line
920 * perform cache operations until adr+len
922 end = LocalAddr + len;
923 LocalAddr = LocalAddr & ~(cacheline - 1U);
925 /* Select cache L0 D-cache in CSSR */
926 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
928 while (LocalAddr < end) {
930 #if defined (__GNUC__) || defined (__ICCARM__)
931 asm_cp15_inval_dc_line_mva_poc(LocalAddr);
/* NOTE(review): alternate-toolchain #else branch; closing #endif not
 * visible in this extract. */
933 { volatile register u32 Reg
934 __asm(XREG_CP15_INVAL_DC_LINE_MVA_POC);
937 LocalAddr += cacheline;
941 /* Wait for L1 invalidate to complete */
946 /****************************************************************************/
948 * @brief Flush the level 1 Data cache.
954 * @note In Cortex A9, there is no cp instruction for flushing
955 * the whole D-cache. Need to flush each line.
957 ****************************************************************************/
958 void Xil_L1DCacheFlush(void)
/* Flush (clean + invalidate) the whole L1 D-cache by set/way, with IRQ/FIQ
 * masked; geometry is decoded from CCSIDR exactly as in
 * Xil_L1DCacheInvalidate. */
960 register u32 CsidReg, C7Reg;
961 u32 CacheSize, LineSize, NumWays;
963 u32 WayIndex, Set, SetIndex, NumSet;
967 mtcpsr(currmask | IRQ_FIQ_MASK);
969 /* Select cache level 0 and D cache in CSSR */
970 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0);
/* NOTE(review): the opening "#if defined (__GNUC__)" matching the #elif
 * below is not visible in this extract. */
973 CsidReg = mfcp(XREG_CP15_CACHE_SIZE_ID);
974 #elif defined (__ICCARM__)
975 mfcp(XREG_CP15_CACHE_SIZE_ID, CsidReg);
977 { volatile register u32 Reg __asm(XREG_CP15_CACHE_SIZE_ID);
981 /* Determine Cache Size */
983 CacheSize = (CsidReg >> 13U) & 0x1FFU;
985 CacheSize *=128U; /* to get number of bytes */
988 NumWays = (CsidReg & 0x3ffU) >> 3U;
991 /* Get the cacheline size, way size, index size from csidr */
992 LineSize = (CsidReg & 0x07U) + 4U;
994 NumSet = CacheSize/NumWays;
995 NumSet /= (0x00000001U << LineSize);
1000 /* Invalidate all the cachelines */
1001 for (WayIndex =0U; WayIndex < NumWays; WayIndex++) {
1002 for (SetIndex =0U; SetIndex < NumSet; SetIndex++) {
1004 /* Flush by Set/Way */
1006 #if defined (__GNUC__) || defined (__ICCARM__)
1007 asm_cp15_clean_inval_dc_line_sw(C7Reg);
1009 { volatile register u32 Reg
1010 __asm(XREG_CP15_CLEAN_INVAL_DC_LINE_SW);
1013 Set += (0x00000001U << LineSize);
1019 /* Wait for L1 flush to complete */
1024 /****************************************************************************/
1026 * @brief Flush a level 1 Data cache line. If the byte specified by the
1027 * address (adr) is cached by the Data cache, the cacheline containing
1028 * that byte is invalidated. If the cacheline is modified (dirty), the
1029 * entire contents of the cacheline are written to system memory
1030 * before the line is invalidated.
1032 * @param adr: 32bit address of the data to be flushed.
1036 * @note The bottom 5 bits are set to 0, forced by architecture.
1038 ****************************************************************************/
1039 void Xil_L1DCacheFlushLine(u32 adr)
/* Select the L1 D-cache in CSSR, then clean + invalidate the line containing
 * adr by MVA; the low 5 address bits are cleared (32-byte line). */
1041 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
1042 mtcp(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC, (adr & (~0x1FU)));
1044 /* Wait for L1 flush to complete */
1048 /****************************************************************************/
1050 * @brief Flush the level 1 Data cache for the given address range.
1051 * If the bytes specified by the address range are cached by the Data
1052 * cache, the cacheline containing those bytes are invalidated. If the
1053 * cachelines are modified (dirty), they are written to system memory
1054 * before the lines are invalidated.
1056 * @param adr: 32bit start address of the range to be flushed.
1057 * @param len: Length of the range to be flushed in bytes.
1063 ****************************************************************************/
1064 void Xil_L1DCacheFlushRange(u32 adr, u32 len)
/* Clean + invalidate L1 D-cache lines covering [adr, adr+len), line by line
 * by MVA, with IRQ/FIQ masked.
 * NOTE(review): declarations of end/currmask and the mask restore are not
 * visible in this extract. */
1066 u32 LocalAddr = adr;
1067 const u32 cacheline = 32U;
1071 currmask = mfcpsr();
1072 mtcpsr(currmask | IRQ_FIQ_MASK);
1075 /* Back the starting address up to the start of a cache line
1076 * perform cache operations until adr+len
1078 end = LocalAddr + len;
1079 LocalAddr = LocalAddr & ~(cacheline - 1U);
1081 /* Select cache L0 D-cache in CSSR */
1082 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
1084 while (LocalAddr < end) {
1086 #if defined (__GNUC__) || defined (__ICCARM__)
1087 asm_cp15_clean_inval_dc_line_mva_poc(LocalAddr);
/* NOTE(review): alternate-toolchain #else branch; closing #endif not
 * visible in this extract. */
1089 { volatile register u32 Reg
1090 __asm(XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC);
1093 LocalAddr += cacheline;
1097 /* Wait for L1 flush to complete */
1102 /****************************************************************************/
1104 * @brief Store a level 1 Data cache line. If the byte specified by the
1105 * address (adr) is cached by the Data cache and the cacheline is
1106 * modified (dirty), the entire contents of the cacheline are written
1107 * to system memory. After the store completes, the cacheline is
1108 * marked as unmodified (not dirty).
1110 * @param Address to be stored.
1114 * @note The bottom 5 bits are set to 0, forced by architecture.
1116 ****************************************************************************/
1117 void Xil_L1DCacheStoreLine(u32 adr)
/* Clean (write back without invalidating) the L1 D-cache line containing
 * adr; the line stays resident but is marked not-dirty. Low 5 address bits
 * are cleared (32-byte line). */
1119 mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);
1120 mtcp(XREG_CP15_CLEAN_DC_LINE_MVA_POC, (adr & (~0x1FU)));
1122 /* Wait for L1 store to complete */
1127 /****************************************************************************/
1129 * @brief Enable the level 1 instruction cache.
1137 ****************************************************************************/
1138 void Xil_L1ICacheEnable(void)
/* Enable the L1 instruction cache: read SCTLR, bail out if the I bit is
 * already set, otherwise invalidate the I-cache and set the I bit. */
1140 register u32 CtrlReg;
1142 /* enable caches only if they are disabled */
/* NOTE(review): the opening "#if defined (__GNUC__)" matching the #elif
 * below is not visible in this extract. */
1144 CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
1145 #elif defined (__ICCARM__)
1146 mfcp(XREG_CP15_SYS_CONTROL, CtrlReg);
1148 { volatile register u32 Reg __asm(XREG_CP15_SYS_CONTROL);
/* Already enabled -- nothing to do (early-return body not visible here). */
1151 if ((CtrlReg & (XREG_CP15_CONTROL_I_BIT)) != 0U) {
1155 /* invalidate the instruction cache */
1156 mtcp(XREG_CP15_INVAL_IC_POU, 0U);
1158 /* enable the instruction cache */
1159 CtrlReg |= (XREG_CP15_CONTROL_I_BIT);
1161 mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
1164 /****************************************************************************/
1166 * @brief Disable level 1 the instruction cache.
1174 ****************************************************************************/
1175 void Xil_L1ICacheDisable(void)
/* Disable the L1 instruction cache: invalidate it first, then clear the
 * SCTLR I bit. */
1177 register u32 CtrlReg;
1181 /* invalidate the instruction cache */
1182 mtcp(XREG_CP15_INVAL_IC_POU, 0U);
1184 /* disable the instruction cache */
/* NOTE(review): the opening "#if defined (__GNUC__)" matching the #elif
 * below is not visible in this extract. */
1186 CtrlReg = mfcp(XREG_CP15_SYS_CONTROL);
1187 #elif defined (__ICCARM__)
1188 mfcp(XREG_CP15_SYS_CONTROL, CtrlReg);
1190 { volatile register u32 Reg __asm(XREG_CP15_SYS_CONTROL);
1193 CtrlReg &= ~(XREG_CP15_CONTROL_I_BIT);
1195 mtcp(XREG_CP15_SYS_CONTROL, CtrlReg);
1198 /****************************************************************************/
1200 * @brief Invalidate the entire level 1 instruction cache.
1208 ****************************************************************************/
1209 void Xil_L1ICacheInvalidate(void)
/* Select the I-cache in CSSR, then invalidate the entire L1 instruction
 * cache to the point of unification with a single CP15 op. */
1211 mtcp(XREG_CP15_CACHE_SIZE_SEL, 1U);
1212 /* invalidate the instruction cache */
1213 mtcp(XREG_CP15_INVAL_IC_POU, 0U);
1215 /* Wait for L1 invalidate to complete */
1219 /****************************************************************************/
1221 * @brief Invalidate a level 1 instruction cache line. If the instruction
1222 * specified by the address is cached by the instruction cache, the
1223 * cacheline containing that instruction is invalidated.
1225 * @param adr: 32bit address of the instruction to be invalidated.
1229 * @note The bottom 5 bits are set to 0, forced by architecture.
1231 ****************************************************************************/
1232 void Xil_L1ICacheInvalidateLine(u32 adr)
1234 mtcp(XREG_CP15_CACHE_SIZE_SEL, 1U);
1235 mtcp(XREG_CP15_INVAL_IC_LINE_MVA_POU, (adr & (~0x1FU)));
1237 /* Wait for L1 invalidate to complete */
1241 /****************************************************************************/
1243 * @brief Invalidate the level 1 instruction cache for the given address
1244 * range. If the instrucions specified by the address range are cached
1245 * by the instruction cache, the cacheline containing those bytes are
1248 * @param adr: 32bit start address of the range to be invalidated.
1249 * @param len: Length of the range to be invalidated in bytes.
1255 ****************************************************************************/
1256 void Xil_L1ICacheInvalidateRange(u32 adr, u32 len)
1258 u32 LocalAddr = adr;
1259 const u32 cacheline = 32U;
1263 currmask = mfcpsr();
1264 mtcpsr(currmask | IRQ_FIQ_MASK);
1267 /* Back the starting address up to the start of a cache line
1268 * perform cache operations until adr+len
1270 end = LocalAddr + len;
1271 LocalAddr = LocalAddr & ~(cacheline - 1U);
1273 /* Select cache L0 I-cache in CSSR */
1274 mtcp(XREG_CP15_CACHE_SIZE_SEL, 1U);
1276 while (LocalAddr < end) {
1278 #if defined (__GNUC__) || defined (__ICCARM__)
1279 asm_cp15_inval_ic_line_mva_pou(LocalAddr);
1281 { volatile register u32 Reg
1282 __asm(XREG_CP15_INVAL_IC_LINE_MVA_POU);
1285 LocalAddr += cacheline;
1289 /* Wait for L1 invalidate to complete */
1295 /****************************************************************************/
1297 * @brief Enable the L2 cache.
1305 ****************************************************************************/
1306 void Xil_L2CacheEnable(void)
1308 register u32 L2CCReg;
1310 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);
1312 /* only enable if L2CC is currently disabled */
1313 if ((L2CCReg & 0x01U) == 0U) {
1314 /* set up the way size and latencies */
1315 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR +
1316 XPS_L2CC_AUX_CNTRL_OFFSET);
1317 L2CCReg &= XPS_L2CC_AUX_REG_ZERO_MASK;
1318 L2CCReg |= XPS_L2CC_AUX_REG_DEFAULT_MASK;
1319 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_AUX_CNTRL_OFFSET,
1321 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_TAG_RAM_CNTRL_OFFSET,
1322 XPS_L2CC_TAG_RAM_DEFAULT_MASK);
1323 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_DATA_RAM_CNTRL_OFFSET,
1324 XPS_L2CC_DATA_RAM_DEFAULT_MASK);
1326 /* Clear the pending interrupts */
1327 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR +
1328 XPS_L2CC_ISR_OFFSET);
1329 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_IAR_OFFSET, L2CCReg);
1331 Xil_L2CacheInvalidate();
1332 /* Enable the L2CC */
1333 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR +
1334 XPS_L2CC_CNTRL_OFFSET);
1335 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET,
1336 (L2CCReg | (0x01U)));
1339 /* synchronize the processor */
1345 /****************************************************************************/
1347 * @brief Disable the L2 cache.
1355 ****************************************************************************/
1356 void Xil_L2CacheDisable(void)
1358 register u32 L2CCReg;
1360 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);
1362 if((L2CCReg & 0x1U) != 0U) {
1364 /* Clean and Invalidate L2 Cache */
1367 /* Disable the L2CC */
1368 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);
1369 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET,
1370 (L2CCReg & (~0x01U)));
1371 /* Wait for the cache operations to complete */
1377 /*****************************************************************************/
1379 * @brief Invalidate the entire level 2 cache.
1387 ****************************************************************************/
1388 void Xil_L2CacheInvalidate(void)
1391 u32 stack_start,stack_end,stack_size;
1392 register u32 L2CCReg;
1393 stack_end = (u32)&_stack_end;
1394 stack_start = (u32)&__undef_stack;
1395 stack_size=stack_start-stack_end;
1397 /* Check for the cache status. If cache is enabled, then only
1398 * flush stack memory to save return address. If cache is disabled,
1399 * dont flush anything as it might result in flushing stale date into
1400 * memory which is undesirable.
1402 L2CCReg = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CNTRL_OFFSET);
1403 if ((L2CCReg & 0x01U) != 0U) {
1404 /*Flush stack memory to save return address*/
1405 Xil_DCacheFlushRange(stack_end, stack_size);
1410 /* Invalidate the caches */
1411 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_WAY_OFFSET,
1413 ResultDCache = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_WAY_OFFSET)
1415 while(ResultDCache != (u32)0U) {
1416 ResultDCache = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_WAY_OFFSET)
1420 /* Wait for the invalidate to complete */
1423 /* synchronize the processor */
1427 /*****************************************************************************/
1429 * @brief Invalidate a level 2 cache line. If the byte specified by the
1430 * address (adr) is cached by the Data cache, the cacheline containing
1431 * that byte is invalidated. If the cacheline is modified (dirty),
1432 * the modified contents are lost and are NOT written to system memory
1433 * before the line is invalidated.
1435 * @param adr: 32bit address of the data/instruction to be invalidated.
1439 * @note The bottom 4 bits are set to 0, forced by architecture.
1441 ****************************************************************************/
1442 void Xil_L2CacheInvalidateLine(u32 adr)
1444 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_PA_OFFSET, (u32)adr);
1445 /* synchronize the processor */
1449 /****************************************************************************/
1451 * @brief Invalidate the level 2 cache for the given address range.
1452 * If the bytes specified by the address range are cached by the L2
1453 * cache, the cacheline containing those bytes are invalidated. If the
1454 * cachelines are modified (dirty), the modified contents are lost and
1455 * are NOT written to system memory before the lines are invalidated.
1457 * @param adr: 32bit start address of the range to be invalidated.
1458 * @param len: Length of the range to be invalidated in bytes.
1464 ****************************************************************************/
1465 void Xil_L2CacheInvalidateRange(u32 adr, u32 len)
1467 u32 LocalAddr = adr;
1468 const u32 cacheline = 32U;
1470 volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
1471 XPS_L2CC_CACHE_INVLD_PA_OFFSET);
1475 currmask = mfcpsr();
1476 mtcpsr(currmask | IRQ_FIQ_MASK);
1479 /* Back the starting address up to the start of a cache line
1480 * perform cache operations until adr+len
1482 end = LocalAddr + len;
1483 LocalAddr = LocalAddr & ~(cacheline - 1U);
1485 /* Disable Write-back and line fills */
1486 Xil_L2WriteDebugCtrl(0x3U);
1488 while (LocalAddr < end) {
1489 *L2CCOffset = LocalAddr;
1491 LocalAddr += cacheline;
1494 /* Enable Write-back and line fills */
1495 Xil_L2WriteDebugCtrl(0x0U);
1498 /* synchronize the processor */
1503 /****************************************************************************/
1505 * @brief Flush the entire level 2 cache.
1513 ****************************************************************************/
1514 void Xil_L2CacheFlush(void)
1518 /* Flush the caches */
1520 /* Disable Write-back and line fills */
1521 Xil_L2WriteDebugCtrl(0x3U);
1523 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_WAY_OFFSET,
1525 ResultL2Cache = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_WAY_OFFSET)
1528 while(ResultL2Cache != (u32)0U) {
1529 ResultL2Cache = Xil_In32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_WAY_OFFSET)
1534 /* Enable Write-back and line fills */
1535 Xil_L2WriteDebugCtrl(0x0U);
1537 /* synchronize the processor */
1541 /****************************************************************************/
1543 * @brief Flush a level 2 cache line. If the byte specified by the address
1544 * (adr) is cached by the L2 cache, the cacheline containing that
1545 * byte is invalidated. If the cacheline is modified (dirty), the
1546 * entire contents of the cacheline are written to system memory
1547 * before the line is invalidated.
1549 * @param adr: 32bit address of the data/instruction to be flushed.
1553 * @note The bottom 4 bits are set to 0, forced by architecture.
1555 ****************************************************************************/
1556 void Xil_L2CacheFlushLine(u32 adr)
1558 #ifdef CONFIG_PL310_ERRATA_588369
1559 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_CLEAN_PA_OFFSET, adr);
1560 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INVLD_PA_OFFSET, adr);
1562 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_INV_CLN_PA_OFFSET, adr);
1564 /* synchronize the processor */
1568 /****************************************************************************/
1570 * @brief Flush the level 2 cache for the given address range.
1571 * If the bytes specified by the address range are cached by the L2
1572 * cache, the cacheline containing those bytes are invalidated. If the
1573 * cachelines are modified (dirty), they are written to the system
1574 * memory before the lines are invalidated.
1576 * @param adr: 32bit start address of the range to be flushed.
1577 * @param len: Length of the range to be flushed in bytes.
1583 ****************************************************************************/
1584 void Xil_L2CacheFlushRange(u32 adr, u32 len)
1586 u32 LocalAddr = adr;
1587 const u32 cacheline = 32U;
1589 volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
1590 XPS_L2CC_CACHE_INV_CLN_PA_OFFSET);
1594 currmask = mfcpsr();
1595 mtcpsr(currmask | IRQ_FIQ_MASK);
1597 /* Back the starting address up to the start of a cache line
1598 * perform cache operations until adr+len
1600 end = LocalAddr + len;
1601 LocalAddr = LocalAddr & ~(cacheline - 1U);
1603 /* Disable Write-back and line fills */
1604 Xil_L2WriteDebugCtrl(0x3U);
1606 while (LocalAddr < end) {
1607 *L2CCOffset = LocalAddr;
1609 LocalAddr += cacheline;
1612 /* Enable Write-back and line fills */
1613 Xil_L2WriteDebugCtrl(0x0U);
1615 /* synchronize the processor */
1620 /****************************************************************************/
1622 * @brief Store a level 2 cache line. If the byte specified by the address
1623 * (adr) is cached by the L2 cache and the cacheline is modified
1624 * (dirty), the entire contents of the cacheline are written to
1625 * system memory. After the store completes, the cacheline is marked
1626 * as unmodified (not dirty).
1628 * @param adr: 32bit address of the data/instruction to be stored.
1632 * @note The bottom 4 bits are set to 0, forced by architecture.
1634 ****************************************************************************/
1635 void Xil_L2CacheStoreLine(u32 adr)
1637 Xil_Out32(XPS_L2CC_BASEADDR + XPS_L2CC_CACHE_CLEAN_PA_OFFSET, adr);
1638 /* synchronize the processor */