/******************************************************************************
*
* Copyright (C) 2014 - 2016 Xilinx, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* Use of the Software is limited solely to applications:
* (a) running on a Xilinx device, or
* (b) that interact with a Xilinx device through a bus or interconnect.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* XILINX BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Except as contained in this notice, the name of the Xilinx shall not be used
* in advertising or otherwise to promote the sale, use or other dealings in
* this Software without prior written authorization from Xilinx.
*
******************************************************************************/
/*****************************************************************************/
/**
* @file xil_cache.c
*
* Contains the required functions for the ARM Cortex-A53 (EL3) data and
* instruction cache functionality: enable/disable, invalidate, and flush
* operations, for the whole cache, single lines, and address ranges.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver   Who  Date     Changes
* ----- ---- -------- -----------------------------------------------
* 5.00  pkp  05/29/14 First release
* 5.05  pkp  04/15/16 Updated the Xil_DCacheInvalidate,
*                     Xil_DCacheInvalidateLine and Xil_DCacheInvalidateRange
*                     functions description for proper explanation
* </pre>
*
******************************************************************************/
54 /***************************** Include Files *********************************/
56 #include "xil_cache.h"
58 #include "xpseudo_asm.h"
59 #include "xparameters.h"
60 #include "xreg_cortexa53.h"
61 #include "xil_exception.h"
62 #include "bspconfig.h"
64 /************************** Function Prototypes ******************************/
66 /************************** Variable Definitions *****************************/
67 #define IRQ_FIQ_MASK 0xC0U /* Mask IRQ and FIQ interrupts in cpsr */
69 /****************************************************************************
71 * Enable the Data cache.
79 ****************************************************************************/
81 void Xil_DCacheEnable(void)
85 CtrlReg = mfcp(SCTLR_EL3);
87 /* enable caches only if they are disabled */
88 if((CtrlReg & XREG_CONTROL_DCACHE_BIT) == 0X00000000U){
90 /* invalidate the Data cache */
91 Xil_DCacheInvalidate();
93 CtrlReg |= XREG_CONTROL_DCACHE_BIT;
95 /* enable the Data cache for el3*/
96 mtcp(SCTLR_EL3,CtrlReg);
100 /****************************************************************************
102 * Disable the Data cache.
110 ****************************************************************************/
111 void Xil_DCacheDisable(void)
114 /* clean and invalidate the Data cache */
117 CtrlReg = mfcp(SCTLR_EL3);
119 CtrlReg &= ~(XREG_CONTROL_DCACHE_BIT);
121 /* disable the Data cache for el3*/
122 mtcp(SCTLR_EL3,CtrlReg);
125 /****************************************************************************
127 * Invalidate the Data cache. The contents present in the cache are cleaned and
134 * @note In Cortex-A53, functionality to simply invalide the cachelines
135 * is not present. Such operations are a problem for an environment
136 * that supports virtualisation. It would allow one OS to invalidate
137 * a line belonging to another OS which could lead to that OS crashing
138 * because of the loss of essential data. Hence, such operations are
139 * promoted to clean and invalidate which avoids such corruption.
141 ****************************************************************************/
142 void Xil_DCacheInvalidate(void)
144 register u32 CsidReg, C7Reg;
145 u32 LineSize, NumWays;
146 u32 Way, WayIndex,WayAdjust, Set, SetIndex, NumSet, CacheLevel;
150 mtcpsr(currmask | IRQ_FIQ_MASK);
153 /* Number of level of cache*/
156 /* Select cache level 0 and D cache in CSSR */
157 mtcp(CSSELR_EL1,CacheLevel);
160 CsidReg = mfcp(CCSIDR_EL1);
162 /* Get the cacheline size, way size, index size from csidr */
163 LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
166 NumWays = (CsidReg & 0x00001FFFU) >> 3U;
167 NumWays += 0X00000001U;
170 NumSet = (CsidReg >> 13U) & 0x00007FFFU;
171 NumSet += 0X00000001U;
173 WayAdjust = clz(NumWays) - (u32)0x0000001FU;
178 /* Invalidate all the cachelines */
179 for (WayIndex =0U; WayIndex < NumWays; WayIndex++) {
180 for (SetIndex =0U; SetIndex < NumSet; SetIndex++) {
181 C7Reg = Way | Set | CacheLevel;
183 Set += (0x00000001U << LineSize);
186 Way += (0x00000001U << WayAdjust);
189 /* Wait for invalidate to complete */
192 /* Select cache level 1 and D cache in CSSR */
193 CacheLevel += (0x00000001U<<1U) ;
194 mtcp(CSSELR_EL1,CacheLevel);
197 CsidReg = mfcp(CCSIDR_EL1);
199 /* Get the cacheline size, way size, index size from csidr */
200 LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
203 NumWays = (CsidReg & 0x00001FFFU) >> 3U;
204 NumWays += 0x00000001U;
207 NumSet = (CsidReg >> 13U) & 0x00007FFFU;
208 NumSet += 0x00000001U;
210 WayAdjust = clz(NumWays) - (u32)0x0000001FU;
215 /* Invalidate all the cachelines */
216 for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
217 for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
218 C7Reg = Way | Set | CacheLevel;
220 Set += (0x00000001U << LineSize);
223 Way += (0x00000001U << WayAdjust);
225 /* Wait for invalidate to complete */
231 /****************************************************************************
233 * Invalidate a Data cache line. The cacheline is cleaned and invalidated
235 * @param Address to be flushed.
239 * @note In Cortex-A53, functionality to simply invalide the cachelines
240 * is not present. Such operations are a problem for an environment
241 * that supports virtualisation. It would allow one OS to invalidate
242 * a line belonging to another OS which could lead to that OS crashing
243 * because of the loss of essential data. Hence, such operations are
244 * promoted to clean and invalidate which avoids such corruption.
246 ****************************************************************************/
247 void Xil_DCacheInvalidateLine(INTPTR adr)
252 mtcpsr(currmask | IRQ_FIQ_MASK);
254 /* Select cache level 0 and D cache in CSSR */
255 mtcp(CSSELR_EL1,0x0);
256 mtcpdc(IVAC,(adr & (~0x3F)));
257 /* Wait for invalidate to complete */
259 /* Select cache level 1 and D cache in CSSR */
260 mtcp(CSSELR_EL1,0x2);
261 mtcpdc(IVAC,(adr & (~0x3F)));
262 /* Wait for invalidate to complete */
267 /****************************************************************************
269 * Invalidate the Data cache for the given address range.
270 * The cachelines present in the adderss range are cleaned and invalidated
272 * @param Start address of range to be invalidated.
273 * @param Length of range to be invalidated in bytes.
277 * @note In Cortex-A53, functionality to simply invalide the cachelines
278 * is not present. Such operations are a problem for an environment
279 * that supports virtualisation. It would allow one OS to invalidate
280 * a line belonging to another OS which could lead to that OS crashing
281 * because of the loss of essential data. Hence, such operations are
282 * promoted to clean and invalidate which avoids such corruption.
284 ****************************************************************************/
285 void Xil_DCacheInvalidateRange(INTPTR adr, INTPTR len)
287 const u32 cacheline = 64U;
289 INTPTR tempadr = adr;
293 mtcpsr(currmask | IRQ_FIQ_MASK);
298 if ((tempadr & (cacheline-1U)) != 0U) {
299 tempadr &= (~(cacheline - 1U));
300 Xil_DCacheFlushLine(tempadr);
301 tempadr += cacheline;
303 if ((tempend & (cacheline-1U)) != 0U) {
304 tempend &= (~(cacheline - 1U));
305 Xil_DCacheFlushLine(tempend);
308 while (tempadr < tempend) {
309 /* Select cache level 0 and D cache in CSSR */
310 mtcp(CSSELR_EL1,0x0);
311 /* Invalidate Data cache line */
312 mtcpdc(IVAC,(tempadr & (~0x3F)));
313 /* Wait for invalidate to complete */
315 /* Select cache level 0 and D cache in CSSR */
316 mtcp(CSSELR_EL1,0x2);
317 /* Invalidate Data cache line */
318 mtcpdc(IVAC,(tempadr & (~0x3F)));
319 /* Wait for invalidate to complete */
321 tempadr += cacheline;
327 /****************************************************************************
329 * Flush the Data cache.
337 ****************************************************************************/
338 void Xil_DCacheFlush(void)
340 register u32 CsidReg, C7Reg;
341 u32 LineSize, NumWays;
342 u32 Way, WayIndex,WayAdjust, Set, SetIndex, NumSet, CacheLevel;
346 mtcpsr(currmask | IRQ_FIQ_MASK);
349 /* Number of level of cache*/
352 /* Select cache level 0 and D cache in CSSR */
353 mtcp(CSSELR_EL1,CacheLevel);
356 CsidReg = mfcp(CCSIDR_EL1);
358 /* Get the cacheline size, way size, index size from csidr */
359 LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
362 NumWays = (CsidReg & 0x00001FFFU) >> 3U;
363 NumWays += 0x00000001U;
366 NumSet = (CsidReg >> 13U) & 0x00007FFFU;
367 NumSet += 0x00000001U;
369 WayAdjust = clz(NumWays) - (u32)0x0000001FU;
374 /* Flush all the cachelines */
375 for (WayIndex = 0U; WayIndex < NumWays; WayIndex++) {
376 for (SetIndex = 0U; SetIndex < NumSet; SetIndex++) {
377 C7Reg = Way | Set | CacheLevel;
379 Set += (0x00000001U << LineSize);
382 Way += (0x00000001U << WayAdjust);
385 /* Wait for Flush to complete */
388 /* Select cache level 1 and D cache in CSSR */
389 CacheLevel += (0x00000001U << 1U);
390 mtcp(CSSELR_EL1,CacheLevel);
393 CsidReg = mfcp(CCSIDR_EL1);
395 /* Get the cacheline size, way size, index size from csidr */
396 LineSize = (CsidReg & 0x00000007U) + 0x00000004U;
399 NumWays = (CsidReg & 0x00001FFFU) >> 3U;
400 NumWays += 0x00000001U;
403 NumSet = (CsidReg >> 13U) & 0x00007FFFU;
404 NumSet += 0x00000001U;
406 WayAdjust=clz(NumWays) - (u32)0x0000001FU;
411 /* Flush all the cachelines */
412 for (WayIndex =0U; WayIndex < NumWays; WayIndex++) {
413 for (SetIndex =0U; SetIndex < NumSet; SetIndex++) {
414 C7Reg = Way | Set | CacheLevel;
416 Set += (0x00000001U << LineSize);
419 Way += (0x00000001U<<WayAdjust);
421 /* Wait for Flush to complete */
427 /****************************************************************************
429 * Flush a Data cache line. If the byte specified by the address (adr)
430 * is cached by the Data cache, the cacheline containing that byte is
431 * invalidated. If the cacheline is modified (dirty), the entire
432 * contents of the cacheline are written to system memory before the
433 * line is invalidated.
435 * @param Address to be flushed.
439 * @note The bottom 6 bits are set to 0, forced by architecture.
441 ****************************************************************************/
442 void Xil_DCacheFlushLine(INTPTR adr)
446 mtcpsr(currmask | IRQ_FIQ_MASK);
447 /* Select cache level 0 and D cache in CSSR */
448 mtcp(CSSELR_EL1,0x0);
449 mtcpdc(CIVAC,(adr & (~0x3F)));
450 /* Wait for flush to complete */
452 /* Select cache level 1 and D cache in CSSR */
453 mtcp(CSSELR_EL1,0x2);
454 mtcpdc(CIVAC,(adr & (~0x3F)));
455 /* Wait for flush to complete */
459 /****************************************************************************
460 * Flush the Data cache for the given address range.
461 * If the bytes specified by the address (adr) are cached by the Data cache,
462 * the cacheline containing that byte is invalidated. If the cacheline
463 * is modified (dirty), the written to system memory first before the
464 * before the line is invalidated.
466 * @param Start address of range to be flushed.
467 * @param Length of range to be flushed in bytes.
473 ****************************************************************************/
475 void Xil_DCacheFlushRange(INTPTR adr, INTPTR len)
477 const u32 cacheline = 64U;
479 INTPTR tempadr = adr;
483 mtcpsr(currmask | IRQ_FIQ_MASK);
484 if (len != 0x00000000U) {
487 if ((tempadr & (0x3F)) != 0) {
489 Xil_DCacheFlushLine(tempadr);
490 tempadr += cacheline;
492 if ((tempend & (0x3F)) != 0) {
494 Xil_DCacheFlushLine(tempend);
497 while (tempadr < tempend) {
498 /* Select cache level 0 and D cache in CSSR */
499 mtcp(CSSELR_EL1,0x0);
500 /* Flush Data cache line */
501 mtcpdc(CIVAC,(tempadr & (~0x3F)));
502 /* Wait for flush to complete */
504 /* Select cache level 1 and D cache in CSSR */
505 mtcp(CSSELR_EL1,0x2);
506 /* Flush Data cache line */
507 mtcpdc(CIVAC,(tempadr & (~0x3F)));
508 /* Wait for flush to complete */
510 tempadr += cacheline;
518 /****************************************************************************
520 * Enable the instruction cache.
528 ****************************************************************************/
531 void Xil_ICacheEnable(void)
535 CtrlReg = mfcp(SCTLR_EL3);
537 /* enable caches only if they are disabled */
538 if((CtrlReg & XREG_CONTROL_ICACHE_BIT)==0x00000000U){
539 /* invalidate the instruction cache */
540 Xil_ICacheInvalidate();
542 CtrlReg |= XREG_CONTROL_ICACHE_BIT;
544 /* enable the instruction cache for el3*/
545 mtcp(SCTLR_EL3,CtrlReg);
549 /****************************************************************************
551 * Disable the instruction cache.
559 ****************************************************************************/
560 void Xil_ICacheDisable(void)
564 CtrlReg = mfcp(SCTLR_EL3);
566 /* invalidate the instruction cache */
567 Xil_ICacheInvalidate();
568 CtrlReg &= ~(XREG_CONTROL_ICACHE_BIT);
569 /* disable the instruction cache */
570 mtcp(SCTLR_EL3,CtrlReg);
574 /****************************************************************************
576 * Invalidate the entire instruction cache.
584 ****************************************************************************/
585 void Xil_ICacheInvalidate(void)
587 unsigned int currmask;
589 mtcpsr(currmask | IRQ_FIQ_MASK);
590 mtcp(CSSELR_EL1,0x1);
592 /* invalidate the instruction cache */
594 /* Wait for invalidate to complete */
598 /****************************************************************************
600 * Invalidate an instruction cache line. If the instruction specified by the
601 * parameter adr is cached by the instruction cache, the cacheline containing
602 * that instruction is invalidated.
608 * @note The bottom 6 bits are set to 0, forced by architecture.
610 ****************************************************************************/
612 void Xil_ICacheInvalidateLine(INTPTR adr)
616 mtcpsr(currmask | IRQ_FIQ_MASK);
618 mtcp(CSSELR_EL1,0x1);
619 /*Invalidate I Cache line*/
620 mtcpic(IVAU,adr & (~0x3F));
621 /* Wait for invalidate to complete */
626 /****************************************************************************
628 * Invalidate the instruction cache for the given address range.
629 * If the bytes specified by the address (adr) are cached by the Data cache,
630 * the cacheline containing that byte is invalidated. If the cacheline
631 * is modified (dirty), the modified contents are lost and are NOT
632 * written to system memory before the line is invalidated.
634 * @param Start address of range to be invalidated.
635 * @param Length of range to be invalidated in bytes.
641 ****************************************************************************/
642 void Xil_ICacheInvalidateRange(INTPTR adr, INTPTR len)
644 const u32 cacheline = 64U;
646 INTPTR tempadr = adr;
650 mtcpsr(currmask | IRQ_FIQ_MASK);
652 if (len != 0x00000000U) {
655 tempadr &= ~(cacheline - 0x00000001U);
657 /* Select cache Level 0 I-cache in CSSR */
658 mtcp(CSSELR_EL1,0x1);
659 while (tempadr < tempend) {
660 /*Invalidate I Cache line*/
661 mtcpic(IVAU,adr & (~0x3F));
663 tempadr += cacheline;
666 /* Wait for invalidate to complete */