/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9			/* invalidate by set/way */
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
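
/*
 * For orientation, the set/way walk above corresponds roughly to this
 * hedged C sketch (illustration only, clean+invalidate path). dc_cisw()
 * is a hypothetical stand-in for the "dc cisw" instruction, and ccsidr
 * is assumed to hold CCSIDR_EL1 as read after selecting the level:
 *
 *	void dcache_level_by_set_way(unsigned long level, unsigned long ccsidr)
 *	{
 *		unsigned long line_shift = (ccsidr & 7) + 4;	// log2(line size)
 *		unsigned long ways = (ccsidr >> 3) & 0x3ff;	// #ways - 1
 *		unsigned long sets = (ccsidr >> 13) & 0x7fff;	// #sets - 1
 *		// like "clz w5, w3"; assumes ways != 0
 *		unsigned int way_shift = __builtin_clz((unsigned int)ways);
 *
 *		for (long set = sets; set >= 0; set--)
 *			for (long way = ways; way >= 0; way--)
 *				dc_cisw((level << 1) |
 *					((unsigned long)way << way_shift) |
 *					((unsigned long)set << line_shift));
 *	}
 */
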
/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
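
/*
 * Hedged C sketch of the level walk above (illustration only; clidr is
 * assumed to hold CLIDR_EL1, and dcache_level() is a hypothetical
 * stand-in for the call to __asm_dcache_level):
 *
 *	void dcache_all(unsigned long clidr, int invalidate_only)
 *	{
 *		unsigned int loc = (clidr >> 24) & 7;	// level of coherency
 *
 *		for (unsigned int level = 0; level < loc; level++) {
 *			unsigned int type = (clidr >> (3 * level)) & 7;
 *
 *			if (type >= 2)	// data or unified cache present
 *				dcache_level(level, invalidate_only);
 *		}
 *	}
 */
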
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)

ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
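
/*
 * From C, callers would typically reach these two entry points through
 * declarations like the following (a sketch; U-Boot declares them in
 * its arch headers):
 *
 *	void __asm_flush_dcache_all(void);
 *	void __asm_invalidate_dcache_all(void);
 *
 *	void example(void)
 *	{
 *		__asm_flush_dcache_all();	// write back + invalidate all levels
 *	}
 */
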
/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
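
/*
 * Hedged C equivalent of the line-size computation and loop above
 * (illustration only; read_ctr_el0() and dc_civac() are hypothetical
 * wrappers for "mrs ..., ctr_el0" and "dc civac"):
 *
 *	void flush_range(unsigned long start, unsigned long end)
 *	{
 *		// CTR_EL0[19:16] is log2(min D-cache line size in words)
 *		unsigned long line = 4UL << ((read_ctr_el0() >> 16) & 0xf);
 *		unsigned long addr;
 *
 *		for (addr = start & ~(line - 1); addr < end; addr += line)
 *			dc_civac(addr);
 *	}
 */
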
/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis			/* invalidate icache, inner shareable */
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
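
/*
 * A typical C-level use (a sketch; dst/src/len are illustrative): after
 * copying code into memory, push it out of the D-cache, then drop any
 * stale I-cache contents before jumping to it:
 *
 *	memcpy(dst, src, len);
 *	__asm_flush_dcache_range((unsigned long)dst, (unsigned long)dst + len);
 *	__asm_invalidate_icache_all();
 */
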
ENTRY(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache

ENTRY(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache

ENTRY(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache
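
/*
 * The .weak directives above let SoC code with an L3 cache supply real
 * implementations under the same names; the linker then picks those
 * over these stubs. The C analogue of such a default (a sketch) is:
 *
 *	__attribute__((weak)) int __asm_flush_l3_dcache(void)
 *	{
 *		return 0;	// no L3 cache to maintain; report success
 *	}
 */
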
/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
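
/*
 * Hedged usage sketch from C (caller name is illustrative): pass the
 * base address of the new translation tables; the routine disables
 * caches, invalidates the TLBs, installs the new TTBR, and restores the
 * original SCTLR configuration before returning:
 *
 *	extern void __asm_switch_ttbr(unsigned long new_ttbr);
 *
 *	static void activate_page_tables(unsigned long *tables)
 *	{
 *		__asm_switch_ttbr((unsigned long)tables);
 *	}
 */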