X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=arch%2Farm%2Fcpu%2Farmv8%2Fcache.S;h=f1deaa723024e629c8a89aaee9c21bad0dff4a60;hb=79a34b71c943a80af5c6d9a2af736fbb37dcc14c;hp=546a83e8f8bb3daeb96260f24c02b32b8f2e0705;hpb=7f673c99c2d8d1aa21996c5b914f06d784b080ca;p=u-boot

diff --git a/arch/arm/cpu/armv8/cache.S b/arch/arm/cpu/armv8/cache.S
index 546a83e8f8..f1deaa7230 100644
--- a/arch/arm/cpu/armv8/cache.S
+++ b/arch/arm/cpu/armv8/cache.S
@@ -9,33 +9,32 @@
 
 #include <asm-offsets.h>
 #include <config.h>
-#include <version.h>
 #include <asm/macro.h>
+#include <asm/system.h>
 #include <linux/linkage.h>
 
 /*
- * void __asm_flush_dcache_level(level)
+ * void __asm_dcache_level(level)
  *
- * clean and invalidate one level cache.
+ * flush or invalidate one level cache.
  *
  * x0: cache level
- * x1~x9: clobbered
+ * x1: 0 clean & invalidate, 1 invalidate only
+ * x2~x9: clobbered
  */
-ENTRY(__asm_flush_dcache_level)
-	lsl	x1, x0, #1
-	msr	csselr_el1, x1		/* select cache level */
+ENTRY(__asm_dcache_level)
+	lsl	x12, x0, #1
+	msr	csselr_el1, x12		/* select cache level */
 	isb				/* sync change of cssidr_el1 */
 	mrs	x6, ccsidr_el1		/* read the new cssidr_el1 */
 	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
 	add	x2, x2, #4		/* x2 <- log2(cache line size) */
 	mov	x3, #0x3ff
 	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
-	add	w4, w3, w3
-	sub	w4, w4, 1		/* round up log2(#ways + 1) */
-	clz	w5, w4			/* bit position of #ways */
+	clz	w5, w3			/* bit position of #ways */
 	mov	x4, #0x7fff
 	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
-	/* x1 <- cache level << 1 */
+	/* x12 <- cache level << 1 */
 	/* x2 <- line length offset */
 	/* x3 <- number of cache ways - 1 */
 	/* x4 <- number of cache sets - 1 */
@@ -45,24 +44,30 @@ loop_set:
 	mov	x6, x3			/* x6 <- working copy of #ways */
 loop_way:
 	lsl	x7, x6, x5
-	orr	x9, x1, x7		/* map way and level to cisw value */
+	orr	x9, x12, x7		/* map way and level to cisw value */
 	lsl	x7, x4, x2
 	orr	x9, x9, x7		/* map set number to cisw value */
-	dc	cisw, x9		/* clean & invalidate by set/way */
-	subs	x6, x6, #1		/* decrement the way */
+	tbz	w1, #0, 1f
+	dc	isw, x9
+	b	2f
+1:	dc	cisw, x9		/* clean & invalidate by set/way */
+2:	subs	x6, x6, #1		/* decrement the way */
 	b.ge	loop_way
 	subs	x4, x4, #1		/* decrement the set */
 	b.ge	loop_set
 
 	ret
-ENDPROC(__asm_flush_dcache_level)
+ENDPROC(__asm_dcache_level)
 
 /*
- * void __asm_flush_dcache_all(void)
+ * void __asm_flush_dcache_all(int invalidate_only)
  *
- * clean and invalidate all data cache by SET/WAY.
+ * x0: 0 clean & invalidate, 1 invalidate only
+ *
+ * flush or invalidate all data cache by SET/WAY.
  */
-ENTRY(__asm_flush_dcache_all)
+ENTRY(__asm_dcache_all)
+	mov	x1, x0
 	dsb	sy
 	mrs	x10, clidr_el1		/* read clidr_el1 */
 	lsr	x11, x10, #24
@@ -76,28 +81,38 @@ ENTRY(__asm_flush_dcache_all)
 	/* x15 <- return address */
 
 loop_level:
-	lsl	x1, x0, #1
-	add	x1, x1, x0		/* x0 <- tripled cache level */
-	lsr	x1, x10, x1
-	and	x1, x1, #7		/* x1 <- cache type */
-	cmp	x1, #2
+	lsl	x12, x0, #1
+	add	x12, x12, x0		/* x0 <- tripled cache level */
+	lsr	x12, x10, x12
+	and	x12, x12, #7		/* x12 <- cache type */
+	cmp	x12, #2
 	b.lt	skip			/* skip if no cache or icache */
-	bl	__asm_flush_dcache_level
+	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
 skip:
 	add	x0, x0, #1		/* increment cache level */
 	cmp	x11, x0
 	b.gt	loop_level
 
 	mov	x0, #0
-	msr	csselr_el1, x0		/* resotre csselr_el1 */
+	msr	csselr_el1, x0		/* restore csselr_el1 */
 	dsb	sy
 	isb
 	mov	lr, x15
 
 finished:
 	ret
+ENDPROC(__asm_dcache_all)
+
+ENTRY(__asm_flush_dcache_all)
+	mov	x0, #0
+	b	__asm_dcache_all
 ENDPROC(__asm_flush_dcache_all)
 
+ENTRY(__asm_invalidate_dcache_all)
+	mov	x0, #0x1
+	b	__asm_dcache_all
+ENDPROC(__asm_invalidate_dcache_all)
+
 /*
  * void __asm_flush_dcache_range(start, end)
  *
@@ -134,3 +149,74 @@ ENTRY(__asm_invalidate_icache_all)
 	isb	sy
 	ret
 ENDPROC(__asm_invalidate_icache_all)
+
+ENTRY(__asm_invalidate_l3_dcache)
+	mov	x0, #0			/* return status as success */
+	ret
+ENDPROC(__asm_invalidate_l3_dcache)
+	.weak	__asm_invalidate_l3_dcache
+
+ENTRY(__asm_flush_l3_dcache)
+	mov	x0, #0			/* return status as success */
+	ret
+ENDPROC(__asm_flush_l3_dcache)
+	.weak	__asm_flush_l3_dcache
+
+ENTRY(__asm_invalidate_l3_icache)
+	mov	x0, #0			/* return status as success */
+	ret
+ENDPROC(__asm_invalidate_l3_icache)
+	.weak	__asm_invalidate_l3_icache
+
+/*
+ * void __asm_switch_ttbr(ulong new_ttbr)
+ *
+ * Safely switches to a new page table.
+ */
+ENTRY(__asm_switch_ttbr)
+	/* x2 = SCTLR (alive throghout the function) */
+	switch_el x4, 3f, 2f, 1f
+3:	mrs	x2, sctlr_el3
+	b	0f
+2:	mrs	x2, sctlr_el2
+	b	0f
+1:	mrs	x2, sctlr_el1
+0:
+
+	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
+	movn	x1, #(CR_M | CR_C | CR_I)
+	and	x1, x2, x1
+	switch_el x4, 3f, 2f, 1f
+3:	msr	sctlr_el3, x1
+	b	0f
+2:	msr	sctlr_el2, x1
+	b	0f
+1:	msr	sctlr_el1, x1
+0:	isb
+
+	/* This call only clobbers x30 (lr) and x9 (unused) */
+	mov	x3, x30
+	bl	__asm_invalidate_tlb_all
+
+	/* From here on we're running safely with caches disabled */
+
+	/* Set TTBR to our first argument */
+	switch_el x4, 3f, 2f, 1f
+3:	msr	ttbr0_el3, x0
+	b	0f
+2:	msr	ttbr0_el2, x0
+	b	0f
+1:	msr	ttbr0_el1, x0
+0:	isb
+
+	/* Restore original SCTLR and thus enable caches again */
+	switch_el x4, 3f, 2f, 1f
+3:	msr	sctlr_el3, x2
+	b	0f
+2:	msr	sctlr_el2, x2
+	b	0f
+1:	msr	sctlr_el1, x2
+0:	isb
+
+	ret	x3
+ENDPROC(__asm_switch_ttbr)
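
The routines touched by this patch are normally reached from U-Boot's C cache code rather than called directly. As a rough illustration of the calling convention documented in the comments above (x0 = 0 for clean & invalidate, 1 for invalidate only; weak L3 hooks that return a status; __asm_switch_ttbr taking the new TTBR value), here is a minimal C sketch; the prototypes and the example callers are assumptions loosely modeled on arch/arm/cpu/armv8/cache_v8.c, not an excerpt of that file.

/*
 * Illustrative sketch only (not part of the patch): prototypes inferred
 * from the assembly comments above; callers are assumptions loosely
 * modeled on U-Boot's arch/arm/cpu/armv8/cache_v8.c.
 */

/* Set/way maintenance over every cache level reported by CLIDR_EL1. */
void __asm_flush_dcache_all(void);	/* tail-calls __asm_dcache_all with x0 = 0 */
void __asm_invalidate_dcache_all(void);	/* tail-calls __asm_dcache_all with x0 = 1 */

/* Weak L3/outer-cache hooks; the defaults above just return 0 (success). */
int __asm_flush_l3_dcache(void);
int __asm_invalidate_l3_dcache(void);
int __asm_invalidate_l3_icache(void);

/* Disables MMU/caches, invalidates TLBs, installs new_ttbr, re-enables. */
void __asm_switch_ttbr(unsigned long new_ttbr);

/* Hypothetical callers showing how the core routines pair with the L3 hooks. */
void flush_dcache_all(void)
{
	__asm_flush_dcache_all();	/* clean & invalidate architected caches */
	__asm_flush_l3_dcache();	/* no-op unless a platform overrides the weak symbol */
}

void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
}

Because the L3 helpers are declared .weak, a platform with an external or outer cache can provide its own implementations and return a real status; the defaults added by this patch simply report success.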