dsb sy
ret
ENDPROC(__asm_flush_dcache_range)
+/*
+ * void __asm_invalidate_dcache_range(start, end)
+ *
+ * invalidate data cache in the range
+ *
+ * x0: start address
+ * x1: end address
+ */
+ENTRY(__asm_invalidate_dcache_range)
+ mrs x3, ctr_el0
+ ubfm x3, x3, #16, #19 /* x3 <- CTR_EL0.DminLine: log2(words per line) */
+ mov x2, #4
+ lsl x2, x2, x3 /* cache line size */
+
+ /* x2 <- minimal cache line size in cache system */
+ sub x3, x2, #1
+ bic x0, x0, x3
+1: dc ivac, x0 /* invalidate data or unified cache */
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ ret
+ENDPROC(__asm_invalidate_dcache_range)
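For readers who find the assembly dense, here is a rough C sketch of the same logic. It is illustrative only: dcache_min_line_size() and invalidate_dcache_range_sketch() are made-up names, and the real routine stays in assembly so it can run early, before the C environment is fully set up.

#include <stdint.h>

/* Hypothetical helper: CTR_EL0[19:16] (DminLine) is the log2 of the
 * smallest data cache line size in the system, counted in 4-byte words,
 * so the line size in bytes is 4 << DminLine. */
static inline uint64_t dcache_min_line_size(void)
{
	uint64_t ctr;

	__asm__ volatile("mrs %0, ctr_el0" : "=r" (ctr));
	return 4u << ((ctr >> 16) & 0xf);
}

/* Hypothetical C equivalent of __asm_invalidate_dcache_range() */
static void invalidate_dcache_range_sketch(uint64_t start, uint64_t end)
{
	uint64_t line = dcache_min_line_size();
	uint64_t addr = start & ~(line - 1);	/* bic: align start down */

	do {	/* post-test loop, matching "1: ... b.lo 1b" above */
		__asm__ volatile("dc ivac, %0" : : "r" (addr) : "memory");
		addr += line;
	} while (addr < end);
	__asm__ volatile("dsb sy" : : : "memory");	/* complete before returning */
}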
/*
* void __asm_invalidate_icache_all(void)
*/
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
- __asm_flush_dcache_range(start, stop);
+ __asm_invalidate_dcache_range(start, stop);
}
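The substitution here is the point of the patch: __asm_flush_dcache_range() uses dc civac, which cleans (writes back) dirty lines before invalidating them, and on a DMA receive path that writeback can overwrite data the device has just placed in memory. A pure invalidate (dc ivac) simply discards the lines. A sketch of a typical caller follows; dma_rx_complete() is a made-up name, and buf and len are assumed cache-line aligned, per the header comment further below.

/* Hypothetical driver fragment: a device has DMA'd len bytes into buf.
 * Invalidate rather than flush, so stale dirty lines cannot be written
 * back over the freshly transferred data. */
static void dma_rx_complete(void *buf, unsigned long len)
{
	unsigned long start = (unsigned long)buf;

	invalidate_dcache_range(start, start + len);
	/* buf now reflects what the device wrote to memory */
}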
/*
void __asm_flush_dcache_all(void);
void __asm_invalidate_dcache_all(void);
void __asm_flush_dcache_range(u64 start, u64 end);
+
+/**
+ * __asm_invalidate_dcache_range() - Invalidate a range of virtual addresses
+ *
+ * This performs an invalidation from @start to @end - 1. Both addresses
+ * should be cache-aligned, otherwise this function will align the start
+ * address and may continue past the end address.
+ *
+ * Data in the address range is evicted from the cache and is not written back
+ * to memory.
+ *
+ * @start: Start address to invalidate
+ * @end: End address to invalidate up to (exclusive)
+ */
+void __asm_invalidate_dcache_range(u64 start, u64 end);
void __asm_invalidate_tlb_all(void);
void __asm_invalidate_icache_all(void);
int __asm_invalidate_l3_dcache(void);
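The alignment caveat documented above is easy to trip over: an unaligned start is rounded down, so the invalidate can discard a neighbouring object that shares the first cache line. One way a caller can avoid this is U-Boot's existing ALLOC_CACHE_ALIGN_BUFFER() macro from include/memalign.h; the surrounding function below is hypothetical and assumes the usual U-Boot headers are in scope.

#include <memalign.h>

/* Hypothetical read path: the buffer is aligned to ARCH_DMA_MINALIGN and
 * 512 is a multiple of any typical line size (e.g. 64 bytes), so neither
 * end of the invalidated range can clip another object's cache line. */
static void read_block(void)
{
	ALLOC_CACHE_ALIGN_BUFFER(u8, buf, 512);

	invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + 512);
	/* ... program the device to transfer into buf, then parse it ... */
}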