/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF

#ifdef CONFIG_SYS_FULL_VA

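/*
 * Write a level 1 descriptor: point entry 'index' of the level 1
 * table at the level 2 table whose address is 'ptl2_entry'.
 */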
static void set_ptl1_entry(u64 index, u64 ptl2_entry)
{
	u64 *pgd = (u64 *)gd->arch.tlb_addr;
	u64 value;

	value = ptl2_entry | PTL1_TYPE_TABLE;
	pgd[index] = value;
}

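/*
 * Write a level 2 block descriptor: map entry 'bfn' of the level 2
 * table at 'ptl1' to the physical 'address', with the given memory
 * attributes and the access flag set.
 */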
static void set_ptl2_block(u64 ptl1, u64 bfn, u64 address, u64 memory_attrs)
{
	u64 *pmd = (u64 *)ptl1;
	u64 value;

	value = address | PTL2_TYPE_BLOCK | PTL2_BLOCK_AF;
	value |= memory_attrs;
	pmd[bfn] = value;
}

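/* Board-supplied memory map; each region carries a base, size and attributes. */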
static struct mm_region mem_map[] = CONFIG_SYS_MEM_MAP;

#define PTL1_ENTRIES CONFIG_SYS_PTL1_ENTRIES
#define PTL2_ENTRIES CONFIG_SYS_PTL2_ENTRIES

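/*
 * Compute the TCR value for the given exception level: pick the
 * smallest IPS/VA size that covers every region in mem_map, then add
 * the granule, shareability and cacheability flags. The chosen IPS
 * encoding and VA width are optionally reported via pips/pva_bits.
 */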
static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; i < ARRAY_SIZE(mem_map); i++)
		max_addr = max(max_addr, mem_map[i].base + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

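	/*
	 * The IPS field lives at bits 34:32 of TCR_EL1 but at bits
	 * 18:16 of TCR_EL2/TCR_EL3, hence the different shifts below.
	 */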
	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32);
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_64K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(VA_BITS);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}

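/*
 * Build the page tables from mem_map: lay out one level 2 table per
 * region behind the level 1 table, then fill the level 1 entries and
 * the level 2 block entries; level 1 slots that match no region are
 * left invalid.
 */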
static void setup_pgtables(void)
{
	int l1_e, l2_e;
	unsigned long pmd = 0;
	unsigned long address;

	/* Set up the PMD pointers */
	for (l1_e = 0; l1_e < CONFIG_SYS_MEM_MAP_SIZE; l1_e++) {
		gd->arch.pmd_addr[l1_e] = gd->arch.tlb_addr +
						PTL1_ENTRIES * sizeof(u64);
		gd->arch.pmd_addr[l1_e] += PTL2_ENTRIES * sizeof(u64) * l1_e;
		/* Align each level 2 table to the 64 KiB granule */
		gd->arch.pmd_addr[l1_e] = ALIGN(gd->arch.pmd_addr[l1_e],
						0x10000UL);
	}

	/* Set up the page tables */
	for (l1_e = 0; l1_e < PTL1_ENTRIES; l1_e++) {
		if (mem_map[pmd].base ==
			(uintptr_t)l1_e << PTL2_BITS) {
			set_ptl1_entry(l1_e, gd->arch.pmd_addr[pmd]);

			for (l2_e = 0; l2_e < PTL2_ENTRIES; l2_e++) {
				address = mem_map[pmd].base
					+ (uintptr_t)l2_e * BLOCK_SIZE;
				set_ptl2_block(gd->arch.pmd_addr[pmd], l2_e,
					       address, mem_map[pmd].attrs);
			}

			pmd++;
		} else {
			set_ptl1_entry(l1_e, 0);
		}
	}
}

#else

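/* Section/table descriptor helpers used when CONFIG_SYS_FULL_VA is not set. */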
inline void set_pgtable_section(u64 *page_table, u64 index, u64 section,
				u64 memory_type, u64 attribute)
{
	u64 value;

	value = section | PMD_TYPE_SECT | PMD_SECT_AF;
	value |= PMD_ATTRINDX(memory_type);
	value |= attribute;
	page_table[index] = value;
}

inline void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr)
{
	u64 value;

	value = (u64)table_addr | PMD_TYPE_TABLE;
	page_table[index] = value;
}
#endif

/* To activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
#ifndef CONFIG_SYS_FULL_VA
	bd_t *bd = gd->bd;
	u64 *page_table = (u64 *)gd->arch.tlb_addr, i, j;
#endif
	int el;

#ifdef CONFIG_SYS_FULL_VA
	unsigned long coreid = read_mpidr() & CONFIG_COREID_MASK;

	/* Set up page tables only on the BSP */
	if (coreid == BSP_COREID)
		setup_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);
#else
	/* Set up an identity mapping for all spaces */
	for (i = 0; i < (PGTABLE_SIZE >> 3); i++) {
		set_pgtable_section(page_table, i, i << SECTION_SHIFT,
				    MT_DEVICE_NGNRNE, PMD_SECT_NON_SHARE);
	}

	/* Set up an identity mapping for all RAM space */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		ulong start = bd->bi_dram[i].start;
		ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;

		for (j = start >> SECTION_SHIFT;
		     j < end >> SECTION_SHIFT; j++) {
			set_pgtable_section(page_table, j, j << SECTION_SHIFT,
					    MT_NORMAL, PMD_SECT_NON_SHARE);
		}
	}

	/* load TTBR0 */
	el = current_el();
	if (el == 1) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL1_RSVD | TCR_FLAGS | TCR_EL1_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else if (el == 2) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL2_RSVD | TCR_FLAGS | TCR_EL2_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL3_RSVD | TCR_FLAGS | TCR_EL3_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	}
#endif

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels.
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_cache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_cache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushed dcache successfully.\n");
}

/*
 * Invalidates the given range in all levels of D-cache/unified cache.
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/*
 * Flushes (cleans & invalidates) the given range from all levels of
 * D-cache/unified cache.
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

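/*
 * Flush and disable the data cache. The MMU is turned off along with
 * it, since the data cache is only active while the MMU is enabled.
 */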
void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if the cache isn't enabled there is no need to disable it */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

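/*
 * Boards override this to tell mmu_set_region_dcache_behaviour()
 * where their page table lives; the default just reports that no
 * offset is defined and returns NULL.
 */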
u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

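/*
 * Change the cache attributes of an already-mapped region, one MMU
 * section at a time, then flush the affected range. A sketch of a
 * call, with made-up base address and size (the option values come
 * from enum dcache_option):
 *
 *	mmu_set_region_dcache_behaviour(0x80000000, 0x200000, DCACHE_OFF);
 */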
#ifndef CONFIG_SYS_FULL_VA
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 *page_table = arch_get_page_table();
	u64 upto, end;

	if (page_table == NULL)
		return;

	end = ALIGN(start + size, (1 << MMU_SECTION_SHIFT)) >>
	      MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
	for (upto = start; upto < end; upto++) {
		page_table[upto] &= ~PMD_ATTRINDX_MASK;
		page_table[upto] |= PMD_ATTRINDX(option);
	}

	asm volatile("dsb sy");
	__asm_invalidate_tlb_all();
	asm volatile("dsb sy");
	asm volatile("isb");

	start = start << MMU_SECTION_SHIFT;
	end = end << MMU_SECTION_SHIFT;
	flush_dcache_range(start, end);
	asm volatile("dsb sy");
}
#endif

#else	/* CONFIG_SYS_DCACHE_OFF */

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif	/* CONFIG_SYS_DCACHE_OFF */

#ifndef CONFIG_SYS_ICACHE_OFF

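/* Invalidate the I-cache, then turn it on via the SCTLR I bit. */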
void icache_enable(void)
{
	__asm_invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
}

#else	/* CONFIG_SYS_ICACHE_OFF */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* CONFIG_SYS_ICACHE_OFF */

/*
 * Enable dCache & iCache; whether each cache is actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF.
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}