/*
 * David Feng <fenghua@phytium.com.cn>
 * Alexander Graf <agraf@suse.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *    63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 * Lv0: FF8000000000       --
 * Lv1:   7FC0000000       1G
 * Lv2:     3FE00000       2M
 * Lv3:       1FF000       4K
 * off:          FFF
 */
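/*
 * Worked example (illustrative): VA 0x0000008080604056 decomposes into
 * Lv0 index 1, Lv1 index 2, Lv2 index 3, Lv3 index 4 and page offset
 * 0x056.
 */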
#ifdef CONFIG_SYS_FULL_VA
static struct mm_region mem_map[] = CONFIG_SYS_MEM_MAP;

static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;
	/* Find the largest address we need to support */
	for (i = 0; i < ARRAY_SIZE(mem_map); i++)
		max_addr = max(max_addr, mem_map[i].base + mem_map[i].size);
	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5; va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4; va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3; va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2; va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1; va_bits = 36;
	} else {
		ips = 0; va_bits = 32;
	}
	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}
	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}
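/*
 * For example (illustrative): a memory map topping out below 4GB yields
 * ips = 0 and va_bits = 32, so T0SZ = 64 - 32 = 32 and the 4k-granule
 * table walk starts at level 1 rather than level 0.
 */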
#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}
/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* The page offset is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}
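/*
 * Illustrative values: level 0 -> shift 39 (512GB per entry),
 * level 1 -> 30 (1GB), level 2 -> 21 (2MB), level 3 -> 12 (4KB).
 */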
static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0, i;
	u64 *pte, idx, va_bits;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;
	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);
		/* Found the requested level */
		if (i == level)
			return pte;
		/* PTE is no table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}
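/*
 * Usage sketch (illustrative): find_pte(addr, 3) returns a pointer to the
 * level 3 (4KB) entry covering addr, or NULL if the walk hits a block or
 * invalid entry before reaching level 3.
 */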
/* Creates a new full table (512 entries) and returns it */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
		      gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}
static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}
/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 addr = map->base;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	while (size) {
		pte = find_pte(addr, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for addr 0x%llx\n", addr);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

		for (level = 1; level < 4; level++) {
			pte = find_pte(addr, level);
			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for addr=%llx size=%llx "
			      "blocksize=%llx\n", addr, size, blocksize);
			if (size >= blocksize && !(addr & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block addr=%llx\n",
				      pte, addr);
				*pte = addr | attrs;
				addr += blocksize;
				size -= blocksize;
				break;
			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for addr 0x%llx "
				      "blksize=%llx\n", addr, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			}
		}
	}
}
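/*
 * Illustrative walk: mapping base=0x40000000 size=0x40200000 emits one
 * 1GB level 1 block at 0x40000000 and one 2MB level 2 block at
 * 0x80000000, allocating sub-tables on the way down as needed.
 */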
/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}
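/*
 * For example (illustrative): splitting a 2MB level 2 block yields a
 * level 3 table whose 512 entries each map 4KB with the attributes of
 * the old block, plus the page-type bits required at level 3.
 */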
enum pte_type { PTE_INVAL, PTE_BLOCK, PTE_LEVEL };

/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48 bit
 * coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;
	for (i = 0; i < ARRAY_SIZE(mem_map); i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->base;
		u64 end = start + map->size;

		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 cannot do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}
	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/* Reached the end of the address space */
				break;
			}
		}
	}

	return r;
}
/* Returns the estimated required size of all page tables */
u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size, va_bits;
	int start_level = 0;

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on.
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (arbitrary pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
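/*
 * Illustrative arithmetic: one_pt = 512 * 8 bytes = 4KB. A memory map
 * needing 3 page tables costs 3 * 4KB, doubled for the emergency copy,
 * plus 4 spare tables: 24KB + 16KB = 40KB.
 */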
static void setup_pgtables(void)
{
	int i;

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; i < ARRAY_SIZE(mem_map); i++)
		add_map(&mem_map[i]);
}
static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create the same thing once more for our emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
}
#else /* CONFIG_SYS_FULL_VA */

inline void set_pgtable_section(u64 *page_table, u64 index, u64 section,
				u64 memory_type, u64 attribute)
{
	u64 value = section | PMD_TYPE_SECT | PMD_SECT_AF;

	value |= PMD_ATTRINDX(memory_type);
	value |= attribute;
	page_table[index] = value;
}

inline void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr)
{
	page_table[index] = (u64)table_addr | PMD_TYPE_TABLE;
}
#endif
/* To activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
#ifndef CONFIG_SYS_FULL_VA
	bd_t *bd = gd->bd;
	u64 *page_table = (u64 *)gd->arch.tlb_addr, i, j;
#endif
	int el;

#ifdef CONFIG_SYS_FULL_VA
	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);
#else
	/* Set up an identity-mapping for all spaces */
	for (i = 0; i < (PGTABLE_SIZE >> 3); i++) {
		set_pgtable_section(page_table, i, i << SECTION_SHIFT,
				    MT_DEVICE_NGNRNE, PMD_SECT_NON_SHARE);
	}

	/* Set up an identity-mapping for all RAM space */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		ulong start = bd->bi_dram[i].start;
		ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;

		for (j = start >> SECTION_SHIFT;
		     j < end >> SECTION_SHIFT; j++) {
			set_pgtable_section(page_table, j, j << SECTION_SHIFT,
					    MT_NORMAL, PMD_SECT_NON_SHARE);
		}
	}
	/* Load TTBR0 */
	el = current_el();
	if (el == 1) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL1_RSVD | TCR_FLAGS | TCR_EL1_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else if (el == 2) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL2_RSVD | TCR_FLAGS | TCR_EL2_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL3_RSVD | TCR_FLAGS | TCR_EL3_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	}
#endif

	/* Enable the MMU */
	set_sctlr(get_sctlr() | CR_M);
}
/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
}
/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_cache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_cache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("dcache flushed successfully\n");
}
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
void dcache_enable(void)
{
	/* The data cache is not active unless the MMU is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}
void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* If the cache isn't enabled, there is no need to disable it */
	if (!(sctlr & CR_C))
		return;

	flush_dcache_all();
	set_sctlr(sctlr & ~(CR_C|CR_M));

	__asm_invalidate_tlb_all();
}
int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}
#ifndef CONFIG_SYS_FULL_VA
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 *page_table = arch_get_page_table();
	u64 upto, end;

	if (page_table == NULL)
		return;
	end = ALIGN(start + size, (1 << MMU_SECTION_SHIFT)) >>
	      MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
	for (upto = start; upto < end; upto++) {
		page_table[upto] &= ~PMD_ATTRINDX_MASK;
		page_table[upto] |= PMD_ATTRINDX(option);
	}

	asm volatile("dsb sy");
	__asm_invalidate_tlb_all();
	asm volatile("dsb sy");

	start = start << MMU_SECTION_SHIFT;
	end = end << MMU_SECTION_SHIFT;
	flush_dcache_range(start, end);
	asm volatile("dsb sy");
}
#else
static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}
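/*
 * For example (illustrative): is_aligned(0x40200000, 0x200000, 1ULL << 21)
 * is true, so that region can be covered entirely by 2MB blocks.
 */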
static u64 set_one_region(u64 start, u64 size, u64 attrs, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		*pte &= ~PMD_ATTRINDX_MASK;
		*pte |= attrs;
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	/*
	 * We cannot modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back.
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that
	 * fits our alignment constraints, then set it to the new cache
	 * attributes.
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			r = set_one_region(start, size, attrs, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
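/*
 * Usage sketch (illustrative; the buffer address is hypothetical): to map
 * a 2MB DMA window uncached, a driver could call
 *
 *	mmu_set_region_dcache_behaviour(0x80000000, 0x200000, DCACHE_OFF);
 *
 * which splits any covering block PTEs until whole 2MB granules can be
 * retagged.
 */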
#endif /* CONFIG_SYS_FULL_VA */

#else	/* CONFIG_SYS_DCACHE_OFF */

void invalidate_dcache_all(void) {}
void flush_dcache_all(void) {}
void dcache_enable(void) {}
void dcache_disable(void) {}
int dcache_status(void) { return 0; }

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif /* CONFIG_SYS_DCACHE_OFF */
#ifndef CONFIG_SYS_ICACHE_OFF

void icache_enable(void)
{
	__asm_invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
}
#else	/* CONFIG_SYS_ICACHE_OFF */

void icache_enable(void) {}
void icache_disable(void) {}
int icache_status(void) { return 0; }
void invalidate_icache_all(void) {}

#endif /* CONFIG_SYS_ICACHE_OFF */
/*
 * Enable dCache & iCache; whether the caches actually get enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}