X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=arch%2Farm%2Fcpu%2Farmv8%2Fcache_v8.c;h=adc7e1746f5ca01bea6487da069167a92f762182;hb=8bf5c1e191ad5761196ff1bcb54aae4007fee98a;hp=df15e0066d70ae421b7a187d92472f6dadff946d;hpb=7985cdf74b280f86a1c7440298a84f1fb2876fd9;p=u-boot

diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index df15e0066d..adc7e1746f 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -35,7 +35,7 @@ DECLARE_GLOBAL_DATA_PTR;
  *  off:   FFF
  */
 
-static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
+u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
 {
 	u64 max_addr = 0;
 	u64 ips, va_bits;
@@ -44,7 +44,7 @@ static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
 
 	/* Find the largest address we need to support */
 	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
-		max_addr = max(max_addr, mem_map[i].base + mem_map[i].size);
+		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);
 
 	/* Calculate the maximum physical (and thus virtual) address */
 	if (max_addr > (1ULL << 44)) {
@@ -167,49 +167,6 @@ static void set_pte_table(u64 *pte, u64 *table)
 	*pte = PTE_TYPE_TABLE | (ulong)table;
 }
 
-/* Add one mm_region map entry to the page tables */
-static void add_map(struct mm_region *map)
-{
-	u64 *pte;
-	u64 addr = map->base;
-	u64 size = map->size;
-	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
-	u64 blocksize;
-	int level;
-	u64 *new_table;
-
-	while (size) {
-		pte = find_pte(addr, 0);
-		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
-			debug("Creating table for addr 0x%llx\n", addr);
-			new_table = create_table();
-			set_pte_table(pte, new_table);
-		}
-
-		for (level = 1; level < 4; level++) {
-			pte = find_pte(addr, level);
-			blocksize = 1ULL << level2shift(level);
-			debug("Checking if pte fits for addr=%llx size=%llx "
-			      "blocksize=%llx\n", addr, size, blocksize);
-			if (size >= blocksize && !(addr & (blocksize - 1))) {
-				/* Page fits, create block PTE */
-				debug("Setting PTE %p to block addr=%llx\n",
-				      pte, addr);
-				*pte = addr | attrs;
-				addr += blocksize;
-				size -= blocksize;
-				break;
-			} else if ((pte_type(pte) == PTE_TYPE_FAULT)) {
-				/* Page doesn't fit, create subpages */
-				debug("Creating subtable for addr 0x%llx "
-				      "blksize=%llx\n", addr, blocksize);
-				new_table = create_table();
-				set_pte_table(pte, new_table);
-			}
-		}
-	}
-}
-
 /* Splits a block PTE into table with subpages spanning the old block */
 static void split_block(u64 *pte, int level)
 {
@@ -241,6 +198,58 @@ static void split_block(u64 *pte, int level)
 	set_pte_table(pte, new_table);
 }
 
+/* Add one mm_region map entry to the page tables */
+static void add_map(struct mm_region *map)
+{
+	u64 *pte;
+	u64 virt = map->virt;
+	u64 phys = map->phys;
+	u64 size = map->size;
+	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
+	u64 blocksize;
+	int level;
+	u64 *new_table;
+
+	while (size) {
+		pte = find_pte(virt, 0);
+		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
+			debug("Creating table for virt 0x%llx\n", virt);
+			new_table = create_table();
+			set_pte_table(pte, new_table);
+		}
+
+		for (level = 1; level < 4; level++) {
+			pte = find_pte(virt, level);
+			if (!pte)
+				panic("pte not found\n");
+
+			blocksize = 1ULL << level2shift(level);
+			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
+			      virt, size, blocksize);
+			if (size >= blocksize && !(virt & (blocksize - 1))) {
+				/* Page fits, create block PTE */
+				debug("Setting PTE %p to block virt=%llx\n",
+				      pte, virt);
+				*pte = phys | attrs;
+				virt += blocksize;
+				phys += blocksize;
+				size -= blocksize;
+				break;
+			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
+				/* Page doesn't fit, create subpages */
+				debug("Creating subtable for virt 0x%llx blksize=%llx\n",
+				      virt, blocksize);
+				new_table = create_table();
+				set_pte_table(pte, new_table);
+			} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
+				debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
+				      virt, blocksize);
+				split_block(pte, level);
+			}
+		}
+	}
+}
+
 enum pte_type {
 	PTE_INVAL,
 	PTE_BLOCK,
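/*
 * A minimal host-side sketch (not part of this patch) of the block-fit test
 * that the new add_map() above applies at each level. It assumes the 4K
 * granule used by this file, where a level-N entry spans
 * 1ULL << (12 + 9 * (3 - N)) bytes; the sample virt/size values are made up
 * for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static int level2shift(int level)
{
	/* 4K pages: 12-bit page offset, 9 translation bits per level */
	return 12 + 9 * (3 - level);
}

int main(void)
{
	uint64_t virt = 0x40000000ULL;	/* 1 GiB aligned start */
	uint64_t size = 0x40200000ULL;	/* 1 GiB + 2 MiB to map */

	while (size) {
		for (int level = 1; level < 4; level++) {
			uint64_t blocksize = 1ULL << level2shift(level);

			/* Same test as add_map(): big enough and aligned? */
			if (size >= blocksize && !(virt & (blocksize - 1))) {
				printf("level %d block: virt=0x%jx size=0x%jx\n",
				       level, (uintmax_t)virt,
				       (uintmax_t)blocksize);
				virt += blocksize;
				size -= blocksize;
				break;
			}
		}
	}
	return 0;
}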
@@ -265,7 +274,7 @@ static int count_required_pts(u64 addr, int level, u64 maxaddr)
 
 	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
 		struct mm_region *map = &mem_map[i];
-		u64 start = map->base;
+		u64 start = map->virt;
 		u64 end = start + map->size;
 
 		/* Check if the PTE would overlap with the map */
@@ -320,7 +329,7 @@ static int count_required_pts(u64 addr, int level, u64 maxaddr)
 }
 
 /* Returns the estimated required size of all page tables */
-u64 get_page_table_size(void)
+__weak u64 get_page_table_size(void)
 {
 	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
 	u64 size = 0;
@@ -349,10 +358,13 @@ u64 get_page_table_size(void)
 	return size;
 }
 
-static void setup_pgtables(void)
+void setup_pgtables(void)
 {
 	int i;
 
+	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
+		panic("Page table pointer not set up.");
+
 	/*
 	 * Allocate the first level we're on with invalidate entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
@@ -363,14 +375,12 @@ static void setup_pgtables(void)
 	/* Now add all MMU table entries one after another to the table */
 	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
 		add_map(&mem_map[i]);
-
-	/* Create the same thing once more for our emergency page table */
-	create_table();
 }
 
 static void setup_all_pgtables(void)
 {
 	u64 tlb_addr = gd->arch.tlb_addr;
+	u64 tlb_size = gd->arch.tlb_size;
 
 	/* Reset the fill ptr */
 	gd->arch.tlb_fillptr = tlb_addr;
@@ -379,10 +389,13 @@ static void setup_all_pgtables(void)
 	setup_pgtables();
 
 	/* Create emergency page tables */
+	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
+			     (uintptr_t)gd->arch.tlb_addr;
 	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
 	setup_pgtables();
 	gd->arch.tlb_emerg = gd->arch.tlb_addr;
 	gd->arch.tlb_addr = tlb_addr;
+	gd->arch.tlb_size = tlb_size;
 }
 
 /* to activate the MMU we need to set up virtual memory */
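/*
 * A minimal sketch (not part of this patch) of the bookkeeping that
 * setup_all_pgtables() above performs: the primary tables consume the front
 * of the pre-allocated buffer, and the emergency set is then built from the
 * fill pointer onward with the shrunken remaining size, so the second
 * setup_pgtables() run cannot overflow the allocation. The struct and the
 * sample numbers are stand-ins for the gd->arch fields.
 */
#include <stdio.h>
#include <stdint.h>

struct tlb_state {
	uint64_t tlb_addr;	/* start of page-table buffer */
	uint64_t tlb_size;	/* bytes still available */
	uint64_t tlb_fillptr;	/* next free byte */
	uint64_t tlb_emerg;	/* start of emergency tables */
};

int main(void)
{
	struct tlb_state gd_arch = {
		.tlb_addr = 0x80000000ULL,	/* assumed buffer start */
		.tlb_size = 0x8000ULL,		/* assumed buffer size */
	};
	uint64_t tlb_addr = gd_arch.tlb_addr;
	uint64_t tlb_size = gd_arch.tlb_size;

	/* Primary tables: pretend setup_pgtables() used three 4K tables */
	gd_arch.tlb_fillptr = gd_arch.tlb_addr + 3 * 0x1000ULL;

	/* Emergency tables start where the primary set ended */
	gd_arch.tlb_size -= gd_arch.tlb_fillptr - gd_arch.tlb_addr;
	gd_arch.tlb_addr = gd_arch.tlb_fillptr;
	gd_arch.tlb_emerg = gd_arch.tlb_addr;

	printf("emergency tables at 0x%jx, 0x%jx bytes left\n",
	       (uintmax_t)gd_arch.tlb_emerg, (uintmax_t)gd_arch.tlb_size);

	/* Restore the primary values, as the patch does */
	gd_arch.tlb_addr = tlb_addr;
	gd_arch.tlb_size = tlb_size;
	return 0;
}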
@@ -408,19 +421,20 @@ __weak void mmu_setup(void)
 void invalidate_dcache_all(void)
 {
 	__asm_invalidate_dcache_all();
+	__asm_invalidate_l3_dcache();
 }
 
 /*
  * Performs a clean & invalidation of the entire data cache at all levels.
  * This function needs to be inline to avoid using stack.
- * __asm_flush_l3_cache return status of timeout
+ * __asm_flush_l3_dcache returns the timeout status
  */
 inline void flush_dcache_all(void)
 {
 	int ret;
 
 	__asm_flush_dcache_all();
-	ret = __asm_flush_l3_cache();
+	ret = __asm_flush_l3_dcache();
 	if (ret)
 		debug("flushing dcache returns 0x%x\n", ret);
 	else
@@ -432,7 +446,7 @@ inline void flush_dcache_all(void)
  */
 void invalidate_dcache_range(unsigned long start, unsigned long stop)
 {
-	__asm_flush_dcache_range(start, stop);
+	__asm_invalidate_dcache_range(start, stop);
 }
 
 /*
@@ -487,7 +501,8 @@ static bool is_aligned(u64 addr, u64 size, u64 align)
 	return !(addr & (align - 1)) && !(size & (align - 1));
 }
 
-static u64 set_one_region(u64 start, u64 size, u64 attrs, int level)
+/* Use flag to indicate whether attrs has more than d-cache attributes */
+static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
 {
 	int levelshift = level2shift(level);
 	u64 levelsize = 1ULL << levelshift;
@@ -495,8 +510,13 @@ static u64 set_one_region(u64 start, u64 size, u64 attrs, int level)
 
 	/* Can we just modify the current level block PTE? */
 	if (is_aligned(start, size, levelsize)) {
-		*pte &= ~PMD_ATTRINDX_MASK;
-		*pte |= attrs;
+		if (flag) {
+			*pte &= ~PMD_ATTRMASK;
+			*pte |= attrs & PMD_ATTRMASK;
+		} else {
+			*pte &= ~PMD_ATTRINDX_MASK;
+			*pte |= attrs & PMD_ATTRINDX_MASK;
+		}
 		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);
 
 		return levelsize;
@@ -527,6 +547,9 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
 
 	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);
 
+	if (!gd->arch.tlb_emerg)
+		panic("Emergency page table not set up.");
+
 	/*
 	 * We can not modify page tables that we're currently running on,
 	 * so we first need to switch to the "emergency" page tables where
@@ -543,7 +566,8 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
 		u64 r;
 
 		for (level = 1; level < 4; level++) {
-			r = set_one_region(start, size, attrs, level);
+			/* Set d-cache attributes only */
+			r = set_one_region(start, size, attrs, false, level);
 			if (r) {
 				/* PTE successfully replaced */
 				size -= r;
@@ -564,8 +588,74 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
 
 	flush_dcache_range(real_start, real_start + real_size);
 }
 
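/*
 * A minimal sketch (not part of this patch) of the two update modes selected
 * by the new "flag" parameter of set_one_region(): flag == false replaces
 * only the MAIR attribute-index bits, which is all that
 * mmu_set_region_dcache_behaviour() needs, while flag == true rewrites the
 * whole attribute field including the PXN/UXN and valid bits. The DEMO_*
 * masks below are illustrative stand-ins for the real PMD_ATTRINDX_MASK and
 * PMD_ATTRMASK definitions.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_ATTRINDX_MASK	(7ULL << 2)		/* AttrIndx bits */
#define DEMO_ATTRMASK		(DEMO_ATTRINDX_MASK | \
				 (1ULL << 0) |		/* valid bit */ \
				 (3ULL << 53))		/* PXN/UXN bits */

static uint64_t update_pte(uint64_t pte, uint64_t attrs, int full)
{
	uint64_t mask = full ? DEMO_ATTRMASK : DEMO_ATTRINDX_MASK;

	/* Clear the selected attribute bits, then merge in the new ones */
	return (pte & ~mask) | (attrs & mask);
}

int main(void)
{
	uint64_t pte = 0x40000711ULL;	/* made-up block PTE */

	/* d-cache change only: swap the attribute index, keep the rest */
	printf("dcache-only: 0x%jx\n",
	       (uintmax_t)update_pte(pte, 2ULL << 2, 0));

	/* break-before-make step: attrs == 0 clears the valid bit too */
	printf("invalidated: 0x%jx\n", (uintmax_t)update_pte(pte, 0, 1));
	return 0;
}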
+ */ + while (size > 0) { + for (level = 1; level < 4; level++) { + /* Set PTE to fault */ + r = set_one_region(start, size, PTE_TYPE_FAULT, true, + level); + if (r) { + /* PTE successfully invalidated */ + size -= r; + start += r; + break; + } + } + } + + flush_dcache_range(gd->arch.tlb_addr, + gd->arch.tlb_addr + gd->arch.tlb_size); + __asm_invalidate_tlb_all(); + + /* + * Loop through the address range until we find a page granule that fits + * our alignment constraints, then set it to the new cache attributes + */ + start = addr; + size = siz; + while (size > 0) { + for (level = 1; level < 4; level++) { + /* Set PTE to new attributes */ + r = set_one_region(start, size, attrs, true, level); + if (r) { + /* PTE successfully updated */ + size -= r; + start += r; + break; + } + } + } + flush_dcache_range(gd->arch.tlb_addr, + gd->arch.tlb_addr + gd->arch.tlb_size); + __asm_invalidate_tlb_all(); +} + #else /* CONFIG_SYS_DCACHE_OFF */ +/* + * For SPL builds, we may want to not have dcache enabled. Any real U-Boot + * running however really wants to have dcache and the MMU active. Check that + * everything is sane and give the developer a hint if it isn't. + */ +#ifndef CONFIG_SPL_BUILD +#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache. +#endif + void invalidate_dcache_all(void) { } @@ -598,7 +688,7 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size, void icache_enable(void) { - __asm_invalidate_icache_all(); + invalidate_icache_all(); set_sctlr(get_sctlr() | CR_I); } @@ -615,6 +705,7 @@ int icache_status(void) void invalidate_icache_all(void) { __asm_invalidate_icache_all(); + __asm_invalidate_l3_icache(); } #else /* CONFIG_SYS_ICACHE_OFF */