X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=arch%2Farm%2Fcpu%2Farmv8%2Fcache_v8.c;h=6c5630c0a84c5c49162e414247ea3e9a5ccf755e;hb=8dda2e2f9e0976249f4a390e72f12533dbcb5ef4;hp=73628c96113614487c0247d0ce6f4f04e5d91487;hpb=5e2ec773bb6c5acf22d8652112856e87cff86ea4;p=u-boot

diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index 73628c9611..6c5630c0a8 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -35,10 +35,7 @@ DECLARE_GLOBAL_DATA_PTR;
  *  off:   FFF
  */
 
-#ifdef CONFIG_SYS_FULL_VA
-static struct mm_region mem_map[] = CONFIG_SYS_MEM_MAP;
-
-static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
+u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
 {
 	u64 max_addr = 0;
 	u64 ips, va_bits;
@@ -46,8 +43,8 @@ static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
 	int i;
 
 	/* Find the largest address we need to support */
-	for (i = 0; i < ARRAY_SIZE(mem_map); i++)
-		max_addr = max(max_addr, mem_map[i].base + mem_map[i].size);
+	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
+		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);
 
 	/* Calculate the maximum physical (and thus virtual) address */
 	if (max_addr > (1ULL << 44)) {
@@ -170,49 +167,6 @@ static void set_pte_table(u64 *pte, u64 *table)
 	*pte = PTE_TYPE_TABLE | (ulong)table;
 }
 
-/* Add one mm_region map entry to the page tables */
-static void add_map(struct mm_region *map)
-{
-	u64 *pte;
-	u64 addr = map->base;
-	u64 size = map->size;
-	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
-	u64 blocksize;
-	int level;
-	u64 *new_table;
-
-	while (size) {
-		pte = find_pte(addr, 0);
-		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
-			debug("Creating table for addr 0x%llx\n", addr);
-			new_table = create_table();
-			set_pte_table(pte, new_table);
-		}
-
-		for (level = 1; level < 4; level++) {
-			pte = find_pte(addr, level);
-			blocksize = 1ULL << level2shift(level);
-			debug("Checking if pte fits for addr=%llx size=%llx "
-			      "blocksize=%llx\n", addr, size, blocksize);
-			if (size >= blocksize && !(addr & (blocksize - 1))) {
-				/* Page fits, create block PTE */
-				debug("Setting PTE %p to block addr=%llx\n",
-				      pte, addr);
-				*pte = addr | attrs;
-				addr += blocksize;
-				size -= blocksize;
-				break;
-			} else if ((pte_type(pte) == PTE_TYPE_FAULT)) {
-				/* Page doesn't fit, create subpages */
-				debug("Creating subtable for addr 0x%llx "
-				      "blksize=%llx\n", addr, blocksize);
-				new_table = create_table();
-				set_pte_table(pte, new_table);
-			}
-		}
-	}
-}
-
 /* Splits a block PTE into table with subpages spanning the old block */
 static void split_block(u64 *pte, int level)
 {
@@ -244,6 +198,58 @@ static void split_block(u64 *pte, int level)
 	set_pte_table(pte, new_table);
 }
 
+/* Add one mm_region map entry to the page tables */
+static void add_map(struct mm_region *map)
+{
+	u64 *pte;
+	u64 virt = map->virt;
+	u64 phys = map->phys;
+	u64 size = map->size;
+	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
+	u64 blocksize;
+	int level;
+	u64 *new_table;
+
+	while (size) {
+		pte = find_pte(virt, 0);
+		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
+			debug("Creating table for virt 0x%llx\n", virt);
+			new_table = create_table();
+			set_pte_table(pte, new_table);
+		}
+
+		for (level = 1; level < 4; level++) {
+			pte = find_pte(virt, level);
+			if (!pte)
+				panic("pte not found\n");
+
+			blocksize = 1ULL << level2shift(level);
+			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
+			      virt, size, blocksize);
+			if (size >= blocksize && !(virt & (blocksize - 1))) {
+				/* Page fits, create block PTE */
debug("Setting PTE %p to block virt=%llx\n", + pte, virt); + *pte = phys | attrs; + virt += blocksize; + phys += blocksize; + size -= blocksize; + break; + } else if (pte_type(pte) == PTE_TYPE_FAULT) { + /* Page doesn't fit, create subpages */ + debug("Creating subtable for virt 0x%llx blksize=%llx\n", + virt, blocksize); + new_table = create_table(); + set_pte_table(pte, new_table); + } else if (pte_type(pte) == PTE_TYPE_BLOCK) { + debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n", + virt, blocksize); + split_block(pte, level); + } + } + } +} + enum pte_type { PTE_INVAL, PTE_BLOCK, @@ -266,9 +272,9 @@ static int count_required_pts(u64 addr, int level, u64 maxaddr) int i; enum pte_type pte_type = PTE_INVAL; - for (i = 0; i < ARRAY_SIZE(mem_map); i++) { + for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) { struct mm_region *map = &mem_map[i]; - u64 start = map->base; + u64 start = map->virt; u64 end = start + map->size; /* Check if the PTE would overlap with the map */ @@ -323,7 +329,7 @@ static int count_required_pts(u64 addr, int level, u64 maxaddr) } /* Returns the estimated required size of all page tables */ -u64 get_page_table_size(void) +__weak u64 get_page_table_size(void) { u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64); u64 size = 0; @@ -352,10 +358,13 @@ u64 get_page_table_size(void) return size; } -static void setup_pgtables(void) +void setup_pgtables(void) { int i; + if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr) + panic("Page table pointer not setup."); + /* * Allocate the first level we're on with invalidate entries. * If the starting level is 0 (va_bits >= 39), then this is our @@ -364,16 +373,14 @@ static void setup_pgtables(void) create_table(); /* Now add all MMU table entries one after another to the table */ - for (i = 0; i < ARRAY_SIZE(mem_map); i++) + for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) add_map(&mem_map[i]); - - /* Create the same thing once more for our emergency page table */ - create_table(); } static void setup_all_pgtables(void) { u64 tlb_addr = gd->arch.tlb_addr; + u64 tlb_size = gd->arch.tlb_size; /* Reset the fill ptr */ gd->arch.tlb_fillptr = tlb_addr; @@ -382,44 +389,20 @@ static void setup_all_pgtables(void) setup_pgtables(); /* Create emergency page tables */ + gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr - + (uintptr_t)gd->arch.tlb_addr; gd->arch.tlb_addr = gd->arch.tlb_fillptr; setup_pgtables(); gd->arch.tlb_emerg = gd->arch.tlb_addr; gd->arch.tlb_addr = tlb_addr; + gd->arch.tlb_size = tlb_size; } -#else - -inline void set_pgtable_section(u64 *page_table, u64 index, u64 section, - u64 memory_type, u64 attribute) -{ - u64 value; - - value = section | PMD_TYPE_SECT | PMD_SECT_AF; - value |= PMD_ATTRINDX(memory_type); - value |= attribute; - page_table[index] = value; -} - -inline void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr) -{ - u64 value; - - value = (u64)table_addr | PMD_TYPE_TABLE; - page_table[index] = value; -} -#endif - /* to activate the MMU we need to set up virtual memory */ __weak void mmu_setup(void) { -#ifndef CONFIG_SYS_FULL_VA - bd_t *bd = gd->bd; - u64 *page_table = (u64 *)gd->arch.tlb_addr, i, j; -#endif int el; -#ifdef CONFIG_SYS_FULL_VA /* Set up page tables only once */ if (!gd->arch.tlb_fillptr) setup_all_pgtables(); @@ -427,40 +410,6 @@ __weak void mmu_setup(void) el = current_el(); set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL), MEMORY_ATTRIBUTES); -#else - /* Setup an identity-mapping for all spaces */ - for (i = 0; i < (PGTABLE_SIZE >> 3); i++) { - 
-		set_pgtable_section(page_table, i, i << SECTION_SHIFT,
-				MT_DEVICE_NGNRNE, PMD_SECT_NON_SHARE);
-	}
-
-	/* Setup an identity-mapping for all RAM space */
-	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
-		ulong start = bd->bi_dram[i].start;
-		ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;
-		for (j = start >> SECTION_SHIFT;
-		     j < end >> SECTION_SHIFT; j++) {
-			set_pgtable_section(page_table, j, j << SECTION_SHIFT,
-					    MT_NORMAL, PMD_SECT_NON_SHARE);
-		}
-	}
-
-	/* load TTBR0 */
-	el = current_el();
-	if (el == 1) {
-		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
-				  TCR_EL1_RSVD | TCR_FLAGS | TCR_EL1_IPS_BITS,
-				  MEMORY_ATTRIBUTES);
-	} else if (el == 2) {
-		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
-				  TCR_EL2_RSVD | TCR_FLAGS | TCR_EL2_IPS_BITS,
-				  MEMORY_ATTRIBUTES);
-	} else {
-		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
-				  TCR_EL3_RSVD | TCR_FLAGS | TCR_EL3_IPS_BITS,
-				  MEMORY_ATTRIBUTES);
-	}
-#endif
 
 	/* enable the mmu */
 	set_sctlr(get_sctlr() | CR_M);
@@ -472,19 +421,20 @@ __weak void mmu_setup(void)
 
 void invalidate_dcache_all(void)
 {
 	__asm_invalidate_dcache_all();
+	__asm_invalidate_l3_dcache();
 }
 
 /*
  * Performs a clean & invalidation of the entire data cache at all levels.
  * This function needs to be inline to avoid using stack.
- * __asm_flush_l3_cache return status of timeout
+ * __asm_flush_l3_dcache return status of timeout
  */
 inline void flush_dcache_all(void)
 {
 	int ret;
 
 	__asm_flush_dcache_all();
-	ret = __asm_flush_l3_cache();
+	ret = __asm_flush_l3_dcache();
 	if (ret)
 		debug("flushing dcache returns 0x%x\n", ret);
 	else
@@ -546,33 +496,6 @@ u64 *__weak arch_get_page_table(void) {
 	return NULL;
 }
 
-#ifndef CONFIG_SYS_FULL_VA
-void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
-				     enum dcache_option option)
-{
-	u64 *page_table = arch_get_page_table();
-	u64 upto, end;
-
-	if (page_table == NULL)
-		return;
-
-	end = ALIGN(start + size, (1 << MMU_SECTION_SHIFT)) >>
-			MMU_SECTION_SHIFT;
-	start = start >> MMU_SECTION_SHIFT;
-	for (upto = start; upto < end; upto++) {
-		page_table[upto] &= ~PMD_ATTRINDX_MASK;
-		page_table[upto] |= PMD_ATTRINDX(option);
-	}
-	asm volatile("dsb sy");
-	__asm_invalidate_tlb_all();
-	asm volatile("dsb sy");
-	asm volatile("isb");
-	start = start << MMU_SECTION_SHIFT;
-	end = end << MMU_SECTION_SHIFT;
-	flush_dcache_range(start, end);
-	asm volatile("dsb sy");
-}
-#else
 static bool is_aligned(u64 addr, u64 size, u64 align)
 {
 	return !(addr & (align - 1)) && !(size & (align - 1));
@@ -618,6 +541,9 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
 
 	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);
 
+	if (!gd->arch.tlb_emerg)
+		panic("Emergency page table not setup.");
+
 	/*
 	 * We can not modify page tables that we're currently running on,
 	 * so we first need to switch to the "emergency" page tables where
@@ -654,10 +580,18 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
 	 */
 	flush_dcache_range(real_start, real_start + real_size);
 }
-#endif
 
 #else	/* CONFIG_SYS_DCACHE_OFF */
 
+/*
+ * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
+ * running however really wants to have dcache and the MMU active. Check that
+ * everything is sane and give the developer a hint if it isn't.
+ */
+#ifndef CONFIG_SPL_BUILD
+#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
+#endif + void invalidate_dcache_all(void) { } @@ -690,7 +624,7 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size, void icache_enable(void) { - __asm_invalidate_icache_all(); + invalidate_icache_all(); set_sctlr(get_sctlr() | CR_I); } @@ -707,6 +641,7 @@ int icache_status(void) void invalidate_icache_all(void) { __asm_invalidate_icache_all(); + __asm_invalidate_l3_icache(); } #else /* CONFIG_SYS_ICACHE_OFF */
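
Note on the memory map this change expects: the core code above no longer instantiates mem_map[] from CONFIG_SYS_MEM_MAP itself; get_tcr(), count_required_pts() and setup_pgtables() all walk an externally provided table of struct mm_region entries and stop at the first entry whose size and attrs are both zero (the "mem_map[i].size || mem_map[i].attrs" loop condition). A minimal sketch of such a table is shown below. The field names virt, phys, size and attrs come straight from the diff; the attribute macros PTE_BLOCK_MEMTYPE(), PTE_BLOCK_NON_SHARE and PTE_BLOCK_INNER_SHARE are assumed to come from asm/armv8/mmu.h, and the array name, addresses and sizes are purely illustrative, not taken from any particular board. How the table gets hooked up to the mem_map symbol used above is board and series specific.

#include <asm/armv8/mmu.h>

/*
 * Hypothetical board memory map: one device region mapped as strongly
 * ordered device memory and one DRAM region mapped as normal, cacheable
 * memory. The all-zero entry terminates the list, which is exactly what
 * the "mem_map[i].size || mem_map[i].attrs" loops above test for.
 */
struct mm_region example_mem_map[] = {
	{
		/* Peripherals, identity mapped */
		.virt = 0x0UL,
		.phys = 0x0UL,
		.size = 0x80000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE,
	}, {
		/* DRAM, identity mapped */
		.virt = 0x80000000UL,
		.phys = 0x80000000UL,
		.size = 0x80000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
			 PTE_BLOCK_INNER_SHARE,
	}, {
		/* List terminator */
		0,
	}
};

Ending the table with an explicit all-zero entry lets all of the loops above share one end-of-table test instead of relying on ARRAY_SIZE() of an array that is no longer defined in this file.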