X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=arch%2Farm%2Fcpu%2Farmv8%2Fcache_v8.c;h=6c5630c0a84c5c49162e414247ea3e9a5ccf755e;hb=8dda2e2f9e0976249f4a390e72f12533dbcb5ef4;hp=b8867a79ce2f0ae4d3ae2b2ea71df7a4b95cce99;hpb=252cdb46ee33ff876823d0ce0a0190c3878c76ff;p=u-boot

diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index b8867a79ce..6c5630c0a8 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -44,7 +44,7 @@ u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
 
 	/* Find the largest address we need to support */
 	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
-		max_addr = max(max_addr, mem_map[i].base + mem_map[i].size);
+		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);
 
 	/* Calculate the maximum physical (and thus virtual) address */
 	if (max_addr > (1ULL << 44)) {
@@ -167,49 +167,6 @@ static void set_pte_table(u64 *pte, u64 *table)
 	*pte = PTE_TYPE_TABLE | (ulong)table;
 }
 
-/* Add one mm_region map entry to the page tables */
-static void add_map(struct mm_region *map)
-{
-	u64 *pte;
-	u64 addr = map->base;
-	u64 size = map->size;
-	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
-	u64 blocksize;
-	int level;
-	u64 *new_table;
-
-	while (size) {
-		pte = find_pte(addr, 0);
-		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
-			debug("Creating table for addr 0x%llx\n", addr);
-			new_table = create_table();
-			set_pte_table(pte, new_table);
-		}
-
-		for (level = 1; level < 4; level++) {
-			pte = find_pte(addr, level);
-			blocksize = 1ULL << level2shift(level);
-			debug("Checking if pte fits for addr=%llx size=%llx "
-			      "blocksize=%llx\n", addr, size, blocksize);
-			if (size >= blocksize && !(addr & (blocksize - 1))) {
-				/* Page fits, create block PTE */
-				debug("Setting PTE %p to block addr=%llx\n",
-				      pte, addr);
-				*pte = addr | attrs;
-				addr += blocksize;
-				size -= blocksize;
-				break;
-			} else if ((pte_type(pte) == PTE_TYPE_FAULT)) {
-				/* Page doesn't fit, create subpages */
-				debug("Creating subtable for addr 0x%llx "
-				      "blksize=%llx\n", addr, blocksize);
-				new_table = create_table();
-				set_pte_table(pte, new_table);
-			}
-		}
-	}
-}
-
 /* Splits a block PTE into table with subpages spanning the old block */
 static void split_block(u64 *pte, int level)
 {
@@ -241,6 +198,58 @@ static void split_block(u64 *pte, int level)
 	set_pte_table(pte, new_table);
 }
 
+/* Add one mm_region map entry to the page tables */
+static void add_map(struct mm_region *map)
+{
+	u64 *pte;
+	u64 virt = map->virt;
+	u64 phys = map->phys;
+	u64 size = map->size;
+	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
+	u64 blocksize;
+	int level;
+	u64 *new_table;
+
+	while (size) {
+		pte = find_pte(virt, 0);
+		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
+			debug("Creating table for virt 0x%llx\n", virt);
+			new_table = create_table();
+			set_pte_table(pte, new_table);
+		}
+
+		for (level = 1; level < 4; level++) {
+			pte = find_pte(virt, level);
+			if (!pte)
+				panic("pte not found\n");
+
+			blocksize = 1ULL << level2shift(level);
+			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
+			      virt, size, blocksize);
+			if (size >= blocksize && !(virt & (blocksize - 1))) {
+				/* Page fits, create block PTE */
+				debug("Setting PTE %p to block virt=%llx\n",
+				      pte, virt);
+				*pte = phys | attrs;
+				virt += blocksize;
+				phys += blocksize;
+				size -= blocksize;
+				break;
+			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
+				/* Page doesn't fit, create subpages */
+				debug("Creating subtable for virt 0x%llx blksize=%llx\n",
+				      virt, blocksize);
+				new_table = create_table();
+				set_pte_table(pte, new_table);
+			} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
+				debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
+				      virt, blocksize);
+				split_block(pte, level);
+			}
+		}
+	}
+}
+
 enum pte_type {
 	PTE_INVAL,
 	PTE_BLOCK,
@@ -265,7 +274,7 @@ static int count_required_pts(u64 addr, int level, u64 maxaddr)
 
 	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
 		struct mm_region *map = &mem_map[i];
-		u64 start = map->base;
+		u64 start = map->virt;
 		u64 end = start + map->size;
 
 		/* Check if the PTE would overlap with the map */
@@ -371,6 +380,7 @@ void setup_pgtables(void)
 static void setup_all_pgtables(void)
 {
 	u64 tlb_addr = gd->arch.tlb_addr;
+	u64 tlb_size = gd->arch.tlb_size;
 
 	/* Reset the fill ptr */
 	gd->arch.tlb_fillptr = tlb_addr;
@@ -379,10 +389,13 @@ static void setup_all_pgtables(void)
 	setup_pgtables();
 
 	/* Create emergency page tables */
+	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
+			     (uintptr_t)gd->arch.tlb_addr;
 	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
 	setup_pgtables();
 	gd->arch.tlb_emerg = gd->arch.tlb_addr;
 	gd->arch.tlb_addr = tlb_addr;
+	gd->arch.tlb_size = tlb_size;
 }
 
 /* to activate the MMU we need to set up virtual memory */
@@ -408,19 +421,20 @@ __weak void mmu_setup(void)
 void invalidate_dcache_all(void)
 {
 	__asm_invalidate_dcache_all();
+	__asm_invalidate_l3_dcache();
 }
 
 /*
  * Performs a clean & invalidation of the entire data cache at all levels.
  * This function needs to be inline to avoid using stack.
- * __asm_flush_l3_cache return status of timeout
+ * __asm_flush_l3_dcache return status of timeout
 */
 inline void flush_dcache_all(void)
 {
 	int ret;
 
 	__asm_flush_dcache_all();
-	ret = __asm_flush_l3_cache();
+	ret = __asm_flush_l3_dcache();
 	if (ret)
 		debug("flushing dcache returns 0x%x\n", ret);
 	else
@@ -610,7 +624,7 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
 
 void icache_enable(void)
 {
-	__asm_invalidate_icache_all();
+	invalidate_icache_all();
 	set_sctlr(get_sctlr() | CR_I);
 }
 
@@ -627,6 +641,7 @@ int icache_status(void)
 void invalidate_icache_all(void)
 {
 	__asm_invalidate_icache_all();
+	__asm_invalidate_l3_icache();
 }
 
 #else	/* CONFIG_SYS_ICACHE_OFF */
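
Usage note: the central idea of the add_map() rework above is that struct mm_region now carries separate virt and phys fields instead of a single base, so the walker can install non-identity mappings, advancing both addresses in lockstep while emitting the largest block PTE that still fits at each level. Below is a minimal sketch of what a board-level map could look like under the new scheme; the region addresses, sizes, and the example_mem_map name are purely illustrative (not taken from any real board file), and the attribute macros are assumed to come from asm/armv8/mmu.h:

#include <asm/armv8/mmu.h>

static struct mm_region example_mem_map[] = {
	{
		/* 2 GiB of DRAM, identity-mapped (virt == phys), cacheable */
		.virt = 0x80000000UL,
		.phys = 0x80000000UL,
		.size = 0x80000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
			 PTE_BLOCK_INNER_SHARE
	}, {
		/* Device MMIO window, strongly ordered, non-shareable */
		.virt = 0x0UL,
		.phys = 0x0UL,
		.size = 0x80000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE
	}, {
		/*
		 * Terminating entry: size and attrs are both zero, which is
		 * what the "mem_map[i].size || mem_map[i].attrs" loop
		 * conditions in get_tcr() and count_required_pts() test for.
		 */
		0,
	}
};

struct mm_region *mem_map = example_mem_map;

Because add_map() writes phys into the block PTE while indexing the tables by virt, an entry whose virt and phys differ is now accepted as well; with the old single base field the two were necessarily equal.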