X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=arch%2Farm%2Flib%2Fcache-cp15.c;h=f0c1b03728be3bca6ff16e0ded0abb4eeb19abae;hb=10e40d54b38b9b1916c35fc5c4ed2eff4b4bf117;hp=0291afa7bd531088f441808e9f9767cceb4d774b;hpb=45043cf804763d4cf93fec93d8294e385f87622a;p=u-boot diff --git a/arch/arm/lib/cache-cp15.c b/arch/arm/lib/cache-cp15.c index 0291afa7bd..f0c1b03728 100644 --- a/arch/arm/lib/cache-cp15.c +++ b/arch/arm/lib/cache-cp15.c @@ -22,23 +22,24 @@ __weak void arm_init_domains(void) { } -static void cp_delay (void) -{ - volatile int i; - - /* copro seems to need some delay between reading and writing */ - for (i = 0; i < 100; i++) - nop(); - asm volatile("" : : : "memory"); -} - void set_section_dcache(int section, enum dcache_option option) { +#ifdef CONFIG_ARMV7_LPAE + u64 *page_table = (u64 *)gd->arch.tlb_addr; + /* Need to set the access flag to not fault */ + u64 value = TTB_SECT_AP | TTB_SECT_AF; +#else u32 *page_table = (u32 *)gd->arch.tlb_addr; - u32 value; + u32 value = TTB_SECT_AP; +#endif - value = (section << MMU_SECTION_SHIFT) | (3 << 10); + /* Add the page offset */ + value |= ((u32)section << MMU_SECTION_SHIFT); + + /* Add caching bits */ value |= option; + + /* Set PTE */ page_table[section] = value; } @@ -50,16 +51,37 @@ __weak void mmu_page_table_flush(unsigned long start, unsigned long stop) void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size, enum dcache_option option) { +#ifdef CONFIG_ARMV7_LPAE + u64 *page_table = (u64 *)gd->arch.tlb_addr; +#else u32 *page_table = (u32 *)gd->arch.tlb_addr; +#endif + unsigned long startpt, stoppt; unsigned long upto, end; end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT; start = start >> MMU_SECTION_SHIFT; - debug("%s: start=%pa, size=%zu, option=%d\n", __func__, &start, size, +#ifdef CONFIG_ARMV7_LPAE + debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size, + option); +#else + debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size, option); +#endif for (upto = start; upto < end; upto++) set_section_dcache(upto, option); - mmu_page_table_flush((u32)&page_table[start], (u32)&page_table[end]); + + /* + * Make sure range is cache line aligned + * Only CPU maintains page tables, hence it is safe to always + * flush complete cache lines... 
+	 */
+
+	startpt = (unsigned long)&page_table[start];
+	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
+	stoppt = (unsigned long)&page_table[end];
+	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
+	mmu_page_table_flush(startpt, stoppt);
 }
 
 __weak void dram_bank_mmu_setup(int bank)
@@ -68,8 +90,9 @@ __weak void dram_bank_mmu_setup(int bank)
 	int i;
 
 	debug("%s: bank: %d\n", __func__, bank);
-	for (i = bd->bi_dram[bank].start >> 20;
-	     i < (bd->bi_dram[bank].start >> 20) + (bd->bi_dram[bank].size >> 20);
+	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
+	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
+		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
 	     i++) {
 #if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
 		set_section_dcache(i, DCACHE_WRITETHROUGH);
@@ -89,16 +112,81 @@ static inline void mmu_setup(void)
 	arm_init_before_mmu();
 
 	/* Set up an identity-mapping for all 4GB, rw for everyone */
-	for (i = 0; i < 4096; i++)
+	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
 		set_section_dcache(i, DCACHE_OFF);
 
 	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
 		dram_bank_mmu_setup(i);
 	}
 
+#ifdef CONFIG_ARMV7_LPAE
+	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
+	for (i = 0; i < 4; i++) {
+		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
+		u64 tpt = gd->arch.tlb_addr + (4096 * i);
+		page_table[i] = tpt | TTB_PAGETABLE;
+	}
+
+	reg = TTBCR_EAE;
+#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
+	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
+#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
+	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
+#else
+	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
+#endif
+
+	if (is_hyp()) {
+		/* Set HTCR to enable LPAE */
+		asm volatile("mcr p15, 4, %0, c2, c0, 2"
+			: : "r" (reg) : "memory");
+		/* Set HTTBR0 */
+		asm volatile("mcrr p15, 4, %0, %1, c2"
+			:
+			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
+			: "memory");
+		/* Set HMAIR */
+		asm volatile("mcr p15, 4, %0, c10, c2, 0"
+			: : "r" (MEMORY_ATTRIBUTES) : "memory");
+	} else {
+		/* Set TTBCR to enable LPAE */
+		asm volatile("mcr p15, 0, %0, c2, c0, 2"
+			: : "r" (reg) : "memory");
+		/* Set 64-bit TTBR0 */
+		asm volatile("mcrr p15, 0, %0, %1, c2"
+			:
+			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
+			: "memory");
+		/* Set MAIR */
+		asm volatile("mcr p15, 0, %0, c10, c2, 0"
+			: : "r" (MEMORY_ATTRIBUTES) : "memory");
+	}
+#elif defined(CONFIG_CPU_V7)
+	if (is_hyp()) {
+		/* Set HTCR to disable LPAE */
+		asm volatile("mcr p15, 4, %0, c2, c0, 2"
+			: : "r" (0) : "memory");
+	} else {
+		/* Set TTBCR to disable LPAE */
+		asm volatile("mcr p15, 0, %0, c2, c0, 2"
+			: : "r" (0) : "memory");
+	}
+	/* Set TTBR0 */
+	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
+#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
+	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
+#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
+	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
+#else
+	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
+#endif
+	asm volatile("mcr p15, 0, %0, c2, c0, 0"
+		     : : "r" (reg) : "memory");
+#else
 	/* Copy the page table address to cp15 */
 	asm volatile("mcr p15, 0, %0, c2, c0, 0"
 		     : : "r" (gd->arch.tlb_addr) : "memory");
+#endif
 
 	/* Set the access control to all-supervisor */
 	asm volatile("mcr p15, 0, %0, c3, c0, 0" : : "r" (~0));
@@ -107,7 +195,6 @@ static inline void mmu_setup(void)
 	/* and enable the mmu */
 	reg = get_cr();	/* get control reg. */
-	cp_delay();
 	set_cr(reg | CR_M);
 }
 
@@ -125,7 +212,6 @@ static void cache_enable(uint32_t cache_bit)
 	if ((cache_bit == CR_C) && !mmu_enabled())
 		mmu_setup();
 	reg = get_cr();	/* get control reg. */
-	cp_delay();
 	set_cr(reg | cache_bit);
 }
 
@@ -135,17 +221,16 @@ static void cache_disable(uint32_t cache_bit)
 	uint32_t reg;
 
 	reg = get_cr();
-	cp_delay();
 
 	if (cache_bit == CR_C) {
 		/* if cache isn't enabled no need to disable */
 		if ((reg & CR_C) != CR_C)
			return;
 		/* if disabling data cache, disable mmu too */
 		cache_bit |= CR_M;
 	}
 	reg = get_cr();
-	cp_delay();
+
 	if (cache_bit == (CR_C | CR_M))
 		flush_dcache_all();
 	set_cr(reg & ~cache_bit);
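
A few notes on the changes above.

set_section_dcache() now builds each first-level descriptor from named pieces instead of the old magic "(3 << 10)": the access-permission bits, then the section base address, then the caching attribute bits passed in as enum dcache_option. A minimal standalone sketch of the non-LPAE (short-descriptor) composition, not part of the patch: section_entry is a hypothetical helper, and the two DCACHE_* values are illustrative stand-ins for the real enum.

	#include <stdint.h>
	#include <stdio.h>

	#define MMU_SECTION_SHIFT	20		/* 1 MiB sections */
	#define TTB_SECT_AP		(3 << 10)	/* AP bits, the old "(3 << 10)" literal */
	#define DCACHE_OFF		0x12		/* illustrative: section type, XN, uncached */
	#define DCACHE_WRITEBACK	0x1e		/* illustrative: section type plus C/B bits */

	/* Compose a section entry the same way set_section_dcache() does */
	static uint32_t section_entry(int section, uint32_t option)
	{
		uint32_t value = TTB_SECT_AP;	/* access permissions */

		/* Add the section base address, then the caching bits */
		value |= (uint32_t)section << MMU_SECTION_SHIFT;
		value |= option;
		return value;
	}

	int main(void)
	{
		/* Section 0x123 maps 0x12300000 write-back; prints 0x12300c1e */
		printf("0x%08x\n", section_entry(0x123, DCACHE_WRITEBACK));
		return 0;
	}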
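The startpt/stoppt arithmetic in mmu_set_region_dcache_behaviour() exists because dcache maintenance operates on whole cache lines: rounding the start down and the end up guarantees no partially covered line is skipped, and over-flushing is harmless here since only the CPU writes the page tables. The same rounding in isolation (the cache-line size and the two addresses are made-up example values; ALIGN mirrors u-boot's round-up macro):

	#include <stdio.h>

	#define CACHELINE	64	/* stand-in for CONFIG_SYS_CACHELINE_SIZE */
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		unsigned long startpt = 0x80004004;	/* &page_table[start], mid-line */
		unsigned long stoppt  = 0x80004abc;	/* &page_table[end], mid-line */

		startpt &= ~(unsigned long)(CACHELINE - 1);	/* round down */
		stoppt = ALIGN(stoppt, CACHELINE);		/* round up */

		/* Prints: flush 0x80004000..0x80004ac0 */
		printf("flush 0x%lx..0x%lx\n", startpt, stoppt);
		return 0;
	}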
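Finally, the identity-map loop bound in mmu_setup() is now derived instead of hard-coded: (4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT is 4 GiB divided by the section size, which is still 4096 iterations with classic 1 MiB sections (shift of 20) but drops to 2048 under CONFIG_ARMV7_LPAE, where a section is 2 MiB (shift of 21). The same loop therefore covers both translation-table formats.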