/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <asm/arch/mp.h>
#include <fsl_debug_server.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif

DECLARE_GLOBAL_DATA_PTR;
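
/*
 * Look up the SoC name for the current SVR in cpu_type_list. "E" is
 * appended when IS_E_PROCESSOR(svr) is set, and the name falls back to
 * "unknown" when the SoC version is not recognized.
 */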
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * Set the block entries according to the information of the table.
 */
static int set_block_entry(const struct sys_mmu_table *list,
			   struct table_info *table)
{
	u64 block_size = 0, block_shift = 0;
	u64 block_addr, index;
	int j;

	if (table->entry_size == BLOCK_SIZE_L1) {
		block_size = BLOCK_SIZE_L1;
		block_shift = SECTION_SHIFT_L1;
	} else if (table->entry_size == BLOCK_SIZE_L2) {
		block_size = BLOCK_SIZE_L2;
		block_shift = SECTION_SHIFT_L2;
	} else {
		return -EINVAL;
	}

	block_addr = list->phys_addr;
	index = (list->virt_addr - table->table_base) >> block_shift;

	for (j = 0; j < (list->size >> block_shift); j++) {
		set_pgtable_section(table->ptr, index, block_addr,
				    list->memory_type, list->attribute);
		block_addr += block_size;
		index++;
	}

	return 0;
}

/*
 * Find the corresponding table entry for the list.
 */
static int find_table(const struct sys_mmu_table *list,
		      struct table_info *table, u64 *level0_table)
{
	u64 index = 0, level = 0;
	u64 *level_table = level0_table;
	u64 temp_base = 0, block_size = 0, block_shift = 0;

	while (level < 3) {
		if (level == 0) {
			block_size = BLOCK_SIZE_L0;
			block_shift = SECTION_SHIFT_L0;
		} else if (level == 1) {
			block_size = BLOCK_SIZE_L1;
			block_shift = SECTION_SHIFT_L1;
		} else if (level == 2) {
			block_size = BLOCK_SIZE_L2;
			block_shift = SECTION_SHIFT_L2;
		}

		index = 0;
		while (list->virt_addr >= temp_base) {
			index++;
			temp_base += block_size;
		}
		temp_base -= block_size;

		if ((level_table[index - 1] & PMD_TYPE_MASK) ==
		    PMD_TYPE_TABLE) {
			level_table = (u64 *)(level_table[index - 1] &
					      ~PMD_TYPE_MASK);
			level++;
			continue;
		} else {
			if (level == 0)
				return -EINVAL;
			if ((list->phys_addr + list->size) >
			    (temp_base + block_size * NUM_OF_ENTRY))
				return -EINVAL;
			/*
			 * Check that the address and size of the list member
			 * are aligned with the block size.
			 */
			if (((list->phys_addr & (block_size - 1)) != 0) ||
			    ((list->size & (block_size - 1)) != 0))
				return -EINVAL;

			table->ptr = level_table;
			table->table_base = temp_base -
					    ((index - 1) << block_shift);
			table->entry_size = block_size;
			return 0;
		}
	}
	return -EINVAL;
}

/*
 * To start the MMU before DDR is available, we create an MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover a 40-bit address space.
 * We use a 4KB granule size with 40-bit physical addresses and T0SZ=24:
 * Level 0 IA[39], table address @0
 * Level 1 IA[38:30], table address @0x1000, 0x2000
 * Level 2 IA[29:21], table address @0x3000, 0x4000
 * Addresses above 0x5000 are free for other purposes.
 */
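/*
 * With a 4KB granule and T0SZ = 24, the translated input address space is
 * 2^(64 - 24) = 2^40 bytes (1TB). A level 0 entry therefore covers 512GB,
 * a level 1 entry 1GB and a level 2 entry 2MB, which is why one level 0
 * table, two level 1 tables and two level 2 tables (0x1000 bytes each,
 * 0x5000 bytes in total) are enough for the early mapping.
 */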
static inline void early_mmu_setup(void)
{
	unsigned int el, i;
	u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
	u64 *level1_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
	u64 *level1_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
	u64 *level2_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);
	u64 *level2_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000);
	struct table_info table = {level0_table, 0, BLOCK_SIZE_L0};

	/* Invalidate all table entries */
	memset(level0_table, 0, 0x5000);

	/* Fill in the table entries */
	set_pgtable_table(level0_table, 0, level1_table0);
	set_pgtable_table(level0_table, 1, level1_table1);
	set_pgtable_table(level1_table0, 0, level2_table0);
#ifdef CONFIG_FSL_LSCH3
	set_pgtable_table(level1_table0,
			  CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1,
			  level2_table1);
#endif
	/* Find the table and fill in the block entries */
	for (i = 0; i < ARRAY_SIZE(early_mmu_table); i++) {
		if (find_table(&early_mmu_table[i],
			       &table, level0_table) == 0) {
			/*
			 * If find_table() returns an error, it cannot be
			 * dealt with here. A breakpoint can be added for
			 * debugging.
			 */
			set_block_entry(&early_mmu_table[i], &table);
			/*
			 * If set_block_entry() returns an error, it cannot
			 * be dealt with here either.
			 */
		}
	}

	el = current_el();
	set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR,
			  MEMORY_ATTRIBUTES);
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * The final tables look similar to the early tables, but differ in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Level 1 table 0 contains 512 entries for each 1GB from 0 to 512GB.
 * Level 1 table 1 contains 512 entries for each 1GB from 512GB to 1TB.
 * Level 2 table 0 contains 512 entries for each 2MB from 0 to 1GB.
 * Level 2 table 1 contains 512 entries for each 2MB from 32GB to 33GB.
 */
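/*
 * A level 1 entry can only map a whole 1GB block, so mapping part of a 1GB
 * region with different attributes (as done below for the region starting
 * at CONFIG_SYS_FSL_QBMAN_BASE) requires replacing that entry with a level 2
 * table whose 2MB entries can be set individually; that is what
 * level2_table1 provides for the 32GB-33GB window.
 */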
static inline void final_mmu_setup(void)
{
	unsigned int el, i;
	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
	u64 *level1_table0 = (u64 *)(gd->arch.tlb_addr + 0x1000);
	u64 *level1_table1 = (u64 *)(gd->arch.tlb_addr + 0x2000);
	u64 *level2_table0 = (u64 *)(gd->arch.tlb_addr + 0x3000);
#ifdef CONFIG_FSL_LSCH3
	u64 *level2_table1 = (u64 *)(gd->arch.tlb_addr + 0x4000);
#endif
	struct table_info table = {level0_table, 0, BLOCK_SIZE_L0};

	/* Invalidate all table entries */
	memset(level0_table, 0, PGTABLE_SIZE);

	/* Fill in the table entries */
	set_pgtable_table(level0_table, 0, level1_table0);
	set_pgtable_table(level0_table, 1, level1_table1);
	set_pgtable_table(level1_table0, 0, level2_table0);
#ifdef CONFIG_FSL_LSCH3
	set_pgtable_table(level1_table0,
			  CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1,
			  level2_table1);
#endif

	/* Find the table and fill in the block entries */
	for (i = 0; i < ARRAY_SIZE(final_mmu_table); i++) {
		if (find_table(&final_mmu_table[i],
			       &table, level0_table) == 0) {
			if (set_block_entry(&final_mmu_table[i],
					    &table) != 0)
				printf("MMU error: could not set block entry for %p\n",
				       &final_mmu_table[i]);
		} else {
			printf("MMU error: could not find the table for %p\n",
			       &final_mmu_table[i]);
		}
	}

	/* flush new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	el = current_el();
	set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR_FINAL,
			  MEMORY_ATTRIBUTES);
	/*
	 * The MMU is already enabled; we only need to invalidate the TLB to
	 * load the new table. The new table is compatible with the current
	 * one, so even if the MMU walks the new table before the TLB is
	 * invalidated, it still works, and there is no need to turn the MMU
	 * off here.
	 */
}

int arch_cpu_init(void)
{
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

/*
 * This function is called from lib/board.c. It recreates the MMU table in
 * main memory. The MMU and d-cache are enabled earlier, so there is no need
 * to disable the d-cache for this operation.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
}
#endif
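
/*
 * Decode one initiator slot of a cluster from the Topology Initiator Type
 * (tp_ityp) table: 'cluster' carries an initiator index every 8 bits, and
 * the indexed type is returned only when its TP_ITYP_AV (available) bit is
 * set; otherwise 0 is returned.
 */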
static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = gur_in32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;
	return 0;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, j, count = 0;
	u32 cluster, type, mask = 0;

	do {
		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}
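
/*
 * Walk the topology clusters in the same order as cpu_mask() and return the
 * cluster number the given logical core belongs to, or -1 if it cannot be
 * found.
 */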
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, j, count = 0;
	u32 cluster;

	do {
		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
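
/*
 * Walk the topology clusters and return the raw initiator type word of the
 * given logical core, or -1 if it cannot be found.
 */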
u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, j, count = 0;
	u32 cluster, type;

	do {
		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
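
/*
 * Print the SoC name and SVR, the type and frequency of each online core,
 * the bus and DDR clocks, and the Reset Configuration Word.
 */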
#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw;

	puts("SoC: ");
	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, gur_in32(&gur->svr));
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" : "   ")),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_FSL_LSCH3
	printf("     DP-DDR:  %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus2));
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif
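
/* Register the on-chip eSDHC controller with the generic MMC framework. */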
#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
	return error;
}
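
/*
 * Post-relocation early init: wake the secondary cores, reporting a failure
 * if they do not come up, and run SerDes setup when the platform has SerDes.
 */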
int arch_early_init_r(void)
{
	int rv;

	rv = fsl_layerscape_wake_secondary_cores();
	if (rv)
		printf("Did not wake secondary cores\n");

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
	return 0;
}
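
/*
 * Generic timer setup: program cntfrq_el0 with the real counter frequency
 * when COUNTER_FREQUENCY_REAL is defined, enable the cluster timebases on
 * LSCH3 parts, and enable the system counter.
 */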
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with the accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable the timebase for all clusters. It is safe to do so even if
	 * some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

	/*
	 * Enable the clock for the timer. This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
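
/* Reset the SoC by raising RESET_REQ_B in the reset control register. */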
void reset_cpu(ulong addr)
{
	u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}