/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <asm/arch/mp.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
#include <asm/armv8/sec_firmware.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

struct mm_region *mem_map = early_map;
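
/*
 * mem_map is consumed by the generic ARMv8 MMU code in
 * arch/arm/cpu/armv8/cache_v8.c when it builds the translation tables;
 * it starts out pointing at the early, OCRAM-sized region list and is
 * switched over to final_map once DDR can be used.
 */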

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}
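
/*
 * Note: SVR_WO_E masks off the "E" bit of the SVR so that both the
 * crypto-enabled (E) and non-E personalities of a part match the same
 * cpu_type_list entry; IS_E_PROCESSOR() then restores the suffix in the
 * printed name.
 */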

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start MMU before DDR is available, we create MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note: the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already set up, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* Point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
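
/*
 * CR_M is the SCTLR_ELx.M bit, so the final set_sctlr() call above turns
 * the MMU on at the current exception level. The TCR_ORGN/TCR_IRGN fields
 * are cleared so that translation table walks are non-cacheable, matching
 * the fact that the early tables are written while the data cache is
 * still off.
 */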

/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	int index;
#endif

	mem_map = final_map;

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Flush new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* Point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/*
	 * The EL3 MMU is already enabled; we only need to invalidate the TLB
	 * to load the new table. The new table is compatible with the current
	 * table: even if the MMU somehow walks through the new table before
	 * the TLB is invalidated, it still works. So we don't need to turn
	 * off the MMU here.
	 * When the EL2 MMU table is created by calling this function, the MMU
	 * needs to be enabled.
	 */
	set_sctlr(get_sctlr() | CR_M);
}
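
/*
 * gd->arch.tlb_emerg records a second, "emergency" set of tables built
 * right after the normal ones; the common cache_v8.c code switches to
 * them when it has to rewrite entries in the live tables
 * (break-before-make), so the active translation regime is never left
 * in a torn state.
 */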

u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
}
#endif

static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
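
/*
 * TP_ITYP_AV is the "available" flag in the Topology Initiator Type
 * register; initiators that are not present or are disabled (e.g. fused
 * off) read back without it, so initiator_type() reports them as 0.
 */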

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}
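
/* hweight32() is a population count, so the core count is simply the
 * number of bits set in the ARM-core mask built by cpu_mask().
 */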

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

unsigned int get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");
	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
			(type == TY_ITYP_VER_A57 ? "A57" :
			 (type == TY_ITYP_VER_A72 ? "A72" : " "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n Bus: %-4s MHz ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf(" DP-DDR: %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}
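
/*
 * fsl_mc_ldpaa_init() brings up the DPAA2 Management Complex based
 * network interfaces, while fm_standard_init() handles the DPAA1 FMan
 * ports; which one runs is purely a matter of the SoC-specific config
 * options above.
 */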

int arch_early_init_r(void)
{
	int rv = 1;
	u32 psci_ver = 0xffffffff;

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	erratum_a009635();
#endif

#if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && defined(CONFIG_ARMV8_PSCI)
	/* Check the PSCI version to determine if PSCI is supported */
	psci_ver = sec_firmware_support_psci_version();
#endif
	if (psci_ver == 0xffffffff) {
		rv = fsl_layerscape_wake_seconday_cores();
		if (rv)
			printf("Did not wake secondary cores\n");
	}

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}
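
/*
 * psci_ver keeps its 0xffffffff sentinel when no PSCI-capable secure
 * firmware is available; in that case U-Boot releases the secondary
 * cores itself through the spin-table code behind
 * fsl_layerscape_wake_seconday_cores() (see mp.c in this directory).
 */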

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef CONFIG_LS2080A
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with the accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable the timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#ifdef CONFIG_LS2080A
	/*
	 * In certain Layerscape SoCs, the clock for each core
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
#endif

	/*
	 * Enable the clock for the timer. This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
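
/*
 * Writing CNTFRQ_EL0 above only tells software what the generic timer
 * counter frequency is; it does not change the counter itself, so
 * COUNTER_FREQUENCY_REAL must match the rate the system counter actually
 * runs at. Bit 0 of CNTCR is the EN bit that starts the memory-mapped
 * system counter.
 */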

void reset_cpu(ulong addr)
{
	u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}

phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#ifdef CONFIG_SYS_MEM_TOP_HIDE
#error CONFIG_SYS_MEM_TOP_HIDE must not be used together with this function
#endif

	/* Carve the MC private DRAM block from the end of DRAM */
#ifdef CONFIG_FSL_MC_ENET
	ram_top -= mc_get_dram_block_size();
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_top;
}
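
/*
 * Masking with ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1) rounds the new top of
 * RAM down, which assumes the alignment is a power of two. The value
 * returned here becomes the effective RAM top, so the MC private block
 * ends up above the memory U-Boot itself manages.
 */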