/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fm_eth.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
DECLARE_GLOBAL_DATA_PTR;

struct mm_region *mem_map = early_map;
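/*
 * Read the SVR (System Version Register), match it against cpu_type_list,
 * and format the SoC name, appending "E" for encryption-capable parts and
 * the silicon major/minor revision.
 */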
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}
#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create an MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40-bit physical addresses and T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note: the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already set up, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* Point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
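/*
 * LS2088A-family personalities (LS2088A/LS2084A/LS2048A/LS2044A) place
 * their PCIe windows at different physical addresses than LS2080A, so
 * patch final_map in place before the block entries are created.
 */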
static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIe base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			default:
				break;
			}
		}
	}
#endif
}
/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for the MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* Point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
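/*
 * Fixed budget for the page tables; both the normal and the emergency
 * tables created in final_mmu_setup() must fit within this allocation.
 */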
u64 get_page_table_size(void)
{
	return 0x10000;
}
int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}
/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif
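/*
 * Return the type word from gur->tp_ityp[] for initiator init_id of the
 * given topology cluster word, or 0 if that initiator is not available.
 */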
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
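/*
 * Return a mask of available ARM cores, one bit per physical core
 * position (cluster index * TP_INIT_PER_CLUSTER + initiator index).
 */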
u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}
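/*
 * Unlike cpu_pos_mask(), which keeps each core's physical position, this
 * mask numbers ARM cores sequentially, skipping unavailable initiators.
 */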
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}
/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}
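/* Map a logical core number to the index of its physical cluster. */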
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
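/* Return the raw initiator-type word for a logical core number. */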
u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif
#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif
#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif
int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}
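/* Return 1 when the secure firmware does not report a valid PSCI version. */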
static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}
int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * Erratum A009635 is valid only for the LS2080A SoC and
	 * its personalities.
	 */
	svr_dev_id = get_svr() >> 16;
	if (svr_dev_id == SVR_DEV_LS2080A)
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}
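/*
 * Set the generic timer's counter frequency, enable the cluster/core
 * timebases where the SoC requires it, then start the global counter.
 */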
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef CONFIG_ARCH_LS2080A
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#ifdef CONFIG_ARCH_LS2080A
	/*
	 * In certain Layerscape SoCs, the clock for each core
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr() >> 16;
	if (svr_dev_id == SVR_DEV_LS2080A)
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/* Enable clock for timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}
#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

void efi_reset_system_init(void)
{
	efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif
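/*
 * Reserve room at the top of DRAM for the Management Complex: drop the
 * top by the MC block size, align it down, and return how much was
 * carved off.
 */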
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#ifdef CONFIG_FSL_MC_ENET
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top -= mc_get_dram_block_size();
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}
phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB space at 0x8000_0000. If the memory extends to
	 * the second region (or the third region if applicable), the secure
	 * memory and Management Complex (MC) memory should be put into the
	 * highest region, i.e. the end of DDR memory. CONFIG_MAX_MEM_MAPPED
	 * is set to the size of the first region so U-Boot doesn't relocate
	 * itself into a higher address. Should DDR be configured to skip the
	 * first region, this function needs to be adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (rem > CONFIG_SYS_MEM_RESERVE_SECURE) {
		rem -= CONFIG_SYS_MEM_RESERVE_SECURE;
	} else {
		if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE) {
			ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
			rem = 0;	/* Presume MC requires more memory */
		} else {
			printf("Error: Not enough space for secure memory.\n");
		}
	}
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(rem))
			ea_size -= board_reserve_ram_top(rem);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}
int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from low region to high region(s) presuming
	 * no hole is created with DDR configuration. gd->arch.secure_ram
	 * tracks the location of secure memory. gd->arch.resv_ram tracks the
	 * location of reserved memory for Management Complex (MC).
	 */
	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >= CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[2].size -= CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[2].start +
				      gd->bd->bi_dram[2].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >= CONFIG_SYS_MEM_RESERVE_SECURE) {
			gd->bd->bi_dram[1].size -=
					CONFIG_SYS_MEM_RESERVE_SECURE;
			gd->arch.secure_ram = gd->bd->bi_dram[1].start +
					      gd->bd->bi_dram[1].size;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
			gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
		} else if (gd->bd->bi_dram[0].size >
					CONFIG_SYS_MEM_RESERVE_SECURE) {
			gd->bd->bi_dram[0].size -=
					CONFIG_SYS_MEM_RESERVE_SECURE;
			gd->arch.secure_ram = gd->bd->bi_dram[0].start +
					      gd->bd->bi_dram[0].size;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
			gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
		}
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#ifdef CONFIG_FSL_MC_ENET
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			gd->bd->bi_dram[2].size -
			board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR:  ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
					CONFIG_DP_DDR_CTRL,
					CONFIG_DP_DDR_NUM_CTRLS,
					CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
					NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

	return 0;
}
#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_SPL_BUILD)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif
/*
 * Before the DDR size is known, the early MMU table has DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * "normal memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes
 * need to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
			CONFIG_SYS_SDRAM_BASE,
			gd->ram_size,
			PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
			PTE_BLOCK_OUTER_SHARE		|
			PTE_BLOCK_NS			|
			PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
			CONFIG_SYS_SDRAM_BASE,
			CONFIG_SYS_DDR_BLOCK1_SIZE,
			PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
			PTE_BLOCK_OUTER_SHARE		|
			PTE_BLOCK_NS			|
			PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
				CONFIG_SYS_DDR_BLOCK2_BASE,
				CONFIG_SYS_DDR_BLOCK2_SIZE,
				PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
				PTE_BLOCK_OUTER_SHARE		|
				PTE_BLOCK_NS			|
				PTE_TYPE_VALID);
			mmu_change_region_attr(
				CONFIG_SYS_DDR_BLOCK3_BASE,
				gd->ram_size -
				CONFIG_SYS_DDR_BLOCK1_SIZE -
				CONFIG_SYS_DDR_BLOCK2_SIZE,
				PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
				PTE_BLOCK_OUTER_SHARE		|
				PTE_BLOCK_NS			|
				PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
				CONFIG_SYS_DDR_BLOCK2_BASE,
				gd->ram_size -
				CONFIG_SYS_DDR_BLOCK1_SIZE,
				PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
				PTE_BLOCK_OUTER_SHARE		|
				PTE_BLOCK_NS			|
				PTE_TYPE_VALID);
		}
	}
}
__weak int dram_init(void)
{
	fsl_initdram();
#if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}