/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/arch/mp.h>
#include <asm/arch/soc.h>
#include "cpu.h"
#include <asm/arch-fsl-layerscape/soc.h>
DECLARE_GLOBAL_DATA_PTR;
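/*
 * Overview: secondary cores park in a spin table provided by the low-level
 * startup code. Each entry holds a jump address, a status word and an LPID
 * field (indices SPIN_TABLE_ELEM_ENTRY_ADDR_IDX/_STATUS_IDX/_LPID_IDX from
 * asm/arch/mp.h). cpu_release() below fills in the address and issues "sev"
 * so a parked core leaves its "wfe" loop; see the startup assembly for the
 * authoritative layout.
 */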
void *get_spin_tbl_addr(void)
{
	return &__spin_table;
}
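/*
 * secondary_boot_code is the entry point the secondary cores execute before
 * reaching the spin table; it is expected to be provided by the low-level
 * startup assembly for this platform.
 */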
phys_addr_t determine_mp_bootpg(void)
{
	return (phys_addr_t)&secondary_boot_code;
}
#ifdef CONFIG_FSL_LSCH3
void wake_secondary_core_n(int cluster, int core, int cluster_cores)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
	u32 mpidr = 0;

	mpidr = ((cluster << 8) | core);
	/*
	 * Write the MPIDR_EL1 value of the core to be released into the
	 * scratchrw[6] register.
	 */
	gur_out32(&gur->scratchrw[6], mpidr);
	asm volatile("dsb st" : : : "memory");
	rst->brrl |= 1 << ((cluster * cluster_cores) + core);
	asm volatile("dsb st" : : : "memory");
	/*
	 * Poll scratchrw[6]; when it reads zero the core is up and running
	 * and the next core can be released.
	 */
	while (gur_in32(&gur->scratchrw[6]) != 0)
		;
}
#endif
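/*
 * Wake every secondary core and wait for each one to report in through the
 * spin table. The scratchrw[6] handshake above assumes the secondary's boot
 * stub clears that register once it reaches its spin loop (implemented in
 * the low-level startup assembly).
 */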
int fsl_layerscape_wake_seconday_cores(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#ifdef CONFIG_FSL_LSCH3
	struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
	u32 svr, ver, cluster, type;
	int j = 0, cluster_cores = 0;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_scfg __iomem *scfg = (void *)(CONFIG_SYS_FSL_SCFG_ADDR);
#endif
	u32 cores, cpu_up_mask = 1;
	int i, timeout = 10;
	u64 *table = get_spin_tbl_addr();
#ifdef COUNTER_FREQUENCY_REAL
	/* Update the counter frequency used by the secondary cores. */
	__real_cntfrq = COUNTER_FREQUENCY_REAL;
	flush_dcache_range((unsigned long)&__real_cntfrq,
			   (unsigned long)&__real_cntfrq + 8);
#endif
	cores = cpu_mask();
	/* Clear the spin table so that the secondary processors
	 * observe the correct value after waking up from wfe.
	 */
	memset(table, 0, CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE);
	flush_dcache_range((unsigned long)table,
			   (unsigned long)table +
			   (CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE));
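	/*
	 * The secondaries poll the spin table before their data caches are
	 * enabled, so every update to the table has to be cleaned to memory
	 * (hence the flush_dcache_range() calls around table accesses).
	 */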
	printf("Waking secondary cores to start from %lx\n", gd->relocaddr);
#ifdef CONFIG_FSL_LSCH3
	gur_out32(&gur->bootlocptrh, (u32)(gd->relocaddr >> 32));
	gur_out32(&gur->bootlocptrl, (u32)gd->relocaddr);
	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);
	if (ver == SVR_LS2080A || ver == SVR_LS2085A) {
		gur_out32(&gur->scratchrw[6], 1);
		asm volatile("dsb st" : : : "memory");
		rst->brrl = cores;
		asm volatile("dsb st" : : : "memory");
	} else {
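		/*
		 * Walk the gur->tp_cluster topology registers: each entry
		 * lists the initiators of one cluster, TP_CLUSTER_EOC marks
		 * the last cluster, and only ARM-type initiators are woken.
		 * cluster_cores is counted once, from cluster 0, on the
		 * assumption that all clusters contain the same number of
		 * ARM cores.
		 */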
		/*
		 * Release the cores out of reset one at a time to avoid
		 * corrupting the boot flow of the other cores.
		 */
		i = 0;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type &&
			    TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
				cluster_cores++;
		}

		do {
			cluster = in_le32(&gur->tp_cluster[i].lower);
			for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
				type = initiator_type(cluster, j);
				if (type &&
				    TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					wake_secondary_core_n(i, j,
							      cluster_cores);
			}
			i++;
		} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);
	}
#elif defined(CONFIG_FSL_LSCH2)
	scfg_out32(&scfg->scratchrw[0], (u32)(gd->relocaddr >> 32));
	scfg_out32(&scfg->scratchrw[1], (u32)gd->relocaddr);
	asm volatile("dsb st" : : : "memory");
	gur_out32(&gur->brrl, cores);
	asm volatile("dsb st" : : : "memory");
	/* Bootup online cores */
	scfg_out32(&scfg->corebcr, cores);
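	/*
	 * On LSCH2 the 64-bit start address is handed to the secondaries via
	 * scfg scratchrw[0]/[1] (the counterpart of bootlocptrh/l on LSCH3);
	 * brrl releases the cores from reset and corebcr then enables booting
	 * of the online cores.
	 */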
#endif
	/* This is needed as a precautionary measure.
	 * If some code before this has accidentally released the secondary
	 * cores then the pre-bootloader code will trap them in a "wfe" unless
	 * the scratchrw[6] is set. In this case we need a sev here to get these
	 * cores moving again.
	 */
	asm volatile("sev");
	while (timeout--) {
		flush_dcache_range((unsigned long)table, (unsigned long)table +
				   CONFIG_MAX_CPUS * 64);
		for (i = 1; i < CONFIG_MAX_CPUS; i++) {
			if (table[i * WORDS_PER_SPIN_TABLE_ENTRY +
				  SPIN_TABLE_ELEM_STATUS_IDX])
				cpu_up_mask |= 1 << i;
		}
		if (hweight32(cpu_up_mask) == hweight32(cores))
			break;
		udelay(10);
	}
	if (timeout <= 0) {
		printf("Not all cores (0x%x) are up (0x%x)\n",
		       cores, cpu_up_mask);
		return 1;
	}

	printf("All (%d) cores are up.\n", hweight32(cores));

	return 0;
}
int is_core_valid(unsigned int core)
{
	return !!((1 << core) & cpu_mask());
}
static int is_pos_valid(unsigned int pos)
{
	return !!((1 << pos) & cpu_pos_mask());
}
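/*
 * A core counts as online once the status word in its spin-table entry
 * reads 1; the secondary's boot stub is expected to write this status when
 * it parks in its "wfe" loop.
 */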
int is_core_online(u64 cpu_id)
{
	u64 *table;
	int pos = id_to_core(cpu_id);

	table = (u64 *)get_spin_tbl_addr() + pos * WORDS_PER_SPIN_TABLE_ENTRY;
	return table[SPIN_TABLE_ELEM_STATUS_IDX] == 1;
}
int cpu_reset(int nr)
{
	puts("Feature is not implemented.\n");

	return 0;
}
int cpu_disable(int nr)
{
	puts("Feature is not implemented.\n");

	return 0;
}
static int core_to_pos(int nr)
{
	u32 cores = cpu_pos_mask();
	int i, count = 0;

	if (nr == 0) {
		return 0;
	} else if (nr >= hweight32(cores)) {
		puts("Not a valid core number.\n");
		return -1;
	}

	/* Map logical core number nr to its position in the POS mask. */
	for (i = 1; i < 32; i++) {
		if (is_pos_valid(i)) {
			count++;
			if (count == nr)
				break;
		}
	}

	if (i == 32)
		i = -1;

	return i;
}
int cpu_status(int nr)
{
	u64 *table;
	int pos;

	if (nr == 0) {
		table = (u64 *)get_spin_tbl_addr();
		printf("table base @ 0x%p\n", table);
	} else {
		pos = core_to_pos(nr);
		if (pos < 0)
			return -1;
		table = (u64 *)get_spin_tbl_addr() + pos *
			WORDS_PER_SPIN_TABLE_ENTRY;
		printf("table @ 0x%p\n", table);
		printf("   addr   - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]);
		printf("   status - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_STATUS_IDX]);
		printf("   lpid   - 0x%016llx\n",
		       table[SPIN_TABLE_ELEM_LPID_IDX]);
	}

	return 0;
}
int cpu_release(int nr, int argc, char * const argv[])
{
	u64 boot_addr;
	u64 *table = (u64 *)get_spin_tbl_addr();
	int pos;

	pos = core_to_pos(nr);
	if (pos <= 0)
		return -1;

	table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
	boot_addr = simple_strtoull(argv[0], NULL, 16);
	table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX] = boot_addr;
	flush_dcache_range((unsigned long)table,
			   (unsigned long)table + SPIN_TABLE_ELEM_SIZE);
	asm volatile("dsb st");
	smp_kick_all_cpus();	/* only those with entry addr set will run */
	/*
	 * When the first release command runs, all cores are woken. Those
	 * without a valid entry address are trapped in "wfe"; "sev" kicks
	 * them off to check the address again, and once it is set they
	 * continue to run.
	 */

	return 0;
}
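/*
 * Typical use from the U-Boot shell (a sketch; the exact syntax comes from
 * the generic "cpu" command wrapper, not this file):
 *   => cpu status
 *   => cpu 1 release 0x84000000
 * which ends up in cpu_release(1, ...) with argv[0] = "0x84000000".
 */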