#include <asm/system.h>
#include <asm/arch/mp.h>
#include <asm/arch/soc.h>
+#include "cpu.h"
+#include <asm/arch-fsl-layerscape/soc.h>
DECLARE_GLOBAL_DATA_PTR;
return (phys_addr_t)&secondary_boot_code;
}
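+/*
+ * Record in each spin-table entry which execution state (AArch64 or
+ * AArch32) the secondary cores should use when entering the OS.
+ */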
+void update_os_arch_secondary_cores(uint8_t os_arch)
+{
+ u64 *table = get_spin_tbl_addr();
+ int i;
+
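+ /* Entry 0 is the boot core; update only the secondary entries */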
+ for (i = 1; i < CONFIG_MAX_CPUS; i++)
+ table[i * WORDS_PER_SPIN_TABLE_ENTRY +
+ SPIN_TABLE_ELEM_OS_ARCH_IDX] = os_arch;
+}
+
+#ifdef CONFIG_FSL_LSCH3
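+/*
+ * Release a single secondary core from reset, then spin until it
+ * signals (by clearing scratchrw[6]) that it is up and running.
+ */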
+static void wake_secondary_core_n(int cluster, int core, int cluster_cores)
+{
+ struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
+ struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
+ u32 mpidr = (cluster << 8) | core;
+
+ /*
+ * Write the MPIDR_EL1 value of the core to be released into the
+ * scratchrw[6] register
+ */
+ gur_out32(&gur->scratchrw[6], mpidr);
+ asm volatile("dsb st" : : : "memory");
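+ /* Release the core from reset via the boot release register */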
+ rst->brrl |= 1 << ((cluster * cluster_cores) + core);
+ asm volatile("dsb st" : : : "memory");
+ /*
+ * Poll scratchrw[6]: the released core clears it once it is up
+ * and running, after which the next core can be released
+ */
+ while (gur_in32(&gur->scratchrw[6]) != 0)
+ ;
+}
+#endif
+
int fsl_layerscape_wake_seconday_cores(void)
{
struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
+#ifdef CONFIG_FSL_LSCH3
struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
+ u32 svr, ver, cluster, type;
+ int j = 0, cluster_cores = 0;
+#elif defined(CONFIG_FSL_LSCH2)
+ struct ccsr_scfg __iomem *scfg = (void *)(CONFIG_SYS_FSL_SCFG_ADDR);
+#endif
u32 cores, cpu_up_mask = 1;
int i, timeout = 10;
u64 *table = get_spin_tbl_addr();
printf("Waking secondary cores to start from %lx\n", gd->relocaddr);
+#ifdef CONFIG_FSL_LSCH3
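+ /* Secondary cores begin execution at the address in bootlocptr */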
gur_out32(&gur->bootlocptrh, (u32)(gd->relocaddr >> 32));
gur_out32(&gur->bootlocptrl, (u32)gd->relocaddr);
- gur_out32(&gur->scratchrw[6], 1);
+
+ svr = gur_in32(&gur->svr);
+ ver = SVR_SOC_VER(svr);
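+ /* LS2080A/LS2085A release all secondary cores at once */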
+ if (ver == SVR_LS2080A || ver == SVR_LS2085A) {
+ gur_out32(&gur->scratchrw[6], 1);
+ asm volatile("dsb st" : : : "memory");
+ rst->brrl = cores;
+ asm volatile("dsb st" : : : "memory");
+ } else {
+ /*
+ * Release the cores out of reset one-at-a-time to avoid
+ * power spikes
+ */
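+ /* Count the ARM cores in cluster 0 to learn the cores per cluster */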
+ i = 0;
+ cluster = in_le32(&gur->tp_cluster[i].lower);
+ for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
+ type = initiator_type(cluster, j);
+ if (type &&
+ TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
+ cluster_cores++;
+ }
+
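+ /* Walk the cluster list, releasing each ARM core in turn until EOC */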
+ do {
+ cluster = in_le32(&gur->tp_cluster[i].lower);
+ for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
+ type = initiator_type(cluster, j);
+ if (type &&
+ TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
+ wake_secondary_core_n(i, j,
+ cluster_cores);
+ }
+ i++;
+ } while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);
+ }
+#elif defined(CONFIG_FSL_LSCH2)
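+ /* scratchrw[0,1] hold the 64-bit start address for secondary cores */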
+ scfg_out32(&scfg->scratchrw[0], (u32)(gd->relocaddr >> 32));
+ scfg_out32(&scfg->scratchrw[1], (u32)gd->relocaddr);
asm volatile("dsb st" : : : "memory");
- rst->brrl = cores;
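+ /* On LSCH2 the boot release register lives in the GUTS block */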
+ gur_out32(&gur->brrl, cores);
asm volatile("dsb st" : : : "memory");
+ /* Boot up the online cores */
+ scfg_out32(&scfg->corebcr, cores);
+#endif
/* This is needed as a precautionary measure.
* If some code before this has accidentally released the secondary
* cores then the pre-bootloader code will trap them in a "wfe" unless
return !!((1 << core) & cpu_mask());
}
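+/* Check whether a physical core position is present on this SoC */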
+static int is_pos_valid(unsigned int pos)
+{
+ return !!((1 << pos) & cpu_pos_mask());
+}
+
int is_core_online(u64 cpu_id)
{
u64 *table;
return 0;
}
-int core_to_pos(int nr)
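+/*
+ * Map a logical core number to its physical core position;
+ * returns -1 if no such position exists.
+ */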
+static int core_to_pos(int nr)
{
- u32 cores = cpu_mask();
+ u32 cores = cpu_pos_mask();
int i, count = 0;
if (nr == 0) {
}
for (i = 1; i < 32; i++) {
- if (is_core_valid(i)) {
+ if (is_pos_valid(i)) {
count++;
if (count == nr)
break;
}
}
- return count;
+ if (count != nr)
+ return -1;
+
+ return i;
}
int cpu_status(int nr)
(unsigned long)table + SPIN_TABLE_ELEM_SIZE);
asm volatile("dsb st");
smp_kick_all_cpus(); /* only those with entry addr set will run */
+ /*
+ * The first release command sets every core running. Cores without
+ * a valid entry address are trapped in "wfe"; "sev" wakes them to
+ * re-check the address, and once it is set they continue to boot.
+ */
+ asm volatile("sev");
return 0;
}