Using the e6500 L1 cache as initram requires the L2 cache to be enabled,
so add enabling of the L2 cache clusters.
Set the stash ID for each L1 cache to (coreID) * 2 + 32 + 0.
Set the stash ID for each L2 cache to (cluster) * 2 + 32 + 1.
The L2 stash ID is only set on Chassis 2 parts.
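
For reference, a minimal sketch of the stash-ID scheme described above
(illustrative helpers only, not part of this patch; 0 and 1 are the L1
and L2 cache-target (CT) values used in the code):

    /* Illustrative only -- mirrors the formulas in the commit message */
    static inline unsigned int l1_stash_id(unsigned int core_id)
    {
            return (core_id * 2) + 32 + 0;  /* + L1 CT (0) */
    }

    static inline unsigned int l2_stash_id(unsigned int cluster)
    {
            return (cluster * 2) + 32 + 1;  /* + L2 CT (1) */
    }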
Signed-off-by: York Sun <yorksun@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Andy Fleming <afleming@freescale.com>
}
__attribute__((weak, alias("__fsl_serdes__init"))) void fsl_serdes_init(void);
+#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
+int enable_cluster_l2(void)
+{
+ int i = 0;
+ u32 cluster;
+ ccsr_gur_t *gur = (void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
+ struct ccsr_cluster_l2 __iomem *l2cache;
+
+ cluster = in_be32(&gur->tp_cluster[i].lower);
+ if (cluster & TP_CLUSTER_EOC)
+ return 0;
+
+ /* The first cache has already been set up, so skip it */
+ i++;
+
+ /* Look through the remaining clusters, and set up their caches */
+ do {
+ l2cache = (void __iomem *)(CONFIG_SYS_FSL_CLUSTER_1_L2 + i * 0x40000);
+ cluster = in_be32(&gur->tp_cluster[i].lower);
+
+ /* set stash ID to (cluster) * 2 + 32 + 1 */
+ clrsetbits_be32(&l2cache->l2csr1, 0xff, 32 + i * 2 + 1);
+
+ printf("enable l2 for cluster %d %p\n", i, l2cache);
+
+ out_be32(&l2cache->l2csr0, L2CSR0_L2FI|L2CSR0_L2LFC);
+ while ((in_be32(&l2cache->l2csr0) &
+ (L2CSR0_L2FI|L2CSR0_L2LFC)) != 0)
+ ;
+ out_be32(&l2cache->l2csr0, L2CSR0_L2E);
+ i++;
+ } while (!(cluster & TP_CLUSTER_EOC));
+
+ return 0;
+}
+#endif
+
/*
* Initialize L2 as cache.
*
{
__maybe_unused u32 svr = get_svr();
#ifdef CONFIG_SYS_LBC_LCRR
- volatile fsl_lbc_t *lbc = LBC_BASE_ADDR;
+ fsl_lbc_t *lbc = (void __iomem *)LBC_BASE_ADDR;
+#endif
+#ifdef CONFIG_L2_CACHE
+ ccsr_l2cache_t *l2cache = (void __iomem *)CONFIG_SYS_MPC85xx_L2_ADDR;
+#elif defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2)
+ struct ccsr_cluster_l2 *l2cache = (void __iomem *)CONFIG_SYS_FSL_CLUSTER_1_L2;
#endif
#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22) || \
puts ("L2: ");
#if defined(CONFIG_L2_CACHE)
- volatile ccsr_l2cache_t *l2cache = (void *)CONFIG_SYS_MPC85xx_L2_ADDR;
volatile uint cache_ctl;
uint ver;
u32 l2siz_field;
}
skip_l2:
+#elif defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2)
+ if (l2cache->l2csr0 & L2CSR0_L2E)
+ printf("%d KB enabled\n", (l2cache->l2cfg0 & 0x3fff) * 64);
+
+ enable_cluster_l2();
#else
puts("disabled\n");
#endif
/* we dont bother w/L3 since no platform of this type has one */
}
-#elif defined(CONFIG_BACKSIDE_L2_CACHE)
+#elif defined(CONFIG_BACKSIDE_L2_CACHE) || \
+ defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2)
static inline void ft_fixup_l2cache(void *blob)
{
int off, l2_off, l3_off = -1;
u32 *ph;
+#ifdef CONFIG_BACKSIDE_L2_CACHE
u32 l2cfg0 = mfspr(SPRN_L2CFG0);
+#else
+ struct ccsr_cluster_l2 *l2cache =
+ (struct ccsr_cluster_l2 __iomem *)(CONFIG_SYS_FSL_CLUSTER_1_L2);
+ u32 l2cfg0 = in_be32(&l2cache->l2cfg0);
+#endif
u32 size, line_size, num_ways, num_sets;
int has_l2 = 1;
if (has_l2) {
#ifdef CONFIG_SYS_CACHE_STASHING
u32 *reg = (u32 *)fdt_getprop(blob, off, "reg", 0);
+#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
+ /* Only initialize every eighth thread */
+ if (reg && !((*reg) % 8))
+#else
if (reg)
+#endif
fdt_setprop_cell(blob, l2_off, "cache-stash-id",
(*reg * 2) + 32 + 1);
#endif
slwi r8,r4,5
add r10,r3,r8
-#if defined(CONFIG_E500MC) && defined(CONFIG_SYS_CACHE_STASHING)
- /* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
- slwi r8,r4,1
- addi r8,r8,32
- mtspr L1CSR2,r8
-#endif
-
#ifdef CONFIG_E6500
mfspr r0,SPRN_PIR
/*
mtspr SPRN_PIR,r4 /* write to PIR register */
+#ifdef CONFIG_SYS_CACHE_STASHING
+ /* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
+ slwi r8,r4,1
+ addi r8,r8,32
+ mtspr L1CSR2,r8
+#endif
+
#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22) || \
defined(CONFIG_SYS_FSL_ERRATUM_NMG_CPU_A011)
/*
*
*/
-#if defined(CONFIG_E500MC) && defined(CONFIG_SYS_CACHE_STASHING)
+#ifdef CONFIG_SYS_CACHE_STASHING
/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
li r2,(32 + 0)
mtspr L1CSR2,r2
#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */
+#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
+create_ccsr_l2_tlb:
+ /*
+ * Create a TLB for the MMR location of CCSR
+ * to access L2CSR0 register
+ */
+ create_tlb0_entry 0, \
+ 0, BOOKE_PAGESZ_4K, \
+ CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \
+ CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \
+ CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
+
+enable_l2_cluster_l2:
+ /* enable L2 cache */
+ lis r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
+ ori r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
+ li r4, 33 /* stash id: 0 * 2 + 32 + 1 for cluster 0 */
+ stw r4, 4(r3)
+ lis r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
+ ori r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
+ sync
+ stw r4, 0(r3) /* invalidate L2 */
+1: sync
+ lwz r0, 0(r3)
+ twi 0, r0, 0
+ isync
+ and. r1, r0, r4
+ bne 1b
+ lis r4, L2CSR0_L2E@h
+ sync
+ stw r4, 0(r3) /* enable L2 */
+delete_ccsr_l2_tlb:
+ delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
+#endif
+
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
#define DCSR_LAWBARH0 (CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_SIZE_1M 0x13
#define CONFIG_SYS_FSL_FM2_RX3_1G_OFFSET 0x58b000
#define CONFIG_SYS_FSL_FM2_RX4_1G_OFFSET 0x58c000
#define CONFIG_SYS_FSL_FM2_RX0_10G_OFFSET 0x590000
+#define CONFIG_SYS_FSL_CLUSTER_1_L2_OFFSET 0xC20000
#else
#define CONFIG_SYS_MPC85xx_ECM_OFFSET 0x0000
#define CONFIG_SYS_MPC85xx_DDR_OFFSET 0x2000
#define TSEC_BASE_ADDR (CONFIG_SYS_IMMR + CONFIG_SYS_TSEC1_OFFSET)
#define MDIO_BASE_ADDR (CONFIG_SYS_IMMR + CONFIG_SYS_MDIO1_OFFSET)
+#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
+struct ccsr_cluster_l2 {
+ u32 l2csr0; /* 0x000 L2 cache control and status register 0 */
+ u32 l2csr1; /* 0x004 L2 cache control and status register 1 */
+ u32 l2cfg0; /* 0x008 L2 cache configuration register 0 */
+ u8 res_0c[500];/* 0x00c - 0x1ff */
+ u32 l2pir0; /* 0x200 L2 cache partitioning ID register 0 */
+ u8 res_204[4];
+ u32 l2par0; /* 0x208 L2 cache partitioning allocation register 0 */
+ u32 l2pwr0; /* 0x20c L2 cache partitioning way register 0 */
+ u32 l2pir1; /* 0x210 L2 cache partitioning ID register 1 */
+ u8 res_214[4];
+ u32 l2par1; /* 0x218 L2 cache partitioning allocation register 1 */
+ u32 l2pwr1; /* 0x21c L2 cache partitioning way register 1 */
+ u32 l2pir2; /* 0x220 L2 cache partitioning ID register 2 */
+ u8 res_224[4];
+ u32 l2par2; /* 0x228 L2 cache partitioning allocation register 2 */
+ u32 l2pwr2; /* 0x22c L2 cache partitioning way register 2 */
+ u32 l2pir3; /* 0x230 L2 cache partitioning ID register 3 */
+ u8 res_234[4];
+ u32 l2par3; /* 0x238 L2 cache partitioning allocation register 3 */
+ u32 l2pwr3; /* 0x23c L2 cache partitioning way register 3 */
+ u32 l2pir4; /* 0x240 L2 cache partitioning ID register 4 */
+ u8 res_244[4];
+ u32 l2par4; /* 0x248 L2 cache partitioning allocation register 4 */
+ u32 l2pwr4; /* 0x24c L2 cache partitioning way register 4 */
+ u32 l2pir5; /* 0x250 L2 cache partitioning ID register 5 */
+ u8 res_254[4];
+ u32 l2par5; /* 0x258 L2 cache partitioning allocation register 5 */
+ u32 l2pwr5; /* 0x25c L2 cache partitioning way register 5 */
+ u32 l2pir6; /* 0x260 L2 cache partitioning ID register 6 */
+ u8 res_264[4];
+ u32 l2par6; /* 0x268 L2 cache partitioning allocation register 6 */
+ u32 l2pwr6; /* 0x26c L2 cache partitioning way register 6 */
+ u32 l2pir7; /* 0x270 L2 cache partitioning ID register 7 */
+ u8 res_274[4];
+ u32 l2par7; /* 0x278 L2 cache partitioning allocation register 7 */
+ u32 l2pwr7; /* 0x27c L2 cache partitioning way register 7 */
+ u8 res_280[0xb80]; /* 0x280 - 0xdff */
+ u32 l2errinjhi; /* 0xe00 L2 cache error injection mask high */
+ u32 l2errinjlo; /* 0xe04 L2 cache error injection mask low */
+ u32 l2errinjctl;/* 0xe08 L2 cache error injection control */
+ u8 res_e0c[20]; /* 0xe0c - 0xe1f */
+ u32 l2captdatahi; /* 0xe20 L2 cache error capture data high */
+ u32 l2captdatalo; /* 0xe24 L2 cache error capture data low */
+ u32 l2captecc; /* 0xe28 L2 cache error capture ECC syndrome */
+ u8 res_e2c[20]; /* 0xe2c - 0xe3f */
+ u32 l2errdet; /* 0xe40 L2 cache error detect */
+ u32 l2errdis; /* 0xe44 L2 cache error disable */
+ u32 l2errinten; /* 0xe48 L2 cache error interrupt enable */
+ u32 l2errattr; /* 0xe4c L2 cache error attribute */
+ u32 l2erreaddr; /* 0xe50 L2 cache error extended address */
+ u32 l2erraddr; /* 0xe54 L2 cache error address */
+ u32 l2errctl; /* 0xe58 L2 cache error control */
+};
+#define CONFIG_SYS_FSL_CLUSTER_1_L2 \
+ (CONFIG_SYS_IMMR + CONFIG_SYS_FSL_CLUSTER_1_L2_OFFSET)
+#endif /* CONFIG_SYS_FSL_QORIQ_CHASSIS2 */
#endif /*__IMMAP_85xx__*/