5 * Texas Instruments, <www.ti.com>
7 * Aneesh V <aneesh@ti.com>
9 * SPDX-License-Identifier: GPL-2.0+
14 #include <asm/arch/clock.h>
15 #include <asm/arch/sys_proto.h>
16 #include <asm/omap_common.h>
17 #include <asm/utils.h>
18 #include <linux/compiler.h>
20 static int emif1_enabled = -1, emif2_enabled = -1;
22 void set_lpmode_selfrefresh(u32 base)
24 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
27 reg = readl(&emif->emif_pwr_mgmt_ctrl);
28 reg &= ~EMIF_REG_LP_MODE_MASK;
29 reg |= LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT;
30 reg &= ~EMIF_REG_SR_TIM_MASK;
31 writel(reg, &emif->emif_pwr_mgmt_ctrl);
33 /* dummy read for the new SR_TIM to be loaded */
34 readl(&emif->emif_pwr_mgmt_ctrl);
37 void force_emif_self_refresh(void)
39 set_lpmode_selfrefresh(EMIF1_BASE);
40 set_lpmode_selfrefresh(EMIF2_BASE);
43 inline u32 emif_num(u32 base)
45 if (base == EMIF1_BASE)
47 else if (base == EMIF2_BASE)
53 static inline u32 get_mr(u32 base, u32 cs, u32 mr_addr)
56 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
58 mr_addr |= cs << EMIF_REG_CS_SHIFT;
59 writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
60 if (omap_revision() == OMAP4430_ES2_0)
61 mr = readl(&emif->emif_lpddr2_mode_reg_data_es2);
63 mr = readl(&emif->emif_lpddr2_mode_reg_data);
64 debug("get_mr: EMIF%d cs %d mr %08x val 0x%x\n", emif_num(base),
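/*
 * On the 32-bit EMIF data interface the 8-bit mode-register value is
 * normally mirrored on every byte lane, so the comparison below simply
 * checks that all four byte lanes returned the same 8-bit value.
 */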
66 if (((mr & 0x0000ff00) >> 8) == (mr & 0xff) &&
67 ((mr & 0x00ff0000) >> 16) == (mr & 0xff) &&
68 ((mr & 0xff000000) >> 24) == (mr & 0xff))
74 static inline void set_mr(u32 base, u32 cs, u32 mr_addr, u32 mr_val)
76 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
78 mr_addr |= cs << EMIF_REG_CS_SHIFT;
79 writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
80 writel(mr_val, &emif->emif_lpddr2_mode_reg_data);
83 void emif_reset_phy(u32 base)
85 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
88 iodft = readl(&emif->emif_iodft_tlgc);
89 iodft |= EMIF_REG_RESET_PHY_MASK;
90 writel(iodft, &emif->emif_iodft_tlgc);
93 static void do_lpddr2_init(u32 base, u32 cs)
96 const struct lpddr2_mr_regs *mr_regs;
98 get_lpddr2_mr_regs(&mr_regs);
99 /* Wait till device auto initialization is complete */
100 while (get_mr(base, cs, LPDDR2_MR0) & LPDDR2_MR0_DAI_MASK)
102 set_mr(base, cs, LPDDR2_MR10, mr_regs->mr10);
105 * Enough loops assuming a maximum of 2 GHz
110 set_mr(base, cs, LPDDR2_MR1, mr_regs->mr1);
111 set_mr(base, cs, LPDDR2_MR16, mr_regs->mr16);
114 * Enable refresh along with writing MR2
115 * Encoding of RL in MR2 is (RL - 2)
117 mr_addr = LPDDR2_MR2 | EMIF_REG_REFRESH_EN_MASK;
118 set_mr(base, cs, mr_addr, mr_regs->mr2);
120 if (mr_regs->mr3 > 0)
121 set_mr(base, cs, LPDDR2_MR3, mr_regs->mr3);
124 static void lpddr2_init(u32 base, const struct emif_regs *regs)
126 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
129 clrbits_le32(&emif->emif_lpddr2_nvm_config, EMIF_REG_CS1NVMEN_MASK);
132 * Keep REG_INITREF_DIS = 1 to prevent re-initialization of SDRAM
133 * when EMIF_SDRAM_CONFIG register is written
135 setbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
138 * Set the SDRAM_CONFIG and PHY_CTRL for the
139 * unlocked frequency & default RL
141 writel(regs->sdram_config_init, &emif->emif_sdram_config);
142 writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
144 do_ext_phy_settings(base, regs);
146 do_lpddr2_init(base, CS0);
147 if (regs->sdram_config & EMIF_REG_EBANK_MASK)
148 do_lpddr2_init(base, CS1);
150 writel(regs->sdram_config, &emif->emif_sdram_config);
151 writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
153 /* Enable refresh now */
154 clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
158 __weak void do_ext_phy_settings(u32 base, const struct emif_regs *regs)
162 void emif_update_timings(u32 base, const struct emif_regs *regs)
164 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
167 writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl_shdw);
169 writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl_shdw);
171 writel(regs->sdram_tim1, &emif->emif_sdram_tim_1_shdw);
172 writel(regs->sdram_tim2, &emif->emif_sdram_tim_2_shdw);
173 writel(regs->sdram_tim3, &emif->emif_sdram_tim_3_shdw);
174 if (omap_revision() == OMAP4430_ES1_0) {
175 /* ES1 bug: EMIF should be in force idle during freq_update */
176 writel(0, &emif->emif_pwr_mgmt_ctrl);
178 writel(EMIF_PWR_MGMT_CTRL, &emif->emif_pwr_mgmt_ctrl);
179 writel(EMIF_PWR_MGMT_CTRL_SHDW, &emif->emif_pwr_mgmt_ctrl_shdw);
181 writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl_shdw);
182 writel(regs->zq_config, &emif->emif_zq_config);
183 writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
184 writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
186 if ((omap_revision() >= OMAP5430_ES1_0) || is_dra7xx()) {
187 writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0,
188 &emif->emif_l3_config);
189 } else if (omap_revision() >= OMAP4460_ES1_0) {
190 writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_3_LL_0,
191 &emif->emif_l3_config);
193 writel(EMIF_L3_CONFIG_VAL_SYS_10_LL_0,
194 &emif->emif_l3_config);
198 static void omap5_ddr3_leveling(u32 base, const struct emif_regs *regs)
200 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
202 /* keep sdram in self-refresh */
203 writel(((LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT)
204 & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
208 * Set invert_clkout (if activated)--DDR_PHYCTRL_1
209 * Invert clock adds an additional half cycle delay on the
210 * command interface. The additional half cycle is usually
211 * meant to enable leveling in the situation that DQS is later
212 * than CK on the board. It also helps provide some additional
213 * margin for leveling.
215 writel(regs->emif_ddr_phy_ctlr_1,
216 &emif->emif_ddr_phy_ctrl_1);
218 writel(regs->emif_ddr_phy_ctlr_1,
219 &emif->emif_ddr_phy_ctrl_1_shdw);
222 writel(((LP_MODE_DISABLE << EMIF_REG_LP_MODE_SHIFT)
223 & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
225 /* Launch Full leveling */
226 writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);
228 /* Wait till full leveling is complete */
229 readl(&emif->emif_rd_wr_lvl_ctl);
232 /* Configure the number of data eye leveling samples */
233 config_data_eye_leveling_samples(base);
236 * Launch 8 incremental WR_LVL- to compensate for
239 writel(0x2 << EMIF_REG_WRLVLINC_INT_SHIFT,
240 &emif->emif_rd_wr_lvl_ctl);
244 /* Launch Incremental leveling */
245 writel(DDR3_INC_LVL, &emif->emif_rd_wr_lvl_ctl);
249 static void update_hwleveling_output(u32 base, const struct emif_regs *regs)
251 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
252 u32 *emif_ext_phy_ctrl_reg, *emif_phy_status;
255 emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[7];
256 phy = readl(&emif->emif_ddr_phy_ctrl_1);
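/*
 * Each leveled ratio read back from the PHY status registers below is
 * written twice to consecutive addresses: the EXT_PHY_CTRL registers and
 * their shadow counterparts sit in adjacent pairs in the register map,
 * so the second write updates the shadow register.
 */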
258 /* Update PHY_REG_RDDQS_RATIO */
259 emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_7;
260 if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVL_MASK_MASK))
261 for (i = 0; i < PHY_RDDQS_RATIO_REGS; i++) {
262 reg = readl(emif_phy_status++);
263 writel(reg, emif_ext_phy_ctrl_reg++);
264 writel(reg, emif_ext_phy_ctrl_reg++);
267 /* Update PHY_REG_FIFO_WE_SLAVE_RATIO */
268 emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_2;
269 emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[12];
270 if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVLGATE_MASK_MASK))
271 for (i = 0; i < PHY_FIFO_WE_SLAVE_RATIO_REGS; i++) {
272 reg = readl(emif_phy_status++);
273 writel(reg, emif_ext_phy_ctrl_reg++);
274 writel(reg, emif_ext_phy_ctrl_reg++);
277 /* Update PHY_REG_WR_DQ/DQS_SLAVE_RATIO */
278 emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_12;
279 emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[17];
280 if (!(phy & EMIF_DDR_PHY_CTRL_1_WRLVL_MASK_MASK))
281 for (i = 0; i < PHY_REG_WR_DQ_SLAVE_RATIO_REGS; i++) {
282 reg = readl(emif_phy_status++);
283 writel(reg, emif_ext_phy_ctrl_reg++);
284 writel(reg, emif_ext_phy_ctrl_reg++);
287 /* Disable Leveling */
288 writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
289 writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
290 writel(0x0, &emif->emif_rd_wr_lvl_rmp_ctl);
293 static void dra7_ddr3_leveling(u32 base, const struct emif_regs *regs)
295 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
297 /* Clear Error Status */
298 clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36,
299 EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
300 EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);
302 clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36_shdw,
303 EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
304 EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);
306 /* Disable refreshes before leveling */
307 clrsetbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK,
308 EMIF_REG_INITREF_DIS_MASK);
310 /* Start Full leveling */
311 writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);
315 /* Check for leveling timeout */
316 if (readl(&emif->emif_status) & EMIF_REG_LEVELING_TO_MASK) {
317 printf("Leveling timeout on EMIF%d\n", emif_num(base));
321 /* Enable refreshes after leveling */
322 clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
324 debug("HW leveling success\n");
326 * Update slave ratios in EXT_PHY_CTRLx registers
327 * as per HW leveling output
329 update_hwleveling_output(base, regs);
332 static void dra7_ddr3_init(u32 base, const struct emif_regs *regs)
334 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
337 emif_reset_phy(base);
338 writel(0x0, &emif->emif_pwr_mgmt_ctrl);
340 do_ext_phy_settings(base, regs);
342 writel(regs->ref_ctrl | EMIF_REG_INITREF_DIS_MASK,
343 &emif->emif_sdram_ref_ctrl);
344 /* Update timing registers */
345 writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
346 writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
347 writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);
349 writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0, &emif->emif_l3_config);
350 writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);
351 writel(regs->zq_config, &emif->emif_zq_config);
352 writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
353 writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
354 writel(regs->emif_rd_wr_lvl_ctl, &emif->emif_rd_wr_lvl_ctl);
356 writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
357 writel(regs->emif_rd_wr_exec_thresh, &emif->emif_rd_wr_exec_thresh);
359 writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
361 writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
362 writel(regs->sdram_config_init, &emif->emif_sdram_config);
366 writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl);
368 if (regs->emif_rd_wr_lvl_rmp_ctl & EMIF_REG_RDWRLVL_EN_MASK)
369 dra7_ddr3_leveling(base, regs);
372 static void omap5_ddr3_init(u32 base, const struct emif_regs *regs)
374 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
376 writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
377 writel(regs->sdram_config_init, &emif->emif_sdram_config);
379 * Set SDRAM_CONFIG and PHY control registers to locked frequency
380 * and RL = 7. As the default values of the mode registers are not
381 * defined, the contents of the mode registers must be fully initialized.
382 * H/W takes care of this initialization
384 writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
386 /* Update timing registers */
387 writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
388 writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
389 writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);
391 writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);
393 writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
394 writel(regs->sdram_config_init, &emif->emif_sdram_config);
395 do_ext_phy_settings(base, regs);
397 writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
398 omap5_ddr3_leveling(base, regs);
401 static void ddr3_init(u32 base, const struct emif_regs *regs)
404 omap5_ddr3_init(base, regs);
406 dra7_ddr3_init(base, regs);
409 #ifndef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
410 #define print_timing_reg(reg) debug(#reg" - 0x%08x\n", (reg))
413 * Organization and refresh requirements for LPDDR2 devices of different
414 * types and densities. Derived from JESD209-2 section 2.4
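 * For example (reading a row below, purely illustrative): the 4G entry
 * {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_10, COL_11}} describes an
 * 8-bank part with tREFI = 3.9 us, 14 row address bits, and 10 (x32) or
 * 11 (x16) column address bits.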
416 const struct lpddr2_addressing addressing_table[] = {
417 /* Banks tREFIx10 rowx32,rowx16 colx32,colx16 density */
418 {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_7, COL_8} },/*64M */
419 {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_8, COL_9} },/*128M */
420 {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_8, COL_9} },/*256M */
421 {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*512M */
422 {BANKS8, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*1GS4 */
423 {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_9, COL_10} },/*2GS4 */
424 {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_10, COL_11} },/*4G */
425 {BANKS8, T_REFI_3_9, {ROW_15, ROW_15}, {COL_10, COL_11} },/*8G */
426 {BANKS4, T_REFI_7_8, {ROW_14, ROW_14}, {COL_9, COL_10} },/*1GS2 */
427 {BANKS4, T_REFI_3_9, {ROW_15, ROW_15}, {COL_9, COL_10} },/*2GS2 */
430 static const u32 lpddr2_density_2_size_in_mbytes[] = {
444 * Calculate the period of DDR clock from frequency value and set the
445 * denominator and numerator in global variables for easy access later
447 static void set_ddr_clk_period(u32 freq)
451 * period_in_ns = 10^9/freq
455 cancel_out(T_num, T_den, 200);
460 * Convert time in nanoseconds to a number of DDR clock cycles
462 static inline u32 ns_2_cycles(u32 ns)
464 return ((ns * (*T_den)) + (*T_num) - 1) / (*T_num);
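/*
 * Worked example (illustrative values): with a 400 MHz DDR clock the
 * period T_num/T_den reduces to 5/2 ns (2.5 ns), so ns_2_cycles(18)
 * computes (18 * 2 + 5 - 1) / 5 = 8, i.e. 18 ns rounded up to 8 cycles.
 */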
468 * ns_2_cycles with the difference that the time passed is 2 times the actual
469 * value (to avoid fractions). The cycle count returned is for the original value of
470 * the timing parameter
472 static inline u32 ns_x2_2_cycles(u32 ns)
474 return ((ns * (*T_den)) + (*T_num) * 2 - 1) / ((*T_num) * 2);
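/*
 * Worked example (illustrative values): for a 7.5 ns parameter the tables
 * store 15 (twice the value); at 2.5 ns per cycle this evaluates to
 * (15 * 2 + 2 * 5 - 1) / (2 * 5) = 3 cycles, i.e. ceil(7.5 / 2.5).
 */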
478 * Find addressing table index based on the device's type (S2 or S4) and
481 s8 addressing_table_index(u8 type, u8 density, u8 width)
484 if ((density > LPDDR2_DENSITY_8Gb) || (width == LPDDR2_IO_WIDTH_8))
488 * Look at the way ADDR_TABLE_INDEX* values have been defined
489 * in emif.h compared to LPDDR2_DENSITY_* values
490 * The table is laid out in the increasing order of density
491 * (ignoring type). The exceptions 1GS2 and 2GS2 have been placed
494 if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_1Gb))
495 index = ADDR_TABLE_INDEX1GS2;
496 else if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_2Gb))
497 index = ADDR_TABLE_INDEX2GS2;
501 debug("emif: addressing table index %d\n", index);
507 * Find the right timing table from the array of timing
508 * tables of the device using the DDR clock frequency
510 static const struct lpddr2_ac_timings *get_timings_table(const struct
511 lpddr2_ac_timings *const *device_timings,
514 u32 i, temp, freq_nearest;
515 const struct lpddr2_ac_timings *timings = 0;
517 emif_assert(freq <= MAX_LPDDR2_FREQ);
518 emif_assert(device_timings);
521 * Start with the maximum allowed frequency - that is always safe
523 freq_nearest = MAX_LPDDR2_FREQ;
525 * Find the timings table that has the max frequency value:
526 * i. Above or equal to the DDR frequency - safe
527 * ii. The lowest that satisfies condition (i) - optimal
529 for (i = 0; (i < MAX_NUM_SPEEDBINS) && device_timings[i]; i++) {
530 temp = device_timings[i]->max_freq;
531 if ((temp >= freq) && (temp <= freq_nearest)) {
533 timings = device_timings[i];
536 debug("emif: timings table: %d\n", freq_nearest);
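/*
 * Example (assumed speed bins, for illustration only): if the device
 * provides timing tables with max_freq of 200, 333 and 400 MHz and the
 * requested freq is 360 MHz, the loop above selects the 400 MHz table -
 * the lowest max_freq that is still >= the operating frequency.
 */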
541 * Finds the value of emif_sdram_config_reg
542 * All parameters are programmed based on the device on CS0.
543 * If there is a device on CS1, it will be the same as that on CS0 or
544 * it will be NVM. We don't support NVM yet.
545 * If the cs1_device pointer is NULL, it is assumed that there is no device
548 static u32 get_sdram_config_reg(const struct lpddr2_device_details *cs0_device,
549 const struct lpddr2_device_details *cs1_device,
550 const struct lpddr2_addressing *addressing,
555 config_reg |= (cs0_device->type + 4) << EMIF_REG_SDRAM_TYPE_SHIFT;
556 config_reg |= EMIF_INTERLEAVING_POLICY_MAX_INTERLEAVING <<
557 EMIF_REG_IBANK_POS_SHIFT;
559 config_reg |= cs0_device->io_width << EMIF_REG_NARROW_MODE_SHIFT;
561 config_reg |= RL << EMIF_REG_CL_SHIFT;
563 config_reg |= addressing->row_sz[cs0_device->io_width] <<
564 EMIF_REG_ROWSIZE_SHIFT;
566 config_reg |= addressing->num_banks << EMIF_REG_IBANK_SHIFT;
568 config_reg |= (cs1_device ? EBANK_CS1_EN : EBANK_CS1_DIS) <<
569 EMIF_REG_EBANK_SHIFT;
571 config_reg |= addressing->col_sz[cs0_device->io_width] <<
572 EMIF_REG_PAGESIZE_SHIFT;
577 static u32 get_sdram_ref_ctrl(u32 freq,
578 const struct lpddr2_addressing *addressing)
580 u32 ref_ctrl = 0, val = 0, freq_khz;
581 freq_khz = freq / 1000;
583 * The refresh rate to be set is tREFI (in us) * freq (in MHz);
584 * the division by 10000 accounts for kHz and the x10 in t_REFI_us_x10
586 val = addressing->t_REFI_us_x10 * freq_khz / 10000;
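/*
 * Worked example (illustrative values): for tREFI = 7.8 us
 * (t_REFI_us_x10 = 78, as in the addressing table above) at 400 MHz
 * (freq_khz = 400000), val = 78 * 400000 / 10000 = 3120 clock cycles
 * between auto-refresh commands.
 */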
587 ref_ctrl |= val << EMIF_REG_REFRESH_RATE_SHIFT;
592 static u32 get_sdram_tim_1_reg(const struct lpddr2_ac_timings *timings,
593 const struct lpddr2_min_tck *min_tck,
594 const struct lpddr2_addressing *addressing)
596 u32 tim1 = 0, val = 0;
597 val = max(min_tck->tWTR, ns_x2_2_cycles(timings->tWTRx2)) - 1;
598 tim1 |= val << EMIF_REG_T_WTR_SHIFT;
600 if (addressing->num_banks == BANKS8)
601 val = (timings->tFAW * (*T_den) + 4 * (*T_num) - 1) /
604 val = max(min_tck->tRRD, ns_2_cycles(timings->tRRD)) - 1;
606 tim1 |= val << EMIF_REG_T_RRD_SHIFT;
608 val = ns_2_cycles(timings->tRASmin + timings->tRPab) - 1;
609 tim1 |= val << EMIF_REG_T_RC_SHIFT;
611 val = max(min_tck->tRAS_MIN, ns_2_cycles(timings->tRASmin)) - 1;
612 tim1 |= val << EMIF_REG_T_RAS_SHIFT;
614 val = max(min_tck->tWR, ns_2_cycles(timings->tWR)) - 1;
615 tim1 |= val << EMIF_REG_T_WR_SHIFT;
617 val = max(min_tck->tRCD, ns_2_cycles(timings->tRCD)) - 1;
618 tim1 |= val << EMIF_REG_T_RCD_SHIFT;
620 val = max(min_tck->tRP_AB, ns_2_cycles(timings->tRPab)) - 1;
621 tim1 |= val << EMIF_REG_T_RP_SHIFT;
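/*
 * Worked example (illustrative values, assuming the JEDEC minimum-tCK
 * value does not dominate): with a 2.5 ns clock and tRPab = 21 ns,
 * ns_2_cycles() gives ceil(21 / 2.5) = 9 cycles, and the field is
 * programmed as 9 - 1 = 8, since the EMIF encodes these timing fields
 * as "number of cycles minus one".
 */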
626 static u32 get_sdram_tim_2_reg(const struct lpddr2_ac_timings *timings,
627 const struct lpddr2_min_tck *min_tck)
629 u32 tim2 = 0, val = 0;
630 val = max(min_tck->tCKE, timings->tCKE) - 1;
631 tim2 |= val << EMIF_REG_T_CKE_SHIFT;
633 val = max(min_tck->tRTP, ns_x2_2_cycles(timings->tRTPx2)) - 1;
634 tim2 |= val << EMIF_REG_T_RTP_SHIFT;
637 * tXSRD = tRFCab + 10 ns. XSRD and XSNR should have the
640 val = ns_2_cycles(timings->tXSR) - 1;
641 tim2 |= val << EMIF_REG_T_XSRD_SHIFT;
642 tim2 |= val << EMIF_REG_T_XSNR_SHIFT;
644 val = max(min_tck->tXP, ns_x2_2_cycles(timings->tXPx2)) - 1;
645 tim2 |= val << EMIF_REG_T_XP_SHIFT;
650 static u32 get_sdram_tim_3_reg(const struct lpddr2_ac_timings *timings,
651 const struct lpddr2_min_tck *min_tck,
652 const struct lpddr2_addressing *addressing)
654 u32 tim3 = 0, val = 0;
655 val = min(timings->tRASmax * 10 / addressing->t_REFI_us_x10 - 1, 0xF);
656 tim3 |= val << EMIF_REG_T_RAS_MAX_SHIFT;
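/*
 * Worked example (illustrative values): tRASmax is expressed here in
 * refresh periods; with tRASmax = 70 us and tREFI = 7.8 us,
 * 70 * 10 / 78 = 8, so the field is programmed as 8 - 1 = 7 (capped at 0xF).
 */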
658 val = ns_2_cycles(timings->tRFCab) - 1;
659 tim3 |= val << EMIF_REG_T_RFC_SHIFT;
661 val = ns_x2_2_cycles(timings->tDQSCKMAXx2) - 1;
662 tim3 |= val << EMIF_REG_T_TDQSCKMAX_SHIFT;
664 val = ns_2_cycles(timings->tZQCS) - 1;
665 tim3 |= val << EMIF_REG_ZQ_ZQCS_SHIFT;
667 val = max(min_tck->tCKESR, ns_2_cycles(timings->tCKESR)) - 1;
668 tim3 |= val << EMIF_REG_T_CKESR_SHIFT;
673 static u32 get_zq_config_reg(const struct lpddr2_device_details *cs1_device,
674 const struct lpddr2_addressing *addressing,
680 EMIF_ZQCS_INTERVAL_DVFS_IN_US * 10 /
681 addressing->t_REFI_us_x10;
684 EMIF_ZQCS_INTERVAL_NORMAL_IN_US * 10 /
685 addressing->t_REFI_us_x10;
686 zq |= val << EMIF_REG_ZQ_REFINTERVAL_SHIFT;
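/*
 * The value programmed above is the ZQCS interval expressed as a number
 * of refresh periods, i.e. interval_in_us * 10 / t_REFI_us_x10.
 */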
688 zq |= (REG_ZQ_ZQCL_MULT - 1) << EMIF_REG_ZQ_ZQCL_MULT_SHIFT;
690 zq |= (REG_ZQ_ZQINIT_MULT - 1) << EMIF_REG_ZQ_ZQINIT_MULT_SHIFT;
692 zq |= REG_ZQ_SFEXITEN_ENABLE << EMIF_REG_ZQ_SFEXITEN_SHIFT;
695 * Assuming that the two chip selects share a single calibration resistor.
696 * If there are indeed two calibration resistors, then this flag should
697 * be enabled to take advantage of the dual calibration feature.
698 * This data should ideally come from board files. But considering
699 * that none of the boards today have calibration resistors per CS,
700 * it would be an unnecessary overhead.
702 zq |= REG_ZQ_DUALCALEN_DISABLE << EMIF_REG_ZQ_DUALCALEN_SHIFT;
704 zq |= REG_ZQ_CS0EN_ENABLE << EMIF_REG_ZQ_CS0EN_SHIFT;
706 zq |= (cs1_device ? 1 : 0) << EMIF_REG_ZQ_CS1EN_SHIFT;
711 static u32 get_temp_alert_config(const struct lpddr2_device_details *cs1_device,
712 const struct lpddr2_addressing *addressing,
715 u32 alert = 0, interval;
717 TEMP_ALERT_POLL_INTERVAL_MS * 10000 / addressing->t_REFI_us_x10;
720 alert |= interval << EMIF_REG_TA_REFINTERVAL_SHIFT;
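/*
 * The poll interval is likewise converted to a count of refresh periods:
 * TEMP_ALERT_POLL_INTERVAL_MS * 1000 gives microseconds, and the extra
 * x10 cancels the x10 in t_REFI_us_x10.
 */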
722 alert |= TEMP_ALERT_CONFIG_DEVCT_1 << EMIF_REG_TA_DEVCNT_SHIFT;
724 alert |= TEMP_ALERT_CONFIG_DEVWDT_32 << EMIF_REG_TA_DEVWDT_SHIFT;
726 alert |= 1 << EMIF_REG_TA_SFEXITEN_SHIFT;
728 alert |= 1 << EMIF_REG_TA_CS0EN_SHIFT;
730 alert |= (cs1_device ? 1 : 0) << EMIF_REG_TA_CS1EN_SHIFT;
735 static u32 get_read_idle_ctrl_reg(u8 volt_ramp)
737 u32 idle = 0, val = 0;
739 val = ns_2_cycles(READ_IDLE_INTERVAL_DVFS) / 64 - 1;
741 /* Maximum value in normal conditions - suggested by HW team */
743 idle |= val << EMIF_REG_READ_IDLE_INTERVAL_SHIFT;
745 idle |= EMIF_REG_READ_IDLE_LEN_VAL << EMIF_REG_READ_IDLE_LEN_SHIFT;
750 static u32 get_ddr_phy_ctrl_1(u32 freq, u8 RL)
752 u32 phy = 0, val = 0;
754 phy |= (RL + 2) << EMIF_REG_READ_LATENCY_SHIFT;
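/*
 * The READ_LATENCY field is programmed as RL + 2; for instance, an
 * assumed read latency of RL = 6 would be written as 8.
 */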
756 if (freq <= 100000000)
757 val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS;
758 else if (freq <= 200000000)
759 val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ;
761 val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ;
762 phy |= val << EMIF_REG_DLL_SLAVE_DLY_CTRL_SHIFT;
764 /* Other fields are constant magic values. Hardcode them together */
765 phy |= EMIF_DDR_PHY_CTRL_1_BASE_VAL <<
766 EMIF_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHIFT;
771 static u32 get_emif_mem_size(u32 base)
773 u32 size_mbytes = 0, temp;
774 struct emif_device_details dev_details;
775 struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
776 u32 emif_nr = emif_num(base);
778 emif_reset_phy(base);
779 dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
781 dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
783 emif_reset_phy(base);
785 if (dev_details.cs0_device_details) {
786 temp = dev_details.cs0_device_details->density;
787 size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
790 if (dev_details.cs1_device_details) {
791 temp = dev_details.cs1_device_details->density;
792 size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
794 /* convert to bytes */
795 return size_mbytes << 20;
798 /* Gets the encoding corresponding to a given DMM section size */
799 u32 get_dmm_section_size_map(u32 section_size)
802 * Section size mapping:
803 * 0x0: 16-MiB section
804 * 0x1: 32-MiB section
805 * 0x2: 64-MiB section
806 * 0x3: 128-MiB section
807 * 0x4: 256-MiB section
808 * 0x5: 512-MiB section
812 section_size >>= 24; /* divide by 16 MB */
813 return log_2_n_round_down(section_size);
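/*
 * Worked example (illustrative value): a 512 MiB section is
 * 0x20000000 bytes; shifting right by 24 gives 32, and
 * log_2_n_round_down(32) = 5, matching the 0x5 encoding listed above.
 */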
816 static void emif_calculate_regs(
817 const struct emif_device_details *emif_dev_details,
818 u32 freq, struct emif_regs *regs)
821 const struct lpddr2_addressing *addressing;
822 const struct lpddr2_ac_timings *timings;
823 const struct lpddr2_min_tck *min_tck;
824 const struct lpddr2_device_details *cs0_dev_details =
825 emif_dev_details->cs0_device_details;
826 const struct lpddr2_device_details *cs1_dev_details =
827 emif_dev_details->cs1_device_details;
828 const struct lpddr2_device_timings *cs0_dev_timings =
829 emif_dev_details->cs0_device_timings;
831 emif_assert(emif_dev_details);
834 * You cannot have a device on CS1 without one on CS0,
835 * so configuring the EMIF without a device on CS0 doesn't
838 emif_assert(cs0_dev_details);
839 emif_assert(cs0_dev_details->type != LPDDR2_TYPE_NVM);
841 * If there is a device on CS1 it should be the same type as CS0
842 * (or NVM. But NVM is not supported in this driver yet)
844 emif_assert((cs1_dev_details == NULL) ||
845 (cs1_dev_details->type == LPDDR2_TYPE_NVM) ||
846 (cs0_dev_details->type == cs1_dev_details->type));
847 emif_assert(freq <= MAX_LPDDR2_FREQ);
849 set_ddr_clk_period(freq);
852 * The device on CS0 is used for all timing calculations.
853 * There is only one set of timing registers per EMIF, so if the
854 * second CS (CS1) has a device, it should have the same timings as the
857 timings = get_timings_table(cs0_dev_timings->ac_timings, freq);
858 emif_assert(timings);
859 min_tck = cs0_dev_timings->min_tck;
861 temp = addressing_table_index(cs0_dev_details->type,
862 cs0_dev_details->density,
863 cs0_dev_details->io_width);
865 emif_assert((temp >= 0));
866 addressing = &(addressing_table[temp]);
867 emif_assert(addressing);
869 sys_freq = get_sys_clk_freq();
871 regs->sdram_config_init = get_sdram_config_reg(cs0_dev_details,
873 addressing, RL_BOOT);
875 regs->sdram_config = get_sdram_config_reg(cs0_dev_details,
877 addressing, RL_FINAL);
879 regs->ref_ctrl = get_sdram_ref_ctrl(freq, addressing);
881 regs->sdram_tim1 = get_sdram_tim_1_reg(timings, min_tck, addressing);
883 regs->sdram_tim2 = get_sdram_tim_2_reg(timings, min_tck);
885 regs->sdram_tim3 = get_sdram_tim_3_reg(timings, min_tck, addressing);
887 regs->read_idle_ctrl = get_read_idle_ctrl_reg(LPDDR2_VOLTAGE_STABLE);
889 regs->temp_alert_config =
890 get_temp_alert_config(cs1_dev_details, addressing, 0);
892 regs->zq_config = get_zq_config_reg(cs1_dev_details, addressing,
893 LPDDR2_VOLTAGE_STABLE);
895 regs->emif_ddr_phy_ctlr_1_init =
896 get_ddr_phy_ctrl_1(sys_freq / 2, RL_BOOT);
898 regs->emif_ddr_phy_ctlr_1 =
899 get_ddr_phy_ctrl_1(freq, RL_FINAL);
903 print_timing_reg(regs->sdram_config_init);
904 print_timing_reg(regs->sdram_config);
905 print_timing_reg(regs->ref_ctrl);
906 print_timing_reg(regs->sdram_tim1);
907 print_timing_reg(regs->sdram_tim2);
908 print_timing_reg(regs->sdram_tim3);
909 print_timing_reg(regs->read_idle_ctrl);
910 print_timing_reg(regs->temp_alert_config);
911 print_timing_reg(regs->zq_config);
912 print_timing_reg(regs->emif_ddr_phy_ctlr_1);
913 print_timing_reg(regs->emif_ddr_phy_ctlr_1_init);
915 #endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */
917 #ifdef CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION
918 const char *get_lpddr2_type(u8 type_id)
930 const char *get_lpddr2_io_width(u8 width_id)
933 case LPDDR2_IO_WIDTH_8:
935 case LPDDR2_IO_WIDTH_16:
937 case LPDDR2_IO_WIDTH_32:
944 const char *get_lpddr2_manufacturer(u32 manufacturer)
946 switch (manufacturer) {
947 case LPDDR2_MANUFACTURER_SAMSUNG:
949 case LPDDR2_MANUFACTURER_QIMONDA:
951 case LPDDR2_MANUFACTURER_ELPIDA:
953 case LPDDR2_MANUFACTURER_ETRON:
955 case LPDDR2_MANUFACTURER_NANYA:
957 case LPDDR2_MANUFACTURER_HYNIX:
959 case LPDDR2_MANUFACTURER_MOSEL:
961 case LPDDR2_MANUFACTURER_WINBOND:
963 case LPDDR2_MANUFACTURER_ESMT:
965 case LPDDR2_MANUFACTURER_SPANSION:
967 case LPDDR2_MANUFACTURER_SST:
969 case LPDDR2_MANUFACTURER_ZMOS:
971 case LPDDR2_MANUFACTURER_INTEL:
973 case LPDDR2_MANUFACTURER_NUMONYX:
975 case LPDDR2_MANUFACTURER_MICRON:
982 static void display_sdram_details(u32 emif_nr, u32 cs,
983 struct lpddr2_device_details *device)
986 const char *type_str;
987 char density_str[10];
990 debug("EMIF%d CS%d\t", emif_nr, cs);
997 mfg_str = get_lpddr2_manufacturer(device->manufacturer);
998 type_str = get_lpddr2_type(device->type);
1000 density = lpddr2_density_2_size_in_mbytes[device->density];
1001 if ((density / 1024 * 1024) == density) {
1003 sprintf(density_str, "%d GB", density);
1005 sprintf(density_str, "%d MB", density);
1006 if (mfg_str && type_str)
1007 debug("%s\t\t%s\t%s\n", mfg_str, type_str, density_str);
1010 static u8 is_lpddr2_sdram_present(u32 base, u32 cs,
1011 struct lpddr2_device_details *lpddr2_device)
1015 mr = get_mr(base, cs, LPDDR2_MR0);
1017 /* Mode register value larger than 8 bits */
1021 temp = (mr & LPDDR2_MR0_DI_MASK) >> LPDDR2_MR0_DI_SHIFT;
1026 temp = (mr & LPDDR2_MR0_DNVI_MASK) >> LPDDR2_MR0_DNVI_SHIFT;
1029 /* DNV supported - but DNV is only supported for NVM */
1033 mr = get_mr(base, cs, LPDDR2_MR4);
1035 /* Mode register value larger than 8 bits */
1039 mr = get_mr(base, cs, LPDDR2_MR5);
1041 /* Mode register value larger than 8 bits */
1045 if (!get_lpddr2_manufacturer(mr)) {
1046 /* Manufacturer not identified */
1049 lpddr2_device->manufacturer = mr;
1051 mr = get_mr(base, cs, LPDDR2_MR6);
1053 /* Mode register value larger than 8 bits */
1057 mr = get_mr(base, cs, LPDDR2_MR7);
1059 /* Mode register value larger than 8 bits */
1063 mr = get_mr(base, cs, LPDDR2_MR8);
1065 /* Mode register value larger than 8 bits */
1069 temp = (mr & MR8_TYPE_MASK) >> MR8_TYPE_SHIFT;
1070 if (!get_lpddr2_type(temp)) {
1074 lpddr2_device->type = temp;
1076 temp = (mr & MR8_DENSITY_MASK) >> MR8_DENSITY_SHIFT;
1077 if (temp > LPDDR2_DENSITY_32Gb) {
1078 /* Density not supported */
1081 lpddr2_device->density = temp;
1083 temp = (mr & MR8_IO_WIDTH_MASK) >> MR8_IO_WIDTH_SHIFT;
1084 if (!get_lpddr2_io_width(temp)) {
1085 /* Unsupported IO width value */
1088 lpddr2_device->io_width = temp;
1091 * If all the above tests pass we should
1092 * have a device on this chip-select
1097 struct lpddr2_device_details *emif_get_device_details(u32 emif_nr, u8 cs,
1098 struct lpddr2_device_details *lpddr2_dev_details)
1101 u32 base = (emif_nr == 1) ? EMIF1_BASE : EMIF2_BASE;
1103 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
1105 if (!lpddr2_dev_details)
1108 /* Do the minimum init for mode register accesses */
1109 if (!(running_from_sdram() || warm_reset())) {
1110 phy = get_ddr_phy_ctrl_1(get_sys_clk_freq() / 2, RL_BOOT);
1111 writel(phy, &emif->emif_ddr_phy_ctrl_1);
1114 if (!(is_lpddr2_sdram_present(base, cs, lpddr2_dev_details)))
1117 display_sdram_details(emif_num(base), cs, lpddr2_dev_details);
1119 return lpddr2_dev_details;
1121 #endif /* CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION */
1123 static void do_sdram_init(u32 base)
1125 const struct emif_regs *regs;
1126 u32 in_sdram, emif_nr;
1128 debug(">>do_sdram_init() %x\n", base);
1130 in_sdram = running_from_sdram();
1131 emif_nr = (base == EMIF1_BASE) ? 1 : 2;
1133 #ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
1134 emif_get_reg_dump(emif_nr, &regs);
1136 debug("EMIF: reg dump not provided\n");
1141 * The user has not provided the register values. We need to
1142 * calculate them based on the timings and the DDR frequency
1144 struct emif_device_details dev_details;
1145 struct emif_regs calculated_regs;
1148 * Get device details:
1149 * - Discovered if CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION is set
1150 * - Obtained from user otherwise
1152 struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
1153 emif_reset_phy(base);
1154 dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
1156 dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
1158 emif_reset_phy(base);
1160 /* Return if no devices on this EMIF */
1161 if (!dev_details.cs0_device_details &&
1162 !dev_details.cs1_device_details) {
1167 * Get device timings:
1168 * - Default timings specified by JESD209-2 if
1169 * CONFIG_SYS_DEFAULT_LPDDR2_TIMINGS is set
1170 * - Obtained from user otherwise
1172 emif_get_device_timings(emif_nr, &dev_details.cs0_device_timings,
1173 &dev_details.cs1_device_timings);
1175 /* Calculate the register values */
1176 emif_calculate_regs(&dev_details, omap_ddr_clk(), &calculated_regs);
1177 regs = &calculated_regs;
1178 #endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */
1181 * Initializing the LPDDR2 device cannot happen from SDRAM.
1182 * Changing the timing registers in the EMIF can happen (going from one
1185 if (!in_sdram && (!warm_reset() || is_dra7xx())) {
1186 if (emif_sdram_type(regs->sdram_config) ==
1187 EMIF_SDRAM_TYPE_LPDDR2)
1188 lpddr2_init(base, regs);
1190 ddr3_init(base, regs);
1192 if (warm_reset() && (emif_sdram_type(regs->sdram_config) ==
1193 EMIF_SDRAM_TYPE_DDR3) && !is_dra7xx()) {
1194 set_lpmode_selfrefresh(base);
1195 emif_reset_phy(base);
1196 omap5_ddr3_leveling(base, regs);
1199 /* Write to the shadow registers */
1200 emif_update_timings(base, regs);
1202 debug("<<do_sdram_init() %x\n", base);
1205 void emif_post_init_config(u32 base)
1207 struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
1208 u32 omap_rev = omap_revision();
1210 /* reset phy on ES2.0 */
1211 if (omap_rev == OMAP4430_ES2_0)
1212 emif_reset_phy(base);
1214 /* Put EMIF back in smart idle on ES1.0 */
1215 if (omap_rev == OMAP4430_ES1_0)
1216 writel(0x80000000, &emif->emif_pwr_mgmt_ctrl);
1219 void dmm_init(u32 base)
1221 const struct dmm_lisa_map_regs *lisa_map_regs;
1222 u32 i, section, valid;
1224 #ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
1225 emif_get_dmm_regs(&lisa_map_regs);
1227 u32 emif1_size, emif2_size, mapped_size, section_map = 0;
1228 u32 section_cnt, sys_addr;
1229 struct dmm_lisa_map_regs lis_map_regs_calculated = {0};
1233 sys_addr = CONFIG_SYS_SDRAM_BASE;
1234 emif1_size = get_emif_mem_size(EMIF1_BASE);
1235 emif2_size = get_emif_mem_size(EMIF2_BASE);
1236 debug("emif1_size 0x%x emif2_size 0x%x\n", emif1_size, emif2_size);
1238 if (!emif1_size && !emif2_size)
1241 /* symmetric interleaved section */
1242 if (emif1_size && emif2_size) {
1243 mapped_size = min(emif1_size, emif2_size);
1244 section_map = DMM_LISA_MAP_INTERLEAVED_BASE_VAL;
1245 section_map |= 0 << EMIF_SDRC_ADDR_SHIFT;
1247 section_map |= (sys_addr >> 24) <<
1248 EMIF_SYS_ADDR_SHIFT;
1249 section_map |= get_dmm_section_size_map(mapped_size * 2)
1250 << EMIF_SYS_SIZE_SHIFT;
1251 lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
1252 emif1_size -= mapped_size;
1253 emif2_size -= mapped_size;
1254 sys_addr += (mapped_size * 2);
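/*
 * Worked example (illustrative sizes): with 256 MiB on each EMIF the
 * interleaved section covers 512 MiB, so get_dmm_section_size_map()
 * returns 0x5 for the SYS_SIZE field, both per-EMIF sizes drop to zero,
 * and sys_addr advances by the full 512 MiB that was just mapped.
 */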
1259 * Single EMIF section (we can have a maximum of one single-EMIF
1260 * section - either EMIF1 or EMIF2 or none, but not both)
1263 section_map = DMM_LISA_MAP_EMIF1_ONLY_BASE_VAL;
1264 section_map |= get_dmm_section_size_map(emif1_size)
1265 << EMIF_SYS_SIZE_SHIFT;
1267 section_map |= (mapped_size >> 24) <<
1268 EMIF_SDRC_ADDR_SHIFT;
1270 section_map |= (sys_addr >> 24) << EMIF_SYS_ADDR_SHIFT;
1274 section_map = DMM_LISA_MAP_EMIF2_ONLY_BASE_VAL;
1275 section_map |= get_dmm_section_size_map(emif2_size) <<
1276 EMIF_SYS_SIZE_SHIFT;
1278 section_map |= mapped_size >> 24 << EMIF_SDRC_ADDR_SHIFT;
1280 section_map |= sys_addr >> 24 << EMIF_SYS_ADDR_SHIFT;
1284 if (section_cnt == 2) {
1285 /* Only 1 section - either symmetric or single EMIF */
1286 lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
1287 lis_map_regs_calculated.dmm_lisa_map_2 = 0;
1288 lis_map_regs_calculated.dmm_lisa_map_1 = 0;
1290 /* 2 sections - 1 symmetric, 1 single EMIF */
1291 lis_map_regs_calculated.dmm_lisa_map_2 = section_map;
1292 lis_map_regs_calculated.dmm_lisa_map_1 = 0;
1295 /* TRAP for invalid TILER mappings in section 0 */
1296 lis_map_regs_calculated.dmm_lisa_map_0 = DMM_LISA_MAP_0_INVAL_ADDR_TRAP;
1298 if (omap_revision() >= OMAP4460_ES1_0)
1299 lis_map_regs_calculated.is_ma_present = 1;
1301 lisa_map_regs = &lis_map_regs_calculated;
1303 struct dmm_lisa_map_regs *hw_lisa_map_regs =
1304 (struct dmm_lisa_map_regs *)base;
1306 writel(0, &hw_lisa_map_regs->dmm_lisa_map_3);
1307 writel(0, &hw_lisa_map_regs->dmm_lisa_map_2);
1308 writel(0, &hw_lisa_map_regs->dmm_lisa_map_1);
1309 writel(0, &hw_lisa_map_regs->dmm_lisa_map_0);
1311 writel(lisa_map_regs->dmm_lisa_map_3,
1312 &hw_lisa_map_regs->dmm_lisa_map_3);
1313 writel(lisa_map_regs->dmm_lisa_map_2,
1314 &hw_lisa_map_regs->dmm_lisa_map_2);
1315 writel(lisa_map_regs->dmm_lisa_map_1,
1316 &hw_lisa_map_regs->dmm_lisa_map_1);
1317 writel(lisa_map_regs->dmm_lisa_map_0,
1318 &hw_lisa_map_regs->dmm_lisa_map_0);
1320 if (lisa_map_regs->is_ma_present) {
1322 (struct dmm_lisa_map_regs *)MA_BASE;
1324 writel(lisa_map_regs->dmm_lisa_map_3,
1325 &hw_lisa_map_regs->dmm_lisa_map_3);
1326 writel(lisa_map_regs->dmm_lisa_map_2,
1327 &hw_lisa_map_regs->dmm_lisa_map_2);
1328 writel(lisa_map_regs->dmm_lisa_map_1,
1329 &hw_lisa_map_regs->dmm_lisa_map_1);
1330 writel(lisa_map_regs->dmm_lisa_map_0,
1331 &hw_lisa_map_regs->dmm_lisa_map_0);
1333 setbits_le32(MA_PRIORITY, MA_HIMEM_INTERLEAVE_UN_MASK);
1337 * An EMIF should be configured only when
1338 * memory is mapped on it. The emif1_enabled
1339 * and emif2_enabled variables are used to track this.
1343 for (i = 0; i < 4; i++) {
1344 section = __raw_readl(DMM_BASE + i*4);
1345 valid = (section & EMIF_SDRC_MAP_MASK) >>
1346 (EMIF_SDRC_MAP_SHIFT);
1361 static void do_bug0039_workaround(u32 base)
1363 u32 val, i, clkctrl;
1364 struct emif_reg_struct *emif_base = (struct emif_reg_struct *)base;
1365 const struct read_write_regs *bug_00339_regs;
1367 u32 *phy_status_base = &emif_base->emif_ddr_phy_status[0];
1368 u32 *phy_ctrl_base = &emif_base->emif_ddr_ext_phy_ctrl_1;
1373 bug_00339_regs = get_bug_regs(&iterations);
1375 /* Put the EMIF into idle */
1376 clkctrl = __raw_readl((*prcm)->cm_memif_clkstctrl);
1377 __raw_writel(0x0, (*prcm)->cm_memif_clkstctrl);
1379 /* Copy the PHY status registers into the PHY ctrl shadow registers */
1380 for (i = 0; i < iterations; i++) {
1381 val = __raw_readl(phy_status_base +
1382 bug_00339_regs[i].read_reg - 1);
1384 __raw_writel(val, phy_ctrl_base +
1385 ((bug_00339_regs[i].write_reg - 1) << 1));
1387 __raw_writel(val, phy_ctrl_base +
1388 (bug_00339_regs[i].write_reg << 1) - 1);
1391 /* Disable leveling */
1392 writel(0x0, &emif_base->emif_rd_wr_lvl_rmp_ctl);
1394 __raw_writel(clkctrl, (*prcm)->cm_memif_clkstctrl);
1398 * SDRAM initialization:
1399 * SDRAM initialization has two parts:
1400 * 1. Configuring the SDRAM device
1401 * 2. Update the AC timings related parameters in the EMIF module
1402 * (1) should be done only once and should not be done while we are
1403 * running from SDRAM.
1404 * (2) can and should be done more than once if OPP changes.
1405 * In particular, this may be needed when we boot without SPL
1406 * and use the Configuration Header (CH). ROM code supports only 50% OPP
1407 * at boot (low power boot), so U-Boot has to switch to OPP100 and update
1408 * the frequency. So,
1409 * Doing (1) and (2) makes sense - first time initialization
1410 * Doing (2) and not (1) makes sense - OPP change (when using CH)
1411 * Doing (1) and not (2) doesn't make sense
1412 * See do_sdram_init() for the details
1414 void sdram_init(void)
1416 u32 in_sdram, size_prog, size_detect;
1417 struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;
1418 u32 sdram_type = emif_sdram_type(emif->emif_sdram_config);
1420 debug(">>sdram_init()\n");
1422 if (omap_hw_init_context() == OMAP_INIT_CONTEXT_UBOOT_AFTER_SPL)
1425 in_sdram = running_from_sdram();
1426 debug("in_sdram = %d\n", in_sdram);
1429 if ((sdram_type == EMIF_SDRAM_TYPE_LPDDR2) && !warm_reset())
1430 bypass_dpll((*prcm)->cm_clkmode_dpll_core);
1431 else if (sdram_type == EMIF_SDRAM_TYPE_DDR3)
1432 writel(CM_DLL_CTRL_NO_OVERRIDE, (*prcm)->cm_dll_ctrl);
1439 do_sdram_init(EMIF1_BASE);
1442 do_sdram_init(EMIF2_BASE);
1444 if (!(in_sdram || warm_reset())) {
1446 emif_post_init_config(EMIF1_BASE);
1448 emif_post_init_config(EMIF2_BASE);
1451 /* for the shadow registers to take effect */
1452 if (sdram_type == EMIF_SDRAM_TYPE_LPDDR2)
1455 /* Do some testing after the init */
1457 size_prog = omap_sdram_size();
1458 size_prog = log_2_n_round_down(size_prog);
1459 size_prog = (1 << size_prog);
1461 size_detect = get_ram_size((long *)CONFIG_SYS_SDRAM_BASE,
1463 /* Compare with the size programmed */
1464 if (size_detect != size_prog) {
1465 printf("SDRAM: identified size not same as expected size;"
1466 " identified: %x expected: %x\n",
1470 debug("get_ram_size() successful\n");
1473 if (sdram_type == EMIF_SDRAM_TYPE_DDR3 &&
1474 (!in_sdram && !warm_reset()) && (!is_dra7xx())) {
1476 do_bug0039_workaround(EMIF1_BASE);
1478 do_bug0039_workaround(EMIF2_BASE);
1481 debug("<<sdram_init()\n");