/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"
static void scc_mgr_load_dqs_for_write_group(uint32_t write_group);

static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;
/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)
/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;
/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values.
 *
 * The mask is set to include all bits when not-skipping, but is
 * zero when skipping.
 */
uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */
#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
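/*
 * Illustrative behaviour (not from the original source): when delay loops
 * are skipped the mask is all zeroes, so any requested counter value
 * collapses to 0; when they are not skipped the mask is all ones and the
 * value passes through unchanged:
 *
 *   skip_delay_mask = 0x0000;  SKIP_DELAY_LOOP_VALUE_OR_ZERO(0xff) == 0x0000
 *   skip_delay_mask = 0xffff;  SKIP_DELAY_LOOP_VALUE_OR_ZERO(0xff) == 0x00ff
 */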
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm,
	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);
static void set_failing_group_stage(uint32_t group, uint32_t stage,
	uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group
	 */
	if (gbl->error_stage == CAL_STAGE_NIL) {
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}
static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}
static void initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);

	/*
	 * Calibration has control over the path to memory.
	 * In the Hard PHY this is a 2-bit control.
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* memory clock is not stable; we begin initialization */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* calibration status all set to zero */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
		param->read_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->write_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->read_correct_mask = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
		param->write_correct_mask = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
		param->dm_correct_mask = ((uint32_t)1 <<
			(RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
			- 1;
	}
}
static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
{
	uint32_t odt_mask_0 = 0;
	uint32_t odt_mask_1 = 0;
	uint32_t cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
		if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
			/*
			 * 1 Rank
			 * Read: ODT = 0
			 * Write: ODT = 1
			 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
		} else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/*
				 * - Dual-Slot, Single-Rank
				 *   (1 chip-select per DIMM)
				 * OR
				 * - RDIMM, 4 total CS (2 CS per DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they are
				 * both single-rank with 2 CS each
				 * (special for RDIMM).
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * - Single-Slot, Dual-Rank DIMMs
				 *   (2 chip-selects per DIMM)
				 * Read: Turn off ODT on all ranks
				 * Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
		} else {
			/*
			 * 4 Ranks
			 * Read:
			 * ----------+-----------------------+
			 *           |         ODT           |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |         ODT           |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			/* Per-rank masks read straight off the tables above */
			if (rank == 0) {
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
			} else if (rank == 1) {
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
			} else if (rank == 2) {
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
			} else {
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
			}
		}
	} else {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	}

	cs_and_odt_mask =
		(0xFF & ~(1 << rank)) |
		((0xFF & odt_mask_0) << 8) |
		((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
	       RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
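/*
 * Worked example (illustrative, not from the original source): an access
 * to rank 0 on a dual-slot, single-rank board takes the path above that
 * computes odt_mask_0 = 0x3 & ~(1 << 0) = 0x2 and odt_mask_1 = 0x3, so
 *
 *   cs_and_odt_mask = (0xFF & ~0x01) | (0x02 << 8) | (0x03 << 16)
 *                   = 0x0302FE
 *
 * i.e. chip-select asserted (active-low) for rank 0, read ODT driven on
 * the opposite rank, write ODT driven on both ranks.
 */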
/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}
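/*
 * Usage note (illustrative): the SCC Manager lays out one 32-bit register
 * per group at a 4-byte stride, so e.g.
 *
 *   scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, 3, 5);
 *
 * writes the value 5 to SDR_PHYGRP_SCCGRP_ADDRESS |
 * SCC_MGR_DQS_IN_DELAY_OFFSET | (3 << 2), i.e. the register 12 bytes into
 * the DQS-in-delay block.
 */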
/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	u32 i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
	}
}
static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t write_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}
static void scc_mgr_set_dqs_out1_delay(uint32_t write_group,
				       uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
		    delay);
}
/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}
static void scc_mgr_set_dqs_en_phase_all_ranks(uint32_t read_group,
					       uint32_t phase)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set_dqs_en_phase(read_group, phase);

		/*
		 * Although the h/w doesn't support different phases per
		 * shadow register, for simplicity our scc manager modeling
		 * keeps different phase settings per shadow reg, and it's
		 * important for us to keep them in sync to match h/w.
		 * For efficiency, the scan chain update should occur only
		 * once to sr0.
		 */
		writel(read_group, &sdr_scc_mgr->dqs_ena);
		writel(0, &sdr_scc_mgr->update);
	}
}
static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
						     uint32_t phase)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set_dqdqs_output_phase(write_group, phase);

		/*
		 * Although the h/w doesn't support different phases per
		 * shadow register, for simplicity our scc manager modeling
		 * keeps different phase settings per shadow reg, and it's
		 * important for us to keep them in sync to match h/w.
		 * For efficiency, the scan chain update should occur only
		 * once to sr0.
		 */
		writel(write_group, &sdr_scc_mgr->dqs_ena);
		writel(0, &sdr_scc_mgr->update);
	}
}
static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
					       uint32_t delay)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set_dqs_en_delay(read_group, delay);

		writel(read_group, &sdr_scc_mgr->dqs_ena);
		/*
		 * In shadow register mode, the T11 settings are stored in
		 * registers in the core, which are updated by the DQS_ENA
		 * signals. Not issuing the SCC_MGR_UPD command allows us to
		 * save lots of rank switching overhead, by calling
		 * select_shadow_regs_for_update with update_scan_chains
		 * set to 0.
		 */
		writel(0, &sdr_scc_mgr->update);
	}
	/* See the note above on skipping SCC_MGR_UPD. */
	writel(0, &sdr_scc_mgr->update);
}
static void scc_mgr_set_oct_out1_delay(uint32_t write_group, uint32_t delay)
{
	uint32_t read_group;
	uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			SCC_MGR_OCT_OUT1_DELAY_OFFSET;

	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	     read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group)
		writel(delay, addr + (read_group << 2));
}
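/*
 * Illustrative mapping (hypothetical widths, not from the original
 * source): with RW_MGR_MEM_IF_READ_DQS_WIDTH = 8 and
 * RW_MGR_MEM_IF_WRITE_DQS_WIDTH = 4, each write group spans two read
 * groups, so scc_mgr_set_oct_out1_delay(1, delay) runs the loop above for
 * read_group 2 and 3, writing the same OCT delay twice.
 */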
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- dqs bypass
	 * bits: 1:1 = 1'b1	- dq bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	uint32_t value = (0 << 8) | (0 << 7) | (1 << 5) | (1 << 2) |
			 (1 << 1) | (1 << 0);
	uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_HHP_GLOBALS_OFFSET;

	writel(value, addr + SCC_MGR_HHP_EXTRAS_OFFSET);
}
/*
 * Zero all DQS config.
 * TODO: maybe rename to scc_mgr_zero_dqs_config (or something)
 */
static void scc_mgr_zero_all(void)
{
	uint32_t i, r;

	/*
	 * Zero all DQS config settings, across all groups and all
	 * shadow registers
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r +=
	     NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* av/cv don't have out2 */
			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
		}
	}

	/* multicast to all DQS group enables */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}
static void scc_set_bypass_mode(uint32_t write_group, uint32_t mode)
{
	/* mode = 0 : Do NOT bypass - Half Rate Mode */
	/* mode = 1 : Bypass - Full Rate Mode */

	/* only need to set once for all groups, pins, dq, dqs, dm */
	if (write_group == 0) {
		debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n", __func__,
			   __LINE__);
		scc_mgr_set_hhp_extras();
		debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
			   __func__, __LINE__);
	}

	/* multicast to all DQ enables */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* update current DQS IO enable */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* update the DQS logic */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* hit update */
	writel(0, &sdr_scc_mgr->update);
}
static void scc_mgr_load_dqs_for_write_group(uint32_t write_group)
{
	uint32_t read_group;
	uint32_t addr = (u32)&sdr_scc_mgr->dqs_ena;

	/*
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be scanned multiple times.
	 */
	for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	     read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group)
		writel(read_group, addr);
}
static void scc_mgr_zero_group(uint32_t write_group, uint32_t test_begin,
	int32_t out_only)
{
	uint32_t i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r +=
	     NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* multicast to all DQ enables */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
			scc_mgr_set_dm_out1_delay(i, 0);
		}

		/* multicast to all DM enables */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* zero all DQS io settings */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(write_group, 0);
		/* av/cv don't have out2 */
		scc_mgr_set_dqs_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* multicast to all DQS IO enables (only 1) */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* hit update to zero everything */
		writel(0, &sdr_scc_mgr->update);
	}
}
/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t write_group,
	uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}
/* apply and load a particular output delay for the DQ pins in a group */
static void scc_mgr_apply_group_dq_out1_delay(uint32_t write_group,
	uint32_t group_bgn, uint32_t delay1)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
		scc_mgr_set_dq_out1_delay(i, delay1);
		scc_mgr_load_dq(i);
	}
}
/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t write_group,
	uint32_t delay1)
{
	uint32_t i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}
/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
	uint32_t delay)
{
	scc_mgr_set_dqs_out1_delay(write_group, delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}
/* apply a delay to the entire output side: DQ, DM, DQS, OCT */
static void scc_mgr_apply_group_all_out_delay_add(uint32_t write_group,
	uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p, new_delay;

	/* dq shift */
	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
		new_delay = READ_SCC_DQ_OUT2_DELAY;
		new_delay += delay;

		if (new_delay > IO_IO_OUT2_DELAY_MAX) {
			debug_cond(DLEVEL == 1,
				   "%s:%d (%u, %u, %u) DQ[%u,%u]: %u > %lu => %lu\n",
				   __func__, __LINE__, write_group, group_bgn,
				   delay, i, p, new_delay,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX);
			new_delay = IO_IO_OUT2_DELAY_MAX;
		}

		scc_mgr_load_dq(i);
	}

	/* dm shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		new_delay = READ_SCC_DM_IO_OUT2_DELAY;
		new_delay += delay;

		if (new_delay > IO_IO_OUT2_DELAY_MAX) {
			debug_cond(DLEVEL == 1,
				   "%s:%d (%u, %u, %u) DM[%u]: %u > %lu => %lu\n",
				   __func__, __LINE__, write_group, group_bgn,
				   delay, i, new_delay,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX);
			new_delay = IO_IO_OUT2_DELAY_MAX;
		}

		scc_mgr_load_dm(i);
	}

	/* dqs shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY;
	new_delay += delay;

	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u, %u) DQS: %u > %d => %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, group_bgn, delay,
			   new_delay, IO_IO_OUT2_DELAY_MAX,
			   IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		scc_mgr_set_dqs_out1_delay(write_group, new_delay -
					   IO_IO_OUT2_DELAY_MAX);
		new_delay = IO_IO_OUT2_DELAY_MAX;
	}

	scc_mgr_load_dqs_io();

	/* oct shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY;
	new_delay += delay;

	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u, %u) OCT: %u > %d => %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, group_bgn, delay,
			   new_delay, IO_IO_OUT2_DELAY_MAX,
			   IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		scc_mgr_set_oct_out1_delay(write_group, new_delay -
					   IO_IO_OUT2_DELAY_MAX);
		new_delay = IO_IO_OUT2_DELAY_MAX;
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}
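/*
 * Illustrative spill-over (hypothetical values, not from the original
 * source): if IO_IO_OUT2_DELAY_MAX is 7, the current DQS OUT2 delay reads
 * back as 5 and the caller adds 6, new_delay is 11; the DQS/OCT paths
 * above then push the excess 11 - 7 = 4 into OUT1 and clamp OUT2 to 7,
 * while the DQ/DM paths simply clamp to 7.
 */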
/*
 * apply a delay to the entire output side (DQ, DM, DQS, OCT)
 * and to all ranks
 */
static void scc_mgr_apply_group_all_out_delay_add_all_ranks(
	uint32_t write_group, uint32_t group_bgn, uint32_t delay)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group,
						      group_bgn, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}
/* optimization used to recover some slots in ddr3 inst_rom */
/* could be applied to other protocols if we wanted to */
static void set_jump_as_return(void)
{
	/*
	 * to save space, we replace return with jump to special shared
	 * RETURN instruction so we set the counter to large value so that
	 * we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}
/*
 * should always use constants as argument to ensure all computations are
 * performed at compile time
 */
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
	uint32_t afi_clocks;
	uint8_t inner = 0;
	uint8_t outer = 0;
	uint16_t c_loop = 0;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* scale (rounding up) to get afi clocks */
	afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;

	/*
	 * Note, we don't bother accounting for being off a little bit
	 * because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test before
	 * the decrement, and so always perform the loop
	 * 1 time more than the counter value
	 */
	if (afi_clocks == 0) {
		;
	} else if (afi_clocks <= 0x100) {
		inner = afi_clocks - 1;
		outer = 0;
		c_loop = 0;
	} else if (afi_clocks <= 0x10000) {
		inner = 0xff;
		outer = (afi_clocks - 1) >> 8;
		c_loop = 0;
	} else {
		inner = 0xff;
		outer = 0xff;
		c_loop = (afi_clocks - 1) >> 16;
	}
	/*
	 * rom instructions are structured as follows:
	 *
	 * IDLE_LOOP2: jnz cntr0, TARGET_A
	 * IDLE_LOOP1: jnz cntr1, TARGET_B
	 *             return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
	 *
	 * a little confusing, but it helps save precious space in the inst_rom
	 * and sequencer rom and keeps the delays more accurate and reduces
	 * overhead
	 */
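	/*
	 * Illustrative decomposition (not from the original source): a
	 * request of 80000 memory clocks with a hypothetical AFI_RATE_RATIO
	 * of 2 scales to afi_clocks = 40000, which falls in the <= 0x10000
	 * bucket above:
	 *
	 *   inner  = 0xff                    (256 iterations per outer pass)
	 *   outer  = (40000 - 1) >> 8 = 156  (157 passes)
	 *   c_loop = 0
	 *
	 * giving roughly 256 * 157 = 40192 afi clocks, slightly more than
	 * requested, as the note above about being off a little bit allows.
	 */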
	if (afi_clocks <= 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* hack to get around compiler not being smart enough */
		if (afi_clocks <= 0x10000) {
			/* only need to run once */
			writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} else {
			do {
				writel(RW_MGR_IDLE_LOOP2,
				       SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
			} while (c_loop-- != 0);
		}
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}
static void rw_mgr_mem_initialize(void)
{
	uint32_t r;
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcast to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
	       RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how you load register for a loop
	 * Counters are located @ 0x800
	 * Jump address are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */
	/* start with memory RESET activated */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A
	 */
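	/*
	 * Sanity check of the formula above (illustrative, not from the
	 * original source): with n = 0, a = 256 and b = 106,
	 * ((2 + 0) * 256 + 2) * 106 = 54484 cycles, which covers the
	 * required ~54000; 256 and 106 map to the counter values 0xFF and
	 * 0x6A programmed below.
	 */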
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(RW_MGR_INIT_RESET_0_CKE_0, grpaddr);

	/* indicate that memory is stable */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);
	/* transition the RESET to high */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
	 * b = FF
	 */
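	/*
	 * Sanity check (illustrative): ((2 + 2) * 131 + 2) * 256 = 134656
	 * cycles, covering the required ~134000; 131 and 256 map to the
	 * counter values 0x83 and 0xFF programmed below.
	 */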
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	writel(RW_MGR_INIT_RESET_1_CKE_0, grpaddr);

	/* bring up clock enable */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/*
		 * Use mirrored commands for odd ranks if address
		 * mirroring is on
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_DLL_RESET_MIRR, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(RW_MGR_MRS0_DLL_RESET, grpaddr);
		}

		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}
/*
 * At the end of calibration we have to program the user settings in, and
 * hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	uint32_t r;
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	debug("%s:%d\n", __func__, __LINE__);
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		writel(RW_MGR_PRECHARGE_ALL, grpaddr);

		/* load up MR settings specified by user */

		/*
		 * Use mirrored commands for odd ranks if address
		 * mirroring is on
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_USER_MIRR, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_USER, grpaddr);
		}
		/*
		 * Need to wait tMOD (12CK or 15ns) time before issuing
		 * other commands, but we will have plenty of NIOS cycles
		 * before actual handoff, so it's okay.
		 */
	}
}
/*
 * performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works
 */
static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
	uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
	uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst of read commands */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_READ,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_READ_CONT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
			writel(RW_MGR_GUARANTEED_READ, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~base_rw_mgr);

			if (vg == 0)
				break;
		}
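		/*
		 * Illustrative accumulation (hypothetical widths, not from
		 * the original source): with 8 DQ per read DQS and 2 virtual
		 * groups, each pass above contributes 4 pass/fail bits, so
		 * after the two iterations (vg = 1, then vg = 0) tmp_bit_chk
		 * holds an 8-bit map with vg 1's results in the upper nibble
		 * and vg 0's in the lower nibble.
		 */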
		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
	debug_cond(DLEVEL == 1,
		   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %lu\n",
		   __func__, __LINE__, group, *bit_chk,
		   param->read_correct_mask,
		   (long unsigned int)(*bit_chk == param->read_correct_mask));
	return *bit_chk == param->read_correct_mask;
}
static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks
	(uint32_t group, uint32_t num_tries, uint32_t *bit_chk)
{
	return rw_mgr_mem_calibrate_read_test_patterns(0, group,
		num_tries, bit_chk, 1);
}
/* load up the patterns we are going to use during a read test */
static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn,
	uint32_t all_ranks)
{
	uint32_t r;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);

	debug("%s:%d\n", __func__, __LINE__);
	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}
/*
 * try a read and see if it returns correct data back. has dummy reads
 * inserted into the mix, used to align dqs enable. has more thorough checks
 * than the regular read test.
 */
static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups, uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) &
		CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_READ_B2B_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_READ_B2B_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		if (quick_read_mode)
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
			/* need at least two (1+1) reads to capture failures */
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			if (all_groups)
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			else
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;

			writel(RW_MGR_READ_B2B, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~base_rw_mgr);

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %lu\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   param->read_correct_mask,
			   (long unsigned int)(*bit_chk ==
					       param->read_correct_mask));
		return *bit_chk == param->read_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ONE,%u) => (%u != %lu) => %lu\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0x00));
		return *bit_chk != 0x00;
	}
}
static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups)
{
	return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
					      bit_chk, all_groups, 1);
}
static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
	(*v)++;
}

static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v)
{
	uint32_t i;

	for (i = 0; i < VFIFO_SIZE - 1; i++)
		rw_mgr_incr_vfifo(grp, v);
}
static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk)
{
	uint32_t v;
	uint32_t fail_cnt = 0;
	uint32_t test_status;

	for (v = 0; v < VFIFO_SIZE; ) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %u\n",
			   __func__, __LINE__, v);
		test_status = rw_mgr_mem_calibrate_read_test_all_ranks
			(grp, 1, PASS_ONE_BIT, bit_chk, 0);
		if (!test_status) {
			fail_cnt++;

			if (fail_cnt == 2)
				break;
		}

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(grp, &v);
	}

	if (v >= VFIFO_SIZE) {
		/* no failing read found!! Something must have gone wrong */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n",
			   __func__, __LINE__);
		return 0;
	}

	return v;
}
static int find_working_phase(uint32_t *grp, uint32_t *bit_chk,
			      uint32_t dtaps_per_ptap, uint32_t *work_bgn,
			      uint32_t *v, uint32_t *d, uint32_t *p,
			      uint32_t *i, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay = 0;
	uint32_t test_status;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay +=
	     IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		*work_bgn = tmp_delay;
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		for (*i = 0; *i < VFIFO_SIZE; (*i)++) {
			for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++,
			     *work_bgn += IO_DELAY_PER_OPA_TAP) {
				scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

				test_status =
					rw_mgr_mem_calibrate_read_test_all_ranks
					(*grp, 1, PASS_ONE_BIT, bit_chk, 0);

				if (test_status) {
					*max_working_cnt = 1;
					found_begin = 1;
					break;
				}
			}

			if (found_begin)
				break;

			if (*p > IO_DQS_EN_PHASE_MAX)
				/* fiddle with FIFO */
				rw_mgr_incr_vfifo(*grp, v);
		}

		if (found_begin)
			break;
	}

	if (*i >= VFIFO_SIZE) {
		/* cannot find working solution */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
			   __func__, __LINE__);
		return 0;
	}

	return 1;
}
static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk,
			     uint32_t *work_bgn, uint32_t *v, uint32_t *d,
			     uint32_t *p, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(*grp, v);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
	scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

	for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn;
	     (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
			PASS_ONE_BIT, bit_chk, 0)) {
			found_begin = 1;
			*work_bgn = tmp_delay;
			break;
		}
	}

	/* We have found a working dtap before the ptap found above */
	if (found_begin == 1)
		(*max_working_cnt)++;

	/*
	 * Restore VFIFO to old state before we decremented it
	 * (if needed).
	 */
	(*p)++;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		*p = 0;
		rw_mgr_incr_vfifo(*grp, v);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0);
}
static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk,
				uint32_t *work_bgn, uint32_t *v, uint32_t *d,
				uint32_t *p, uint32_t *i,
				uint32_t *max_working_cnt,
				uint32_t *work_end)
{
	uint32_t found_end = 0;

	(*p)++;
	*work_end += IO_DELAY_PER_OPA_TAP;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		/* fiddle with FIFO */
		*p = 0;
		rw_mgr_incr_vfifo(*grp, v);
	}

	for (; *i < VFIFO_SIZE + 1; (*i)++) {
		for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end
		     += IO_DELAY_PER_OPA_TAP) {
			scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(*grp, 1, PASS_ONE_BIT, bit_chk, 0)) {
				found_end = 1;
				break;
			} else {
				(*max_working_cnt)++;
			}
		}

		if (found_end)
			break;

		if (*p > IO_DQS_EN_PHASE_MAX) {
			/* fiddle with FIFO */
			rw_mgr_incr_vfifo(*grp, v);
			*p = 0;
		}
	}

	if (*i >= VFIFO_SIZE + 1) {
		/* cannot see edge of failing read */
		debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end: failed\n",
			   __func__, __LINE__);
		return 0;
	}

	return 1;
}
static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk,
				  uint32_t *work_bgn, uint32_t *v, uint32_t *d,
				  uint32_t *p, uint32_t *work_mid,
				  uint32_t *work_end)
{
	uint32_t tmp_delay = 0;
	uint32_t i;

	*work_mid = (*work_bgn + *work_end) / 2;

	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   *work_bgn, *work_end, *work_mid);
	/* Get the middle delay to be less than a VFIFO delay */
	for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX;
	     (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
		;
	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
	while (*work_mid > tmp_delay)
		*work_mid -= tmp_delay;
	debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid);

	tmp_delay = 0;
	for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid;
	     (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
		;
	tmp_delay -= IO_DELAY_PER_OPA_TAP;
	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay);
	for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid; (*d)++,
	     tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
		;
	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay);

	scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1);
	scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

	/*
	 * push vfifo until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
	 */
	for (i = 0; i < VFIFO_SIZE; i++) {
		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n",
			   *v);
		if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
			PASS_ONE_BIT, bit_chk, 0)) {
			break;
		}

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(*grp, v);
	}

	if (i >= VFIFO_SIZE) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: failed\n",
			   __func__, __LINE__);
		return 0;
	}

	return 1;
}
/* find a good dqs enable to use */
static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
{
	uint32_t v, d, p, i;
	uint32_t max_working_cnt;
	uint32_t bit_chk;
	uint32_t dtaps_per_ptap;
	uint32_t work_bgn, work_mid, work_end;
	uint32_t found_passing_read, found_failing_read, initial_failing_dtap;

	debug("%s:%d %u\n", __func__, __LINE__, grp);

	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);

	/* ************************************************************** */
	/* * Step 0 : Determine number of delay taps for each phase tap * */
	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
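	/*
	 * Illustrative numbers (hypothetical, not from the original source):
	 * with IO_DELAY_PER_OPA_TAP = 416 ps and
	 * IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 25 ps, the integer division above
	 * gives dtaps_per_ptap = 16, i.e. roughly 16 delay-chain taps span
	 * one phase tap.
	 */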
	/* ********************************************************* */
	/* * Step 1 : First push vfifo until we get a failing read * */
	v = find_vfifo_read(grp, &bit_chk);

	max_working_cnt = 0;

	/* ******************************************************** */
	/* * step 2: find first working phase, increment in ptaps * */
	if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v,
			       &d, &p, &i, &max_working_cnt) == 0)
		return 0;

	work_end = work_bgn;

	/*
	 * If d is 0 then the working window covers a phase tap and we can
	 * follow the old procedure; otherwise, we've found the beginning,
	 * and we need to increment the dtaps until we find the end.
	 */
	if (d == 0) {
		/* ********************************************************* */
		/* * step 3a: if we have room, back off by one and
		increment in dtaps * */

		sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				 &max_working_cnt);

		/* ********************************************************* */
		/* * step 4a: go forward from working phase to non working
		phase, increment in ptaps * */
		if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
					 &i, &max_working_cnt, &work_end) == 0)
			return 0;
		/* ********************************************************* */
		/* * step 5a: back off one from last, increment in dtaps * */

		/* Special case code for backing up a phase */
		if (p == 0) {
			p = IO_DQS_EN_PHASE_MAX;
			rw_mgr_decr_vfifo(grp, &v);
		} else {
			p = p - 1;
		}

		work_end -= IO_DELAY_PER_OPA_TAP;
		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

		/* * The actual increment of dtaps is done outside of
		the if/else loop to share code */
		d = 0;

		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: vfifo=%u ptap=%u\n",
			   __func__, __LINE__, v, p);
	} else {
		/* ******************************************************* */
		/* * step 3-5b: Find the right edge of the window using
		delay taps * */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo=%u ptap=%u dtap=%u bgn=%u\n",
			   __func__, __LINE__, v, p, d, work_bgn);

		work_end = work_bgn;

		/* * The actual increment of dtaps is done outside of the
		if/else loop to share code */

		/* Only here to counterbalance a subtract later on which is
		not needed if this branch of the algorithm is taken */
		max_working_cnt = 0;
	}

	/* The dtap increment to find the failing edge is done here */
	for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
	     IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: dtap=%u\n",
			   __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
			PASS_ONE_BIT, &bit_chk, 0)) {
			break;
		}
	}

	/* Go back to working dtap */
	if (d != 0)
		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%u ptap=%u dtap=%u end=%u\n",
		   __func__, __LINE__, v, p, d - 1, work_end);

	if (work_end < work_bgn) {
		/* nil range */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: failed\n",
			   __func__, __LINE__);
		return 0;
	}

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
		   __func__, __LINE__, work_bgn, work_end);
	/* *************************************************************** */
	/*
	 * * We need to calculate the number of dtaps that equal a ptap
	 * * To do that we'll back up a ptap and re-find the edge of the
	 * * window using dtaps
	 */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap for tracking\n",
		   __func__, __LINE__);

	/* Special case code for backing up a phase */
	if (p == 0) {
		p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp, &v);
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backed up cycle/phase: v=%u p=%u\n",
			   __func__, __LINE__, v, p);
	} else {
		p = p - 1;
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backed up phase only: v=%u p=%u\n",
			   __func__, __LINE__, v, p);
	}

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

	/*
	 * Increase dtap until we first see a passing read (in case the
	 * window is smaller than a ptap), and then a failing read to mark
	 * the edge of the window again.
	 */

	/* Find a passing read */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
		   __func__, __LINE__);
	found_passing_read = 0;
	found_failing_read = 0;
	initial_failing_dtap = d;
	for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing read d=%u\n",
			   __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
			PASS_ONE_BIT, &bit_chk, 0)) {
			found_passing_read = 1;
			break;
		}
	}

	if (found_passing_read) {
		/* Find a failing read */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing read\n",
			   __func__, __LINE__);
		for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
			debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing read d=%u\n",
				   __func__, __LINE__, d);
			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
				found_failing_read = 1;
				break;
			}
		}
	} else {
		debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to calculate dtaps per ptap. Fall back on static value\n",
			   __func__, __LINE__);
	}

	/*
	 * The dynamically calculated dtaps_per_ptap is only valid if we
	 * found a passing/failing read. If we didn't, it means d hit the max
	 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
	 * statically calculated value.
	 */
	if (found_passing_read && found_failing_read)
		dtaps_per_ptap = d - initial_failing_dtap;

	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u - %u = %u\n",
		   __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
	/* ******************************************** */
	/* * step 6: Find the centre of the window * */
	if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				   &work_mid, &work_end) == 0)
		return 0;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: vfifo=%u ptap=%u dtap=%u\n",
		   __func__, __LINE__, v, p - 1, d);

	return 1;
}
/*
 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
 * dq_in_delay values
 */
static uint32_t
rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
	(uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
{
	uint32_t found;
	uint32_t i, p, d, r;

	const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
		(RW_MGR_MEM_DQ_PER_READ_DQS - 1);
	/* we start at zero, so have one less dq to divide among */
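	/*
	 * Illustrative spread (hypothetical values, not from the original
	 * source): with IO_IO_IN_DELAY_MAX = 31 and 8 DQ per read DQS,
	 * delay_step = 31 / 7 = 4, so the pins in the group are staggered at
	 * input delays 0, 4, 8, ... 28 for the sweep below.
	 */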
	debug("%s:%d (%u,%u,%u)\n", __func__, __LINE__, write_group,
	      read_group, test_bgn);

	/* try different dq_in_delays since the dq path is shorter than dqs */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++, d += delay_step) {
			debug_cond(DLEVEL == 1,
				   "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay: g=%u/%u r=%u, i=%u p=%u d=%u\n",
				   __func__, __LINE__, write_group, read_group,
				   r, i, p, d);
			scc_mgr_set_dq_in_delay(p, d);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);

	debug_cond(DLEVEL == 1,
		   "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay: g=%u/%u found=%u; Resetting delay chain to zero\n",
		   __func__, __LINE__, write_group, read_group, found);

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++) {
			scc_mgr_set_dq_in_delay(p, 0);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	return found;
}
/* per-bit deskew DQ and center */
static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
	uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
	uint32_t use_read_test, uint32_t update_fom)
{
	uint32_t i, p, d, min_index;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	uint32_t bit_chk;
	uint32_t sticky_bit_chk;
	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t mid;
	int32_t orig_mid_min, mid_min;
	int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
		final_dqs_en;
	int32_t dq_margin, dqs_margin;
	uint32_t stop;
	uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
	uint32_t addr;

	debug("%s:%d: %u %u\n", __func__, __LINE__, read_group, test_bgn);

	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
	start_dqs = readl(addr + (read_group << 2));
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
		start_dqs_en = readl(addr + ((read_group << 2)
				     - IO_DQS_EN_DELAY_OFFSET));

	/* set the left and right edge of each bit to an illegal value */
	/* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		left_edge[i] = IO_IO_IN_DELAY_MAX + 1;
		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
	}
	/* Search for the left edge of the window for each bit */
	for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
		scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);
		debug_cond(DLEVEL == 2,
			   "%s:%d vfifo_center(left): dtap=%u => %u == %u && %u\n",
			   __func__, __LINE__, d, sticky_bit_chk,
			   param->read_correct_mask, stop);

		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
				if (bit_chk & 1) {
					/* Remember a passing test as the
					left edge */
					left_edge[i] = d;
				} else {
					/* If a left edge has not been seen yet,
					then a future passing test will mark
					this edge as the right edge */
					if (left_edge[i] ==
						IO_IO_IN_DELAY_MAX + 1) {
						right_edge[i] = -(d + 1);
					}
				}
				bit_chk = bit_chk >> 1;
			}
		}
	}

	/* Reset DQ delay chains to 0 */
	scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, 0);
	sticky_bit_chk = 0;
	for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
		debug_cond(DLEVEL == 2,
			   "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
			   __func__, __LINE__, i, left_edge[i], i,
			   right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && (
			right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
			right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
			debug_cond(DLEVEL == 2,
				   "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
				   __func__, __LINE__, i, right_edge[i]);
		}

		/*
		 * Reset sticky bit (except for bits where we have seen
		 * both the left and right edge).
		 */
		sticky_bit_chk = sticky_bit_chk << 1;
		if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
			sticky_bit_chk = sticky_bit_chk | 1;
		}

		if (i == 0)
			break;
	}
	/* Search for the right edge of the window for each bit */
	for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
		scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
		if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
			uint32_t delay = d + start_dqs_en;
			if (delay > IO_DQS_EN_DELAY_MAX)
				delay = IO_DQS_EN_DELAY_MAX;
			scc_mgr_set_dqs_en_delay(read_group, delay);
		}
		scc_mgr_load_dqs(read_group);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);

		debug_cond(DLEVEL == 2,
			   "%s:%d vfifo_center(right): dtap=%u => %u == %u && %u\n",
			   __func__, __LINE__, d, sticky_bit_chk,
			   param->read_correct_mask, stop);
		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
				if (bit_chk & 1) {
					/* Remember a passing test as
					the right edge */
					right_edge[i] = d;
				} else {
					if (d != 0) {
						/* If a right edge has not been
						seen yet, then a future passing
						test will mark this edge as the
						left edge */
						if (right_edge[i] ==
							IO_IO_IN_DELAY_MAX + 1) {
							left_edge[i] = -(d + 1);
						}
					} else {
						/* d = 0 failed, but it passed
						when testing the left edge,
						so it must be marginal,
						set it to -1 */
						if (right_edge[i] ==
							IO_IO_IN_DELAY_MAX + 1 &&
							left_edge[i] !=
							IO_IO_IN_DELAY_MAX + 1) {
							right_edge[i] = -1;
						}
						/* If a right edge has not been
						seen yet, then a future passing
						test will mark this edge as the
						left edge */
						else if (right_edge[i] ==
							IO_IO_IN_DELAY_MAX + 1) {
							left_edge[i] = -(d + 1);
						}
					}
				}

				debug_cond(DLEVEL == 2,
					   "%s:%d vfifo_center[r,d=%u]: ",
					   __func__, __LINE__, d);
				debug_cond(DLEVEL == 2,
					   "bit_chk_test=%d left_edge[%u]: %d ",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n",
					   i, right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}
	/* Check that all bits have a window */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		debug_cond(DLEVEL == 2,
			   "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
			   __func__, __LINE__, i, left_edge[i], i,
			   right_edge[i]);
		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) || (right_edge[i]
			== IO_IO_IN_DELAY_MAX + 1)) {
			/*
			 * Restore delay chain settings before letting the loop
			 * in rw_mgr_mem_calibrate_vfifo retry different
			 * dqs/ck relationships.
			 */
			scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
			if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
				scc_mgr_set_dqs_en_delay(read_group,
							 start_dqs_en);
			}
			scc_mgr_load_dqs(read_group);
			writel(0, &sdr_scc_mgr->update);

			debug_cond(DLEVEL == 1,
				   "%s:%d vfifo_center: failed to find edge [%u]: %d %d\n",
				   __func__, __LINE__, i, left_edge[i],
				   right_edge[i]);
			if (use_read_test) {
				set_failing_group_stage(read_group *
					RW_MGR_MEM_DQ_PER_READ_DQS + i,
					CAL_STAGE_VFIFO,
					CAL_SUBSTAGE_VFIFO_CENTER);
			} else {
				set_failing_group_stage(read_group *
					RW_MGR_MEM_DQ_PER_READ_DQS + i,
					CAL_STAGE_VFIFO_AFTER_WRITES,
					CAL_SUBSTAGE_VFIFO_CENTER);
			}
			return 0;
		}
	}
	/* Find middle of window for each DQ bit */
	mid_min = left_edge[0] - right_edge[0];
	min_index = 0;
	for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		mid = left_edge[i] - right_edge[i];
		if (mid < mid_min) {
			mid_min = mid;
			min_index = i;
		}
	}

	/*
	 * -mid_min/2 represents the amount that we need to move DQS.
	 * If mid_min is odd and positive we'll need to add one to
	 * make sure the rounding in further calculations is correct
	 * (always bias to the right), so just add 1 for all positive values.
	 */
	if (mid_min > 0)
		mid_min++;

	mid_min = mid_min / 2;
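	/*
	 * Illustrative rounding (not from the original source): if the
	 * narrowest bit has left_edge = 7 and right_edge = 2, mid_min starts
	 * at 5; being odd and positive it is bumped to 6 and halved to 3, so
	 * DQS later moves by 3 taps, biased to the right as the comment
	 * above requires. Without the bump, 5 / 2 would truncate to 2 and
	 * bias the centre to the left.
	 */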
2115 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
2116 __func__, __LINE__, mid_min, min_index);
	/* Determine the amount we can change DQS (which is -mid_min) */
	orig_mid_min = mid_min;
	new_dqs = start_dqs - mid_min;
	if (new_dqs > IO_DQS_IN_DELAY_MAX)
		new_dqs = IO_DQS_IN_DELAY_MAX;
	else if (new_dqs < 0)
		new_dqs = 0;

	mid_min = start_dqs - new_dqs;
	debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
		   mid_min, new_dqs);

	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
		if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
			mid_min += start_dqs_en - mid_min -
				   IO_DQS_EN_DELAY_MAX;
		else if (start_dqs_en - mid_min < 0)
			mid_min += start_dqs_en - mid_min;
	}
	new_dqs = start_dqs - mid_min;

	debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \
		   new_dqs=%d mid_min=%d\n", start_dqs,
		   IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
		   new_dqs, mid_min);
	/* Initialize data for export structures */
	dqs_margin = IO_IO_IN_DELAY_MAX + 1;
	dq_margin = IO_IO_IN_DELAY_MAX + 1;

	/* add delay to bring centre of all DQ windows to the same "level" */
	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		/* Use values before divide by 2 to reduce round off error */
		shift_dq = (left_edge[i] - right_edge[i] -
			(left_edge[min_index] - right_edge[min_index]))/2 +
			(orig_mid_min - mid_min);

		debug_cond(DLEVEL == 2, "vfifo_center: before: \
			   shift_dq[%u]=%d\n", i, shift_dq);

		addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;
		temp_dq_in_delay1 = readl(addr + (p << 2));
		temp_dq_in_delay2 = readl(addr + (i << 2));

		if (shift_dq + (int32_t)temp_dq_in_delay1 >
			(int32_t)IO_IO_IN_DELAY_MAX) {
			shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2;
		} else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
			shift_dq = -(int32_t)temp_dq_in_delay1;
		}
		debug_cond(DLEVEL == 2, "vfifo_center: after: \
			   shift_dq[%u]=%d\n", i, shift_dq);
		final_dq[i] = temp_dq_in_delay1 + shift_dq;
		scc_mgr_set_dq_in_delay(p, final_dq[i]);
		scc_mgr_load_dq(p);

		debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
			   left_edge[i] - shift_dq + (-mid_min),
			   right_edge[i] + shift_dq - (-mid_min));
		/* To determine values for export structures */
		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
			dq_margin = left_edge[i] - shift_dq + (-mid_min);

		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
	}
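
	/*
	 * Note: shift_dq above is computed relative to the narrowest
	 * window (min_index), so after this loop the centre of every DQ
	 * window sits at the same offset from DQS before DQS itself is
	 * moved below.
	 */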
	final_dqs = new_dqs;
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
		final_dqs_en = start_dqs_en - mid_min;

	/* Move DQS-en */
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
		scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
		scc_mgr_load_dqs(read_group);
	}

	/* Move DQS */
	scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
	scc_mgr_load_dqs(read_group);
	debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d \
		   dqs_margin=%d", __func__, __LINE__,
		   dq_margin, dqs_margin);

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied. Apply the update bit.
	 */
	writel(0, &sdr_scc_mgr->update);

	return (dq_margin >= 0) && (dqs_margin >= 0);
}
/*
 * Calibrate the read valid prediction FIFO.
 *
 *  - read valid prediction will consist of finding a good DQS enable phase,
 *    DQS enable delay, DQS input phase, and DQS input delay.
 *  - we also do a per-bit deskew on the DQ lines.
 */
static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group,
			uint32_t test_bgn)
{
	uint32_t p, d, rank_bgn, sr;
	uint32_t dtaps_per_ptap;
	uint32_t tmp_delay;
	uint32_t bit_chk;
	uint32_t grp_calibrated;
	uint32_t write_group, write_test_bgn;
	uint32_t failed_substage;

	debug("%s:%d: %u %u\n", __func__, __LINE__, read_group, test_bgn);

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_VFIFO);

	write_group = read_group;
	write_test_bgn = test_bgn;
	/* USER Determine number of delay taps for each phase tap */
	dtaps_per_ptap = 0;
	tmp_delay = 0;
	while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
		dtaps_per_ptap++;
		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}
	dtaps_per_ptap--;
	tmp_delay = 0;
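
	/*
	 * Example with illustrative values only: IO_DELAY_PER_OPA_TAP =
	 * 2500 ps and IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 25 ps make the loop
	 * count 100 iterations, and the decrement leaves dtaps_per_ptap =
	 * 99, i.e. the largest dtap count that still fits inside one ptap.
	 */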
	/* update info for sims */
	reg_file_set_group(read_group);

	grp_calibrated = 0;

	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;

	for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) {
		/*
		 * In RLDRAMX we may be messing with the delay of pins in
		 * the same write group but outside of the current read
		 * group, but that's OK because we haven't calibrated the
		 * output side yet.
		 */
		if (d > 0) {
			scc_mgr_apply_group_all_out_delay_add_all_ranks
			(write_group, write_test_bgn, d);
		}

		for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0;
			p++) {
			/* set a particular dqdqs phase */
			scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p);

			debug_cond(DLEVEL == 1, "%s:%d calibrate_vfifo: g=%u \
				   p=%u d=%u\n", __func__, __LINE__,
				   read_group, p, d);

			/*
			 * Load up the patterns used by read calibration
			 * using current DQDQS phase.
			 */
			rw_mgr_mem_calibrate_read_load_patterns(0, 1);
			if (!(gbl->phy_debug_mode_flags &
				PHY_DEBUG_DISABLE_GUARANTEED_READ)) {
				if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks
				    (read_group, 1, &bit_chk)) {
					debug_cond(DLEVEL == 1, "%s:%d Guaranteed read test failed:",
						   __func__, __LINE__);
					debug_cond(DLEVEL == 1, " g=%u p=%u d=%u\n",
						   read_group, p, d);
					break;
				}
			}

			grp_calibrated = 1;
			if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
			    (write_group, read_group, test_bgn)) {
				/*
				 * USER Read per-bit deskew can be done on a
				 * per shadow register basis.
				 */
				for (rank_bgn = 0, sr = 0;
					rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
					rank_bgn += NUM_RANKS_PER_SHADOW_REG,
					++sr) {
					/*
					 * Determine if this set of ranks
					 * should be skipped entirely.
					 */
					if (!param->skip_shadow_regs[sr]) {
						/*
						 * If doing read after write
						 * calibration, do not update
						 * FOM now - do it then.
						 */
						if (!rw_mgr_mem_calibrate_vfifo_center
							(rank_bgn, write_group,
							read_group, test_bgn,
							1, 0)) {
							grp_calibrated = 0;
							failed_substage =
							CAL_SUBSTAGE_VFIFO_CENTER;
						}
					}
				}
			} else {
				grp_calibrated = 0;
				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
			}
		}
	}

	if (grp_calibrated == 0) {
		set_failing_group_stage(write_group, CAL_STAGE_VFIFO,
					failed_substage);
		return 0;
	}

	/*
	 * Reset the delay chains back to zero if they have moved > 1
	 * (check for > 1 because loop will increase d even when pass in
	 * first case).
	 */
	if (d > 2)
		scc_mgr_zero_group(write_group, write_test_bgn, 1);

	return 1;
}
2348 static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
2351 uint32_t rank_bgn, sr;
2352 uint32_t grp_calibrated;
2353 uint32_t write_group;
2355 debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);
2357 /* update info for sims */
2359 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2360 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2362 write_group = read_group;
2364 /* update info for sims */
2365 reg_file_set_group(read_group);
2368 /* Read per-bit deskew can be done on a per shadow register basis */
2369 for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2370 rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
2371 /* Determine if this set of ranks should be skipped entirely */
2372 if (!param->skip_shadow_regs[sr]) {
2373 /* This is the last calibration round, update FOM here */
2374 if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
2385 if (grp_calibrated == 0) {
2386 set_failing_group_stage(write_group,
2387 CAL_STAGE_VFIFO_AFTER_WRITES,
2388 CAL_SUBSTAGE_VFIFO_CENTER);
/* Calibrate LFIFO to find smallest read latency */
static uint32_t rw_mgr_mem_calibrate_lfifo(void)
{
	uint32_t found_one;
	uint32_t bit_chk;

	debug("%s:%d\n", __func__, __LINE__);

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_LFIFO);
	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);

	/* Load up the patterns used by read calibration for all ranks */
	rw_mgr_mem_calibrate_read_load_patterns(0, 1);
	found_one = 0;
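
	/*
	 * The search below walks read latency downward: e.g. if reads pass
	 * at latency 10, 9 and 8 but fail at 7, the loop exits with
	 * curr_read_lat == 7 and the +2 fudge factor below programs a
	 * final read latency of 9, one above the smallest passing value.
	 */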
	do {
		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
			   __func__, __LINE__, gbl->curr_read_lat);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
							      NUM_READ_TESTS,
							      PASS_ALL_BITS,
							      &bit_chk, 1))
			break;

		found_one = 1;
		/* reduce read latency and see if things are working correctly */
		gbl->curr_read_lat--;
	} while (gbl->curr_read_lat > 0);

	/* reset the fifos to get pointers to known state */
	writel(0, &phy_mgr_cmd->fifo_reset);

	if (found_one) {
		/* add a fudge factor to the read latency that was determined */
		gbl->curr_read_lat += 2;
		writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
		debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \
			   read_lat=%u\n", __func__, __LINE__,
			   gbl->curr_read_lat);
		return 1;
	} else {
		set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
					CAL_SUBSTAGE_READ_LATENCY);

		debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \
			   read_lat=%u\n", __func__, __LINE__,
			   gbl->curr_read_lat);
		return 0;
	}
}
/*
 * issue write test command.
 * two variants are provided. one that just tests a write pattern and
 * another that tests datamask functionality.
 */
static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
	uint32_t test_dm)
{
	uint32_t mcc_instruction;
	uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
		ENABLE_SUPER_QUICK_CALIBRATION);
	uint32_t rw_wl_nop_cycles;
	uint32_t addr;

	/*
	 * Set counter and jump addresses for the right
	 * number of NOP cycles.
	 * The number of supported NOP cycles can range from -1 to infinity.
	 * Three different cases are handled:
	 *
	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
	 *    mechanism will be used to insert the right number of NOPs.
	 *
	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
	 *    issuing the write command will jump straight to the
	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
	 *    data (for RLD), skipping the NOP micro-instruction altogether.
	 *
	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
	 *    turned on in the same micro-instruction that issues the write
	 *    command. Then we need to directly jump to the micro-instruction
	 *    that sends out the data.
	 *
	 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
	 *       (2 and 3). One jump-counter (0) is used to perform multiple
	 *       write-read operations, leaving one counter to issue this
	 *       command in "multiple-group" mode.
	 */
	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
	if (rw_wl_nop_cycles == -1) {
		/*
		 * CNTR 2 - We want to execute the special write operation that
		 * turns on DQS right away and then skip directly to the
		 * instruction that sends out the data. We set the counter to a
		 * large number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	} else if (rw_wl_nop_cycles == 0) {
		/*
		 * CNTR 2 - We want to skip the NOP operation and go straight
		 * to the DQS enable instruction. We set the counter to a large
		 * number so that the jump is always taken.
		 */
		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);

		/* CNTR 3 - Not used */
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
		}
	} else {
		/*
		 * CNTR 2 - In this case we want to execute the next instruction
		 * and NOT take the jump. So we set the counter to 0. The jump
		 * address doesn't count.
		 */
		writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		/*
		 * CNTR 3 - Set the nop counter to the number of cycles we
		 * need to loop for, minus 1.
		 */
		writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
		if (test_dm) {
			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		} else {
			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		}
	}
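
	/*
	 * Recap of the three cases above:
	 *   rw_wl_nop_cycles == -1: CNTR2 = 0xFF, jump2 -> _DATA, jump3 -> _NOP
	 *   rw_wl_nop_cycles ==  0: CNTR2 = 0xFF, jump2 -> _DQS
	 *   rw_wl_nop_cycles  >  0: CNTR2 = 0, CNTR3 = cycles - 1, jump3 -> _NOP
	 */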
	writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
		  RW_MGR_RESET_READ_DATAPATH_OFFSET);

	if (quick_write_mode)
		writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
	else
		writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);

	writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);

	/*
	 * CNTR 1 - This is used to ensure enough time elapses
	 * for read data to come back.
	 */
	writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);

	if (test_dm) {
		writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	} else {
		writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(mcc_instruction, addr + (group << 2));
}
/* Test writes, can check for a single bit pass or multiple bit pass */
static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
	uint32_t *bit_chk, uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr_rw_mgr;
	uint32_t base_rw_mgr;

	*bit_chk = param->write_correct_mask;
	correct_mask_vg = param->write_correct_mask_vg;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		tmp_bit_chk = 0;
		addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS - 1;; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			tmp_bit_chk = tmp_bit_chk <<
				(RW_MGR_MEM_DQ_PER_WRITE_DQS /
				 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
			rw_mgr_mem_calibrate_write_test_issue(write_group *
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS + vg,
				use_dm);

			base_rw_mgr = readl(addr_rw_mgr);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}
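
	/*
	 * At this point *bit_chk holds, for each DQ bit in the write group,
	 * the AND across all tested ranks of the per-virtual-group pass
	 * masks gathered above: a bit is set only if that DQ passed on
	 * every rank.
	 */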
	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == \
			   %u => %lu", write_group, use_dm,
			   *bit_chk, param->write_correct_mask,
			   (long unsigned int)(*bit_chk ==
			   param->write_correct_mask));
		return *bit_chk == param->write_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ",
			   write_group, use_dm, *bit_chk);
		debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0));
		return *bit_chk != 0x00;
	}
}
/*
 * center all windows. do per-bit-deskew to possibly increase size of
 * certain windows.
 */
static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
	uint32_t write_group, uint32_t test_bgn)
{
	uint32_t i, p, min_index;
	int32_t d;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	uint32_t bit_chk;
	uint32_t sticky_bit_chk;
	int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
	int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
	int32_t mid;
	int32_t mid_min, orig_mid_min;
	int32_t new_dqs, start_dqs, shift_dq;
	int32_t dq_margin, dqs_margin, dm_margin;
	uint32_t stop;
	uint32_t temp_dq_out1_delay;
	uint32_t addr;

	debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);

	dm_margin = 0;

	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
	start_dqs = readl(addr +
			  (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
	/* per-bit deskew */

	/*
	 * set the left and right edge of each bit to an illegal value
	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
	 */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
		right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
	}

	/* Search for the left edge of the window for each bit */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
		scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, d);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
			0, PASS_ONE_BIT, &bit_chk, 0);
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->write_correct_mask);
		debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => %u \
			   == %u && %u [bit_chk= %u ]\n",
			   d, sticky_bit_chk, param->write_correct_mask,
			   stop, bit_chk);

		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
				if (bit_chk & 1) {
					/*
					 * Remember a passing test as the
					 * left_edge.
					 */
					left_edge[i] = d;
				} else {
					/*
					 * If a left edge has not been seen
					 * yet, then a future passing test will
					 * mark this edge as the right edge.
					 */
					if (left_edge[i] ==
						IO_IO_OUT1_DELAY_MAX + 1) {
						right_edge[i] = -(d + 1);
					}
				}
				debug_cond(DLEVEL == 2, "write_center[l,d=%d):", d);
				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
					   right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}
	/* Reset DQ delay chains to 0 */
	scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, 0);
	sticky_bit_chk = 0;
	for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
		debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
			   %d right_edge[%u]: %d\n", __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
			right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
			debug_cond(DLEVEL == 2, "%s:%d write_center: reset \
				   right_edge[%u]: %d\n", __func__, __LINE__,
				   i, right_edge[i]);
		}

		/*
		 * Reset sticky bit (except for bits where we have
		 * seen the left edge).
		 */
		sticky_bit_chk = sticky_bit_chk << 1;
		if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1))
			sticky_bit_chk = sticky_bit_chk | 1;

		if (i == 0)
			break;
	}
	/* Search for the right edge of the window for each bit */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
							d + start_dqs);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
			0, PASS_ONE_BIT, &bit_chk, 0);

		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->write_correct_mask);

		debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => %u == \
			   %u && %u\n", d, sticky_bit_chk,
			   param->write_correct_mask, stop);
		if (stop == 1) {
			if (d == 0) {
				for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
					i++) {
					/* d = 0 failed, but it passed when
					testing the left edge, so it must be
					marginal, set it to -1 */
					if (right_edge[i] ==
						IO_IO_OUT1_DELAY_MAX + 1 &&
						left_edge[i] !=
						IO_IO_OUT1_DELAY_MAX + 1) {
						right_edge[i] = -1;
					}
				}
			}
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
				if (bit_chk & 1) {
					/*
					 * Remember a passing test as
					 * the right_edge.
					 */
					right_edge[i] = d;
				} else {
					if (d != 0) {
						/*
						 * If a right edge has not
						 * been seen yet, then a future
						 * passing test will mark this
						 * edge as the left edge.
						 */
						if (right_edge[i] ==
						    IO_IO_OUT1_DELAY_MAX + 1)
							left_edge[i] = -(d + 1);
					} else {
						/*
						 * d = 0 failed, but it passed
						 * when testing the left edge,
						 * so it must be marginal, set
						 * it to -1.
						 */
						if (right_edge[i] ==
						    IO_IO_OUT1_DELAY_MAX + 1 &&
						    left_edge[i] !=
						    IO_IO_OUT1_DELAY_MAX + 1)
							right_edge[i] = -1;
						/*
						 * If a right edge has not been
						 * seen yet, then a future
						 * passing test will mark this
						 * edge as the left edge.
						 */
						else if (right_edge[i] ==
							IO_IO_OUT1_DELAY_MAX +
							1)
							left_edge[i] = -(d + 1);
					}
				}
				debug_cond(DLEVEL == 2, "write_center[r,d=%d):", d);
				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
					   right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}
	/* Check that all bits have a window */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
			   %d right_edge[%u]: %d", __func__, __LINE__,
			   i, left_edge[i], i, right_edge[i]);
		if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
		    (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
			set_failing_group_stage(test_bgn + i,
						CAL_STAGE_WRITES,
						CAL_SUBSTAGE_WRITES_CENTER);
			return 0;
		}
	}
	/* Find middle of window for each DQ bit */
	mid_min = left_edge[0] - right_edge[0];
	min_index = 0;
	for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		mid = left_edge[i] - right_edge[i];
		if (mid < mid_min) {
			mid_min = mid;
			min_index = i;
		}
	}

	/*
	 * -mid_min/2 represents the amount that we need to move DQS.
	 * If mid_min is odd and positive we'll need to add one to
	 * make sure the rounding in further calculations is correct
	 * (always bias to the right), so just add 1 for all positive values.
	 */
	if (mid_min > 0)
		mid_min++;
	mid_min = mid_min / 2;
	debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
		   __LINE__, mid_min);
	/* Determine the amount we can change DQS (which is -mid_min) */
	orig_mid_min = mid_min;
	new_dqs = start_dqs;
	mid_min = 0;

	debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \
		   mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min);
	/* Initialize data for export structures */
	dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
	dq_margin = IO_IO_OUT1_DELAY_MAX + 1;
	/* add delay to bring centre of all DQ windows to the same "level" */
	for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
		/* Use values before divide by 2 to reduce round off error */
		shift_dq = (left_edge[i] - right_edge[i] -
			(left_edge[min_index] - right_edge[min_index]))/2 +
			(orig_mid_min - mid_min);

		debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \
			   [%u]=%d\n", __func__, __LINE__, i, shift_dq);

		addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
		temp_dq_out1_delay = readl(addr + (i << 2));
		if (shift_dq + (int32_t)temp_dq_out1_delay >
			(int32_t)IO_IO_OUT1_DELAY_MAX) {
			shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
		} else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
			shift_dq = -(int32_t)temp_dq_out1_delay;
		}
		debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
			   i, shift_dq);
		scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq);
		scc_mgr_load_dq(i);

		debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
			   left_edge[i] - shift_dq + (-mid_min),
			   right_edge[i] + shift_dq - (-mid_min));
		/* To determine values for export structures */
		if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
			dq_margin = left_edge[i] - shift_dq + (-mid_min);

		if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
			dqs_margin = right_edge[i] + shift_dq - (-mid_min);
	}
	/* Move DQS */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
	writel(0, &sdr_scc_mgr->update);

	/* Centre DM */
	debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);

	/*
	 * set the left and right edge of each bit to an illegal value,
	 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value,
	 */
	left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
	right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
	int32_t win_best = 0;
	/* Search for the/part of the window with DM shift */
	for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
		scc_mgr_apply_group_dm_out1_delay(write_group, d);
		writel(0, &sdr_scc_mgr->update);

		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* USER Set current end of the window */
			end_curr = -d;
			/*
			 * If a starting edge of our window has not been seen
			 * this is our current start of the DM window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = -d;

			/*
			 * If current window is bigger than best seen.
			 * Set best seen to be current window.
			 */
			if ((end_curr-bgn_curr+1) > win_best) {
				win_best = end_curr-bgn_curr+1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edge */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;
		}
	}
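
	/*
	 * The DM window is searched in two passes: the loop above shifts the
	 * DM out1 delay and covers the part of the window at negative
	 * offsets (stored as -d), while the loop below shifts DQS/OCT
	 * instead and covers the part at positive offsets.
	 */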
	/* Reset DM delay chains to 0 */
	scc_mgr_apply_group_dm_out1_delay(write_group, 0);

	/*
	 * Check to see if the current window nudges up against 0 delay.
	 * If so we need to continue the search by shifting DQS, otherwise
	 * the DQS search begins as a new search.
	 */
	if (end_curr != 0) {
		bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
		end_curr = IO_IO_OUT1_DELAY_MAX + 1;
	}
	/* Search for the/part of the window with DQS shifts */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
		/*
		 * Note: This only shifts DQS, so we may be limiting ourselves
		 * to the width of DQ unnecessarily.
		 */
		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
							d + new_dqs);

		writel(0, &sdr_scc_mgr->update);
		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* USER Set current end of the window */
			end_curr = d;
			/*
			 * If a beginning edge of our window has not been seen
			 * this is our current begin of the DM window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = d;

			/*
			 * If current window is bigger than best seen. Set best
			 * seen to be current window.
			 */
			if ((end_curr-bgn_curr+1) > win_best) {
				win_best = end_curr-bgn_curr+1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edge */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;

			/*
			 * Early exit optimization: if the remaining delay
			 * chain space is less than the already seen largest
			 * window we can exit.
			 */
			if ((win_best-1) >
				(IO_IO_OUT1_DELAY_MAX - new_dqs - d)) {
				break;
			}
		}
	}
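
	/*
	 * The early exit above is safe because a window that starts after
	 * the current failing tap can span at most the remaining
	 * (IO_IO_OUT1_DELAY_MAX - new_dqs - d) taps, so it can never beat
	 * a win_best larger than that.
	 */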
	/* assign left and right edge for cal and reporting */
	left_edge[0] = -1*bgn_best;
	right_edge[0] = end_best;

	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
		   __LINE__, left_edge[0], right_edge[0]);

	/* Move DQS (back to orig) */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);

	/* Move DM */

	/* Find middle of window for the DM bit */
	mid = (left_edge[0] - right_edge[0]) / 2;

	/* only move right, since we are not moving DQS/DQ */
	if (mid < 0)
		mid = 0;

	/* dm_margin should fail if we never find a window */
	if (win_best == 0)
		dm_margin = -1;
	else
		dm_margin = left_edge[0] - mid;

	scc_mgr_apply_group_dm_out1_delay(write_group, mid);
	writel(0, &sdr_scc_mgr->update);

	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d \
		   dm_margin=%d\n", __func__, __LINE__, left_edge[0],
		   right_edge[0], mid, dm_margin);
	/* Export values */
	gbl->fom_out += dq_margin + dqs_margin;

	debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d \
		   dqs_margin=%d dm_margin=%d\n", __func__, __LINE__,
		   dq_margin, dqs_margin, dm_margin);

	/*
	 * Do not remove this line as it makes sure all of our
	 * decisions have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
}
/* calibrate the write operations */
static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
	uint32_t test_bgn)
{
	/* update info for sims */
	debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);

	reg_file_set_stage(CAL_STAGE_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);

	reg_file_set_group(g);

	if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
		set_failing_group_stage(g, CAL_STAGE_WRITES,
					CAL_SUBSTAGE_WRITES_CENTER);
		return 0;
	}

	return 1;
}
/* precharge all banks and activate row 0 in bank "000..." and bank "111..." */
static void mem_precharge_and_activate(void)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					     RW_MGR_RUN_SINGLE_GROUP_OFFSET);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* activate rows */
		writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}
}
/* Configure various memory related parameters. */
static void mem_config(void)
{
	uint32_t rlat, wlat;
	uint32_t rw_wl_nop_cycles;
	uint32_t max_latency;

	debug("%s:%d\n", __func__, __LINE__);
	/* read in write and read latency */
	wlat = readl(&data_mgr->t_wl_add);
	wlat += readl(&data_mgr->mem_t_add);

	/* WL for hard phy does not include additive latency */

	/*
	 * add additional write latency to offset the address/command extra
	 * clock cycle. We change the AC mux setting causing AC to be delayed
	 * by one mem clock cycle. Only do this for DDR3.
	 */
	wlat = wlat + 1;

	rlat = readl(&data_mgr->t_rl_add);

	rw_wl_nop_cycles = wlat - 2;
	gbl->rw_wl_nop_cycles = rw_wl_nop_cycles;

	/*
	 * For AV/CV, lfifo is hardened and always runs at full rate so
	 * max latency in AFI clocks, used here, is correspondingly smaller.
	 */
	max_latency = (1<<MAX_LATENCY_COUNT_WIDTH)/1 - 1;
	/* configure for a burst length of 8 */

	/* write latency */
	/* Adjust Write Latency for Hard PHY */
	wlat = wlat + 1;

	/* set a pretty high read latency initially */
	gbl->curr_read_lat = rlat + 16;

	if (gbl->curr_read_lat > max_latency)
		gbl->curr_read_lat = max_latency;

	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);

	/* advertise write latency */
	gbl->curr_write_lat = wlat;
	writel(wlat - 2, &phy_mgr_cfg->afi_wlat);
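
	/*
	 * Worked example (illustrative values only): t_wl_add = 5 and
	 * mem_t_add = 2 give wlat = 7; the AC-mux adjustment makes it 8, so
	 * rw_wl_nop_cycles = 6; the hard-PHY adjustment then makes wlat = 9
	 * and afi_wlat is programmed with wlat - 2 = 7.
	 */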
	/* initialize bit slips */
	mem_precharge_and_activate();
}
/* Set VFIFO and LFIFO to instant-on settings in skip calibration mode */
static void mem_skip_calibrate(void)
{
	uint32_t vfifo_offset;
	uint32_t i, j, r;

	debug("%s:%d\n", __func__, __LINE__);
	/* Need to update every shadow register set used by the interface */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
		r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * Set output phase alignment settings appropriate for
		 * skip calibration.
		 */
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			scc_mgr_set_dqs_en_phase(i, 0);
#if IO_DLL_CHAIN_LENGTH == 6
			scc_mgr_set_dqdqs_output_phase(i, 6);
#else
			scc_mgr_set_dqdqs_output_phase(i, 7);
#endif
			/*
			 * Write data arrives to the I/O two cycles before write
			 * latency is reached (720 deg).
			 *   -> due to bit-slip in a/c bus
			 *   -> to allow board skew where dqs is longer than ck
			 *      -> how often can this happen!?
			 *      -> can claim back some ptaps for high freq
			 *         support if we can relax this, but i digress...
			 *
			 * The write_clk leads mem_ck by 90 deg
			 * The minimum ptap of the OPA is 180 deg
			 * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of delay
			 * The write_clk is always delayed by 2 ptaps
			 *
			 * Hence, to make DQS aligned to CK, we need to delay
			 * DQS by:
			 *    (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
			 *
			 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
			 * gives us the number of ptaps, which simplifies to:
			 *
			 *    (1.25 * IO_DLL_CHAIN_LENGTH - 2)
			 */
			scc_mgr_set_dqdqs_output_phase(i, (1.25 *
				IO_DLL_CHAIN_LENGTH - 2));
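
			/*
			 * Checking the simplification above:
			 * (720 - 90 - 180 - 2 * (360 / L)) / (360 / L)
			 *   = (450 - 720 / L) * L / 360
			 *   = 1.25 * L - 2
			 * e.g. IO_DLL_CHAIN_LENGTH = 8 programs an output
			 * phase of 1.25 * 8 - 2 = 8 ptaps.
			 */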
		}
		writel(0xff, &sdr_scc_mgr->dqs_ena);
		writel(0xff, &sdr_scc_mgr->dqs_io_ena);

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
				  SCC_MGR_GROUP_COUNTER_OFFSET);
		}
		writel(0xff, &sdr_scc_mgr->dq_ena);
		writel(0xff, &sdr_scc_mgr->dm_ena);
		writel(0, &sdr_scc_mgr->update);
	}

	/* Compensate for simulation model behaviour */
	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
		scc_mgr_set_dqs_bus_in_delay(i, 10);
		scc_mgr_load_dqs(i);
	}
	writel(0, &sdr_scc_mgr->update);

	/*
	 * ArriaV has hard FIFOs that can only be initialized by incrementing
	 * in sequencer.
	 */
	vfifo_offset = CALIB_VFIFO_OFFSET;
	for (j = 0; j < vfifo_offset; j++) {
		writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
	}
	writel(0, &phy_mgr_cmd->fifo_reset);

	/*
	 * For ACV with hard lfifo, we get the skip-cal setting from
	 * generation-time constant.
	 */
	gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
}
/* Memory calibration entry point */
static uint32_t mem_calibrate(void)
{
	uint32_t i;
	uint32_t rank_bgn, sr;
	uint32_t write_group, write_test_bgn;
	uint32_t read_group, read_test_bgn;
	uint32_t run_groups, current_run;
	uint32_t failing_groups = 0;
	uint32_t group_failed = 0;
	uint32_t sr_failed = 0;

	debug("%s:%d\n", __func__, __LINE__);
	/* Initialize the data settings */
	gbl->error_substage = CAL_SUBSTAGE_NIL;
	gbl->error_stage = CAL_STAGE_NIL;
	gbl->error_group = 0xff;
	gbl->fom_in = 0;
	gbl->fom_out = 0;
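
	/*
	 * Calibration proceeds per write group: VFIFO (read) calibration
	 * first, then the output (write) side, then VFIFO re-centering
	 * after writes, and finally LFIFO once all groups have passed.
	 */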
	uint32_t bypass_mode = 0x1;
	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
		writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
			  SCC_MGR_GROUP_COUNTER_OFFSET);
		scc_set_bypass_mode(i, bypass_mode);
	}

	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
		/*
		 * Set VFIFO and LFIFO to instant-on settings in skip
		 * calibration mode.
		 */
		mem_skip_calibrate();
	} else {
		for (i = 0; i < NUM_CALIB_REPEAT; i++) {
			/*
			 * Zero all delay chain/phase settings for all
			 * groups and all shadow register sets.
			 */
			scc_mgr_zero_all();

			run_groups = ~param->skip_groups;

			for (write_group = 0, write_test_bgn = 0; write_group
				< RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
				write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
				/* Initialize the group failure */
				group_failed = 0;

				current_run = run_groups & ((1 <<
					RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
				run_groups = run_groups >>
					RW_MGR_NUM_DQS_PER_WRITE_GROUP;

				if (current_run == 0)
					continue;

				writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
						    SCC_MGR_GROUP_COUNTER_OFFSET);
				scc_mgr_zero_group(write_group, write_test_bgn,
						   0);

				for (read_group = write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
					read_test_bgn = 0;
					read_group < (write_group + 1) *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
					group_failed == 0;
					read_group++, read_test_bgn +=
					RW_MGR_MEM_DQ_PER_READ_DQS) {
					/* Calibrate the VFIFO */
					if (!((STATIC_CALIB_STEPS) &
						CALIB_SKIP_VFIFO)) {
						if (!rw_mgr_mem_calibrate_vfifo
							(read_group,
							read_test_bgn)) {
							group_failed = 1;

							if (!(gbl->
							phy_debug_mode_flags &
							PHY_DEBUG_SWEEP_ALL_GROUPS)) {
								return 0;
							}
						}
					}
				}

				/* Calibrate the output side */
				if (group_failed == 0) {
					for (rank_bgn = 0, sr = 0; rank_bgn
						< RW_MGR_MEM_NUMBER_OF_RANKS;
						rank_bgn +=
						NUM_RANKS_PER_SHADOW_REG,
						++sr) {
						sr_failed = 0;
						if (!((STATIC_CALIB_STEPS) &
							CALIB_SKIP_WRITES)) {
							if ((STATIC_CALIB_STEPS)
							& CALIB_SKIP_DELAY_SWEEPS) {
								/* not needed in quick mode! */
							} else {
								/*
								 * Determine if this set of
								 * ranks should be skipped
								 * entirely.
								 */
								if (!param->skip_shadow_regs[sr]) {
									if (!rw_mgr_mem_calibrate_writes
										(rank_bgn, write_group,
										write_test_bgn)) {
										sr_failed = 1;
										if (!(gbl->
										phy_debug_mode_flags &
										PHY_DEBUG_SWEEP_ALL_GROUPS)) {
											return 0;
										}
									}
								}
							}
						}
						if (sr_failed != 0)
							group_failed = 1;
					}
				}

				if (group_failed == 0) {
					for (read_group = write_group *
						RW_MGR_MEM_IF_READ_DQS_WIDTH /
						RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
						read_test_bgn = 0;
						read_group < (write_group + 1)
						* RW_MGR_MEM_IF_READ_DQS_WIDTH
						/ RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
						group_failed == 0;
						read_group++, read_test_bgn +=
						RW_MGR_MEM_DQ_PER_READ_DQS) {
						if (!((STATIC_CALIB_STEPS) &
							CALIB_SKIP_WRITES)) {
							if (!rw_mgr_mem_calibrate_vfifo_end
								(read_group, read_test_bgn)) {
								group_failed = 1;

								if (!(gbl->phy_debug_mode_flags
									& PHY_DEBUG_SWEEP_ALL_GROUPS)) {
									return 0;
								}
							}
						}
					}
				}

				if (group_failed != 0)
					failing_groups++;
			}

			/*
			 * USER If there are any failing groups then report
			 * the failure.
			 */
			if (failing_groups != 0)
				return 0;

			/* Calibrate the LFIFO */
			if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) {
				/*
				 * If we're skipping groups as part of debug,
				 * don't calibrate LFIFO.
				 */
				if (param->skip_groups == 0) {
					if (!rw_mgr_mem_calibrate_lfifo())
						return 0;
				}
			}
		}
	}

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return 1;
}
static uint32_t run_mem_calibrate(void)
{
	uint32_t pass;
	uint32_t debug_info;

	debug("%s:%d\n", __func__, __LINE__);

	/* Reset pass/fail status shown on afi_cal_success/fail */
	writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);

	/* stop tracking manager */
	uint32_t ctrlcfg = readl(&sdr_ctrl->ctrl_cfg);

	writel(ctrlcfg & 0xFFBFFFFF, &sdr_ctrl->ctrl_cfg);
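
	/*
	 * Note: 0xFFBFFFFF is ~(1 << 22); judging by the comment above,
	 * bit 22 of ctrl_cfg is the tracking manager enable. It is cleared
	 * here and the saved ctrlcfg value is restored after calibration.
	 */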
	initialize();

	rw_mgr_mem_initialize();

	pass = mem_calibrate();

	mem_precharge_and_activate();

	writel(0, &phy_mgr_cmd->fifo_reset);

	/*
	 * Handoff:
	 * Don't return control of the PHY back to AFI when in debug mode.
	 */
	if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) {
		rw_mgr_mem_handoff();
		/*
		 * In Hard PHY this is a 2-bit control:
		 * 0: AFI Mux Select
		 * 1: DDIO Mux Select
		 */
		writel(0x2, &phy_mgr_cfg->mux_sel);
	}

	writel(ctrlcfg, &sdr_ctrl->ctrl_cfg);

	if (pass) {
		printf("%s: CALIBRATION PASSED\n", __FILE__);

		gbl->fom_in /= 2;
		gbl->fom_out /= 2;

		if (gbl->fom_in > 0xff)
			gbl->fom_in = 0xff;

		if (gbl->fom_out > 0xff)
			gbl->fom_out = 0xff;

		/* Update the FOM in the register file */
		debug_info = gbl->fom_in;
		debug_info |= gbl->fom_out << 8;
		writel(debug_info, &sdr_reg_file->fom);

		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
	} else {
		printf("%s: CALIBRATION FAILED\n", __FILE__);

		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;

		writel(debug_info, &sdr_reg_file->failing_stage);
		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);

		/* Update the failing group/stage in the register file */
		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;
		writel(debug_info, &sdr_reg_file->failing_stage);
	}

	return pass;
}
/**
 * hc_initialize_rom_data() - Initialize ROM data
 *
 * Initialize ROM data.
 */
static void hc_initialize_rom_data(void)
{
	u32 i, addr;

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
	for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
		writel(inst_rom_init[i], addr + (i << 2));

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
	for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
		writel(ac_rom_init[i], addr + (i << 2));
}
/**
 * initialize_reg_file() - Initialize SDR register file
 *
 * Initialize SDR register file.
 */
static void initialize_reg_file(void)
{
	/* Initialize the register file with the correct data */
	writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
	writel(0, &sdr_reg_file->debug_data_addr);
	writel(0, &sdr_reg_file->cur_stage);
	writel(0, &sdr_reg_file->fom);
	writel(0, &sdr_reg_file->failing_stage);
	writel(0, &sdr_reg_file->debug1);
	writel(0, &sdr_reg_file->debug2);
}
/**
 * initialize_hps_phy() - Initialize HPS PHY
 *
 * Initialize HPS PHY.
 */
static void initialize_hps_phy(void)
{
	uint32_t reg;
	/*
	 * Tracking also gets configured here because it's in the
	 * same register.
	 */
	uint32_t trk_sample_count = 7500;
	uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
	/*
	 * Format is number of outer loops in the 16 MSB, sample
	 * count in 16 LSB.
	 */

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
	/*
	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
	 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
	 */
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
		trk_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl0);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
		trk_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
		trk_long_idle_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl1);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
		trk_long_idle_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
	writel(reg, &sdr_ctrl->phy_ctrl2);
}
static void initialize_tracking(void)
{
	uint32_t concatenated_longidle = 0x0;
	uint32_t concatenated_delays = 0x0;
	uint32_t concatenated_rw_addr = 0x0;
	uint32_t concatenated_refresh = 0x0;
	uint32_t trk_sample_count = 7500;
	uint32_t dtaps_per_ptap;
	uint32_t tmp_delay;

	/*
	 * compute usable version of value in case we skip full
	 * computation later
	 */
	dtaps_per_ptap = 0;
	tmp_delay = 0;
	while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
		dtaps_per_ptap++;
		tmp_delay += IO_DELAY_PER_DCHAIN_TAP;
	}
	dtaps_per_ptap--;

	concatenated_longidle = concatenated_longidle ^ 10;
		/* longidle outer loop */
	concatenated_longidle = concatenated_longidle << 16;
	concatenated_longidle = concatenated_longidle ^ 100;
		/* longidle sample count */
	concatenated_delays = concatenated_delays ^ 243;
		/* trfc, worst case of 933MHz 4Gb */
	concatenated_delays = concatenated_delays << 8;
	concatenated_delays = concatenated_delays ^ 14;
		/* trcd, worst case */
	concatenated_delays = concatenated_delays << 8;
	concatenated_delays = concatenated_delays ^ 10;
		/* vfifo wait */
	concatenated_delays = concatenated_delays << 8;
	concatenated_delays = concatenated_delays ^ 4;
		/* mux delay */
	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_IDLE;
	concatenated_rw_addr = concatenated_rw_addr << 8;
	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_ACTIVATE_1;
	concatenated_rw_addr = concatenated_rw_addr << 8;
	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_SGLE_READ;
	concatenated_rw_addr = concatenated_rw_addr << 8;
	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_PRECHARGE_ALL;

	concatenated_refresh = concatenated_refresh ^ RW_MGR_REFRESH_ALL;
	concatenated_refresh = concatenated_refresh << 24;
	concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */
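
	/*
	 * Resulting packed words, MSB first:
	 *   longidle = (10 << 16) | 100
	 *   delays   = (243 << 24) | (14 << 16) | (10 << 8) | 4
	 *   rw_addr  = (RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
	 *              (RW_MGR_SGLE_READ << 8) | RW_MGR_PRECHARGE_ALL
	 *   refresh  = (RW_MGR_REFRESH_ALL << 24) | 1000
	 */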
	/* Initialize the register file with the correct data */
	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	writel(trk_sample_count, &sdr_reg_file->trk_sample_count);
	writel(concatenated_longidle, &sdr_reg_file->trk_longidle);
	writel(concatenated_delays, &sdr_reg_file->delays);
	writel(concatenated_rw_addr, &sdr_reg_file->trk_rw_mgr_addr);
	writel(RW_MGR_MEM_IF_READ_DQS_WIDTH, &sdr_reg_file->trk_read_dqs_width);
	writel(concatenated_refresh, &sdr_reg_file->trk_rfsh);
}
int sdram_calibration_full(void)
{
	struct param_type my_param;
	struct gbl_type my_gbl;
	uint32_t pass;
	uint32_t i;

	param = &my_param;
	gbl = &my_gbl;

	/* Initialize the debug mode flags */
	gbl->phy_debug_mode_flags = 0;
	/* Set the calibration enabled by default */
	gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
	/*
	 * Only sweep all groups (regardless of fail state) by default
	 * Set enabled read test by default.
	 */
#if DISABLE_GUARANTEED_READ
	gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
#endif
	/* Initialize the register file */
	initialize_reg_file();

	/* Initialize any PHY CSR */
	initialize_hps_phy();

	scc_mgr_initialize();

	initialize_tracking();

	/* USER Enable all ranks, groups */
	for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++)
		param->skip_ranks[i] = 0;
	for (i = 0; i < NUM_SHADOW_REGS; ++i)
		param->skip_shadow_regs[i] = 0;
	param->skip_groups = 0;
	printf("%s: Preparing to start memory calibration\n", __FILE__);

	debug("%s:%d\n", __func__, __LINE__);
	debug_cond(DLEVEL == 1,
		   "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
		   RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
		   RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
	debug_cond(DLEVEL == 1,
		   "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
		   RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
		   RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
		   IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
	debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
		   IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
	debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
		   IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
		   IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
	debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
		   IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
		   IO_IO_OUT2_DELAY_MAX);
	debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
		   IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);
	hc_initialize_rom_data();

	/* update info for sims */
	reg_file_set_stage(CAL_STAGE_NIL);
	reg_file_set_group(0);

	/*
	 * Load global needed for those actions that require
	 * some dynamic calibration support.
	 */
	dyn_calib_steps = STATIC_CALIB_STEPS;
	/*
	 * Load global to allow dynamic selection of delay loop settings
	 * based on calibration mode.
	 */
	if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
		skip_delay_mask = 0xff;
	else
		skip_delay_mask = 0x0;

	pass = run_mem_calibrate();

	printf("%s: Calibration complete\n", __FILE__);

	return pass;
}