/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier:	BSD-3-Clause
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"
static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;
/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)
/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values.
 *
 * The mask is set to include all bits when not-skipping, but is
 * zero when skipping.
 */
uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
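/*
 * For example (values illustrative): with skip_delay_mask = 0xffff,
 * SKIP_DELAY_LOOP_VALUE_OR_ZERO(0x6a) evaluates to 0x6a (the delay is kept),
 * while with skip_delay_mask = 0 it evaluates to 0 (the delay is skipped).
 */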
struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm,
	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);
static void set_failing_group_stage(uint32_t group, uint32_t stage,
	uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group.
	 */
	if (gbl->error_stage == CAL_STAGE_NIL) {
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}
static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}
static void initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);
	/*
	 * USER calibration has control over path to memory.
	 * In Hard PHY this is a 2-bit control.
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER memory clock is not stable as we begin initialization */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER calibration status all set to zero */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
		param->read_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->write_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->read_correct_mask = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
		param->write_correct_mask = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
		param->dm_correct_mask = ((uint32_t)1 <<
			(RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
			- 1;
	}
}
static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
{
	uint32_t odt_mask_0 = 0;
	uint32_t odt_mask_1 = 0;
	uint32_t cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
		if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
			/*
			 * 1 Rank
			 * Read: ODT = 0
			 * Write: ODT = 1
			 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
		} else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
			/* 2 Ranks */
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/*
				 * - Dual-Slot, Single-Rank
				 *   (1 chip-select per DIMM)
				 * OR
				 * - RDIMM, 4 total CS (2 CS per DIMM)
				 * Since MEM_NUMBER_OF_RANKS is 2, they are
				 * both single rank with 2 CS each
				 * (special for RDIMM).
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * USER - Single-Slot, Dual-Rank DIMMs
				 * (2 chip-selects per DIMM)
				 * USER Read: Turn off ODT on all ranks
				 * USER Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
		} else {
			/*
			 * 4 Ranks
			 * Read:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
		}
	} else {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	}

	cs_and_odt_mask =
		(0xFF & ~(1 << rank)) |
		((0xFF & odt_mask_0) << 8) |
		((0xFF & odt_mask_1) << 16);

	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
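/*
 * Worked example (illustrative, not from the vendor code): for rank = 1
 * with odt_mask_0 = 0x2 and odt_mask_1 = 0x3, the composed value is
 * (0xFF & ~0x02) | (0x02 << 8) | (0x03 << 16) = 0x000302FD, i.e. CS# in
 * bits [7:0], read ODT in bits [15:8] and write ODT in bits [23:16].
 */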
/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}
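/*
 * Each group occupies one 32-bit word in the SCC Manager address space,
 * hence the (grp << 2) byte offset. E.g. (illustrative)
 * scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, 2, 5) writes 5 to
 * SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET | 0x8.
 */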
/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *		MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
	}
}
static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
		    delay);
}
/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}
/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}
static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
						     uint32_t phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
					       uint32_t delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
	writel(0, &sdr_scc_mgr->update);
}
/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * This function sets the OCT output delay in SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;

	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}
/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			 SCC_MGR_HHP_GLOBALS_OFFSET |
			 SCC_MGR_HHP_EXTRAS_OFFSET;

	debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}
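/* Per the bit map above, value works out to (1 << 5) | (1 << 2) | 0x3 = 0x27. */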
/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(void)
{
	int i, r;

	/*
	 * USER Zero all DQS config settings, across all groups and all
	 * shadow registers
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}
/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}
/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;

	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}
/**
 * scc_mgr_zero_group() - Zero all configs for a group
 * @write_group:	Write group
 * @out_only:		Zero only output configs
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
			scc_mgr_set_dm_out1_delay(i, 0);

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}
/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}
/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
 * @delay:		Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
	u32 i;

	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}
/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
{
	uint32_t i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}
/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
						    uint32_t delay)
{
	scc_mgr_set_dqs_out1_delay(delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}
/* apply a delay to the entire output side: DQ, DM, DQS, OCT */
static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
						  const u32 group_bgn,
						  const u32 delay)
{
	u32 i, new_delay;

	/* dq shift */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
		scc_mgr_load_dq(i);

	/* dm shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* dqs shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY;
	new_delay += delay;

	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u, %u) DQS: %u > %d => %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, group_bgn, delay,
			   new_delay, IO_IO_OUT2_DELAY_MAX,
			   IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		scc_mgr_set_dqs_out1_delay(new_delay -
					   IO_IO_OUT2_DELAY_MAX);
		new_delay = IO_IO_OUT2_DELAY_MAX;
	}

	scc_mgr_load_dqs_io();

	/* oct shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY;
	new_delay += delay;

	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u, %u) DQS: %u > %d => %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, group_bgn, delay,
			   new_delay, IO_IO_OUT2_DELAY_MAX,
			   IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		scc_mgr_set_oct_out1_delay(write_group, new_delay -
					   IO_IO_OUT2_DELAY_MAX);
		new_delay = IO_IO_OUT2_DELAY_MAX;
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}
/*
 * USER apply a delay to the entire output side (DQ, DM, DQS, OCT)
 * and to all ranks
 */
static void scc_mgr_apply_group_all_out_delay_add_all_ranks(
	uint32_t write_group, uint32_t group_bgn, uint32_t delay)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group,
						      group_bgn, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}
/*
 * optimization used to recover some slots in ddr3 inst_rom
 * could be applied to other protocols if we wanted to
 */
static void set_jump_as_return(void)
{
	/*
	 * to save space, we replace return with jump to special shared
	 * RETURN instruction so we set the counter to large value so that
	 * we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}
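/*
 * Callers issue set_jump_as_return() immediately before triggering an MRS
 * or similar inst_rom sequence (see rw_mgr_mem_initialize() below), so the
 * sequence ends by jumping to the shared RETURN instruction instead of
 * carrying its own return slot.
 */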
/*
 * should always use constants as argument to ensure all computations are
 * performed at compile time
 */
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
	uint32_t afi_clocks;
	uint8_t inner = 0;
	uint8_t outer = 0;
	uint16_t c_loop = 0;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* scale (rounding up) to get afi clocks */
	afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;

	/*
	 * Note, we don't bother accounting for being off a little bit
	 * because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test before
	 * the decrement, and so always perform the loop
	 * 1 time more than the counter value.
	 */
	if (afi_clocks == 0) {
		;
	} else if (afi_clocks <= 0x100) {
		inner = afi_clocks - 1;
		outer = 0;
		c_loop = 0;
	} else if (afi_clocks <= 0x10000) {
		inner = 0xff;
		outer = (afi_clocks - 1) >> 8;
		c_loop = 0;
	} else {
		inner = 0xff;
		outer = 0xff;
		c_loop = (afi_clocks - 1) >> 16;
	}
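	/*
	 * Worked example (illustrative): afi_clocks = 0x4000 falls in the
	 * middle range, so inner = 0xff (256 inner iterations) and
	 * outer = (0x4000 - 1) >> 8 = 0x3f, with c_loop = 0.
	 */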
	/*
	 * rom instructions are structured as follows:
	 *
	 *    IDLE_LOOP2: jnz cntr0, TARGET_A
	 *    IDLE_LOOP1: jnz cntr1, TARGET_B
	 *                return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well.
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely.
	 *
	 * a little confusing, but it helps save precious space in the inst_rom
	 * and sequencer rom and keeps the delays more accurate and reduces
	 * overhead.
	 */
	if (afi_clocks <= 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* hack to get around compiler not being smart enough */
		if (afi_clocks <= 0x10000) {
			/* only need to run once */
			writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} else {
			do {
				writel(RW_MGR_IDLE_LOOP2,
				       SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
			} while (c_loop-- != 0);
		}
	}

	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}
static void rw_mgr_mem_initialize(void)
{
	uint32_t r;
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcasted to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how you load register for a loop
	 * Counters are located @ 0x800
	 * Jump addresses are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */
	/* start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the numbers of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A
	 */
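	/*
	 * Sanity check of the arithmetic above: ((2 + 0) * 256 + 2) * 106
	 * = 54484 cycles, i.e. just over the required 54000.
	 */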
	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(RW_MGR_INIT_RESET_0_CKE_0, grpaddr);

	/* indicate that memory is stable */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);
	/*
	 * transition the RESET to high
	 * Wait for 500us
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
	 * If a and b are the numbers of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
	 * b = FF
	 */
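	/*
	 * Sanity check of the arithmetic above: ((2 + 2) * 131 + 2) * 256
	 * = 134656 cycles, i.e. just over the required 134000.
	 */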
	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(RW_MGR_INIT_RESET_1_CKE_0, grpaddr);

	/* bring up clock enable */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/*
		 * USER Use Mirror-ed commands for odd ranks if address
		 * mirroring is on
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_DLL_RESET_MIRR, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(RW_MGR_MRS0_DLL_RESET, grpaddr);
		}

		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}
/*
 * At the end of calibration we have to program the user settings in, and
 * USER hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	uint32_t r;
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	debug("%s:%d\n", __func__, __LINE__);
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;
		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		writel(RW_MGR_PRECHARGE_ALL, grpaddr);

		/* load up MR settings specified by user */

		/*
		 * Use Mirror-ed commands for odd ranks if address
		 * mirroring is on
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_USER_MIRR, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_USER, grpaddr);
		}

		/*
		 * USER need to wait tMOD (12CK or 15ns) time before issuing
		 * other commands, but we will have plenty of NIOS cycles
		 * before actual handoff so it's okay.
		 */
	}
}
/*
 * performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works
 */
static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
	uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
	uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;
	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst of read commands */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_READ,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_READ_CONT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
			writel(RW_MGR_GUARANTEED_READ, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~base_rw_mgr);

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}
	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
	debug_cond(DLEVEL == 1, "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %lu\n",
		   __func__, __LINE__, group, *bit_chk,
		   param->read_correct_mask,
		   (long unsigned int)(*bit_chk == param->read_correct_mask));
	return *bit_chk == param->read_correct_mask;
}
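/*
 * Note on the accumulation above: each virtual group contributes
 * RW_MGR_MEM_DQ_PER_READ_DQS / RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS bits,
 * so tmp_bit_chk is shifted left by that amount per iteration and the
 * per-group pass bits are OR-ed into the low bits. E.g. (illustrative)
 * with 8 DQ per DQS and 2 virtual groups, each group supplies 4 bits.
 */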
static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks
	(uint32_t group, uint32_t num_tries, uint32_t *bit_chk)
{
	return rw_mgr_mem_calibrate_read_test_patterns(0, group,
		num_tries, bit_chk, 1);
}
/* load up the patterns we are going to use during a read test */
static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn,
	uint32_t all_ranks)
{
	uint32_t r;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);

	debug("%s:%d\n", __func__, __LINE__);
	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}
/*
 * try a read and see if it returns correct data back. has dummy reads
 * inserted into the mix used to align dqs enable. has more thorough checks
 * than the regular read test.
 */
static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups, uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) &
		CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);
	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_READ_B2B_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_READ_B2B_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		if (quick_read_mode)
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
			/* need at least two (1+1) reads to capture failures */
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			if (all_groups)
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			else
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;

			writel(RW_MGR_READ_B2B, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~base_rw_mgr);

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}
	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %lu",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   param->read_correct_mask,
			   (long unsigned int)(*bit_chk ==
					       param->read_correct_mask));
		return *bit_chk == param->read_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2, "%s:%d read_test(%u,ONE,%u) => (%u != %lu) => %lu\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0x00));
		return *bit_chk != 0x00;
	}
}
static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups)
{
	return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
					      bit_chk, all_groups, 1);
}
static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
	(*v)++;
}

static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v)
{
	uint32_t i;

	for (i = 0; i < VFIFO_SIZE - 1; i++)
		rw_mgr_incr_vfifo(grp, v);
}
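/*
 * The VFIFO pointer can only be incremented, so a "decrement" is done by
 * wrapping around: VFIFO_SIZE - 1 increments return the pointer to the
 * previous position.
 */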
static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk)
{
	uint32_t v;
	uint32_t fail_cnt = 0;
	uint32_t test_status;

	for (v = 0; v < VFIFO_SIZE; ) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %u\n",
			   __func__, __LINE__, v);
		test_status = rw_mgr_mem_calibrate_read_test_all_ranks
			(grp, 1, PASS_ONE_BIT, bit_chk, 0);
		if (!test_status) {
			fail_cnt++;

			if (fail_cnt == 2)
				break;
		}

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(grp, &v);
	}

	if (v >= VFIFO_SIZE) {
		/* no failing read found!! Something must have gone wrong */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n",
			   __func__, __LINE__);
		return 0;
	}

	return v;
}
static int find_working_phase(uint32_t *grp, uint32_t *bit_chk,
			      uint32_t dtaps_per_ptap, uint32_t *work_bgn,
			      uint32_t *v, uint32_t *d, uint32_t *p,
			      uint32_t *i, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay = 0;
	uint32_t test_status;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay +=
	     IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		*work_bgn = tmp_delay;
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		for (*i = 0; *i < VFIFO_SIZE; (*i)++) {
			for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++,
			     *work_bgn += IO_DELAY_PER_OPA_TAP) {
				scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

				test_status =
				rw_mgr_mem_calibrate_read_test_all_ranks
				(*grp, 1, PASS_ONE_BIT, bit_chk, 0);

				if (test_status) {
					*max_working_cnt = 1;
					found_begin = 1;
					break;
				}
			}

			if (found_begin)
				break;

			if (*p > IO_DQS_EN_PHASE_MAX)
				/* fiddle with FIFO */
				rw_mgr_incr_vfifo(*grp, v);
		}

		if (found_begin)
			break;
	}

	if (*i >= VFIFO_SIZE) {
		/* cannot find working solution */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
			   __func__, __LINE__);
		return 0;
	}

	return 1;
}
static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk,
			     uint32_t *work_bgn, uint32_t *v, uint32_t *d,
			     uint32_t *p, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(*grp, v);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
	scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

	for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn;
	     (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
							     PASS_ONE_BIT,
							     bit_chk, 0)) {
			found_begin = 1;
			*work_bgn = tmp_delay;
			break;
		}
	}

	/* We have found a working dtap before the ptap found above */
	if (found_begin == 1)
		(*max_working_cnt)++;

	/*
	 * Restore VFIFO to old state before we decremented it
	 * (if needed).
	 */
	(*p)++;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		*p = 0;
		rw_mgr_incr_vfifo(*grp, v);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0);
}
static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk,
				uint32_t *work_bgn, uint32_t *v, uint32_t *d,
				uint32_t *p, uint32_t *i,
				uint32_t *max_working_cnt,
				uint32_t *work_end)
{
	uint32_t found_end = 0;

	(*p)++;
	*work_end += IO_DELAY_PER_OPA_TAP;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		/* fiddle with FIFO */
		*p = 0;
		rw_mgr_incr_vfifo(*grp, v);
	}

	for (; *i < VFIFO_SIZE + 1; (*i)++) {
		for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end
		     += IO_DELAY_PER_OPA_TAP) {
			scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(*grp, 1, PASS_ONE_BIT, bit_chk, 0)) {
				found_end = 1;
				break;
			} else {
				(*max_working_cnt)++;
			}
		}

		if (found_end)
			break;

		if (*p > IO_DQS_EN_PHASE_MAX) {
			/* fiddle with FIFO */
			rw_mgr_incr_vfifo(*grp, v);
			*p = 0;
		}
	}

	if (*i >= VFIFO_SIZE + 1) {
		/* cannot see edge of failing read */
		debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end: failed\n",
			   __func__, __LINE__);
		return 0;
	}

	return 1;
}
static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk,
				  uint32_t *work_bgn, uint32_t *v, uint32_t *d,
				  uint32_t *p, uint32_t *work_mid,
				  uint32_t *work_end)
{
	int i;
	int tmp_delay = 0;

	*work_mid = (*work_bgn + *work_end) / 2;

	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   *work_bgn, *work_end, *work_mid);
	/* Get the middle delay to be less than a VFIFO delay */
	for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX;
	     (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
		;
	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
	while (*work_mid > tmp_delay)
		*work_mid -= tmp_delay;
	debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid);

	tmp_delay = 0;
	for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid;
	     (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
		;
	tmp_delay -= IO_DELAY_PER_OPA_TAP;
	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay);
	for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid;
	     (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
		;
	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay);

	scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1);
	scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

	/*
	 * push vfifo until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
	 */
	for (i = 0; i < VFIFO_SIZE; i++) {
		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n",
			   *v);
		if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
							     PASS_ONE_BIT,
							     bit_chk, 0)) {
			break;
		}

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(*grp, v);
	}

	if (i >= VFIFO_SIZE) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: failed\n",
			   __func__, __LINE__);
		return 0;
	}

	return 1;
}
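/*
 * Centring example (illustrative): with work_bgn = 100 and work_end = 500,
 * work_mid = 300; it is then reduced modulo the total ptap span and split
 * into whole phase taps plus leftover delay-chain taps by the loops above.
 */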
/* find a good dqs enable to use */
static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
{
	uint32_t v, d, p, i;
	uint32_t max_working_cnt;
	uint32_t bit_chk;
	uint32_t dtaps_per_ptap;
	uint32_t work_bgn, work_mid, work_end;
	uint32_t found_passing_read, found_failing_read, initial_failing_dtap;

	debug("%s:%d %u\n", __func__, __LINE__, grp);

	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);

	/* ************************************************************** */
	/* * Step 0 : Determine number of delay taps for each phase tap * */
	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	/* ********************************************************* */
	/* * Step 1 : First push vfifo until we get a failing read * */
	v = find_vfifo_read(grp, &bit_chk);

	max_working_cnt = 0;

	/* ******************************************************** */
	/* * step 2: find first working phase, increment in ptaps * */
	if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v,
			       &d, &p, &i, &max_working_cnt) == 0)
		return 0;

	work_end = work_bgn;
	/*
	 * If d is 0 then the working window covers a phase tap and
	 * we can follow the old procedure; otherwise, we've found the
	 * beginning, and we need to increment the dtaps until we find
	 * the end.
	 */
	if (d == 0) {
		/* ********************************************************* */
		/* * step 3a: if we have room, back off by one and
		increment in dtaps * */

		sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				 &max_working_cnt);

		/* ********************************************************* */
		/* * step 4a: go forward from working phase to non working
		phase, increment in ptaps * */
		if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
					 &i, &max_working_cnt, &work_end) == 0)
			return 0;

		/* ********************************************************* */
		/* * step 5a: back off one from last, increment in dtaps * */

		/* Special case code for backing up a phase */
		if (p == 0) {
			p = IO_DQS_EN_PHASE_MAX;
			rw_mgr_decr_vfifo(grp, &v);
		} else {
			p = p - 1;
		}

		work_end -= IO_DELAY_PER_OPA_TAP;
		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);

		/* * The actual increment of dtaps is done outside of
		   the if/else loop to share code */
		d = 0;

		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: vfifo=%u ptap=%u\n",
			   __func__, __LINE__, v, p);
	} else {
		/* ******************************************************* */
		/* * step 3-5b: Find the right edge of the window using
		delay taps * */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo=%u ptap=%u dtap=%u bgn=%u\n",
			   __func__, __LINE__, v, p, d, work_bgn);

		work_end = work_bgn;

		/* * The actual increment of dtaps is done outside of the
		   if/else loop to share code */

		/* Only here to counterbalance a subtract later on which is
		   not needed if this branch of the algorithm is taken */
		max_working_cnt++;
	}
	/* The dtap increment to find the failing edge is done here */
	for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
	     IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: dtap=%u\n",
			   __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							      PASS_ONE_BIT,
							      &bit_chk, 0))
			break;
	}

	/* Go back to working dtap */
	if (d != 0)
		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%u ptap=%u dtap=%u end=%u\n",
		   __func__, __LINE__, v, p, d - 1, work_end);
	if (work_end < work_bgn) {
		/* nil range */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: failed\n",
			   __func__, __LINE__);
		return 0;
	}

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
		   __func__, __LINE__, work_bgn, work_end);
	/* *************************************************************** */
	/*
	 * * We need to calculate the number of dtaps that equal a ptap
	 * * To do that we'll back up a ptap and re-find the edge of the
	 * * window using dtaps
	 */

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap for tracking\n",
		   __func__, __LINE__);

	/* Special case code for backing up a phase */
	if (p == 0) {
		p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp, &v);
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup cycle/phase: v=%u p=%u\n",
			   __func__, __LINE__, v, p);
	} else {
		p = p - 1;
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup phase only: v=%u p=%u",
			   __func__, __LINE__, v, p);
	}

	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
	/*
	 * Increase dtap until we first see a passing read (in case the
	 * window is smaller than a ptap),
	 * and then a failing read to mark the edge of the window again
	 */

	/* Find a passing read */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
		   __func__, __LINE__);
	found_passing_read = 0;
	found_failing_read = 0;
	initial_failing_dtap = d;
	for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing read d=%u\n",
			   __func__, __LINE__, d);
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							     PASS_ONE_BIT,
							     &bit_chk, 0)) {
			found_passing_read = 1;
			break;
		}
	}
	if (found_passing_read) {
		/* Find a failing read */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing read\n",
			   __func__, __LINE__);
		for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
			debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing read d=%u\n",
				   __func__, __LINE__, d);
			scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

			if (!rw_mgr_mem_calibrate_read_test_all_ranks
				(grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
				found_failing_read = 1;
				break;
			}
		}
	} else {
		debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to calculate dtaps",
			   __func__, __LINE__);
		debug_cond(DLEVEL == 1, "per ptap. Fall back on static value\n");
	}
	/*
	 * The dynamically calculated dtaps_per_ptap is only valid if we
	 * found a passing/failing read. If we didn't, it means d hit the max
	 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
	 * statically calculated value.
	 */
	if (found_passing_read && found_failing_read)
		dtaps_per_ptap = d - initial_failing_dtap;

	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u - %u = %u",
		   __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
	/* ******************************************** */
	/* * step 6: Find the centre of the window * */
	if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p,
				   &work_mid, &work_end) == 0)
		return 0;

	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: vfifo=%u ptap=%u dtap=%u\n",
		   __func__, __LINE__, v, p, d);

	return 1;
}
/*
 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
 * dq_in_delay values
 */
static uint32_t
rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
(uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
{
	uint32_t found;
	uint32_t i;
	uint32_t p;
	uint32_t d;
	uint32_t r;

	/* we start at zero, so have one less dq to divide among */
	const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
				    (RW_MGR_MEM_DQ_PER_READ_DQS - 1);

	debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
	      test_bgn);

	/* try different dq_in_delays since the dq path is shorter than dqs */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn, d = 0;
		     i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++, d += delay_step) {
			debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_",
				   __func__, __LINE__);
			debug_cond(DLEVEL == 1, "en_phase_sweep_dq_in_delay: g=%u/%u ",
				   write_group, read_group);
			debug_cond(DLEVEL == 1, "r=%u, i=%u p=%u d=%u\n", r, i, p, d);
			scc_mgr_set_dq_in_delay(p, d);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);

	debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq",
		   __func__, __LINE__);
	debug_cond(DLEVEL == 1, "_in_delay: g=%u/%u found=%u; Resetting delay chain to zero\n",
		   write_group, read_group, found);

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
		     i++, p++) {
			scc_mgr_set_dq_in_delay(p, 0);
			scc_mgr_load_dq(p);
		}
		writel(0, &sdr_scc_mgr->update);
	}

	return found;
}
/* per-bit deskew DQ and center */
static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
	uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
	uint32_t use_read_test, uint32_t update_fom)
{
	uint32_t i, p, d, min_index;
	/*
	 * Store these as signed since there are comparisons with
	 * signed numbers.
	 */
	uint32_t bit_chk;
	uint32_t sticky_bit_chk;
	int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
	int32_t mid;
	int32_t orig_mid_min, mid_min;
	int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
		final_dqs_en;
	int32_t dq_margin, dqs_margin;
	uint32_t stop;
	uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
	uint32_t addr;

	debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);

	addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
	start_dqs = readl(addr + (read_group << 2));
	if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
		start_dqs_en = readl(addr + ((read_group << 2)
				     - IO_DQS_EN_DELAY_OFFSET));

	/* set the left and right edge of each bit to an illegal value */
	/* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
	sticky_bit_chk = 0;
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		left_edge[i] = IO_IO_IN_DELAY_MAX + 1;
		right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
	}
	/* Search for the left edge of the window for each bit */
	for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
		scc_mgr_apply_group_dq_in_delay(test_bgn, d);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u && %u",
			   __func__, __LINE__, d, sticky_bit_chk,
			   param->read_correct_mask, stop);

		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
				if (bit_chk & 1) {
					/* Remember a passing test as the
					left_edge */
					left_edge[i] = d;
				} else {
					/* If a left edge has not been seen yet,
					then a future passing test will mark
					this edge as the right edge */
					if (left_edge[i] ==
					    IO_IO_IN_DELAY_MAX + 1)
						right_edge[i] = -(d + 1);
				}
				bit_chk = bit_chk >> 1;
			}
		}
	}
	/* Reset DQ delay chains to 0 */
	scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
	sticky_bit_chk = 0;
	for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
			   __func__, __LINE__, i, left_edge[i], i,
			   right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
			right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
			debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
				   __func__, __LINE__, i, right_edge[i]);
		}

		/*
		 * Reset sticky bit (except for bits where we have seen
		 * both the left and right edge).
		 */
		sticky_bit_chk = sticky_bit_chk << 1;
		if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
		    (right_edge[i] != IO_IO_IN_DELAY_MAX + 1))
			sticky_bit_chk = sticky_bit_chk | 1;

		if (i == 0)
			break;
	}
	/* Search for the right edge of the window for each bit */
	for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
		scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
		if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
			uint32_t delay = d + start_dqs_en;
			if (delay > IO_DQS_EN_DELAY_MAX)
				delay = IO_DQS_EN_DELAY_MAX;
			scc_mgr_set_dqs_en_delay(read_group, delay);
		}
		scc_mgr_load_dqs(read_group);

		writel(0, &sdr_scc_mgr->update);

		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (use_read_test) {
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
				read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
				&bit_chk, 0, 0);
		} else {
			rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
							0, PASS_ONE_BIT,
							&bit_chk, 0);
			bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (bit_chk == 0);
		}
		sticky_bit_chk = sticky_bit_chk | bit_chk;
		stop = stop && (sticky_bit_chk == param->read_correct_mask);

		debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == %u && %u",
			   __func__, __LINE__, d, sticky_bit_chk,
			   param->read_correct_mask, stop);

		if (stop == 1) {
			break;
		} else {
			for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
				if (bit_chk & 1) {
					/* Remember a passing test as
					the right_edge */
					right_edge[i] = d;
				} else {
					if (d != 0) {
						/* If a right edge has not been
						seen yet, then a future passing
						test will mark this edge as the
						left edge */
						if (right_edge[i] ==
						    IO_IO_IN_DELAY_MAX + 1)
							left_edge[i] = -(d + 1);
					} else {
						/* d = 0 failed, but it passed
						when testing the left edge,
						so it must be marginal,
						set it to -1 */
						if (right_edge[i] ==
						    IO_IO_IN_DELAY_MAX + 1 &&
						    left_edge[i] !=
						    IO_IO_IN_DELAY_MAX + 1) {
							right_edge[i] = -1;
						}
						/* If a right edge has not been
						seen yet, then a future passing
						test will mark this edge as the
						left edge */
						else if (right_edge[i] ==
							 IO_IO_IN_DELAY_MAX + 1) {
							left_edge[i] = -(d + 1);
						}
					}
				}

				debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,d=%u]: ",
					   __func__, __LINE__, d);
				debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ",
					   (int)(bit_chk & 1), i, left_edge[i]);
				debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
					   right_edge[i]);
				bit_chk = bit_chk >> 1;
			}
		}
	}
	/* Check that all bits have a window */
	for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d",
			   __func__, __LINE__, i, left_edge[i], i,
			   right_edge[i]);
		if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) ||
		    (right_edge[i] == IO_IO_IN_DELAY_MAX + 1)) {
			/*
			 * Restore delay chain settings before letting the loop
			 * in rw_mgr_mem_calibrate_vfifo to retry different
			 * dqs/ck relationships.
			 */
			scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
			if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
				scc_mgr_set_dqs_en_delay(read_group,
							 start_dqs_en);
			}
			scc_mgr_load_dqs(read_group);
			writel(0, &sdr_scc_mgr->update);

			debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
				   __func__, __LINE__, i, left_edge[i],
				   right_edge[i]);
			if (use_read_test) {
				set_failing_group_stage(read_group *
					RW_MGR_MEM_DQ_PER_READ_DQS + i,
					CAL_STAGE_VFIFO,
					CAL_SUBSTAGE_VFIFO_CENTER);
			} else {
				set_failing_group_stage(read_group *
					RW_MGR_MEM_DQ_PER_READ_DQS + i,
					CAL_STAGE_VFIFO_AFTER_WRITES,
					CAL_SUBSTAGE_VFIFO_CENTER);
			}
			return 0;
		}
	}
	/* Find middle of window for each DQ bit */
	mid_min = left_edge[0] - right_edge[0];
	min_index = 0;
	for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
		mid = left_edge[i] - right_edge[i];
		if (mid < mid_min) {
			mid_min = mid;
			min_index = i;
		}
	}

	/*
	 * -mid_min/2 represents the amount that we need to move DQS.
	 * If mid_min is odd and positive we'll need to add one to
	 * make sure the rounding in further calculations is correct
	 * (always bias to the right), so just add 1 for all positive values.
	 */
	if (mid_min > 0)
		mid_min++;

	mid_min = mid_min / 2;
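	/*
	 * Rounding example (illustrative): mid_min = 5 becomes
	 * (5 + 1) / 2 = 3 rather than 2, biasing the centre to the right
	 * as described above.
	 */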
2098 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
2099 __func__, __LINE__, mid_min, min_index);
2101 /* Determine the amount we can change DQS (which is -mid_min) */
2102 orig_mid_min = mid_min;
2103 new_dqs = start_dqs - mid_min;
2104 if (new_dqs > IO_DQS_IN_DELAY_MAX)
2105 new_dqs = IO_DQS_IN_DELAY_MAX;
2106 else if (new_dqs < 0)
2109 mid_min = start_dqs - new_dqs;
2110 debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
2113 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2114 if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
2115 mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
2116 else if (start_dqs_en - mid_min < 0)
2117 mid_min += start_dqs_en - mid_min;
2119 new_dqs = start_dqs - mid_min;
2121 debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \
2122 new_dqs=%d mid_min=%d\n", start_dqs,
2123 IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
2124 new_dqs, mid_min);
2126 /* Initialize data for export structures */
2127 dqs_margin = IO_IO_IN_DELAY_MAX + 1;
2128 dq_margin = IO_IO_IN_DELAY_MAX + 1;
2130 /* add delay to bring centre of all DQ windows to the same "level" */
2131 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
2132 /* Use values before divide by 2 to reduce round off error */
2133 shift_dq = (left_edge[i] - right_edge[i] -
2134 (left_edge[min_index] - right_edge[min_index]))/2 +
2135 (orig_mid_min - mid_min);
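/*
 * Worked example (illustrative edges; assumes the DQS move was not
 * clamped, so orig_mid_min == mid_min): if bit i has left_edge = 12,
 * right_edge = 4 and the narrowest bit (min_index) has left_edge = 8,
 * right_edge = 6, then shift_dq = (12 - 4 - (8 - 6)) / 2 + 0 = 3,
 * i.e. bit i gains 3 taps of input delay so its window centre lines
 * up with the narrowest bit's centre.
 */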
2137 debug_cond(DLEVEL == 2, "vfifo_center: before: \
2138 shift_dq[%u]=%d\n", i, shift_dq);
2140 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;
2141 temp_dq_in_delay1 = readl(addr + (p << 2));
2142 temp_dq_in_delay2 = readl(addr + (i << 2));
2144 if (shift_dq + (int32_t)temp_dq_in_delay1 >
2145 (int32_t)IO_IO_IN_DELAY_MAX) {
2146 shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay2;
2147 } else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
2148 shift_dq = -(int32_t)temp_dq_in_delay1;
2150 debug_cond(DLEVEL == 2, "vfifo_center: after: \
2151 shift_dq[%u]=%d\n", i, shift_dq);
2152 final_dq[i] = temp_dq_in_delay1 + shift_dq;
2153 scc_mgr_set_dq_in_delay(p, final_dq[i]);
2156 debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
2157 left_edge[i] - shift_dq + (-mid_min),
2158 right_edge[i] + shift_dq - (-mid_min));
2159 /* To determine values for export structures */
2160 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
2161 dq_margin = left_edge[i] - shift_dq + (-mid_min);
2163 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
2164 dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2167 final_dqs = new_dqs;
2168 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
2169 final_dqs_en = start_dqs_en - mid_min;
2172 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2173 scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
2174 scc_mgr_load_dqs(read_group);
2178 scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
2179 scc_mgr_load_dqs(read_group);
2180 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d \
2181 dqs_margin=%d", __func__, __LINE__,
2182 dq_margin, dqs_margin);
2185 * Do not remove this line as it makes sure all of our decisions
2186 * have been applied. Apply the update bit.
2188 writel(0, &sdr_scc_mgr->update);
2190 return (dq_margin >= 0) && (dqs_margin >= 0);
2194 * calibrate the read valid prediction FIFO.
2196 * - read valid prediction will consist of finding a good DQS enable phase,
2197 * DQS enable delay, DQS input phase, and DQS input delay.
2198 * - we also do a per-bit deskew on the DQ lines.
2200 static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group,
2201 uint32_t test_bgn)
2202 {
2203 uint32_t p, d, rank_bgn, sr;
2204 uint32_t dtaps_per_ptap;
2207 uint32_t grp_calibrated;
2208 uint32_t write_group, write_test_bgn;
2209 uint32_t failed_substage;
2211 debug("%s:%d: %u %u\n", __func__, __LINE__, read_group, test_bgn);
2213 /* update info for sims */
2214 reg_file_set_stage(CAL_STAGE_VFIFO);
2216 write_group = read_group;
2217 write_test_bgn = test_bgn;
2219 /* USER Determine number of delay taps for each phase tap */
2222 while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
2224 tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
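/*
 * Worked example (illustrative tap delays): with IO_DELAY_PER_OPA_TAP
 * = 312 ps and IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 25 ps the loop body
 * runs 13 times (tmp_delay = 25, 50, ..., 325), leaving
 * dtaps_per_ptap = 13, i.e. the ceiling of 312 / 25.
 */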
2229 /* update info for sims */
2230 reg_file_set_group(read_group);
2234 reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
2235 failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
2237 for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) {
2239 * In RLDRAMX we may be messing with the delay of pins in
2240 * the same write group but outside of the current read
2241 * group, but that's OK because we haven't calibrated the
2242 * output side yet.
2245 scc_mgr_apply_group_all_out_delay_add_all_ranks
2246 (write_group, write_test_bgn, d);
2249 for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0;
2250 p++) {
2251 /* set a particular dqdqs phase */
2252 scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p);
2254 debug_cond(DLEVEL == 1, "%s:%d calibrate_vfifo: g=%u \
2255 p=%u d=%u\n", __func__, __LINE__,
2256 read_group, p, d);
2259 * Load up the patterns used by read calibration
2260 * using current DQDQS phase.
2262 rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2263 if (!(gbl->phy_debug_mode_flags &
2264 PHY_DEBUG_DISABLE_GUARANTEED_READ)) {
2265 if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks
2266 (read_group, 1, &bit_chk)) {
2267 debug_cond(DLEVEL == 1, "%s:%d Guaranteed read test failed:",
2268 __func__, __LINE__);
2269 debug_cond(DLEVEL == 1, " g=%u p=%u d=%u\n",
2270 read_group, p, d);
2277 if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
2278 (write_group, read_group, test_bgn)) {
2280 * USER Read per-bit deskew can be done on a
2281 * per shadow register basis.
2283 for (rank_bgn = 0, sr = 0;
2284 rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2285 rank_bgn += NUM_RANKS_PER_SHADOW_REG,
2288 * Determine if this set of ranks
2289 * should be skipped entirely.
2291 if (!param->skip_shadow_regs[sr]) {
2293 * If doing read after write
2294 * calibration, do not update
2295 * FOM, now - do it then.
2297 if (!rw_mgr_mem_calibrate_vfifo_center
2298 (rank_bgn, write_group,
2299 read_group, test_bgn, 1, 0)) {
2301 failed_substage =
2302 CAL_SUBSTAGE_VFIFO_CENTER;
2308 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
2313 if (grp_calibrated == 0) {
2314 set_failing_group_stage(write_group, CAL_STAGE_VFIFO,
2315 failed_substage);
2320 * Reset the delay chains back to zero if they have moved > 1
2321 * (check for > 1 because the loop will increase d even when the
2322 * last case passes).
2323 */
2325 scc_mgr_zero_group(write_group, 1);
2330 /* VFIFO Calibration -- Read Deskew Calibration after write deskew */
2331 static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
2332 uint32_t test_bgn)
2333 {
2334 uint32_t rank_bgn, sr;
2335 uint32_t grp_calibrated;
2336 uint32_t write_group;
2338 debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);
2340 /* update info for sims */
2342 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2343 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2345 write_group = read_group;
2347 /* update info for sims */
2348 reg_file_set_group(read_group);
2351 /* Read per-bit deskew can be done on a per shadow register basis */
2352 for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2353 rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
2354 /* Determine if this set of ranks should be skipped entirely */
2355 if (!param->skip_shadow_regs[sr]) {
2356 /* This is the last calibration round, update FOM here */
2357 if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
2368 if (grp_calibrated == 0) {
2369 set_failing_group_stage(write_group,
2370 CAL_STAGE_VFIFO_AFTER_WRITES,
2371 CAL_SUBSTAGE_VFIFO_CENTER);
2372 return 0;
2373 }
2375 return 1;
2376 }
2378 /* Calibrate LFIFO to find smallest read latency */
2379 static uint32_t rw_mgr_mem_calibrate_lfifo(void)
2384 debug("%s:%d\n", __func__, __LINE__);
2386 /* update info for sims */
2387 reg_file_set_stage(CAL_STAGE_LFIFO);
2388 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
2390 /* Load up the patterns used by read calibration for all ranks */
2391 rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2395 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2396 debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
2397 __func__, __LINE__, gbl->curr_read_lat);
2399 if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
2407 /* reduce read latency and see if things are working */
2409 gbl->curr_read_lat--;
2410 } while (gbl->curr_read_lat > 0);
2412 /* reset the fifos to get pointers to known state */
2414 writel(0, &phy_mgr_cmd->fifo_reset);
2417 /* add a fudge factor to the read latency that was determined */
2418 gbl->curr_read_lat += 2;
2419 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
2420 debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \
2421 read_lat=%u\n", __func__, __LINE__,
2422 gbl->curr_read_lat);
2425 set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
2426 CAL_SUBSTAGE_READ_LATENCY);
2428 debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \
2429 read_lat=%u\n", __func__, __LINE__,
2430 gbl->curr_read_lat);
2436 * Issue a write test command.
2437 * Two variants are provided: one that just tests a write pattern and
2438 * another that tests datamask functionality.
2440 static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
2441 uint32_t test_dm)
2442 {
2443 uint32_t mcc_instruction;
2444 uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
2445 ENABLE_SUPER_QUICK_CALIBRATION);
2446 uint32_t rw_wl_nop_cycles;
2450 * Set counter and jump addresses for the right
2451 * number of NOP cycles.
2452 * The number of supported NOP cycles can range from -1 to infinity
2453 * Three different cases are handled:
2455 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
2456 * mechanism will be used to insert the right number of NOPs
2458 * 2. For a number of NOP cycles equal to 0, the micro-instruction
2459 * issuing the write command will jump straight to the
2460 * micro-instruction that turns on DQS (for DDRx), or outputs write
2461 * data (for RLD), skipping
2462 * the NOP micro-instruction altogether.
2464 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
2465 * turned on in the same micro-instruction that issues the write
2466 * command. Then we need
2467 * to directly jump to the micro-instruction that sends out the data
2469 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
2470 * (2 and 3). One jump-counter (0) is used to perform multiple
2471 * write-read operations.
2472 * This leaves one counter to issue the command in "multiple-group" mode.
2473 */
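/*
 * Compact restatement of the three cases implemented below (no
 * additional behaviour):
 *
 * rw_wl_nop_cycles   CNTR 2                  CNTR 3
 * -1                 0xFF, jump to DATA      unused (jump addr = NOP)
 *  0                 0xFF, jump to DQS       unused
 * >0                 0x0, fall through       cycles - 1, jump to NOP
 */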
2475 rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
2477 if (rw_wl_nop_cycles == -1) {
2479 * CNTR 2 - We want to execute the special write operation that
2480 * turns on DQS right away and then skip directly to the
2481 * instruction that sends out the data. We set the counter to a
2482 * large number so that the jump is always taken.
2484 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
2486 /* CNTR 3 - Not used */
2487 if (test_dm) {
2488 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
2489 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
2490 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2491 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
2492 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2493 } else {
2494 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
2495 writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
2496 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2497 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
2498 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2500 } else if (rw_wl_nop_cycles == 0) {
2502 * CNTR 2 - We want to skip the NOP operation and go straight
2503 * to the DQS enable instruction. We set the counter to a large
2504 * number so that the jump is always taken.
2506 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
2508 /* CNTR 3 - Not used */
2509 if (test_dm) {
2510 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
2511 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
2512 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2513 } else {
2514 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
2515 writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
2516 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2520 * CNTR 2 - In this case we want to execute the next instruction
2521 * and NOT take the jump. So we set the counter to 0. The jump
2522 * address doesn't count.
2524 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
2525 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2528 * CNTR 3 - Set the nop counter to the number of cycles we
2529 * need to loop for, minus 1.
2531 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
2532 if (test_dm) {
2533 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
2534 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
2535 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2536 } else {
2537 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
2538 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
2539 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
2543 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
2544 RW_MGR_RESET_READ_DATAPATH_OFFSET);
2546 if (quick_write_mode)
2547 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
2548 else
2549 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
2551 writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
2554 * CNTR 1 - This is used to ensure enough time elapses
2555 * for read data to come back.
2557 writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
2559 if (test_dm) {
2560 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
2561 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
2562 } else {
2563 writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
2564 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
2567 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
2568 writel(mcc_instruction, addr + (group << 2));
2571 /* Test writes; can check for a single-bit pass or a multiple-bit pass */
2572 static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
2573 uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
2574 uint32_t *bit_chk, uint32_t all_ranks)
2577 uint32_t correct_mask_vg;
2578 uint32_t tmp_bit_chk;
2580 uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
2581 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
2582 uint32_t addr_rw_mgr;
2583 uint32_t base_rw_mgr;
2585 *bit_chk = param->write_correct_mask;
2586 correct_mask_vg = param->write_correct_mask_vg;
2588 for (r = rank_bgn; r < rank_end; r++) {
2589 if (param->skip_ranks[r]) {
2590 /* request to skip the rank */
2595 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
2598 addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS;
2599 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) {
2600 /* reset the fifos to get pointers to known state */
2601 writel(0, &phy_mgr_cmd->fifo_reset);
2603 tmp_bit_chk = tmp_bit_chk <<
2604 (RW_MGR_MEM_DQ_PER_WRITE_DQS /
2605 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
2606 rw_mgr_mem_calibrate_write_test_issue(write_group *
2607 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg,
2608 use_dm);
2610 base_rw_mgr = readl(addr_rw_mgr);
2611 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
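/*
 * Worked example (illustrative values): with two virtual groups per
 * write DQS and RW_MGR_MEM_DQ_PER_WRITE_DQS = 8, each pass shifts the
 * accumulator left by 8 / 2 = 4 bits and ORs in the 4 bits just
 * tested, e.g. 0x0 -> 0xd -> 0xdf when the first group passes on
 * bits 0, 2 and 3 and the second on all four; a passing bit reads
 * back as 0 in base_rw_mgr, hence the inversion before masking.
 */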
2615 *bit_chk &= tmp_bit_chk;
2618 if (all_correct) {
2619 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
2620 debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == \
2621 %u => %lu", write_group, use_dm,
2622 *bit_chk, param->write_correct_mask,
2623 (long unsigned int)(*bit_chk ==
2624 param->write_correct_mask));
2625 return *bit_chk == param->write_correct_mask;
2626 } else {
2627 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
2628 debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ",
2629 write_group, use_dm, *bit_chk);
2630 debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0,
2631 (long unsigned int)(*bit_chk != 0));
2632 return *bit_chk != 0x00;
2637 * Center all windows. Do per-bit-deskew to possibly increase size of
2638 * certain windows.
2639 */
2640 static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
2641 uint32_t write_group, uint32_t test_bgn)
2643 uint32_t i, p, min_index;
2646 * Store these as signed since there are comparisons with
2647 * signed numbers.
2648 */
2650 uint32_t sticky_bit_chk;
2651 int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2652 int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2654 int32_t mid_min, orig_mid_min;
2655 int32_t new_dqs, start_dqs, shift_dq;
2656 int32_t dq_margin, dqs_margin, dm_margin;
2658 uint32_t temp_dq_out1_delay;
2661 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
2665 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
2666 start_dqs = readl(addr +
2667 (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
2669 /* per-bit deskew */
2672 * set the left and right edge of each bit to an illegal value
2673 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
2676 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2677 left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2678 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2681 /* Search for the left edge of the window for each bit */
2682 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
2683 scc_mgr_apply_group_dq_out1_delay(write_group, d);
2685 writel(0, &sdr_scc_mgr->update);
2688 * Stop searching when the read test doesn't pass AND when
2689 * we've seen a passing read on every bit.
2691 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
2692 0, PASS_ONE_BIT, &bit_chk, 0);
2693 sticky_bit_chk = sticky_bit_chk | bit_chk;
2694 stop = stop && (sticky_bit_chk == param->write_correct_mask);
2695 debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => %u \
2696 == %u && %u [bit_chk= %u ]\n",
2697 d, sticky_bit_chk, param->write_correct_mask,
2703 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2704 if (bit_chk & 1) {
2705 /*
2706 * Remember a passing test as the
2707 * left edge.
2708 */
2709 left_edge[i] = d;
2710 } else {
2711 /*
2712 * If a left edge has not been seen
2713 * yet, then a future passing test will
2714 * mark this edge as the right edge.
2715 */
2716 if (left_edge[i] ==
2717 IO_IO_OUT1_DELAY_MAX + 1) {
2718 right_edge[i] = -(d + 1);
2721 debug_cond(DLEVEL == 2, "write_center[l,d=%d]:", d);
2722 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
2723 (int)(bit_chk & 1), i, left_edge[i]);
2724 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2725 right_edge[i]);
2726 bit_chk = bit_chk >> 1;
2731 /* Reset DQ delay chains to 0 */
2732 scc_mgr_apply_group_dq_out1_delay(0);
2734 for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
2735 debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
2736 %d right_edge[%u]: %d\n", __func__, __LINE__,
2737 i, left_edge[i], i, right_edge[i]);
2740 * Check for cases where we haven't found the left edge,
2741 * which makes our assignment of the right edge invalid.
2742 * Reset it to the illegal value.
2744 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
2745 (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
2746 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2747 debug_cond(DLEVEL == 2, "%s:%d write_center: reset \
2748 right_edge[%u]: %d\n", __func__, __LINE__,
2753 * Reset sticky bit (except for bits where we have
2754 * seen the left edge).
2756 sticky_bit_chk = sticky_bit_chk << 1;
2757 if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1))
2758 sticky_bit_chk = sticky_bit_chk | 1;
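/*
 * Worked example (illustrative, 4 bits): iterating i = 3..0 shifts
 * the mask left once per bit and sets bit 0 whenever left_edge[i]
 * was found. If left edges were seen for bits 3, 1 and 0 but not
 * bit 2, sticky_bit_chk is rebuilt as 0b1011, so the right-edge
 * sweep below only waits on the bits that still lack an edge.
 */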
2764 /* Search for the right edge of the window for each bit */
2765 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
2766 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2767 d + start_dqs);
2769 writel(0, &sdr_scc_mgr->update);
2772 * Stop searching when the read test doesn't pass AND when
2773 * we've seen a passing read on every bit.
2775 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
2776 0, PASS_ONE_BIT, &bit_chk, 0);
2778 sticky_bit_chk = sticky_bit_chk | bit_chk;
2779 stop = stop && (sticky_bit_chk == param->write_correct_mask);
2781 debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => %u == \
2782 %u && %u\n", d, sticky_bit_chk,
2783 param->write_correct_mask, stop);
2787 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
2788 i++) {
2789 /* d = 0 failed, but it passed when
2790 testing the left edge, so it must be
2791 marginal, set it to -1 */
2792 if (right_edge[i] ==
2793 IO_IO_OUT1_DELAY_MAX + 1 &&
2794 left_edge[i] !=
2795 IO_IO_OUT1_DELAY_MAX + 1) {
2796 right_edge[i] = -1;
2797 }
2802 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2805 * Remember a passing test as
2806 * the right edge.
2807 */
2808 right_edge[i] = d;
2809 } else {
2810 if (d != 0) {
2811 /*
2812 * If a right edge has not
2813 * been seen yet, then a future
2814 * passing test will mark this
2815 * edge as the left edge.
2817 if (right_edge[i] ==
2818 IO_IO_OUT1_DELAY_MAX + 1)
2819 left_edge[i] = -(d + 1);
2822 * d = 0 failed, but it passed
2823 * when testing the left edge,
2824 * so it must be marginal, set
2825 * it to -1.
2826 */
2827 if (right_edge[i] ==
2828 IO_IO_OUT1_DELAY_MAX + 1 &&
2829 left_edge[i] !=
2830 IO_IO_OUT1_DELAY_MAX + 1)
2831 right_edge[i] = -1;
2832 /*
2833 * If a right edge has not been
2834 * seen yet, then a future
2835 * passing test will mark this
2836 * edge as the left edge.
2837 */
2838 else if (right_edge[i] ==
2839 IO_IO_OUT1_DELAY_MAX +
2841 left_edge[i] = -(d + 1);
2844 debug_cond(DLEVEL == 2, "write_center[r,d=%d]:", d);
2845 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
2846 (int)(bit_chk & 1), i, left_edge[i]);
2847 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2848 right_edge[i]);
2849 bit_chk = bit_chk >> 1;
2854 /* Check that all bits have a window */
2855 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2856 debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
2857 %d right_edge[%u]: %d", __func__, __LINE__,
2858 i, left_edge[i], i, right_edge[i]);
2859 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
2860 (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
2861 set_failing_group_stage(test_bgn + i,
2862 CAL_STAGE_WRITES,
2863 CAL_SUBSTAGE_WRITES_CENTER);
2868 /* Find middle of window for each DQ bit */
2869 mid_min = left_edge[0] - right_edge[0];
2871 for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2872 mid = left_edge[i] - right_edge[i];
2873 if (mid < mid_min) {
2874 mid_min = mid;
2875 min_index = i;
2876 }
2877 }
2880 * -mid_min/2 represents the amount that we need to move DQS.
2881 * If mid_min is odd and positive we'll need to add one to
2882 * make sure the rounding in further calculations is correct
2883 * (always bias to the right), so just add 1 for all positive values.
2884 */
2885 if (mid_min > 0)
2886 mid_min++;
2887 mid_min = mid_min / 2;
2888 debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
2889 __LINE__, mid_min);
2891 /* Determine the amount we can change DQS (which is -mid_min) */
2892 orig_mid_min = mid_min;
2893 new_dqs = start_dqs;
2894 mid_min = 0;
2895 debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \
2896 mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min);
2897 /* Initialize data for export structures */
2898 dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
2899 dq_margin = IO_IO_OUT1_DELAY_MAX + 1;
2901 /* add delay to bring centre of all DQ windows to the same "level" */
2902 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
2903 /* Use values before divide by 2 to reduce round off error */
2904 shift_dq = (left_edge[i] - right_edge[i] -
2905 (left_edge[min_index] - right_edge[min_index]))/2 +
2906 (orig_mid_min - mid_min);
2908 debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \
2909 [%u]=%d\n", __func__, __LINE__, i, shift_dq);
2911 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
2912 temp_dq_out1_delay = readl(addr + (i << 2));
2913 if (shift_dq + (int32_t)temp_dq_out1_delay >
2914 (int32_t)IO_IO_OUT1_DELAY_MAX) {
2915 shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
2916 } else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
2917 shift_dq = -(int32_t)temp_dq_out1_delay;
2919 debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
2920 i, shift_dq);
2921 scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq);
2924 debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
2925 left_edge[i] - shift_dq + (-mid_min),
2926 right_edge[i] + shift_dq - (-mid_min));
2927 /* To determine values for export structures */
2928 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
2929 dq_margin = left_edge[i] - shift_dq + (-mid_min);
2931 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
2932 dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2936 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
2937 writel(0, &sdr_scc_mgr->update);
2940 debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);
2943 * set the left and right edge of each bit to an illegal value,
2944 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
2946 left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
2947 right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
2948 int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2949 int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2950 int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
2951 int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
2952 int32_t win_best = 0;
2954 /* Search for the window, or part of it, with the DM delay shift */
2955 for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
2956 scc_mgr_apply_group_dm_out1_delay(d);
2957 writel(0, &sdr_scc_mgr->update);
2959 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
2960 PASS_ALL_BITS, &bit_chk,
2961 0)) {
2962 /* USER Set current end of the window */
2963 end_curr = -d;
2964 /*
2965 * If a starting edge of our window has not been seen,
2966 * this is our current start of the DM window.
2967 */
2968 if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
2969 bgn_curr = -d;
2972 * If the current window is bigger than the best seen so far,
2973 * set best seen to the current window.
2975 if ((end_curr-bgn_curr+1) > win_best) {
2976 win_best = end_curr-bgn_curr+1;
2977 bgn_best = bgn_curr;
2978 end_best = end_curr;
2981 /* We just saw a failing test. Reset temp edge */
2982 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2983 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2988 /* Reset DM delay chains to 0 */
2989 scc_mgr_apply_group_dm_out1_delay(0);
2992 * Check to see if the current window nudges up against 0 delay.
2993 * If so, we need to continue the search by shifting DQS; otherwise
2994 * the DQS search begins as a new search. */
2995 if (end_curr != 0) {
2996 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2997 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
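/*
 * Worked example (illustrative, assuming the -d window bookkeeping
 * above): sweeping d from IO_IO_OUT1_DELAY_MAX = 31 down to 0 with
 * passes at d = 20..5 latches bgn_curr = -20 on the first pass,
 * walks end_curr down to -5, and records win_best = -5 - (-20) + 1
 * = 16. end_curr != 0 means the window never touched zero DM delay,
 * so the DQS-shift search below starts fresh rather than extending
 * it.
 */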
3000 /* Search for the window, or part of it, with DQS delay shifts */
3001 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
3003 * Note: This only shifts DQS, so we may be limiting ourselves to
3004 * the width of DQ unnecessarily.
3006 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
3007 d + new_dqs);
3009 writel(0, &sdr_scc_mgr->update);
3010 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
3011 PASS_ALL_BITS, &bit_chk,
3012 0)) {
3013 /* USER Set current end of the window */
3014 end_curr = d;
3015 /*
3016 * If a beginning edge of our window has not been seen,
3017 * this is our current beginning of the DM window.
3018 */
3019 if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
3020 bgn_curr = d;
3023 * If the current window is bigger than the best seen so
3024 * far, set best seen to the current window.
3026 if ((end_curr-bgn_curr+1) > win_best) {
3027 win_best = end_curr-bgn_curr+1;
3028 bgn_best = bgn_curr;
3029 end_best = end_curr;
3032 /* We just saw a failing test. Reset temp edge */
3033 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3034 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3036 /* Early exit optimization: if the remaining delay
3037 chain space is less than the largest window already
3038 seen, we can exit. */
3039 if (win_best - 1 >
3040 (IO_IO_OUT1_DELAY_MAX - new_dqs - d)) {
3046 /* Assign left and right edges for calibration and reporting */
3047 left_edge[0] = -1*bgn_best;
3048 right_edge[0] = end_best;
3050 debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
3051 __LINE__, left_edge[0], right_edge[0]);
3053 /* Move DQS (back to orig) */
3054 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
3058 /* Find middle of window for the DM bit */
3059 mid = (left_edge[0] - right_edge[0]) / 2;
3061 /* only move right, since we are not moving DQS/DQ */
3062 if (mid < 0)
3063 mid = 0;
3065 /* dm_margin should fail if we never find a window */
3066 if (win_best == 0)
3067 dm_margin = -1;
3068 else
3069 dm_margin = left_edge[0] - mid;
3071 scc_mgr_apply_group_dm_out1_delay(mid);
3072 writel(0, &sdr_scc_mgr->update);
3074 debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d mid=%d \
3075 dm_margin=%d\n", __func__, __LINE__, left_edge[0],
3076 right_edge[0], mid, dm_margin);
3078 gbl->fom_out += dq_margin + dqs_margin;
3080 debug_cond(DLEVEL == 2, "%s:%d write_center: dq_margin=%d \
3081 dqs_margin=%d dm_margin=%d\n", __func__, __LINE__,
3082 dq_margin, dqs_margin, dm_margin);
3085 * Do not remove this line as it makes sure all of our
3086 * decisions have been applied.
3088 writel(0, &sdr_scc_mgr->update);
3089 return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
3092 /* calibrate the write operations */
3093 static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
3094 uint32_t test_bgn)
3095 {
3096 /* update info for sims */
3097 debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);
3099 reg_file_set_stage(CAL_STAGE_WRITES);
3100 reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
3102 reg_file_set_group(g);
3104 if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
3105 set_failing_group_stage(g, CAL_STAGE_WRITES,
3106 CAL_SUBSTAGE_WRITES_CENTER);
3107 return 0;
3108 }
3110 return 1;
3111 }
3113 /* precharge all banks and activate row 0 in bank "000..." and bank "111..." */
3114 static void mem_precharge_and_activate(void)
3118 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
3119 if (param->skip_ranks[r]) {
3120 /* request to skip the rank */
3125 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
3127 /* precharge all banks ... */
3128 writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3129 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3131 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
3132 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
3133 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
3135 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
3136 writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
3137 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
3140 writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
3141 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
3145 /* Configure various memory related parameters. */
3146 static void mem_config(void)
3148 uint32_t rlat, wlat;
3149 uint32_t rw_wl_nop_cycles;
3150 uint32_t max_latency;
3152 debug("%s:%d\n", __func__, __LINE__);
3153 /* read in write and read latency */
3154 wlat = readl(&data_mgr->t_wl_add);
3155 wlat += readl(&data_mgr->mem_t_add);
3157 /* WL for hard phy does not include additive latency */
3160 * Add additional write latency to offset the address/command extra
3161 * clock cycle. We change the AC mux setting, causing AC to be delayed
3162 * by one mem clock cycle. Only do this for DDR3.
3166 rlat = readl(&data_mgr->t_rl_add);
3168 rw_wl_nop_cycles = wlat - 2;
3169 gbl->rw_wl_nop_cycles = rw_wl_nop_cycles;
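/*
 * E.g. (illustrative) a write latency of wlat = 7 AFI clocks gives
 * rw_wl_nop_cycles = 5 NOP cycles between the write command and the
 * first write data in the RW manager sequences.
 */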
3172 * For AV/CV, lfifo is hardened and always runs at full rate so
3173 * max latency in AFI clocks, used here, is correspondingly smaller.
3175 max_latency = (1<<MAX_LATENCY_COUNT_WIDTH)/1 - 1;
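/*
 * E.g. (illustrative) with MAX_LATENCY_COUNT_WIDTH = 5,
 * max_latency = (1 << 5) / 1 - 1 = 31 AFI clocks; the divisor is 1
 * because the hardened lfifo runs at full rate.
 */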
3176 /* configure for a burst length of 8 */
3179 /* Adjust Write Latency for Hard PHY */
3182 /* set a pretty high read latency initially */
3183 gbl->curr_read_lat = rlat + 16;
3185 if (gbl->curr_read_lat > max_latency)
3186 gbl->curr_read_lat = max_latency;
3188 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3190 /* advertise write latency */
3191 gbl->curr_write_lat = wlat;
3192 writel(wlat - 2, &phy_mgr_cfg->afi_wlat);
3194 /* initialize bit slips */
3195 mem_precharge_and_activate();
3198 /* Set VFIFO and LFIFO to instant-on settings in skip calibration mode */
3199 static void mem_skip_calibrate(void)
3201 uint32_t vfifo_offset;
3204 debug("%s:%d\n", __func__, __LINE__);
3205 /* Need to update every shadow register set used by the interface */
3206 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
3207 r += NUM_RANKS_PER_SHADOW_REG) {
3209 * Set output phase alignment settings appropriate for
3210 * skip calibration.
3211 */
3212 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3213 scc_mgr_set_dqs_en_phase(i, 0);
3214 #if IO_DLL_CHAIN_LENGTH == 6
3215 scc_mgr_set_dqdqs_output_phase(i, 6);
3216 #else
3217 scc_mgr_set_dqdqs_output_phase(i, 7);
3218 #endif
3222 * Write data arrives to the I/O two cycles before write
3223 * latency is reached (720 deg).
3224 * -> due to bit-slip in a/c bus
3225 * -> to allow board skew where dqs is longer than ck
3226 * -> how often can this happen!?
3227 * -> can claim back some ptaps for high freq
3228 * support if we can relax this, but I digress...
3230 * The write_clk leads mem_ck by 90 deg
3231 * The minimum ptap of the OPA is 180 deg
3232 * Each ptap has (360 / IO_DLL_CHAIN_LENGH) deg of delay
3233 * The write_clk is always delayed by 2 ptaps
3235 * Hence, to make DQS aligned to CK, we need to delay
3236 * DQS by:
3237 * (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
3239 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
3240 * gives us the number of ptaps, which simplifies to:
3242 * (1.25 * IO_DLL_CHAIN_LENGTH - 2)
3244 scc_mgr_set_dqdqs_output_phase(i, (1.25 *
3245 IO_DLL_CHAIN_LENGTH - 2));
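/*
 * Worked example (illustrative): with IO_DLL_CHAIN_LENGTH = 8 each
 * ptap is 360 / 8 = 45 degrees, so the required shift is
 * (720 - 90 - 180 - 2 * 45) / 45 = 360 / 45 = 8 ptaps, matching
 * 1.25 * 8 - 2 = 8.
 */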
3247 writel(0xff, &sdr_scc_mgr->dqs_ena);
3248 writel(0xff, &sdr_scc_mgr->dqs_io_ena);
3250 for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
3251 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3252 SCC_MGR_GROUP_COUNTER_OFFSET);
3254 writel(0xff, &sdr_scc_mgr->dq_ena);
3255 writel(0xff, &sdr_scc_mgr->dm_ena);
3256 writel(0, &sdr_scc_mgr->update);
3259 /* Compensate for simulation model behaviour */
3260 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3261 scc_mgr_set_dqs_bus_in_delay(i, 10);
3262 scc_mgr_load_dqs(i);
3264 writel(0, &sdr_scc_mgr->update);
3267 * ArriaV has hard FIFOs that can only be initialized by incrementing
3268 * in sequence.
3269 */
3270 vfifo_offset = CALIB_VFIFO_OFFSET;
3271 for (j = 0; j < vfifo_offset; j++) {
3272 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
3273 }
3274 writel(0, &phy_mgr_cmd->fifo_reset);
3277 * For ACV with hard lfifo, we get the skip-cal setting from
3278 * a generation-time constant.
3280 gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
3281 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
3284 /* Memory calibration entry point */
3285 static uint32_t mem_calibrate(void)
3288 uint32_t rank_bgn, sr;
3289 uint32_t write_group, write_test_bgn;
3290 uint32_t read_group, read_test_bgn;
3291 uint32_t run_groups, current_run;
3292 uint32_t failing_groups = 0;
3293 uint32_t group_failed = 0;
3294 uint32_t sr_failed = 0;
3296 debug("%s:%d\n", __func__, __LINE__);
3297 /* Initialize the data settings */
3299 gbl->error_substage = CAL_SUBSTAGE_NIL;
3300 gbl->error_stage = CAL_STAGE_NIL;
3301 gbl->error_group = 0xff;
3307 for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
3308 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3309 SCC_MGR_GROUP_COUNTER_OFFSET);
3310 /* Only needed once to set all groups, pins, DQ, DQS, DM. */
3312 scc_mgr_set_hhp_extras();
3314 scc_set_bypass_mode(i);
3317 if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
3319 * Set VFIFO and LFIFO to instant-on settings in skip
3322 mem_skip_calibrate();
3324 for (i = 0; i < NUM_CALIB_REPEAT; i++) {
3326 * Zero all delay chain/phase settings for all
3327 * groups and all shadow register sets.
3331 run_groups = ~param->skip_groups;
3333 for (write_group = 0, write_test_bgn = 0; write_group
3334 < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
3335 write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
3336 /* Initialize the group failure flag */
3337 group_failed = 0;
3339 current_run = run_groups & ((1 <<
3340 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
3341 run_groups = run_groups >>
3342 RW_MGR_NUM_DQS_PER_WRITE_GROUP;
3344 if (current_run == 0)
3345 continue;
3347 writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
3348 SCC_MGR_GROUP_COUNTER_OFFSET);
3349 scc_mgr_zero_group(write_group, 0);
3351 for (read_group = write_group *
3352 RW_MGR_MEM_IF_READ_DQS_WIDTH /
3353 RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
3355 read_group < (write_group + 1) *
3356 RW_MGR_MEM_IF_READ_DQS_WIDTH /
3357 RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
3359 read_group++, read_test_bgn +=
3360 RW_MGR_MEM_DQ_PER_READ_DQS) {
3361 /* Calibrate the VFIFO */
3362 if (!((STATIC_CALIB_STEPS) &
3363 CALIB_SKIP_VFIFO)) {
3364 if (!rw_mgr_mem_calibrate_vfifo
3370 phy_debug_mode_flags &
3371 PHY_DEBUG_SWEEP_ALL_GROUPS)) {
3378 /* Calibrate the output side */
3379 if (group_failed == 0) {
3380 for (rank_bgn = 0, sr = 0; rank_bgn
3381 < RW_MGR_MEM_NUMBER_OF_RANKS;
3383 NUM_RANKS_PER_SHADOW_REG,
3386 if (!((STATIC_CALIB_STEPS) &
3387 CALIB_SKIP_WRITES)) {
3388 if ((STATIC_CALIB_STEPS)
3389 & CALIB_SKIP_DELAY_SWEEPS) {
3390 /* not needed in quick mode! */
3393 * Determine if this set of
3394 * ranks should be skipped
3397 if (!param->skip_shadow_regs[sr]) {
3398 if (!rw_mgr_mem_calibrate_writes
3399 (rank_bgn, write_group,
3403 phy_debug_mode_flags &
3404 PHY_DEBUG_SWEEP_ALL_GROUPS)) {
3416 if (group_failed == 0) {
3417 for (read_group = write_group *
3418 RW_MGR_MEM_IF_READ_DQS_WIDTH /
3419 RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
3421 read_group < (write_group + 1)
3422 * RW_MGR_MEM_IF_READ_DQS_WIDTH
3423 / RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
3425 read_group++, read_test_bgn +=
3426 RW_MGR_MEM_DQ_PER_READ_DQS) {
3427 if (!((STATIC_CALIB_STEPS) &
3428 CALIB_SKIP_WRITES)) {
3429 if (!rw_mgr_mem_calibrate_vfifo_end
3430 (read_group, read_test_bgn)) {
3431 group_failed = 1;
3433 if (!(gbl->phy_debug_mode_flags
3434 & PHY_DEBUG_SWEEP_ALL_GROUPS)) {
3442 if (group_failed != 0)
3447 * USER If there are any failing groups then report
3448 * the failure.
3449 */
3450 if (failing_groups != 0)
3453 /* Calibrate the LFIFO */
3454 if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) {
3456 * If we're skipping groups as part of debug,
3457 * don't calibrate LFIFO.
3459 if (param->skip_groups == 0) {
3460 if (!rw_mgr_mem_calibrate_lfifo())
3461 return 0;
3468 * Do not remove this line as it makes sure all of our decisions
3469 * have been applied.
3471 writel(0, &sdr_scc_mgr->update);
3472 return 1;
3473 }
3475 static uint32_t run_mem_calibrate(void)
3478 uint32_t debug_info;
3480 debug("%s:%d\n", __func__, __LINE__);
3482 /* Reset pass/fail status shown on afi_cal_success/fail */
3483 writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
3485 /* Stop the tracking manager */
3486 uint32_t ctrlcfg = readl(&sdr_ctrl->ctrl_cfg);
3488 writel(ctrlcfg & 0xFFBFFFFF, &sdr_ctrl->ctrl_cfg);
3491 rw_mgr_mem_initialize();
3493 pass = mem_calibrate();
3495 mem_precharge_and_activate();
3496 writel(0, &phy_mgr_cmd->fifo_reset);
3500 * Don't return control of the PHY back to AFI when in debug mode.
3502 if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) {
3503 rw_mgr_mem_handoff();
3505 * In Hard PHY this is a 2-bit control:
3506 * 0: AFI Mux Select
3507 * 1: DDIO Mux Select
3509 writel(0x2, &phy_mgr_cfg->mux_sel);
3512 writel(ctrlcfg, &sdr_ctrl->ctrl_cfg);
3515 printf("%s: CALIBRATION PASSED\n", __FILE__);
3520 if (gbl->fom_in > 0xff)
3521 gbl->fom_in = 0xff;
3523 if (gbl->fom_out > 0xff)
3524 gbl->fom_out = 0xff;
3526 /* Update the FOM in the register file */
3527 debug_info = gbl->fom_in;
3528 debug_info |= gbl->fom_out << 8;
3529 writel(debug_info, &sdr_reg_file->fom);
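/*
 * E.g. (illustrative) fom_in = 0x12 and fom_out = 0x34 pack into
 * debug_info = 0x3412: input FOM in bits [7:0], output FOM in bits
 * [15:8].
 */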
3531 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3532 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
3534 printf("%s: CALIBRATION FAILED\n", __FILE__);
3536 debug_info = gbl->error_stage;
3537 debug_info |= gbl->error_substage << 8;
3538 debug_info |= gbl->error_group << 16;
3540 writel(debug_info, &sdr_reg_file->failing_stage);
3541 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3542 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
3544 /* Update the failing group/stage in the register file */
3545 debug_info = gbl->error_stage;
3546 debug_info |= gbl->error_substage << 8;
3547 debug_info |= gbl->error_group << 16;
3548 writel(debug_info, &sdr_reg_file->failing_stage);
3555 * hc_initialize_rom_data() - Initialize ROM data
3557 * Initialize ROM data.
3559 static void hc_initialize_rom_data(void)
3563 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
3564 for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
3565 writel(inst_rom_init[i], addr + (i << 2));
3567 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
3568 for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
3569 writel(ac_rom_init[i], addr + (i << 2));
3573 * initialize_reg_file() - Initialize SDR register file
3575 * Initialize SDR register file.
3577 static void initialize_reg_file(void)
3579 /* Initialize the register file with the correct data */
3580 writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
3581 writel(0, &sdr_reg_file->debug_data_addr);
3582 writel(0, &sdr_reg_file->cur_stage);
3583 writel(0, &sdr_reg_file->fom);
3584 writel(0, &sdr_reg_file->failing_stage);
3585 writel(0, &sdr_reg_file->debug1);
3586 writel(0, &sdr_reg_file->debug2);
3590 * initialize_hps_phy() - Initialize HPS PHY
3592 * Initialize HPS PHY.
3594 static void initialize_hps_phy(void)
3598 * Tracking also gets configured here because it's in the
3599 * same register.
3600 */
3601 uint32_t trk_sample_count = 7500;
3602 uint32_t trk_long_idle_sample_count = (10 << 16) | 100;
3604 * Format is number of outer loops in the 16 MSB, sample
3605 * count in 16 LSB.
3606 */
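/*
 * With the initialisers above, trk_long_idle_sample_count =
 * (10 << 16) | 100 therefore encodes 10 outer loops in bits [31:16]
 * and a sample count of 100 in bits [15:0].
 */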
3609 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
3610 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
3611 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
3612 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
3613 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
3614 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
3616 * This field selects the intrinsic latency to RDATA_EN/FULL path.
3617 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
3619 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
3620 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
3621 trk_sample_count);
3622 writel(reg, &sdr_ctrl->phy_ctrl0);
3625 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
3626 trk_sample_count >>
3627 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
3628 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
3629 trk_long_idle_sample_count);
3630 writel(reg, &sdr_ctrl->phy_ctrl1);
3633 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
3634 trk_long_idle_sample_count >>
3635 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
3636 writel(reg, &sdr_ctrl->phy_ctrl2);
3639 static void initialize_tracking(void)
3641 uint32_t concatenated_longidle = 0x0;
3642 uint32_t concatenated_delays = 0x0;
3643 uint32_t concatenated_rw_addr = 0x0;
3644 uint32_t concatenated_refresh = 0x0;
3645 uint32_t trk_sample_count = 7500;
3646 uint32_t dtaps_per_ptap;
3650 * compute usable version of value in case we skip full
3651 * calibration.
3652 */
3655 while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
3657 tmp_delay += IO_DELAY_PER_DCHAIN_TAP;
3661 concatenated_longidle = concatenated_longidle ^ 10;
3662 /*longidle outer loop */
3663 concatenated_longidle = concatenated_longidle << 16;
3664 concatenated_longidle = concatenated_longidle ^ 100;
3665 /*longidle sample count */
3666 concatenated_delays = concatenated_delays ^ 243;
3667 /* trfc, worst case of 933MHz 4Gb */
3668 concatenated_delays = concatenated_delays << 8;
3669 concatenated_delays = concatenated_delays ^ 14;
3670 /* trcd, worst case */
3671 concatenated_delays = concatenated_delays << 8;
3672 concatenated_delays = concatenated_delays ^ 10;
3673 /* vfifo wait */
3674 concatenated_delays = concatenated_delays << 8;
3675 concatenated_delays = concatenated_delays ^ 4;
3676 /* mux delay */
3678 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_IDLE;
3679 concatenated_rw_addr = concatenated_rw_addr << 8;
3680 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_ACTIVATE_1;
3681 concatenated_rw_addr = concatenated_rw_addr << 8;
3682 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_SGLE_READ;
3683 concatenated_rw_addr = concatenated_rw_addr << 8;
3684 concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_PRECHARGE_ALL;
3686 concatenated_refresh = concatenated_refresh ^ RW_MGR_REFRESH_ALL;
3687 concatenated_refresh = concatenated_refresh << 24;
3688 concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */
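/*
 * Resulting bit layouts (derived from the shifts above):
 * trk_longidle:    [31:16] = 10 outer loops, [15:0] = 100 samples
 * delays:          [31:24] = 243 (trfc), [23:16] = 14 (trcd),
 *                  [15:8] = 10, [7:0] = 4
 * trk_rw_mgr_addr: [31:24] = RW_MGR_IDLE, [23:16] = RW_MGR_ACTIVATE_1,
 *                  [15:8] = RW_MGR_SGLE_READ, [7:0] = RW_MGR_PRECHARGE_ALL
 * trk_rfsh:        [31:24] = RW_MGR_REFRESH_ALL, [23:0] = 1000 (trefi)
 */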
3690 /* Initialize the register file with the correct data */
3691 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
3692 writel(trk_sample_count, &sdr_reg_file->trk_sample_count);
3693 writel(concatenated_longidle, &sdr_reg_file->trk_longidle);
3694 writel(concatenated_delays, &sdr_reg_file->delays);
3695 writel(concatenated_rw_addr, &sdr_reg_file->trk_rw_mgr_addr);
3696 writel(RW_MGR_MEM_IF_READ_DQS_WIDTH, &sdr_reg_file->trk_read_dqs_width);
3697 writel(concatenated_refresh, &sdr_reg_file->trk_rfsh);
3700 int sdram_calibration_full(void)
3702 struct param_type my_param;
3703 struct gbl_type my_gbl;
3710 /* Initialize the debug mode flags */
3711 gbl->phy_debug_mode_flags = 0;
3712 /* Set the calibration enabled by default */
3713 gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
3715 * Only sweep all groups (regardless of fail state) by default.
3716 * Set the read test enabled by default.
3718 #if DISABLE_GUARANTEED_READ
3719 gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
3720 #endif
3721 /* Initialize the register file */
3722 initialize_reg_file();
3724 /* Initialize any PHY CSR */
3725 initialize_hps_phy();
3727 scc_mgr_initialize();
3729 initialize_tracking();
3731 /* USER Enable all ranks, groups */
3732 for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++)
3733 param->skip_ranks[i] = 0;
3734 for (i = 0; i < NUM_SHADOW_REGS; ++i)
3735 param->skip_shadow_regs[i] = 0;
3736 param->skip_groups = 0;
3738 printf("%s: Preparing to start memory calibration\n", __FILE__);
3740 debug("%s:%d\n", __func__, __LINE__);
3741 debug_cond(DLEVEL == 1,
3742 "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
3743 RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
3744 RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
3745 RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
3746 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
3747 debug_cond(DLEVEL == 1,
3748 "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
3749 RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
3750 RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
3751 IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
3752 debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
3753 IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
3754 debug_cond(DLEVEL == 1, "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
3755 IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
3756 IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
3757 debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
3758 IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
3759 IO_IO_OUT2_DELAY_MAX);
3760 debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
3761 IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);
3763 hc_initialize_rom_data();
3765 /* update info for sims */
3766 reg_file_set_stage(CAL_STAGE_NIL);
3767 reg_file_set_group(0);
3770 * Load global needed for those actions that require
3771 * some dynamic calibration support.
3773 dyn_calib_steps = STATIC_CALIB_STEPS;
3775 * Load global to allow dynamic selection of delay loop settings
3776 * based on calibration mode.
3778 if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
3779 skip_delay_mask = 0xff;
3780 else
3781 skip_delay_mask = 0x0;
3783 pass = run_mem_calibrate();
3785 printf("%s: Calibration complete\n", __FILE__);