ddr: altera: Internal mem_calibrate() cleanup part 2
index 28e32ff57501b797712d5fdfce159e832a401501..e2e7184ffffcbd060967125dd7ac14a26839e3b4 100644
--- a/drivers/ddr/altera/sequencer.c
+++ b/drivers/ddr/altera/sequencer.c
@@ -113,10 +113,17 @@ static void reg_file_set_sub_stage(u8 set_sub_stage)
        clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
 }
 
-static void initialize(void)
+/**
+ * phy_mgr_initialize() - Initialize PHY Manager
+ *
+ * Initialize PHY Manager.
+ */
+static void phy_mgr_initialize(void)
 {
+       u32 ratio;
+
        debug("%s:%d\n", __func__, __LINE__);
-       /* USER calibration has control over path to memory */
+       /* Calibration has control over path to memory */
        /*
         * In Hard PHY this is a 2-bit control:
         * 0: AFI Mux Select
@@ -132,49 +139,55 @@ static void initialize(void)
 
        writel(0, &phy_mgr_cfg->cal_debug_info);
 
-       if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
-               param->read_correct_mask_vg  = ((uint32_t)1 <<
-                       (RW_MGR_MEM_DQ_PER_READ_DQS /
-                       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
-               param->write_correct_mask_vg = ((uint32_t)1 <<
-                       (RW_MGR_MEM_DQ_PER_READ_DQS /
-                       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
-               param->read_correct_mask     = ((uint32_t)1 <<
-                       RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
-               param->write_correct_mask    = ((uint32_t)1 <<
-                       RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
-               param->dm_correct_mask       = ((uint32_t)1 <<
-                       (RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
-                       - 1;
-       }
+       /* Init params only if we do NOT skip calibration. */
+       if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
+               return;
+
+       ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
+               RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
+       param->read_correct_mask_vg = (1 << ratio) - 1;
+       param->write_correct_mask_vg = (1 << ratio) - 1;
+       param->read_correct_mask = (1 << RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
+       param->write_correct_mask = (1 << RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
+       ratio = RW_MGR_MEM_DATA_WIDTH /
+               RW_MGR_MEM_DATA_MASK_WIDTH;
+       param->dm_correct_mask = (1 << ratio) - 1;
 }
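For reference, a standalone sketch of the mask arithmetic introduced above. The widths are assumed purely for illustration (8 DQ per read/write DQS, one virtual group, 32-bit data bus with 4 DM pins); the real values come from the generated sequencer headers and are not part of this patch.

#include <stdio.h>

int main(void)
{
	/* Assumed widths, not taken from this patch. */
	const unsigned int dq_per_read_dqs = 8;
	const unsigned int dq_per_write_dqs = 8;
	const unsigned int vgroups_per_read_dqs = 1;
	const unsigned int data_width = 32, data_mask_width = 4;

	unsigned int ratio = dq_per_read_dqs / vgroups_per_read_dqs;

	/* One bit per DQ pin in the (virtual) group. */
	printf("read_correct_mask_vg = 0x%x\n", (1u << ratio) - 1);
	printf("read_correct_mask    = 0x%x\n", (1u << dq_per_read_dqs) - 1);
	printf("write_correct_mask   = 0x%x\n", (1u << dq_per_write_dqs) - 1);
	/* One bit per data bit covered by each DM pin: 32 / 4 = 8 -> 0xff. */
	printf("dm_correct_mask      = 0x%x\n",
	       (1u << (data_width / data_mask_width)) - 1);
	return 0;
}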
 
-static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
+/**
+ * set_rank_and_odt_mask() - Set Rank and ODT mask
+ * @rank:      Rank mask
+ * @odt_mode:  ODT mode, OFF or READ_WRITE
+ *
+ * Set Rank and ODT mask (On-Die Termination).
+ */
+static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
 {
-       uint32_t odt_mask_0 = 0;
-       uint32_t odt_mask_1 = 0;
-       uint32_t cs_and_odt_mask;
+       u32 odt_mask_0 = 0;
+       u32 odt_mask_1 = 0;
+       u32 cs_and_odt_mask;
 
-       if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
-               if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
-                       /*
-                        * 1 Rank
-                        * Read: ODT = 0
-                        * Write: ODT = 1
-                        */
+       if (odt_mode == RW_MGR_ODT_MODE_OFF) {
+               odt_mask_0 = 0x0;
+               odt_mask_1 = 0x0;
+       } else {        /* RW_MGR_ODT_MODE_READ_WRITE */
+               switch (RW_MGR_MEM_NUMBER_OF_RANKS) {
+               case 1: /* 1 Rank */
+                       /* Read: ODT = 0 ; Write: ODT = 1 */
                        odt_mask_0 = 0x0;
                        odt_mask_1 = 0x1;
-               } else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
-                       /* 2 Ranks */
+                       break;
+               case 2: /* 2 Ranks */
                        if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
-                               /* - Dual-Slot , Single-Rank
-                                * (1 chip-select per DIMM)
-                                * OR
-                                * - RDIMM, 4 total CS (2 CS per DIMM)
-                                * means 2 DIMM
-                                * Since MEM_NUMBER_OF_RANKS is 2 they are
-                                * both single rank
-                                * with 2 CS each (special for RDIMM)
+                               /*
+                                * - Dual-Slot, Single-Rank (1 CS per DIMM)
+                                *   OR
+                                * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
+                                *
+                                * Since MEM_NUMBER_OF_RANKS is 2, they
+                                * are both single rank with 2 CS each
+                                * (special for RDIMM).
+                                *
                                 * Read: Turn on ODT on the opposite rank
                                 * Write: Turn on ODT on all ranks
                                 */
@@ -182,19 +195,18 @@ static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
                                odt_mask_1 = 0x3;
                        } else {
                                /*
-                                * USER - Single-Slot , Dual-rank DIMMs
-                                * (2 chip-selects per DIMM)
-                                * USER Read: Turn on ODT off on all ranks
-                                * USER Write: Turn on ODT on active rank
+                                * - Single-Slot, Dual-Rank (2 CS per DIMM)
+                                *
+                                * Read: Turn ODT off on all ranks
+                                * Write: Turn on ODT on active rank
                                 */
                                odt_mask_0 = 0x0;
                                odt_mask_1 = 0x3 & (1 << rank);
                        }
-               } else {
-                       /* 4 Ranks
-                        * Read:
+                       break;
+               case 4: /* 4 Ranks */
+                       /* Read:
                         * ----------+-----------------------+
-                        *           |                       |
                         *           |         ODT           |
                         * Read From +-----------------------+
                         *   Rank    |  3  |  2  |  1  |  0  |
@@ -207,7 +219,6 @@ static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
                         *
                         * Write:
                         * ----------+-----------------------+
-                        *           |                       |
                         *           |         ODT           |
                         * Write To  +-----------------------+
                         *   Rank    |  3  |  2  |  1  |  0  |
@@ -236,16 +247,13 @@ static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
                                odt_mask_1 = 0xA;
                                break;
                        }
+                       break;
                }
-       } else {
-               odt_mask_0 = 0x0;
-               odt_mask_1 = 0x0;
        }
 
-       cs_and_odt_mask =
-               (0xFF & ~(1 << rank)) |
-               ((0xFF & odt_mask_0) << 8) |
-               ((0xFF & odt_mask_1) << 16);
+       cs_and_odt_mask = (0xFF & ~(1 << rank)) |
+                         ((0xFF & odt_mask_0) << 8) |
+                         ((0xFF & odt_mask_1) << 16);
        writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
 }
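As a worked example of the register layout built above (bits [7:0] chip-select mask, active low; [15:8] read ODT; [23:16] write ODT), take rank 1 of a single-slot, dual-rank DIMM in READ_WRITE mode. The values below are chosen for illustration only, with the register access modelled as a plain variable.

#include <stdio.h>

int main(void)
{
	const unsigned int rank = 1;		/* assumed active rank */
	const unsigned int odt_mask_0 = 0x0;	/* read: ODT off on all ranks */
	const unsigned int odt_mask_1 = 0x3 & (1 << rank); /* write: active rank */
	unsigned int cs_and_odt_mask = (0xFF & ~(1 << rank)) |
				       ((0xFF & odt_mask_0) << 8) |
				       ((0xFF & odt_mask_1) << 16);

	printf("cs_and_odt_mask = 0x%06x\n", cs_and_odt_mask);	/* 0x0200fd */
	return 0;
}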
@@ -3086,21 +3094,24 @@ static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
        return 1;
 }
 
-/* precharge all banks and activate row 0 in bank "000..." and bank "111..." */
+/**
+ * mem_precharge_and_activate() - Precharge all banks and activate
+ *
+ * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
+ */
 static void mem_precharge_and_activate(void)
 {
-       uint32_t r;
+       int r;
 
        for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
-               if (param->skip_ranks[r]) {
-                       /* request to skip the rank */
+               /* Test if the rank should be skipped. */
+               if (param->skip_ranks[r])
                        continue;
-               }
 
-               /* set rank */
+               /* Set rank. */
                set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
 
-               /* precharge all banks ... */
+               /* Precharge all banks. */
                writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                             RW_MGR_RUN_SINGLE_GROUP_OFFSET);
 
@@ -3112,66 +3123,57 @@ static void mem_precharge_and_activate(void)
                writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
                        &sdr_rw_load_jump_mgr_regs->load_jump_add1);
 
-               /* activate rows */
+               /* Activate rows. */
                writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
                                                RW_MGR_RUN_SINGLE_GROUP_OFFSET);
        }
 }
 
-/* Configure various memory related parameters. */
-static void mem_config(void)
+/**
+ * mem_init_latency() - Configure memory RLAT and WLAT settings
+ *
+ * Configure memory RLAT and WLAT parameters.
+ */
+static void mem_init_latency(void)
 {
-       uint32_t rlat, wlat;
-       uint32_t rw_wl_nop_cycles;
-       uint32_t max_latency;
-
-       debug("%s:%d\n", __func__, __LINE__);
-       /* read in write and read latency */
-       wlat = readl(&data_mgr->t_wl_add);
-       wlat += readl(&data_mgr->mem_t_add);
-
-       /* WL for hard phy does not include additive latency */
-
        /*
-        * add addtional write latency to offset the address/command extra
-        * clock cycle. We change the AC mux setting causing AC to be delayed
-        * by one mem clock cycle. Only do this for DDR3
+        * For AV/CV, LFIFO is hardened and always runs at full rate
+        * so max latency in AFI clocks, used here, is correspondingly
+        * smaller.
         */
-       wlat = wlat + 1;
+       const u32 max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
+       u32 rlat, wlat;
 
-       rlat = readl(&data_mgr->t_rl_add);
-
-       rw_wl_nop_cycles = wlat - 2;
-       gbl->rw_wl_nop_cycles = rw_wl_nop_cycles;
+       debug("%s:%d\n", __func__, __LINE__);
 
        /*
-        * For AV/CV, lfifo is hardened and always runs at full rate so
-        * max latency in AFI clocks, used here, is correspondingly smaller.
+        * Read in write latency.
+        * WL for Hard PHY does not include additive latency.
         */
-       max_latency = (1<<MAX_LATENCY_COUNT_WIDTH)/1 - 1;
-       /* configure for a burst length of 8 */
+       wlat = readl(&data_mgr->t_wl_add);
+       wlat += readl(&data_mgr->mem_t_add);
 
-       /* write latency */
-       /* Adjust Write Latency for Hard PHY */
-       wlat = wlat + 1;
+       gbl->rw_wl_nop_cycles = wlat - 1;
 
-       /* set a pretty high read latency initially */
-       gbl->curr_read_lat = rlat + 16;
+       /* Read in read latency. */
+       rlat = readl(&data_mgr->t_rl_add);
 
+       /* Set a pretty high read latency initially. */
+       gbl->curr_read_lat = rlat + 16;
        if (gbl->curr_read_lat > max_latency)
                gbl->curr_read_lat = max_latency;
 
        writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
 
-       /* advertise write latency */
-       gbl->curr_write_lat = wlat;
-       writel(wlat - 2, &phy_mgr_cfg->afi_wlat);
-
-       /* initialize bit slips */
-       mem_precharge_and_activate();
+       /* Advertise write latency. */
+       writel(wlat, &phy_mgr_cfg->afi_wlat);
 }
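A quick arithmetic check, with assumed register contents, that the simplified write-latency handling programs the same values as the code removed above: the old +1 adjustments for the DDR3 AC mux and the Hard PHY cancel against the -2 offsets used when programming.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Assumed register contents, for illustration only. */
	const unsigned int t_wl_add = 5, mem_t_add = 2;
	unsigned int wlat = t_wl_add + mem_t_add;

	/* Removed code: +1 for the DDR3 AC mux, +1 for the Hard PHY,
	 * then -2 when programming each value. */
	unsigned int old_nop = (wlat + 1) - 2;
	unsigned int old_afi = ((wlat + 1) + 1) - 2;

	/* New code: the offsets cancel, so program wlat - 1 and wlat. */
	assert(old_nop == wlat - 1);
	assert(old_afi == wlat);
	printf("rw_wl_nop_cycles=%u afi_wlat=%u\n", wlat - 1, wlat);
	return 0;
}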
 
-/* Set VFIFO and LFIFO to instant-on settings in skip calibration mode */
+/**
+ * mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
+ *
+ * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
+ */
 static void mem_skip_calibrate(void)
 {
        uint32_t vfifo_offset;
@@ -3180,7 +3182,7 @@ static void mem_skip_calibrate(void)
        debug("%s:%d\n", __func__, __LINE__);
        /* Need to update every shadow register set used by the interface */
        for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
-               r += NUM_RANKS_PER_SHADOW_REG) {
+            r += NUM_RANKS_PER_SHADOW_REG) {
                /*
                 * Set output phase alignment settings appropriate for
                 * skip calibration.
@@ -3217,8 +3219,8 @@ static void mem_skip_calibrate(void)
                         *
                         *    (1.25 * IO_DLL_CHAIN_LENGTH - 2)
                         */
-                       scc_mgr_set_dqdqs_output_phase(i, (1.25 *
-                               IO_DLL_CHAIN_LENGTH - 2));
+                       scc_mgr_set_dqdqs_output_phase(i,
+                                       1.25 * IO_DLL_CHAIN_LENGTH - 2);
                }
                writel(0xff, &sdr_scc_mgr->dqs_ena);
                writel(0xff, &sdr_scc_mgr->dqs_io_ena);
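For reference, the phase expression used above evaluated for an assumed DLL chain length; the real IO_DLL_CHAIN_LENGTH is device-specific and comes from the generated headers.

#include <stdio.h>

int main(void)
{
	/* Assumed value; the real IO_DLL_CHAIN_LENGTH is generated. */
	const unsigned int io_dll_chain_length = 8;

	/* Same expression as in the patch; truncated to an integer tap count. */
	unsigned int phase = 1.25 * io_dll_chain_length - 2;

	printf("dqdqs output phase = %u taps\n", phase);	/* 8 for a chain of 8 */
	return 0;
}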
@@ -3244,14 +3246,13 @@ static void mem_skip_calibrate(void)
         * in sequencer.
         */
        vfifo_offset = CALIB_VFIFO_OFFSET;
-       for (j = 0; j < vfifo_offset; j++) {
+       for (j = 0; j < vfifo_offset; j++)
                writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
-       }
        writel(0, &phy_mgr_cmd->fifo_reset);
 
        /*
-        * For ACV with hard lfifo, we get the skip-cal setting from
-        * generation-time constant.
+        * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
+        * setting from generation-time constant.
         */
        gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
        writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
@@ -3267,18 +3268,24 @@ static uint32_t mem_calibrate(void)
        uint32_t run_groups, current_run;
        uint32_t failing_groups = 0;
        uint32_t group_failed = 0;
-       uint32_t sr_failed = 0;
+
+       const u32 rwdqs_ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
+                               RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
 
        debug("%s:%d\n", __func__, __LINE__);
-       /* Initialize the data settings */
 
+       /* Initialize the data settings */
        gbl->error_substage = CAL_SUBSTAGE_NIL;
        gbl->error_stage = CAL_STAGE_NIL;
        gbl->error_group = 0xff;
        gbl->fom_in = 0;
        gbl->fom_out = 0;
 
-       mem_config();
+       /* Initialize WLAT and RLAT. */
+       mem_init_latency();
+
+       /* Initialize bit slips. */
+       mem_precharge_and_activate();
 
        for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
                writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
@@ -3290,152 +3297,144 @@ static uint32_t mem_calibrate(void)
                scc_set_bypass_mode(i);
        }
 
+       /* Calibration is skipped. */
        if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
                /*
                 * Set VFIFO and LFIFO to instant-on settings in skip
                 * calibration mode.
                 */
                mem_skip_calibrate();
-       } else {
-               for (i = 0; i < NUM_CALIB_REPEAT; i++) {
-                       /*
-                        * Zero all delay chain/phase settings for all
-                        * groups and all shadow register sets.
-                        */
-                       scc_mgr_zero_all();
 
-                       run_groups = ~param->skip_groups;
-
-                       for (write_group = 0, write_test_bgn = 0; write_group
-                               < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
-                               write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
-                               /* Initialized the group failure */
-                               group_failed = 0;
+               /*
+                * Do not remove this line as it makes sure all of our
+                * decisions have been applied.
+                */
+               writel(0, &sdr_scc_mgr->update);
+               return 1;
+       }
 
-                               current_run = run_groups & ((1 <<
-                                       RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
-                               run_groups = run_groups >>
-                                       RW_MGR_NUM_DQS_PER_WRITE_GROUP;
+       /* Calibration is not skipped. */
+       for (i = 0; i < NUM_CALIB_REPEAT; i++) {
+               /*
+                * Zero all delay chain/phase settings for all
+                * groups and all shadow register sets.
+                */
+               scc_mgr_zero_all();
+
+               run_groups = ~param->skip_groups;
+
+               for (write_group = 0, write_test_bgn = 0; write_group
+                       < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
+                       write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
+                       /* Initialize the group failure. */
+                       group_failed = 0;
+
+                       current_run = run_groups & ((1 <<
+                               RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
+                       run_groups = run_groups >>
+                               RW_MGR_NUM_DQS_PER_WRITE_GROUP;
+
+                       if (current_run == 0)
+                               continue;
+
+                       writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
+                                           SCC_MGR_GROUP_COUNTER_OFFSET);
+                       scc_mgr_zero_group(write_group, 0);
+
+                       for (read_group = write_group * rwdqs_ratio,
+                            read_test_bgn = 0;
+                            read_group < (write_group + 1) * rwdqs_ratio && group_failed == 0;
+                            read_group++,
+                            read_test_bgn += RW_MGR_MEM_DQ_PER_READ_DQS) {
+                               if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
+                                       continue;
 
-                               if (current_run == 0)
+                               /* Calibrate the VFIFO */
+                               if (rw_mgr_mem_calibrate_vfifo(read_group,
+                                                              read_test_bgn))
                                        continue;
 
-                               writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
-                                                   SCC_MGR_GROUP_COUNTER_OFFSET);
-                               scc_mgr_zero_group(write_group, 0);
+                               group_failed = 1;
+                               if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
+                                       return 0;
+                       }
+
+                       /* Calibrate the output side */
+                       if (group_failed == 0) {
+                               for (rank_bgn = 0, sr = 0;
+                                    rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
+                                    rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
+                                       if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
+                                               continue;
+
+                                       /* Not needed in quick mode! */
+                                       if (STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS)
+                                               continue;
 
+                                       /*
+                                        * Determine if this set of ranks
+                                        * should be skipped entirely.
+                                        */
+                                       if (param->skip_shadow_regs[sr])
+                                               continue;
+
+                                       /* Calibrate WRITEs */
+                                       if (rw_mgr_mem_calibrate_writes(rank_bgn,
+                                                       write_group, write_test_bgn))
+                                               continue;
+
+                                       group_failed = 1;
+                                       if (!(gbl->phy_debug_mode_flags & PHY_DEBUG_SWEEP_ALL_GROUPS))
+                                               return 0;
+                               }
+                       }
+
+                       if (group_failed == 0) {
                                for (read_group = write_group *
-                                       RW_MGR_MEM_IF_READ_DQS_WIDTH /
-                                       RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
-                                       read_test_bgn = 0;
-                                       read_group < (write_group + 1) *
-                                       RW_MGR_MEM_IF_READ_DQS_WIDTH /
-                                       RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
+                               RW_MGR_MEM_IF_READ_DQS_WIDTH /
+                               RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
+                               read_test_bgn = 0;
+                                       read_group < (write_group + 1)
+                                       * RW_MGR_MEM_IF_READ_DQS_WIDTH
+                                       / RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
                                        group_failed == 0;
                                        read_group++, read_test_bgn +=
                                        RW_MGR_MEM_DQ_PER_READ_DQS) {
-                                       /* Calibrate the VFIFO */
                                        if (!((STATIC_CALIB_STEPS) &
-                                               CALIB_SKIP_VFIFO)) {
-                                               if (!rw_mgr_mem_calibrate_vfifo
-                                                       (read_group,
-                                                       read_test_bgn)) {
-                                                       group_failed = 1;
-
-                                                       if (!(gbl->
-                                                       phy_debug_mode_flags &
-                                               PHY_DEBUG_SWEEP_ALL_GROUPS)) {
-                                                               return 0;
-                                                       }
-                                               }
-                                       }
-                               }
-
-                               /* Calibrate the output side */
-                               if (group_failed == 0)  {
-                                       for (rank_bgn = 0, sr = 0; rank_bgn
-                                               < RW_MGR_MEM_NUMBER_OF_RANKS;
-                                               rank_bgn +=
-                                               NUM_RANKS_PER_SHADOW_REG,
-                                               ++sr) {
-                                               sr_failed = 0;
-                                               if (!((STATIC_CALIB_STEPS) &
                                                CALIB_SKIP_WRITES)) {
-                                                       if ((STATIC_CALIB_STEPS)
-                                               & CALIB_SKIP_DELAY_SWEEPS) {
-                                               /* not needed in quick mode! */
-                                                       } else {
-                                               /*
-                                                * Determine if this set of
-                                                * ranks should be skipped
-                                                * entirely.
-                                                */
-                                       if (!param->skip_shadow_regs[sr]) {
-                                               if (!rw_mgr_mem_calibrate_writes
-                                               (rank_bgn, write_group,
-                                               write_test_bgn)) {
-                                                       sr_failed = 1;
-                                                       if (!(gbl->
-                                                       phy_debug_mode_flags &
-                                               PHY_DEBUG_SWEEP_ALL_GROUPS)) {
-                                                               return 0;
-                                                                       }
-                                                                       }
-                                                               }
-                                                       }
-                                               }
-                                               if (sr_failed != 0)
-                                                       group_failed = 1;
-                                       }
-                               }
+                               if (!rw_mgr_mem_calibrate_vfifo_end
+                                       (read_group, read_test_bgn)) {
+                                               group_failed = 1;
 
-                               if (group_failed == 0) {
-                                       for (read_group = write_group *
-                                       RW_MGR_MEM_IF_READ_DQS_WIDTH /
-                                       RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
-                                       read_test_bgn = 0;
-                                               read_group < (write_group + 1)
-                                               * RW_MGR_MEM_IF_READ_DQS_WIDTH
-                                               / RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
-                                               group_failed == 0;
-                                               read_group++, read_test_bgn +=
-                                               RW_MGR_MEM_DQ_PER_READ_DQS) {
-                                               if (!((STATIC_CALIB_STEPS) &
-                                                       CALIB_SKIP_WRITES)) {
-                                       if (!rw_mgr_mem_calibrate_vfifo_end
-                                               (read_group, read_test_bgn)) {
-                                                       group_failed = 1;
-
-                                               if (!(gbl->phy_debug_mode_flags
-                                               & PHY_DEBUG_SWEEP_ALL_GROUPS)) {
-                                                               return 0;
-                                                               }
+                                       if (!(gbl->phy_debug_mode_flags
+                                       & PHY_DEBUG_SWEEP_ALL_GROUPS)) {
+                                                       return 0;
                                                        }
                                                }
                                        }
                                }
-
-                               if (group_failed != 0)
-                                       failing_groups++;
                        }
 
+                       if (group_failed != 0)
+                               failing_groups++;
+               }
+
+               /*
+                * If there are any failing groups then report
+                * the failure.
+                */
+               if (failing_groups != 0)
+                       return 0;
+
+               /* Calibrate the LFIFO */
+               if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) {
                        /*
-                        * USER If there are any failing groups then report
-                        * the failure.
+                        * If we're skipping groups as part of debug,
+                        * don't calibrate LFIFO.
                         */
-                       if (failing_groups != 0)
-                               return 0;
-
-                       /* Calibrate the LFIFO */
-                       if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) {
-                               /*
-                                * If we're skipping groups as part of debug,
-                                * don't calibrate LFIFO.
-                                */
-                               if (param->skip_groups == 0) {
-                                       if (!rw_mgr_mem_calibrate_lfifo())
-                                               return 0;
-                               }
+                       if (param->skip_groups == 0) {
+                               if (!rw_mgr_mem_calibrate_lfifo())
+                                       return 0;
                        }
                }
        }
@@ -3448,44 +3447,57 @@ static uint32_t mem_calibrate(void)
        return 1;
 }
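A small sketch of how the new rwdqs_ratio maps read DQS groups onto write DQS groups in the calibration loop above. The interface widths are chosen purely for illustration; the real widths are generated per board.

#include <stdio.h>

int main(void)
{
	/* Widths chosen purely for illustration. */
	const unsigned int if_read_dqs_width = 8;
	const unsigned int if_write_dqs_width = 4;
	const unsigned int rwdqs_ratio = if_read_dqs_width / if_write_dqs_width;
	unsigned int write_group, read_group;

	for (write_group = 0; write_group < if_write_dqs_width; write_group++) {
		printf("write group %u -> read groups", write_group);
		for (read_group = write_group * rwdqs_ratio;
		     read_group < (write_group + 1) * rwdqs_ratio;
		     read_group++)
			printf(" %u", read_group);
		printf("\n");
	}
	return 0;
}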
 
-static uint32_t run_mem_calibrate(void)
+/**
+ * run_mem_calibrate() - Perform memory calibration
+ *
+ * This function triggers the entire memory calibration procedure.
+ */
+static int run_mem_calibrate(void)
 {
-       uint32_t pass;
-       uint32_t debug_info;
+       int pass;
 
        debug("%s:%d\n", __func__, __LINE__);
 
        /* Reset pass/fail status shown on afi_cal_success/fail */
        writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
 
-       /* stop tracking manger */
-       uint32_t ctrlcfg = readl(&sdr_ctrl->ctrl_cfg);
-
-       writel(ctrlcfg & 0xFFBFFFFF, &sdr_ctrl->ctrl_cfg);
+       /* Stop tracking manager. */
+       clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
 
-       initialize();
+       phy_mgr_initialize();
        rw_mgr_mem_initialize();
 
+       /* Perform the actual memory calibration. */
        pass = mem_calibrate();
 
        mem_precharge_and_activate();
        writel(0, &phy_mgr_cmd->fifo_reset);
 
+       /* Handoff. */
+       rw_mgr_mem_handoff();
        /*
-        * Handoff:
-        * Don't return control of the PHY back to AFI when in debug mode.
+        * In Hard PHY this is a 2-bit control:
+        * 0: AFI Mux Select
+        * 1: DDIO Mux Select
         */
-       if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) {
-               rw_mgr_mem_handoff();
-               /*
-                * In Hard PHY this is a 2-bit control:
-                * 0: AFI Mux Select
-                * 1: DDIO Mux Select
-                */
-               writel(0x2, &phy_mgr_cfg->mux_sel);
-       }
+       writel(0x2, &phy_mgr_cfg->mux_sel);
+
+       /* Start tracking manager. */
+       setbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22);
+
+       return pass;
+}
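The tracking-manager toggle above replaces the open-coded mask: ~(1 << 22) is exactly the 0xFFBFFFFF used by the removed code. Note the removed code restored the saved ctrl_cfg value at the end, while the new code sets the tracking bit unconditionally. A standalone check, with the register modelled as a plain variable and an assumed initial value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t ctrl_cfg = 0xdeadbeef;	/* assumed register contents */

	/* Removed code: writel(ctrlcfg & 0xFFBFFFFF, &sdr_ctrl->ctrl_cfg); */
	uint32_t old_way = ctrl_cfg & 0xFFBFFFFF;
	/* New code: clrbits_le32(&sdr_ctrl->ctrl_cfg, 1 << 22) clears the
	 * same tracking-manager bit. */
	uint32_t new_way = ctrl_cfg & ~(uint32_t)(1 << 22);

	assert(old_way == new_way);	/* ~(1 << 22) == 0xFFBFFFFF */
	return 0;
}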
 
-       writel(ctrlcfg, &sdr_ctrl->ctrl_cfg);
+/**
+ * debug_mem_calibrate() - Report result of memory calibration
+ * @pass:      Value indicating whether calibration passed or failed
+ *
+ * This function reports the results of the memory calibration
+ * and writes debug information into the register file.
+ */
+static void debug_mem_calibrate(int pass)
+{
+       uint32_t debug_info;
 
        if (pass) {
                printf("%s: CALIBRATION PASSED\n", __FILE__);
@@ -3524,7 +3536,7 @@ static uint32_t run_mem_calibrate(void)
                writel(debug_info, &sdr_reg_file->failing_stage);
        }
 
-       return pass;
+       printf("%s: Calibration complete\n", __FILE__);
 }
 
 /**
@@ -3612,65 +3624,47 @@ static void initialize_hps_phy(void)
        writel(reg, &sdr_ctrl->phy_ctrl2);
 }
 
+/**
+ * initialize_tracking() - Initialize tracking
+ *
+ * Initialize the register file with usable initial data.
+ */
 static void initialize_tracking(void)
 {
-       uint32_t concatenated_longidle = 0x0;
-       uint32_t concatenated_delays = 0x0;
-       uint32_t concatenated_rw_addr = 0x0;
-       uint32_t concatenated_refresh = 0x0;
-       uint32_t trk_sample_count = 7500;
-       uint32_t dtaps_per_ptap;
-       uint32_t tmp_delay;
+       /*
+        * Initialize the register file with the correct data.
+        * Compute usable version of value in case we skip full
+        * computation later.
+        */
+       writel(DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1,
+              &sdr_reg_file->dtaps_per_ptap);
+
+       /* trk_sample_count */
+       writel(7500, &sdr_reg_file->trk_sample_count);
+
+       /* longidle outer loop [31:16], longidle sample count [15:0] */
+       writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);
 
        /*
-        * compute usable version of value in case we skip full
-        * computation later
+        * trfc, worst case of 933Mhz 4Gb [31:24]
+        * trcd, worst case [23:16]
+        * vfifo wait [15:8]
+        * mux delay [7:0]
         */
-       dtaps_per_ptap = 0;
-       tmp_delay = 0;
-       while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
-               dtaps_per_ptap++;
-               tmp_delay += IO_DELAY_PER_DCHAIN_TAP;
-       }
-       dtaps_per_ptap--;
-
-       concatenated_longidle = concatenated_longidle ^ 10;
-               /*longidle outer loop */
-       concatenated_longidle = concatenated_longidle << 16;
-       concatenated_longidle = concatenated_longidle ^ 100;
-               /*longidle sample count */
-       concatenated_delays = concatenated_delays ^ 243;
-               /* trfc, worst case of 933Mhz 4Gb */
-       concatenated_delays = concatenated_delays << 8;
-       concatenated_delays = concatenated_delays ^ 14;
-               /* trcd, worst case */
-       concatenated_delays = concatenated_delays << 8;
-       concatenated_delays = concatenated_delays ^ 10;
-               /* vfifo wait */
-       concatenated_delays = concatenated_delays << 8;
-       concatenated_delays = concatenated_delays ^ 4;
-               /* mux delay */
-
-       concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_IDLE;
-       concatenated_rw_addr = concatenated_rw_addr << 8;
-       concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_ACTIVATE_1;
-       concatenated_rw_addr = concatenated_rw_addr << 8;
-       concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_SGLE_READ;
-       concatenated_rw_addr = concatenated_rw_addr << 8;
-       concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_PRECHARGE_ALL;
-
-       concatenated_refresh = concatenated_refresh ^ RW_MGR_REFRESH_ALL;
-       concatenated_refresh = concatenated_refresh << 24;
-       concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */
+       writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
+              &sdr_reg_file->delays);
 
-       /* Initialize the register file with the correct data */
-       writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
-       writel(trk_sample_count, &sdr_reg_file->trk_sample_count);
-       writel(concatenated_longidle, &sdr_reg_file->trk_longidle);
-       writel(concatenated_delays, &sdr_reg_file->delays);
-       writel(concatenated_rw_addr, &sdr_reg_file->trk_rw_mgr_addr);
-       writel(RW_MGR_MEM_IF_READ_DQS_WIDTH, &sdr_reg_file->trk_read_dqs_width);
-       writel(concatenated_refresh, &sdr_reg_file->trk_rfsh);
+       /* RW manager command addresses. */
+       writel((RW_MGR_IDLE << 24) | (RW_MGR_ACTIVATE_1 << 16) |
+              (RW_MGR_SGLE_READ << 8) | (RW_MGR_PRECHARGE_ALL << 0),
+              &sdr_reg_file->trk_rw_mgr_addr);
+
+       writel(RW_MGR_MEM_IF_READ_DQS_WIDTH,
+              &sdr_reg_file->trk_read_dqs_width);
+
+       /* refresh all command [31:24], trefi [23:0] */
+       writel((RW_MGR_REFRESH_ALL << 24) | (1000 << 0),
+              &sdr_reg_file->trk_rfsh);
 }
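A standalone check, with assumed delay values, that DIV_ROUND_UP(IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP) - 1 produces the same dtaps_per_ptap value as the tap-counting loop removed above. The delays below are illustrative; the real ones come from the generated headers.

#include <assert.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed delays, for illustration only. */
	const unsigned int io_delay_per_opa_tap = 312;
	const unsigned int io_delay_per_dchain_tap = 25;

	/* Removed code: count delay-chain taps until one phase tap is covered,
	 * then back off by one. */
	unsigned int dtaps_per_ptap = 0, tmp_delay = 0;
	while (tmp_delay < io_delay_per_opa_tap) {
		dtaps_per_ptap++;
		tmp_delay += io_delay_per_dchain_tap;
	}
	dtaps_per_ptap--;

	/* New code: same result in one expression. */
	assert(dtaps_per_ptap ==
	       DIV_ROUND_UP(io_delay_per_opa_tap, io_delay_per_dchain_tap) - 1);
	return 0;
}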
 
 int sdram_calibration_full(void)
@@ -3678,13 +3672,13 @@ int sdram_calibration_full(void)
        struct param_type my_param;
        struct gbl_type my_gbl;
        uint32_t pass;
-       uint32_t i;
+
+       memset(&my_param, 0, sizeof(my_param));
+       memset(&my_gbl, 0, sizeof(my_gbl));
 
        param = &my_param;
        gbl = &my_gbl;
 
-       /* Initialize the debug mode flags */
-       gbl->phy_debug_mode_flags = 0;
        /* Set the calibration enabled by default */
        gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
        /*
@@ -3704,13 +3698,6 @@ int sdram_calibration_full(void)
 
        initialize_tracking();
 
-       /* USER Enable all ranks, groups */
-       for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++)
-               param->skip_ranks[i] = 0;
-       for (i = 0; i < NUM_SHADOW_REGS; ++i)
-               param->skip_shadow_regs[i] = 0;
-       param->skip_groups = 0;
-
        printf("%s: Preparing to start memory calibration\n", __FILE__);
 
        debug("%s:%d\n", __func__, __LINE__);
@@ -3757,7 +3744,6 @@ int sdram_calibration_full(void)
                skip_delay_mask = 0x0;
 
        pass = run_mem_calibrate();
-
-       printf("%s: Calibration complete\n", __FILE__);
+       debug_mem_calibrate(pass);
        return pass;
 }