1 // SPDX-License-Identifier: GPL-2.0
3 * drivers/mtd/nand/pxa3xx_nand.c
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
13 #include <linux/errno.h>
15 #include <asm/arch/cpu.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/rawnand.h>
18 #include <linux/types.h>
20 #include "pxa3xx_nand.h"
22 DECLARE_GLOBAL_DATA_PTR;
24 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
25 #define CHIP_DELAY_TIMEOUT 200
26 #define NAND_STOP_DELAY 40
27 #define PAGE_CHUNK_SIZE (2048)
30 * Define a buffer size for the initial command that detects the flash device:
31 * STATUS, READID and PARAM. The largest of these is the PARAM command,
34 #define INIT_BUFFER_SIZE 256
36 /* registers and bit definitions */
37 #define NDCR (0x00) /* Control register */
38 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
39 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
40 #define NDSR (0x14) /* Status Register */
41 #define NDPCR (0x18) /* Page Count Register */
42 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
43 #define NDBDR1 (0x20) /* Bad Block Register 1 */
44 #define NDECCCTRL (0x28) /* ECC control */
45 #define NDDB (0x40) /* Data Buffer */
46 #define NDCB0 (0x48) /* Command Buffer0 */
47 #define NDCB1 (0x4C) /* Command Buffer1 */
48 #define NDCB2 (0x50) /* Command Buffer2 */
50 #define NDCR_SPARE_EN (0x1 << 31)
51 #define NDCR_ECC_EN (0x1 << 30)
52 #define NDCR_DMA_EN (0x1 << 29)
53 #define NDCR_ND_RUN (0x1 << 28)
54 #define NDCR_DWIDTH_C (0x1 << 27)
55 #define NDCR_DWIDTH_M (0x1 << 26)
56 #define NDCR_PAGE_SZ (0x1 << 24)
57 #define NDCR_NCSX (0x1 << 23)
58 #define NDCR_ND_MODE (0x3 << 21)
59 #define NDCR_NAND_MODE (0x0)
60 #define NDCR_CLR_PG_CNT (0x1 << 20)
61 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
62 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
63 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
65 #define NDCR_RA_START (0x1 << 15)
66 #define NDCR_PG_PER_BLK (0x1 << 14)
67 #define NDCR_ND_ARB_EN (0x1 << 12)
68 #define NDCR_INT_MASK (0xFFF)
70 #define NDSR_MASK (0xfff)
71 #define NDSR_ERR_CNT_OFF (16)
72 #define NDSR_ERR_CNT_MASK (0x1f)
73 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
74 #define NDSR_RDY (0x1 << 12)
75 #define NDSR_FLASH_RDY (0x1 << 11)
76 #define NDSR_CS0_PAGED (0x1 << 10)
77 #define NDSR_CS1_PAGED (0x1 << 9)
78 #define NDSR_CS0_CMDD (0x1 << 8)
79 #define NDSR_CS1_CMDD (0x1 << 7)
80 #define NDSR_CS0_BBD (0x1 << 6)
81 #define NDSR_CS1_BBD (0x1 << 5)
82 #define NDSR_UNCORERR (0x1 << 4)
83 #define NDSR_CORERR (0x1 << 3)
84 #define NDSR_WRDREQ (0x1 << 2)
85 #define NDSR_RDDREQ (0x1 << 1)
86 #define NDSR_WRCMDREQ (0x1)
88 #define NDCB0_LEN_OVRD (0x1 << 28)
89 #define NDCB0_ST_ROW_EN (0x1 << 26)
90 #define NDCB0_AUTO_RS (0x1 << 25)
91 #define NDCB0_CSEL (0x1 << 24)
92 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
93 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
94 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
95 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
96 #define NDCB0_NC (0x1 << 20)
97 #define NDCB0_DBC (0x1 << 19)
98 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
99 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
100 #define NDCB0_CMD2_MASK (0xff << 8)
101 #define NDCB0_CMD1_MASK (0xff)
102 #define NDCB0_ADDR_CYC_SHIFT (16)
104 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
105 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
106 #define EXT_CMD_TYPE_READ 4 /* Read */
107 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
108 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
109 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
110 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
112 /* macros for registers read/write */
113 #define nand_writel(info, off, val) \
114 writel((val), (info)->mmio_base + (off))
116 #define nand_readl(info, off) \
117 readl((info)->mmio_base + (off))
119 /* error code and state */
/* Controller generation: NFCv1 (PXA SoC) vs NFCv2 (Armada 370/XP SoC). */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
/* Per-chip-select state for one attached NAND device. */
struct pxa3xx_nand_host {
	struct nand_chip chip;
	struct mtd_info *mtd;

	/* page size of attached chip */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int col_addr_cycles;	/* column address cycles to issue */
	unsigned int row_addr_cycles;	/* row address cycles to issue */
	size_t read_id_bytes;		/* ID bytes to fetch for READID */
/*
 * Driver-wide state: controller register base, bounce buffers, ECC
 * geometry and per-chip-select host pointers.
 */
struct pxa3xx_nand_info {
	struct nand_hw_control controller;
	struct pxa3xx_nand_platform_data *pdata;

	void __iomem *mmio_base;	/* controller register window */
	unsigned long mmio_phys;
	int cmd_complete, dev_ready;	/* polled completion flags */

	unsigned int buf_start;		/* read cursor into data_buff */
	unsigned int buf_count;		/* valid bytes in data_buff */
	unsigned int buf_size;		/* allocated size of data_buff */
	unsigned int data_buff_pos;	/* chunked-PIO data cursor */
	unsigned int oob_buff_pos;	/* chunked-PIO OOB cursor */

	unsigned char *data_buff;	/* page data bounce buffer */
	unsigned char *oob_buff;	/* = data_buff + writesize (set in scan) */

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int use_ecc;	/* use HW ECC ? */
	int ecc_bch;	/* using BCH ECC? */
	int use_spare;	/* use spare ? */

	unsigned int data_size;	/* data to be read from FIFO */
	unsigned int chunk_size;	/* split commands chunk size */
	unsigned int oob_size;		/* OOB bytes per chunk transfer */
	unsigned int spare_size;	/* spare area bytes per chunk */
	unsigned int ecc_size;		/* ECC bytes per chunk */
	unsigned int ecc_err_cnt;	/* bitflips reported by last op */
	unsigned int max_bitflips;	/* worst chunk of the last page read */

	/* cached register value */

	/* generated NDCBx register values */
/*
 * Legacy (non-ONFI) timing presets in nanoseconds; field order is
 * assumed to match struct pxa3xx_nand_timing in pxa3xx_nand.h --
 * verify against the header.
 */
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
	{ 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
	{ 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
/*
 * Known non-ONFI chips, matched by the 2-byte READID value in
 * pxa3xx_nand_init_timings(); the struct also carries dfc_width,
 * flash_width and a pointer into timing[] (see uses below).
 */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec, 8, 8, &timing[1] },
	{ 0xd7ec, 8, 8, &timing[1] },
	{ 0xa12c, 8, 8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c, 8, 8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/* Signatures identifying the main and mirror bad-block tables in flash. */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* Main BBT: versioned, 2 bits per block, kept in the last blocks. */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.maxblocks = 8,	/* Last 8 blocks in each chip */
	.pattern = bbt_pattern

/* Mirror BBT descriptor; same placement policy as the main table. */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.maxblocks = 8,	/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
/*
 * OOB byte maps for the BCH configurations selected in pxa_ecc_init().
 * The lists below are the ECC byte positions within the OOB area;
 * initializer field names (.eccbytes/.eccpos) are not visible in this
 * listing -- verify against the full source.
 */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	32, 33, 34, 35, 36, 37, 38, 39,
	40, 41, 42, 43, 44, 45, 46, 47,
	48, 49, 50, 51, 52, 53, 54, 55,
	56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	32, 33, 34, 35, 36, 37, 38, 39,
	40, 41, 42, 43, 44, 45, 46, 47,
	48, 49, 50, 51, 52, 53, 54, 55,
	56, 57, 58, 59, 60, 61, 62, 63,
	96, 97, 98, 99, 100, 101, 102, 103,
	104, 105, 106, 107, 108, 109, 110, 111,
	112, 113, 114, 115, 116, 117, 118, 119,
	120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	32, 33, 34, 35, 36, 37, 38, 39,
	40, 41, 42, 43, 44, 45, 46, 47,
	48, 49, 50, 51, 52, 53, 54, 55,
	56, 57, 58, 59, 60, 61, 62, 63},
/*
 * Pack cycle counts into the NDTR0/NDTR1 bitfields, clamping each
 * value to its field width so an out-of-range timing saturates
 * instead of corrupting neighbouring fields.
 */
#define NDTR0_tCH(c) (min((c), 7) << 19)
#define NDTR0_tCS(c) (min((c), 7) << 16)
#define NDTR0_tWH(c) (min((c), 7) << 11)
#define NDTR0_tWP(c) (min((c), 7) << 8)
#define NDTR0_tRH(c) (min((c), 7) << 3)
#define NDTR0_tRP(c) (min((c), 7) << 0)

#define NDTR1_tR(c) (min((c), 65535) << 16)
#define NDTR1_tWHR(c) (min((c), 15) << 4)
#define NDTR1_tAR(c) (min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
/*
 * NOTE(review): pure integer arithmetic -- both divisions truncate,
 * so the result rounds DOWN and can under-program a timing by up to
 * one cycle; confirm this matches the controller's margin assumptions.
 */
#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
309 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
311 /* We only support the Armada 370/XP/38x for now */
312 return PXA3XX_NAND_VARIANT_ARMADA370;
/*
 * Program NDTR0/NDTR1 from a legacy pxa3xx_nand_timing entry,
 * converting each nanosecond figure to controller clock cycles, and
 * cache the values in info for re-programming on chip-select switch.
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	/* Cache so nand_cmdfunc() can restore timings on CS change */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Program NDTR0/NDTR1 from ONFI SDR timings (picoseconds in the
 * nand_sdr_timings struct, converted to ns here, then to cycles).
 */
static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	/*
	 * NOTE(review): tWH_min/tREH_min are already converted to ns here
	 * while tWC_min/tRC_min are still in ps -- the subtraction mixes
	 * units. This matches some upstream versions of this driver but
	 * looks wrong; verify against current upstream before relying on
	 * these write/read pulse widths.
	 */
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;	/* chip_delay is in us */
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	/* Cache so nand_cmdfunc() can restore timings on CS change */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Choose and program timings for the detected chip: ONFI parts use
 * their advertised async timing mode; otherwise fall back to the
 * builtin_flash_types table keyed by the 2-byte READID value.
 * NOTE(review): several lines (braces, break/return statements) are
 * missing from this listing.
 */
static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		ntypes = ARRAY_SIZE(builtin_flash_types);

		/* Re-issue READID and assemble the 2-byte device id */
		chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(host->mtd);
		id |= chip->read_byte(host->mtd) << 0x8;

		/* Linear search of the builtin table for a matching id */
		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)

		dev_err(&info->pdev->dev, "Error: timings not found\n");

		pxa3xx_nand_set_timing(host, f->timing);

		/* Propagate the bus width into NDCR and chip options */
		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

		/* Use the highest supported ONFI timing mode */
		mode = fls(mode) - 1;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);

		return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
/*
 * Set the data and OOB size, depending on the selected
 * spare and ECC configuration.
 * Only applicable to READ0, READOOB and PAGEPROG commands.
 */
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
				struct mtd_info *mtd)
	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

	/* A full page of data is always transferred */
	info->data_size = mtd->writesize;

	/* OOB transfer covers the spare area, plus ECC bytes when HW ECC is on */
	info->oob_size = info->spare_size;

	info->oob_size += info->ecc_size;
/*
 * NOTE: it is a must to set ND_RUN first, then write
 * command buffer, otherwise, it does not work.
 * We enable all the interrupt at the same time, and
 * let pxa3xx_nand_irq to handle all logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)

	ndcr = info->reg_ndcr;

	/* BCH on/off is controlled through NDECCCTRL bit 0 (NFCv2) */
	nand_writel(info, NDECCCTRL, 0x1);

	ndcr &= ~NDCR_ECC_EN;

	nand_writel(info, NDECCCTRL, 0x0);

	/* DMA is not used by this driver: transfers are PIO */
	ndcr &= ~NDCR_DMA_EN;

	ndcr |= NDCR_SPARE_EN;

	ndcr &= ~NDCR_SPARE_EN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
486 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
490 ndcr = nand_readl(info, NDCR);
491 nand_writel(info, NDCR, ndcr | int_mask);
/*
 * Read 'len' units from the controller data FIFO into 'data'.
 * NOTE(review): readsl() transfers 32-bit words, so len appears to be
 * a word count, not bytes -- verify with the callers.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)

	/*
	 * According to the datasheet, when reading from NDDB
	 * with BCH enabled, after each 32 bytes reads, we
	 * have to make sure that the NDSR.RDDREQ bit is set.
	 *
	 * Drain the FIFO 8 32 bits reads at a time, and skip
	 * the polling on the last read.
	 */
	readsl(info->mmio_base + NDDB, data, 8);

	/* Wait for the controller to refill the FIFO, with a timeout */
	while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
		if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
			dev_err(&info->pdev->dev,
				"Timeout on RDDREQ while draining the FIFO\n");

	/* Non-BCH path (or final remainder): one burst read of len words */
	readsl(info->mmio_base + NDDB, data, len);
/*
 * PIO-transfer one chunk (min(data_size, chunk_size) data bytes plus
 * oob_size OOB bytes) between the FIFO and the bounce buffers, then
 * advance the chunk cursors for multi-chunk pages.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		/* writesl/readsl move 32-bit words, hence the /4 round-up */
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));

	case STATE_PIO_READING:
		info->data_buff + info->data_buff_pos,
		DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			info->oob_buff + info->oob_buff_pos,
			DIV_ROUND_UP(info->oob_size, 4));

		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
564 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
566 handle_data_pio(info);
568 info->state = STATE_CMD_DONE;
569 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/*
 * Polled "interrupt" handler: decode NDSR, record ECC results, run
 * pending PIO transfers, push the prepared NDCBx words when the
 * controller asks for a command, and acknowledge handled bits.
 */
static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* Select the per-chip-select ready/command-done status bits */
	ready = NDSR_FLASH_RDY;
	cmd_done = NDSR_CS0_CMDD;

	cmd_done = NDSR_CS1_CMDD;

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* NFCv2 reports the corrected-bit count in NDSR */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
			info->ecc_err_cnt = NDSR_ERR_CNT(status);

			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,

	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);

	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;

	if (status & ready) {
		info->state = STATE_READY;

	if (status & NDSR_WRCMDREQ) {
		/* Ack first so the write below is accepted */
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);

	info->cmd_complete = 1;
/*
 * Return 1 when every byte of buf[0..len) is 0xFF (an erased/blank
 * page), 0 otherwise. An empty buffer (len == 0) counts as blank.
 * The loop body and returns were truncated in the listing and are
 * restored here.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
/*
 * Fill NDCB1/NDCB2 with address cycles: small-page chips pack the
 * column into the low byte of NDCB1 with the page address above it;
 * large-page chips use a 16-bit column and up to 24 bits of page
 * address spilling into NDCB2.
 */
static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)

		info->ndcb1 = ((page_addr & 0xFFFF) << 16)

		/* High page-address byte goes into NDCB2 */
		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
/*
 * Reset per-command state (buffer cursors, error bookkeeping) and,
 * for data-transfer commands, compute transfer sizes and pre-fill the
 * bounce buffer with 0xFF.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */

	info->data_buff_pos = 0;
	info->oob_buff_pos = 0;

	info->retcode = ERR_NONE;
	info->ecc_err_cnt = 0;

	case NAND_CMD_PAGEPROG:

	case NAND_CMD_READOOB:
		/* These commands move page data: size the transfer */
		pxa3xx_set_datasize(info, mtd);

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Translate an MTD command into the NDCB0..NDCB3 command-buffer words.
 * Returns exec_cmd: non-zero when the controller must actually be
 * started for this command. NOTE(review): many lines (switch header,
 * breaks, braces) are missing from this listing.
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];

	info->ndcb0 = NDCB0_CSEL;

	if (command == NAND_CMD_SEQIN)

	/* Total address cycles = row + column cycles for this chip */
	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
			+ host->col_addr_cycles);

	case NAND_CMD_READOOB:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)

		/* OOB data sits after the page data in the bounce buffer */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +

		set_command_address(info, mtd->writesize, column, page_addr);

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)

			/* No data transfer in this case */

	case NAND_CMD_PAGEPROG:
		/* Skip programming an entirely blank page */
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)

			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| (NAND_CMD_PAGEPROG << 8)

		info->buf_count = 256;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)

		info->ndcb1 = (column & 0xFF);

		info->data_size = 256;

	case NAND_CMD_READID:
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)

		info->ndcb1 = (column & 0xFF);

	case NAND_CMD_STATUS:
		info->ndcb0 |= NDCB0_CMD_TYPE(4)

	case NAND_CMD_ERASE1:
		/* ERASE1+ERASE2 issued as one double-byte command (DBC) */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| (NAND_CMD_ERASE2 << 8)

		info->ndcb1 = page_addr;

		info->ndcb0 |= NDCB0_CMD_TYPE(5)

	case NAND_CMD_ERASE2:

		dev_err(&info->pdev->dev, "non-supported command %x\n",
/*
 * nand_chip.cmdfunc for single-chunk (<= 2KB FIFO) devices: prepare
 * the NDCBx words, start the controller, then poll NDSR through
 * pxa3xx_nand_irq() until the command completes or times out.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	/*
	 * if this is a x16 device ,then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	info->cmd_complete = 0;

	pxa3xx_nand_start(info);

	/* Poll loop: service status bits until the command completes */
	status = nand_readl(info, NDSR);

	pxa3xx_nand_irq(info);

	if (info->cmd_complete)

	if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
		dev_err(&info->pdev->dev, "Wait timeout!!!\n");

	info->state = STATE_IDLE;
/*
 * nand_chip.cmdfunc for large-page (> 2KB) devices on NFCv2: a page
 * is transferred as a sequence of naked/chunked sub-commands, so the
 * command is re-issued in a loop, updating ext_cmd_type between
 * chunks until the whole page (and, for PAGEPROG, the final command
 * dispatch) is done.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);

	/* Select the extended command for the first command */

	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;

		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;

	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, ext_cmd_type,

	/* Nothing to execute: report ready immediately */
	info->need_wait = 0;
	info->dev_ready = 1;

	info->cmd_complete = 0;
	pxa3xx_nand_start(info);

	/* Poll for completion of this sub-command */
	status = nand_readl(info, NDSR);

	pxa3xx_nand_irq(info);

	if (info->cmd_complete)

	if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
		dev_err(&info->pdev->dev, "Wait timeout!!!\n");

	/* Check if the sequence is complete */
	if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)

	/*
	 * After a splitted program command sequence has issued
	 * the command dispatch, the command sequence is complete.
	 */
	if (info->data_size == 0 &&
	    command == NAND_CMD_PAGEPROG &&
	    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)

	if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
		/* Last read: issue a 'last naked read' */
		if (info->data_size == info->chunk_size)
			ext_cmd_type = EXT_CMD_TYPE_LAST_RW;

			ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

	/*
	 * If a splitted program command has no more data to transfer,
	 * the command dispatch must be issued to complete.
	 */
	} else if (command == NAND_CMD_PAGEPROG &&
		   info->data_size == 0) {
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;

	info->state = STATE_IDLE;
1087 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1088 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1091 chip->write_buf(mtd, buf, mtd->writesize);
1092 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
 * ecc.read_page: the READ0 command already moved data+OOB into the
 * bounce buffer; copy them out and fold the controller's ECC result
 * into mtd->ecc_stats. Returns the worst per-chunk bitflip count.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * for blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB, ignore such uncorrectable errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;

		mtd->ecc_stats.failed++;

	return info->max_bitflips;
/*
 * nand_chip.read_byte: return the next byte from the bounce buffer
 * filled by the last command. NOTE(review): the declaration of
 * 'retval' (and its default when the buffer is exhausted) is missing
 * from this listing -- presumably 0xFF; verify against full source.
 */
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->buf_start < info->buf_count)
		/* Has just send a new command? */
		retval = info->data_buff[info->buf_start++];
/*
 * nand_chip.read_word: return the next aligned 16-bit word from the
 * bounce buffer; 0xFFFF when the cursor is misaligned or exhausted.
 */
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	/* Only serve even offsets with at least one byte remaining */
	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
/*
 * nand_chip.read_buf: copy up to len bytes out of the bounce buffer,
 * clamped to what remains past the read cursor.
 */
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
/*
 * nand_chip.write_buf: stage up to len bytes into the bounce buffer
 * (consumed later by PAGEPROG), clamped to the remaining space.
 */
static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
				  const uint8_t *buf, int len)
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
/*
 * nand_chip.select_chip: intentionally a no-op -- chip-select handling
 * is done in cmdfunc via info->cs (see nand_cmdfunc). Body braces were
 * lost in the truncated listing and are restored.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
/*
 * nand_chip.waitfunc: for extended-command sequences (need_wait set),
 * poll NDSR via pxa3xx_nand_irq() until dev_ready, with a timeout;
 * then report program/erase success or failure via retcode.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;

		/* Poll until the controller reports device ready */
		status = nand_readl(info, NDSR);

		pxa3xx_nand_irq(info);

		if (info->dev_ready)

		if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
			dev_err(&info->pdev->dev, "Ready timeout!!!\n");
			return NAND_STATUS_FAIL;

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)

			return NAND_STATUS_FAIL;

	return NAND_STATUS_READY;
/*
 * Derive NDCR geometry bits from the scanned chip: row-address start
 * (2 column cycles), pages-per-block (page_shift == 6) and the 2KB
 * page-size flag.
 */
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;
	struct nand_chip *chip = mtd_to_nand(mtd);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
/*
 * keep_config path: trust the NDCR/NDTRx values already programmed by
 * the firmware instead of re-detecting, and cache them in info
 * (interrupt-mask bits stripped from the cached NDCR).
 */
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
	/*
	 * We set 0 by hard coding here, for we don't support keep_config
	 * when there is more than one chip attached to the controller
	 */
	struct pxa3xx_nand_host *host = info->host[0];
	uint32_t ndcr = nand_readl(info, NDCR);

	if (ndcr & NDCR_PAGE_SZ) {
		/* Controller's FIFO size */
		info->chunk_size = 2048;
		host->read_id_bytes = 4;

		info->chunk_size = 512;
		host->read_id_bytes = 2;

	/* Set an initial chunk size */
	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1259 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1261 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1262 if (info->data_buff == NULL)
/*
 * Minimal presence probe: program safe NDCR defaults and ONFI mode-0
 * timings, issue a RESET and check the chip responds on this chip
 * select.
 */
static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;

	mtd = info->host[info->cs]->mtd;
	chip = mtd_to_nand(mtd);

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	/* RESET the chip and see whether it acknowledges */
	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
/*
 * Select the ECC geometry (chunk/spare/ECC byte counts, Hamming vs
 * BCH, OOB layout) for the requested strength/step/page-size combo.
 * Unsupported combinations are reported and rejected.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
	/* 1-bit Hamming, 2KB page */
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;

	/* 1-bit Hamming, 512B page */
	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->chunk_size = 512;
		info->spare_size = 8;

		ecc->mode = NAND_ECC_HW;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;

		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
1367 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1369 struct nand_chip *chip = mtd_to_nand(mtd);
1370 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1371 struct pxa3xx_nand_info *info = host->info_data;
1372 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1374 uint16_t ecc_strength, ecc_step;
1376 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1379 /* Set a default chunk size */
1380 info->chunk_size = 512;
1382 ret = pxa3xx_nand_sensing(host);
1384 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1391 /* Device detection must be done with ECC disabled */
1392 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1393 nand_writel(info, NDECCCTRL, 0x0);
1395 if (nand_scan_ident(mtd, 1, NULL))
1398 if (!pdata->keep_config) {
1399 ret = pxa3xx_nand_init_timings(host);
1401 dev_err(&info->pdev->dev,
1402 "Failed to set timings: %d\n", ret);
1407 ret = pxa3xx_nand_config_flash(info);
1411 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1413 * We'll use a bad block table stored in-flash and don't
1414 * allow writing the bad block marker to the flash.
1416 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1417 chip->bbt_td = &bbt_main_descr;
1418 chip->bbt_md = &bbt_mirror_descr;
1422 * If the page size is bigger than the FIFO size, let's check
1423 * we are given the right variant and then switch to the extended
1424 * (aka splitted) command handling,
1426 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1427 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1428 chip->cmdfunc = nand_cmdfunc_extended;
1430 dev_err(&info->pdev->dev,
1431 "unsupported page size on this variant\n");
1436 if (pdata->ecc_strength && pdata->ecc_step_size) {
1437 ecc_strength = pdata->ecc_strength;
1438 ecc_step = pdata->ecc_step_size;
1440 ecc_strength = chip->ecc_strength_ds;
1441 ecc_step = chip->ecc_step_ds;
1444 /* Set default ECC strength requirements on non-ONFI devices */
1445 if (ecc_strength < 1 && ecc_step < 1) {
1450 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1451 ecc_step, mtd->writesize);
1455 /* calculate addressing information */
1456 if (mtd->writesize >= 2048)
1457 host->col_addr_cycles = 2;
1459 host->col_addr_cycles = 1;
1461 /* release the initial buffer */
1462 kfree(info->data_buff);
1464 /* allocate the real data + oob buffer */
1465 info->buf_size = mtd->writesize + mtd->oobsize;
1466 ret = pxa3xx_nand_init_buff(info);
1469 info->oob_buff = info->data_buff + mtd->writesize;
1471 if ((mtd->size >> chip->page_shift) > 65536)
1472 host->row_addr_cycles = 3;
1474 host->row_addr_cycles = 2;
1475 return nand_scan_tail(mtd);
/*
 * alloc_nand_resource() - set up the per-chip-select host structures and
 * the initial flash-detection buffer.
 *
 * For each chip select, a host (with its embedded nand_chip) is carved
 * out of the memory immediately following @info, the generic nand_chip
 * callbacks are wired to this driver's implementations, and a small
 * INIT_BUFFER_SIZE buffer is allocated so STATUS/READID/PARAM commands
 * can run before the real page-sized buffer exists (see
 * pxa3xx_nand_scan()). Returns 0 on success or a negative error code.
 */
1478 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1480 struct pxa3xx_nand_platform_data *pdata;
1481 struct pxa3xx_nand_host *host;
1482 struct nand_chip *chip = NULL;
1483 struct mtd_info *mtd;
1486 pdata = info->pdata;
1487 if (pdata->num_cs <= 0)
1490 info->variant = pxa3xx_nand_get_variant();
1491 for (cs = 0; cs < pdata->num_cs; cs++) {
/*
 * Hosts live in the storage allocated right after *info (see the
 * kzalloc in board_nand_init()). The chip/host casts below assume
 * struct nand_chip is the first member of struct pxa3xx_nand_host —
 * NOTE(review): verify against the struct definition.
 */
1492 chip = (struct nand_chip *)
1493 ((u8 *)&info[1] + sizeof(*host) * cs);
1494 mtd = nand_to_mtd(chip);
1495 host = (struct pxa3xx_nand_host *)chip;
1496 info->host[cs] = host;
1499 host->info_data = info;
1500 host->read_id_bytes = 4;
1501 mtd->owner = THIS_MODULE;
/* Hook up this driver's implementations of the generic NAND ops. */
1503 nand_set_controller_data(chip, host);
1504 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1505 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1506 chip->controller = &info->controller;
1507 chip->waitfunc = pxa3xx_nand_waitfunc;
1508 chip->select_chip = pxa3xx_nand_select_chip;
1509 chip->read_word = pxa3xx_nand_read_word;
1510 chip->read_byte = pxa3xx_nand_read_byte;
1511 chip->read_buf = pxa3xx_nand_read_buf;
1512 chip->write_buf = pxa3xx_nand_write_buf;
1513 chip->options |= NAND_NO_SUBPAGE_WRITE;
1514 chip->cmdfunc = nand_cmdfunc;
1517 /* Allocate a buffer to allow flash detection */
1518 info->buf_size = INIT_BUFFER_SIZE;
1519 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1520 if (info->data_buff == NULL) {
1522 goto fail_disable_clk;
1525 /* initialize all interrupts to be disabled */
1526 disable_int(info, NDSR_MASK);
/* Error path: release the detection buffer allocated above. */
1530 kfree(info->data_buff);
/*
 * pxa3xx_nand_probe_dt() - build the platform data from the FDT blob.
 *
 * Walks the device tree for "marvell,mvebu-pxa3xx-nand" compatible
 * nodes, takes the first enabled one, and fills a freshly allocated
 * pxa3xx_nand_platform_data (base address, num-cs, arbiter/keep-config
 * flags, optional ECC strength/step overrides) into info->pdata.
 * Returns 0 on success or a negative error code.
 */
1535 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1537 struct pxa3xx_nand_platform_data *pdata;
1538 const void *blob = gd->fdt_blob;
1541 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1545 /* Get address decoding nodes from the FDT blob */
1547 node = fdt_node_offset_by_compatible(blob, node,
1548 "marvell,mvebu-pxa3xx-nand");
1552 /* Bypass disabled nodes */
1553 if (!fdtdec_get_is_enabled(blob, node))
1556 /* Get the first enabled NAND controller base address */
1558 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1559 blob, node, "reg", 0, NULL, true);
/* Only one chip select is supported by this driver. */
1561 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1562 if (pdata->num_cs != 1) {
1563 pr_err("pxa3xx driver supports single CS only\n");
1567 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1568 pdata->enable_arbiter = 1;
1570 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1571 pdata->keep_config = 1;
1575 * Optional ECC overrides: if these are not set, they will be
1576 * selected according to the detected flash type.
1579 pdata->ecc_strength = fdtdec_get_int(blob, node,
1580 "nand-ecc-strength", 0);
1583 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1584 "nand-ecc-step-size", 0);
1586 info->pdata = pdata;
1588 /* Currently support only a single NAND controller */
1591 } while (node >= 0);
/*
 * pxa3xx_nand_probe() - top-level probe: parse the DT, allocate the
 * controller resources, then scan and register an MTD device for each
 * configured chip select. Returns 0 on success or a negative error code.
 */
1596 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1598 struct pxa3xx_nand_platform_data *pdata;
1599 int ret, cs, probe_success;
1601 ret = pxa3xx_nand_probe_dt(info);
1605 pdata = info->pdata;
1607 ret = alloc_nand_resource(info);
1609 dev_err(&pdev->dev, "alloc nand resource failed\n");
1614 for (cs = 0; cs < pdata->num_cs; cs++) {
1615 struct mtd_info *mtd = info->host[cs]->mtd;
1618 * The mtd name matches the one used in 'mtdparts' kernel
1619 * parameter. This name cannot be changed or otherwise
1620 * user's mtd partitions configuration would get broken.
1622 mtd->name = "pxa3xx_nand-0";
1624 ret = pxa3xx_nand_scan(mtd);
1626 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
/* Register the scanned device with the U-Boot MTD/NAND core. */
1631 if (nand_register(cs, mtd))
1644 * Main initialization routine
1646 void board_nand_init(void)
1648 struct pxa3xx_nand_info *info;
1649 struct pxa3xx_nand_host *host;
1652 info = kzalloc(sizeof(*info) +
1653 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1658 ret = pxa3xx_nand_probe(info);