2 * drivers/mtd/nand/pxa3xx_nand.c
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
7 * SPDX-License-Identifier: GPL-2.0
14 #include <linux/errno.h>
16 #include <asm/arch/cpu.h>
17 #include <linux/mtd/mtd.h>
18 #include <linux/mtd/nand.h>
19 #include <linux/types.h>
21 #include "pxa3xx_nand.h"
23 DECLARE_GLOBAL_DATA_PTR;
25 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
26 #define CHIP_DELAY_TIMEOUT 200
27 #define NAND_STOP_DELAY 40
28 #define PAGE_CHUNK_SIZE (2048)
31 * Define a buffer size for the initial command that detects the flash device:
32 * STATUS, READID and PARAM. The largest of these is the PARAM command,
35 #define INIT_BUFFER_SIZE 256
37 /* registers and bit definitions */
38 #define NDCR (0x00) /* Control register */
39 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
40 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
41 #define NDSR (0x14) /* Status Register */
42 #define NDPCR (0x18) /* Page Count Register */
43 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
44 #define NDBDR1 (0x20) /* Bad Block Register 1 */
45 #define NDECCCTRL (0x28) /* ECC control */
46 #define NDDB (0x40) /* Data Buffer */
47 #define NDCB0 (0x48) /* Command Buffer0 */
48 #define NDCB1 (0x4C) /* Command Buffer1 */
49 #define NDCB2 (0x50) /* Command Buffer2 */
51 #define NDCR_SPARE_EN (0x1 << 31)
52 #define NDCR_ECC_EN (0x1 << 30)
53 #define NDCR_DMA_EN (0x1 << 29)
54 #define NDCR_ND_RUN (0x1 << 28)
55 #define NDCR_DWIDTH_C (0x1 << 27)
56 #define NDCR_DWIDTH_M (0x1 << 26)
57 #define NDCR_PAGE_SZ (0x1 << 24)
58 #define NDCR_NCSX (0x1 << 23)
59 #define NDCR_ND_MODE (0x3 << 21)
60 #define NDCR_NAND_MODE (0x0)
61 #define NDCR_CLR_PG_CNT (0x1 << 20)
62 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
63 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
64 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
66 #define NDCR_RA_START (0x1 << 15)
67 #define NDCR_PG_PER_BLK (0x1 << 14)
68 #define NDCR_ND_ARB_EN (0x1 << 12)
69 #define NDCR_INT_MASK (0xFFF)
71 #define NDSR_MASK (0xfff)
72 #define NDSR_ERR_CNT_OFF (16)
73 #define NDSR_ERR_CNT_MASK (0x1f)
74 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
75 #define NDSR_RDY (0x1 << 12)
76 #define NDSR_FLASH_RDY (0x1 << 11)
77 #define NDSR_CS0_PAGED (0x1 << 10)
78 #define NDSR_CS1_PAGED (0x1 << 9)
79 #define NDSR_CS0_CMDD (0x1 << 8)
80 #define NDSR_CS1_CMDD (0x1 << 7)
81 #define NDSR_CS0_BBD (0x1 << 6)
82 #define NDSR_CS1_BBD (0x1 << 5)
83 #define NDSR_UNCORERR (0x1 << 4)
84 #define NDSR_CORERR (0x1 << 3)
85 #define NDSR_WRDREQ (0x1 << 2)
86 #define NDSR_RDDREQ (0x1 << 1)
87 #define NDSR_WRCMDREQ (0x1)
89 #define NDCB0_LEN_OVRD (0x1 << 28)
90 #define NDCB0_ST_ROW_EN (0x1 << 26)
91 #define NDCB0_AUTO_RS (0x1 << 25)
92 #define NDCB0_CSEL (0x1 << 24)
93 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
94 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
95 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
96 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
97 #define NDCB0_NC (0x1 << 20)
98 #define NDCB0_DBC (0x1 << 19)
99 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
100 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
101 #define NDCB0_CMD2_MASK (0xff << 8)
102 #define NDCB0_CMD1_MASK (0xff)
103 #define NDCB0_ADDR_CYC_SHIFT (16)
105 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
106 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
107 #define EXT_CMD_TYPE_READ 4 /* Read */
108 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
109 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
110 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
111 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
113 /* macros for registers read/write */
114 #define nand_writel(info, off, val) \
115 writel((val), (info)->mmio_base + (off))
117 #define nand_readl(info, off) \
118 readl((info)->mmio_base + (off))
120 /* error code and state */
/*
 * Controller generations handled by this driver: NFCv1 as found in
 * PXA SoCs, NFCv2 as found in Armada 370/XP SoCs (see pxa3xx_nand_info).
 */
143 enum pxa3xx_nand_variant {
144 	PXA3XX_NAND_VARIANT_PXA,
145 	PXA3XX_NAND_VARIANT_ARMADA370,
/*
 * Per-chip-select state: the nand_chip/mtd pair plus the addressing
 * geometry derived during detection.
 */
148 struct pxa3xx_nand_host {
149 	struct nand_chip chip;
150 	struct mtd_info *mtd;
153 	/* page size of attached chip */
157 	/* calculated from pxa3xx_nand_flash data */
	/* column/row address cycle counts used to build NDCB0 ADDR_CYC */
158 	unsigned int col_addr_cycles;
159 	unsigned int row_addr_cycles;
	/* number of ID bytes fetched by READID (2 or 4, see detect_config) */
160 	size_t read_id_bytes;
/*
 * Controller-wide state: MMIO base, the PIO transfer bookkeeping
 * (buffer positions and sizes), ECC configuration and the cached /
 * pre-built register values written during command dispatch.
 */
164 struct pxa3xx_nand_info {
165 	struct nand_hw_control controller;
166 	struct pxa3xx_nand_platform_data *pdata;
169 	void __iomem *mmio_base;
170 	unsigned long mmio_phys;
	/* polled completion flags updated from pxa3xx_nand_irq() */
171 	int cmd_complete, dev_ready;
	/* current consumer offset into data_buff (read_byte/read_buf) */
173 	unsigned int buf_start;
	/* total valid bytes in data_buff for the current command */
174 	unsigned int buf_count;
175 	unsigned int buf_size;
	/* producer offsets used by handle_data_pio() across chunks */
176 	unsigned int data_buff_pos;
177 	unsigned int oob_buff_pos;
179 	unsigned char *data_buff;
180 	unsigned char *oob_buff;
182 	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
186 	 * This driver supports NFCv1 (as found in PXA SoC)
187 	 * and NFCv2 (as found in Armada 370/XP SoC).
189 	enum pxa3xx_nand_variant variant;
192 	int use_ecc;	/* use HW ECC ? */
193 	int ecc_bch;	/* using BCH ECC? */
194 	int use_spare;	/* use spare ? */
197 	unsigned int data_size;	/* data to be read from FIFO */
198 	unsigned int chunk_size;	/* split commands chunk size */
199 	unsigned int oob_size;
200 	unsigned int spare_size;
201 	unsigned int ecc_size;
202 	unsigned int ecc_err_cnt;
203 	unsigned int max_bitflips;
206 	/* cached register value */
211 	/* generated NDCBx register values */
/*
 * Legacy per-flash timing tables consumed by pxa3xx_nand_set_timing().
 * NOTE(review): field order follows struct pxa3xx_nand_timing (declared
 * in pxa3xx_nand.h, not visible here) — presumably tCH/tCS/tWH/tWP/
 * tRH/tRP/tR/tWHR/tAR in ns; confirm against the header.
 */
218 static struct pxa3xx_nand_timing timing[] = {
219 	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
220 	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
221 	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
222 	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
/*
 * Known chips matched by 16-bit ID in pxa3xx_nand_init_timings() when
 * ONFI timing data is unavailable: { chip_id, flash_width, dfc_width,
 * timing } (widths in bits; see f->flash_width/f->dfc_width users).
 */
225 static struct pxa3xx_nand_flash builtin_flash_types[] = {
226 	{ 0x46ec, 16, 16, &timing[1] },
227 	{ 0xdaec,  8,  8, &timing[1] },
228 	{ 0xd7ec,  8,  8, &timing[1] },
229 	{ 0xa12c,  8,  8, &timing[2] },
230 	{ 0xb12c, 16, 16, &timing[2] },
231 	{ 0xdc2c,  8,  8, &timing[2] },
232 	{ 0xcc2c, 16, 16, &timing[2] },
233 	{ 0xba20, 16, 16, &timing[3] },
236 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/* In-flash bad block table signatures: mirror uses the reversed pattern. */
237 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
238 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
/* Main BBT: stored in the last blocks of the chip, 2-bit entries, versioned. */
240 static struct nand_bbt_descr bbt_main_descr = {
241 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
242 		| NAND_BBT_2BIT | NAND_BBT_VERSION,
246 	.maxblocks = 8,		/* Last 8 blocks in each chip */
247 	.pattern = bbt_pattern
/* Mirror BBT: same placement policy, distinguished by the mirrored pattern. */
250 static struct nand_bbt_descr bbt_mirror_descr = {
251 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
252 		| NAND_BBT_2BIT | NAND_BBT_VERSION,
256 	.maxblocks = 8,		/* Last 8 blocks in each chip */
257 	.pattern = bbt_mirror_pattern
/* OOB layout for 2KB pages with BCH-4: ECC in OOB bytes 32-63, free area after BBM. */
261 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
264 		32, 33, 34, 35, 36, 37, 38, 39,
265 		40, 41, 42, 43, 44, 45, 46, 47,
266 		48, 49, 50, 51, 52, 53, 54, 55,
267 		56, 57, 58, 59, 60, 61, 62, 63},
268 	.oobfree = { {2, 30} }
/* OOB layout for 4KB pages with BCH-4: two ECC regions, one per 2KB chunk. */
271 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
274 		32, 33, 34, 35, 36, 37, 38, 39,
275 		40, 41, 42, 43, 44, 45, 46, 47,
276 		48, 49, 50, 51, 52, 53, 54, 55,
277 		56, 57, 58, 59, 60, 61, 62, 63,
278 		96, 97, 98, 99, 100, 101, 102, 103,
279 		104, 105, 106, 107, 108, 109, 110, 111,
280 		112, 113, 114, 115, 116, 117, 118, 119,
281 		120, 121, 122, 123, 124, 125, 126, 127},
282 	/* Bootrom looks in bytes 0 & 5 for bad blocks */
283 	.oobfree = { {6, 26}, { 64, 32} }
/* OOB layout for 4KB pages with BCH-8 (1KB chunks, spare_size = 0). */
286 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
289 		32, 33, 34, 35, 36, 37, 38, 39,
290 		40, 41, 42, 43, 44, 45, 46, 47,
291 		48, 49, 50, 51, 52, 53, 54, 55,
292 		56, 57, 58, 59, 60, 61, 62, 63},
296 #define NDTR0_tCH(c) (min((c), 7) << 19)
297 #define NDTR0_tCS(c) (min((c), 7) << 16)
298 #define NDTR0_tWH(c) (min((c), 7) << 11)
299 #define NDTR0_tWP(c) (min((c), 7) << 8)
300 #define NDTR0_tRH(c) (min((c), 7) << 3)
301 #define NDTR0_tRP(c) (min((c), 7) << 0)
303 #define NDTR1_tR(c) (min((c), 65535) << 16)
304 #define NDTR1_tWHR(c) (min((c), 15) << 4)
305 #define NDTR1_tAR(c) (min((c), 15) << 0)
307 /* convert nano-seconds to nand flash controller clock cycles */
308 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
/*
 * Report the controller variant. This U-Boot build only targets the
 * NFCv2 core, so the answer is hard-wired.
 */
310 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
312 	/* We only support the Armada 370/XP/38x for now */
313 	return PXA3XX_NAND_VARIANT_ARMADA370;
/*
 * Program NDTR0/NDTR1 from a legacy pxa3xx_nand_timing entry.
 * All fields are in ns and converted to controller clock cycles with
 * ns2cycle(); the computed values are also cached in info->ndtr0cs0/
 * ndtr1cs0 so nand_cmdfunc() can re-program them on a CS switch.
 */
316 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
317 				   const struct pxa3xx_nand_timing *t)
319 	struct pxa3xx_nand_info *info = host->info_data;
320 	unsigned long nand_clk = mvebu_get_nand_clock();
321 	uint32_t ndtr0, ndtr1;
323 	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
324 		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
325 		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
326 		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
327 		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
328 		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
330 	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
331 		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
332 		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
	/* cache for chip-select changes, then program the hardware */
334 	info->ndtr0cs0 = ndtr0;
335 	info->ndtr1cs0 = ndtr1;
336 	nand_writel(info, NDTR0CS0, ndtr0);
337 	nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Program NDTR0/NDTR1 from ONFI SDR timings (given in picoseconds,
 * hence the DIV_ROUND_UP(x, 1000) conversions to ns). tWP/tRP are
 * derived from the cycle times minus the high phases; tR comes from
 * the chip's chip_delay (us) scaled to ns.
 */
340 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
341 				       const struct nand_sdr_timings *t)
343 	struct pxa3xx_nand_info *info = host->info_data;
344 	struct nand_chip *chip = &host->chip;
345 	unsigned long nand_clk = mvebu_get_nand_clock();
346 	uint32_t ndtr0, ndtr1;
348 	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
349 	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
350 	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	/* NOTE(review): tWH_min here is already ns while tWC_min is ps —
	 * the subtraction mixes units; confirm against upstream driver. */
351 	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - tWH_min, 1000);
352 	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
353 	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - tREH_min, 1000);
354 	u32 tR = chip->chip_delay * 1000;
355 	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
356 	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
358 	/* fallback to a default value if tR = 0 */
362 	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
363 		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
364 		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
365 		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
366 		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
367 		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
369 	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
370 		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
371 		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
	/* cache for chip-select changes, then program the hardware */
373 	info->ndtr0cs0 = ndtr0;
374 	info->ndtr1cs0 = ndtr1;
375 	nand_writel(info, NDTR0CS0, ndtr0);
376 	nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Choose and apply chip timings. Preference order:
 *  - if the chip reports no ONFI async timing mode, read its 2-byte ID
 *    and look it up in builtin_flash_types[], applying the legacy table
 *    (and bus-width bits in NDCR) — error if unknown;
 *  - otherwise take the highest supported ONFI mode and apply its SDR
 *    timings via pxa3xx_nand_set_sdr_timing().
 */
379 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
381 	const struct nand_sdr_timings *timings;
382 	struct nand_chip *chip = &host->chip;
383 	struct pxa3xx_nand_info *info = host->info_data;
384 	const struct pxa3xx_nand_flash *f = NULL;
385 	int mode, id, ntypes, i;
387 	mode = onfi_get_async_timing_mode(chip);
388 	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
389 		ntypes = ARRAY_SIZE(builtin_flash_types);
		/* issue READID and assemble the 16-bit maker/device ID */
391 		chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);
393 		id = chip->read_byte(host->mtd);
394 		id |= chip->read_byte(host->mtd) << 0x8;
396 		for (i = 0; i < ntypes; i++) {
397 			f = &builtin_flash_types[i];
399 			if (f->chip_id == id)
404 			dev_err(&info->pdev->dev, "Error: timings not found\n");
408 		pxa3xx_nand_set_timing(host, f->timing);
410 		if (f->flash_width == 16) {
411 			info->reg_ndcr |= NDCR_DWIDTH_M;
412 			chip->options |= NAND_BUSWIDTH_16;
415 		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
		/* mode is a bitmask of supported modes; pick the highest */
417 		mode = fls(mode) - 1;
421 		timings = onfi_async_timing_mode_to_sdr_timings(mode);
423 			return PTR_ERR(timings);
425 		pxa3xx_nand_set_sdr_timing(host, timings);
432 * Set the data and OOB size, depending on the selected
433 * spare and ECC configuration.
434 * Only applicable to READ0, READOOB and PAGEPROG commands.
436 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
437 				struct mtd_info *mtd)
	/* data_size covers the whole page; oob_size depends on whether the
	 * spare area is enabled and whether HW ECC adds its code bytes. */
439 	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
441 	info->data_size = mtd->writesize;
445 	info->oob_size = info->spare_size;
447 		info->oob_size += info->ecc_size;
451 * NOTE: it is a must to set ND_RUN first, then write
452 * command buffer, otherwise, it does not work.
453 * We enable all the interrupt at the same time, and
454 * let pxa3xx_nand_irq to handle all logic.
456 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
460 	ndcr = info->reg_ndcr;
	/* NDECCCTRL selects BCH (1) vs Hamming (0) on NFCv2 */
465 		nand_writel(info, NDECCCTRL, 0x1);
467 		ndcr &= ~NDCR_ECC_EN;
469 			nand_writel(info, NDECCCTRL, 0x0);
	/* PIO only in U-Boot: keep DMA disabled */
472 	ndcr &= ~NDCR_DMA_EN;
475 		ndcr |= NDCR_SPARE_EN;
477 		ndcr &= ~NDCR_SPARE_EN;
481 	/* clear status bits and run */
482 	nand_writel(info, NDCR, 0);
483 	nand_writel(info, NDSR, NDSR_MASK);
484 	nand_writel(info, NDCR, ndcr);
/*
 * Mask interrupt sources: NDCR interrupt bits are mask bits, so OR-ing
 * int_mask into NDCR disables those interrupts.
 */
487 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
491 	ndcr = nand_readl(info, NDCR);
492 	nand_writel(info, NDCR, ndcr | int_mask);
/*
 * Read 'len' 32-bit words from the NDDB data FIFO into 'data'.
 * With BCH enabled the FIFO must be drained in 32-byte (8-word) bursts,
 * polling NDSR.RDDREQ (with a TIMEOUT_DRAIN_FIFO bound) between bursts.
 * Without BCH, a single readsl() of the full length is sufficient.
 */
495 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
501 	 * According to the datasheet, when reading from NDDB
502 	 * with BCH enabled, after each 32 bytes reads, we
503 	 * have to make sure that the NDSR.RDDREQ bit is set.
505 	 * Drain the FIFO 8 32 bits reads at a time, and skip
506 	 * the polling on the last read.
509 			readsl(info->mmio_base + NDDB, data, 8);
512 			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
513 				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
514 					dev_err(&info->pdev->dev,
515 						"Timeout on RDDREQ while draining the FIFO\n");
525 	readsl(info->mmio_base + NDDB, data, len)
/*
 * Transfer one chunk (at most chunk_size bytes of data plus oob_size
 * bytes of OOB) between the driver buffers and the controller FIFO,
 * direction chosen by info->state, then advance the buffer positions
 * for multi-chunk pages.
 */
528 static void handle_data_pio(struct pxa3xx_nand_info *info)
530 	unsigned int do_bytes = min(info->data_size, info->chunk_size);
532 	switch (info->state) {
533 	case STATE_PIO_WRITING:
534 		writesl(info->mmio_base + NDDB,
535 			info->data_buff + info->data_buff_pos,
536 			DIV_ROUND_UP(do_bytes, 4));
538 		if (info->oob_size > 0)
539 			writesl(info->mmio_base + NDDB,
540 				info->oob_buff + info->oob_buff_pos,
541 				DIV_ROUND_UP(info->oob_size, 4));
543 	case STATE_PIO_READING:
	/* reads go through drain_fifo() (call lines not visible here) */
545 			   info->data_buff + info->data_buff_pos,
546 			   DIV_ROUND_UP(do_bytes, 4));
548 		if (info->oob_size > 0)
550 				   info->oob_buff + info->oob_buff_pos,
551 				   DIV_ROUND_UP(info->oob_size, 4));
554 		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
559 	/* Update buffer pointers for multi-page read/write */
560 	info->data_buff_pos += do_bytes;
561 	info->oob_buff_pos += info->oob_size;
562 	info->data_size -= do_bytes;
/*
 * Bottom-half work, called synchronously from pxa3xx_nand_irq() in
 * U-Boot: perform the PIO transfer, then acknowledge the data-request
 * status bits and mark the command phase done.
 */
565 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
567 	handle_data_pio(info);
569 	info->state = STATE_CMD_DONE;
570 	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/*
 * Polled "interrupt" handler: decode NDSR, record ECC results, run the
 * PIO transfer on RDDREQ/WRDREQ, push the pre-built NDCB words on
 * WRCMDREQ, and acknowledge every handled status bit. Sets
 * info->cmd_complete / dev_ready for the poll loops in nand_cmdfunc*.
 */
573 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
575 	unsigned int status, is_completed = 0, is_ready = 0;
576 	unsigned int ready, cmd_done;
577 	irqreturn_t ret = IRQ_HANDLED;
	/* pick the ready/command-done bits for the active chip select */
580 		ready           = NDSR_FLASH_RDY;
581 		cmd_done        = NDSR_CS0_CMDD;
584 		cmd_done        = NDSR_CS1_CMDD;
587 	status = nand_readl(info, NDSR);
589 	if (status & NDSR_UNCORERR)
590 		info->retcode = ERR_UNCORERR;
591 	if (status & NDSR_CORERR) {
592 		info->retcode = ERR_CORERR;
		/* NFCv2 reports a correction count; NFCv1 only a flag */
593 		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
595 			info->ecc_err_cnt = NDSR_ERR_CNT(status);
597 			info->ecc_err_cnt = 1;
600 		 * Each chunk composing a page is corrected independently,
601 		 * and we need to store maximum number of corrected bitflips
602 		 * to return it to the MTD layer in ecc.read_page().
604 		info->max_bitflips = max_t(unsigned int,
608 	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
609 		info->state = (status & NDSR_RDDREQ) ?
610 			STATE_PIO_READING : STATE_PIO_WRITING;
611 		/* Call the IRQ thread in U-Boot directly */
612 		pxa3xx_nand_irq_thread(info);
615 	if (status & cmd_done) {
616 		info->state = STATE_CMD_DONE;
619 	if (status & ready) {
620 		info->state = STATE_READY;
624 	if (status & NDSR_WRCMDREQ) {
625 		nand_writel(info, NDSR, NDSR_WRCMDREQ);
626 		status &= ~NDSR_WRCMDREQ;
627 		info->state = STATE_CMD_HANDLE;
630 		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
631 		 * must be loaded by writing directly either 12 or 16
632 		 * bytes directly to NDCB0, four bytes at a time.
634 		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
635 		 * but each NDCBx register can be read.
637 		nand_writel(info, NDCB0, info->ndcb0);
638 		nand_writel(info, NDCB0, info->ndcb1);
639 		nand_writel(info, NDCB0, info->ndcb2);
641 		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
642 		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
643 			nand_writel(info, NDCB0, info->ndcb3);
646 	/* clear NDSR to let the controller exit the IRQ */
647 	nand_writel(info, NDSR, status);
649 		info->cmd_complete = 1;
/* Return non-zero when the first 'len' bytes of buf are all 0xff
 * (erased-flash pattern); the byte test lines are not visible here. */
656 static inline int is_buf_blank(uint8_t *buf, size_t len)
658 	for (; len > 0; len--)
/*
 * Pack column/page address cycles into ndcb1/ndcb2. Small-page chips
 * (< 2KB) use a 1-byte column; large-page chips use a 2-byte column in
 * ndcb1 with the page-address overflow byte carried in ndcb2.
 */
664 static void set_command_address(struct pxa3xx_nand_info *info,
665 		unsigned int page_size, uint16_t column, int page_addr)
667 	/* small page addr setting */
668 	if (page_size < PAGE_CHUNK_SIZE) {
669 		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
674 		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
677 		if (page_addr & 0xFF0000)
678 			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
/*
 * Reset per-command bookkeeping (buffer cursors, retcode, ECC counters)
 * before a new command, compute the transfer sizes for data commands,
 * and pre-fill the data buffer with 0xFF for reads / write setup so
 * untouched bytes read back as erased.
 */
684 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
686 	struct pxa3xx_nand_host *host = info->host[info->cs];
687 	struct mtd_info *mtd = host->mtd;
689 	/* reset data and oob column point to handle data */
693 	info->data_buff_pos	= 0;
694 	info->oob_buff_pos	= 0;
697 	info->retcode		= ERR_NONE;
698 	info->ecc_err_cnt	= 0;
704 	case NAND_CMD_PAGEPROG:
706 	case NAND_CMD_READOOB:
707 		pxa3xx_set_datasize(info, mtd);
719 	 * If we are about to issue a read command, or about to set
720 	 * the write address, then clean the data buffer.
722 	if (command == NAND_CMD_READ0 ||
723 	    command == NAND_CMD_READOOB ||
724 	    command == NAND_CMD_SEQIN) {
725 		info->buf_count = mtd->writesize + mtd->oobsize;
726 		memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Translate an MTD command into the NDCB0-3 words for the controller.
 * Returns whether the command must actually be executed (exec_cmd);
 * ext_cmd_type selects the naked/dispatch/final sub-type used by the
 * chunked (extended) command path on large-page chips.
 */
730 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
731 		int ext_cmd_type, uint16_t column, int page_addr)
733 	int addr_cycle, exec_cmd;
734 	struct pxa3xx_nand_host *host;
735 	struct mtd_info *mtd;
737 	host = info->host[info->cs];
743 		info->ndcb0 = NDCB0_CSEL;
747 	if (command == NAND_CMD_SEQIN)
750 	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
751 				    + host->col_addr_cycles);
	/* READ0 / READOOB: same read command, OOB just offsets buf_start */
754 	case NAND_CMD_READOOB:
756 		info->buf_start = column;
757 		info->ndcb0 |= NDCB0_CMD_TYPE(0)
761 		if (command == NAND_CMD_READOOB)
762 			info->buf_start += mtd->writesize;
765 		 * Multiple page read needs an 'extended command type' field,
766 		 * which is either naked-read or last-read according to the
769 		if (mtd->writesize == PAGE_CHUNK_SIZE) {
770 			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
771 		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
772 			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
774 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
775 			info->ndcb3 = info->chunk_size +
779 		set_command_address(info, mtd->writesize, column, page_addr);
	/* SEQIN: record write address; data comes with PAGEPROG */
784 		info->buf_start = column;
785 		set_command_address(info, mtd->writesize, 0, page_addr);
788 		 * Multiple page programming needs to execute the initial
789 		 * SEQIN command that sets the page address.
791 		if (mtd->writesize > PAGE_CHUNK_SIZE) {
792 			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
793 				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
796 			/* No data transfer in this case */
802 	case NAND_CMD_PAGEPROG:
		/* skip programming an entirely blank (erased) page */
803 		if (is_buf_blank(info->data_buff,
804 					(mtd->writesize + mtd->oobsize))) {
809 		/* Second command setting for large pages */
810 		if (mtd->writesize > PAGE_CHUNK_SIZE) {
812 			 * Multiple page write uses the 'extended command'
813 			 * field. This can be used to issue a command dispatch
814 			 * or a naked-write depending on the current stage.
816 			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
818 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
819 			info->ndcb3 = info->chunk_size +
823 			 * This is the command dispatch that completes a chunked
824 			 * page program operation.
826 			if (info->data_size == 0) {
827 				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
828 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
			/* small page: single SEQIN+PAGEPROG double-byte command */
835 			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
839 					| (NAND_CMD_PAGEPROG << 8)
	/* PARAM: 256-byte ONFI parameter page read */
846 		info->buf_count = 256;
847 		info->ndcb0 |= NDCB0_CMD_TYPE(0)
851 		info->ndcb1 = (column & 0xFF);
853 		info->data_size = 256;
856 	case NAND_CMD_READID:
857 		info->buf_count = host->read_id_bytes;
858 		info->ndcb0 |= NDCB0_CMD_TYPE(3)
861 		info->ndcb1 = (column & 0xFF);
865 	case NAND_CMD_STATUS:
867 		info->ndcb0 |= NDCB0_CMD_TYPE(4)
874 	case NAND_CMD_ERASE1:
875 		info->ndcb0 |= NDCB0_CMD_TYPE(2)
879 				| (NAND_CMD_ERASE2 << 8)
881 		info->ndcb1 = page_addr;
	/* RESET: command-dispatch type */
886 		info->ndcb0 |= NDCB0_CMD_TYPE(5)
891 	case NAND_CMD_ERASE2:
897 		dev_err(&info->pdev->dev, "non-supported command %x\n",
/*
 * nand_chip->cmdfunc for chips whose pages fit the controller FIFO
 * (<= PAGE_CHUNK_SIZE): build the NDCB words, start the controller and
 * poll pxa3xx_nand_irq() until cmd_complete or CHIP_DELAY_TIMEOUT.
 */
905 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
906 			 int column, int page_addr)
908 	struct nand_chip *chip = mtd_to_nand(mtd);
909 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
910 	struct pxa3xx_nand_info *info = host->info_data;
914 	 * if this is a x16 device ,then convert the input
915 	 * "byte" address into a "word" address appropriate
916 	 * for indexing a word-oriented device
918 	if (info->reg_ndcr & NDCR_DWIDTH_M)
922 	 * There may be different NAND chip hooked to
923 	 * different chip select, so check whether
924 	 * chip select has been changed, if yes, reset the timing
926 	if (info->cs != host->cs) {
		/* re-program the cached timing registers for the new chip */
928 		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
929 		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
932 	prepare_start_command(info, command);
934 	info->state = STATE_PREPARED;
935 	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
940 		info->cmd_complete = 0;
943 		pxa3xx_nand_start(info);
		/* busy-wait: service status bits until the command completes */
949 			status = nand_readl(info, NDSR);
951 				pxa3xx_nand_irq(info);
953 			if (info->cmd_complete)
956 			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
957 				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
962 	info->state = STATE_IDLE;
/*
 * nand_chip->cmdfunc for large pages (> PAGE_CHUNK_SIZE) on NFCv2:
 * splits one MTD command into a sequence of chunked controller commands
 * using the extended command type (monolithic / naked / last / dispatch),
 * looping until the whole page has been transferred.
 */
965 static void nand_cmdfunc_extended(struct mtd_info *mtd,
966 				  const unsigned command,
967 				  int column, int page_addr)
969 	struct nand_chip *chip = mtd_to_nand(mtd);
970 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
971 	struct pxa3xx_nand_info *info = host->info_data;
972 	int exec_cmd, ext_cmd_type;
975 	 * if this is a x16 device then convert the input
976 	 * "byte" address into a "word" address appropriate
977 	 * for indexing a word-oriented device
979 	if (info->reg_ndcr & NDCR_DWIDTH_M)
983 	 * There may be different NAND chip hooked to
984 	 * different chip select, so check whether
985 	 * chip select has been changed, if yes, reset the timing
987 	if (info->cs != host->cs) {
988 		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
990 		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
993 	/* Select the extended command for the first command */
996 	case NAND_CMD_READOOB:
997 		ext_cmd_type = EXT_CMD_TYPE_MONO;
1000 		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1002 	case NAND_CMD_PAGEPROG:
1003 		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1010 	prepare_start_command(info, command);
1013 	 * Prepare the "is ready" completion before starting a command
1014 	 * transaction sequence. If the command is not executed the
1015 	 * completion will be completed, see below.
1017 	 * We can do that inside the loop because the command variable
1018 	 * is invariant and thus so is the exec_cmd.
1020 	info->need_wait = 1;
1021 	info->dev_ready = 0;
1026 		info->state = STATE_PREPARED;
1027 		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
		/* nothing to execute: fake readiness and bail out */
1030 			info->need_wait = 0;
1031 			info->dev_ready = 1;
1035 		info->cmd_complete = 0;
1036 		pxa3xx_nand_start(info);
		/* poll the status/IRQ handler until this chunk completes */
1042 			status = nand_readl(info, NDSR);
1044 				pxa3xx_nand_irq(info);
1046 			if (info->cmd_complete)
1049 			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1050 				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1055 		/* Check if the sequence is complete */
1056 		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1060 		 * After a splitted program command sequence has issued
1061 		 * the command dispatch, the command sequence is complete.
1063 		if (info->data_size == 0 &&
1064 		    command == NAND_CMD_PAGEPROG &&
1065 		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1068 		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1069 			/* Last read: issue a 'last naked read' */
1070 			if (info->data_size == info->chunk_size)
1071 				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1073 				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1076 		 * If a splitted program command has no more data to transfer,
1077 		 * the command dispatch must be issued to complete.
1079 		} else if (command == NAND_CMD_PAGEPROG &&
1080 			   info->data_size == 0) {
1081 			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1085 	info->state = STATE_IDLE;
/*
 * ecc.write_page: just stage page data and OOB into the driver buffer;
 * the controller applies HW ECC when PAGEPROG is issued.
 */
1088 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1089 		struct nand_chip *chip, const uint8_t *buf, int oob_required,
1092 	chip->write_buf(mtd, buf, mtd->writesize);
1093 	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
 * ecc.read_page: copy page + OOB out of the driver buffer, then fold
 * the HW ECC outcome recorded by the IRQ handler into mtd->ecc_stats.
 * Returns the max bitflip count for the MTD layer's threshold check.
 */
1098 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1099 		struct nand_chip *chip, uint8_t *buf, int oob_required,
1102 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1103 	struct pxa3xx_nand_info *info = host->info_data;
1105 	chip->read_buf(mtd, buf, mtd->writesize);
1106 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1108 	if (info->retcode == ERR_CORERR && info->use_ecc) {
1109 		mtd->ecc_stats.corrected += info->ecc_err_cnt;
1111 	} else if (info->retcode == ERR_UNCORERR) {
1113 		 * for blank page (all 0xff), HW will calculate its ECC as
1114 		 * 0, which is different from the ECC information within
1115 		 * OOB, ignore such uncorrectable errors
1117 		if (is_buf_blank(buf, mtd->writesize))
1118 			info->retcode = ERR_NONE;
1120 			mtd->ecc_stats.failed++;
1123 	return info->max_bitflips;
/*
 * nand_chip->read_byte: return the next byte from the staged data
 * buffer, or a default value once buf_count is exhausted.
 */
1126 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1128 	struct nand_chip *chip = mtd_to_nand(mtd);
1129 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1130 	struct pxa3xx_nand_info *info = host->info_data;
1133 	if (info->buf_start < info->buf_count)
1134 		/* Has just send a new command? */
1135 		retval = info->data_buff[info->buf_start++];
/*
 * nand_chip->read_word: return the next aligned 16-bit word from the
 * staged buffer; 0xFFFF when unaligned or past the end.
 */
1140 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1142 	struct nand_chip *chip = mtd_to_nand(mtd);
1143 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1144 	struct pxa3xx_nand_info *info = host->info_data;
1145 	u16 retval = 0xFFFF;
1147 	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1148 		retval = *((u16 *)(info->data_buff+info->buf_start));
1149 		info->buf_start += 2;
/*
 * nand_chip->read_buf: copy up to 'len' bytes out of the staged data
 * buffer, clamped to the bytes remaining (buf_count - buf_start).
 */
1154 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1156 	struct nand_chip *chip = mtd_to_nand(mtd);
1157 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1158 	struct pxa3xx_nand_info *info = host->info_data;
1159 	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1161 	memcpy(buf, info->data_buff + info->buf_start, real_len);
1162 	info->buf_start += real_len;
/*
 * nand_chip->write_buf: stage up to 'len' bytes into the data buffer,
 * clamped to the remaining space; actual flash write happens later.
 */
1165 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1166 		const uint8_t *buf, int len)
1168 	struct nand_chip *chip = mtd_to_nand(mtd);
1169 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1170 	struct pxa3xx_nand_info *info = host->info_data;
1171 	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1173 	memcpy(info->data_buff + info->buf_start, buf, real_len);
1174 	info->buf_start += real_len;
/* nand_chip->select_chip: intentionally a no-op; chip select is handled
 * via info->cs in the cmdfunc paths. */
1177 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
/*
 * nand_chip->waitfunc: if a chunked command left need_wait set, poll
 * the status/IRQ handler until dev_ready (or CHIP_DELAY_TIMEOUT →
 * NAND_STATUS_FAIL). For write/erase, translate the recorded retcode
 * into a NAND status word for the MTD core.
 */
1182 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1184 	struct nand_chip *chip = mtd_to_nand(mtd);
1185 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1186 	struct pxa3xx_nand_info *info = host->info_data;
1188 	if (info->need_wait) {
1191 		info->need_wait = 0;
1197 			status = nand_readl(info, NDSR);
1199 				pxa3xx_nand_irq(info);
1201 			if (info->dev_ready)
1204 			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1205 				dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1206 				return NAND_STATUS_FAIL;
1211 	/* pxa3xx_nand_send_command has waited for command complete */
1212 	if (this->state == FL_WRITING || this->state == FL_ERASING) {
1213 		if (info->retcode == ERR_NONE)
1216 			return NAND_STATUS_FAIL;
1219 	return NAND_STATUS_READY;
/*
 * Fold the detected geometry into the cached NDCR value: row-address
 * start, 64-pages-per-block flag (page_shift == 6 on erase geometry),
 * and the 2KB page-size bit.
 */
1222 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
1224 	struct pxa3xx_nand_host *host = info->host[info->cs];
1225 	struct mtd_info *mtd = host->mtd;
1226 	struct nand_chip *chip = mtd_to_nand(mtd);
1228 	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1229 	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1230 	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
/*
 * "keep_config" path: read the firmware-programmed NDCR/NDTRx values
 * instead of reconfiguring, deriving chunk size and READID length from
 * the NDCR page-size bit. Only valid with a single attached chip (CS0).
 */
1235 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1238 	 * We set 0 by hard coding here, for we don't support keep_config
1239 	 * when there is more than one chip attached to the controller
1241 	struct pxa3xx_nand_host *host = info->host[0];
1242 	uint32_t ndcr = nand_readl(info, NDCR);
1244 	if (ndcr & NDCR_PAGE_SZ) {
1245 		/* Controller's FIFO size */
1246 		info->chunk_size = 2048;
1247 		host->read_id_bytes = 4;
1249 		info->chunk_size = 512;
1250 		host->read_id_bytes = 2;
1253 	/* Set an initial chunk size */
	/* keep hardware NDCR but mask (i.e. disable) all interrupt sources */
1254 	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1255 	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1256 	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
/* Allocate the data/OOB staging buffer of info->buf_size bytes;
 * error return on allocation failure (value line not visible here). */
1260 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1262 	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1263 	if (info->data_buff == NULL)
/*
 * Probe for chip presence: program safe defaults (ONFI timing mode 0,
 * spare enabled, arbiter per platform data), issue a RESET and check
 * the resulting NAND status for failure.
 */
1268 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1270 	struct pxa3xx_nand_info *info = host->info_data;
1271 	struct pxa3xx_nand_platform_data *pdata = info->pdata;
1272 	struct mtd_info *mtd;
1273 	struct nand_chip *chip;
1274 	const struct nand_sdr_timings *timings;
1277 	mtd = info->host[info->cs]->mtd;
1278 	chip = mtd_to_nand(mtd);
1280 	/* configure default flash values */
1281 	info->reg_ndcr = 0x0; /* enable all interrupts */
1282 	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1283 	info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1284 	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1286 	/* use the common timing to make a try */
1287 	timings = onfi_async_timing_mode_to_sdr_timings(0);
1288 	if (IS_ERR(timings))
1289 		return PTR_ERR(timings);
1291 	pxa3xx_nand_set_sdr_timing(host, timings);
1293 	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1294 	ret = chip->waitfunc(mtd, chip);
1295 	if (ret & NAND_STATUS_FAIL)
/*
 * Map a requested (strength, step, page size) triple onto one of the
 * controller's supported ECC configurations, filling in chunk/spare/ecc
 * sizes and the nand_ecc_ctrl. Supported combinations:
 *   1-bit/512 on 512B and 2KB pages (Hamming),
 *   4-bit/512 on 2KB and 4KB pages (BCH, 2KB chunks),
 *   8-bit/512 on 4KB pages (BCH, 1KB chunks).
 * Anything else is rejected with an error message.
 */
1301 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1302 			struct nand_ecc_ctrl *ecc,
1303 			int strength, int ecc_stepsize, int page_size)
1305 	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1306 		info->chunk_size = 2048;
1307 		info->spare_size = 40;
1308 		info->ecc_size = 24;
1309 		ecc->mode = NAND_ECC_HW;
1313 	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1314 		info->chunk_size = 512;
1315 		info->spare_size = 8;
1317 		ecc->mode = NAND_ECC_HW;
1322 	 * Required ECC: 4-bit correction per 512 bytes
1323 	 * Select: 16-bit correction per 2048 bytes
1325 	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1327 		info->chunk_size = 2048;
1328 		info->spare_size = 32;
1329 		info->ecc_size = 32;
1330 		ecc->mode = NAND_ECC_HW;
1331 		ecc->size = info->chunk_size;
1332 		ecc->layout = &ecc_layout_2KB_bch4bit;
1335 	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1337 		info->chunk_size = 2048;
1338 		info->spare_size = 32;
1339 		info->ecc_size = 32;
1340 		ecc->mode = NAND_ECC_HW;
1341 		ecc->size = info->chunk_size;
1342 		ecc->layout = &ecc_layout_4KB_bch4bit;
1346 	 * Required ECC: 8-bit correction per 512 bytes
1347 	 * Select: 16-bit correction per 1024 bytes
1349 	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1351 		info->chunk_size = 1024;
1352 		info->spare_size = 0;
1353 		info->ecc_size = 32;
1354 		ecc->mode = NAND_ECC_HW;
1355 		ecc->size = info->chunk_size;
1356 		ecc->layout = &ecc_layout_4KB_bch8bit;
1359 		dev_err(&info->pdev->dev,
1360 			"ECC strength %d at page size %d is not supported\n",
1361 			strength, page_size);
/*
 * pxa3xx_nand_scan - identify the attached NAND chip and finish host setup.
 *
 * Visible steps: honor a bootloader-kept controller config (keep_config),
 * sense for a chip, identify it with ECC disabled, program timings and
 * flash config, select the command handler for large pages, pick ECC
 * strength/step, compute column/row address cycles, and swap the small
 * detection buffer for a full page+OOB buffer.
 *
 * Returns 0 on success or a negative error code (via nand_scan_tail()).
 *
 * NOTE(review): several lines of this function are elided in this view;
 * comments below describe only what the visible code demonstrates.
 */
1368 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1370 struct nand_chip *chip = mtd_to_nand(mtd);
1371 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1372 struct pxa3xx_nand_info *info = host->info_data;
1373 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1375 uint16_t ecc_strength, ecc_step;
/* Reuse the configuration already programmed into the controller
 * (e.g. by the bootloader) when the platform asks to keep it. */
1377 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1380 /* Set a default chunk size */
1381 info->chunk_size = 512;
/* Probe whether a chip actually answers on this chip select. */
1383 ret = pxa3xx_nand_sensing(host);
1385 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1392 /* Device detection must be done with ECC disabled */
1393 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1394 nand_writel(info, NDECCCTRL, 0x0);
/* Standard MTD identification (READID/ONFI) of a single chip. */
1396 if (nand_scan_ident(mtd, 1, NULL))
/* Only program timings and flash config when not keeping the
 * pre-existing controller configuration. */
1399 if (!pdata->keep_config) {
1400 ret = pxa3xx_nand_init_timings(host);
1402 dev_err(&info->pdev->dev,
1403 "Failed to set timings: %d\n", ret);
1408 ret = pxa3xx_nand_config_flash(info);
1412 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1414 * We'll use a bad block table stored in-flash and don't
1415 * allow writing the bad block marker to the flash.
1417 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1418 chip->bbt_td = &bbt_main_descr;
1419 chip->bbt_md = &bbt_mirror_descr;
1423 * If the page size is bigger than the FIFO size, let's check
1424 * we are given the right variant and then switch to the extended
1425 * (aka split) command handling.
1427 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1428 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1429 chip->cmdfunc = nand_cmdfunc_extended;
1431 dev_err(&info->pdev->dev,
1432 "unsupported page size on this variant\n");
/* Platform-provided ECC requirements take precedence; otherwise
 * fall back to the datasheet values reported by identification. */
1437 if (pdata->ecc_strength && pdata->ecc_step_size) {
1438 ecc_strength = pdata->ecc_strength;
1439 ecc_step = pdata->ecc_step_size;
1441 ecc_strength = chip->ecc_strength_ds;
1442 ecc_step = chip->ecc_step_ds;
1445 /* Set default ECC strength requirements on non-ONFI devices */
1446 if (ecc_strength < 1 && ecc_step < 1) {
1451 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1452 ecc_step, mtd->writesize);
1456 /* calculate addressing information */
1457 if (mtd->writesize >= 2048)
1458 host->col_addr_cycles = 2;
1460 host->col_addr_cycles = 1;
1462 /* release the initial buffer */
1463 kfree(info->data_buff);
1465 /* allocate the real data + oob buffer */
1466 info->buf_size = mtd->writesize + mtd->oobsize;
1467 ret = pxa3xx_nand_init_buff(info);
1470 info->oob_buff = info->data_buff + mtd->writesize;
/* More than 64K pages needs a third row address cycle. */
1472 if ((mtd->size >> chip->page_shift) > 65536)
1473 host->row_addr_cycles = 3;
1475 host->row_addr_cycles = 2;
1476 return nand_scan_tail(mtd);
/*
 * alloc_nand_resource - set up per-chip-select host structures and the
 * initial detection buffer.
 *
 * The per-CS host/chip structures are not allocated separately: they are
 * carved out of the storage that directly follows the pxa3xx_nand_info
 * allocation (see the &info[1] arithmetic below), so the caller must have
 * allocated info with room for all hosts.
 *
 * NOTE(review): several lines of this function are elided in this view;
 * comments describe only what the visible code demonstrates.
 */
1479 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1481 struct pxa3xx_nand_platform_data *pdata;
1482 struct pxa3xx_nand_host *host;
1483 struct nand_chip *chip = NULL;
1484 struct mtd_info *mtd;
1487 pdata = info->pdata;
1488 if (pdata->num_cs <= 0)
1491 info->variant = pxa3xx_nand_get_variant();
1492 for (cs = 0; cs < pdata->num_cs; cs++) {
/* The cs-th host lives right after the info structure; the host
 * begins with its nand_chip, hence the cast below. */
1493 chip = (struct nand_chip *)
1494 ((u8 *)&info[1] + sizeof(*host) * cs);
1495 mtd = nand_to_mtd(chip);
1496 host = (struct pxa3xx_nand_host *)chip;
1497 info->host[cs] = host;
1500 host->info_data = info;
1501 host->read_id_bytes = 4;
1502 mtd->owner = THIS_MODULE;
/* Hook this driver's controller-specific ops into the chip. */
1504 nand_set_controller_data(chip, host);
1505 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1506 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1507 chip->controller = &info->controller;
1508 chip->waitfunc = pxa3xx_nand_waitfunc;
1509 chip->select_chip = pxa3xx_nand_select_chip;
1510 chip->read_word = pxa3xx_nand_read_word;
1511 chip->read_byte = pxa3xx_nand_read_byte;
1512 chip->read_buf = pxa3xx_nand_read_buf;
1513 chip->write_buf = pxa3xx_nand_write_buf;
1514 chip->options |= NAND_NO_SUBPAGE_WRITE;
1515 chip->cmdfunc = nand_cmdfunc;
1518 /* Allocate a buffer to allow flash detection */
1519 info->buf_size = INIT_BUFFER_SIZE;
1520 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1521 if (info->data_buff == NULL) {
1523 goto fail_disable_clk;
1526 /* initialize all interrupts to be disabled */
1527 disable_int(info, NDSR_MASK);
/* Error path: release the detection buffer allocated above. */
1531 kfree(info->data_buff);
/*
 * pxa3xx_nand_probe_dt - build the platform data from the device tree.
 *
 * Walks the FDT blob for "marvell,mvebu-pxa3xx-nand" compatible nodes,
 * skips disabled ones, and fills a freshly allocated
 * pxa3xx_nand_platform_data (base address, num-cs, arbiter/keep-config
 * flags, optional ECC strength/step) which is stored in info->pdata.
 * Only a single controller and a single chip select are supported.
 *
 * NOTE(review): several lines of this function are elided in this view;
 * comments describe only what the visible code demonstrates.
 */
1536 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1538 struct pxa3xx_nand_platform_data *pdata;
1539 const void *blob = gd->fdt_blob;
1542 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1546 /* Get address decoding nodes from the FDT blob */
1548 node = fdt_node_offset_by_compatible(blob, node,
1549 "marvell,mvebu-pxa3xx-nand");
1553 /* Bypass disabled nodes */
1554 if (!fdtdec_get_is_enabled(blob, node))
1557 /* Get the first enabled NAND controller base address */
1559 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1560 blob, node, "reg", 0, NULL, true);
/* Default to one chip select; anything else is rejected. */
1562 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1563 if (pdata->num_cs != 1) {
1564 pr_err("pxa3xx driver supports single CS only\n");
1568 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1569 pdata->enable_arbiter = 1;
1571 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1572 pdata->keep_config = 1;
1576 * If these are not set, they will be selected according
1577 * to the detected flash type.
/* Optional ECC overrides; 0 means "auto-select later". */
1580 pdata->ecc_strength = fdtdec_get_int(blob, node,
1581 "nand-ecc-strength", 0);
1584 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1585 "nand-ecc-step-size", 0);
1587 info->pdata = pdata;
1589 /* Currently support only a single NAND controller */
1592 } while (node >= 0);
/*
 * pxa3xx_nand_probe - top-level probe: parse the DT, allocate resources,
 * then scan and register an MTD device for each chip select.
 *
 * NOTE(review): several lines of this function are elided in this view;
 * also, the error prints below use &pdev->dev while sibling functions in
 * this file use &info->pdev->dev — verify pdev is in scope here.
 */
1597 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1599 struct pxa3xx_nand_platform_data *pdata;
1600 int ret, cs, probe_success;
/* Fill info->pdata from the device tree first. */
1602 ret = pxa3xx_nand_probe_dt(info);
1606 pdata = info->pdata;
1608 ret = alloc_nand_resource(info);
1610 dev_err(&pdev->dev, "alloc nand resource failed\n");
1615 for (cs = 0; cs < pdata->num_cs; cs++) {
1616 struct mtd_info *mtd = info->host[cs]->mtd;
1619 * The mtd name matches the one used in 'mtdparts' kernel
1620 * parameter. This name cannot be changed or otherwise
1621 * user's mtd partitions configuration would get broken.
1623 mtd->name = "pxa3xx_nand-0";
1625 ret = pxa3xx_nand_scan(mtd);
1627 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1632 if (nand_register(cs, mtd))
1645 * Main initialization routine
1647 void board_nand_init(void)
1649 struct pxa3xx_nand_info *info;
1650 struct pxa3xx_nand_host *host;
1653 info = kzalloc(sizeof(*info) +
1654 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1659 ret = pxa3xx_nand_probe(info);