4 * Synopsys DesignWare Cores (DWC) SATA host driver
6 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
8 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
9 * Copyright 2008 DENX Software Engineering
11 * Based on versions provided by AMCC and Synopsys which are:
12 * Copyright 2006 Applied Micro Circuits Corporation
13 * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
15 * This program is free software; you can redistribute
16 * it and/or modify it under the terms of the GNU
17 * General Public License as published by the
18 * Free Software Foundation; either version 2 of the License,
19 * or (at your option) any later version.
23 * SATA support based on the chip canyonlands.
26 * The local version of this driver for the canyonlands board
27 * does not use interrupts but polls the chip instead.
33 #include <asm/processor.h>
34 #include <asm/errno.h>
38 #include <linux/ctype.h>
/* AHB DMA controller geometry: this driver drives a single DMA channel,
 * each channel exposing 8 register slots in the register file below. */
42 #define DMA_NUM_CHANS 1
43 #define DMA_NUM_CHAN_REGS 8
/* Default AHB DMA burst length (beats per burst). */
45 #define AHB_DMA_BRST_DFLT 16
/* Memory-mapped register layout of the AHB DMA engine.
 * NOTE(review): this excerpt is missing lines (struct members and closing
 * braces are elided), so each struct below is only partially visible. */
52 struct dma_chan_regs {
/* Per-channel source/destination status registers. */
59 struct dmareg sstatar;
60 struct dmareg dstatar;
66 struct dma_interrupt_regs {
/* Source/destination transfer-complete interrupt registers. */
69 struct dmareg srctran;
70 struct dmareg dsttran;
/* Top-level DMA register file: channel banks followed by the shared
 * interrupt (raw/status/mask/clear), handshake-request, config and
 * channel-enable registers. */
75 struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS];
76 struct dma_interrupt_regs interrupt_raw;
77 struct dma_interrupt_regs interrupt_status;
78 struct dma_interrupt_regs interrupt_mask;
79 struct dma_interrupt_regs interrupt_clear;
80 struct dmareg statusInt;
81 struct dmareg rq_srcreg;
82 struct dmareg rq_dstreg;
83 struct dmareg rq_sgl_srcreg;
84 struct dmareg rq_sgl_dstreg;
85 struct dmareg rq_lst_srcreg;
86 struct dmareg rq_lst_dstreg;
87 struct dmareg dma_cfg;
88 struct dmareg dma_chan_en;
90 struct dmareg dma_test;
94 * Param 6 = dma_param[0], Param 5 = dma_param[1],
95 * Param 4 = dma_param[2] ...
97 struct dmareg dma_params[6];
/* Global DMA controller enable/disable values for dma_cfg. */
100 #define DMA_EN 0x00000001
101 #define DMA_DI 0x00000000
/* Channel-select bit for channel 'ch'. */
102 #define DMA_CHANNEL(ch) (0x00000001 << (ch))
/* Write-enable pattern for dma_chan_en: the high byte (bits 8..15) is the
 * write-mask, the low byte the enable value for channel 'ch'. */
103 #define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \
104 ((0x000000001 << (ch)) << 8))
105 #define DMA_DISABLE_CHAN(ch) (0x00000000 | \
106 ((0x000000001 << (ch)) << 8))
/* Single SATA port; SCR block and DWC-specific registers sit at fixed
 * offsets from the controller base. */
108 #define SATA_DWC_MAX_PORTS 1
109 #define SATA_DWC_SCR_OFFSET 0x24
110 #define SATA_DWC_REG_OFFSET 0x64
/* DWC SATA core register block (members elided from this excerpt). */
112 struct sata_dwc_regs {
/* TX/RX FIFO depths in 32-bit DWORDs (9-bit fields). */
#define SATA_DWC_TXFIFO_DEPTH	0x01FF
#define SATA_DWC_RXFIFO_DEPTH	0x01FF

/*
 * Burst-transaction-size (DBTSR) field encodings: the register takes the
 * transfer size in DWORDs, write size in bits 0..8 and read size in
 * bits 16..24.  The 'size' argument is a byte count.
 *
 * Fix: the macro argument is now fully parenthesized; previously
 * SATA_DWC_DBTSR_MWR(a + b) expanded to (a + b/4), silently computing
 * the wrong burst size for any compound-expression argument.
 */
#define SATA_DWC_DBTSR_MWR(size)	(((size) / 4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	((((size) / 4) & \
					SATA_DWC_RXFIFO_DEPTH) << 16)
/* Interrupt pending register (intpr) bits. */
148 #define SATA_DWC_INTPR_DMAT 0x00000001
149 #define SATA_DWC_INTPR_NEWFP 0x00000002
150 #define SATA_DWC_INTPR_PMABRT 0x00000004
151 #define SATA_DWC_INTPR_ERR 0x00000008
152 #define SATA_DWC_INTPR_NEWBIST 0x00000010
153 #define SATA_DWC_INTPR_IPF 0x10000000
/* Interrupt mask register (intmr) bits, mirroring intpr. */
154 #define SATA_DWC_INTMR_DMATM 0x00000001
155 #define SATA_DWC_INTMR_NEWFPM 0x00000002
156 #define SATA_DWC_INTMR_PMABRTM 0x00000004
157 #define SATA_DWC_INTMR_ERRM 0x00000008
158 #define SATA_DWC_INTMR_NEWBISTM 0x00000010
/* DMA control register (dmacr) values: writing the TX-channel-enable mode
 * bit alone also serves as the TX/RX clear pattern. */
160 #define SATA_DWC_DMACR_TMOD_TXCHEN 0x00000004
161 #define SATA_DWC_DMACR_TXRXCH_CLEAR SATA_DWC_DMACR_TMOD_TXCHEN
/* Depth of the queued-command bookkeeping arrays. */
163 #define SATA_DWC_QCMD_MAX 32
/* SError bits that should raise an error interrupt (written to errmr). */
165 #define SATA_DWC_SERROR_ERR_BITS 0x0FFF0F03
/* Recover the per-port private data from an ata_port.
 * NOTE(review): the continuation line of this macro is missing from
 * this excerpt. */
167 #define HSDEVP_FROM_AP(ap) (struct sata_dwc_device_port*) \
/* Per-controller state (members partially elided from this excerpt). */
170 struct sata_dwc_device {
172 struct ata_probe_ent *pe;
173 struct ata_host *host;
/* Mapped DWC SATA core registers. */
175 struct sata_dwc_regs *sata_dwc_regs;
/* Per-port state: one slot per queued-command tag. */
179 struct sata_dwc_device_port {
180 struct sata_dwc_device *hsdev;
181 int cmd_issued[SATA_DWC_QCMD_MAX];
182 u32 dma_chan[SATA_DWC_QCMD_MAX];
183 int dma_pending[SATA_DWC_QCMD_MAX];
/* Values for cmd_issued[] (enum header elided from this excerpt). */
187 SATA_DWC_CMD_ISSUED_NOT = 0,
188 SATA_DWC_CMD_ISSUED_PEND = 1,
189 SATA_DWC_CMD_ISSUED_EXEC = 2,
190 SATA_DWC_CMD_ISSUED_NODATA = 3,
/* Values for dma_pending[]. */
192 SATA_DWC_DMA_PENDING_NONE = 0,
193 SATA_DWC_DMA_PENDING_TX = 1,
194 SATA_DWC_DMA_PENDING_RX = 2,
/*
 * This driver polls the controller, so the Linux sleep helpers are
 * emulated with busy-wait microsecond delays.
 *
 * Fix: the macro argument is now parenthesized; previously
 * msleep(t + 1) expanded to udelay(t + 1 * 1000) and slept for the
 * wrong duration whenever the argument was an expression.
 */
#define msleep(a)	udelay((a) * 1000)
#define ssleep(a)	msleep((a) * 1000)
/* Internal-command timeout, derived from libata's ATA_TMOUT_INTERNAL. */
200 static int ata_probe_timeout = (ATA_TMOUT_INTERNAL / 100);
/* Coarse device state machine (members elided from this excerpt). */
202 enum sata_dev_state {
208 enum sata_dev_state dev_state = SATA_INIT;
/* Mapped AHB DMA register file (assigned in init_sata). */
210 static struct ahb_dma_regs *sata_dma_regs = 0;
/* Single-port driver: one static host/port instance shared by all
 * entry points. */
211 static struct ata_host *phost;
212 static struct ata_port ap;
213 static struct ata_port *pap = ≈
/* Single static device/port-private instances (one port, one device). */
214 static struct ata_device ata_device;
215 static struct sata_dwc_device_port dwc_devp;
/* Cached address of the SStatus SCR block, set in init_sata. */
217 static void *scr_addr_sstatus;
/* Sector count of the command being prepared; consumed by ata_qc_reinit. */
218 static u32 temp_n_block = 0;
/* Forward declarations for the file-local (static) libata-style helpers
 * implemented below, plus the board-level device-descriptor table. */
220 static unsigned ata_exec_internal(struct ata_device *dev,
221 struct ata_taskfile *tf, const u8 *cdb,
222 int dma_dir, unsigned int buflen,
223 unsigned long timeout);
224 static unsigned int ata_dev_set_feature(struct ata_device *dev,
225 u8 enable,u8 feature);
226 static unsigned int ata_dev_init_params(struct ata_device *dev,
227 u16 heads, u16 sectors);
228 static u8 ata_irq_on(struct ata_port *ap);
229 static struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
231 static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
232 u8 status, int in_wq);
233 static void ata_tf_to_host(struct ata_port *ap,
234 const struct ata_taskfile *tf);
235 static void ata_exec_command(struct ata_port *ap,
236 const struct ata_taskfile *tf);
237 static unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
238 static u8 ata_check_altstatus(struct ata_port *ap);
239 static u8 ata_check_status(struct ata_port *ap);
240 static void ata_dev_select(struct ata_port *ap, unsigned int device,
241 unsigned int wait, unsigned int can_sleep);
242 static void ata_qc_issue(struct ata_queued_cmd *qc);
243 static void ata_tf_load(struct ata_port *ap,
244 const struct ata_taskfile *tf);
245 static int ata_dev_read_sectors(unsigned char* pdata,
246 unsigned long datalen, u32 block, u32 n_block);
247 static int ata_dev_write_sectors(unsigned char* pdata,
248 unsigned long datalen , u32 block, u32 n_block);
249 static void ata_std_dev_select(struct ata_port *ap, unsigned int device);
250 static void ata_qc_complete(struct ata_queued_cmd *qc);
251 static void __ata_qc_complete(struct ata_queued_cmd *qc);
252 static void fill_result_tf(struct ata_queued_cmd *qc);
253 static void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
254 static void ata_mmio_data_xfer(struct ata_device *dev,
256 unsigned int buflen,int do_write);
257 static void ata_pio_task(struct ata_port *arg_ap);
258 static void __ata_port_freeze(struct ata_port *ap);
259 static int ata_port_freeze(struct ata_port *ap);
260 static void ata_qc_free(struct ata_queued_cmd *qc);
261 static void ata_pio_sectors(struct ata_queued_cmd *qc);
262 static void ata_pio_sector(struct ata_queued_cmd *qc);
263 static void ata_pio_queue_task(struct ata_port *ap,
264 void *data,unsigned long delay);
265 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq);
266 static int sata_dwc_softreset(struct ata_port *ap);
267 static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
268 unsigned int flags, u16 *id);
269 static int check_sata_dev_state(void);
/* Board-level SATA device descriptors, defined by the common SATA layer. */
271 extern block_dev_desc_t sata_dev_desc[CONFIG_SYS_SATA_MAX_DEVICE];
/* Capabilities advertised for the single DWC port: memory-mapped, polled
 * PIO with soft reset and NCQ flagged (remaining initializers elided from
 * this excerpt). */
273 static const struct ata_port_info sata_dwc_port_info[] = {
275 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
276 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING |
277 ATA_FLAG_SRST | ATA_FLAG_NCQ,
/* Probe and initialize the DWC SATA controller for device 'dev'.
 *
 * Maps the controller and DMA register blocks at fixed addresses, fills
 * in the taskfile I/O address table, waits for a device to come out of
 * BSY, soft-resets it, masks DMA/controller interrupts (the driver
 * polls), and clears any stale SError bits.
 *
 * NOTE(review): many lines of this function are missing from this
 * excerpt (local declarations for base/i/status/rc/chan, loop bodies,
 * returns), so the control flow below is only partially visible.
 */
284 int init_sata(int dev)
286 struct sata_dwc_device hsdev;
287 struct ata_host host;
288 struct ata_port_info pi = sata_dwc_port_info[0];
289 struct ata_link *link;
290 struct sata_dwc_device_port hsdevp = dwc_devp;
292 u8 *sata_dma_regs_addr = 0;
294 unsigned long base_addr = 0;
/* Controller base and DWC-specific register window. */
301 base = (u8*)SATA_BASE_ADDR;
303 hsdev.sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
305 host.n_ports = SATA_DWC_MAX_PORTS;
/* Initialize the (single) static port from the port-info template. */
307 for (i = 0; i < SATA_DWC_MAX_PORTS; i++) {
308 ap.pflags |= ATA_PFLAG_INITIALIZING;
309 ap.flags = ATA_FLAG_DISABLED;
311 ap.ctl = ATA_DEVCTL_OBS;
318 link->active_tag = ATA_TAG_POISON;
319 link->hw_sata_spd_limit = 0;
325 ap.pio_mask = pi.pio_mask;
326 ap.mwdma_mask = pi.mwdma_mask;
327 ap.udma_mask = pi.udma_mask;
328 ap.flags |= pi.flags;
329 ap.link.flags |= pi.link_flags;
331 host.ports[0]->ioaddr.cmd_addr = base;
332 host.ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
333 scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
/* Shadow-register (taskfile) address map: one register every 4 bytes. */
335 base_addr = (unsigned long)base;
337 host.ports[0]->ioaddr.cmd_addr = (void *)base_addr + 0x00;
338 host.ports[0]->ioaddr.data_addr = (void *)base_addr + 0x00;
340 host.ports[0]->ioaddr.error_addr = (void *)base_addr + 0x04;
341 host.ports[0]->ioaddr.feature_addr = (void *)base_addr + 0x04;
343 host.ports[0]->ioaddr.nsect_addr = (void *)base_addr + 0x08;
345 host.ports[0]->ioaddr.lbal_addr = (void *)base_addr + 0x0c;
346 host.ports[0]->ioaddr.lbam_addr = (void *)base_addr + 0x10;
347 host.ports[0]->ioaddr.lbah_addr = (void *)base_addr + 0x14;
349 host.ports[0]->ioaddr.device_addr = (void *)base_addr + 0x18;
350 host.ports[0]->ioaddr.command_addr = (void *)base_addr + 0x1c;
351 host.ports[0]->ioaddr.status_addr = (void *)base_addr + 0x1c;
353 host.ports[0]->ioaddr.altstatus_addr = (void *)base_addr + 0x20;
354 host.ports[0]->ioaddr.ctl_addr = (void *)base_addr + 0x20;
356 sata_dma_regs_addr = (u8*)SATA_DMA_REG_ADDR;
357 sata_dma_regs = (void *__iomem)sata_dma_regs_addr;
/* 0x7f on the alternate-status register means no device responded. */
359 status = ata_check_altstatus(&ap);
361 if (status == 0x7f) {
362 printf("Hard Disk not found.\n");
363 dev_state = SATA_NODEVICE;
/* Poll BSY until the device is ready or the reset window expires. */
368 printf("Waiting for device...");
373 status = ata_check_altstatus(&ap);
375 if ((status & ATA_BUSY) == 0) {
381 if (i > (ATA_RESET_TIME * 100)) {
382 printf("** TimeOUT **\n");
384 dev_state = SATA_NODEVICE;
388 if ((i >= 100) && ((i % 100) == 0))
392 rc = sata_dwc_softreset(&ap);
395 printf("sata_dwc : error. soft reset failed\n");
/* Mask DMA channel interrupts and disable the DMA controller — this
 * driver polls instead of taking interrupts. */
399 for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
400 out_le32(&(sata_dma_regs->interrupt_mask.error.low),
401 DMA_DISABLE_CHAN(chan));
403 out_le32(&(sata_dma_regs->interrupt_mask.tfr.low),
404 DMA_DISABLE_CHAN(chan));
407 out_le32(&(sata_dma_regs->dma_cfg.low), DMA_DI);
409 out_le32(&hsdev.sata_dwc_regs->intmr,
410 SATA_DWC_INTMR_ERRM |
411 SATA_DWC_INTMR_PMABRTM);
413 /* Unmask the error bits that should trigger
414 * an error interrupt by setting the error mask register.
416 out_le32(&hsdev.sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
418 hsdev.host = ap.host;
419 memset(&hsdevp, 0, sizeof(hsdevp));
420 hsdevp.hsdev = &hsdev;
422 for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
423 hsdevp.cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
/* Clear SError by writing back its current contents (RW1C register). */
425 out_le32((void __iomem *)scr_addr_sstatus + 4,
426 in_le32((void __iomem *)scr_addr_sstatus + 4));
/* Read the alternate-status register (does not clear pending interrupt
 * state, unlike the status register).
 * NOTE(review): local declaration and return statement are missing from
 * this excerpt. */
432 static u8 ata_check_altstatus(struct ata_port *ap)
435 	val = readb(ap->ioaddr.altstatus_addr);
/* Perform an ATA soft reset on the port.
 *
 * First probes for device presence by writing/reading the classic
 * 0x55/0xaa signature into the nsect/lbal shadow registers, then pulses
 * SRST in the device-control register, waits for BSY to clear, and
 * re-reads the signature registers.
 *
 * NOTE(review): several lines (local declarations, delays between SRST
 * writes, the return) are missing from this excerpt.
 */
439 static int sata_dwc_softreset(struct ata_port *ap)
445 struct ata_ioports *ioaddr = &ap->ioaddr;
447 serror = in_le32((void *)ap->ioaddr.scr_addr + (SCR_ERROR * 4));
/* Presence check: a real device latches the last written values. */
449 writeb(0x55, ioaddr->nsect_addr);
450 writeb(0xaa, ioaddr->lbal_addr);
451 writeb(0xaa, ioaddr->nsect_addr);
452 writeb(0x55, ioaddr->lbal_addr);
453 writeb(0x55, ioaddr->nsect_addr);
454 writeb(0xaa, ioaddr->lbal_addr);
456 nsect = readb(ioaddr->nsect_addr);
457 lbal = readb(ioaddr->lbal_addr);
459 if ((nsect == 0x55) && (lbal == 0xaa)) {
460 printf("Device found\n");
462 printf("No device found\n");
463 dev_state = SATA_NODEVICE;
/* Select device 0, then pulse SRST: ctl, ctl|SRST, ctl. */
467 tmp = ATA_DEVICE_OBS;
468 writeb(tmp, ioaddr->device_addr);
469 writeb(ap->ctl, ioaddr->ctl_addr);
473 writeb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
476 writeb(ap->ctl, ioaddr->ctl_addr);
/* Wait for the device to drop BSY after the reset. */
479 status = ata_check_status(ap);
482 ata_check_status(ap);
485 u8 status = ata_check_status(ap);
487 if (!(status & ATA_BUSY))
490 printf("Hard Disk status is BUSY.\n");
/* Re-select device 0 and sample the post-reset signature. */
494 tmp = ATA_DEVICE_OBS;
495 writeb(tmp, ioaddr->device_addr);
497 nsect = readb(ioaddr->nsect_addr);
498 lbal = readb(ioaddr->lbal_addr);
/* Read the taskfile status register.
 * NOTE(review): local declaration and return are missing from this
 * excerpt. */
503 static u8 ata_check_status(struct ata_port *ap)
506 val = readb(ap->ioaddr.status_addr);
/* IDENTIFY-word probe: host-initiated power management supported?
 * Word value elided here; 0/0xffff means the field is invalid. */
510 static int ata_id_has_hipm(const u16 *id)
514 if (val == 0 || val == 0xffff)
517 return val & (1 << 9);
/* Device-initiated power management supported? */
520 static int ata_id_has_dipm(const u16 *id)
524 if (val == 0 || val == 0xffff)
527 return val & (1 << 3);
/* Identify the attached drive and publish its geometry/strings into
 * sata_dev_desc[dev].
 *
 * Waits for BSY to clear, issues IDENTIFY via ata_dev_read_id(), derives
 * the PIO/MWDMA/UDMA transfer masks from the ID words, applies CFA and
 * LBA/LBA48/NCQ feature flags, then copies the revision/vendor/product
 * strings and capacity into the board descriptor.
 *
 * NOTE(review): many lines are missing from this excerpt (local
 * declarations for i/rc/status/id/revbuf/ncq_desc, several closing
 * braces, returns), so the flow below is only partially visible.
 */
530 int scan_sata(int dev)
536 struct ata_device *ata_dev = &ata_device;
537 unsigned long pio_mask, mwdma_mask, udma_mask;
538 unsigned long xfer_mask;
540 u16 iobuf[ATA_SECTOR_WORDS];
542 memset(iobuf, 0, sizeof(iobuf));
544 if (dev_state == SATA_NODEVICE)
/* Poll BSY until ready or the reset window expires. */
547 printf("Waiting for device...");
552 status = ata_check_altstatus(&ap);
554 if ((status & ATA_BUSY) == 0) {
560 if (i > (ATA_RESET_TIME * 100)) {
561 printf("** TimeOUT **\n");
563 dev_state = SATA_NODEVICE;
566 if ((i >= 100) && ((i % 100) == 0))
/* IDENTIFY the device (post-reset flavor). */
572 rc = ata_dev_read_id(ata_dev, &ata_dev->class,
573 ATA_READID_POSTRESET,ata_dev->id);
575 printf("sata_dwc : error. failed sata scan\n");
579 /* SATA drives indicate we have a bridge. We don't know which
580 * end of the link the bridge is which is a problem
582 if (ata_id_is_sata(ata_dev->id))
583 ap.cbl = ATA_CBL_SATA;
/* Reset cached configuration before re-deriving it from the ID data. */
587 ata_dev->flags &= ~ATA_DFLAG_CFG_MASK;
588 ata_dev->max_sectors = 0;
589 ata_dev->cdb_len = 0;
590 ata_dev->n_sectors = 0;
591 ata_dev->cylinders = 0;
593 ata_dev->sectors = 0;
/* PIO mask: word 64 when valid, otherwise derived from word 51. */
595 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
596 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
600 /* If word 64 isn't valid then Word 51 high byte holds
601 * the PIO timing number for the maximum. Turn it into
604 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
606 pio_mask = (2 << mode) - 1;
612 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
/* CFA advanced modes live in word 163. */
614 if (ata_id_is_cfa(id)) {
615 int pio = id[163] & 0x7;
616 int dma = (id[163] >> 3) & 7;
619 pio_mask |= (1 << 5);
621 pio_mask |= (1 << 6);
623 mwdma_mask |= (1 << 3);
625 mwdma_mask |= (1 << 4);
629 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
630 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
631 
632 xfer_mask = ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
633 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
634 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
636 if (ata_dev->class == ATA_DEV_ATA) {
637 if (ata_id_is_cfa(id)) {
639 printf("supports DRM functions and may "
640 "not be fully accessable.\n");
641 sprintf(revbuf, "%s", "CFA");
643 if (ata_id_has_tpm(id))
644 printf("supports DRM functions and may "
645 "not be fully accessable.\n");
648 ata_dev->n_sectors = ata_id_n_sectors((u16*)id);
/* Word 59 bit 8 validates the multi-sector count in its low byte. */
650 if (ata_dev->id[59] & 0x100)
651 ata_dev->multi_count = ata_dev->id[59] & 0xff;
653 if (ata_id_has_lba(id)) {
654 const char *lba_desc;
658 ata_dev->flags |= ATA_DFLAG_LBA;
659 if (ata_id_has_lba48(id)) {
660 ata_dev->flags |= ATA_DFLAG_LBA48;
663 if (ata_dev->n_sectors >= (1UL << 28) &&
664 ata_id_has_flush_ext(id))
665 ata_dev->flags |= ATA_DFLAG_FLUSH_EXT;
667 if (!ata_id_has_ncq(ata_dev->id))
670 if (ata_dev->horkage & ATA_HORKAGE_NONCQ)
671 sprintf(ncq_desc, "%s", "NCQ (not used)");
673 if (ap.flags & ATA_FLAG_NCQ)
674 ata_dev->flags |= ATA_DFLAG_NCQ;
676 ata_dev->cdb_len = 16;
678 ata_dev->max_sectors = ATA_MAX_SECTORS;
679 if (ata_dev->flags & ATA_DFLAG_LBA48)
680 ata_dev->max_sectors = ATA_MAX_SECTORS_LBA48;
682 if (!(ata_dev->horkage & ATA_HORKAGE_IPM)) {
683 if (ata_id_has_hipm(ata_dev->id))
684 ata_dev->flags |= ATA_DFLAG_HIPM;
685 if (ata_id_has_dipm(ata_dev->id))
686 ata_dev->flags |= ATA_DFLAG_DIPM;
/* PATA drive behind a SATA bridge: cap UDMA and transfer size. */
689 if ((ap.cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ata_dev->id))) {
690 ata_dev->udma_mask &= ATA_UDMA5;
691 ata_dev->max_sectors = ATA_MAX_SECTORS;
694 if (ata_dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
695 printf("Drive reports diagnostics failure."
696 "This may indicate a drive\n");
697 printf("fault or invalid emulation."
698 "Contact drive vendor for information.\n");
701 rc = check_sata_dev_state();
/* Publish identity strings and capacity to the board descriptor.
 * NOTE(review): field/ID-offset pairings below look swapped
 * (revision<-FW_REV is right, but vendor<-PROD and product<-SERNO) —
 * preserved as-is; verify against the board code before changing. */
703 ata_id_c_string(ata_dev->id,
704 (unsigned char *)sata_dev_desc[dev].revision,
705 ATA_ID_FW_REV, sizeof(sata_dev_desc[dev].revision));
706 ata_id_c_string(ata_dev->id,
707 (unsigned char *)sata_dev_desc[dev].vendor,
708 ATA_ID_PROD, sizeof(sata_dev_desc[dev].vendor));
709 ata_id_c_string(ata_dev->id,
710 (unsigned char *)sata_dev_desc[dev].product,
711 ATA_ID_SERNO, sizeof(sata_dev_desc[dev].product));
713 sata_dev_desc[dev].lba = (u32) ata_dev->n_sectors;
/* Word 83 bit 10: 48-bit address feature set supported. */
716 if (ata_dev->id[83] & (1 << 10)) {
717 sata_dev_desc[dev].lba48 = 1;
719 sata_dev_desc[dev].lba48 = 0;
/* Poll the status register until none of 'bits' are set, the register
 * reads 0xff (no device), or 'max' iterations elapse; returns the last
 * status read.
 * NOTE(review): loop body lines (delay, max decrement) are missing from
 * this excerpt. */
726 static u8 ata_busy_wait(struct ata_port *ap,
727 unsigned int bits,unsigned int max)
733 status = ata_check_status(ap);
735 } while (status != 0xff && (status & bits) && (max > 0));
/* Issue IDENTIFY DEVICE and fill id[] with the (byte-swapped) result.
 *
 * Handles the usual libata quirks: falling back between ATA/ATAPI
 * IDENTIFY flavors, the SET_FEATURES spin-up subcommand for drives that
 * power up in standby (id[2] == 0x37c8/0x738c), and the pre-ATA4
 * INIT_DEV_PARAMS sequence after reset.
 *
 * NOTE(review): numerous lines (locals, goto labels/retry plumbing,
 * returns) are missing from this excerpt.
 */
740 static int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
741 unsigned int flags, u16 *id)
743 struct ata_port *ap = pap;
744 unsigned int class = *p_class;
745 struct ata_taskfile tf;
746 unsigned int err_mask = 0;
748 int may_fallback = 1, tried_spinup = 0;
/* Device must be non-BSY before we can select and identify it. */
752 status = ata_busy_wait(ap, ATA_BUSY, 30000);
753 if (status & ATA_BUSY) {
754 printf("BSY = 0 check. timeout.\n");
759 ata_dev_select(ap, dev->devno, 1, 1);
762 memset(&tf, 0, sizeof(tf));
764 ap->flags &= ~ATA_FLAG_DISABLED;
766 tf.device = ATA_DEVICE_OBS;
767 tf.command = ATA_CMD_ID_ATA;
768 tf.protocol = ATA_PROT_PIO;
770 /* Some devices choke if TF registers contain garbage. Make
771 * sure those are properly initialized.
773 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
775 /* Device presence detection is unreliable on some
776 * controllers. Always poll IDENTIFY if available.
778 tf.flags |= ATA_TFLAG_POLLING;
/* Run IDENTIFY as a polled internal PIO-in command. */
782 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
783 sizeof(id[0]) * ATA_ID_WORDS, 0);
786 if (err_mask & AC_ERR_NODEV_HINT) {
787 printf("NODEV after polling detection\n");
791 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
792 /* Device or controller might have reported
793 * the wrong device class. Give a shot at the
794 * other IDENTIFY if the current one is
795 * aborted by the device.
800 if (class == ATA_DEV_ATA) {
801 class = ATA_DEV_ATAPI;
807 /* Control reaches here iff the device aborted
808 * both flavors of IDENTIFYs which happens
809 * sometimes with phantom devices.
811 printf("both IDENTIFYs aborted, assuming NODEV\n");
815 reason = "I/O error";
819 /* Falling back doesn't make sense if ID data was read
820 * successfully at least once.
/* IDENTIFY data arrives little-endian; convert to host order. */
826 for (id_cnt = 0; id_cnt < ATA_ID_WORDS; id_cnt++)
827 id[id_cnt] = le16_to_cpu(id[id_cnt]);
831 reason = "device reports invalid type";
/* Sanity-check the reported class against the ID signature words. */
833 if (class == ATA_DEV_ATA) {
834 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
837 if (ata_id_is_ata(id))
840 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
843 * Drive powered-up in standby mode, and requires a specific
844 * SET_FEATURES spin-up subcommand before it will accept
845 * anything other than the original IDENTIFY command.
847 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
848 if (err_mask && id[2] != 0x738c) {
850 reason = "SPINUP failed";
854 * If the drive initially returned incomplete IDENTIFY info,
855 * we now must reissue the IDENTIFY command.
861 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
863 * The exact sequence expected by certain pre-ATA4 drives is:
865 * IDENTIFY (optional in early ATA)
866 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
868 * Some drives were very specific about that exact sequence.
870 * Note that ATA4 says lba is mandatory so the second check
871 * shoud never trigger.
873 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
874 err_mask = ata_dev_init_params(dev, id[3], id[6]);
877 reason = "INIT_DEV_PARAMS failed";
881 /* current CHS translation info (id[53-58]) might be
882 * changed. reread the identify device info.
884 flags &= ~ATA_READID_POSTRESET;
/* Wait (up to 1000 polls) for both BSY and DRQ to clear; returns the
 * last status read.
 * NOTE(review): the return statement is missing from this excerpt. */
896 static u8 ata_wait_idle(struct ata_port *ap)
898 u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
/* Select the given device on the port (wait/can_sleep handling elided
 * from this excerpt); delegates to ata_std_dev_select(). */
902 static void ata_dev_select(struct ata_port *ap, unsigned int device,
903 unsigned int wait, unsigned int can_sleep)
908 ata_std_dev_select(ap, device);
/* Standard device-select: write DEV bit to the device register, then
 * read altstatus to flush the write before any delay. */
914 static void ata_std_dev_select(struct ata_port *ap, unsigned int device)
919 tmp = ATA_DEVICE_OBS;
921 tmp = ATA_DEVICE_OBS | ATA_DEV1;
924 writeb(tmp, ap->ioaddr.device_addr);
926 readb(ap->ioaddr.altstatus_addr);
/* Poll a 32-bit register until any bit in 'sign' is set, for at most
 * timeout_msec iterations.  Returns 0 on success, -1 on timeout.
 * NOTE(review): parameter list continuation and per-iteration delay are
 * missing from this excerpt. */
931 static int waiting_for_reg_state(volatile u8 *offset,
938 for (i = 0; i < timeout_msec; i++) {
939 status = readl(offset);
940 if ((status & sign) != 0)
945 return (i < timeout_msec) ? 0 : -1;
/* Reset a queued command to a clean state before reuse: no DMA, byte
 * counters cleared, transfer length taken from the module-level
 * temp_n_block, taskfile zeroed and result pre-marked ready. */
948 static void ata_qc_reinit(struct ata_queued_cmd *qc)
950 qc->dma_dir = DMA_NONE;
952 qc->nbytes = qc->extrabytes = qc->curbytes = 0;
955 qc->sect_size = ATA_SECT_SIZE;
/* temp_n_block is set by the sector read/write paths before issue. */
956 qc->nbytes = ATA_SECT_SIZE * temp_n_block;
958 memset(&qc->tf, 0, sizeof(qc->tf));
960 qc->tf.device = ATA_DEVICE_OBS;
962 qc->result_tf.command = ATA_DRDY;
963 qc->result_tf.feature = 0;
/* Map a tag to its queued-command slot; out-of-range tags fall through
 * (return path for that case is missing from this excerpt). */
966 struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
969 if (tag < ATA_MAX_QUEUE)
970 return &ap->qcmd[tag];
/* Mark the port frozen so no further commands are accepted. */
974 static void __ata_port_freeze(struct ata_port *ap)
976 printf("set port freeze.\n");
977 ap->pflags |= ATA_PFLAG_FROZEN;
/* Freeze the port (return value handling elided from this excerpt). */
980 static int ata_port_freeze(struct ata_port *ap)
982 __ata_port_freeze(ap);
/* Execute a taskfile as a polled internal command on the internal tag.
 *
 * Saves the link/port queue state, allocates the internal tag, issues
 * the command, polls BSY/DRQ/error conditions to completion, copies the
 * result taskfile back into *tf, and restores the preempted queue state.
 * Returns the command's err_mask (0 on success).
 *
 * NOTE(review): many lines (qc setup between 1029-1036, polling loop
 * structure, label lines) are missing from this excerpt.
 */
986 unsigned ata_exec_internal(struct ata_device *dev,
987 struct ata_taskfile *tf, const u8 *cdb,
988 int dma_dir, unsigned int buflen,
989 unsigned long timeout)
991 struct ata_link *link = dev->link;
992 struct ata_port *ap = pap;
993 struct ata_queued_cmd *qc;
994 unsigned int tag, preempted_tag;
995 u32 preempted_sactive, preempted_qc_active;
996 int preempted_nr_active_links;
997 unsigned int err_mask;
/* Refuse to issue while the device is still busy or the port frozen. */
1001 status = ata_busy_wait(ap, ATA_BUSY, 300000);
1002 if (status & ATA_BUSY) {
1003 printf("BSY = 0 check. timeout.\n");
1008 if (ap->pflags & ATA_PFLAG_FROZEN)
1009 return AC_ERR_SYSTEM;
/* Internal commands always use the reserved internal tag. */
1011 tag = ATA_TAG_INTERNAL;
1013 if (test_and_set_bit(tag, &ap->qc_allocated)) {
1018 qc = __ata_qc_from_tag(ap, tag);
/* Save queue state so it can be restored after the internal command. */
1025 preempted_tag = link->active_tag;
1026 preempted_sactive = link->sactive;
1027 preempted_qc_active = ap->qc_active;
1028 preempted_nr_active_links = ap->nr_active_links;
1029 link->active_tag = ATA_TAG_POISON;
1032 ap->nr_active_links = 0;
1036 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1037 qc->flags |= ATA_QCFLAG_RESULT_TF;
1038 qc->dma_dir = dma_dir;
1039 qc->private_data = 0;
1044 timeout = ata_probe_timeout * 1000 / HZ;
1046 status = ata_busy_wait(ap, ATA_BUSY, 30000);
1047 if (status & ATA_BUSY) {
1048 printf("BSY = 0 check. timeout.\n");
1049 printf("altstatus = 0x%x.\n", status);
1050 qc->err_mask |= AC_ERR_OTHER;
1051 return qc->err_mask;
/* Wait for DRQ (bit 3) before reading data; then check ERR (bit 0). */
1054 if (waiting_for_reg_state(ap->ioaddr.altstatus_addr, 1000, 0x8)) {
1058 status = readb(ap->ioaddr.altstatus_addr);
1059 if ((status & 0x01) != 0) {
1060 errorStatus = readb(ap->ioaddr.feature_addr);
/* Error 0x04 (ABRT) on READ SECTORS EXT => drive lacks LBA48. */
1061 if (errorStatus == 0x04 &&
1062 qc->tf.command == ATA_CMD_PIO_READ_EXT){
1063 printf("Hard Disk doesn't support LBA48\n");
1064 dev_state = SATA_ERROR;
1065 qc->err_mask |= AC_ERR_OTHER;
1066 return qc->err_mask;
1069 qc->err_mask |= AC_ERR_OTHER;
1070 return qc->err_mask;
1073 status = ata_busy_wait(ap, ATA_BUSY, 10);
1074 if (status & ATA_BUSY) {
1075 printf("BSY = 0 check. timeout.\n");
1076 qc->err_mask |= AC_ERR_OTHER;
1077 return qc->err_mask;
/* A still-active command here means it timed out: freeze the port. */
1083 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1084 qc->err_mask |= AC_ERR_TIMEOUT;
1085 ata_port_freeze(ap);
1089 if (qc->flags & ATA_QCFLAG_FAILED) {
1090 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1091 qc->err_mask |= AC_ERR_DEV;
1094 qc->err_mask |= AC_ERR_OTHER;
/* AC_ERR_OTHER is a catch-all; drop it once a specific cause is known. */
1096 if (qc->err_mask & ~AC_ERR_OTHER)
1097 qc->err_mask &= ~AC_ERR_OTHER;
1100 *tf = qc->result_tf;
1101 err_mask = qc->err_mask;
/* Restore the preempted queue state. */
1103 link->active_tag = preempted_tag;
1104 link->sactive = preempted_sactive;
1105 ap->qc_active = preempted_qc_active;
1106 ap->nr_active_links = preempted_nr_active_links;
1108 if (ap->flags & ATA_FLAG_DISABLED) {
1109 err_mask |= AC_ERR_SYSTEM;
1110 ap->flags &= ~ATA_FLAG_DISABLED;
/* Account a queued command as active (NCQ vs non-NCQ bookkeeping) and
 * hand it to the protocol-issue path; on sleeping devices or issue
 * failure it falls through to completion.
 * NOTE(review): several branch/else lines are missing from this
 * excerpt. */
1116 static void ata_qc_issue(struct ata_queued_cmd *qc)
1118 struct ata_port *ap = qc->ap;
1119 struct ata_link *link = qc->dev->link;
1120 u8 prot = qc->tf.protocol;
/* NCQ commands track tags in sactive; others use active_tag. */
1122 if (ata_is_ncq(prot)) {
1124 ap->nr_active_links++;
1125 link->sactive |= 1 << qc->tag;
1127 ap->nr_active_links++;
1128 link->active_tag = qc->tag;
1131 qc->flags |= ATA_QCFLAG_ACTIVE;
1132 ap->qc_active |= 1 << qc->tag;
1134 if (qc->dev->flags & ATA_DFLAG_SLEEPING) {
1139 qc->err_mask |= ata_qc_issue_prot(qc);
1145 ata_qc_complete(qc);
/* Issue a command according to its taskfile protocol.
 *
 * On a polling-only port, forces ATA_TFLAG_POLLING for PIO/NODATA
 * protocols, selects the device, writes the taskfile, and for polled
 * commands queues the PIO state-machine task.  Unsupported protocols
 * return AC_ERR_SYSTEM.
 *
 * NOTE(review): several case labels and the success return are missing
 * from this excerpt. */
1148 static unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1150 struct ata_port *ap = qc->ap;
1152 if (ap->flags & ATA_FLAG_PIO_POLLING) {
1153 switch (qc->tf.protocol) {
1155 case ATA_PROT_NODATA:
1156 case ATAPI_PROT_PIO:
1157 case ATAPI_PROT_NODATA:
1158 qc->tf.flags |= ATA_TFLAG_POLLING;
1165 ata_dev_select(ap, qc->dev->devno, 1, 0);
1167 switch (qc->tf.protocol) {
/* Polled commands mask the device interrupt via nIEN. */
1169 if (qc->tf.flags & ATA_TFLAG_POLLING)
1170 qc->tf.ctl |= ATA_NIEN;
1172 ata_tf_to_host(ap, &qc->tf);
1174 ap->hsm_task_state = HSM_ST;
1176 if (qc->tf.flags & ATA_TFLAG_POLLING)
1177 ata_pio_queue_task(ap, qc, 0);
1182 return AC_ERR_SYSTEM;
/* Write the taskfile registers, then the command register. */
1188 static void ata_tf_to_host(struct ata_port *ap,
1189 const struct ata_taskfile *tf)
1191 ata_tf_load(ap, tf);
1192 ata_exec_command(ap, tf);
/* Load the taskfile shadow registers: control (if changed), HOB bytes
 * first for LBA48 commands, then the low-order bytes and device. */
1195 static void ata_tf_load(struct ata_port *ap,
1196 const struct ata_taskfile *tf)
1198 struct ata_ioports *ioaddr = &ap->ioaddr;
1199 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
/* Only rewrite the control register when its value changed. */
1201 if (tf->ctl != ap->last_ctl) {
1202 if (ioaddr->ctl_addr)
1203 writeb(tf->ctl, ioaddr->ctl_addr);
1204 ap->last_ctl = tf->ctl;
/* LBA48: high-order bytes go in first (HOB latch). */
1208 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
1209 writeb(tf->hob_feature, ioaddr->feature_addr);
1210 writeb(tf->hob_nsect, ioaddr->nsect_addr);
1211 writeb(tf->hob_lbal, ioaddr->lbal_addr);
1212 writeb(tf->hob_lbam, ioaddr->lbam_addr);
1213 writeb(tf->hob_lbah, ioaddr->lbah_addr);
1217 writeb(tf->feature, ioaddr->feature_addr);
1218 writeb(tf->nsect, ioaddr->nsect_addr);
1219 writeb(tf->lbal, ioaddr->lbal_addr);
1220 writeb(tf->lbam, ioaddr->lbam_addr);
1221 writeb(tf->lbah, ioaddr->lbah_addr);
1224 if (tf->flags & ATA_TFLAG_DEVICE)
1225 writeb(tf->device, ioaddr->device_addr);
/* Write the command register and flush with an altstatus read. */
1230 static void ata_exec_command(struct ata_port *ap,
1231 const struct ata_taskfile *tf)
1233 writeb(tf->command, ap->ioaddr.command_addr);
1235 readb(ap->ioaddr.altstatus_addr);
/* Stash the qc for the PIO task; in this polled driver the "queued"
 * task is executed synchronously (scheduling lines elided from this
 * excerpt). */
1240 static void ata_pio_queue_task(struct ata_port *ap,
1241 void *data,unsigned long delay)
1243 ap->port_task_data = data;
/* Translate a status-register value into an err_mask (returns for each
 * branch elided from this excerpt). */
1246 static unsigned int ac_err_mask(u8 status)
1248 if (status & (ATA_BUSY | ATA_DRQ))
1250 if (status & (ATA_ERR | ATA_DF))
/* Like ac_err_mask(), but guarantees a non-zero mask. */
1255 static unsigned int __ac_err_mask(u8 status)
1257 unsigned int mask = ac_err_mask(status);
1259 return AC_ERR_OTHER;
/* Drive one step of the PIO host state machine for the queued command:
 * briefly poll BSY, requeue itself if the drive is still busy, then
 * advance the HSM via ata_hsm_move(). */
1263 static void ata_pio_task(struct ata_port *arg_ap)
1265 struct ata_port *ap = arg_ap;
1266 struct ata_queued_cmd *qc = ap->port_task_data;
1272 * This is purely heuristic. This is a fast path.
1273 * Sometimes when we enter, BSY will be cleared in
1274 * a chk-status or two. If not, the drive is probably seeking
1275 * or something. Snooze for a couple msecs, then
1276 * chk-status again. If still busy, queue delayed work.
1278 status = ata_busy_wait(ap, ATA_BUSY, 5);
1279 if (status & ATA_BUSY) {
1281 status = ata_busy_wait(ap, ATA_BUSY, 10);
1282 if (status & ATA_BUSY) {
/* Still busy: requeue ourselves after a short pause. */
1283 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
1288 poll_next = ata_hsm_move(ap, qc, status, 1);
1290 /* another command or interrupt handler
1291 * may be running at this point.
/* Advance the PIO host state machine one step given the current device
 * status.  Returns whether the caller should keep polling.
 *
 * NOTE(review): state-case labels (HSM_ST_FIRST/HSM_ST/HSM_ST_LAST/
 * HSM_ST_ERR), goto targets and several closing braces are missing from
 * this excerpt; the comments below follow the visible structure only.
 */
1297 static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1298 u8 status, int in_wq)
1303 switch (ap->hsm_task_state) {
/* First state: device must assert DRQ before any data moves. */
1305 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
1307 if ((status & ATA_DRQ) == 0) {
1308 if (status & (ATA_ERR | ATA_DF)) {
1309 qc->err_mask |= AC_ERR_DEV;
1311 qc->err_mask |= AC_ERR_HSM;
1313 ap->hsm_task_state = HSM_ST_ERR;
1317 /* Device should not ask for data transfer (DRQ=1)
1318 * when it finds something wrong.
1319 * We ignore DRQ here and stop the HSM by
1320 * changing hsm_task_state to HSM_ST_ERR and
1321 * let the EH abort the command or reset the device.
1323 if (status & (ATA_ERR | ATA_DF)) {
1324 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
1325 printf("DRQ=1 with device error, "
1326 "dev_stat 0x%X\n", status);
1327 qc->err_mask |= AC_ERR_HSM;
1328 ap->hsm_task_state = HSM_ST_ERR;
1333 if (qc->tf.protocol == ATA_PROT_PIO) {
1334 /* PIO data out protocol.
1335 * send first data block.
1337 /* ata_pio_sectors() might change the state
1338 * to HSM_ST_LAST. so, the state is changed here
1339 * before ata_pio_sectors().
1341 ap->hsm_task_state = HSM_ST;
1342 ata_pio_sectors(qc);
1344 printf("protocol is not ATA_PROT_PIO \n");
/* Data state: DRQ must stay asserted while blocks remain. */
1349 if ((status & ATA_DRQ) == 0) {
1350 if (status & (ATA_ERR | ATA_DF)) {
1351 qc->err_mask |= AC_ERR_DEV;
1353 /* HSM violation. Let EH handle this.
1354 * Phantom devices also trigger this
1355 * condition. Mark hint.
1357 qc->err_mask |= AC_ERR_HSM | AC_ERR_NODEV_HINT;
1360 ap->hsm_task_state = HSM_ST_ERR;
1363 /* For PIO reads, some devices may ask for
1364 * data transfer (DRQ=1) alone with ERR=1.
1365 * We respect DRQ here and transfer one
1366 * block of junk data before changing the
1367 * hsm_task_state to HSM_ST_ERR.
1369 * For PIO writes, ERR=1 DRQ=1 doesn't make
1370 * sense since the data block has been
1371 * transferred to the device.
1373 if (status & (ATA_ERR | ATA_DF)) {
1374 qc->err_mask |= AC_ERR_DEV;
1376 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1377 ata_pio_sectors(qc);
1378 status = ata_wait_idle(ap);
1381 if (status & (ATA_BUSY | ATA_DRQ))
1382 qc->err_mask |= AC_ERR_HSM;
1384 /* ata_pio_sectors() might change the
1385 * state to HSM_ST_LAST. so, the state
1386 * is changed after ata_pio_sectors().
1388 ap->hsm_task_state = HSM_ST_ERR;
/* Normal path: transfer the next block(s). */
1392 ata_pio_sectors(qc);
1393 if (ap->hsm_task_state == HSM_ST_LAST &&
1394 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1395 status = ata_wait_idle(ap);
/* Last state: status must show a clean completion. */
1403 if (!ata_ok(status)) {
1404 qc->err_mask |= __ac_err_mask(status);
1405 ap->hsm_task_state = HSM_ST_ERR;
1409 ap->hsm_task_state = HSM_ST_IDLE;
1411 ata_hsm_qc_complete(qc, in_wq);
1417 /* make sure qc->err_mask is available to
1418 * know what's wrong and recover
1420 ap->hsm_task_state = HSM_ST_IDLE;
1422 ata_hsm_qc_complete(qc, in_wq);
/* Transfer the next run of sectors for a PIO command, then flush with
 * an altstatus read.
 * NOTE(review): the assignment of 'ap' and the per-sector loop are
 * missing from this excerpt. */
1433 static void ata_pio_sectors(struct ata_queued_cmd *qc)
1435 struct ata_port *ap;
1437 qc->pdata = ap->pdata;
1441 readb(qc->ap->ioaddr.altstatus_addr);
/* Transfer exactly one sector between the device and the buffer chosen
 * by the command opcode: the static IDENTIFY buffer for ATA_CMD_ID_ATA,
 * the caller's data buffer for PIO read/write, or a scratch sector
 * otherwise. */
1445 static void ata_pio_sector(struct ata_queued_cmd *qc)
1447 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
1448 struct ata_port *ap = qc->ap;
1449 unsigned int offset;
1451 char temp_data_buf[512];
/* Flag the final sector so the HSM moves to its last state. */
1453 if (qc->curbytes == qc->nbytes - qc->sect_size)
1454 ap->hsm_task_state = HSM_ST_LAST;
1456 offset = qc->curbytes;
1458 switch (qc->tf.command) {
1459 case ATA_CMD_ID_ATA:
1460 buf = (unsigned char *)&ata_device.id[0];
1462 case ATA_CMD_PIO_READ_EXT:
1463 case ATA_CMD_PIO_READ:
1464 case ATA_CMD_PIO_WRITE_EXT:
1465 case ATA_CMD_PIO_WRITE:
1466 buf = qc->pdata + offset;
1469 buf = (unsigned char *)&temp_data_buf[0];
1472 ata_mmio_data_xfer(qc->dev, buf, qc->sect_size, do_write);
1474 qc->curbytes += qc->sect_size;
/* Move 'buflen' bytes between 'buf' and the MMIO data register as
 * 16-bit words, handling a trailing odd byte via a padded bounce word.
 * Direction is chosen by 'do_write'. */
1478 static void ata_mmio_data_xfer(struct ata_device *dev, unsigned char *buf,
1479 unsigned int buflen, int do_write)
1481 struct ata_port *ap = pap;
1482 void __iomem *data_addr = ap->ioaddr.data_addr;
1483 unsigned int words = buflen >> 1;
1484 u16 *buf16 = (u16 *)buf;
/* Bulk word transfer, little-endian on the wire. */
1489 for (i = 0; i < words; i++)
1490 writew(le16_to_cpu(buf16[i]), data_addr);
1492 for (i = 0; i < words; i++)
1493 buf16[i] = cpu_to_le16(readw(data_addr));
/* Odd byte count: bounce the final byte through an aligned word. */
1496 if (buflen & 0x01) {
1497 __le16 align_buf[1] = { 0 };
1498 unsigned char *trailing_buf = buf + buflen - 1;
1501 memcpy(align_buf, trailing_buf, 1);
1502 writew(le16_to_cpu(align_buf[0]), data_addr);
1504 align_buf[0] = cpu_to_le16(readw(data_addr));
1505 memcpy(trailing_buf, align_buf, 1);
/* Finish a command on behalf of the HSM: complete it normally unless an
 * HSM violation was recorded, in which case freeze the port.  The two
 * visible branches appear to distinguish workqueue vs non-workqueue
 * context (in_wq); surrounding lines are missing from this excerpt. */
1510 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1512 struct ata_port *ap = qc->ap;
1515 /* EH might have kicked in while host lock is
/* Re-fetch the qc by tag in case it was replaced meanwhile. */
1518 qc = &ap->qcmd[qc->tag];
1520 if (!(qc->err_mask & AC_ERR_HSM)) {
1522 ata_qc_complete(qc);
1524 ata_port_freeze(ap);
1528 if (!(qc->err_mask & AC_ERR_HSM)) {
1529 ata_qc_complete(qc);
1531 ata_port_freeze(ap);
/* Enable device interrupts (clear nIEN in the control register) and
 * wait for the port to go idle; returns the resulting status.
 * NOTE(review): the return statement is missing from this excerpt. */
1536 static u8 ata_irq_on(struct ata_port *ap)
1538 struct ata_ioports *ioaddr = &ap->ioaddr;
1541 ap->ctl &= ~ATA_NIEN;
1542 ap->last_ctl = ap->ctl;
1544 if (ioaddr->ctl_addr)
1545 writeb(ap->ctl, ioaddr->ctl_addr);
1547 tmp = ata_wait_idle(ap);
/* The internal-command tag is the last tag in the queue. */
1552 static unsigned int ata_tag_internal(unsigned int tag)
1554 return tag == ATA_MAX_QUEUE - 1;
/*
 * Complete a queued command: handle the failed path, optionally capture
 * the result taskfile, apply per-command post-processing, then release
 * the qc via __ata_qc_complete().
 * NOTE(review): ATA_QCFLAG_FAILED is set unconditionally right before it
 * is tested, which would make the test always true; lines are missing
 * from this chunk between the two statements — confirm against full
 * source whether the set is conditional there.
 */
1557 static void ata_qc_complete(struct ata_queued_cmd *qc)
1559 struct ata_device *dev = qc->dev;
1561 qc->flags |= ATA_QCFLAG_FAILED;
1563 if (qc->flags & ATA_QCFLAG_FAILED) {
1564 if (!ata_tag_internal(qc->tag)) {
1569 if (qc->flags & ATA_QCFLAG_RESULT_TF)
1572 /* Some commands need post-processing after successful
1575 switch (qc->tf.command) {
1576 case ATA_CMD_SET_FEATURES:
/* write-cache toggles need no extra post-processing */
1577 if (qc->tf.feature != SETFEATURES_WC_ON &&
1578 qc->tf.feature != SETFEATURES_WC_OFF)
1580 case ATA_CMD_INIT_DEV_PARAMS:
1581 case ATA_CMD_SET_MULTI:
1585 dev->flags |= ATA_DFLAG_SLEEPING;
1589 __ata_qc_complete(qc);
/* Snapshot the port's current taskfile registers into qc->result_tf,
 * carrying over the flags from the issued taskfile. */
1592 static void fill_result_tf(struct ata_queued_cmd *qc)
1594 struct ata_port *ap = qc->ap;
1596 qc->result_tf.flags = qc->tf.flags;
1597 ata_tf_read(ap, &qc->result_tf);
1600 static void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
1602 struct ata_ioports *ioaddr = &ap->ioaddr;
1604 tf->command = ata_check_status(ap);
1605 tf->feature = readb(ioaddr->error_addr);
1606 tf->nsect = readb(ioaddr->nsect_addr);
1607 tf->lbal = readb(ioaddr->lbal_addr);
1608 tf->lbam = readb(ioaddr->lbam_addr);
1609 tf->lbah = readb(ioaddr->lbah_addr);
1610 tf->device = readb(ioaddr->device_addr);
1612 if (tf->flags & ATA_TFLAG_LBA48) {
1613 if (ioaddr->ctl_addr) {
1614 writeb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
1616 tf->hob_feature = readb(ioaddr->error_addr);
1617 tf->hob_nsect = readb(ioaddr->nsect_addr);
1618 tf->hob_lbal = readb(ioaddr->lbal_addr);
1619 tf->hob_lbam = readb(ioaddr->lbam_addr);
1620 tf->hob_lbah = readb(ioaddr->lbah_addr);
1622 writeb(tf->ctl, ioaddr->ctl_addr);
1623 ap->last_ctl = tf->ctl;
1625 printf("sata_dwc warnning register read.\n");
/*
 * Low-level command completion: drop the command from the link and the
 * port's active bookkeeping (active_tag, nr_active_links, qc_active
 * bitmap) and clear any exclusive-link claim it held.
 */
1630 static void __ata_qc_complete(struct ata_queued_cmd *qc)
1632 struct ata_port *ap = qc->ap;
1633 struct ata_link *link = qc->dev->link;
1635 link->active_tag = ATA_TAG_POISON; /* no active command on this link */
1636 ap->nr_active_links--;
/* release exclusive use of the port if this command held it */
1638 if (qc->flags & ATA_QCFLAG_CLEAR_EXCL && ap->excl_link == link)
1639 ap->excl_link = NULL;
1641 qc->flags &= ~ATA_QCFLAG_ACTIVE;
1642 ap->qc_active &= ~(1 << qc->tag);
/*
 * Return a queued command to the free pool: poison its tag and clear the
 * corresponding bit in the port's allocation bitmap.
 * NOTE(review): the line initializing 'tag' (presumably tag = qc->tag)
 * is not visible in this chunk — confirm against full source.
 */
1645 static void ata_qc_free(struct ata_queued_cmd *qc)
1647 struct ata_port *ap = qc->ap;
1651 if (tag < ATA_MAX_QUEUE) {
1652 qc->tag = ATA_TAG_POISON; /* catch use-after-free of this qc */
1653 clear_bit(tag, &ap->qc_allocated);
/*
 * Poll the attached device by repeatedly reading sector 0 until it
 * answers, then mark the global dev_state SATA_READY; give up after
 * ATA_RESET_TIME and mark SATA_NODEVICE.
 * NOTE(review): the retry loop header, delay, and the datalen
 * initialization are not visible in this chunk — presumably i counts
 * 10ms polls (100 per second); verify against full source.
 */
1657 static int check_sata_dev_state(void)
1659 unsigned long datalen;
1660 unsigned char *pdata;
1663 char temp_data_buf[512]; /* scratch sink for the probe read */
1668 pdata = (unsigned char*)&temp_data_buf[0];
/* probe: read one sector at LBA 0 into the scratch buffer */
1671 ret = ata_dev_read_sectors(pdata, datalen, 0, 1);
1677 if (i > (ATA_RESET_TIME * 100)) {
1678 printf("** TimeOUT **\n");
1679 dev_state = SATA_NODEVICE;
/* progress indication roughly once per second */
1683 if ((i >= 100) && ((i % 100) == 0))
1687 dev_state = SATA_READY;
/*
 * Issue SET FEATURES to the device as an internal NODATA command and
 * return the resulting error mask.
 * NOTE(review): the subcommand is passed in 'enable' and written to the
 * feature register; the 'feature' parameter is not used in any line
 * visible here (a tf.nsect = feature line may be hidden by the chunk
 * gaps) — confirm against full source.
 */
1692 static unsigned int ata_dev_set_feature(struct ata_device *dev,
1693 u8 enable, u8 feature)
1695 struct ata_taskfile tf;
1696 struct ata_port *ap;
1698 unsigned int err_mask;
1700 memset(&tf, 0, sizeof(tf));
1703 tf.device = ATA_DEVICE_OBS;
1704 tf.command = ATA_CMD_SET_FEATURES;
1705 tf.feature = enable; /* SET FEATURES subcommand goes in the feature reg */
1706 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1707 tf.protocol = ATA_PROT_NODATA;
1710 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, 0, 0);
/*
 * Issue INITIALIZE DEVICE PARAMETERS (CHS geometry) as an internal
 * NODATA command. Rejects out-of-range geometry (sectors 1..255,
 * heads 1..16) with AC_ERR_INVALID. A device-aborted command is not
 * treated as an error (legacy devices may not implement it).
 * NOTE(review): the line loading 'sectors' into tf.nsect is not visible
 * in this chunk — confirm against full source.
 */
1715 static unsigned int ata_dev_init_params(struct ata_device *dev,
1716 u16 heads, u16 sectors)
1718 struct ata_taskfile tf;
1719 struct ata_port *ap;
1721 unsigned int err_mask;
/* geometry sanity check per the ATA CHS limits */
1723 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
1724 return AC_ERR_INVALID;
1726 memset(&tf, 0, sizeof(tf));
1728 tf.device = ATA_DEVICE_OBS;
1729 tf.command = ATA_CMD_INIT_DEV_PARAMS;
1730 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1731 tf.protocol = ATA_PROT_NODATA;
1733 tf.device |= (heads - 1) & 0x0f; /* max head number, zero-based */
1735 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, 0, 0);
/* aborted by device == command not supported; treat as success */
1737 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1743 #if defined(CONFIG_SATA_DWC) && !defined(CONFIG_LBA48)
1744 #define SATA_MAX_READ_BLK 0xFF
1746 #define SATA_MAX_READ_BLK 0xFFFF
1749 ulong sata_read(int device, ulong blknr, lbaint_t blkcnt, void *buffer)
1751 ulong start,blks, buf_addr;
1752 unsigned short smallblks;
1753 unsigned long datalen;
1754 unsigned char *pdata;
1760 if (dev_state != SATA_READY)
1763 buf_addr = (unsigned long)buffer;
1767 pdata = (unsigned char *)buf_addr;
1768 if (blks > SATA_MAX_READ_BLK) {
1769 datalen = sata_dev_desc[device].blksz * SATA_MAX_READ_BLK;
1770 smallblks = SATA_MAX_READ_BLK;
1773 n_block = (u32)smallblks;
1775 start += SATA_MAX_READ_BLK;
1776 blks -= SATA_MAX_READ_BLK;
1778 datalen = sata_dev_desc[device].blksz * SATA_MAX_READ_BLK;
1779 datalen = sata_dev_desc[device].blksz * blks;
1780 smallblks = (unsigned short)blks;
1783 n_block = (u32)smallblks;
1789 if (ata_dev_read_sectors(pdata, datalen, block, n_block) != TRUE) {
1790 printf("sata_dwc : Hard disk read error.\n");
1794 buf_addr += datalen;
1795 } while (blks != 0);
/*
 * Issue a PIO READ SECTOR(S) (EXT) command for n_block sectors starting
 * at LBA 'block', transferring into pdata via the internal-command path.
 * NOTE(review): the chunk gaps hide the #ifdef selecting between the
 * LBA48 (ATA_CMD_PIO_READ_EXT) and LBA28 (ATA_CMD_PIO_READ) setups —
 * presumably CONFIG_LBA48; verify against full source.
 * NOTE(review): the error labels/returns and the retry path around
 * may_fallback are also not fully visible; comments below only describe
 * what the visible lines do.
 */
1800 static int ata_dev_read_sectors(unsigned char *pdata, unsigned long datalen,
1801 u32 block, u32 n_block)
1803 struct ata_port *ap = pap;
1804 struct ata_device *dev = &ata_device;
1805 struct ata_taskfile tf;
1806 unsigned int class = ATA_DEV_ATA;
1807 unsigned int err_mask = 0;
1809 int may_fallback = 1;
/* refuse to touch the hardware once a fatal error was recorded */
1812 if (dev_state == SATA_ERROR)
1815 ata_dev_select(ap, dev->devno, 1, 1);
1818 memset(&tf, 0, sizeof(tf));
1821 ap->flags &= ~ATA_FLAG_DISABLED;
1825 tf.device = ATA_DEVICE_OBS;
1827 temp_n_block = n_block; /* stashed for the PIO data-transfer path */
/* LBA48 setup: 16-bit sector count, 48-bit LBA split across HOB regs */
1830 tf.command = ATA_CMD_PIO_READ_EXT;
1831 tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1833 tf.hob_feature = 31;
1835 tf.hob_nsect = (n_block >> 8) & 0xff;
1836 tf.nsect = n_block & 0xff;
1840 tf.hob_lbal = (block >> 24) & 0xff;
1841 tf.lbah = (block >> 16) & 0xff;
1842 tf.lbam = (block >> 8) & 0xff;
1843 tf.lbal = block & 0xff;
1846 if (tf.flags & ATA_TFLAG_FUA)
1847 tf.device |= 1 << 7; /* FUA bit in the device register */
/* LBA28 setup: 8-bit sector count, LBA bits 27:24 in the device reg */
1849 tf.command = ATA_CMD_PIO_READ;
1850 tf.flags |= ATA_TFLAG_LBA ;
1853 tf.nsect = n_block & 0xff;
1855 tf.lbah = (block >> 16) & 0xff;
1856 tf.lbam = (block >> 8) & 0xff;
1857 tf.lbal = block & 0xff;
1859 tf.device = (block >> 24) & 0xf;
1861 tf.device |= 1 << 6; /* LBA addressing mode */
1862 if (tf.flags & ATA_TFLAG_FUA)
1863 tf.device |= 1 << 7;
1867 tf.protocol = ATA_PROT_PIO;
1869 /* Some devices choke if TF registers contain garbage. Make
1870 * sure those are properly initialized.
1872 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1873 tf.flags |= ATA_TFLAG_POLLING;
1875 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 0, 0);
1878 if (err_mask & AC_ERR_NODEV_HINT) {
1879 printf("READ_SECTORS NODEV after polling detection\n");
1883 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1884 /* Device or controller might have reported
1885 * the wrong device class. Give a shot at the
1886 * other IDENTIFY if the current one is
1887 * aborted by the device.
/* flip the assumed class and retry once (ATA <-> ATAPI) */
1892 if (class == ATA_DEV_ATA) {
1893 class = ATA_DEV_ATAPI;
1895 class = ATA_DEV_ATA;
1899 /* Control reaches here iff the device aborted
1900 * both flavors of IDENTIFYs which happens
1901 * sometimes with phantom devices.
1903 printf("both IDENTIFYs aborted, assuming NODEV\n");
1908 reason = "I/O error";
1912 /* Falling back doesn't make sense if ID data was read
1913 * successfully at least once.
1918 reason = "device reports invalid type";
1923 printf("failed to READ SECTORS (%s, err_mask=0x%x)\n", reason, err_mask);
1927 #if defined(CONFIG_SATA_DWC) && !defined(CONFIG_LBA48)
1928 #define SATA_MAX_WRITE_BLK 0xFF
1930 #define SATA_MAX_WRITE_BLK 0xFFFF
1933 ulong sata_write(int device, ulong blknr, lbaint_t blkcnt, void *buffer)
1935 ulong start,blks, buf_addr;
1936 unsigned short smallblks;
1937 unsigned long datalen;
1938 unsigned char *pdata;
1945 if (dev_state != SATA_READY)
1948 buf_addr = (unsigned long)buffer;
1952 pdata = (unsigned char *)buf_addr;
1953 if (blks > SATA_MAX_WRITE_BLK) {
1954 datalen = sata_dev_desc[device].blksz * SATA_MAX_WRITE_BLK;
1955 smallblks = SATA_MAX_WRITE_BLK;
1958 n_block = (u32)smallblks;
1960 start += SATA_MAX_WRITE_BLK;
1961 blks -= SATA_MAX_WRITE_BLK;
1963 datalen = sata_dev_desc[device].blksz * blks;
1964 smallblks = (unsigned short)blks;
1967 n_block = (u32)smallblks;
1973 if (ata_dev_write_sectors(pdata, datalen, block, n_block) != TRUE) {
1974 printf("sata_dwc : Hard disk read error.\n");
1978 buf_addr += datalen;
1979 } while (blks != 0);
/*
 * Issue a PIO WRITE SECTOR(S) (EXT) command for n_block sectors starting
 * at LBA 'block', transferring from pdata via the internal-command path.
 * Mirrors ata_dev_read_sectors() with the WRITE commands and
 * ATA_TFLAG_WRITE set.
 * NOTE(review): ata_exec_internal() is called with DMA_FROM_DEVICE even
 * though this is a write — likely a copy-paste from the read path;
 * confirm whether the direction argument is actually honored before
 * changing it.
 * NOTE(review): the NODEV diagnostic still says "READ_SECTORS" in this
 * write path — copy-paste artifact.
 */
1984 static int ata_dev_write_sectors(unsigned char* pdata, unsigned long datalen,
1985 u32 block, u32 n_block)
1987 struct ata_port *ap = pap;
1988 struct ata_device *dev = &ata_device;
1989 struct ata_taskfile tf;
1990 unsigned int class = ATA_DEV_ATA;
1991 unsigned int err_mask = 0;
1993 int may_fallback = 1;
/* refuse to touch the hardware once a fatal error was recorded */
1996 if (dev_state == SATA_ERROR)
1999 ata_dev_select(ap, dev->devno, 1, 1);
2002 memset(&tf, 0, sizeof(tf));
2005 ap->flags &= ~ATA_FLAG_DISABLED;
2009 tf.device = ATA_DEVICE_OBS;
2011 temp_n_block = n_block; /* stashed for the PIO data-transfer path */
/* LBA48 setup: 16-bit sector count, 48-bit LBA split across HOB regs */
2015 tf.command = ATA_CMD_PIO_WRITE_EXT;
2016 tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48 | ATA_TFLAG_WRITE;
2018 tf.hob_feature = 31;
2020 tf.hob_nsect = (n_block >> 8) & 0xff;
2021 tf.nsect = n_block & 0xff;
2025 tf.hob_lbal = (block >> 24) & 0xff;
2026 tf.lbah = (block >> 16) & 0xff;
2027 tf.lbam = (block >> 8) & 0xff;
2028 tf.lbal = block & 0xff;
2031 if (tf.flags & ATA_TFLAG_FUA)
2032 tf.device |= 1 << 7; /* FUA bit in the device register */
/* LBA28 setup: 8-bit sector count, LBA bits 27:24 in the device reg */
2034 tf.command = ATA_CMD_PIO_WRITE;
2035 tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_WRITE;
2038 tf.nsect = n_block & 0xff;
2040 tf.lbah = (block >> 16) & 0xff;
2041 tf.lbam = (block >> 8) & 0xff;
2042 tf.lbal = block & 0xff;
2044 tf.device = (block >> 24) & 0xf;
2046 tf.device |= 1 << 6; /* LBA addressing mode */
2047 if (tf.flags & ATA_TFLAG_FUA)
2048 tf.device |= 1 << 7;
2052 tf.protocol = ATA_PROT_PIO;
2054 /* Some devices choke if TF registers contain garbage. Make
2055 * sure those are properly initialized.
2057 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2058 tf.flags |= ATA_TFLAG_POLLING;
2060 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 0, 0);
2063 if (err_mask & AC_ERR_NODEV_HINT) {
2064 printf("READ_SECTORS NODEV after polling detection\n");
2068 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2069 /* Device or controller might have reported
2070 * the wrong device class. Give a shot at the
2071 * other IDENTIFY if the current one is
2072 * aborted by the device.
/* flip the assumed class and retry once (ATA <-> ATAPI) */
2077 if (class == ATA_DEV_ATA) {
2078 class = ATA_DEV_ATAPI;
2080 class = ATA_DEV_ATA;
2084 /* Control reaches here iff the device aborted
2085 * both flavors of IDENTIFYs which happens
2086 * sometimes with phantom devices.
2088 printf("both IDENTIFYs aborted, assuming NODEV\n");
2093 reason = "I/O error";
2097 /* Falling back doesn't make sense if ID data was read
2098 * successfully at least once.
2103 reason = "device reports invalid type";
2108 printf("failed to WRITE SECTORS (%s, err_mask=0x%x)\n", reason, err_mask);