2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
41 #include <linux/highmem.h>
42 #include <linux/spinlock.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/completion.h>
48 #include <linux/suspend.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/scatterlist.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_host.h>
55 #include <linux/libata.h>
57 #include <asm/semaphore.h>
58 #include <asm/byteorder.h>
62 /* debounce timing parameters in msecs { interval, duration, timeout } */
63 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
64 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
65 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
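/*
 * Usage sketch (illustrative call): these triples are consumed by
 * sata_phy_debounce() and sata_phy_resume() below, e.g.
 *
 *	rc = sata_phy_resume(ap, sata_deb_timing_hotplug);
 *
 * polls SStatus every 25 msecs until the value holds steady for
 * 500 msecs, giving up 2000 msecs after the first reading.
 */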
67 static unsigned int ata_dev_init_params(struct ata_device *dev,
68 u16 heads, u16 sectors);
69 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
70 static void ata_dev_xfermask(struct ata_device *dev);
72 static unsigned int ata_unique_id = 1;
73 static struct workqueue_struct *ata_wq;
75 struct workqueue_struct *ata_aux_wq;
77 int atapi_enabled = 1;
78 module_param(atapi_enabled, int, 0444);
79 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
82 module_param(atapi_dmadir, int, 0444);
83 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
86 module_param_named(fua, libata_fua, int, 0444);
87 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
89 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
90 module_param(ata_probe_timeout, int, 0444);
91 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
93 MODULE_AUTHOR("Jeff Garzik");
94 MODULE_DESCRIPTION("Library module for ATA devices");
95 MODULE_LICENSE("GPL");
96 MODULE_VERSION(DRV_VERSION);
100 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
101 * @tf: Taskfile to convert
102 * @fis: Buffer into which data will be output
103 * @pmp: Port multiplier port
105 * Converts a standard ATA taskfile to a Serial ATA
106 * FIS structure (Register - Host to Device).
109 * Inherited from caller.
112 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
114 fis[0] = 0x27; /* Register - Host to Device FIS */
115 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
116 bit 7 indicates Command FIS */
117 fis[2] = tf->command;
118 fis[3] = tf->feature;
125 fis[8] = tf->hob_lbal;
126 fis[9] = tf->hob_lbam;
127 fis[10] = tf->hob_lbah;
128 fis[11] = tf->hob_feature;
131 fis[13] = tf->hob_nsect;
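/*
 * Byte layout of the Register - Host to Device FIS built above, per
 * the Serial ATA specification: 0 FIS type (0x27), 1 C bit/PMP field,
 * 2 command, 3 feature, 4-7 lbal/lbam/lbah/device, 8-11 the HOB
 * fields, 12-13 nsect/hob_nsect, 15 control, with bytes 14 and 16-19
 * zeroed.
 */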
142 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
143 * @fis: Buffer from which data will be input
144 * @tf: Taskfile to output
146 * Converts a serial ATA FIS structure to a standard ATA taskfile.
149 * Inherited from caller.
152 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
154 tf->command = fis[2]; /* status */
155 tf->feature = fis[3]; /* error */
162 tf->hob_lbal = fis[8];
163 tf->hob_lbam = fis[9];
164 tf->hob_lbah = fis[10];
167 tf->hob_nsect = fis[13];
170 static const u8 ata_rw_cmds[] = {
174 ATA_CMD_READ_MULTI_EXT,
175 ATA_CMD_WRITE_MULTI_EXT,
179 ATA_CMD_WRITE_MULTI_FUA_EXT,
183 ATA_CMD_PIO_READ_EXT,
184 ATA_CMD_PIO_WRITE_EXT,
197 ATA_CMD_WRITE_FUA_EXT
201 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
202 * @qc: command to examine and configure
204 * Examine the device configuration and tf->flags to calculate
205 * the proper read/write commands and protocol to use.
210 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
212 struct ata_taskfile *tf = &qc->tf;
213 struct ata_device *dev = qc->dev;
216 int index, fua, lba48, write;
218 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
219 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
220 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
222 if (dev->flags & ATA_DFLAG_PIO) {
223 tf->protocol = ATA_PROT_PIO;
224 index = dev->multi_count ? 0 : 8;
225 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
226 /* Unable to use DMA due to host limitation */
227 tf->protocol = ATA_PROT_PIO;
228 index = dev->multi_count ? 0 : 8;
230 tf->protocol = ATA_PROT_DMA;
234 cmd = ata_rw_cmds[index + fua + lba48 + write];
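/*
 * Worked example of the indexing above: fua/lba48/write act as
 * offsets 4/2/1 within an 8-entry group selected by index
 * (0 multi, 8 pio, 16 dma), so a DMA LBA48 FUA write picks
 * ata_rw_cmds[16 + 4 + 2 + 1] == ATA_CMD_WRITE_FUA_EXT; zero
 * entries in the table mark unsupported combinations such as
 * FUA reads.
 */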
243 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
244 * @pio_mask: pio_mask
245 * @mwdma_mask: mwdma_mask
246 * @udma_mask: udma_mask
248 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
249 * unsigned int xfer_mask.
257 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
258 unsigned int mwdma_mask,
259 unsigned int udma_mask)
261 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
262 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
263 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
267 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
268 * @xfer_mask: xfer_mask to unpack
269 * @pio_mask: resulting pio_mask
270 * @mwdma_mask: resulting mwdma_mask
271 * @udma_mask: resulting udma_mask
273 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
274 * Any NULL destination masks will be ignored.
276 static void ata_unpack_xfermask(unsigned int xfer_mask,
277 unsigned int *pio_mask,
278 unsigned int *mwdma_mask,
279 unsigned int *udma_mask)
282 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
284 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
286 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
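/*
 * Round-trip sketch (illustrative values): packing PIO0-4, MWDMA0-2
 * and UDMA0-5, then unpacking, returns the original masks:
 *
 *	unsigned int pio, mwdma, udma;
 *	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	(afterwards pio == 0x1f, mwdma == 0x07 and udma == 0x3f)
 */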
289 static const struct ata_xfer_ent {
293 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
294 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
295 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
300 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
301 * @xfer_mask: xfer_mask of interest
303 * Return matching XFER_* value for @xfer_mask. Only the highest
304 * bit of @xfer_mask is considered.
310 * Matching XFER_* value, 0 if no match found.
312 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
314 int highbit = fls(xfer_mask) - 1;
315 const struct ata_xfer_ent *ent;
317 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
318 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
319 return ent->base + highbit - ent->shift;
324 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
325 * @xfer_mode: XFER_* of interest
327 * Return matching xfer_mask for @xfer_mode.
333 * Matching xfer_mask, 0 if no match found.
335 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
337 const struct ata_xfer_ent *ent;
339 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
340 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
341 return 1 << (ent->shift + xfer_mode - ent->base);
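/*
 * Example lookup (sketch): ata_xfer_mode2mask(XFER_MW_DMA_2) returns
 * 1 << (ATA_SHIFT_MWDMA + 2), and feeding that mask back through
 * ata_xfer_mask2mode() returns XFER_MW_DMA_2; for multi-bit masks
 * only the highest set bit decides the result.
 */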
346 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
347 * @xfer_mode: XFER_* of interest
349 * Return matching xfer_shift for @xfer_mode.
355 * Matching xfer_shift, -1 if no match found.
357 static int ata_xfer_mode2shift(unsigned int xfer_mode)
359 const struct ata_xfer_ent *ent;
361 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
362 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
368 * ata_mode_string - convert xfer_mask to string
369 * @xfer_mask: mask of bits supported; only highest bit counts.
371 * Determine string which represents the highest speed
372 * (highest bit in @xfer_mask).
378 * Constant C string representing highest speed listed in
379 * @xfer_mask, or the constant C string "<n/a>".
381 static const char *ata_mode_string(unsigned int xfer_mask)
383 static const char * const xfer_mode_str[] = {
407 highbit = fls(xfer_mask) - 1;
408 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
409 return xfer_mode_str[highbit];
413 static const char *sata_spd_string(unsigned int spd)
415 static const char * const spd_str[] = {
420 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
422 return spd_str[spd - 1];
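/*
 * The @spd argument is the SATA SPD value (bits 7:4 of SStatus or
 * SControl): 1 denotes the 1.5 Gbps generation and 2 the 3.0 Gbps
 * generation; see sata_print_link_status() below for a typical
 * caller.
 */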
425 void ata_dev_disable(struct ata_device *dev)
427 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
428 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
434 * ata_pio_devchk - PATA device presence detection
435 * @ap: ATA channel to examine
436 * @device: Device to examine (starting at zero)
438 * This technique was originally described in
439 * Hale Landis's ATADRVR (www.ata-atapi.com), and
440 * later found its way into the ATA/ATAPI spec.
442 * Write a pattern to the ATA shadow registers,
443 * and if a device is present, it will respond by
444 * correctly storing and echoing back the
445 * ATA shadow register contents.
451 static unsigned int ata_pio_devchk(struct ata_port *ap,
454 struct ata_ioports *ioaddr = &ap->ioaddr;
457 ap->ops->dev_select(ap, device);
459 outb(0x55, ioaddr->nsect_addr);
460 outb(0xaa, ioaddr->lbal_addr);
462 outb(0xaa, ioaddr->nsect_addr);
463 outb(0x55, ioaddr->lbal_addr);
465 outb(0x55, ioaddr->nsect_addr);
466 outb(0xaa, ioaddr->lbal_addr);
468 nsect = inb(ioaddr->nsect_addr);
469 lbal = inb(ioaddr->lbal_addr);
471 if ((nsect == 0x55) && (lbal == 0xaa))
472 return 1; /* we found a device */
474 return 0; /* nothing found */
478 * ata_mmio_devchk - PATA device presence detection
479 * @ap: ATA channel to examine
480 * @device: Device to examine (starting at zero)
482 * This technique was originally described in
483 * Hale Landis's ATADRVR (www.ata-atapi.com), and
484 * later found its way into the ATA/ATAPI spec.
486 * Write a pattern to the ATA shadow registers,
487 * and if a device is present, it will respond by
488 * correctly storing and echoing back the
489 * ATA shadow register contents.
495 static unsigned int ata_mmio_devchk(struct ata_port *ap,
498 struct ata_ioports *ioaddr = &ap->ioaddr;
501 ap->ops->dev_select(ap, device);
503 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
504 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
506 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
507 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
509 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
510 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
512 nsect = readb((void __iomem *) ioaddr->nsect_addr);
513 lbal = readb((void __iomem *) ioaddr->lbal_addr);
515 if ((nsect == 0x55) && (lbal == 0xaa))
516 return 1; /* we found a device */
518 return 0; /* nothing found */
522 * ata_devchk - PATA device presence detection
523 * @ap: ATA channel to examine
524 * @device: Device to examine (starting at zero)
526 * Dispatch ATA device presence detection, depending
527 * on whether we are using PIO or MMIO to talk to the
528 * ATA shadow registers.
534 static unsigned int ata_devchk(struct ata_port *ap,
537 if (ap->flags & ATA_FLAG_MMIO)
538 return ata_mmio_devchk(ap, device);
539 return ata_pio_devchk(ap, device);
543 * ata_dev_classify - determine device type based on ATA-spec signature
544 * @tf: ATA taskfile register set for device to be identified
546 * Determine from taskfile register contents whether a device is
547 * ATA or ATAPI, as per "Signature and persistence" section
548 * of ATA/PI spec (volume 1, sect 5.14).
554 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
555 * in the event of failure.
558 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
560 /* Apple's open source Darwin code hints that some devices only
561 * put a proper signature into the LBA mid/high registers.
562 * So we only check those; it's sufficient for uniqueness.
565 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
566 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
567 DPRINTK("found ATA device by sig\n");
571 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
572 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
573 DPRINTK("found ATAPI device by sig\n");
574 return ATA_DEV_ATAPI;
577 DPRINTK("unknown device\n");
578 return ATA_DEV_UNKNOWN;
582 * ata_dev_try_classify - Parse returned ATA device signature
583 * @ap: ATA channel to examine
584 * @device: Device to examine (starting at zero)
585 * @r_err: Value of error register on completion
587 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
588 * an ATA/ATAPI-defined set of values is placed in the ATA
589 * shadow registers, indicating the results of device detection
592 * Select the ATA device, and read the values from the ATA shadow
593 * registers. Then parse according to the Error register value,
594 * and the spec-defined values examined by ata_dev_classify().
600 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
604 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
606 struct ata_taskfile tf;
610 ap->ops->dev_select(ap, device);
612 memset(&tf, 0, sizeof(tf));
614 ap->ops->tf_read(ap, &tf);
619 /* see if device passed diags: if master then continue and warn later */
620 if (err == 0 && device == 0)
621 /* diagnostic fail : do nothing _YET_ */
622 ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
625 else if ((device == 0) && (err == 0x81))
630 /* determine if device is ATA or ATAPI */
631 class = ata_dev_classify(&tf);
633 if (class == ATA_DEV_UNKNOWN)
635 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
641 * ata_id_string - Convert IDENTIFY DEVICE page into string
642 * @id: IDENTIFY DEVICE results we will examine
643 * @s: string into which data is output
644 * @ofs: offset into identify device page
645 * @len: length of string to return. must be an even number.
647 * The strings in the IDENTIFY DEVICE page are broken up into
648 * 16-bit chunks. Run through the string, and output each
649 * 8-bit chunk linearly, regardless of platform.
655 void ata_id_string(const u16 *id, unsigned char *s,
656 unsigned int ofs, unsigned int len)
675 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
676 * @id: IDENTIFY DEVICE results we will examine
677 * @s: string into which data is output
678 * @ofs: offset into identify device page
679 * @len: length of string to return. must be an odd number.
681 * This function is identical to ata_id_string except that it
682 * trims trailing spaces and terminates the resulting string with
683 * null. @len must be the actual maximum length (even number) + 1.
688 void ata_id_c_string(const u16 *id, unsigned char *s,
689 unsigned int ofs, unsigned int len)
695 ata_id_string(id, s, ofs, len - 1);
697 p = s + strnlen(s, len - 1);
698 while (p > s && p[-1] == ' ')
703 static u64 ata_id_n_sectors(const u16 *id)
705 if (ata_id_has_lba(id)) {
706 if (ata_id_has_lba48(id))
707 return ata_id_u64(id, 100);
709 return ata_id_u32(id, 60);
711 if (ata_id_current_chs_valid(id))
712 return ata_id_u32(id, 57);
714 return id[1] * id[3] * id[6];
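/*
 * Typical probe-time use of the ID helpers above (sketch; 27 and 40
 * are the IDENTIFY word offset and byte length of the model string
 * per the ATA spec):
 *
 *	unsigned char model[41];
 *
 *	ata_id_c_string(dev->id, model, 27, sizeof(model));
 *
 * model now holds a trimmed, null-terminated model string, and
 * ata_id_n_sectors(dev->id) gives the usable capacity.
 */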
719 * ata_noop_dev_select - Select device 0/1 on ATA bus
720 * @ap: ATA channel to manipulate
721 * @device: ATA device (numbered from zero) to select
723 * This function performs no action.
725 * May be used as the dev_select() entry in ata_port_operations.
730 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
736 * ata_std_dev_select - Select device 0/1 on ATA bus
737 * @ap: ATA channel to manipulate
738 * @device: ATA device (numbered from zero) to select
740 * Use the method defined in the ATA specification to
741 * make either device 0, or device 1, active on the
742 * ATA channel. Works with both PIO and MMIO.
744 * May be used as the dev_select() entry in ata_port_operations.
750 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
755 tmp = ATA_DEVICE_OBS;
757 tmp = ATA_DEVICE_OBS | ATA_DEV1;
759 if (ap->flags & ATA_FLAG_MMIO) {
760 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
762 outb(tmp, ap->ioaddr.device_addr);
764 ata_pause(ap); /* needed; also flushes, for mmio */
768 * ata_dev_select - Select device 0/1 on ATA bus
769 * @ap: ATA channel to manipulate
770 * @device: ATA device (numbered from zero) to select
771 * @wait: non-zero to wait for Status register BSY bit to clear
772 * @can_sleep: non-zero if context allows sleeping
774 * Use the method defined in the ATA specification to
775 * make either device 0, or device 1, active on the
778 * This is a high-level version of ata_std_dev_select(),
779 * which additionally provides the services of inserting
780 * the proper pauses and status polling, where needed.
786 void ata_dev_select(struct ata_port *ap, unsigned int device,
787 unsigned int wait, unsigned int can_sleep)
789 if (ata_msg_probe(ap))
790 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
791 "device %u, wait %u\n", ap->id, device, wait);
796 ap->ops->dev_select(ap, device);
799 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
806 * ata_dump_id - IDENTIFY DEVICE info debugging output
807 * @id: IDENTIFY DEVICE page to dump
809 * Dump selected 16-bit words from the given IDENTIFY DEVICE
816 static inline void ata_dump_id(const u16 *id)
818 DPRINTK("49==0x%04x "
828 DPRINTK("80==0x%04x "
838 DPRINTK("88==0x%04x "
845 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
846 * @id: IDENTIFY data to compute xfer mask from
848 * Compute the xfermask for this device. This is not as trivial
849 * as it seems if we must consider early devices correctly.
851 * FIXME: pre IDE drive timing (do we care ?).
859 static unsigned int ata_id_xfermask(const u16 *id)
861 unsigned int pio_mask, mwdma_mask, udma_mask;
863 /* Usual case. Word 53 indicates word 64 is valid */
864 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
865 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
869 /* If word 64 isn't valid then Word 51 high byte holds
870 * the PIO timing number for the maximum. Turn it into
873 u8 mode = id[ATA_ID_OLD_PIO_MODES] & 0xFF;
874 if (mode < 5) /* Valid PIO range */
875 pio_mask = (2 << mode) - 1;
879 /* But wait... there's more. Design your standards by
880 * committee and you too can get a free iordy field to
881 * process. However, it's the speeds, not the modes, that
882 * are supported... Note drivers using the timing API
883 * will get this right anyway.
887 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
889 if (ata_id_is_cfa(id)) {
891 * Process compact flash extended modes
893 int pio = id[163] & 0x7;
894 int dma = (id[163] >> 3) & 7;
897 pio_mask |= (1 << 5);
899 pio_mask |= (1 << 6);
901 mwdma_mask |= (1 << 3);
903 mwdma_mask |= (1 << 4);
907 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
908 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
910 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
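/*
 * Worked example (illustrative IDENTIFY words): with the word-53
 * "fields valid" bit set, word 64 == 0x0003 (PIO3+4) expands to
 * pio_mask 0x1f (PIO0-4), word 63 == 0x0007 gives mwdma_mask 0x07
 * (MWDMA0-2), and word 88 == 0x003f gives udma_mask 0x3f (UDMA0-5),
 * all packed into one value by ata_pack_xfermask().
 */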
914 * ata_port_queue_task - Queue port_task
915 * @ap: The ata_port to queue port_task for
916 * @fn: workqueue function to be scheduled
917 * @data: data for @fn to use
918 * @delay: delay time for workqueue function
920 * Schedule @fn(@data) for execution after @delay jiffies using
921 * port_task. There is one port_task per port and it's the
922 * user's (i.e. the low level driver's) responsibility to make sure that only
923 * one task is active at any given time.
925 * libata core layer takes care of synchronization between
926 * port_task and EH. ata_port_queue_task() may be ignored for EH
930 * Inherited from caller.
932 void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
937 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
940 PREPARE_DELAYED_WORK(&ap->port_task, fn);
941 ap->port_task_data = data;
943 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
945 /* rc == 0 means that another user is using port task */
950 * ata_port_flush_task - Flush port_task
951 * @ap: The ata_port to flush port_task for
953 * After this function completes, port_task is guaranteed not to
954 * be running or scheduled.
957 * Kernel thread context (may sleep)
959 void ata_port_flush_task(struct ata_port *ap)
965 spin_lock_irqsave(ap->lock, flags);
966 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
967 spin_unlock_irqrestore(ap->lock, flags);
969 DPRINTK("flush #1\n");
970 flush_workqueue(ata_wq);
973 * At this point, if a task is running, it's guaranteed to see
974 * the FLUSH flag; thus, it will never queue pio tasks again.
977 if (!cancel_delayed_work(&ap->port_task)) {
979 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
981 flush_workqueue(ata_wq);
984 spin_lock_irqsave(ap->lock, flags);
985 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
986 spin_unlock_irqrestore(ap->lock, flags);
989 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
992 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
994 struct completion *waiting = qc->private_data;
1000 * ata_exec_internal - execute libata internal command
1001 * @dev: Device to which the command is sent
1002 * @tf: Taskfile registers for the command and the result
1003 * @cdb: CDB for packet command
1004 * @dma_dir: Data transfer direction of the command
1005 * @buf: Data buffer of the command
1006 * @buflen: Length of data buffer
1008 * Executes libata internal command with timeout. @tf contains
1009 * command on entry and result on return. Timeout and error
1010 * conditions are reported via return value. No recovery action
1011 * is taken after a command times out. It's the caller's duty to
1012 * clean up after timeout.
1015 * None. Should be called with kernel context, might sleep.
1018 * Zero on success, AC_ERR_* mask on failure
1020 unsigned ata_exec_internal(struct ata_device *dev,
1021 struct ata_taskfile *tf, const u8 *cdb,
1022 int dma_dir, void *buf, unsigned int buflen)
1024 struct ata_port *ap = dev->ap;
1025 u8 command = tf->command;
1026 struct ata_queued_cmd *qc;
1027 unsigned int tag, preempted_tag;
1028 u32 preempted_sactive, preempted_qc_active;
1029 DECLARE_COMPLETION_ONSTACK(wait);
1030 unsigned long flags;
1031 unsigned int err_mask;
1034 spin_lock_irqsave(ap->lock, flags);
1036 /* no internal command while frozen */
1037 if (ap->pflags & ATA_PFLAG_FROZEN) {
1038 spin_unlock_irqrestore(ap->lock, flags);
1039 return AC_ERR_SYSTEM;
1042 /* initialize internal qc */
1044 /* XXX: Tag 0 is used for drivers with legacy EH as some
1045 * drivers choke if any other tag is given. This breaks
1046 * ata_tag_internal() test for those drivers. Don't use new
1047 * EH stuff without converting to it.
1049 if (ap->ops->error_handler)
1050 tag = ATA_TAG_INTERNAL;
1054 if (test_and_set_bit(tag, &ap->qc_allocated))
1056 qc = __ata_qc_from_tag(ap, tag);
1064 preempted_tag = ap->active_tag;
1065 preempted_sactive = ap->sactive;
1066 preempted_qc_active = ap->qc_active;
1067 ap->active_tag = ATA_TAG_POISON;
1071 /* prepare & issue qc */
1074 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1075 qc->flags |= ATA_QCFLAG_RESULT_TF;
1076 qc->dma_dir = dma_dir;
1077 if (dma_dir != DMA_NONE) {
1078 ata_sg_init_one(qc, buf, buflen);
1079 qc->nsect = buflen / ATA_SECT_SIZE;
1082 qc->private_data = &wait;
1083 qc->complete_fn = ata_qc_complete_internal;
1087 spin_unlock_irqrestore(ap->lock, flags);
1089 rc = wait_for_completion_timeout(&wait, ata_probe_timeout * HZ);
1091 ata_port_flush_task(ap);
1094 spin_lock_irqsave(ap->lock, flags);
1096 /* We're racing with irq here. If we lose, the
1097 * following test prevents us from completing the qc
1098 * twice. If we win, the port is frozen and will be
1099 * cleaned up by ->post_internal_cmd().
1101 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1102 qc->err_mask |= AC_ERR_TIMEOUT;
1104 if (ap->ops->error_handler)
1105 ata_port_freeze(ap);
1107 ata_qc_complete(qc);
1109 if (ata_msg_warn(ap))
1110 ata_dev_printk(dev, KERN_WARNING,
1111 "qc timeout (cmd 0x%x)\n", command);
1114 spin_unlock_irqrestore(ap->lock, flags);
1117 /* do post_internal_cmd */
1118 if (ap->ops->post_internal_cmd)
1119 ap->ops->post_internal_cmd(qc);
1121 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1122 if (ata_msg_warn(ap))
1123 ata_dev_printk(dev, KERN_WARNING,
1124 "zero err_mask for failed "
1125 "internal command, assuming AC_ERR_OTHER\n");
1126 qc->err_mask |= AC_ERR_OTHER;
1130 spin_lock_irqsave(ap->lock, flags);
1132 *tf = qc->result_tf;
1133 err_mask = qc->err_mask;
1136 ap->active_tag = preempted_tag;
1137 ap->sactive = preempted_sactive;
1138 ap->qc_active = preempted_qc_active;
1140 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1141 * Until those drivers are fixed, we detect the condition
1142 * here, fail the command with AC_ERR_SYSTEM and reenable the
1145 * Note that this doesn't change any behavior as internal
1146 * command failure results in disabling the device in the
1147 * higher layer for LLDDs without new reset/EH callbacks.
1149 * Kill the following code as soon as those drivers are fixed.
1151 if (ap->flags & ATA_FLAG_DISABLED) {
1152 err_mask |= AC_ERR_SYSTEM;
1156 spin_unlock_irqrestore(ap->lock, flags);
1162 * ata_do_simple_cmd - execute simple internal command
1163 * @dev: Device to which the command is sent
1164 * @cmd: Opcode to execute
1166 * Execute a 'simple' command, that only consists of the opcode
1167 * 'cmd' itself, without filling any other registers
1170 * Kernel thread context (may sleep).
1173 * Zero on success, AC_ERR_* mask on failure
1175 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1177 struct ata_taskfile tf;
1179 ata_tf_init(dev, &tf);
1182 tf.flags |= ATA_TFLAG_DEVICE;
1183 tf.protocol = ATA_PROT_NODATA;
1185 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
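/*
 * Example use (hypothetical call site): spinning a drive down for
 * suspend reduces to a single opcode, e.g.
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
 */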
1189 * ata_pio_need_iordy - check if iordy needed
1192 * Check if the current speed of the device requires IORDY. Used
1193 * by various controllers for chip configuration.
1196 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1199 int speed = adev->pio_mode - XFER_PIO_0;
1206 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1208 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1209 pio = adev->id[ATA_ID_EIDE_PIO];
1210 /* Is the speed faster than the drive allows non IORDY ? */
1212 /* This is cycle times not frequency - watch the logic! */
1213 if (pio > 240) /* PIO2 is 240nS per cycle */
1222 * ata_dev_read_id - Read ID data from the specified device
1223 * @dev: target device
1224 * @p_class: pointer to class of the target device (may be changed)
1225 * @post_reset: is this read ID post-reset?
1226 * @id: buffer to read IDENTIFY data into
1228 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1229 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1230 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1231 * for pre-ATA4 drives.
1234 * Kernel thread context (may sleep)
1237 * 0 on success, -errno otherwise.
1239 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1240 int post_reset, u16 *id)
1242 struct ata_port *ap = dev->ap;
1243 unsigned int class = *p_class;
1244 struct ata_taskfile tf;
1245 unsigned int err_mask = 0;
1249 if (ata_msg_ctl(ap))
1250 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1251 __FUNCTION__, ap->id, dev->devno);
1253 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1256 ata_tf_init(dev, &tf);
1260 tf.command = ATA_CMD_ID_ATA;
1263 tf.command = ATA_CMD_ID_ATAPI;
1267 reason = "unsupported class";
1271 tf.protocol = ATA_PROT_PIO;
1273 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1274 id, sizeof(id[0]) * ATA_ID_WORDS);
1277 reason = "I/O error";
1281 swap_buf_le16(id, ATA_ID_WORDS);
1285 reason = "device reports illegal type";
1287 if (class == ATA_DEV_ATA) {
1288 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1291 if (ata_id_is_ata(id))
1295 if (post_reset && class == ATA_DEV_ATA) {
1297 * The exact sequence expected by certain pre-ATA4 drives is:
1300 * INITIALIZE DEVICE PARAMETERS
1302 * Some drives were very specific about that exact sequence.
1304 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1305 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1308 reason = "INIT_DEV_PARAMS failed";
1312 /* current CHS translation info (id[53-58]) might be
1313 * changed. Reread the identify device info.
1325 if (ata_msg_warn(ap))
1326 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1327 "(%s, err_mask=0x%x)\n", reason, err_mask);
1331 static inline u8 ata_dev_knobble(struct ata_device *dev)
1333 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1336 static void ata_dev_config_ncq(struct ata_device *dev,
1337 char *desc, size_t desc_sz)
1339 struct ata_port *ap = dev->ap;
1340 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1342 if (!ata_id_has_ncq(dev->id)) {
1347 if (ap->flags & ATA_FLAG_NCQ) {
1348 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1349 dev->flags |= ATA_DFLAG_NCQ;
1352 if (hdepth >= ddepth)
1353 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1355 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1358 static void ata_set_port_max_cmd_len(struct ata_port *ap)
1362 if (ap->scsi_host) {
1363 unsigned int len = 0;
1365 for (i = 0; i < ATA_MAX_DEVICES; i++)
1366 len = max(len, ap->device[i].cdb_len);
1368 ap->scsi_host->max_cmd_len = len;
1373 * ata_dev_configure - Configure the specified ATA/ATAPI device
1374 * @dev: Target device to configure
1375 * @print_info: Enable device info printout
1377 * Configure @dev according to @dev->id. Generic and low-level
1378 * driver specific fixups are also applied.
1381 * Kernel thread context (may sleep)
1384 * 0 on success, -errno otherwise
1386 int ata_dev_configure(struct ata_device *dev, int print_info)
1388 struct ata_port *ap = dev->ap;
1389 const u16 *id = dev->id;
1390 unsigned int xfer_mask;
1391 char revbuf[7]; /* XYZ-99\0 */
1394 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1395 ata_dev_printk(dev, KERN_INFO,
1396 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1397 __FUNCTION__, ap->id, dev->devno);
1401 if (ata_msg_probe(ap))
1402 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1403 __FUNCTION__, ap->id, dev->devno);
1405 /* print device capabilities */
1406 if (ata_msg_probe(ap))
1407 ata_dev_printk(dev, KERN_DEBUG,
1408 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1409 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1411 id[49], id[82], id[83], id[84],
1412 id[85], id[86], id[87], id[88]);
1414 /* initialize to-be-configured parameters */
1415 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1416 dev->max_sectors = 0;
1424 * common ATA, ATAPI feature tests
1427 /* find max transfer mode; for printk only */
1428 xfer_mask = ata_id_xfermask(id);
1430 if (ata_msg_probe(ap))
1433 /* ATA-specific feature tests */
1434 if (dev->class == ATA_DEV_ATA) {
1435 if (ata_id_is_cfa(id)) {
1436 if (id[162] & 1) /* CPRM may make this media unusable */
1437 ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessible.\n",
1438 ap->id, dev->devno);
1439 snprintf(revbuf, 7, "CFA");
1442 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1444 dev->n_sectors = ata_id_n_sectors(id);
1446 if (ata_id_has_lba(id)) {
1447 const char *lba_desc;
1451 dev->flags |= ATA_DFLAG_LBA;
1452 if (ata_id_has_lba48(id)) {
1453 dev->flags |= ATA_DFLAG_LBA48;
1458 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1460 /* print device info to dmesg */
1461 if (ata_msg_drv(ap) && print_info)
1462 ata_dev_printk(dev, KERN_INFO, "%s, "
1463 "max %s, %Lu sectors: %s %s\n",
1465 ata_mode_string(xfer_mask),
1466 (unsigned long long)dev->n_sectors,
1467 lba_desc, ncq_desc);
1471 /* Default translation */
1472 dev->cylinders = id[1];
1474 dev->sectors = id[6];
1476 if (ata_id_current_chs_valid(id)) {
1477 /* Current CHS translation is valid. */
1478 dev->cylinders = id[54];
1479 dev->heads = id[55];
1480 dev->sectors = id[56];
1483 /* print device info to dmesg */
1484 if (ata_msg_drv(ap) && print_info)
1485 ata_dev_printk(dev, KERN_INFO, "%s, "
1486 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1488 ata_mode_string(xfer_mask),
1489 (unsigned long long)dev->n_sectors,
1490 dev->cylinders, dev->heads,
1494 if (dev->id[59] & 0x100) {
1495 dev->multi_count = dev->id[59] & 0xff;
1496 if (ata_msg_drv(ap) && print_info)
1497 ata_dev_printk(dev, KERN_INFO,
1498 "ata%u: dev %u multi count %u\n",
1499 ap->id, dev->devno, dev->multi_count);
1505 /* ATAPI-specific feature tests */
1506 else if (dev->class == ATA_DEV_ATAPI) {
1507 char *cdb_intr_string = "";
1509 rc = atapi_cdb_len(id);
1510 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1511 if (ata_msg_warn(ap))
1512 ata_dev_printk(dev, KERN_WARNING,
1513 "unsupported CDB len\n");
1517 dev->cdb_len = (unsigned int) rc;
1519 if (ata_id_cdb_intr(dev->id)) {
1520 dev->flags |= ATA_DFLAG_CDB_INTR;
1521 cdb_intr_string = ", CDB intr";
1524 /* print device info to dmesg */
1525 if (ata_msg_drv(ap) && print_info)
1526 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1527 ata_mode_string(xfer_mask),
1531 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1532 /* Let the user know. We don't want to disallow opens for
1533 rescue purposes, or in case the vendor is just a blithering
1536 ata_dev_printk(dev, KERN_WARNING,
1537 "Drive reports diagnostics failure. This may indicate a drive\n");
1538 ata_dev_printk(dev, KERN_WARNING,
1539 "fault or invalid emulation. Contact drive vendor for information.\n");
1543 ata_set_port_max_cmd_len(ap);
1545 /* limit bridge transfers to udma5, 200 sectors */
1546 if (ata_dev_knobble(dev)) {
1547 if (ata_msg_drv(ap) && print_info)
1548 ata_dev_printk(dev, KERN_INFO,
1549 "applying bridge limits\n");
1550 dev->udma_mask &= ATA_UDMA5;
1551 dev->max_sectors = ATA_MAX_SECTORS;
1554 if (ap->ops->dev_config)
1555 ap->ops->dev_config(ap, dev);
1557 if (ata_msg_probe(ap))
1558 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1559 __FUNCTION__, ata_chk_status(ap));
1563 if (ata_msg_probe(ap))
1564 ata_dev_printk(dev, KERN_DEBUG,
1565 "%s: EXIT, err\n", __FUNCTION__);
1570 * ata_bus_probe - Reset and probe ATA bus
1573 * Master ATA bus probing function. Initiates a hardware-dependent
1574 * bus reset, then attempts to identify any devices found on
1578 * PCI/etc. bus probe sem.
1581 * Zero on success, negative errno otherwise.
1584 int ata_bus_probe(struct ata_port *ap)
1586 unsigned int classes[ATA_MAX_DEVICES];
1587 int tries[ATA_MAX_DEVICES];
1588 int i, rc, down_xfermask;
1589 struct ata_device *dev;
1593 for (i = 0; i < ATA_MAX_DEVICES; i++)
1594 tries[i] = ATA_PROBE_MAX_TRIES;
1599 /* reset and determine device classes */
1600 ap->ops->phy_reset(ap);
1602 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1603 dev = &ap->device[i];
1605 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1606 dev->class != ATA_DEV_UNKNOWN)
1607 classes[dev->devno] = dev->class;
1609 classes[dev->devno] = ATA_DEV_NONE;
1611 dev->class = ATA_DEV_UNKNOWN;
1616 /* after the reset the device state is PIO 0 and the controller
1617 * state is undefined. Record the mode. */
1619 for (i = 0; i < ATA_MAX_DEVICES; i++)
1620 ap->device[i].pio_mode = XFER_PIO_0;
1622 /* read IDENTIFY page and configure devices */
1623 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1624 dev = &ap->device[i];
1627 dev->class = classes[i];
1629 if (!ata_dev_enabled(dev))
1632 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1636 rc = ata_dev_configure(dev, 1);
1641 /* configure transfer mode */
1642 rc = ata_set_mode(ap, &dev);
1648 for (i = 0; i < ATA_MAX_DEVICES; i++)
1649 if (ata_dev_enabled(&ap->device[i]))
1652 /* no device present, disable port */
1653 ata_port_disable(ap);
1654 ap->ops->port_disable(ap);
1661 tries[dev->devno] = 0;
1664 sata_down_spd_limit(ap);
1667 tries[dev->devno]--;
1668 if (down_xfermask &&
1669 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1670 tries[dev->devno] = 0;
1673 if (!tries[dev->devno]) {
1674 ata_down_xfermask_limit(dev, 1);
1675 ata_dev_disable(dev);
1682 * ata_port_probe - Mark port as enabled
1683 * @ap: Port for which we indicate enablement
1685 * Modify @ap data structure such that the system
1686 * thinks that the entire port is enabled.
1688 * LOCKING: host lock, or some other form of
1692 void ata_port_probe(struct ata_port *ap)
1694 ap->flags &= ~ATA_FLAG_DISABLED;
1698 * sata_print_link_status - Print SATA link status
1699 * @ap: SATA port to printk link status about
1701 * This function prints link speed and status of a SATA link.
1706 static void sata_print_link_status(struct ata_port *ap)
1708 u32 sstatus, scontrol, tmp;
1710 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1712 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1714 if (ata_port_online(ap)) {
1715 tmp = (sstatus >> 4) & 0xf;
1716 ata_port_printk(ap, KERN_INFO,
1717 "SATA link up %s (SStatus %X SControl %X)\n",
1718 sata_spd_string(tmp), sstatus, scontrol);
1720 ata_port_printk(ap, KERN_INFO,
1721 "SATA link down (SStatus %X SControl %X)\n",
1727 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1728 * @ap: SATA port associated with target SATA PHY.
1730 * This function issues commands to standard SATA Sxxx
1731 * PHY registers, to wake up the phy (and device), and
1732 * clear any reset condition.
1735 * PCI/etc. bus probe sem.
1738 void __sata_phy_reset(struct ata_port *ap)
1741 unsigned long timeout = jiffies + (HZ * 5);
1743 if (ap->flags & ATA_FLAG_SATA_RESET) {
1744 /* issue phy wake/reset */
1745 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1746 /* Couldn't find anything in SATA I/II specs, but
1747 * AHCI-1.1 10.4.2 says at least 1 ms. */
1750 /* phy wake/clear reset */
1751 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1753 /* wait for phy to become ready, if necessary */
1756 sata_scr_read(ap, SCR_STATUS, &sstatus);
1757 if ((sstatus & 0xf) != 1)
1759 } while (time_before(jiffies, timeout));
1761 /* print link status */
1762 sata_print_link_status(ap);
1764 /* TODO: phy layer with polling, timeouts, etc. */
1765 if (!ata_port_offline(ap))
1768 ata_port_disable(ap);
1770 if (ap->flags & ATA_FLAG_DISABLED)
1773 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1774 ata_port_disable(ap);
1778 ap->cbl = ATA_CBL_SATA;
1782 * sata_phy_reset - Reset SATA bus.
1783 * @ap: SATA port associated with target SATA PHY.
1785 * This function resets the SATA bus, and then probes
1786 * the bus for devices.
1789 * PCI/etc. bus probe sem.
1792 void sata_phy_reset(struct ata_port *ap)
1794 __sata_phy_reset(ap);
1795 if (ap->flags & ATA_FLAG_DISABLED)
1801 * ata_dev_pair - return other device on cable
1804 * Obtain the other device on the same cable, or NULL if no
1805 * other device is present.
1808 struct ata_device *ata_dev_pair(struct ata_device *adev)
1810 struct ata_port *ap = adev->ap;
1811 struct ata_device *pair = &ap->device[1 - adev->devno];
1812 if (!ata_dev_enabled(pair))
1818 * ata_port_disable - Disable port.
1819 * @ap: Port to be disabled.
1821 * Modify @ap data structure such that the system
1822 * thinks that the entire port is disabled, and should
1823 * never attempt to probe or communicate with devices
1826 * LOCKING: host lock, or some other form of
1830 void ata_port_disable(struct ata_port *ap)
1832 ap->device[0].class = ATA_DEV_NONE;
1833 ap->device[1].class = ATA_DEV_NONE;
1834 ap->flags |= ATA_FLAG_DISABLED;
1838 * sata_down_spd_limit - adjust SATA spd limit downward
1839 * @ap: Port to adjust SATA spd limit for
1841 * Adjust SATA spd limit of @ap downward. Note that this
1842 * function only adjusts the limit. The change must be applied
1843 * using sata_set_spd().
1846 * Inherited from caller.
1849 * 0 on success, negative errno on failure
1851 int sata_down_spd_limit(struct ata_port *ap)
1853 u32 sstatus, spd, mask;
1856 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1860 mask = ap->sata_spd_limit;
1863 highbit = fls(mask) - 1;
1864 mask &= ~(1 << highbit);
1866 spd = (sstatus >> 4) & 0xf;
1870 mask &= (1 << spd) - 1;
1874 ap->sata_spd_limit = mask;
1876 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1877 sata_spd_string(fls(mask)));
1882 static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1886 if (ap->sata_spd_limit == UINT_MAX)
1889 limit = fls(ap->sata_spd_limit);
1891 spd = (*scontrol >> 4) & 0xf;
1892 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1894 return spd != limit;
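/*
 * SControl bits 7:4 hold the speed limit written above: 0x0 means no
 * restriction, 0x1 limits the link to the 1.5 Gbps generation and
 * 0x2 to 3.0 Gbps, which is why a sata_spd_limit of (say) 1 leads to
 * fls(1) == 1 being written into that nibble.
 */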
1898 * sata_set_spd_needed - is SATA spd configuration needed
1899 * @ap: Port in question
1901 * Test whether the spd limit in SControl matches
1902 * @ap->sata_spd_limit. This function is used to determine
1903 * whether hardreset is necessary to apply SATA spd
1907 * Inherited from caller.
1910 * 1 if SATA spd configuration is needed, 0 otherwise.
1912 int sata_set_spd_needed(struct ata_port *ap)
1916 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1919 return __sata_set_spd_needed(ap, &scontrol);
1923 * sata_set_spd - set SATA spd according to spd limit
1924 * @ap: Port to set SATA spd for
1926 * Set SATA spd of @ap according to sata_spd_limit.
1929 * Inherited from caller.
1932 * 0 if spd doesn't need to be changed, 1 if spd has been
1933 * changed. Negative errno if SCR registers are inaccessible.
1935 int sata_set_spd(struct ata_port *ap)
1940 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1943 if (!__sata_set_spd_needed(ap, &scontrol))
1946 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1953 * This mode timing computation functionality is ported over from
1954 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1957 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1958 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1959 * for UDMA6, which is currently supported only by Maxtor drives.
1961 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
1964 static const struct ata_timing ata_timing[] = {
1966 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1967 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1968 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1969 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1971 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
1972 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
1973 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1974 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1975 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1977 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1979 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1980 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1981 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1983 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1984 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1985 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1987 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
1988 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
1989 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1990 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1992 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1993 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1994 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1996 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2001 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2002 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
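/*
 * Worked example: ENOUGH() is a round-up division, so with a 33 MHz
 * clock (T == 30000, i.e. the clock period in picoseconds once the
 * nanosecond timings are scaled by 1000 below) the 70 ns PIO0 setup
 * time quantizes to ENOUGH(70000, 30000) == 3 clocks; EZ() merely
 * keeps zero fields (unspecified timings) at zero.
 */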
2004 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2006 q->setup = EZ(t->setup * 1000, T);
2007 q->act8b = EZ(t->act8b * 1000, T);
2008 q->rec8b = EZ(t->rec8b * 1000, T);
2009 q->cyc8b = EZ(t->cyc8b * 1000, T);
2010 q->active = EZ(t->active * 1000, T);
2011 q->recover = EZ(t->recover * 1000, T);
2012 q->cycle = EZ(t->cycle * 1000, T);
2013 q->udma = EZ(t->udma * 1000, UT);
2016 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2017 struct ata_timing *m, unsigned int what)
2019 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2020 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2021 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2022 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2023 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2024 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2025 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2026 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2029 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2031 const struct ata_timing *t;
2033 for (t = ata_timing; t->mode != speed; t++)
2034 if (t->mode == 0xFF)
2039 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2040 struct ata_timing *t, int T, int UT)
2042 const struct ata_timing *s;
2043 struct ata_timing p;
2049 if (!(s = ata_timing_find_mode(speed)))
2052 memcpy(t, s, sizeof(*s));
2055 * If the drive is an EIDE drive, it can tell us it needs extended
2056 * PIO/MW_DMA cycle timing.
2059 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2060 memset(&p, 0, sizeof(p));
2061 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2062 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2063 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2064 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2065 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2067 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2071 * Convert the timing to bus clock counts.
2074 ata_timing_quantize(t, t, T, UT);
2077 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2078 * S.M.A.R.T. and some other commands. We have to ensure that the
2079 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2082 if (speed > XFER_PIO_4) {
2083 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2084 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2088 * Lengthen active & recovery time so that cycle time is correct.
2091 if (t->act8b + t->rec8b < t->cyc8b) {
2092 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2093 t->rec8b = t->cyc8b - t->act8b;
2096 if (t->active + t->recover < t->cycle) {
2097 t->active += (t->cycle - (t->active + t->recover)) / 2;
2098 t->recover = t->cycle - t->active;
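/*
 * Worked example of the lengthening above: if quantization left
 * active == 3 and recover == 2 but cycle == 7 clocks, the two
 * missing clocks are split so active becomes 4, and recover is
 * recomputed as 7 - 4 == 3, making active + recover == cycle.
 */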
2105 * ata_down_xfermask_limit - adjust dev xfer masks downward
2106 * @dev: Device to adjust xfer masks
2107 * @force_pio0: Force PIO0
2109 * Adjust xfer masks of @dev downward. Note that this function
2110 * does not apply the change. Invoking ata_set_mode() afterwards
2111 * will apply the limit.
2114 * Inherited from caller.
2117 * 0 on success, negative errno on failure
2119 int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2121 unsigned long xfer_mask;
2124 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2129 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2130 if (xfer_mask & ATA_MASK_UDMA)
2131 xfer_mask &= ~ATA_MASK_MWDMA;
2133 highbit = fls(xfer_mask) - 1;
2134 xfer_mask &= ~(1 << highbit);
2136 xfer_mask &= 1 << ATA_SHIFT_PIO;
2140 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2143 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2144 ata_mode_string(xfer_mask));
2152 static int ata_dev_set_mode(struct ata_device *dev)
2154 unsigned int err_mask;
2157 dev->flags &= ~ATA_DFLAG_PIO;
2158 if (dev->xfer_shift == ATA_SHIFT_PIO)
2159 dev->flags |= ATA_DFLAG_PIO;
2161 err_mask = ata_dev_set_xfermode(dev);
2163 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2164 "(err_mask=0x%x)\n", err_mask);
2168 rc = ata_dev_revalidate(dev, 0);
2172 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2173 dev->xfer_shift, (int)dev->xfer_mode);
2175 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2176 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2181 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2182 * @ap: port on which timings will be programmed
2183 * @r_failed_dev: out parameter for failed device
2185 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2186 * ata_set_mode() fails, pointer to the failing device is
2187 * returned in @r_failed_dev.
2190 * PCI/etc. bus probe sem.
2193 * 0 on success, negative errno otherwise
2195 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2197 struct ata_device *dev;
2198 int i, rc = 0, used_dma = 0, found = 0;
2200 /* has private set_mode? */
2201 if (ap->ops->set_mode) {
2202 /* FIXME: make ->set_mode handle no device case and
2203 * return error code and failing device on failure.
2205 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2206 if (ata_dev_ready(&ap->device[i])) {
2207 ap->ops->set_mode(ap);
2214 /* step 1: calculate xfer_mask */
2215 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2216 unsigned int pio_mask, dma_mask;
2218 dev = &ap->device[i];
2220 if (!ata_dev_enabled(dev))
2223 ata_dev_xfermask(dev);
2225 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2226 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2227 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2228 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2237 /* step 2: always set host PIO timings */
2238 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2239 dev = &ap->device[i];
2240 if (!ata_dev_enabled(dev))
2243 if (!dev->pio_mode) {
2244 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2249 dev->xfer_mode = dev->pio_mode;
2250 dev->xfer_shift = ATA_SHIFT_PIO;
2251 if (ap->ops->set_piomode)
2252 ap->ops->set_piomode(ap, dev);
2255 /* step 3: set host DMA timings */
2256 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2257 dev = &ap->device[i];
2259 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2262 dev->xfer_mode = dev->dma_mode;
2263 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2264 if (ap->ops->set_dmamode)
2265 ap->ops->set_dmamode(ap, dev);
2268 /* step 4: update devices' xfer mode */
2269 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2270 dev = &ap->device[i];
2272 /* don't update suspended devices' xfer mode */
2273 if (!ata_dev_ready(dev))
2276 rc = ata_dev_set_mode(dev);
2281 /* Record simplex status. If we selected DMA then the other
2282 * host channels are not permitted to do so.
2284 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2285 ap->host->simplex_claimed = 1;
2287 /* step 5: chip specific finalisation */
2288 if (ap->ops->post_set_mode)
2289 ap->ops->post_set_mode(ap);
2293 *r_failed_dev = dev;
2298 * ata_tf_to_host - issue ATA taskfile to host controller
2299 * @ap: port to which command is being issued
2300 * @tf: ATA taskfile register set
2302 * Issues ATA taskfile register set to ATA host controller,
2303 * with proper synchronization with interrupt handler and
2307 * spin_lock_irqsave(host lock)
2310 static inline void ata_tf_to_host(struct ata_port *ap,
2311 const struct ata_taskfile *tf)
2313 ap->ops->tf_load(ap, tf);
2314 ap->ops->exec_command(ap, tf);
2318 * ata_busy_sleep - sleep until BSY clears, or timeout
2319 * @ap: port containing status register to be polled
2320 * @tmout_pat: impatience timeout
2321 * @tmout: overall timeout
2323 * Sleep until ATA Status register bit BSY clears,
2324 * or a timeout occurs.
2329 unsigned int ata_busy_sleep (struct ata_port *ap,
2330 unsigned long tmout_pat, unsigned long tmout)
2332 unsigned long timer_start, timeout;
2335 status = ata_busy_wait(ap, ATA_BUSY, 300);
2336 timer_start = jiffies;
2337 timeout = timer_start + tmout_pat;
2338 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2340 status = ata_busy_wait(ap, ATA_BUSY, 3);
2343 if (status & ATA_BUSY)
2344 ata_port_printk(ap, KERN_WARNING,
2345 "port is slow to respond, please be patient "
2346 "(Status 0x%x)\n", status);
2348 timeout = timer_start + tmout;
2349 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
2351 status = ata_chk_status(ap);
2354 if (status & ATA_BUSY) {
2355 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2356 "(%lu secs, Status 0x%x)\n",
2357 tmout / HZ, status);
2364 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2366 struct ata_ioports *ioaddr = &ap->ioaddr;
2367 unsigned int dev0 = devmask & (1 << 0);
2368 unsigned int dev1 = devmask & (1 << 1);
2369 unsigned long timeout;
2371 /* if device 0 was found in ata_devchk, wait for its
2375 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2377 /* if device 1 was found in ata_devchk, wait for
2378 * register access, then wait for BSY to clear
2380 timeout = jiffies + ATA_TMOUT_BOOT;
2384 ap->ops->dev_select(ap, 1);
2385 if (ap->flags & ATA_FLAG_MMIO) {
2386 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2387 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2389 nsect = inb(ioaddr->nsect_addr);
2390 lbal = inb(ioaddr->lbal_addr);
2392 if ((nsect == 1) && (lbal == 1))
2394 if (time_after(jiffies, timeout)) {
2398 msleep(50); /* give drive a breather */
2401 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2403 /* is all this really necessary? */
2404 ap->ops->dev_select(ap, 0);
2406 ap->ops->dev_select(ap, 1);
2408 ap->ops->dev_select(ap, 0);
2411 static unsigned int ata_bus_softreset(struct ata_port *ap,
2412 unsigned int devmask)
2414 struct ata_ioports *ioaddr = &ap->ioaddr;
2416 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2418 /* software reset. causes dev0 to be selected */
2419 if (ap->flags & ATA_FLAG_MMIO) {
2420 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2421 udelay(20); /* FIXME: flush */
2422 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2423 udelay(20); /* FIXME: flush */
2424 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2426 outb(ap->ctl, ioaddr->ctl_addr);
2428 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2430 outb(ap->ctl, ioaddr->ctl_addr);
2433 /* spec mandates ">= 2ms" before checking status.
2434 * We wait 150ms, because that was the magic delay used for
2435 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2436 * between when the ATA command register is written, and then
2437 * status is checked. Because waiting for "a while" before
2438 * checking status is fine, post SRST, we perform this magic
2439 * delay here as well.
2441 * Old drivers/ide uses the 2 ms rule and then waits for ready.
2445 /* Before we perform post reset processing we want to see if
2446 * the bus shows 0xFF because the odd clown forgets the D7
2447 * pulldown resistor.
2449 if (ata_check_status(ap) == 0xFF) {
2450 ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
2451 return AC_ERR_OTHER;
2454 ata_bus_post_reset(ap, devmask);
2460 * ata_bus_reset - reset host port and associated ATA channel
2461 * @ap: port to reset
2463 * This is typically the first time we actually start issuing
2464 * commands to the ATA channel. We wait for BSY to clear, then
2465 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2466 * result. Determine what devices, if any, are on the channel
2467 * by looking at the device 0/1 error register. Look at the signature
2468 * stored in each device's taskfile registers, to determine if
2469 * the device is ATA or ATAPI.
2472 * PCI/etc. bus probe sem.
2473 * Obtains host lock.
2476 * Sets ATA_FLAG_DISABLED if bus reset fails.
2479 void ata_bus_reset(struct ata_port *ap)
2481 struct ata_ioports *ioaddr = &ap->ioaddr;
2482 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2484 unsigned int dev0, dev1 = 0, devmask = 0;
2486 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2488 /* determine if device 0/1 are present */
2489 if (ap->flags & ATA_FLAG_SATA_RESET)
2492 dev0 = ata_devchk(ap, 0);
2494 dev1 = ata_devchk(ap, 1);
2498 devmask |= (1 << 0);
2500 devmask |= (1 << 1);
2502 /* select device 0 again */
2503 ap->ops->dev_select(ap, 0);
2505 /* issue bus reset */
2506 if (ap->flags & ATA_FLAG_SRST)
2507 if (ata_bus_softreset(ap, devmask))
2511 * determine by signature whether we have ATA or ATAPI devices
2513 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2514 if ((slave_possible) && (err != 0x81))
2515 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2517 /* re-enable interrupts */
2518 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2521 /* is double-select really necessary? */
2522 if (ap->device[1].class != ATA_DEV_NONE)
2523 ap->ops->dev_select(ap, 1);
2524 if (ap->device[0].class != ATA_DEV_NONE)
2525 ap->ops->dev_select(ap, 0);
2527 /* if no devices were detected, disable this port */
2528 if ((ap->device[0].class == ATA_DEV_NONE) &&
2529 (ap->device[1].class == ATA_DEV_NONE))
2532 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2533 /* set up device control for ATA_FLAG_SATA_RESET */
2534 if (ap->flags & ATA_FLAG_MMIO)
2535 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2537 outb(ap->ctl, ioaddr->ctl_addr);
2544 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2545 ap->ops->port_disable(ap);
2551 * sata_phy_debounce - debounce SATA phy status
2552 * @ap: ATA port to debounce SATA phy status for
2553 * @params: timing parameters { interval, duration, timeout } in msec
2555 * Make sure SStatus of @ap reaches stable state, determined by
2556 * holding the same value where DET is not 1 for @duration polled
2557 * every @interval, before @timeout. @timeout constrains the
2558 * beginning of the stable state. Because, after hot unplugging,
2559 * DET gets stuck at 1 on some controllers, this function waits
2560 * until timeout then returns 0 if DET is stable at 1.
2563 * Kernel thread context (may sleep)
2566 * 0 on success, -errno on failure.
2568 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2570 unsigned long interval_msec = params[0];
2571 unsigned long duration = params[1] * HZ / 1000;
2572 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2573 unsigned long last_jiffies;
2577 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2582 last_jiffies = jiffies;
2585 msleep(interval_msec);
2586 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2592 if (cur == 1 && time_before(jiffies, timeout))
2594 if (time_after(jiffies, last_jiffies + duration))
2599 /* unstable, start over */
2601 last_jiffies = jiffies;
2604 if (time_after(jiffies, timeout))
2610 * sata_phy_resume - resume SATA phy
2611 * @ap: ATA port to resume SATA phy for
2612 * @params: timing parameters { interval, duration, timeout } in msec
2614 * Resume SATA phy of @ap and debounce it.
2617 * Kernel thread context (may sleep)
2620 * 0 on success, -errno on failure.
2622 int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2627 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2630 scontrol = (scontrol & 0x0f0) | 0x300;
2632 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2635 /* Some PHYs react badly if SStatus is pounded immediately
2636 * after resuming. Delay 200ms before debouncing.
2640 return sata_phy_debounce(ap, params);
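
/*
 * Example (editor's sketch, not part of libata): a driver-private
 * reset path could drive the resume helper like this, treating
 * -EOPNOTSUPP (no SCR access) as non-fatal, mirroring the callers
 * below:
 *
 *	int rc = sata_phy_resume(ap, sata_deb_timing_hotplug);
 *
 *	if (rc && rc != -EOPNOTSUPP)
 *		return rc;
 *
 * On success the phy is out of reset and SStatus has held a stable
 * value for the requested duration.
 */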
2643 static void ata_wait_spinup(struct ata_port *ap)
2645 struct ata_eh_context *ehc = &ap->eh_context;
2646 unsigned long end, secs;
2649 /* first, debounce phy if SATA */
2650 if (ap->cbl == ATA_CBL_SATA) {
2651 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
2653 /* if debounced successfully and offline, no need to wait */
2654 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2658 /* okay, let's give the drive time to spin up */
2659 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2660 secs = ((end - jiffies) + HZ - 1) / HZ;
2662 if (time_after(jiffies, end))
2666 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2667 "(%lu secs)\n", secs);
2669 schedule_timeout_uninterruptible(end - jiffies);
2673 * ata_std_prereset - prepare for reset
2674 * @ap: ATA port to be reset
2676 * @ap is about to be reset. Initialize it.
2679 * Kernel thread context (may sleep)
2682 * 0 on success, -errno otherwise.
2684 int ata_std_prereset(struct ata_port *ap)
2686 struct ata_eh_context *ehc = &ap->eh_context;
2687 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2690 /* handle link resume & hotplug spinup */
2691 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2692 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2693 ehc->i.action |= ATA_EH_HARDRESET;
2695 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2696 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2697 ata_wait_spinup(ap);
2699 /* if we're about to do hardreset, nothing more to do */
2700 if (ehc->i.action & ATA_EH_HARDRESET)
2703 /* if SATA, resume phy */
2704 if (ap->cbl == ATA_CBL_SATA) {
2705 rc = sata_phy_resume(ap, timing);
2706 if (rc && rc != -EOPNOTSUPP) {
2707 /* phy resume failed */
2708 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2709 "link for reset (errno=%d)\n", rc);
2714 /* Wait for !BSY if the controller can wait for the first D2H
2715 * Reg FIS and we don't know that no device is attached.
2717 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2718 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2724 * ata_std_softreset - reset host port via ATA SRST
2725 * @ap: port to reset
2726 * @classes: resulting classes of attached devices
2728 * Reset host port using ATA SRST.
2731 * Kernel thread context (may sleep)
2734 * 0 on success, -errno otherwise.
2736 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2738 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2739 unsigned int devmask = 0, err_mask;
2744 if (ata_port_offline(ap)) {
2745 classes[0] = ATA_DEV_NONE;
2749 /* determine if device 0/1 are present */
2750 if (ata_devchk(ap, 0))
2751 devmask |= (1 << 0);
2752 if (slave_possible && ata_devchk(ap, 1))
2753 devmask |= (1 << 1);
2755 /* select device 0 again */
2756 ap->ops->dev_select(ap, 0);
2758 /* issue bus reset */
2759 DPRINTK("about to softreset, devmask=%x\n", devmask);
2760 err_mask = ata_bus_softreset(ap, devmask);
2762 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2767 /* determine by signature whether we have ATA or ATAPI devices */
2768 classes[0] = ata_dev_try_classify(ap, 0, &err);
2769 if (slave_possible && err != 0x81)
2770 classes[1] = ata_dev_try_classify(ap, 1, &err);
2773 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2778 * sata_std_hardreset - reset host port via SATA phy reset
2779 * @ap: port to reset
2780 * @class: resulting class of attached device
2782 * SATA phy-reset host port using DET bits of SControl register.
2785 * Kernel thread context (may sleep)
2788 * 0 on success, -errno otherwise.
2790 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2792 struct ata_eh_context *ehc = &ap->eh_context;
2793 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2799 if (sata_set_spd_needed(ap)) {
2800 /* SATA spec says nothing about how to reconfigure
2801 * spd. To be on the safe side, turn off phy during
2802 * reconfiguration. This works for at least ICH7 AHCI.
2805 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2808 scontrol = (scontrol & 0x0f0) | 0x304;
2810 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2816 /* issue phy wake/reset */
2817 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2820 scontrol = (scontrol & 0x0f0) | 0x301;
2822 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2825 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2826 * 10.4.2 says at least 1 ms.
2830 /* bring phy back */
2831 sata_phy_resume(ap, timing);
2833 /* TODO: phy layer with polling, timeouts, etc. */
2834 if (ata_port_offline(ap)) {
2835 *class = ATA_DEV_NONE;
2836 DPRINTK("EXIT, link offline\n");
2840 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2841 ata_port_printk(ap, KERN_ERR,
2842 "COMRESET failed (device not ready)\n");
2846 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2848 *class = ata_dev_try_classify(ap, 0, NULL);
2850 DPRINTK("EXIT, class=%u\n", *class);
2855 * ata_std_postreset - standard postreset callback
2856 * @ap: the target ata_port
2857 * @classes: classes of attached devices
2859 * This function is invoked after a successful reset. Note that
2860 * the device might have been reset more than once using
2861 * different reset methods before postreset is invoked.
2864 * Kernel thread context (may sleep)
2866 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2872 /* print link status */
2873 sata_print_link_status(ap);
2876 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2877 sata_scr_write(ap, SCR_ERROR, serror);
2879 /* re-enable interrupts */
2880 if (!ap->ops->error_handler) {
2881 /* FIXME: hack. create a hook instead */
2882 if (ap->ioaddr.ctl_addr)
2886 /* is double-select really necessary? */
2887 if (classes[1] != ATA_DEV_NONE)
2888 ap->ops->dev_select(ap, 1);
2889 if (classes[0] != ATA_DEV_NONE)
2890 ap->ops->dev_select(ap, 0);
2892 /* bail out if no device is present */
2893 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2894 DPRINTK("EXIT, no device\n");
2898 /* set up device control */
2899 if (ap->ioaddr.ctl_addr) {
2900 if (ap->flags & ATA_FLAG_MMIO)
2901 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2903 outb(ap->ctl, ap->ioaddr.ctl_addr);
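
/*
 * Example (editor's sketch): the standard pre/soft/hard/post reset
 * callbacks are normally wired together from a LLDD's ->error_handler
 * hook.  Assuming the ata_do_eh() helper of this libata generation,
 * a minimal handler looks like:
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
 *			  sata_std_hardreset, ata_std_postreset);
 *	}
 *
 * "my_error_handler" is hypothetical; drivers commonly substitute
 * their own prereset or hardreset implementations.
 */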
2910 * ata_dev_same_device - Determine whether new ID matches configured device
2911 * @dev: device to compare against
2912 * @new_class: class of the new device
2913 * @new_id: IDENTIFY page of the new device
2915 * Compare @new_class and @new_id against @dev and determine
2916 * whether @dev is the device indicated by @new_class and @new_id.
2923 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2925 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2928 const u16 *old_id = dev->id;
2929 unsigned char model[2][41], serial[2][21];
2932 if (dev->class != new_class) {
2933 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2934 dev->class, new_class);
2938 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2939 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2940 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2941 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2942 new_n_sectors = ata_id_n_sectors(new_id);
2944 if (strcmp(model[0], model[1])) {
2945 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2946 "'%s' != '%s'\n", model[0], model[1]);
2950 if (strcmp(serial[0], serial[1])) {
2951 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2952 "'%s' != '%s'\n", serial[0], serial[1]);
2956 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2957 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2959 (unsigned long long)dev->n_sectors,
2960 (unsigned long long)new_n_sectors);
2968 * ata_dev_revalidate - Revalidate ATA device
2969 * @dev: device to revalidate
2970 * @post_reset: is this revalidation after reset?
2972 * Re-read IDENTIFY page and make sure @dev is still attached to
2976 * Kernel thread context (may sleep)
2979 * 0 on success, negative errno otherwise
2981 int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2983 unsigned int class = dev->class;
2984 u16 *id = (void *)dev->ap->sector_buf;
2987 if (!ata_dev_enabled(dev)) {
2993 rc = ata_dev_read_id(dev, &class, post_reset, id);
2997 /* is the device still there? */
2998 if (!ata_dev_same_device(dev, class, id)) {
3003 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3005 /* configure device according to the new ID */
3006 rc = ata_dev_configure(dev, 0);
3011 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3015 static const char * const ata_dma_blacklist [] = {
3016 "WDC AC11000H", NULL,
3017 "WDC AC22100H", NULL,
3018 "WDC AC32500H", NULL,
3019 "WDC AC33100H", NULL,
3020 "WDC AC31600H", NULL,
3021 "WDC AC32100H", "24.09P07",
3022 "WDC AC23200L", "21.10N21",
3023 "Compaq CRD-8241B", NULL,
3028 "SanDisk SDP3B", NULL,
3029 "SanDisk SDP3B-64", NULL,
3030 "SANYO CD-ROM CRD", NULL,
3031 "HITACHI CDR-8", NULL,
3032 "HITACHI CDR-8335", NULL,
3033 "HITACHI CDR-8435", NULL,
3034 "Toshiba CD-ROM XM-6202B", NULL,
3035 "TOSHIBA CD-ROM XM-1702BC", NULL,
3037 "E-IDE CD-ROM CR-840", NULL,
3038 "CD-ROM Drive/F5A", NULL,
3039 "WPI CDD-820", NULL,
3040 "SAMSUNG CD-ROM SC-148C", NULL,
3041 "SAMSUNG CD-ROM SC", NULL,
3042 "SanDisk SDP3B-64", NULL,
3043 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
3044 "_NEC DV5800A", NULL,
3045 "SAMSUNG CD-ROM SN-124", "N001"
3048 static int ata_strim(char *s, size_t len)
3050 len = strnlen(s, len);
3052 /* ATAPI specifies that empty space is blank-filled; remove blanks */
3053 while ((len > 0) && (s[len - 1] == ' ')) {
3060 static int ata_dma_blacklisted(const struct ata_device *dev)
3062 unsigned char model_num[40];
3063 unsigned char model_rev[16];
3064 unsigned int nlen, rlen;
3067 /* We don't support polling DMA.
3068 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3069 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3071 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3072 (dev->flags & ATA_DFLAG_CDB_INTR))
3075 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3077 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3079 nlen = ata_strim(model_num, sizeof(model_num));
3080 rlen = ata_strim(model_rev, sizeof(model_rev));
3082 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
3083 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
3084 if (ata_dma_blacklist[i+1] == NULL)
3086 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
3094 * ata_dev_xfermask - Compute supported xfermask of the given device
3095 * @dev: Device to compute xfermask for
3097 * Compute supported xfermask of @dev and store it in
3098 * dev->*_mask. This function is responsible for applying all
3099 * known limits including host controller limits, device blacklist, etc.
3105 static void ata_dev_xfermask(struct ata_device *dev)
3107 struct ata_port *ap = dev->ap;
3108 struct ata_host *host = ap->host;
3109 unsigned long xfer_mask;
3111 /* controller modes available */
3112 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3113 ap->mwdma_mask, ap->udma_mask);
3115 /* Apply cable rule here. Don't apply it early because when
3116 * we handle hot plug the cable type can itself change.
3118 if (ap->cbl == ATA_CBL_PATA40)
3119 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3121 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3122 dev->mwdma_mask, dev->udma_mask);
3123 xfer_mask &= ata_id_xfermask(dev->id);
3126 * CFA Advanced TrueIDE timings are not allowed on a shared
3129 if (ata_dev_pair(dev)) {
3130 /* No PIO5 or PIO6 */
3131 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3132 /* No MWDMA3 or MWDMA4 */
3133 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3136 if (ata_dma_blacklisted(dev)) {
3137 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3138 ata_dev_printk(dev, KERN_WARNING,
3139 "device is on DMA blacklist, disabling DMA\n");
3142 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
3143 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3144 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3145 "other device, disabling DMA\n");
3148 if (ap->ops->mode_filter)
3149 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3151 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3152 &dev->mwdma_mask, &dev->udma_mask);
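
/*
 * Example (editor's sketch): because PIO, MWDMA and UDMA bits share
 * one packed word, every limit above is a plain bit operation.  The
 * 40-wire cable rule, for instance, is:
 *
 *	xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 *
 * 0xF8 covers UDMA3 and above, i.e. the modes faster than UDMA/33
 * that a 40-conductor cable cannot carry reliably.
 */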
3156 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3157 * @dev: Device to which command will be sent
3159 * Issue SET FEATURES - XFER MODE command to device @dev.
3163 * PCI/etc. bus probe sem.
3166 * 0 on success, AC_ERR_* mask otherwise.
3169 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3171 struct ata_taskfile tf;
3172 unsigned int err_mask;
3174 /* set up set-features taskfile */
3175 DPRINTK("set features - xfer mode\n");
3177 ata_tf_init(dev, &tf);
3178 tf.command = ATA_CMD_SET_FEATURES;
3179 tf.feature = SETFEATURES_XFER;
3180 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3181 tf.protocol = ATA_PROT_NODATA;
3182 tf.nsect = dev->xfer_mode;
3184 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3186 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3191 * ata_dev_init_params - Issue INIT DEV PARAMS command
3192 * @dev: Device to which command will be sent
3193 * @heads: Number of heads (taskfile parameter)
3194 * @sectors: Number of sectors (taskfile parameter)
3197 * Kernel thread context (may sleep)
3200 * 0 on success, AC_ERR_* mask otherwise.
3202 static unsigned int ata_dev_init_params(struct ata_device *dev,
3203 u16 heads, u16 sectors)
3205 struct ata_taskfile tf;
3206 unsigned int err_mask;
3208 /* Number of sectors per track 1-255. Number of heads 1-16 */
3209 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3210 return AC_ERR_INVALID;
3212 /* set up init dev params taskfile */
3213 DPRINTK("init dev params \n");
3215 ata_tf_init(dev, &tf);
3216 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3217 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3218 tf.protocol = ATA_PROT_NODATA;
3220 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3222 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3224 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3229 * ata_sg_clean - Unmap DMA memory associated with command
3230 * @qc: Command containing DMA memory to be released
3232 * Unmap all mapped DMA memory associated with this command.
3235 * spin_lock_irqsave(host lock)
3238 static void ata_sg_clean(struct ata_queued_cmd *qc)
3240 struct ata_port *ap = qc->ap;
3241 struct scatterlist *sg = qc->__sg;
3242 int dir = qc->dma_dir;
3243 void *pad_buf = NULL;
3245 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3246 WARN_ON(sg == NULL);
3248 if (qc->flags & ATA_QCFLAG_SINGLE)
3249 WARN_ON(qc->n_elem > 1);
3251 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3253 /* if we padded the buffer out to a 32-bit boundary, and the
3254 * data xfer direction is from-device, we must copy from the
3255 * pad buffer back into the supplied buffer
3257 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3258 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3260 if (qc->flags & ATA_QCFLAG_SG) {
3262 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3263 /* restore last sg */
3264 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3266 struct scatterlist *psg = &qc->pad_sgent;
3267 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3268 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3269 kunmap_atomic(addr, KM_IRQ0);
3273 dma_unmap_single(ap->dev,
3274 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3277 sg->length += qc->pad_len;
3279 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3280 pad_buf, qc->pad_len);
3283 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3288 * ata_fill_sg - Fill PCI IDE PRD table
3289 * @qc: Metadata associated with taskfile to be transferred
3291 * Fill PCI IDE PRD (scatter-gather) table with segments
3292 * associated with the current disk command.
3295 * spin_lock_irqsave(host lock)
3298 static void ata_fill_sg(struct ata_queued_cmd *qc)
3300 struct ata_port *ap = qc->ap;
3301 struct scatterlist *sg;
3304 WARN_ON(qc->__sg == NULL);
3305 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3308 ata_for_each_sg(sg, qc) {
3312 /* determine if physical DMA addr spans 64K boundary.
3313 * Note h/w doesn't support 64-bit, so we unconditionally
3314 * truncate dma_addr_t to u32.
3316 addr = (u32) sg_dma_address(sg);
3317 sg_len = sg_dma_len(sg);
3320 offset = addr & 0xffff;
3322 if ((offset + sg_len) > 0x10000)
3323 len = 0x10000 - offset;
3325 ap->prd[idx].addr = cpu_to_le32(addr);
3326 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3327 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3336 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
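
/*
 * Worked example (editor's addition): a segment at DMA address
 * 0xFFF0 with length 0x20 crosses a 64K boundary, so the loop above
 * emits two PRD entries, { 0xFFF0, 0x10 } and { 0x10000, 0x10 }.
 */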
3339 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3340 * @qc: Metadata associated with taskfile to check
3342 * Allow low-level driver to filter ATA PACKET commands, returning
3343 * a status indicating whether or not it is OK to use DMA for the
3344 * supplied PACKET command.
3347 * spin_lock_irqsave(host lock)
3349 * RETURNS: 0 when ATAPI DMA can be used, nonzero otherwise
3352 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3354 struct ata_port *ap = qc->ap;
3355 int rc = 0; /* Assume ATAPI DMA is OK by default */
3357 if (ap->ops->check_atapi_dma)
3358 rc = ap->ops->check_atapi_dma(qc);
3363 * ata_qc_prep - Prepare taskfile for submission
3364 * @qc: Metadata associated with taskfile to be prepared
3366 * Prepare ATA taskfile for submission.
3369 * spin_lock_irqsave(host lock)
3371 void ata_qc_prep(struct ata_queued_cmd *qc)
3373 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3379 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3382 * ata_sg_init_one - Associate command with memory buffer
3383 * @qc: Command to be associated
3384 * @buf: Memory buffer
3385 * @buflen: Length of memory buffer, in bytes.
3387 * Initialize the data-related elements of queued_cmd @qc
3388 * to point to a single memory buffer, @buf of byte length @buflen.
3391 * spin_lock_irqsave(host lock)
3394 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3396 struct scatterlist *sg;
3398 qc->flags |= ATA_QCFLAG_SINGLE;
3400 memset(&qc->sgent, 0, sizeof(qc->sgent));
3401 qc->__sg = &qc->sgent;
3403 qc->orig_n_elem = 1;
3405 qc->nbytes = buflen;
3408 sg_init_one(sg, buf, buflen);
3412 * ata_sg_init - Associate command with scatter-gather table.
3413 * @qc: Command to be associated
3414 * @sg: Scatter-gather table.
3415 * @n_elem: Number of elements in s/g table.
3417 * Initialize the data-related elements of queued_cmd @qc
3418 * to point to a scatter-gather table @sg, containing @n_elem elements.
3422 * spin_lock_irqsave(host lock)
3425 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3426 unsigned int n_elem)
3428 qc->flags |= ATA_QCFLAG_SG;
3430 qc->n_elem = n_elem;
3431 qc->orig_n_elem = n_elem;
3435 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3436 * @qc: Command with memory buffer to be mapped.
3438 * DMA-map the memory buffer associated with queued_cmd @qc.
3441 * spin_lock_irqsave(host lock)
3444 * Zero on success, negative on error.
3447 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3449 struct ata_port *ap = qc->ap;
3450 int dir = qc->dma_dir;
3451 struct scatterlist *sg = qc->__sg;
3452 dma_addr_t dma_address;
3455 /* we must lengthen transfers to end on a 32-bit boundary */
3456 qc->pad_len = sg->length & 3;
3458 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3459 struct scatterlist *psg = &qc->pad_sgent;
3461 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3463 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3465 if (qc->tf.flags & ATA_TFLAG_WRITE)
3466 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3469 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3470 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3472 sg->length -= qc->pad_len;
3473 if (sg->length == 0)
3476 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3477 sg->length, qc->pad_len);
3485 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3487 if (dma_mapping_error(dma_address)) {
3489 sg->length += qc->pad_len;
3493 sg_dma_address(sg) = dma_address;
3494 sg_dma_len(sg) = sg->length;
3497 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3498 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3504 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3505 * @qc: Command with scatter-gather table to be mapped.
3507 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3510 * spin_lock_irqsave(host lock)
3513 * Zero on success, negative on error.
3517 static int ata_sg_setup(struct ata_queued_cmd *qc)
3519 struct ata_port *ap = qc->ap;
3520 struct scatterlist *sg = qc->__sg;
3521 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3522 int n_elem, pre_n_elem, dir, trim_sg = 0;
3524 VPRINTK("ENTER, ata%u\n", ap->id);
3525 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3527 /* we must lengthen transfers to end on a 32-bit boundary */
3528 qc->pad_len = lsg->length & 3;
3530 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3531 struct scatterlist *psg = &qc->pad_sgent;
3532 unsigned int offset;
3534 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3536 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3539 * psg->page/offset are used to copy to-be-written
3540 * data in this function or read data in ata_sg_clean.
3542 offset = lsg->offset + lsg->length - qc->pad_len;
3543 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3544 psg->offset = offset_in_page(offset);
3546 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3547 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3548 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3549 kunmap_atomic(addr, KM_IRQ0);
3552 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3553 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3555 lsg->length -= qc->pad_len;
3556 if (lsg->length == 0)
3559 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3560 qc->n_elem - 1, lsg->length, qc->pad_len);
3563 pre_n_elem = qc->n_elem;
3564 if (trim_sg && pre_n_elem)
3573 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3575 /* restore last sg */
3576 lsg->length += qc->pad_len;
3580 DPRINTK("%d sg elements mapped\n", n_elem);
3583 qc->n_elem = n_elem;
3589 * swap_buf_le16 - swap halves of 16-bit words in place
3590 * @buf: Buffer to swap
3591 * @buf_words: Number of 16-bit words in buffer.
3593 * Swap halves of 16-bit words if needed to convert from
3594 * little-endian byte order to native cpu byte order, or vice versa.
3598 * Inherited from caller.
3600 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3605 for (i = 0; i < buf_words; i++)
3606 buf[i] = le16_to_cpu(buf[i]);
3607 #endif /* __BIG_ENDIAN */
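
/*
 * Example (editor's sketch): IDENTIFY DEVICE data arrives as 256
 * little-endian words, so callers convert the buffer in place before
 * parsing it:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * On little-endian CPUs this compiles away entirely.
 */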
3611 * ata_mmio_data_xfer - Transfer data by MMIO
3612 * @adev: device for this I/O
3614 * @buflen: buffer length
3615 * @write_data: read/write
3617 * Transfer data from/to the device data register by MMIO.
3620 * Inherited from caller.
3623 void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3624 unsigned int buflen, int write_data)
3626 struct ata_port *ap = adev->ap;
3628 unsigned int words = buflen >> 1;
3629 u16 *buf16 = (u16 *) buf;
3630 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3632 /* Transfer multiple of 2 bytes */
3634 for (i = 0; i < words; i++)
3635 writew(le16_to_cpu(buf16[i]), mmio);
3637 for (i = 0; i < words; i++)
3638 buf16[i] = cpu_to_le16(readw(mmio));
3641 /* Transfer trailing 1 byte, if any. */
3642 if (unlikely(buflen & 0x01)) {
3643 u16 align_buf[1] = { 0 };
3644 unsigned char *trailing_buf = buf + buflen - 1;
3647 memcpy(align_buf, trailing_buf, 1);
3648 writew(le16_to_cpu(align_buf[0]), mmio);
3650 align_buf[0] = cpu_to_le16(readw(mmio));
3651 memcpy(trailing_buf, align_buf, 1);
3657 * ata_pio_data_xfer - Transfer data by PIO
3658 * @adev: device to target
3660 * @buflen: buffer length
3661 * @write_data: read/write
3663 * Transfer data from/to the device data register by PIO.
3666 * Inherited from caller.
3669 void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3670 unsigned int buflen, int write_data)
3672 struct ata_port *ap = adev->ap;
3673 unsigned int words = buflen >> 1;
3675 /* Transfer multiple of 2 bytes */
3677 outsw(ap->ioaddr.data_addr, buf, words);
3679 insw(ap->ioaddr.data_addr, buf, words);
3681 /* Transfer trailing 1 byte, if any. */
3682 if (unlikely(buflen & 0x01)) {
3683 u16 align_buf[1] = { 0 };
3684 unsigned char *trailing_buf = buf + buflen - 1;
3687 memcpy(align_buf, trailing_buf, 1);
3688 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3690 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3691 memcpy(trailing_buf, align_buf, 1);
3697 * ata_pio_data_xfer_noirq - Transfer data by PIO
3698 * @adev: device to target
3700 * @buflen: buffer length
3701 * @write_data: read/write
3703 * Transfer data from/to the device data register by PIO. Do the
3704 * transfer with interrupts disabled.
3707 * Inherited from caller.
3710 void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3711 unsigned int buflen, int write_data)
3713 unsigned long flags;
3714 local_irq_save(flags);
3715 ata_pio_data_xfer(adev, buf, buflen, write_data);
3716 local_irq_restore(flags);
3721 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3722 * @qc: Command on going
3724 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3727 * Inherited from caller.
3730 static void ata_pio_sector(struct ata_queued_cmd *qc)
3732 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3733 struct scatterlist *sg = qc->__sg;
3734 struct ata_port *ap = qc->ap;
3736 unsigned int offset;
3739 if (qc->cursect == (qc->nsect - 1))
3740 ap->hsm_task_state = HSM_ST_LAST;
3742 page = sg[qc->cursg].page;
3743 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3745 /* get the current page and offset */
3746 page = nth_page(page, (offset >> PAGE_SHIFT));
3747 offset %= PAGE_SIZE;
3749 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3751 if (PageHighMem(page)) {
3752 unsigned long flags;
3754 /* FIXME: use a bounce buffer */
3755 local_irq_save(flags);
3756 buf = kmap_atomic(page, KM_IRQ0);
3758 /* do the actual data transfer */
3759 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3761 kunmap_atomic(buf, KM_IRQ0);
3762 local_irq_restore(flags);
3764 buf = page_address(page);
3765 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3771 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3778 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3779 * @qc: Command on going
3781 * Transfer one or more sectors of ATA_SECT_SIZE bytes of data
3782 * from/to the ATA device for the DRQ request.
3785 * Inherited from caller.
3788 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3790 if (is_multi_taskfile(&qc->tf)) {
3791 /* READ/WRITE MULTIPLE */
3794 WARN_ON(qc->dev->multi_count == 0);
3796 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3804 * atapi_send_cdb - Write CDB bytes to hardware
3805 * @ap: Port to which ATAPI device is attached.
3806 * @qc: Taskfile currently active
3808 * When the device has indicated its readiness to accept
3809 * a CDB, this function is called. Send the CDB.
3815 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3818 DPRINTK("send cdb\n");
3819 WARN_ON(qc->dev->cdb_len < 12);
3821 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3822 ata_altstatus(ap); /* flush */
3824 switch (qc->tf.protocol) {
3825 case ATA_PROT_ATAPI:
3826 ap->hsm_task_state = HSM_ST;
3828 case ATA_PROT_ATAPI_NODATA:
3829 ap->hsm_task_state = HSM_ST_LAST;
3831 case ATA_PROT_ATAPI_DMA:
3832 ap->hsm_task_state = HSM_ST_LAST;
3833 /* initiate bmdma */
3834 ap->ops->bmdma_start(qc);
3840 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3841 * @qc: Command on going
3842 * @bytes: number of bytes
3844 * Transfer data from/to the ATAPI device.
3847 * Inherited from caller.
3851 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3853 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3854 struct scatterlist *sg = qc->__sg;
3855 struct ata_port *ap = qc->ap;
3858 unsigned int offset, count;
3860 if (qc->curbytes + bytes >= qc->nbytes)
3861 ap->hsm_task_state = HSM_ST_LAST;
3864 if (unlikely(qc->cursg >= qc->n_elem)) {
3866 * The end of qc->sg is reached and the device expects
3867 * more data to transfer. In order not to overrun qc->sg
3868 * while still fulfilling the length specified in the byte count register,
3869 * - for the read case, discard trailing data from the device
3870 * - for the write case, pad with zero data to the device
3872 u16 pad_buf[1] = { 0 };
3873 unsigned int words = bytes >> 1;
3876 if (words) /* warning if bytes > 1 */
3877 ata_dev_printk(qc->dev, KERN_WARNING,
3878 "%u bytes trailing data\n", bytes);
3880 for (i = 0; i < words; i++)
3881 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3883 ap->hsm_task_state = HSM_ST_LAST;
3887 sg = &qc->__sg[qc->cursg];
3890 offset = sg->offset + qc->cursg_ofs;
3892 /* get the current page and offset */
3893 page = nth_page(page, (offset >> PAGE_SHIFT));
3894 offset %= PAGE_SIZE;
3896 /* don't overrun current sg */
3897 count = min(sg->length - qc->cursg_ofs, bytes);
3899 /* don't cross page boundaries */
3900 count = min(count, (unsigned int)PAGE_SIZE - offset);
3902 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3904 if (PageHighMem(page)) {
3905 unsigned long flags;
3907 /* FIXME: use bounce buffer */
3908 local_irq_save(flags);
3909 buf = kmap_atomic(page, KM_IRQ0);
3911 /* do the actual data transfer */
3912 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3914 kunmap_atomic(buf, KM_IRQ0);
3915 local_irq_restore(flags);
3917 buf = page_address(page);
3918 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3922 qc->curbytes += count;
3923 qc->cursg_ofs += count;
3925 if (qc->cursg_ofs == sg->length) {
3935 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3936 * @qc: Command on going
3938 * Transfer data from/to the ATAPI device.
3941 * Inherited from caller.
3944 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3946 struct ata_port *ap = qc->ap;
3947 struct ata_device *dev = qc->dev;
3948 unsigned int ireason, bc_lo, bc_hi, bytes;
3949 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3951 /* Abuse qc->result_tf for temp storage of intermediate TF
3952 * here to save some kernel stack usage.
3953 * For normal completion, qc->result_tf is not relevant. For
3954 * error, qc->result_tf is later overwritten by ata_qc_complete().
3955 * So, the correctness of qc->result_tf is not affected.
3957 ap->ops->tf_read(ap, &qc->result_tf);
3958 ireason = qc->result_tf.nsect;
3959 bc_lo = qc->result_tf.lbam;
3960 bc_hi = qc->result_tf.lbah;
3961 bytes = (bc_hi << 8) | bc_lo;
3963 /* shall be cleared to zero, indicating xfer of data */
3964 if (ireason & (1 << 0))
3967 /* make sure transfer direction matches expected */
3968 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3969 if (do_write != i_write)
3972 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3974 __atapi_pio_bytes(qc, bytes);
3979 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
3980 qc->err_mask |= AC_ERR_HSM;
3981 ap->hsm_task_state = HSM_ST_ERR;
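
/*
 * Worked example (editor's addition): for a device pushing 512 bytes
 * to the host, the shadow registers read back as ireason (nsect) =
 * 0x2 (I/O=1, C/D=0), lbah = 0x02 and lbam = 0x00, giving
 * bytes = (0x02 << 8) | 0x00 = 512 above.
 */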
3985 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3986 * @ap: the target ata_port
3990 * 1 if ok in workqueue, 0 otherwise.
3993 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3995 if (qc->tf.flags & ATA_TFLAG_POLLING)
3998 if (ap->hsm_task_state == HSM_ST_FIRST) {
3999 if (qc->tf.protocol == ATA_PROT_PIO &&
4000 (qc->tf.flags & ATA_TFLAG_WRITE))
4003 if (is_atapi_taskfile(&qc->tf) &&
4004 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4012 * ata_hsm_qc_complete - finish a qc running on standard HSM
4013 * @qc: Command to complete
4014 * @in_wq: 1 if called from workqueue, 0 otherwise
4016 * Finish @qc which is running on standard HSM.
4019 * If @in_wq is zero, spin_lock_irqsave(host lock).
4020 * Otherwise, none on entry and grabs host lock.
4022 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4024 struct ata_port *ap = qc->ap;
4025 unsigned long flags;
4027 if (ap->ops->error_handler) {
4029 spin_lock_irqsave(ap->lock, flags);
4031 /* EH might have kicked in while host lock is
4034 qc = ata_qc_from_tag(ap, qc->tag);
4036 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4038 ata_qc_complete(qc);
4040 ata_port_freeze(ap);
4043 spin_unlock_irqrestore(ap->lock, flags);
4045 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4046 ata_qc_complete(qc);
4048 ata_port_freeze(ap);
4052 spin_lock_irqsave(ap->lock, flags);
4054 ata_qc_complete(qc);
4055 spin_unlock_irqrestore(ap->lock, flags);
4057 ata_qc_complete(qc);
4060 ata_altstatus(ap); /* flush */
4064 * ata_hsm_move - move the HSM to the next state.
4065 * @ap: the target ata_port
4067 * @status: current device status
4068 * @in_wq: 1 if called from workqueue, 0 otherwise
4071 * 1 when the next status poll is needed, 0 otherwise.
4073 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4074 u8 status, int in_wq)
4076 unsigned long flags = 0;
4079 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4081 /* Make sure ata_qc_issue_prot() does not throw things
4082 * like DMA polling into the workqueue. Notice that
4083 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4085 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4088 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4089 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4091 switch (ap->hsm_task_state) {
4093 /* Send first data block or PACKET CDB */
4095 /* If polling, we will stay in the work queue after
4096 * sending the data. Otherwise, interrupt handler
4097 * takes over after sending the data.
4099 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4101 /* check device status */
4102 if (unlikely((status & ATA_DRQ) == 0)) {
4103 /* handle BSY=0, DRQ=0 as error */
4104 if (likely(status & (ATA_ERR | ATA_DF)))
4105 /* device stops HSM for abort/error */
4106 qc->err_mask |= AC_ERR_DEV;
4108 /* HSM violation. Let EH handle this */
4109 qc->err_mask |= AC_ERR_HSM;
4111 ap->hsm_task_state = HSM_ST_ERR;
4115 /* Device should not ask for data transfer (DRQ=1)
4116 * when it finds something wrong.
4117 * We ignore DRQ here and stop the HSM by
4118 * changing hsm_task_state to HSM_ST_ERR and
4119 * let the EH abort the command or reset the device.
4121 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4122 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4124 qc->err_mask |= AC_ERR_HSM;
4125 ap->hsm_task_state = HSM_ST_ERR;
4129 /* Send the CDB (atapi) or the first data block (ata pio out).
4130 * During the state transition, interrupt handler shouldn't
4131 * be invoked before the data transfer is complete and
4132 * hsm_task_state is changed. Hence, the following locking.
4135 spin_lock_irqsave(ap->lock, flags);
4137 if (qc->tf.protocol == ATA_PROT_PIO) {
4138 /* PIO data out protocol.
4139 * send first data block.
4142 /* ata_pio_sectors() might change the state
4143 * to HSM_ST_LAST. so, the state is changed here
4144 * before ata_pio_sectors().
4146 ap->hsm_task_state = HSM_ST;
4147 ata_pio_sectors(qc);
4148 ata_altstatus(ap); /* flush */
4151 atapi_send_cdb(ap, qc);
4154 spin_unlock_irqrestore(ap->lock, flags);
4156 /* if polling, ata_pio_task() handles the rest.
4157 * otherwise, interrupt handler takes over from here.
4162 /* complete command or read/write the data register */
4163 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4164 /* ATAPI PIO protocol */
4165 if ((status & ATA_DRQ) == 0) {
4166 /* No more data to transfer or device error.
4167 * Device error will be tagged in HSM_ST_LAST.
4169 ap->hsm_task_state = HSM_ST_LAST;
4173 /* Device should not ask for data transfer (DRQ=1)
4174 * when it finds something wrong.
4175 * We ignore DRQ here and stop the HSM by
4176 * changing hsm_task_state to HSM_ST_ERR and
4177 * let the EH abort the command or reset the device.
4179 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4180 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4182 qc->err_mask |= AC_ERR_HSM;
4183 ap->hsm_task_state = HSM_ST_ERR;
4187 atapi_pio_bytes(qc);
4189 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4190 /* bad ireason reported by device */
4194 /* ATA PIO protocol */
4195 if (unlikely((status & ATA_DRQ) == 0)) {
4196 /* handle BSY=0, DRQ=0 as error */
4197 if (likely(status & (ATA_ERR | ATA_DF)))
4198 /* device stops HSM for abort/error */
4199 qc->err_mask |= AC_ERR_DEV;
4201 /* HSM violation. Let EH handle this */
4202 qc->err_mask |= AC_ERR_HSM;
4204 ap->hsm_task_state = HSM_ST_ERR;
4208 /* For PIO reads, some devices may ask for
4209 * data transfer (DRQ=1) along with ERR=1.
4210 * We respect DRQ here and transfer one
4211 * block of junk data before changing the
4212 * hsm_task_state to HSM_ST_ERR.
4214 * For PIO writes, ERR=1 DRQ=1 doesn't make
4215 * sense since the data block has been
4216 * transferred to the device.
4218 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4219 /* data might be corrupted */
4220 qc->err_mask |= AC_ERR_DEV;
4222 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4223 ata_pio_sectors(qc);
4225 status = ata_wait_idle(ap);
4228 if (status & (ATA_BUSY | ATA_DRQ))
4229 qc->err_mask |= AC_ERR_HSM;
4231 /* ata_pio_sectors() might change the
4232 * state to HSM_ST_LAST. so, the state
4233 * is changed after ata_pio_sectors().
4235 ap->hsm_task_state = HSM_ST_ERR;
4239 ata_pio_sectors(qc);
4241 if (ap->hsm_task_state == HSM_ST_LAST &&
4242 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4245 status = ata_wait_idle(ap);
4250 ata_altstatus(ap); /* flush */
4255 if (unlikely(!ata_ok(status))) {
4256 qc->err_mask |= __ac_err_mask(status);
4257 ap->hsm_task_state = HSM_ST_ERR;
4261 /* no more data to transfer */
4262 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4263 ap->id, qc->dev->devno, status);
4265 WARN_ON(qc->err_mask);
4267 ap->hsm_task_state = HSM_ST_IDLE;
4269 /* complete taskfile transaction */
4270 ata_hsm_qc_complete(qc, in_wq);
4276 /* make sure qc->err_mask is available to
4277 * know what's wrong and recover
4279 WARN_ON(qc->err_mask == 0);
4281 ap->hsm_task_state = HSM_ST_IDLE;
4283 /* complete taskfile transaction */
4284 ata_hsm_qc_complete(qc, in_wq);
4296 static void ata_pio_task(struct work_struct *work)
4298 struct ata_port *ap =
4299 container_of(work, struct ata_port, port_task.work);
4300 struct ata_queued_cmd *qc = ap->port_task_data;
4305 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4308 * This is purely heuristic. This is a fast path.
4309 * Sometimes when we enter, BSY will be cleared in
4310 * a chk-status or two. If not, the drive is probably seeking
4311 * or something. Snooze for a couple msecs, then
4312 * chk-status again. If still busy, queue delayed work.
4314 status = ata_busy_wait(ap, ATA_BUSY, 5);
4315 if (status & ATA_BUSY) {
4317 status = ata_busy_wait(ap, ATA_BUSY, 10);
4318 if (status & ATA_BUSY) {
4319 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4325 poll_next = ata_hsm_move(ap, qc, status, 1);
4327 /* another command or interrupt handler
4328 * may be running at this point.
4335 * ata_qc_new - Request an available ATA command, for queueing
4336 * @ap: Port associated with device @dev
4337 * @dev: Device from whom we request an available command structure
4343 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4345 struct ata_queued_cmd *qc = NULL;
4348 /* no command while frozen */
4349 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4352 /* the last tag is reserved for internal command. */
4353 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4354 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4355 qc = __ata_qc_from_tag(ap, i);
4366 * ata_qc_new_init - Request an available ATA command, and initialize it
4367 * @dev: Device from whom we request an available command structure
4373 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4375 struct ata_port *ap = dev->ap;
4376 struct ata_queued_cmd *qc;
4378 qc = ata_qc_new(ap);
4391 * ata_qc_free - free unused ata_queued_cmd
4392 * @qc: Command to complete
4394 * Designed to free unused ata_queued_cmd object
4395 * in case something prevents using it.
4398 * spin_lock_irqsave(host lock)
4400 void ata_qc_free(struct ata_queued_cmd *qc)
4402 struct ata_port *ap = qc->ap;
4405 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4409 if (likely(ata_tag_valid(tag))) {
4410 qc->tag = ATA_TAG_POISON;
4411 clear_bit(tag, &ap->qc_allocated);
4415 void __ata_qc_complete(struct ata_queued_cmd *qc)
4417 struct ata_port *ap = qc->ap;
4419 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4420 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4422 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4425 /* command should be marked inactive atomically with qc completion */
4426 if (qc->tf.protocol == ATA_PROT_NCQ)
4427 ap->sactive &= ~(1 << qc->tag);
4429 ap->active_tag = ATA_TAG_POISON;
4431 /* atapi: mark qc as inactive to prevent the interrupt handler
4432 * from completing the command twice later, before the error handler
4433 * is called. (when rc != 0 and atapi request sense is needed)
4435 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4436 ap->qc_active &= ~(1 << qc->tag);
4438 /* call completion callback */
4439 qc->complete_fn(qc);
4443 * ata_qc_complete - Complete an active ATA command
4444 * @qc: Command to complete
4447 * Indicate to the mid and upper layers that an ATA
4448 * command has completed, with either an ok or not-ok status.
4451 * spin_lock_irqsave(host lock)
4453 void ata_qc_complete(struct ata_queued_cmd *qc)
4455 struct ata_port *ap = qc->ap;
4457 /* XXX: New EH and old EH use different mechanisms to
4458 * synchronize EH with regular execution path.
4460 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4461 * Normal execution path is responsible for not accessing a
4462 * failed qc. libata core enforces the rule by returning NULL
4463 * from ata_qc_from_tag() for failed qcs.
4465 * Old EH depends on ata_qc_complete() nullifying completion
4466 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4467 * not synchronize with interrupt handler. Only PIO task is
4470 if (ap->ops->error_handler) {
4471 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4473 if (unlikely(qc->err_mask))
4474 qc->flags |= ATA_QCFLAG_FAILED;
4476 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4477 if (!ata_tag_internal(qc->tag)) {
4478 /* always fill result TF for failed qc */
4479 ap->ops->tf_read(ap, &qc->result_tf);
4480 ata_qc_schedule_eh(qc);
4485 /* read result TF if requested */
4486 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4487 ap->ops->tf_read(ap, &qc->result_tf);
4489 __ata_qc_complete(qc);
4491 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4494 /* read result TF if failed or requested */
4495 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4496 ap->ops->tf_read(ap, &qc->result_tf);
4498 __ata_qc_complete(qc);
4503 * ata_qc_complete_multiple - Complete multiple qcs successfully
4504 * @ap: port in question
4505 * @qc_active: new qc_active mask
4506 * @finish_qc: LLDD callback invoked before completing a qc
4508 * Complete in-flight commands. This function is meant to be
4509 * called from low-level driver's interrupt routine to complete
4510 * requests normally. ap->qc_active and @qc_active are compared
4511 * and commands are completed accordingly.
4514 * spin_lock_irqsave(host lock)
4517 * Number of completed commands on success, -errno otherwise.
4519 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4520 void (*finish_qc)(struct ata_queued_cmd *))
4526 done_mask = ap->qc_active ^ qc_active;
4528 if (unlikely(done_mask & qc_active)) {
4529 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4530 "(%08x->%08x)\n", ap->qc_active, qc_active);
4534 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4535 struct ata_queued_cmd *qc;
4537 if (!(done_mask & (1 << i)))
4540 if ((qc = ata_qc_from_tag(ap, i))) {
4543 ata_qc_complete(qc);
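
/*
 * Worked example (editor's addition): if ap->qc_active is 0x07 (tags
 * 0-2 in flight) and the controller now reports qc_active 0x04, then
 * done_mask = 0x07 ^ 0x04 = 0x03 and the loop above completes tags 0
 * and 1.  A bit set in @qc_active but clear in ap->qc_active would
 * make (done_mask & qc_active) nonzero -- the illegal transition
 * rejected above.
 */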
4551 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4553 struct ata_port *ap = qc->ap;
4555 switch (qc->tf.protocol) {
4558 case ATA_PROT_ATAPI_DMA:
4561 case ATA_PROT_ATAPI:
4563 if (ap->flags & ATA_FLAG_PIO_DMA)
4576 * ata_qc_issue - issue taskfile to device
4577 * @qc: command to issue to device
4579 * Prepare an ATA command for submission to the device.
4580 * This includes mapping the data into a DMA-able
4581 * area, filling in the S/G table, and finally
4582 * writing the taskfile to hardware, starting the command.
4585 * spin_lock_irqsave(host lock)
4587 void ata_qc_issue(struct ata_queued_cmd *qc)
4589 struct ata_port *ap = qc->ap;
4591 /* Make sure only one non-NCQ command is outstanding. The
4592 * check is skipped for old EH because it reuses active qc to
4593 * request ATAPI sense.
4595 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4597 if (qc->tf.protocol == ATA_PROT_NCQ) {
4598 WARN_ON(ap->sactive & (1 << qc->tag));
4599 ap->sactive |= 1 << qc->tag;
4601 WARN_ON(ap->sactive);
4602 ap->active_tag = qc->tag;
4605 qc->flags |= ATA_QCFLAG_ACTIVE;
4606 ap->qc_active |= 1 << qc->tag;
4608 if (ata_should_dma_map(qc)) {
4609 if (qc->flags & ATA_QCFLAG_SG) {
4610 if (ata_sg_setup(qc))
4612 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4613 if (ata_sg_setup_one(qc))
4617 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4620 ap->ops->qc_prep(qc);
4622 qc->err_mask |= ap->ops->qc_issue(qc);
4623 if (unlikely(qc->err_mask))
4628 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4629 qc->err_mask |= AC_ERR_SYSTEM;
4631 ata_qc_complete(qc);
4635 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4636 * @qc: command to issue to device
4638 * Using various libata functions and hooks, this function
4639 * starts an ATA command. ATA commands are grouped into
4640 * classes called "protocols", and issuing each type of protocol
4641 * is slightly different.
4643 * May be used as the qc_issue() entry in ata_port_operations.
4646 * spin_lock_irqsave(host lock)
4649 * Zero on success, AC_ERR_* mask on failure
4652 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4654 struct ata_port *ap = qc->ap;
4656 /* Use polling pio if the LLD doesn't handle
4657 * interrupt driven pio and atapi CDB interrupt.
4659 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4660 switch (qc->tf.protocol) {
4662 case ATA_PROT_ATAPI:
4663 case ATA_PROT_ATAPI_NODATA:
4664 qc->tf.flags |= ATA_TFLAG_POLLING;
4666 case ATA_PROT_ATAPI_DMA:
4667 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4668 /* see ata_dma_blacklisted() */
4676 /* select the device */
4677 ata_dev_select(ap, qc->dev->devno, 1, 0);
4679 /* start the command */
4680 switch (qc->tf.protocol) {
4681 case ATA_PROT_NODATA:
4682 if (qc->tf.flags & ATA_TFLAG_POLLING)
4683 ata_qc_set_polling(qc);
4685 ata_tf_to_host(ap, &qc->tf);
4686 ap->hsm_task_state = HSM_ST_LAST;
4688 if (qc->tf.flags & ATA_TFLAG_POLLING)
4689 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4694 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4696 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4697 ap->ops->bmdma_setup(qc); /* set up bmdma */
4698 ap->ops->bmdma_start(qc); /* initiate bmdma */
4699 ap->hsm_task_state = HSM_ST_LAST;
4703 if (qc->tf.flags & ATA_TFLAG_POLLING)
4704 ata_qc_set_polling(qc);
4706 ata_tf_to_host(ap, &qc->tf);
4708 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4709 /* PIO data out protocol */
4710 ap->hsm_task_state = HSM_ST_FIRST;
4711 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4713 /* always send first data block using
4714 * the ata_pio_task() codepath.
4717 /* PIO data in protocol */
4718 ap->hsm_task_state = HSM_ST;
4720 if (qc->tf.flags & ATA_TFLAG_POLLING)
4721 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4723 /* if polling, ata_pio_task() handles the rest.
4724 * otherwise, interrupt handler takes over from here.
4730 case ATA_PROT_ATAPI:
4731 case ATA_PROT_ATAPI_NODATA:
4732 if (qc->tf.flags & ATA_TFLAG_POLLING)
4733 ata_qc_set_polling(qc);
4735 ata_tf_to_host(ap, &qc->tf);
4737 ap->hsm_task_state = HSM_ST_FIRST;
4739 /* send cdb by polling if no cdb interrupt */
4740 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4741 (qc->tf.flags & ATA_TFLAG_POLLING))
4742 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4745 case ATA_PROT_ATAPI_DMA:
4746 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4748 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4749 ap->ops->bmdma_setup(qc); /* set up bmdma */
4750 ap->hsm_task_state = HSM_ST_FIRST;
4752 /* send cdb by polling if no cdb interrupt */
4753 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4754 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4759 return AC_ERR_SYSTEM;
4766 * ata_host_intr - Handle host interrupt for given (port, task)
4767 * @ap: Port on which interrupt arrived (possibly...)
4768 * @qc: Taskfile currently active in engine
4770 * Handle host interrupt for given queued command. Currently,
4771 * only DMA interrupts are handled. All other commands are
4772 * handled via polling with interrupts disabled (nIEN bit).
4775 * spin_lock_irqsave(host lock)
4778 * One if interrupt was handled, zero if not (shared irq).
4781 inline unsigned int ata_host_intr (struct ata_port *ap,
4782 struct ata_queued_cmd *qc)
4784 u8 status, host_stat = 0;
4786 VPRINTK("ata%u: protocol %d task_state %d\n",
4787 ap->id, qc->tf.protocol, ap->hsm_task_state);
4789 /* Check whether we are expecting interrupt in this state */
4790 switch (ap->hsm_task_state) {
4792 /* Some pre-ATAPI-4 devices assert INTRQ
4793 * at this state when ready to receive CDB.
4796 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4797 * The flag was turned on only for atapi devices.
4798 * No need to check is_atapi_taskfile(&qc->tf) again.
4800 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4804 if (qc->tf.protocol == ATA_PROT_DMA ||
4805 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4806 /* check status of DMA engine */
4807 host_stat = ap->ops->bmdma_status(ap);
4808 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4810 /* if it's not our irq... */
4811 if (!(host_stat & ATA_DMA_INTR))
4814 /* before we do anything else, clear DMA-Start bit */
4815 ap->ops->bmdma_stop(qc);
4817 if (unlikely(host_stat & ATA_DMA_ERR)) {
4818 /* error when transferring data to/from memory */
4819 qc->err_mask |= AC_ERR_HOST_BUS;
4820 ap->hsm_task_state = HSM_ST_ERR;
4830 /* check altstatus */
4831 status = ata_altstatus(ap);
4832 if (status & ATA_BUSY)
4835 /* check main status, clearing INTRQ */
4836 status = ata_chk_status(ap);
4837 if (unlikely(status & ATA_BUSY))
4840 /* ack bmdma irq events */
4841 ap->ops->irq_clear(ap);
4843 ata_hsm_move(ap, qc, status, 0);
4844 return 1; /* irq handled */
4847 ap->stats.idle_irq++;
4850 if ((ap->stats.idle_irq % 1000) == 0) {
4851 ata_irq_ack(ap, 0); /* debug trap */
4852 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4856 return 0; /* irq not handled */
4860 * ata_interrupt - Default ATA host interrupt handler
4861 * @irq: irq line (unused)
4862 * @dev_instance: pointer to our ata_host information structure
4864 * Default interrupt handler for PCI IDE devices. Calls
4865 * ata_host_intr() for each port that is not disabled.
4868 * Obtains host lock during operation.
4871 * IRQ_NONE or IRQ_HANDLED.
4874 irqreturn_t ata_interrupt (int irq, void *dev_instance)
4876 struct ata_host *host = dev_instance;
4878 unsigned int handled = 0;
4879 unsigned long flags;
4881 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4882 spin_lock_irqsave(&host->lock, flags);
4884 for (i = 0; i < host->n_ports; i++) {
4885 struct ata_port *ap;
4887 ap = host->ports[i];
4889 !(ap->flags & ATA_FLAG_DISABLED)) {
4890 struct ata_queued_cmd *qc;
4892 qc = ata_qc_from_tag(ap, ap->active_tag);
4893 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4894 (qc->flags & ATA_QCFLAG_ACTIVE))
4895 handled |= ata_host_intr(ap, qc);
4899 spin_unlock_irqrestore(&host->lock, flags);
4901 return IRQ_RETVAL(handled);
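
/*
 * Example (editor's sketch): a LLDD typically installs this handler
 * with the ata_host as the shared-irq cookie; registration details
 * vary by driver and "my_drv" is hypothetical:
 *
 *	rc = request_irq(irq, ata_interrupt, IRQF_SHARED, "my_drv", host);
 */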
4905 * sata_scr_valid - test whether SCRs are accessible
4906 * @ap: ATA port to test SCR accessibility for
4908 * Test whether SCRs are accessible for @ap.
4914 * 1 if SCRs are accessible, 0 otherwise.
4916 int sata_scr_valid(struct ata_port *ap)
4918 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4922 * sata_scr_read - read SCR register of the specified port
4923 * @ap: ATA port to read SCR for
4925 * @val: Place to store read value
4927 * Read SCR register @reg of @ap into *@val. This function is
4928 * guaranteed to succeed if the cable type of the port is SATA
4929 * and the port implements ->scr_read.
4935 * 0 on success, negative errno on failure.
4937 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4939 if (sata_scr_valid(ap)) {
4940 *val = ap->ops->scr_read(ap, reg);
4947 * sata_scr_write - write SCR register of the specified port
4948 * @ap: ATA port to write SCR for
4949 * @reg: SCR to write
4950 * @val: value to write
4952 * Write @val to SCR register @reg of @ap. This function is
4953 * guaranteed to succeed if the cable type of the port is SATA
4954 * and the port implements ->scr_write.
4960 * 0 on success, negative errno on failure.
4962 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
4964 if (sata_scr_valid(ap)) {
4965 ap->ops->scr_write(ap, reg, val);
4972 * sata_scr_write_flush - write SCR register of the specified port and flush
4973 * @ap: ATA port to write SCR for
4974 * @reg: SCR to write
4975 * @val: value to write
4977 * This function is identical to sata_scr_write() except that this
4978 * function performs flush after writing to the register.
4984 * 0 on success, negative errno on failure.
4986 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
4988 if (sata_scr_valid(ap)) {
4989 ap->ops->scr_write(ap, reg, val);
4990 ap->ops->scr_read(ap, reg);
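
/*
 * Example (editor's sketch): SError bits are write-1-to-clear, so the
 * usual pattern for clearing latched error conditions is a read
 * followed by a write-back, as ata_std_postreset() does above:
 *
 *	u32 serror;
 *
 *	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
 *		sata_scr_write(ap, SCR_ERROR, serror);
 */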
4997 * ata_port_online - test whether the given port is online
4998 * @ap: ATA port to test
5000 * Test whether @ap is online. Note that this function returns 0
5001 * if online status of @ap cannot be obtained, so
5002 * ata_port_online(ap) != !ata_port_offline(ap).
5008 * 1 if the port online status is available and online.
5010 int ata_port_online(struct ata_port *ap)
5014 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5020 * ata_port_offline - test whether the given port is offline
5021 * @ap: ATA port to test
5023 * Test whether @ap is offline. Note that this function returns
5024 * 0 if offline status of @ap cannot be obtained, so
5025 * ata_port_online(ap) != !ata_port_offline(ap).
5031 * 1 if the port offline status is available and offline.
5033 int ata_port_offline(struct ata_port *ap)
5037 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
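
/*
 * Editor's note: both tests key off DET, the low nibble of SStatus.
 * DET == 0x3 means "device present and phy communication
 * established"; any other readable value is treated as offline.
 * A caller-side sketch ("link_up" is hypothetical):
 *
 *	u32 sstatus;
 *	int link_up = 0;
 *
 *	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0 &&
 *	    (sstatus & 0xf) == 0x3)
 *		link_up = 1;
 */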
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (ata_id_has_flush_ext(dev->id))
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

 fail:
	ata_host_resume(host);
	return rc;
}
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
/**
 *	ata_port_start - Set port up for DMA.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
		return rc;
	}

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);

	return 0;
}
/**
 *	ata_port_stop - Undo ata_port_start()
 *	@ap: Port to shut down
 *
 *	Frees the PRD table and the pad buffer.
 *
 *	May be used as the port_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}
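
/*
 * Editor's note: hypothetical LLDD port_start showing the intended
 * layering - allocate the generic PRD resources first, then any
 * controller-private state (all "example_" names are invented).
 */
#if 0
struct example_port_priv {
	void *cmd_slot;
};

static int example_port_start(struct ata_port *ap)
{
	struct example_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);	/* PRD table + pad buffer */
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		ata_port_stop(ap);	/* undo the generic allocation */
		return -ENOMEM;
	}
	ap->private_data = pp;
	return 0;
}
#endif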
void ata_host_stop(struct ata_host *host)
{
	if (host->mmio_base)
		iounmap(host->mmio_base);
}
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
/**
 *	ata_port_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: Collection of ports to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host *host,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->id = ata_unique_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	if (port_no == 1 && ent->pinfo2) {
		ap->pio_mask = ent->pinfo2->pio_mask;
		ap->mwdma_mask = ent->pinfo2->mwdma_mask;
		ap->udma_mask = ent->pinfo2->udma_mask;
		ap->flags |= ent->pinfo2->flags;
		ap->ops = ent->pinfo2->port_ops;
	} else {
		ap->pio_mask = ent->pio_mask;
		ap->mwdma_mask = ent->mwdma_mask;
		ap->udma_mask = ent->udma_mask;
		ap->flags |= ent->port_flags;
		ap->ops = ent->port_ops;
	}
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
/**
 *	ata_port_init_shost - Initialize SCSI host associated with ATA port
 *	@ap: ATA port to initialize SCSI host for
 *	@shost: SCSI host associated with @ap
 *
 *	Initialize SCSI host @shost associated with ATA port @ap.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
{
	ap->scsi_host = shost;

	shost->unique_id = ap->id;
	shost->max_id = 16;
	shost->max_lun = 1;
	shost->max_channel = 1;
	shost->max_cmd_len = 12;
}
/**
 *	ata_port_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, NULL on error.
 */
static struct ata_port *ata_port_add(const struct ata_probe_ent *ent,
				     struct ata_host *host,
				     unsigned int port_no)
{
	struct Scsi_Host *shost;
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	if (!ent->port_ops->error_handler &&
	    !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!shost)
		return NULL;

	shost->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(shost);

	ata_port_init(ap, host, ent, port_no);
	ata_port_init_shost(ap, shost);

	return ap;
}
/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the device contained within.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int i;
	struct device *dev = ent->dev;
	struct ata_host *host;
	int rc;

	DPRINTK("ENTER\n");

	if (ent->irq == 0) {
		dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
		return 0;
	}

	/* alloc a container for our list of ATA ports (buses) */
	host = kzalloc(sizeof(struct ata_host) +
		       (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host)
		return 0;

	ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
	host->n_ports = ent->n_ports;
	host->irq = ent->irq;
	host->irq2 = ent->irq2;
	host->mmio_base = ent->mmio_base;
	host->private_data = ent->private_data;
	/* register each port bound to this device */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;
		int irq_line = ent->irq;

		ap = ata_port_add(ent, host, i);
		host->ports[i] = ap;
		if (!ap)
			goto err_out;

		/* dummy? */
		if (ent->dummy_port_mask & (1 << i)) {
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		/* start port */
		rc = ap->ops->port_start(ap);
		if (rc) {
			host->ports[i] = NULL;
			scsi_host_put(ap->scsi_host);
			goto err_out;
		}

		/* Report the secondary IRQ for second channel legacy */
		if (i == 1 && ent->irq2)
			irq_line = ent->irq2;

		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
				 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				 (ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
				"ctl 0x%lX bmdma 0x%lX irq %d\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				irq_line);

		ata_chk_status(ap);
		host->ops->irq_clear(ap);
		ata_eh_freeze_port(ap);	/* freeze port before requesting IRQ */
	}

	/* obtain irq, that may be shared between channels */
	rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			 DRV_NAME, host);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* do we have a second IRQ for the other channel, e.g. legacy mode */
	if (ent->irq2) {
		/* We will get weird core code crashes later if this is true
		   so trap it now */
		BUG_ON(ent->irq == ent->irq2);

		rc = request_irq(ent->irq2, ent->port_ops->irq_handler,
				 ent->irq_flags, DRV_NAME, host);
		if (rc) {
			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
				   ent->irq2, rc);
			goto err_out_free_irq;
		}
	}
	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 scontrol;
		int rc;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->scsi_host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}
	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */
err_out_free_irq:
	free_irq(ent->irq, host);
err_out:
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		if (ap) {
			ap->ops->port_stop(ap);
			scsi_host_put(ap->scsi_host);
		}
	}

	kfree(host);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
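
/*
 * Editor's note: skeletal probe path of a hypothetical non-PCI LLDD,
 * to show what ata_device_add() consumes.  "example_port_info", the
 * base address and IRQ plumbing are invented; real drivers usually go
 * through ata_pci_init_one() rather than open-coding this.
 */
#if 0
static int example_platform_probe(struct device *dev, unsigned long base,
				  int irq)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = ata_probe_ent_alloc(dev, &example_port_info);
	if (!probe_ent)
		return -ENOMEM;

	probe_ent->n_ports = 1;
	probe_ent->irq = irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->port[0].cmd_addr = base;
	probe_ent->port[0].altstatus_addr =
	probe_ent->port[0].ctl_addr = base + 0x206;
	ata_std_ports(&probe_ent->port[0]);

	if (!ata_device_add(probe_ent)) {	/* 0 ports registered */
		kfree(probe_ent);
		return -ENODEV;
	}

	kfree(probe_ent);	/* probe ent is consumed, not kept */
	return 0;
}
#endif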
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
/**
 *	ata_host_remove - PCI layer callback for device removal
 *	@host: ATA host set that was removed
 *
 *	Unregister all objects associated with this host set.  Free those
 *	objects.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
void ata_host_remove(struct ata_host *host)
{
	unsigned int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	free_irq(host->irq, host);
	if (host->irq2)
		free_irq(host->irq2, host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_release(ap->scsi_host);

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			/* FIXME: Add -ac IDE pci mods to remove these special cases */
			if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
				release_region(ATA_PRIMARY_CMD, 8);
			else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
				release_region(ATA_SECONDARY_CMD, 8);
		}

		scsi_host_put(ap->scsi_host);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);

	kfree(host);
}
/**
 *	ata_scsi_release - SCSI layer callback hook for host unload
 *	@shost: libata host to be unloaded
 *
 *	Performs all duties necessary to shut down a libata port...
 *	Kill port kthread, disable port, and release resources.
 *
 *	LOCKING:
 *	Inherited from SCSI layer.
 *
 *	RETURNS:
 *	One.
 */
int ata_scsi_release(struct Scsi_Host *shost)
{
	struct ata_port *ap = ata_shost_to_port(shost);

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ap->ops->port_stop(ap);

	DPRINTK("EXIT\n");
	return 1;
}
struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->port_flags = port->flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;
	probe_ent->private_data = port->private_data;

	return probe_ent;
}
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
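
/*
 * Editor's note: illustrative call for a legacy primary channel
 * (hypothetical snippet).  Only cmd_addr and the control/altstatus
 * addresses need to be filled in by hand; ata_std_ports() derives
 * the taskfile register addresses from cmd_addr.
 */
#if 0
	ioaddr->cmd_addr = ATA_PRIMARY_CMD;	/* 0x1f0 */
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = 0x3f6;		/* legacy control block */
	ata_std_ports(ioaddr);
#endif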
#ifdef CONFIG_PCI

void ata_pci_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);

	pci_iounmap(pdev, host->mmio_base);
}
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that
 *	hot-unplug or module unload event has occurred.
 *	Handle this by unregistering all objects associated
 *	with this PCI device.  Free those objects.  Then finally
 *	release PCI resources and disable device.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_remove(host);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
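
/*
 * Editor's note: typical caller pattern, modeled on IDE-class PCI
 * drivers; the register offsets and bit values below are invented,
 * so check the controller datasheet.  A return of 1 means the masked
 * config value matched, i.e. the port is enabled.
 */
#if 0
static const struct pci_bits example_enable_bits[] = {
	{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0: reg 0x41, byte, bit 7 */
	{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1: reg 0x43, byte, bit 7 */
};

static int example_port_enabled(struct pci_dev *pdev, unsigned int port)
{
	return pci_test_config_bits(pdev, &example_enable_bits[port]) == 1;
}
#endif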
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);

	if (mesg.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
void ata_pci_device_do_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
}
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	ata_pci_device_do_resume(pdev);
	ata_host_resume(host);
	return 0;
}
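
/*
 * Editor's note: how these callbacks land in a driver's struct
 * pci_driver (hypothetical driver name and tables; the fields are
 * the standard PCI hooks of this era).
 */
#if 0
static struct pci_driver example_pci_driver = {
	.name		= "example_ata",
	.id_table	= example_pci_ids,
	.probe		= example_init_one,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};
#endif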
#endif /* CONFIG_PCI */
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
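
/*
 * Editor's note: the intended use is to throttle warnings during
 * interrupt storms - at most one message per HZ/5 window.  Sketch
 * (the status variable is hypothetical):
 */
#if 0
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt (status 0x%x)\n", status);
#endif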
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
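
/*
 * Editor's note: hypothetical wait for BSY to clear in a 32-bit MMIO
 * status register; "mmio_status" is an invented pointer.  Note the
 * inverted sense: the loop runs while (tmp & mask) == val, so passing
 * mask == val == ATA_BUSY polls until the busy bit drops.
 */
#if 0
	u32 status;

	status = ata_wait_register(mmio_status, ATA_BUSY, ATA_BUSY,
				   50, 2000);	/* poll every 50ms, 2s cap */
	if (status & ATA_BUSY)
		return -EBUSY;	/* timed out, device still busy */
#endif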
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return 0;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
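
/*
 * Editor's note: these dummy ops back the ent->dummy_port_mask
 * handling in ata_device_add() above - a claimed-but-absent channel
 * keeps its port slot, and any command issued to it fails cleanly
 * with AC_ERR_SYSTEM instead of dereferencing missing hardware.
 * Hypothetical driver-side marking:
 */
#if 0
	/* second channel not wired up on this board */
	probe_ent->dummy_port_mask |= (1 << 1);
#endif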
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_port_detach);
EXPORT_SYMBOL_GPL(ata_host_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);