// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * libata-core.c - helper library for ATA
 *
 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/driver-api/libata.rst
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 * Standards documents from:
 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
 * http://www.sata-io.org (SATA)
 * http://www.compactflash.org (CF)
 * http://www.qic.org (QIC157 - Tape and DSC)
 * http://www.ce-ata.org (CE-ATA: not supported)
 *
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers. As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>

#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/leds.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <asm/setup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>

#include "libata.h"
#include "libata-transport.h"

const struct ata_port_operations ata_base_port_ops = {
        .prereset = ata_std_prereset,
        .postreset = ata_std_postreset,
        .error_handler = ata_std_error_handler,
        .sched_eh = ata_std_sched_eh,
        .end_eh = ata_std_end_eh,
};

const struct ata_port_operations sata_port_ops = {
        .inherits = &ata_base_port_ops,

        .qc_defer = ata_std_qc_defer,
        .hardreset = sata_std_hardreset,
};
EXPORT_SYMBOL_GPL(sata_port_ops);

static unsigned int ata_dev_init_params(struct ata_device *dev,
                                        u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

atomic_t ata_print_id = ATOMIC_INIT(0);

#ifdef CONFIG_ATA_FORCE
struct ata_force_param {
        unsigned int xfer_mask;
        unsigned int horkage_on;
        unsigned int horkage_off;

struct ata_force_ent {
        struct ata_force_param param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
#endif

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static inline bool ata_dev_print_info(struct ata_device *dev)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;

        return ehc->i.flags & ATA_EHI_PRINTINFO;
}

static bool ata_sstatus_online(u32 sstatus)
{
        return (sstatus & 0xf) == 0x3;
}

/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode, one of ATA_LITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
                               enum ata_link_iter_mode mode)
{
        BUG_ON(mode != ATA_LITER_EDGE &&
               mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

        /* NULL link indicates start of iteration */
        if (!link)
                switch (mode) {
                case ATA_LITER_EDGE:
                case ATA_LITER_PMP_FIRST:
                        if (sata_pmp_attached(ap))
                                return ap->pmp_link;
                        fallthrough;
                case ATA_LITER_HOST_FIRST:
                        return &ap->link;
                }

        /* we just iterated over the host link, what's next? */
        if (link == &ap->link)
                switch (mode) {
                case ATA_LITER_HOST_FIRST:
                        if (sata_pmp_attached(ap))
                                return ap->pmp_link;
                        fallthrough;
                case ATA_LITER_PMP_FIRST:
                        if (unlikely(ap->slave_link))
                                return ap->slave_link;
                        fallthrough;
                case ATA_LITER_EDGE:
                        return NULL;
                }

        /* slave_link excludes PMP */
        if (unlikely(link == ap->slave_link))
                return NULL;

        /* we were over a PMP link */
        if (++link < ap->pmp_link + ap->nr_pmp_links)
                return link;

        if (mode == ATA_LITER_PMP_FIRST)
                return &ap->link;

        return NULL;
}
EXPORT_SYMBOL_GPL(ata_link_next);

/**
 * ata_dev_next - device iteration helper
 * @dev: the previous device, NULL to start
 * @link: ATA link containing devices to iterate
 * @mode: iteration mode, one of ATA_DITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
                                enum ata_dev_iter_mode mode)
{
        BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
               mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

        /* NULL dev indicates start of iteration */
        if (!dev)
                switch (mode) {
                case ATA_DITER_ENABLED:
                case ATA_DITER_ALL:
                        dev = link->device;
                        goto check;
                case ATA_DITER_ENABLED_REVERSE:
                case ATA_DITER_ALL_REVERSE:
                        dev = link->device + ata_link_max_devices(link) - 1;
                        goto check;
                }

 next:
        /* move to the next one */
        switch (mode) {
        case ATA_DITER_ENABLED:
        case ATA_DITER_ALL:
                if (++dev < link->device + ata_link_max_devices(link))
                        goto check;
                return NULL;
        case ATA_DITER_ENABLED_REVERSE:
        case ATA_DITER_ALL_REVERSE:
                if (--dev >= link->device)
                        goto check;
                return NULL;
        }

 check:
        if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
            !ata_dev_enabled(dev))
                goto next;
        return dev;
}
EXPORT_SYMBOL_GPL(ata_dev_next);
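
/*
 * Illustrative usage sketch (not part of the original file): the two
 * iterators above are normally used through the ata_for_each_link() and
 * ata_for_each_dev() wrappers from <linux/libata.h>, e.g.:
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			configure_one(dev);	// configure_one() is hypothetical
 *
 * which expands to ata_link_next()/ata_dev_next() loops starting from a
 * NULL cursor and walking until the iterator returns NULL.
 */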

/**
 * ata_dev_phys_link - find physical link for a device
 * @dev: ATA device to look up physical link for
 *
 * Look up physical link which @dev is attached to. Note that
 * this is different from @dev->link only when @dev is on slave
 * link. For all other cases, it's the same as @dev->link.
 *
 * LOCKING:
 * Don't care.
 *
 * RETURNS:
 * Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;

        if (!ap->slave_link)
                return dev->link;
        if (!dev->devno)
                return &ap->link;
        return ap->slave_link;
}

#ifdef CONFIG_ATA_FORCE
/**
 * ata_force_cbl - force cable type according to libata.force
 * @ap: ATA port of interest
 *
 * Force cable type according to libata.force and whine about it.
 * The last entry which has matching port number is used, so it
 * can be specified as part of device force parameters. For
 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 * same effect.
 *
 * LOCKING:
 * EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
        int i;

        for (i = ata_force_tbl_size - 1; i >= 0; i--) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];

                if (fe->port != -1 && fe->port != ap->print_id)
                        continue;

                if (fe->param.cbl == ATA_CBL_NONE)
                        continue;

                ap->cbl = fe->param.cbl;
                ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
                return;
        }
}
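
/*
 * Example (illustrative; the parameter syntax is documented in
 * Documentation/admin-guide/kernel-parameters.txt): forcing a 40-wire
 * cable on port 1 while limiting device 1.00 to UDMA/66 from the kernel
 * command line:
 *
 *	libata.force=1:40c,1.00:udma4
 *
 * Because the table is scanned from the last entry backwards, the last
 * matching entry for a port or device wins.
 */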

/**
 * ata_force_link_limits - force link limits according to libata.force
 * @link: ATA link of interest
 *
 * Force link flags and SATA spd limit according to libata.force
 * and whine about it. When only the port part is specified
 * (e.g. 1:), the limit applies to all links connected to both
 * the host link and all fan-out ports connected via PMP. If the
 * device part is specified as 0 (e.g. 1.00:), it specifies the
 * first fan-out link not the host link. Device number 15 always
 * points to the host link whether PMP is attached or not. If the
 * controller has slave link, device number 16 points to it.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
        bool did_spd = false;
        int linkno = link->pmp;
        int i;

        if (ata_is_host_link(link))
                linkno += 15;

        for (i = ata_force_tbl_size - 1; i >= 0; i--) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];

                if (fe->port != -1 && fe->port != link->ap->print_id)
                        continue;

                if (fe->device != -1 && fe->device != linkno)
                        continue;

                /* only honor the first spd limit */
                if (!did_spd && fe->param.spd_limit) {
                        link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
                        ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
                                        fe->param.name);
                        did_spd = true;
                }

                /* let lflags stack */
                if (fe->param.lflags_on) {
                        link->flags |= fe->param.lflags_on;
                        ata_link_notice(link,
                                        "FORCE: link flag 0x%x forced -> 0x%x\n",
                                        fe->param.lflags_on, link->flags);
                }
                if (fe->param.lflags_off) {
                        link->flags &= ~fe->param.lflags_off;
                        ata_link_notice(link,
                                        "FORCE: link flag 0x%x cleared -> 0x%x\n",
                                        fe->param.lflags_off, link->flags);
                }
        }
}

/**
 * ata_force_xfermask - force xfermask according to libata.force
 * @dev: ATA device of interest
 *
 * Force xfer_mask according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
        int devno = dev->link->pmp + dev->devno;
        int alt_devno = devno;
        int i;

        /* allow n.15/16 for devices attached to host port */
        if (ata_is_host_link(dev->link))
                alt_devno += 15;

        for (i = ata_force_tbl_size - 1; i >= 0; i--) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];
                unsigned int pio_mask, mwdma_mask, udma_mask;

                if (fe->port != -1 && fe->port != dev->link->ap->print_id)
                        continue;

                if (fe->device != -1 && fe->device != devno &&
                    fe->device != alt_devno)
                        continue;

                if (!fe->param.xfer_mask)
                        continue;

                ata_unpack_xfermask(fe->param.xfer_mask,
                                    &pio_mask, &mwdma_mask, &udma_mask);
                if (udma_mask)
                        dev->udma_mask = udma_mask;
                else if (mwdma_mask) {
                        dev->udma_mask = 0;
                        dev->mwdma_mask = mwdma_mask;
                } else {
                        dev->udma_mask = 0;
                        dev->mwdma_mask = 0;
                        dev->pio_mask = pio_mask;
                }

                ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
                               fe->param.name);
                return;
        }
}

/**
 * ata_force_horkage - force horkage according to libata.force
 * @dev: ATA device of interest
 *
 * Force horkage according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
        int devno = dev->link->pmp + dev->devno;
        int alt_devno = devno;
        int i;

        /* allow n.15/16 for devices attached to host port */
        if (ata_is_host_link(dev->link))
                alt_devno += 15;

        for (i = 0; i < ata_force_tbl_size; i++) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];

                if (fe->port != -1 && fe->port != dev->link->ap->print_id)
                        continue;

                if (fe->device != -1 && fe->device != devno &&
                    fe->device != alt_devno)
                        continue;

                if (!(~dev->horkage & fe->param.horkage_on) &&
                    !(dev->horkage & fe->param.horkage_off))
                        continue;

                dev->horkage |= fe->param.horkage_on;
                dev->horkage &= ~fe->param.horkage_off;

                ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
                               fe->param.name);
        }
}
#else
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
static inline void ata_force_horkage(struct ata_device *dev) { }
#endif

/**
 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 * @opcode: SCSI opcode
 *
 * Determine ATAPI command type from @opcode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
        switch (opcode) {
        case GPCMD_READ_10:
        case GPCMD_READ_12:
                return ATAPI_READ;

        case GPCMD_WRITE_10:
        case GPCMD_WRITE_12:
        case GPCMD_WRITE_AND_VERIFY_10:
                return ATAPI_WRITE;

        case GPCMD_READ_CD:
        case GPCMD_READ_CD_MSF:
                return ATAPI_READ_CD;

        case ATA_16:
        case ATA_12:
                if (atapi_passthru16)
                        return ATAPI_PASS_THRU;
                fallthrough;
        default:
                return ATAPI_MISC;
        }
}
EXPORT_SYMBOL_GPL(atapi_cmd_type);
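
/*
 * Illustrative mapping (opcodes from <linux/cdrom.h> and <scsi/scsi.h>):
 *
 *	atapi_cmd_type(GPCMD_READ_10)     -> ATAPI_READ
 *	atapi_cmd_type(GPCMD_READ_CD)     -> ATAPI_READ_CD
 *	atapi_cmd_type(ATA_16)            -> ATAPI_PASS_THRU (if atapi_passthru16)
 *	atapi_cmd_type(TEST_UNIT_READY)   -> ATAPI_MISC (default case)
 */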

static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_set_rwcmd_protocol - set taskfile r/w command and protocol
 * @dev: target device for the taskfile
 * @tf: taskfile to examine and configure
 *
 * Examine the device configuration and tf->flags to determine
 * the proper read/write command and protocol to use for @tf.
 *
 * LOCKING:
 * caller.
 */
static bool ata_set_rwcmd_protocol(struct ata_device *dev,
                                   struct ata_taskfile *tf)
{
        u8 cmd;
        int index, fua, lba48, write;

        fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
        lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
        write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
                /* Unable to use DMA due to host limitation */
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
        }

        cmd = ata_rw_cmds[index + fua + lba48 + write];
        if (!cmd)
                return false;

        tf->command = cmd;

        return true;
}

/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf. This function can handle all
 * three address formats - LBA, LBA48 and CHS. tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
        u64 block = 0;

        if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
                        block |= (u64)tf->hob_lbal << 24;
                } else
                        block |= (tf->device & 0xf) << 24;

                block |= tf->lbah << 16;
                block |= tf->lbam << 8;
                block |= tf->lbal;
        } else {
                u32 cyl, head, sect;

                cyl = tf->lbam | (tf->lbah << 8);
                head = tf->device & 0xf;
                sect = tf->lbal;

                if (!sect) {
                        ata_dev_warn(dev,
                                     "device reported invalid CHS sector 0\n");
                        return U64_MAX;
                }

                block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
        }

        return block;
}
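
/*
 * Worked example of the CHS-to-LBA conversion above (illustrative
 * geometry): with dev->heads == 16 and dev->sectors == 63, CHS 2/5/8
 * yields block = (2 * 16 + 5) * 63 + 8 - 1 = 2338.
 */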

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @qc: Metadata associated with the taskfile to build
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @class: IO priority class
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile for the command @qc for read/write request described
 * by @block, @n_block, @tf_flags and @class.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
                    unsigned int tf_flags, int class)
{
        struct ata_taskfile *tf = &qc->tf;
        struct ata_device *dev = qc->dev;

        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf->flags |= tf_flags;

        if (ata_ncq_enabled(dev)) {
                /* yay, NCQ */
                if (!lba_48_ok(block, n_block))
                        return -ERANGE;

                tf->protocol = ATA_PROT_NCQ;
                tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

                if (tf->flags & ATA_TFLAG_WRITE)
                        tf->command = ATA_CMD_FPDMA_WRITE;
                else
                        tf->command = ATA_CMD_FPDMA_READ;

                tf->nsect = qc->hw_tag << 3;
                tf->hob_feature = (n_block >> 8) & 0xff;
                tf->feature = n_block & 0xff;

                tf->hob_lbah = (block >> 40) & 0xff;
                tf->hob_lbam = (block >> 32) & 0xff;
                tf->hob_lbal = (block >> 24) & 0xff;
                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device = ATA_LBA;
                if (tf->flags & ATA_TFLAG_FUA)
                        tf->device |= 1 << 7;

                if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED &&
                    class == IOPRIO_CLASS_RT)
                        tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
        } else if (dev->flags & ATA_DFLAG_LBA) {
                tf->flags |= ATA_TFLAG_LBA;

                /* We need LBA48 for FUA writes */
                if (!(tf->flags & ATA_TFLAG_FUA) && lba_28_ok(block, n_block)) {
                        /* use LBA28 */
                        tf->device |= (block >> 24) & 0xf;
                } else if (lba_48_ok(block, n_block)) {
                        if (!(dev->flags & ATA_DFLAG_LBA48))
                                return -ERANGE;

                        /* use LBA48 */
                        tf->flags |= ATA_TFLAG_LBA48;

                        tf->hob_nsect = (n_block >> 8) & 0xff;

                        tf->hob_lbah = (block >> 40) & 0xff;
                        tf->hob_lbam = (block >> 32) & 0xff;
                        tf->hob_lbal = (block >> 24) & 0xff;
                } else
                        /* request too large even for LBA48 */
                        return -ERANGE;

                if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
                        return -EINVAL;

                tf->nsect = n_block & 0xff;

                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device |= ATA_LBA;
        } else {
                /* CHS */
                u32 sect, head, cyl, track;

                /* The request -may- be too large for CHS addressing. */
                if (!lba_28_ok(block, n_block))
                        return -ERANGE;

                if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
                        return -EINVAL;

                /* Convert LBA to CHS */
                track = (u32)block / dev->sectors;
                cyl = track / dev->heads;
                head = track % dev->heads;
                sect = (u32)block % dev->sectors + 1;

                /* Check whether the converted CHS can fit.
                   Cylinder: 0-65535
                   Head: 0-15
                   Sector: 1-255 */
                if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
                        return -ERANGE;

                tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
                tf->lbal = sect;
                tf->lbam = cyl;
                tf->lbah = cyl >> 8;
                tf->device |= head;
        }

        return 0;
}
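
/*
 * Illustrative sketch of the NCQ encoding done above: for a queued read
 * with hardware tag 'tag' of 'n_block' sectors, the sector count moves
 * into the FEATURE pair so that the COUNT field can carry the tag:
 *
 *	tf->command     = ATA_CMD_FPDMA_READ;
 *	tf->nsect       = tag << 3;                // tag in bits 7:3
 *	tf->feature     = n_block & 0xff;          // count 7:0
 *	tf->hob_feature = (n_block >> 8) & 0xff;   // count 15:8
 *
 * with the 48-bit LBA split across lbal/lbam/lbah and their hob_*
 * counterparts as in the non-NCQ LBA48 case.
 */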

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
unsigned int ata_pack_xfermask(unsigned int pio_mask,
                               unsigned int mwdma_mask,
                               unsigned int udma_mask)
{
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
               ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
               ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
EXPORT_SYMBOL_GPL(ata_pack_xfermask);

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
                         unsigned int *mwdma_mask, unsigned int *udma_mask)
{
        if (pio_mask)
                *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
        if (mwdma_mask)
                *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
        if (udma_mask)
                *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
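
/*
 * Illustrative round trip (assumed mask values): a device supporting
 * PIO0-4, MWDMA0-2 and UDMA0-5 packs to
 *
 *	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 * and unpacking recovers the component masks:
 *
 *	unsigned int pio, mwdma, udma;
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	// pio == 0x1f, mwdma == 0x07, udma == 0x3f
 */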

static const struct ata_xfer_ent {
        int shift, bits;
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
        { -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask. Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
        int highbit = fls(xfer_mask) - 1;
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        return ent->base + highbit - ent->shift;
        return 0xff;
}
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
                                & ~((1 << ent->shift) - 1);
        return 0;
}
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
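
/*
 * Illustrative examples: ata_xfer_mask2mode() looks only at the highest
 * set bit, so the UDMA0-5 mask (0x3f << ATA_SHIFT_UDMA) maps to
 * XFER_UDMA_5, and ata_xfer_mode2mask(XFER_UDMA_5) expands back to that
 * same UDMA0-5 mask.
 */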

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ent->shift;
        return -1;
}
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @modemask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @mode_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned int xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "PIO5",
                "PIO6",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "MWDMA3",
                "MWDMA4",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}
EXPORT_SYMBOL_GPL(ata_mode_string);

const char *sata_spd_string(unsigned int spd)
{
        static const char * const spd_str[] = {
                "1.5 Gbps",
                "3.0 Gbps",
                "6.0 Gbps",
        };

        if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
                return "<unknown>";
        return spd_str[spd - 1];
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
         * so we check only those; it's sufficient for uniqueness.
         *
         * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
         * signatures for ATA and ATAPI devices attached on SerialATA,
         * 0x3c/0xc3 and 0x69/0x96 respectively. However, the SerialATA
         * spec never mentioned using different signatures for ATA/ATAPI
         * devices. Then, the Serial ATA II: Port Multiplier
         * specification began to use 0x69/0x96 to identify port
         * multipliers and 0x3c/0xc3 to identify SEMB devices.
         * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
         * 0x69/0x96 shortly thereafter and described them as reserved
         * for SerialATA.
         *
         * We follow the current spec and consider that 0x69/0x96
         * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
         * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
         * SEMB signature. This is worked around in
         * ata_dev_read_id().
         */
        if (tf->lbam == 0 && tf->lbah == 0)
                return ATA_DEV_ATA;

        if (tf->lbam == 0x14 && tf->lbah == 0xeb)
                return ATA_DEV_ATAPI;

        if (tf->lbam == 0x69 && tf->lbah == 0x96)
                return ATA_DEV_PMP;

        if (tf->lbam == 0x3c && tf->lbah == 0xc3)
                return ATA_DEV_SEMB;

        if (tf->lbam == 0xcd && tf->lbah == 0xab)
                return ATA_DEV_ZAC;

        return ATA_DEV_UNKNOWN;
}
EXPORT_SYMBOL_GPL(ata_dev_classify);
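
/*
 * Illustrative sketch: classifying the reset signature an ATAPI device
 * leaves in the LBA mid/high shadow registers:
 *
 *	struct ata_taskfile tf = { .lbam = 0x14, .lbah = 0xeb };
 *
 *	// ata_dev_classify(&tf) returns ATA_DEV_ATAPI
 */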

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks. Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{
        unsigned int c;

        BUG_ON(len & 1);

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}
EXPORT_SYMBOL_GPL(ata_id_string);

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null. @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        ata_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}
EXPORT_SYMBOL_GPL(ata_id_c_string);
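
/*
 * Typical usage (ata_dev_configure() below does exactly this): pulling
 * the product and firmware revision strings out of IDENTIFY data:
 *
 *	char modelbuf[ATA_ID_PROD_LEN + 1];
 *	char fwrevbuf[ATA_ID_FW_REV_LEN + 1];
 *
 *	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, sizeof(modelbuf));
 *	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, sizeof(fwrevbuf));
 */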

static u64 ata_id_n_sectors(const u16 *id)
{
        if (ata_id_has_lba(id)) {
                if (ata_id_has_lba48(id))
                        return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);

                return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
        }

        if (ata_id_current_chs_valid(id))
                return (u32)id[ATA_ID_CUR_CYLS] * (u32)id[ATA_ID_CUR_HEADS] *
                       (u32)id[ATA_ID_CUR_SECTORS];

        return (u32)id[ATA_ID_CYLS] * (u32)id[ATA_ID_HEADS] *
               (u32)id[ATA_ID_SECTORS];
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
        u64 sectors = 0;

        sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
        sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
        sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
        sectors |= (tf->lbah & 0xff) << 16;
        sectors |= (tf->lbam & 0xff) << 8;
        sectors |= (tf->lbal & 0xff);

        return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
        u64 sectors = 0;

        sectors |= (tf->device & 0x0f) << 24;
        sectors |= (tf->lbah & 0xff) << 16;
        sectors |= (tf->lbam & 0xff) << 8;
        sectors |= (tf->lbal & 0xff);

        return sectors;
}

/**
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result native max address
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
        unsigned int err_mask;
        struct ata_taskfile tf;
        int lba48 = ata_id_has_lba48(dev->id);

        ata_tf_init(dev, &tf);

        /* always clear all address registers */
        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

        if (lba48) {
                tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
                tf.flags |= ATA_TFLAG_LBA48;
        } else
                tf.command = ATA_CMD_READ_NATIVE_MAX;

        tf.protocol = ATA_PROT_NODATA;
        tf.device |= ATA_LBA;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
        if (err_mask) {
                ata_dev_warn(dev,
                             "failed to read native max address (err_mask=0x%x)\n",
                             err_mask);
                if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
                        return -EACCES;
                return -EIO;
        }

        if (lba48)
                *max_sectors = ata_tf_to_lba48(&tf) + 1;
        else
                *max_sectors = ata_tf_to_lba(&tf) + 1;
        if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
                (*max_sectors)--;
        return 0;
}

/**
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive. -EIO on other
 * errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
        unsigned int err_mask;
        struct ata_taskfile tf;
        int lba48 = ata_id_has_lba48(dev->id);

        new_sectors--;

        ata_tf_init(dev, &tf);

        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

        if (lba48) {
                tf.command = ATA_CMD_SET_MAX_EXT;
                tf.flags |= ATA_TFLAG_LBA48;

                tf.hob_lbal = (new_sectors >> 24) & 0xff;
                tf.hob_lbam = (new_sectors >> 32) & 0xff;
                tf.hob_lbah = (new_sectors >> 40) & 0xff;
        } else {
                tf.command = ATA_CMD_SET_MAX;

                tf.device |= (new_sectors >> 24) & 0xf;
        }

        tf.protocol = ATA_PROT_NODATA;
        tf.device |= ATA_LBA;

        tf.lbal = (new_sectors >> 0) & 0xff;
        tf.lbam = (new_sectors >> 8) & 0xff;
        tf.lbah = (new_sectors >> 16) & 0xff;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
        if (err_mask) {
                ata_dev_warn(dev,
                             "failed to set max address (err_mask=0x%x)\n",
                             err_mask);
                if (err_mask == AC_ERR_DEV &&
                    (tf.error & (ATA_ABORTED | ATA_IDNF)))
                        return -EACCES;
                return -EIO;
        }

        return 0;
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
        bool print_info = ata_dev_print_info(dev);
        bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
        u64 sectors = ata_id_n_sectors(dev->id);
        u64 native_sectors;
        int rc;

        /* do we need to do it? */
        if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
            !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
            (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
                return 0;

        /* read native max address */
        rc = ata_read_native_max_address(dev, &native_sectors);
        if (rc) {
                /* If device aborted the command or HPA isn't going to
                 * be unlocked, skip HPA resizing.
                 */
                if (rc == -EACCES || !unlock_hpa) {
                        ata_dev_warn(dev,
                                     "HPA support seems broken, skipping HPA handling\n");
                        dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

                        /* we can continue if device aborted the command */
                        if (rc == -EACCES)
                                rc = 0;
                }

                return rc;
        }
        dev->n_native_sectors = native_sectors;

        /* nothing to do? */
        if (native_sectors <= sectors || !unlock_hpa) {
                if (!print_info || native_sectors == sectors)
                        return 0;

                if (native_sectors > sectors)
                        ata_dev_info(dev,
                                     "HPA detected: current %llu, native %llu\n",
                                     (unsigned long long)sectors,
                                     (unsigned long long)native_sectors);
                else if (native_sectors < sectors)
                        ata_dev_warn(dev,
                                     "native sectors (%llu) is smaller than sectors (%llu)\n",
                                     (unsigned long long)native_sectors,
                                     (unsigned long long)sectors);
                return 0;
        }

        /* let's unlock HPA */
        rc = ata_set_max_sectors(dev, native_sectors);
        if (rc == -EACCES) {
                /* if device aborted the command, skip HPA resizing */
                ata_dev_warn(dev,
                             "device aborted resize (%llu -> %llu), skipping HPA handling\n",
                             (unsigned long long)sectors,
                             (unsigned long long)native_sectors);
                dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
                return 0;
        } else if (rc)
                return rc;

        /* re-read IDENTIFY data */
        rc = ata_dev_reread_id(dev, 0);
        if (rc) {
                ata_dev_err(dev,
                            "failed to re-read IDENTIFY data after HPA resizing\n");
                return rc;
        }

        if (print_info) {
                u64 new_sectors = ata_id_n_sectors(dev->id);
                ata_dev_info(dev,
                             "HPA unlocked: %llu -> %llu, native %llu\n",
                             (unsigned long long)sectors,
                             (unsigned long long)new_sectors,
                             (unsigned long long)native_sectors);
        }

        return 0;
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @dev: device from which the information is fetched
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */
static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
{
        ata_dev_dbg(dev,
                "49==0x%04x 53==0x%04x 63==0x%04x 64==0x%04x 75==0x%04x\n"
                "80==0x%04x 81==0x%04x 82==0x%04x 83==0x%04x 84==0x%04x\n"
                "88==0x%04x 93==0x%04x\n",
                id[49], id[53], id[63], id[64], id[75], id[80],
                id[81], id[82], id[83], id[84], id[88], id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
unsigned int ata_id_xfermask(const u16 *id)
{
        unsigned int pio_mask, mwdma_mask, udma_mask;

        /* Usual case. Word 53 indicates word 64 is valid */
        if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
                pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
                pio_mask <<= 3;
                pio_mask |= 0x7;
        } else {
                /* If word 64 isn't valid then Word 51 high byte holds
                 * the PIO timing number for the maximum. Turn it into
                 * a mask.
                 */
                u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
                if (mode < 5)   /* Valid PIO range */
                        pio_mask = (2 << mode) - 1;
                else
                        pio_mask = 1;

                /* But wait... there's more. Design your standards by
                 * committee and you too can get a free iordy field to
                 * process. However it is the speeds not the modes that
                 * are supported... Note drivers using the timing API
                 * will get this right anyway
                 */
        }

        mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

        if (ata_id_is_cfa(id)) {
                /*
                 * Process compact flash extended modes
                 */
                int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
                int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

                if (pio == 5)
                        pio_mask |= (1 << 5);
                if (pio == 6)
                        pio_mask |= (1 << 6);
                if (dma == 1)
                        mwdma_mask |= (1 << 3);
                if (dma == 2)
                        mwdma_mask |= (1 << 4);
        }

        udma_mask = 0;
        if (id[ATA_ID_FIELD_VALID] & (1 << 2))
                udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

        return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
EXPORT_SYMBOL_GPL(ata_id_xfermask);
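
/*
 * Illustrative example (assumed IDENTIFY contents): for a drive with
 * word 53 bit 1 set, word 63 == 0x0007 (MWDMA0-2) and the low byte of
 * word 88 == 0x3f (UDMA0-5), this returns
 *
 *	ata_pack_xfermask(pio_mask, 0x07, 0x3f)
 *
 * where pio_mask was derived from word 64 (plus the always-supported
 * PIO0-2) as above.
 */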

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
        struct completion *waiting = qc->private_data;

        complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout. @tf contains
 * command on entry and result on return. Timeout and error
 * conditions are reported via return value. No recovery action
 * is taken after a command times out. It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None. Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
static unsigned ata_exec_internal_sg(struct ata_device *dev,
                                     struct ata_taskfile *tf, const u8 *cdb,
                                     int dma_dir, struct scatterlist *sgl,
                                     unsigned int n_elem, unsigned int timeout)
{
        struct ata_link *link = dev->link;
        struct ata_port *ap = link->ap;
        u8 command = tf->command;
        int auto_timeout = 0;
        struct ata_queued_cmd *qc;
        unsigned int preempted_tag;
        u32 preempted_sactive;
        u64 preempted_qc_active;
        int preempted_nr_active_links;
        DECLARE_COMPLETION_ONSTACK(wait);
        unsigned long flags;
        unsigned int err_mask;
        int rc;

        spin_lock_irqsave(ap->lock, flags);

        /* no internal command while frozen */
        if (ata_port_is_frozen(ap)) {
                spin_unlock_irqrestore(ap->lock, flags);
                return AC_ERR_SYSTEM;
        }

        /* initialize internal qc */
        qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);

        qc->tag = ATA_TAG_INTERNAL;
        qc->hw_tag = 0;
        qc->scsicmd = NULL;
        qc->ap = ap;
        qc->dev = dev;
        ata_qc_reinit(qc);

        preempted_tag = link->active_tag;
        preempted_sactive = link->sactive;
        preempted_qc_active = ap->qc_active;
        preempted_nr_active_links = ap->nr_active_links;
        link->active_tag = ATA_TAG_POISON;
        link->sactive = 0;
        ap->qc_active = 0;
        ap->nr_active_links = 0;

        /* prepare & issue qc */
        qc->tf = *tf;
        if (cdb)
                memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

        /* some SATA bridges need us to indicate data xfer direction */
        if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
            dma_dir == DMA_FROM_DEVICE)
                qc->tf.feature |= ATAPI_DMADIR;

        qc->flags |= ATA_QCFLAG_RESULT_TF;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
                unsigned int i, buflen = 0;
                struct scatterlist *sg;

                for_each_sg(sgl, sg, n_elem, i)
                        buflen += sg->length;

                ata_sg_init(qc, sgl, n_elem);
                qc->nbytes = buflen;
        }

        qc->private_data = &wait;
        qc->complete_fn = ata_qc_complete_internal;

        ata_qc_issue(qc);

        spin_unlock_irqrestore(ap->lock, flags);

        if (!timeout) {
                if (ata_probe_timeout)
                        timeout = ata_probe_timeout * 1000;
                else {
                        timeout = ata_internal_cmd_timeout(dev, command);
                        auto_timeout = 1;
                }
        }

        if (ap->ops->error_handler)
                ata_eh_release(ap);

        rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

        if (ap->ops->error_handler)
                ata_eh_acquire(ap);

        ata_sff_flush_pio_task(ap);

        if (!rc) {
                spin_lock_irqsave(ap->lock, flags);

                /* We're racing with irq here. If we lose, the
                 * following test prevents us from completing the qc
                 * twice. If we win, the port is frozen and will be
                 * cleaned up by ->post_internal_cmd().
                 */
                if (qc->flags & ATA_QCFLAG_ACTIVE) {
                        qc->err_mask |= AC_ERR_TIMEOUT;

                        if (ap->ops->error_handler)
                                ata_port_freeze(ap);
                        else
                                ata_qc_complete(qc);

                        ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
                                     timeout, command);
                }

                spin_unlock_irqrestore(ap->lock, flags);
        }

        /* do post_internal_cmd */
        if (ap->ops->post_internal_cmd)
                ap->ops->post_internal_cmd(qc);

        /* perform minimal error analysis */
        if (qc->flags & ATA_QCFLAG_EH) {
                if (qc->result_tf.status & (ATA_ERR | ATA_DF))
                        qc->err_mask |= AC_ERR_DEV;

                if (!qc->err_mask)
                        qc->err_mask |= AC_ERR_OTHER;

                if (qc->err_mask & ~AC_ERR_OTHER)
                        qc->err_mask &= ~AC_ERR_OTHER;
        } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
                qc->result_tf.status |= ATA_SENSE;
        }

        /* finish up */
        spin_lock_irqsave(ap->lock, flags);

        *tf = qc->result_tf;
        err_mask = qc->err_mask;

        ata_qc_free(qc);

        link->active_tag = preempted_tag;
        link->sactive = preempted_sactive;
        ap->qc_active = preempted_qc_active;
        ap->nr_active_links = preempted_nr_active_links;

        spin_unlock_irqrestore(ap->lock, flags);

        if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
                ata_internal_cmd_timed_out(dev, command);

        return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 * @timeout: Timeout in msecs (0 for default)
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None. Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
                           struct ata_taskfile *tf, const u8 *cdb,
                           int dma_dir, void *buf, unsigned int buflen,
                           unsigned int timeout)
{
        struct scatterlist *psg = NULL, sg;
        unsigned int n_elem = 0;

        if (dma_dir != DMA_NONE) {
                WARN_ON(!buf);
                sg_init_one(&sg, buf, buflen);
                psg = &sg;
                n_elem++;
        }

        return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
                                    timeout);
}
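
/*
 * Illustrative sketch: a simple non-data internal command, as issued by
 * helpers such as ata_read_native_max_address() above:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_READ_NATIVE_MAX;
 *	tf.protocol = ATA_PROT_NODATA;
 *	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 *
 * A zero err_mask means success, with the result registers left in tf.
 */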

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
        /* Don't set IORDY if we're preparing for reset. IORDY may
         * lead to controller lock up on certain controllers if the
         * port is not occupied. See bko#11703 for details.
         */
        if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
                return 0;
        /* Controller doesn't support IORDY. Probably a pointless
         * check as the caller should know this.
         */
        if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
                return 0;
        /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
        if (ata_id_is_cfa(adev->id)
            && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
                return 0;
        /* PIO3 and higher it is mandatory */
        if (adev->pio_mode > XFER_PIO_2)
                return 1;
        /* We turn it on when possible */
        if (ata_id_has_iordy(adev->id))
                return 1;
        return 0;
}
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);

/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy. Return
 * -1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
        /* If we have no drive specific rule, then PIO 2 is non IORDY */
        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
                u16 pio = adev->id[ATA_ID_EIDE_PIO];
                /* Is the speed faster than the drive allows non IORDY ? */
                if (pio) {
                        /* This is cycle times not frequency - watch the logic! */
                        if (pio > 240)  /* PIO2 is 240nS per cycle */
                                return 3 << ATA_SHIFT_PIO;
                        return 7 << ATA_SHIFT_PIO;
                }
        }
        return 3 << ATA_SHIFT_PIO;
}

/**
 * ata_do_dev_read_id - default ID read method
 * @dev: device
 * @tf: proposed taskfile
 * @id: data buffer
 *
 * Issue the identify taskfile and hand back the buffer containing
 * identify data. For some RAID controllers and for pre ATA devices
 * this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
                                struct ata_taskfile *tf, __le16 *id)
{
        return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
                                 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device. ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 * now we abort if we hit that case.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
                    unsigned int flags, u16 *id)
{
        struct ata_port *ap = dev->link->ap;
        unsigned int class = *p_class;
        struct ata_taskfile tf;
        unsigned int err_mask = 0;
        const char *reason;
        bool is_semb = class == ATA_DEV_SEMB;
        int may_fallback = 1, tried_spinup = 0;
        int rc;

 retry:
        ata_tf_init(dev, &tf);

        switch (class) {
        case ATA_DEV_SEMB:
                class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
                fallthrough;
        case ATA_DEV_ATA:
        case ATA_DEV_ZAC:
                tf.command = ATA_CMD_ID_ATA;
                break;
        case ATA_DEV_ATAPI:
                tf.command = ATA_CMD_ID_ATAPI;
                break;
        default:
                rc = -ENODEV;
                reason = "unsupported class";
                goto err_out;
        }

        tf.protocol = ATA_PROT_PIO;

        /* Some devices choke if TF registers contain garbage. Make
         * sure those are properly initialized.
         */
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

        /* Device presence detection is unreliable on some
         * controllers. Always poll IDENTIFY if available.
         */
        tf.flags |= ATA_TFLAG_POLLING;

        if (ap->ops->read_id)
                err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
        else
                err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);

        if (err_mask) {
                if (err_mask & AC_ERR_NODEV_HINT) {
                        ata_dev_dbg(dev, "NODEV after polling detection\n");
                        return -ENOENT;
                }

                if (is_semb) {
                        ata_dev_info(dev,
                                     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
                        /* SEMB is not supported yet */
                        *p_class = ATA_DEV_SEMB_UNSUP;
                        return 0;
                }

                if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
                        /* Device or controller might have reported
                         * the wrong device class. Give a shot at the
                         * other IDENTIFY if the current one is
                         * aborted by the device.
                         */
                        if (may_fallback) {
                                may_fallback = 0;

                                if (class == ATA_DEV_ATA)
                                        class = ATA_DEV_ATAPI;
                                else
                                        class = ATA_DEV_ATA;
                                goto retry;
                        }

                        /* Control reaches here iff the device aborted
                         * both flavors of IDENTIFYs which happens
                         * sometimes with phantom devices.
                         */
                        ata_dev_dbg(dev,
                                    "both IDENTIFYs aborted, assuming NODEV\n");
                        return -ENOENT;
                }

                rc = -EIO;
                reason = "I/O error";
                goto err_out;
        }

        if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
                ata_dev_info(dev, "dumping IDENTIFY data, "
                             "class=%d may_fallback=%d tried_spinup=%d\n",
                             class, may_fallback, tried_spinup);
                print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
                               16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
        }

        /* Falling back doesn't make sense if ID data was read
         * successfully at least once.
         */
        may_fallback = 0;

        swap_buf_le16(id, ATA_ID_WORDS);

        /* sanity check */
        rc = -EINVAL;
        reason = "device reports invalid type";

        if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
                if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
                        goto err_out;
                if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
                    ata_id_is_ata(id)) {
                        ata_dev_dbg(dev,
                                    "host indicates ignore ATA devices, ignored\n");
                        return -ENOENT;
                }
        } else {
                if (ata_id_is_ata(id))
                        goto err_out;
        }

        if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
                tried_spinup = 1;
                /*
                 * Drive powered-up in standby mode, and requires a specific
                 * SET_FEATURES spin-up subcommand before it will accept
                 * anything other than the original IDENTIFY command.
                 */
                err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
                if (err_mask && id[2] != 0x738c) {
                        rc = -EIO;
                        reason = "SPINUP failed";
                        goto err_out;
                }
                /*
                 * If the drive initially returned incomplete IDENTIFY info,
                 * we now must reissue the IDENTIFY command.
                 */
                if (id[2] == 0x37c8)
                        goto retry;
        }

        if ((flags & ATA_READID_POSTRESET) &&
            (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
                /*
                 * The exact sequence expected by certain pre-ATA4 drives is:
                 * SRST RESET
                 * IDENTIFY (optional in early ATA)
                 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
                 * anything
                 * Some drives were very specific about that exact sequence.
                 *
                 * Note that ATA4 says lba is mandatory so the second check
                 * should never trigger.
                 */
                if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
                        err_mask = ata_dev_init_params(dev, id[3], id[6]);
                        if (err_mask) {
                                rc = -EIO;
                                reason = "INIT_DEV_PARAMS failed";
                                goto err_out;
                        }

                        /* current CHS translation info (id[53-58]) might be
                         * changed. reread the identify device info.
                         */
                        flags &= ~ATA_READID_POSTRESET;
                        goto retry;
                }
        }

        *p_class = class;

        return 0;

 err_out:
        ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
                     reason, err_mask);
        return rc;
}

/**
 * ata_read_log_page - read a specific log page
 * @dev: target device
 * @log: log to read
 * @page: page to read
 * @buf: buffer to store read page
 * @sectors: number of sectors to read
 *
 * Read log page using READ_LOG_EXT command.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
                               u8 page, void *buf, unsigned int sectors)
{
        unsigned long ap_flags = dev->link->ap->flags;
        struct ata_taskfile tf;
        unsigned int err_mask;
        bool dma = false;

        ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);

        /*
         * Return error without actually issuing the command on controllers
         * which e.g. lockup on a read log page.
         */
        if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
                return AC_ERR_DEV;

retry:
        ata_tf_init(dev, &tf);
        if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
            !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
                tf.command = ATA_CMD_READ_LOG_DMA_EXT;
                tf.protocol = ATA_PROT_DMA;
                dma = true;
        } else {
                tf.command = ATA_CMD_READ_LOG_EXT;
                tf.protocol = ATA_PROT_PIO;
                dma = false;
        }
        tf.lbal = log;
        tf.lbam = page;
        tf.nsect = sectors;
        tf.hob_nsect = sectors >> 8;
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     buf, sectors * ATA_SECT_SIZE, 0);

        if (err_mask) {
                if (dma) {
                        dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
                        if (!ata_port_is_frozen(dev->link->ap))
                                goto retry;
                }
                ata_dev_err(dev,
                            "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
                            (unsigned int)log, (unsigned int)page, err_mask);
        }

        return err_mask;
}

static int ata_log_supported(struct ata_device *dev, u8 log)
{
        struct ata_port *ap = dev->link->ap;

        if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
                return 0;

        if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
                return 0;
        return get_unaligned_le16(&ap->sector_buf[log * 2]);
}

static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
{
        struct ata_port *ap = dev->link->ap;
        unsigned int err, i;

        if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
                return false;

        if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
                /*
                 * IDENTIFY DEVICE data log is defined as mandatory starting
                 * with ACS-3 (ATA version 10). Warn about the missing log
                 * for drives which implement this ATA level or above.
                 */
                if (ata_id_major_version(dev->id) >= 10)
                        ata_dev_warn(dev,
                                     "ATA Identify Device Log not supported\n");
                dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
                return false;
        }

        /*
         * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
         * supported.
         */
        err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
                                1);
        if (err)
                return false;

        for (i = 0; i < ap->sector_buf[8]; i++) {
                if (ap->sector_buf[9 + i] == page)
                        return true;
        }

        return false;
}

static int ata_do_link_spd_horkage(struct ata_device *dev)
{
        struct ata_link *plink = ata_dev_phys_link(dev);
        u32 target, target_limit;

        if (!sata_scr_valid(plink))
                return 0;

        if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
                target = 1;
        else
                return 0;

        target_limit = (1 << target) - 1;

        /* if already on stricter limit, no need to push further */
        if (plink->sata_spd_limit <= target_limit)
                return 0;

        plink->sata_spd_limit = target_limit;

        /* Request another EH round by returning -EAGAIN if link is
         * going faster than the target speed. Forward progress is
         * guaranteed by setting sata_spd_limit to target_limit above.
         */
        if (plink->sata_spd > target) {
                ata_dev_info(dev, "applying link speed limit horkage to %s\n",
                             sata_spd_string(target));
                return -EAGAIN;
        }
        return 0;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;

        if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
                return 0;

        return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;
        unsigned int err_mask;

        if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
                ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
                return;
        }
        err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
                                     0, ap->sector_buf, 1);
        if (!err_mask) {
                u8 *cmds = dev->ncq_send_recv_cmds;

                dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
                memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);

                if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
                        ata_dev_dbg(dev, "disabling queued TRIM support\n");
                        cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
                                ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
                }
        }
}

static void ata_dev_config_ncq_non_data(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;
        unsigned int err_mask;

        if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2143 "NCQ Send/Recv Log not supported\n");
                return;
        }
        err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
                                     0, ap->sector_buf, 1);
        if (!err_mask) {
                u8 *cmds = dev->ncq_non_data_cmds;

                memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
        }
}

static void ata_dev_config_ncq_prio(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;
        unsigned int err_mask;

        if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
                return;

        err_mask = ata_read_log_page(dev,
                                     ATA_LOG_IDENTIFY_DEVICE,
                                     ATA_LOG_SATA_SETTINGS,
                                     ap->sector_buf,
                                     1);
        if (err_mask)
                goto not_supported;

        if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
                goto not_supported;

        dev->flags |= ATA_DFLAG_NCQ_PRIO;
        return;

not_supported:
        dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
        dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
}

static bool ata_dev_check_adapter(struct ata_device *dev,
                                  unsigned short vendor_id)
{
        struct pci_dev *pcidev = NULL;
        struct device *parent_dev = NULL;

        for (parent_dev = dev->tdev.parent; parent_dev != NULL;
             parent_dev = parent_dev->parent) {
                if (dev_is_pci(parent_dev)) {
                        pcidev = to_pci_dev(parent_dev);
                        if (pcidev->vendor == vendor_id)
                                return true;
                }
        }

        return false;
}

static int ata_dev_config_ncq(struct ata_device *dev,
                              char *desc, size_t desc_sz)
{
        struct ata_port *ap = dev->link->ap;
        int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
        unsigned int err_mask;
        char *aa_desc = "";

        if (!ata_id_has_ncq(dev->id)) {
                desc[0] = '\0';
                return 0;
        }
        if (!IS_ENABLED(CONFIG_SATA_HOST))
                return 0;
        if (dev->horkage & ATA_HORKAGE_NONCQ) {
                snprintf(desc, desc_sz, "NCQ (not used)");
                return 0;
        }

        if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
            ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
                snprintf(desc, desc_sz, "NCQ (not used)");
                return 0;
        }

        if (ap->flags & ATA_FLAG_NCQ) {
                hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
                dev->flags |= ATA_DFLAG_NCQ;
        }

        if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
            (ap->flags & ATA_FLAG_FPDMA_AA) &&
            ata_id_has_fpdma_aa(dev->id)) {
                err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
                                               SATA_FPDMA_AA);
                if (err_mask) {
                        ata_dev_err(dev,
                                    "failed to enable AA (error_mask=0x%x)\n",
                                    err_mask);
                        if (err_mask != AC_ERR_DEV) {
                                dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
                                return -EIO;
                        }
                } else
                        aa_desc = ", AA";
        }

        if (hdepth >= ddepth)
                snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
        else
                snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
                         ddepth, aa_desc);

        if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
                if (ata_id_has_ncq_send_and_recv(dev->id))
                        ata_dev_config_ncq_send_recv(dev);
                if (ata_id_has_ncq_non_data(dev->id))
                        ata_dev_config_ncq_non_data(dev);
                if (ata_id_has_ncq_prio(dev->id))
                        ata_dev_config_ncq_prio(dev);
        }

        return 0;
}

static void ata_dev_config_sense_reporting(struct ata_device *dev)
{
        unsigned int err_mask;

        if (!ata_id_has_sense_reporting(dev->id))
                return;

        if (ata_id_sense_reporting_enabled(dev->id))
                return;

        err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
        if (err_mask) {
                ata_dev_dbg(dev,
                            "failed to enable Sense Data Reporting, Emask 0x%x\n",
                            err_mask);
        }
}

static void ata_dev_config_zac(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;
        unsigned int err_mask;
        u8 *identify_buf = ap->sector_buf;

        dev->zac_zones_optimal_open = U32_MAX;
        dev->zac_zones_optimal_nonseq = U32_MAX;
        dev->zac_zones_max_open = U32_MAX;

        /*
         * Always set the 'ZAC' flag for Host-managed devices.
         */
        if (dev->class == ATA_DEV_ZAC)
                dev->flags |= ATA_DFLAG_ZAC;
        else if (ata_id_zoned_cap(dev->id) == 0x01)
                /*
                 * Check for host-aware devices.
                 */
                dev->flags |= ATA_DFLAG_ZAC;

        if (!(dev->flags & ATA_DFLAG_ZAC))
                return;

        if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
                ata_dev_warn(dev,
                             "ATA Zoned Information Log not supported\n");
                return;
        }

        /*
         * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
         */
        err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
                                     ATA_LOG_ZONED_INFORMATION,
                                     identify_buf, 1);
        if (!err_mask) {
                u64 zoned_cap, opt_open, opt_nonseq, max_open;

                zoned_cap = get_unaligned_le64(&identify_buf[8]);
                if ((zoned_cap >> 63))
                        dev->zac_zoned_cap = (zoned_cap & 1);
                opt_open = get_unaligned_le64(&identify_buf[24]);
                if ((opt_open >> 63))
                        dev->zac_zones_optimal_open = (u32)opt_open;
                opt_nonseq = get_unaligned_le64(&identify_buf[32]);
                if ((opt_nonseq >> 63))
                        dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
                max_open = get_unaligned_le64(&identify_buf[40]);
                if ((max_open >> 63))
                        dev->zac_zones_max_open = (u32)max_open;
        }
}

static void ata_dev_config_trusted(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;
        u64 trusted_cap;
        unsigned int err;

        if (!ata_id_has_trusted(dev->id))
                return;

        if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
                ata_dev_warn(dev,
                             "Security Log not supported\n");
                return;
        }

        err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
                                ap->sector_buf, 1);
        if (err)
                return;

        trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
        if (!(trusted_cap & (1ULL << 63))) {
                ata_dev_dbg(dev,
                            "Trusted Computing capability qword not valid!\n");
                return;
        }

        if (trusted_cap & (1 << 0))
                dev->flags |= ATA_DFLAG_TRUSTED;
}

static int ata_dev_config_lba(struct ata_device *dev)
{
        const u16 *id = dev->id;
        const char *lba_desc;
        char ncq_desc[24];
        int ret;

        dev->flags |= ATA_DFLAG_LBA;

        if (ata_id_has_lba48(id)) {
                lba_desc = "LBA48";
                dev->flags |= ATA_DFLAG_LBA48;
                if (dev->n_sectors >= (1UL << 28) &&
                    ata_id_has_flush_ext(id))
                        dev->flags |= ATA_DFLAG_FLUSH_EXT;
        } else {
                lba_desc = "LBA";
        }

        /* config NCQ */
        ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

        /* print device info to dmesg */
        if (ata_dev_print_info(dev))
                ata_dev_info(dev,
                             "%llu sectors, multi %u: %s %s\n",
                             (unsigned long long)dev->n_sectors,
                             dev->multi_count, lba_desc, ncq_desc);

        return ret;
}

static void ata_dev_config_chs(struct ata_device *dev)
{
        const u16 *id = dev->id;

        if (ata_id_current_chs_valid(id)) {
                /* Current CHS translation is valid. */
                dev->cylinders = id[54];
                dev->heads = id[55];
                dev->sectors = id[56];
        } else {
                /* Default translation */
                dev->cylinders = id[1];
                dev->heads = id[3];
                dev->sectors = id[6];
        }

        /* print device info to dmesg */
        if (ata_dev_print_info(dev))
                ata_dev_info(dev,
                             "%llu sectors, multi %u, CHS %u/%u/%u\n",
                             (unsigned long long)dev->n_sectors,
                             dev->multi_count, dev->cylinders,
                             dev->heads, dev->sectors);
}

static void ata_dev_config_fua(struct ata_device *dev)
{
        /* Ignore FUA support if its use is disabled globally */
        if (!libata_fua)
                goto nofua;

        /* Ignore devices without support for WRITE DMA FUA EXT */
        if (!(dev->flags & ATA_DFLAG_LBA48) || !ata_id_has_fua(dev->id))
                goto nofua;

        /* Ignore known bad devices and devices that lack NCQ support */
        if (!ata_ncq_supported(dev) || (dev->horkage & ATA_HORKAGE_NO_FUA))
                goto nofua;

        dev->flags |= ATA_DFLAG_FUA;

        return;

nofua:
        dev->flags &= ~ATA_DFLAG_FUA;
}

static void ata_dev_config_devslp(struct ata_device *dev)
{
        u8 *sata_setting = dev->link->ap->sector_buf;
        unsigned int err_mask;
        int i, j;

        /*
         * Check device sleep capability. Get DevSlp timing variables
         * from SATA Settings page of Identify Device Data Log.
         */
        if (!ata_id_has_devslp(dev->id) ||
            !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
                return;

        err_mask = ata_read_log_page(dev,
                                     ATA_LOG_IDENTIFY_DEVICE,
                                     ATA_LOG_SATA_SETTINGS,
                                     sata_setting, 1);
        if (err_mask)
                return;

        dev->flags |= ATA_DFLAG_DEVSLP;
        for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
                j = ATA_LOG_DEVSLP_OFFSET + i;
                dev->devslp_timing[i] = sata_setting[j];
        }
}

static void ata_dev_config_cpr(struct ata_device *dev)
{
        unsigned int err_mask;
        size_t buf_len;
        int i, nr_cpr = 0;
        struct ata_cpr_log *cpr_log = NULL;
        u8 *desc, *buf = NULL;

        if (ata_id_major_version(dev->id) < 11)
                goto out;

        buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
        if (buf_len == 0)
                goto out;

        /*
         * Read the concurrent positioning ranges log (0x47). We can have at
         * most 255 32B range descriptors plus a 64B header. This log varies in
         * size, so use the size reported in the GPL directory. Reading beyond
         * the supported length will result in an error.
         */
        buf_len <<= 9;
        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf)
                goto out;

        err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
                                     0, buf, buf_len >> 9);
        if (err_mask)
                goto out;

        nr_cpr = buf[0];
        if (!nr_cpr)
                goto out;

        cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
        if (!cpr_log)
                goto out;

        cpr_log->nr_cpr = nr_cpr;
        desc = &buf[64];
        for (i = 0; i < nr_cpr; i++, desc += 32) {
                cpr_log->cpr[i].num = desc[0];
                cpr_log->cpr[i].num_storage_elements = desc[1];
                cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
                cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
        }

out:
        kfree(buf);
        swap(dev->cpr_log, cpr_log);
        kfree(cpr_log);
}

static void ata_dev_print_features(struct ata_device *dev)
{
        if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
                return;

        ata_dev_info(dev,
                     "Features:%s%s%s%s%s%s%s\n",
                     dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
                     dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
                     dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
                     dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
                     dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
                     dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
                     dev->cpr_log ? " CPR" : "");
}

/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @dev: Target device to configure
 *
 * Configure @dev according to @dev->id. Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;
        bool print_info = ata_dev_print_info(dev);
        const u16 *id = dev->id;
        unsigned int xfer_mask;
        unsigned int err_mask;
        char revbuf[7];         /* XYZ-99\0 */
        char fwrevbuf[ATA_ID_FW_REV_LEN+1];
        char modelbuf[ATA_ID_PROD_LEN+1];
        int rc;

        if (!ata_dev_enabled(dev)) {
                ata_dev_dbg(dev, "no device\n");
                return 0;
        }

        /* set horkage */
        dev->horkage |= ata_dev_blacklisted(dev);
        ata_force_horkage(dev);

        if (dev->horkage & ATA_HORKAGE_DISABLE) {
                ata_dev_info(dev, "unsupported device, disabling\n");
                ata_dev_disable(dev);
                return 0;
        }

        if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
            dev->class == ATA_DEV_ATAPI) {
                ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
                             atapi_enabled ? "not supported with this driver"
                             : "disabled");
                ata_dev_disable(dev);
                return 0;
        }

        rc = ata_do_link_spd_horkage(dev);
        if (rc)
                return rc;

        /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
        if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
            (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
                dev->horkage |= ATA_HORKAGE_NOLPM;

        if (ap->flags & ATA_FLAG_NO_LPM)
                dev->horkage |= ATA_HORKAGE_NOLPM;

        if (dev->horkage & ATA_HORKAGE_NOLPM) {
                ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
                dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
        }

        /* let ACPI work its magic */
        rc = ata_acpi_on_devcfg(dev);
        if (rc)
                return rc;

        /* massage HPA, do it early as it might change IDENTIFY data */
        rc = ata_hpa_resize(dev);
        if (rc)
                return rc;

        /* print device capabilities */
2625 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2626 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2628 id[49], id[82], id[83], id[84],
2629 id[85], id[86], id[87], id[88]);
2631 /* initialize to-be-configured parameters */
2632 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2633 dev->max_sectors = 0;
2639 dev->multi_count = 0;
2642 * common ATA, ATAPI feature tests
2645 /* find max transfer mode; for printk only */
2646 xfer_mask = ata_id_xfermask(id);
2648 ata_dump_id(dev, id);
2650 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2651 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2654 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2657 /* ATA-specific feature tests */
2658 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2659 if (ata_id_is_cfa(id)) {
2660 /* CPRM may make this media unusable */
2661 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2663 "supports DRM functions and may not be fully accessible\n");
2664 snprintf(revbuf, 7, "CFA");
2666 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2667 /* Warn the user if the device has TPM extensions */
2668 if (ata_id_has_tpm(id))
2670 "supports DRM functions and may not be fully accessible\n");
2673 dev->n_sectors = ata_id_n_sectors(id);
2675 /* get current R/W Multiple count setting */
2676 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2677 unsigned int max = dev->id[47] & 0xff;
2678 unsigned int cnt = dev->id[59] & 0xff;
2679 /* only recognize/allow powers of two here */
2680 if (is_power_of_2(max) && is_power_of_2(cnt))
2682 dev->multi_count = cnt;
2685 /* print device info to dmesg */
2687 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2688 revbuf, modelbuf, fwrevbuf,
2689 ata_mode_string(xfer_mask));
2691 if (ata_id_has_lba(id)) {
2692 rc = ata_dev_config_lba(dev);
2696 ata_dev_config_chs(dev);
2699 ata_dev_config_fua(dev);
2700 ata_dev_config_devslp(dev);
2701 ata_dev_config_sense_reporting(dev);
2702 ata_dev_config_zac(dev);
2703 ata_dev_config_trusted(dev);
2704 ata_dev_config_cpr(dev);
2708 ata_dev_print_features(dev);
2711 /* ATAPI-specific feature tests */
2712 else if (dev->class == ATA_DEV_ATAPI) {
2713 const char *cdb_intr_string = "";
2714 const char *atapi_an_string = "";
2715 const char *dma_dir_string = "";
2718 rc = atapi_cdb_len(id);
2719 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2720 ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
2724 dev->cdb_len = (unsigned int) rc;
2726 /* Enable ATAPI AN if both the host and device have
2727 * the support. If PMP is attached, SNTF is required
2728 * to enable ATAPI AN to discern between PHY status
2729 * changed notifications and ATAPI ANs.
2732 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2733 (!sata_pmp_attached(ap) ||
2734 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2735 /* issue SET feature command to turn this on */
2736 err_mask = ata_dev_set_feature(dev,
2737 SETFEATURES_SATA_ENABLE, SATA_AN);
2740 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2743 dev->flags |= ATA_DFLAG_AN;
2744 atapi_an_string = ", ATAPI AN";
2748 if (ata_id_cdb_intr(dev->id)) {
2749 dev->flags |= ATA_DFLAG_CDB_INTR;
2750 cdb_intr_string = ", CDB intr";
2753 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2754 dev->flags |= ATA_DFLAG_DMADIR;
2755 dma_dir_string = ", DMADIR";
2758 if (ata_id_has_da(dev->id)) {
2759 dev->flags |= ATA_DFLAG_DA;
2763 /* print device info to dmesg */
2766 "ATAPI: %s, %s, max %s%s%s%s\n",
2768 ata_mode_string(xfer_mask),
2769 cdb_intr_string, atapi_an_string,
2773 /* determine max_sectors */
2774 dev->max_sectors = ATA_MAX_SECTORS;
2775 if (dev->flags & ATA_DFLAG_LBA48)
2776 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2778 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2780 if (ata_dev_knobble(dev)) {
2782 ata_dev_info(dev, "applying bridge limits\n");
2783 dev->udma_mask &= ATA_UDMA5;
2784 dev->max_sectors = ATA_MAX_SECTORS;
2787 if ((dev->class == ATA_DEV_ATAPI) &&
2788 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2789 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2790 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2793 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2794 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2797 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2798 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2801 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2802 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2804 if (ap->ops->dev_config)
2805 ap->ops->dev_config(dev);
2807 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2808 /* Let the user know. We don't want to disallow opens for
2809 rescue purposes, or in case the vendor is just a blithering
2810 idiot. Do this after the dev_config call as some controllers
2811 with buggy firmware may want to avoid reporting false device
2816 "Drive reports diagnostics failure. This may indicate a drive\n");
2818 "fault or invalid emulation. Contact drive vendor for information.\n");
2822 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2823 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2824 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
2834 * ata_cable_40wire - return 40 wire cable type
2837 * Helper method for drivers which want to hardwire 40 wire cable
2841 int ata_cable_40wire(struct ata_port *ap)
2843 return ATA_CBL_PATA40;
2845 EXPORT_SYMBOL_GPL(ata_cable_40wire);
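/*
 * Example (illustrative sketch, not part of libata): a PATA driver with a
 * hardwired 40-wire cable can simply point .cable_detect at the helper
 * above; "foo_port_ops" is a hypothetical name.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 */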
2848 * ata_cable_80wire - return 80 wire cable type
2851 * Helper method for drivers which want to hardwire 80 wire cable
2855 int ata_cable_80wire(struct ata_port *ap)
2857 return ATA_CBL_PATA80;
2859 EXPORT_SYMBOL_GPL(ata_cable_80wire);
2862 * ata_cable_unknown - return unknown PATA cable.
2865 * Helper method for drivers which have no PATA cable detection.
2868 int ata_cable_unknown(struct ata_port *ap)
2870 return ATA_CBL_PATA_UNK;
2872 EXPORT_SYMBOL_GPL(ata_cable_unknown);
2875 * ata_cable_ignore - return ignored PATA cable.
2878 * Helper method for drivers which don't use cable type to limit
2881 int ata_cable_ignore(struct ata_port *ap)
2883 return ATA_CBL_PATA_IGN;
2885 EXPORT_SYMBOL_GPL(ata_cable_ignore);
2888 * ata_cable_sata - return SATA cable type
2891 * Helper method for drivers which have SATA cables
2894 int ata_cable_sata(struct ata_port *ap)
2896 return ATA_CBL_SATA;
2898 EXPORT_SYMBOL_GPL(ata_cable_sata);
2901 * ata_bus_probe - Reset and probe ATA bus
2904 * Master ATA bus probing function. Initiates a hardware-dependent
2905 * bus reset, then attempts to identify any devices found on
2909 * PCI/etc. bus probe sem.
2912 * Zero on success, negative errno otherwise.
2915 int ata_bus_probe(struct ata_port *ap)
2917 unsigned int classes[ATA_MAX_DEVICES];
2918 int tries[ATA_MAX_DEVICES];
2920 struct ata_device *dev;
2922 ata_for_each_dev(dev, &ap->link, ALL)
2923 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2926 ata_for_each_dev(dev, &ap->link, ALL) {
2927 /* If we issue an SRST then an ATA drive (not ATAPI)
2928 * may change configuration and be in PIO0 timing. If
2929 * we do a hard reset (or are coming from power on)
2930 * this is true for ATA or ATAPI. Until we've set a
2931 * suitable controller mode we should not touch the
2932 * bus as we may be talking too fast.
2934 dev->pio_mode = XFER_PIO_0;
2935 dev->dma_mode = 0xff;
2937 /* If the controller has a pio mode setup function
2938 * then use it to set the chipset to rights. Don't
2939 * touch the DMA setup as that will be dealt with when
2940 * configuring devices.
2942 if (ap->ops->set_piomode)
2943 ap->ops->set_piomode(ap, dev);
2946 /* reset and determine device classes */
2947 ap->ops->phy_reset(ap);
2949 ata_for_each_dev(dev, &ap->link, ALL) {
2950 if (dev->class != ATA_DEV_UNKNOWN)
2951 classes[dev->devno] = dev->class;
2953 classes[dev->devno] = ATA_DEV_NONE;
2955 dev->class = ATA_DEV_UNKNOWN;
2958 /* read IDENTIFY page and configure devices. We have to do the identify
2959 specific sequence bass-ackwards so that PDIAG- is released by
2962 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2963 if (tries[dev->devno])
2964 dev->class = classes[dev->devno];
2966 if (!ata_dev_enabled(dev))
2969 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2975 /* Now ask for the cable type as PDIAG- should have been released */
2976 if (ap->ops->cable_detect)
2977 ap->cbl = ap->ops->cable_detect(ap);
2979 /* We may have SATA bridge glue hiding here irrespective of
2980 * the reported cable types and sensed types. When SATA
2981 * drives indicate we have a bridge, we don't know which end
2982 * of the link the bridge is which is a problem.
2984 ata_for_each_dev(dev, &ap->link, ENABLED)
2985 if (ata_id_is_sata(dev->id))
2986 ap->cbl = ATA_CBL_SATA;
2988 /* After the identify sequence we can now set up the devices. We do
2989 this in the normal order so that the user doesn't get confused */
2991 ata_for_each_dev(dev, &ap->link, ENABLED) {
2992 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2993 rc = ata_dev_configure(dev);
2994 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2999 /* configure transfer mode */
3000 rc = ata_set_mode(&ap->link, &dev);
3004 ata_for_each_dev(dev, &ap->link, ENABLED)
3010 tries[dev->devno]--;
3014 /* eeek, something went very wrong, give up */
3015 tries[dev->devno] = 0;
3019 /* give it just one more chance */
3020 tries[dev->devno] = min(tries[dev->devno], 1);
3023 if (tries[dev->devno] == 1) {
3024 /* This is the last chance, better to slow
3025 * down than lose it.
3027 sata_down_spd_limit(&ap->link, 0);
3028 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3032 if (!tries[dev->devno])
3033 ata_dev_disable(dev);
3039 * sata_print_link_status - Print SATA link status
3040 * @link: SATA link to printk link status about
3042 * This function prints link speed and status of a SATA link.
3047 static void sata_print_link_status(struct ata_link *link)
3049 u32 sstatus, scontrol, tmp;
3051 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3053 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3056 if (ata_phys_link_online(link)) {
3057 tmp = (sstatus >> 4) & 0xf;
3058 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3059 sata_spd_string(tmp), sstatus, scontrol);
3061 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3067 * ata_dev_pair - return other device on cable
3070 * Obtain the other device on the same cable, or NULL if
3071 * none is present.
3074 struct ata_device *ata_dev_pair(struct ata_device *adev)
3076 struct ata_link *link = adev->link;
3077 struct ata_device *pair = &link->device[1 - adev->devno];
3078 if (!ata_dev_enabled(pair))
3082 EXPORT_SYMBOL_GPL(ata_dev_pair);
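/*
 * Example (illustrative sketch): PATA timing callbacks can use
 * ata_dev_pair() to honour shared-cable constraints. The "foo" helpers
 * below are hypothetical, not libata APIs.
 *
 *	static void foo_set_piomode(struct ata_port *ap, struct ata_device *adev)
 *	{
 *		struct ata_device *pair = ata_dev_pair(adev);
 *		u8 mode = adev->pio_mode;
 *
 *		// devices sharing a cable must use timings both can tolerate
 *		if (pair && pair->pio_mode < mode)
 *			mode = pair->pio_mode;
 *		foo_program_pio_timings(ap, adev, mode);
 *	}
 */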
3085 * sata_down_spd_limit - adjust SATA spd limit downward
3086 * @link: Link to adjust SATA spd limit for
3087 * @spd_limit: Additional limit
3089 * Adjust SATA spd limit of @link downward. Note that this
3090 * function only adjusts the limit. The change must be applied
3091 * using sata_set_spd().
3093 * If @spd_limit is non-zero, the speed is limited to equal to or
3094 * lower than @spd_limit if such speed is supported. If
3095 * @spd_limit is slower than any supported speed, only the lowest
3096 * supported speed is allowed.
3099 * Inherited from caller.
3102 * 0 on success, negative errno on failure
3104 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3106 u32 sstatus, spd, mask;
3109 if (!sata_scr_valid(link))
3112 /* If SCR can be read, use it to determine the current SPD.
3113 * If not, use cached value in link->sata_spd.
3115 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3116 if (rc == 0 && ata_sstatus_online(sstatus))
3117 spd = (sstatus >> 4) & 0xf;
3119 spd = link->sata_spd;
3121 mask = link->sata_spd_limit;
3125 /* unconditionally mask off the highest bit */
3126 bit = fls(mask) - 1;
3127 mask &= ~(1 << bit);
3130 * Mask off all speeds higher than or equal to the current one. At
3131 * this point, if current SPD is not available and we previously
3132 * recorded the link speed from SStatus, the driver has already
3133 * masked off the highest bit so mask should already be 1 or 0.
3134 * Otherwise, we should not force 1.5Gbps on a link where we have
3135 * not previously recorded speed from SStatus. Just return in this
3139 mask &= (1 << (spd - 1)) - 1;
3140 else if (link->sata_spd)
3143 /* were we already at the bottom? */
3148 if (mask & ((1 << spd_limit) - 1))
3149 mask &= (1 << spd_limit) - 1;
3151 bit = ffs(mask) - 1;
3156 link->sata_spd_limit = mask;
3158 ata_link_warn(link, "limiting SATA link speed to %s\n",
3159 sata_spd_string(fls(mask)));
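/*
 * Example (illustrative): as noted above, the new limit is not applied
 * automatically; a typical caller pairs this helper with sata_set_spd():
 *
 *	if (sata_down_spd_limit(link, 0) == 0)
 *		rc = sata_set_spd(link);
 */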
3164 #ifdef CONFIG_ATA_ACPI
3166 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3167 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3168 * @cycle: cycle duration in ns
3170 * Return matching xfer mode for @cycle. The returned mode is of
3171 * the transfer type specified by @xfer_shift. If @cycle is too
3172 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3173 than the fastest known mode, the fastest mode is returned.
3179 * Matching xfer_mode, 0xff if no match found.
3181 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3183 u8 base_mode = 0xff, last_mode = 0xff;
3184 const struct ata_xfer_ent *ent;
3185 const struct ata_timing *t;
3187 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3188 if (ent->shift == xfer_shift)
3189 base_mode = ent->base;
3191 for (t = ata_timing_find_mode(base_mode);
3192 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3193 unsigned short this_cycle;
3195 switch (xfer_shift) {
3197 case ATA_SHIFT_MWDMA:
3198 this_cycle = t->cycle;
3200 case ATA_SHIFT_UDMA:
3201 this_cycle = t->udma;
3207 if (cycle > this_cycle)
3210 last_mode = t->mode;
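/*
 * Example (illustrative): assuming the standard ata_timing table, a 60 ns
 * UDMA cycle resolves to UDMA/33:
 *
 *	u8 mode = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 60);	// XFER_UDMA_2
 */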
3218 * ata_down_xfermask_limit - adjust dev xfer masks downward
3219 * @dev: Device to adjust xfer masks
3220 * @sel: ATA_DNXFER_* selector
3222 * Adjust xfer masks of @dev downward. Note that this function
3223 * does not apply the change. Invoking ata_set_mode() afterwards
3224 * will apply the limit.
3227 * Inherited from caller.
3230 * 0 on success, negative errno on failure
3232 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3235 unsigned int orig_mask, xfer_mask;
3236 unsigned int pio_mask, mwdma_mask, udma_mask;
3239 quiet = !!(sel & ATA_DNXFER_QUIET);
3240 sel &= ~ATA_DNXFER_QUIET;
3242 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3245 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3248 case ATA_DNXFER_PIO:
3249 highbit = fls(pio_mask) - 1;
3250 pio_mask &= ~(1 << highbit);
3253 case ATA_DNXFER_DMA:
3255 highbit = fls(udma_mask) - 1;
3256 udma_mask &= ~(1 << highbit);
3259 } else if (mwdma_mask) {
3260 highbit = fls(mwdma_mask) - 1;
3261 mwdma_mask &= ~(1 << highbit);
3267 case ATA_DNXFER_40C:
3268 udma_mask &= ATA_UDMA_MASK_40C;
3271 case ATA_DNXFER_FORCE_PIO0:
3274 case ATA_DNXFER_FORCE_PIO:
3283 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3285 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3289 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3290 snprintf(buf, sizeof(buf), "%s:%s",
3291 ata_mode_string(xfer_mask),
3292 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3294 snprintf(buf, sizeof(buf), "%s",
3295 ata_mode_string(xfer_mask));
3297 ata_dev_warn(dev, "limiting speed to %s\n", buf);
3300 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3306 static int ata_dev_set_mode(struct ata_device *dev)
3308 struct ata_port *ap = dev->link->ap;
3309 struct ata_eh_context *ehc = &dev->link->eh_context;
3310 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3311 const char *dev_err_whine = "";
3312 int ign_dev_err = 0;
3313 unsigned int err_mask = 0;
3316 dev->flags &= ~ATA_DFLAG_PIO;
3317 if (dev->xfer_shift == ATA_SHIFT_PIO)
3318 dev->flags |= ATA_DFLAG_PIO;
3320 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3321 dev_err_whine = " (SET_XFERMODE skipped)";
3325 "NOSETXFER but PATA detected - can't "
3326 "skip SETXFER, might malfunction\n");
3327 err_mask = ata_dev_set_xfermode(dev);
3330 if (err_mask & ~AC_ERR_DEV)
3334 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3335 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3336 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3340 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3341 /* Old CFA may refuse this command, which is just fine */
3342 if (ata_id_is_cfa(dev->id))
3344 /* Catch several broken garbage emulations plus some pre
3346 if (ata_id_major_version(dev->id) == 0 &&
3347 dev->pio_mode <= XFER_PIO_2)
3349 /* Some very old devices and some bad newer ones fail
3350 any kind of SET_XFERMODE request but support PIO0-2
3351 timings and no IORDY */
3352 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3355 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3356 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3357 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3358 dev->dma_mode == XFER_MW_DMA_0 &&
3359 (dev->id[63] >> 8) & 1)
3362 /* if the device is actually configured correctly, ignore dev err */
3363 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3366 if (err_mask & AC_ERR_DEV) {
3370 dev_err_whine = " (device error ignored)";
3373 ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
3374 dev->xfer_shift, (int)dev->xfer_mode);
3376 if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3377 ehc->i.flags & ATA_EHI_DID_HARDRESET)
3378 ata_dev_info(dev, "configured for %s%s\n",
3379 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3385 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3390 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3391 * @link: link on which timings will be programmed
3392 * @r_failed_dev: out parameter for failed device
3394 * Standard implementation of the function used to tune and set
3395 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3396 * ata_dev_set_mode() fails, pointer to the failing device is
3397 * returned in @r_failed_dev.
3400 * PCI/etc. bus probe sem.
3403 * 0 on success, negative errno otherwise
3406 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3408 struct ata_port *ap = link->ap;
3409 struct ata_device *dev;
3410 int rc = 0, used_dma = 0, found = 0;
3412 /* step 1: calculate xfer_mask */
3413 ata_for_each_dev(dev, link, ENABLED) {
3414 unsigned int pio_mask, dma_mask;
3415 unsigned int mode_mask;
3417 mode_mask = ATA_DMA_MASK_ATA;
3418 if (dev->class == ATA_DEV_ATAPI)
3419 mode_mask = ATA_DMA_MASK_ATAPI;
3420 else if (ata_id_is_cfa(dev->id))
3421 mode_mask = ATA_DMA_MASK_CFA;
3423 ata_dev_xfermask(dev);
3424 ata_force_xfermask(dev);
3426 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3428 if (libata_dma_mask & mode_mask)
3429 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3434 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3435 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3438 if (ata_dma_enabled(dev))
3444 /* step 2: always set host PIO timings */
3445 ata_for_each_dev(dev, link, ENABLED) {
3446 if (dev->pio_mode == 0xff) {
3447 ata_dev_warn(dev, "no PIO support\n");
3452 dev->xfer_mode = dev->pio_mode;
3453 dev->xfer_shift = ATA_SHIFT_PIO;
3454 if (ap->ops->set_piomode)
3455 ap->ops->set_piomode(ap, dev);
3458 /* step 3: set host DMA timings */
3459 ata_for_each_dev(dev, link, ENABLED) {
3460 if (!ata_dma_enabled(dev))
3463 dev->xfer_mode = dev->dma_mode;
3464 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3465 if (ap->ops->set_dmamode)
3466 ap->ops->set_dmamode(ap, dev);
3469 /* step 4: update devices' xfer mode */
3470 ata_for_each_dev(dev, link, ENABLED) {
3471 rc = ata_dev_set_mode(dev);
3476 /* Record simplex status. If we selected DMA then the other
3477 * host channels are not permitted to do so.
3479 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3480 ap->host->simplex_claimed = ap;
3484 *r_failed_dev = dev;
3487 EXPORT_SYMBOL_GPL(ata_do_set_mode);
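/*
 * Example (illustrative sketch): controllers that need extra work around
 * mode programming typically wrap this helper from their own ->set_mode;
 * "foo_set_mode" is a hypothetical name.
 *
 *	static int foo_set_mode(struct ata_link *link,
 *				struct ata_device **r_failed)
 *	{
 *		// controller-specific pre-programming fixups would go here
 *		return ata_do_set_mode(link, r_failed);
 *	}
 */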
3490 * ata_wait_ready - wait for link to become ready
3491 * @link: link to be waited on
3492 * @deadline: deadline jiffies for the operation
3493 * @check_ready: callback to check link readiness
3495 * Wait for @link to become ready. @check_ready should return
3496 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3497 * link doesn't seem to be occupied, other errno for other error
3500 * Transient -ENODEV conditions are allowed for
3501 * ATA_TMOUT_FF_WAIT.
3507 * 0 if @link is ready before @deadline; otherwise, -errno.
3509 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3510 int (*check_ready)(struct ata_link *link))
3512 unsigned long start = jiffies;
3513 unsigned long nodev_deadline;
3516 /* choose which 0xff timeout to use, read comment in libata.h */
3517 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3518 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3520 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3522 /* Slave readiness can't be tested separately from master. On
3523 * M/S emulation configuration, this function should be called
3524 * only on the master and it will handle both master and slave.
3526 WARN_ON(link == link->ap->slave_link);
3528 if (time_after(nodev_deadline, deadline))
3529 nodev_deadline = deadline;
3532 unsigned long now = jiffies;
3535 ready = tmp = check_ready(link);
3540 * -ENODEV could be transient. Ignore -ENODEV if link
3541 * is online. Also, some SATA devices take a long
3542 * time to clear 0xff after reset. Wait for
3543 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3546 * Note that some PATA controllers (pata_ali) explode
3547 * if status register is read more than once when
3548 * there's no device attached.
3550 if (ready == -ENODEV) {
3551 if (ata_link_online(link))
3553 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3554 !ata_link_offline(link) &&
3555 time_before(now, nodev_deadline))
3561 if (time_after(now, deadline))
3564 if (!warned && time_after(now, start + 5 * HZ) &&
3565 (deadline - now > 3 * HZ)) {
3567 "link is slow to respond, please be patient "
3568 "(ready=%d)\n", tmp);
3572 ata_msleep(link->ap, 50);
3577 * ata_wait_after_reset - wait for link to become ready after reset
3578 * @link: link to be waited on
3579 * @deadline: deadline jiffies for the operation
3580 * @check_ready: callback to check link readiness
3582 * Wait for @link to become ready after reset.
3588 * 0 if @link is ready before @deadline; otherwise, -errno.
3590 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3591 int (*check_ready)(struct ata_link *link))
3593 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3595 return ata_wait_ready(link, deadline, check_ready);
3597 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
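/*
 * Example (illustrative sketch): a reset method usually finishes by waiting
 * for readiness through a controller-specific callback. "foo_read_status"
 * is hypothetical; ata_check_ready() is the stock status decoder.
 *
 *	static int foo_check_ready(struct ata_link *link)
 *	{
 *		// 1 = ready, 0 = not ready, -ENODEV = no device
 *		return ata_check_ready(foo_read_status(link));
 *	}
 *
 *	rc = ata_wait_after_reset(link, deadline, foo_check_ready);
 */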
3600 * ata_std_prereset - prepare for reset
3601 * @link: ATA link to be reset
3602 * @deadline: deadline jiffies for the operation
3604 * @link is about to be reset. Initialize it. Failure from
3605 * prereset makes libata abort whole reset sequence and give up
3606 * that port, so prereset should be best-effort. It does its
3607 * best to prepare for reset sequence but if things go wrong, it
3608 * should just whine, not fail.
3611 * Kernel thread context (may sleep)
3616 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3618 struct ata_port *ap = link->ap;
3619 struct ata_eh_context *ehc = &link->eh_context;
3620 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3623 /* if we're about to do hardreset, nothing more to do */
3624 if (ehc->i.action & ATA_EH_HARDRESET)
3627 /* if SATA, resume link */
3628 if (ap->flags & ATA_FLAG_SATA) {
3629 rc = sata_link_resume(link, timing, deadline);
3630 /* whine about phy resume failure but proceed */
3631 if (rc && rc != -EOPNOTSUPP)
3633 "failed to resume link for reset (errno=%d)\n",
3637 /* no point in trying softreset on offline link */
3638 if (ata_phys_link_offline(link))
3639 ehc->i.action &= ~ATA_EH_SOFTRESET;
3643 EXPORT_SYMBOL_GPL(ata_std_prereset);
3646 * sata_std_hardreset - COMRESET w/o waiting or classification
3647 * @link: link to reset
3648 * @class: resulting class of attached device
3649 * @deadline: deadline jiffies for the operation
3651 * Standard SATA COMRESET w/o waiting or classification.
3654 * Kernel thread context (may sleep)
3657 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3659 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3660 unsigned long deadline)
3662 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3667 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3668 return online ? -EAGAIN : rc;
3670 EXPORT_SYMBOL_GPL(sata_std_hardreset);
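/*
 * Example (illustrative sketch): drivers needing follow-up work after
 * COMRESET commonly wrap this helper; remember that -EAGAIN only means
 * the link came up and must still be waited on. "foo_hardreset" is a
 * hypothetical name.
 *
 *	static int foo_hardreset(struct ata_link *link, unsigned int *class,
 *				 unsigned long deadline)
 *	{
 *		int rc = sata_std_hardreset(link, class, deadline);
 *
 *		// -EAGAIN: link online, controller-specific follow-up here
 *		return rc;
 *	}
 */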
3673 * ata_std_postreset - standard postreset callback
3674 * @link: the target ata_link
3675 * @classes: classes of attached devices
3677 * This function is invoked after a successful reset. Note that
3678 * the device might have been reset more than once using
3679 * different reset methods before postreset is invoked.
3682 * Kernel thread context (may sleep)
3684 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3688 /* reset complete, clear SError */
3689 if (!sata_scr_read(link, SCR_ERROR, &serror))
3690 sata_scr_write(link, SCR_ERROR, serror);
3692 /* print link status */
3693 sata_print_link_status(link);
3695 EXPORT_SYMBOL_GPL(ata_std_postreset);
3698 * ata_dev_same_device - Determine whether new ID matches configured device
3699 * @dev: device to compare against
3700 * @new_class: class of the new device
3701 * @new_id: IDENTIFY page of the new device
3703 * Compare @new_class and @new_id against @dev and determine
3704 * whether @dev is the device indicated by @new_class and
3711 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3713 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3716 const u16 *old_id = dev->id;
3717 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3718 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3720 if (dev->class != new_class) {
3721 ata_dev_info(dev, "class mismatch %d != %d\n",
3722 dev->class, new_class);
3726 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3727 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3728 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3729 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3731 if (strcmp(model[0], model[1])) {
3732 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3733 model[0], model[1]);
3737 if (strcmp(serial[0], serial[1])) {
3738 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3739 serial[0], serial[1]);
3747 * ata_dev_reread_id - Re-read IDENTIFY data
3748 * @dev: target ATA device
3749 * @readid_flags: read ID flags
3751 * Re-read IDENTIFY page and make sure @dev is still attached to
3755 * Kernel thread context (may sleep)
3758 * 0 on success, negative errno otherwise
3760 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3762 unsigned int class = dev->class;
3763 u16 *id = (void *)dev->link->ap->sector_buf;
3767 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3771 /* is the device still there? */
3772 if (!ata_dev_same_device(dev, class, id))
3775 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3780 * ata_dev_revalidate - Revalidate ATA device
3781 * @dev: device to revalidate
3782 * @new_class: new class code
3783 * @readid_flags: read ID flags
3785 * Re-read IDENTIFY page, make sure @dev is still attached to the
3786 * port and reconfigure it according to the new IDENTIFY page.
3789 * Kernel thread context (may sleep)
3792 * 0 on success, negative errno otherwise
3794 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3795 unsigned int readid_flags)
3797 u64 n_sectors = dev->n_sectors;
3798 u64 n_native_sectors = dev->n_native_sectors;
3801 if (!ata_dev_enabled(dev))
3804 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3805 if (ata_class_enabled(new_class) &&
3806 new_class != ATA_DEV_ATA &&
3807 new_class != ATA_DEV_ATAPI &&
3808 new_class != ATA_DEV_ZAC &&
3809 new_class != ATA_DEV_SEMB) {
3810 ata_dev_info(dev, "class mismatch %u != %u\n",
3811 dev->class, new_class);
3817 rc = ata_dev_reread_id(dev, readid_flags);
3821 /* configure device according to the new ID */
3822 rc = ata_dev_configure(dev);
3826 /* verify n_sectors hasn't changed */
3827 if (dev->class != ATA_DEV_ATA || !n_sectors ||
3828 dev->n_sectors == n_sectors)
3831 /* n_sectors has changed */
3832 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3833 (unsigned long long)n_sectors,
3834 (unsigned long long)dev->n_sectors);
3837 * Something could have caused HPA to be unlocked
3838 * involuntarily. If n_native_sectors hasn't changed and the
3839 * new size matches it, keep the device.
3841 if (dev->n_native_sectors == n_native_sectors &&
3842 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
3844 "new n_sectors matches native, probably "
3845 "late HPA unlock, n_sectors updated\n");
3846 /* use the larger n_sectors */
3851 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
3852 * unlocking HPA in those cases.
3854 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
3856 if (dev->n_native_sectors == n_native_sectors &&
3857 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
3858 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
3860 "old n_sectors matches native, probably "
3861 "late HPA lock, will try to unlock HPA\n");
3862 /* try unlocking HPA */
3863 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
3868 /* restore original n_[native_]sectors and fail */
3869 dev->n_native_sectors = n_native_sectors;
3870 dev->n_sectors = n_sectors;
3872 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
3876 struct ata_blacklist_entry {
3877 const char *model_num;
3878 const char *model_rev;
3879 unsigned long horkage;
3882 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3883 /* Devices with DMA related problems under Linux */
3884 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3885 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3886 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3887 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3888 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3889 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3890 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3891 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3892 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3893 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
3894 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3895 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3896 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3897 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3898 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3899 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
3900 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3901 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3902 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3903 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3904 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3905 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3906 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3907 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3908 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3909 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3910 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
3911 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3912 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
3913 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
3914 /* Odd clown on sil3726/4726 PMPs */
3915 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
3916 /* Similar story with ASMedia 1092 */
3917 { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
3919 /* Weird ATAPI devices */
3920 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3921 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
3922 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
3923 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
3926 * Causes silent data corruption with higher max sects.
3927 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
3929 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
3932 * These devices time out with higher max sects.
3933 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
3935 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
3936 { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
3938 /* Devices we expect to fail diagnostics */
3940 /* Devices where NCQ should be avoided */
3942 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3943 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ },
3944 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3945 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3947 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
3948 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3949 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
3950 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
3951 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
3953 /* Seagate NCQ + FLUSH CACHE firmware bug */
3954 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3955 ATA_HORKAGE_FIRMWARE_WARN },
3957 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3958 ATA_HORKAGE_FIRMWARE_WARN },
3960 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3961 ATA_HORKAGE_FIRMWARE_WARN },
3963 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3964 ATA_HORKAGE_FIRMWARE_WARN },
3966 /* drives which fail FPDMA_AA activation (some may freeze afterwards);
3967 the ST disks also have LPM issues */
3968 { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
3969 ATA_HORKAGE_NOLPM },
3970 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
3972 /* Blacklist entries taken from Silicon Image 3124/3132
3973 Windows driver .inf file - also several Linux problem reports */
3974 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ },
3975 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ },
3976 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ },
3978 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
3979 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ },
3981 /* Sandisk SD7/8/9s lock up hard on large trims */
3982 { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M },
3984 /* devices which puke on READ_NATIVE_MAX */
3985 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA },
3986 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3987 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3988 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3990 /* this one allows HPA unlocking but fails IOs on the area */
3991 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
3993 /* Devices which report 1 sector over size HPA */
3994 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE },
3995 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE },
3996 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE },
3998 /* Devices which get the IVB wrong */
3999 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
4000 /* Maybe we should just blacklist TSSTcorp... */
4001 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB },
4003 /* Devices that do not need bridging limits applied */
4004 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK },
4005 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK },
4007 /* Devices which aren't very happy with higher link speeds */
4008 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS },
4009 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS },
4012 * Devices which choke on SETXFER. Applies only if both the
4013 * device and controller are SATA.
4015 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4016 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
4017 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
4018 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4019 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4021 /* These specific Pioneer models have LPM issues */
4022 { "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
4023 { "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
4025 /* Crucial BX100 SSD 500GB has broken LPM support */
4026 { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
4028 /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4029 { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4030 ATA_HORKAGE_ZERO_AFTER_TRIM |
4031 ATA_HORKAGE_NOLPM },
4032 /* 512GB MX100 with newer firmware has only LPM issues */
4033 { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
4034 ATA_HORKAGE_NOLPM },
4036 /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4037 { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4038 ATA_HORKAGE_ZERO_AFTER_TRIM |
4039 ATA_HORKAGE_NOLPM },
4040 { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4041 ATA_HORKAGE_ZERO_AFTER_TRIM |
4042 ATA_HORKAGE_NOLPM },
4044 /* These specific Samsung models/firmware-revs do not handle LPM well */
4045 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
4046 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
4047 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM },
4048 { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },
4050 /* devices that don't properly handle queued TRIM commands */
4051 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4052 ATA_HORKAGE_ZERO_AFTER_TRIM },
4053 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4054 ATA_HORKAGE_ZERO_AFTER_TRIM },
4055 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4056 ATA_HORKAGE_ZERO_AFTER_TRIM },
4057 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4058 ATA_HORKAGE_ZERO_AFTER_TRIM },
4059 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4060 ATA_HORKAGE_ZERO_AFTER_TRIM },
4061 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4062 ATA_HORKAGE_ZERO_AFTER_TRIM },
4063 { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4064 ATA_HORKAGE_NO_DMA_LOG |
4065 ATA_HORKAGE_ZERO_AFTER_TRIM },
4066 { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4067 ATA_HORKAGE_ZERO_AFTER_TRIM },
4068 { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4069 ATA_HORKAGE_ZERO_AFTER_TRIM },
4070 { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4071 ATA_HORKAGE_ZERO_AFTER_TRIM |
4072 ATA_HORKAGE_NO_NCQ_ON_ATI },
4073 { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4074 ATA_HORKAGE_ZERO_AFTER_TRIM |
4075 ATA_HORKAGE_NO_NCQ_ON_ATI },
4076 { "SAMSUNG*MZ7LH*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4077 ATA_HORKAGE_ZERO_AFTER_TRIM |
4078 ATA_HORKAGE_NO_NCQ_ON_ATI, },
4079 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4080 ATA_HORKAGE_ZERO_AFTER_TRIM },
4082 /* devices that don't properly handle TRIM commands */
4083 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM },
4084 { "M88V29*", NULL, ATA_HORKAGE_NOTRIM },
4087 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4088 * (Return Zero After Trim) flags in the ATA Command Set are
4089 * unreliable in the sense that they only define what happens if
4090 * the device successfully executed the DSM TRIM command. TRIM
4091 * is only advisory, however, and the device is free to silently
4092 * ignore all or parts of the request.
4094 * Whitelist drives that are known to reliably return zeroes
4099 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4100 * that model before whitelisting all other intel SSDs.
4102 { "INTEL*SSDSC2MH*", NULL, 0 },
4104 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4105 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4106 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4107 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4108 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4109 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4110 { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4111 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4114 * Some WD SATA-I drives spin up and down erratically when the link
4115 * is put into the slumber mode. We don't have full list of the
4116 * affected devices. Disable LPM if the device matches one of the
4117 * known prefixes and is SATA-1. As a side effect LPM partial is
4120 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4122 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4123 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4124 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4125 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4126 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4127 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4128 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4131 * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
4132 * log page is accessed. Ensure we never ask for this log page with
4135 { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
4138 { "Maxtor", "BANC1G10", ATA_HORKAGE_NO_FUA },
4139 { "WDC*WD2500J*", NULL, ATA_HORKAGE_NO_FUA },
4140 { "OCZ-VERTEX*", NULL, ATA_HORKAGE_NO_FUA },
4141 { "INTEL*SSDSC2CT*", NULL, ATA_HORKAGE_NO_FUA },
4147 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4149 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4150 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4151 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4153 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4154 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4156 while (ad->model_num) {
4157 if (glob_match(ad->model_num, model_num)) {
4158 if (ad->model_rev == NULL)
4160 if (glob_match(ad->model_rev, model_rev))
4168 static int ata_dma_blacklisted(const struct ata_device *dev)
4170 /* We don't support polling DMA.
4171 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4172 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4174 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4175 (dev->flags & ATA_DFLAG_CDB_INTR))
4177 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4181 * ata_is_40wire - check drive side detection
4184 * Perform drive side detection decoding, allowing for device vendors
4185 * who can't follow the documentation.
4188 static int ata_is_40wire(struct ata_device *dev)
4190 if (dev->horkage & ATA_HORKAGE_IVB)
4191 return ata_drive_40wire_relaxed(dev->id);
4192 return ata_drive_40wire(dev->id);
4196 * cable_is_40wire - 40/80/SATA decider
4197 * @ap: port to consider
4199 * This function encapsulates the policy for speed management
4200 * in one place. At the moment we don't cache the result but
4201 * there is a good case for setting ap->cbl to the result when
4202 * we are called with unknown cables (and figuring out if it
4203 * impacts hotplug at all).
4205 * Return 1 if the cable appears to be 40 wire.
4208 static int cable_is_40wire(struct ata_port *ap)
4210 struct ata_link *link;
4211 struct ata_device *dev;
4213 /* If the controller thinks we are 40 wire, we are. */
4214 if (ap->cbl == ATA_CBL_PATA40)
4217 /* If the controller thinks we are 80 wire, we are. */
4218 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4221 /* If the system is known to be 40 wire short cable (eg
4222 * laptop), then we allow 80 wire modes even if the drive
4225 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4228 /* If the controller doesn't know, we scan.
4230 * Note: We look for all 40 wire detects at this point. Any
4231 * 80 wire detect is taken to be 80 wire cable because
4232 * - in many setups only the one drive (slave if present) will
4233 * give a valid detect
4234 * - if you have a non detect capable drive you don't want it
4235 * to colour the choice
4237 ata_for_each_link(link, ap, EDGE) {
4238 ata_for_each_dev(dev, link, ENABLED) {
4239 if (!ata_is_40wire(dev))
4247 * ata_dev_xfermask - Compute supported xfermask of the given device
4248 * @dev: Device to compute xfermask for
4250 * Compute supported xfermask of @dev and store it in
4251 * dev->*_mask. This function is responsible for applying all
4252 * known limits including host controller limits, device
4258 static void ata_dev_xfermask(struct ata_device *dev)
4260 struct ata_link *link = dev->link;
4261 struct ata_port *ap = link->ap;
4262 struct ata_host *host = ap->host;
4263 unsigned int xfer_mask;
4265 /* controller modes available */
4266 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4267 ap->mwdma_mask, ap->udma_mask);
4269 /* drive modes available */
4270 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4271 dev->mwdma_mask, dev->udma_mask);
4272 xfer_mask &= ata_id_xfermask(dev->id);
4275 * CFA Advanced TrueIDE timings are not allowed on a shared
4278 if (ata_dev_pair(dev)) {
4279 /* No PIO5 or PIO6 */
4280 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4281 /* No MWDMA3 or MWDMA 4 */
4282 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4285 if (ata_dma_blacklisted(dev)) {
4286 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4288 "device is on DMA blacklist, disabling DMA\n");
4291 if ((host->flags & ATA_HOST_SIMPLEX) &&
4292 host->simplex_claimed && host->simplex_claimed != ap) {
4293 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4295 "simplex DMA is claimed by other device, disabling DMA\n");
4298 if (ap->flags & ATA_FLAG_NO_IORDY)
4299 xfer_mask &= ata_pio_mask_no_iordy(dev);
4301 if (ap->ops->mode_filter)
4302 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4304 /* Apply cable rule here. Don't apply it early because when
4305 * we handle hot plug the cable type can itself change.
4306 * Check this last so that we know if the transfer rate was
4307 * solely limited by the cable.
4308 * Unknown or 80 wire cables reported host side are checked
4309 * drive side as well. Cases where we know a 40wire cable
4310 * is used safely for 80 are not checked here.
4312 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4313 /* UDMA/44 or higher would be available */
4314 if (cable_is_40wire(ap)) {
4316 "limited to UDMA/33 due to 40-wire cable\n");
4317 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4320 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4321 &dev->mwdma_mask, &dev->udma_mask);
4325 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4326 * @dev: Device to which command will be sent
4328 * Issue SET FEATURES - XFER MODE command to device @dev
4332 * PCI/etc. bus probe sem.
4335 * 0 on success, AC_ERR_* mask otherwise.
4338 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4340 struct ata_taskfile tf;
4342 /* set up set-features taskfile */
4343 ata_dev_dbg(dev, "set features - xfer mode\n");
4345 /* Some controllers and ATAPI devices show flaky interrupt
4346 * behavior after setting xfer mode. Use polling instead.
4348 ata_tf_init(dev, &tf);
4349 tf.command = ATA_CMD_SET_FEATURES;
4350 tf.feature = SETFEATURES_XFER;
4351 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4352 tf.protocol = ATA_PROT_NODATA;
4353 /* If we are using IORDY we must send the mode setting command */
4354 if (ata_pio_need_iordy(dev))
4355 tf.nsect = dev->xfer_mode;
4356 /* If the device has IORDY and the controller does not - turn it off */
4357 else if (ata_id_has_iordy(dev->id))
4359 else /* In the ancient relic department - skip all of this */
4363 * On some disks, this command causes spin-up, so we need longer
4366 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4370 * ata_dev_set_feature - Issue SET FEATURES
4371 * @dev: Device to which command will be sent
4372 * @subcmd: The SET FEATURES subcommand to be sent
4373 * @action: The sector count represents a subcommand specific action
4375 * Issue SET FEATURES command to device @dev on port @ap with sector count
4378 * PCI/etc. bus probe sem.
4381 * 0 on success, AC_ERR_* mask otherwise.
4383 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 subcmd, u8 action)
4385 struct ata_taskfile tf;
4386 unsigned int timeout = 0;
4388 /* set up set-features taskfile */
4389 ata_dev_dbg(dev, "set features\n");
4391 ata_tf_init(dev, &tf);
4392 tf.command = ATA_CMD_SET_FEATURES;
4393 tf.feature = subcmd;
4394 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4395 tf.protocol = ATA_PROT_NODATA;
4398 if (subcmd == SETFEATURES_SPINUP)
4399 timeout = ata_probe_timeout ?
4400 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4402 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4404 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
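/*
 * Example (illustrative): enabling the volatile write cache uses this
 * helper with the SETFEATURES_WC_ON subcommand and no action value:
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_WC_ON, 0);
 *	if (err_mask)
 *		ata_dev_warn(dev, "failed to enable write cache\n");
 */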
4407 * ata_dev_init_params - Issue INIT DEV PARAMS command
4408 * @dev: Device to which command will be sent
4409 * @heads: Number of heads (taskfile parameter)
4410 * @sectors: Number of sectors (taskfile parameter)
4413 * Kernel thread context (may sleep)
4416 * 0 on success, AC_ERR_* mask otherwise.
4418 static unsigned int ata_dev_init_params(struct ata_device *dev,
4419 u16 heads, u16 sectors)
4421 struct ata_taskfile tf;
4422 unsigned int err_mask;
4424 /* Number of sectors per track 1-255. Number of heads 1-16 */
4425 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4426 return AC_ERR_INVALID;
4428 /* set up init dev params taskfile */
4429 ata_dev_dbg(dev, "init dev params\n");
4431 ata_tf_init(dev, &tf);
4432 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4433 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4434 tf.protocol = ATA_PROT_NODATA;
4436 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4438 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4439 /* A clean abort indicates an original or just out-of-spec drive
4440 and we should continue as we issue the setup based on the
4441 drive's reported working geometry */
4442 if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
4449 * atapi_check_dma - Check whether ATAPI DMA can be supported
4450 * @qc: Metadata associated with taskfile to check
4452 * Allow low-level driver to filter ATA PACKET commands, returning
4453 * a status indicating whether or not it is OK to use DMA for the
4454 * supplied PACKET command.
4457 * spin_lock_irqsave(host lock)
4459 * RETURNS: 0 when ATAPI DMA can be used, nonzero otherwise
4462 int atapi_check_dma(struct ata_queued_cmd *qc)
4464 struct ata_port *ap = qc->ap;
4466 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4467 * few ATAPI devices choke on such DMA requests.
4469 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4470 unlikely(qc->nbytes & 15))
4473 if (ap->ops->check_atapi_dma)
4474 return ap->ops->check_atapi_dma(qc);
4480 * ata_std_qc_defer - Check whether a qc needs to be deferred
4481 * @qc: ATA command in question
4483 * Non-NCQ commands cannot run with any other command, NCQ or
4484 * not. As upper layer only knows the queue depth, we are
4485 * responsible for maintaining exclusion. This function checks
4486 * whether a new command @qc can be issued.
4489 * spin_lock_irqsave(host lock)
4492 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4494 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4496 struct ata_link *link = qc->dev->link;
4498 if (ata_is_ncq(qc->tf.protocol)) {
4499 if (!ata_tag_valid(link->active_tag))
4502 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4506 return ATA_DEFER_LINK;
4508 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
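/*
 * Example (illustrative): NCQ-capable LLDDs without extra queueing
 * constraints typically just plug this helper into their port ops;
 * "foo_ops" is a hypothetical name.
 *
 *	static struct ata_port_operations foo_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.qc_defer	= ata_std_qc_defer,
 *	};
 */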
4510 enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
4514 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4517 * ata_sg_init - Associate command with scatter-gather table.
4518 * @qc: Command to be associated
4519 * @sg: Scatter-gather table.
4520 * @n_elem: Number of elements in s/g table.
4522 * Initialize the data-related elements of queued_cmd @qc
4523 * to point to a scatter-gather table @sg, containing @n_elem
4527 * spin_lock_irqsave(host lock)
4529 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4530 unsigned int n_elem)
4533 qc->n_elem = n_elem;
4537 #ifdef CONFIG_HAS_DMA
4540 * ata_sg_clean - Unmap DMA memory associated with command
4541 * @qc: Command containing DMA memory to be released
4543 * Unmap all mapped DMA memory associated with this command.
4546 * spin_lock_irqsave(host lock)
4548 static void ata_sg_clean(struct ata_queued_cmd *qc)
4550 struct ata_port *ap = qc->ap;
4551 struct scatterlist *sg = qc->sg;
4552 int dir = qc->dma_dir;
4554 WARN_ON_ONCE(sg == NULL);
4557 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4559 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4564 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4565 * @qc: Command with scatter-gather table to be mapped.
4567 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4570 * spin_lock_irqsave(host lock)
4573 * Zero on success, negative on error.
4576 static int ata_sg_setup(struct ata_queued_cmd *qc)
4578 struct ata_port *ap = qc->ap;
4579 unsigned int n_elem;
4581 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4585 qc->orig_n_elem = qc->n_elem;
4586 qc->n_elem = n_elem;
4587 qc->flags |= ATA_QCFLAG_DMAMAP;
4592 #else /* !CONFIG_HAS_DMA */
4594 static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
4595 static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
4597 #endif /* !CONFIG_HAS_DMA */
/**
 * swap_buf_le16 - swap halves of 16-bit words in place
 * @buf: Buffer to swap
 * @buf_words: Number of 16-bit words in buffer.
 *
 * Swap halves of 16-bit words if needed to convert from
 * little-endian byte order to native cpu byte order, or
 * vice-versa.
 *
 * LOCKING:
 * Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
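
/*
 * Illustrative sketch: device identify data arrives as little-endian
 * 16-bit words, so a big-endian host converts the buffer in place
 * before parsing it (ATA_ID_WORDS is the standard 256-word page size):
 *
 *	u16 id[ATA_ID_WORDS];
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */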
/**
 * ata_qc_free - free unused ata_queued_cmd
 * @qc: Command to complete
 *
 * Designed to free unused ata_queued_cmd object
 * in case something prevents using it.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	qc->flags = 0;
	if (ata_tag_valid(qc->tag))
		qc->tag = ATA_TAG_POISON;
}
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (ata_is_ncq(qc->tf.protocol)) {
		link->sactive &= ~(1 << qc->hw_tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1ULL << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->qc_fill_rtf(qc);
}
static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;

	if (!ata_is_data(qc->tf.protocol))
		return;

	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
		return;

	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}
/**
 * ata_qc_complete - Complete an active ATA command
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA command has
 * completed, with either an ok or not-ok status.
 *
 * Refrain from calling this function multiple times when
 * successfully completing multiple NCQ commands.
 * ata_qc_complete_multiple() should be used instead, which will
 * properly update IRQ expect state.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Trigger the LED (if available) */
	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a qc owned by EH is marked with ATA_QCFLAG_EH.
	 * Normal execution path is responsible for not accessing a
	 * qc owned by EH.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for qcs owned by EH.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_EH;

		/*
		 * Finish internal commands without any further processing
		 * and always with the result TF filled.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			trace_ata_qc_complete_internal(qc);
			__ata_qc_complete(qc);
			return;
		}

		/*
		 * Non-internal qc has failed.  Fill the result TF and
		 * summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
			fill_result_tf(qc);
			trace_ata_qc_complete_failed(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		WARN_ON_ONCE(ata_port_is_frozen(ap));

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		trace_ata_qc_complete_done(qc);
		/* Some commands need post-processing after successful
		 * command completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF &&
			    qc->tf.feature != SETFEATURES_RA_ON &&
			    qc->tf.feature != SETFEATURES_RA_OFF)
				break;
			fallthrough;
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
EXPORT_SYMBOL_GPL(ata_qc_complete);
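
/*
 * Illustrative sketch (hypothetical LLD, new-EH style): an interrupt
 * handler typically looks up the active qc under the host lock,
 * records any error mask derived from the device status, and then
 * completes it:
 *
 *	struct ata_queued_cmd *qc =
 *		ata_qc_from_tag(ap, ap->link.active_tag);
 *
 *	if (qc) {
 *		qc->err_mask |= ac_err_mask(status);
 *		ata_qc_complete(qc);
 *	}
 */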
/**
 * ata_qc_get_active - get bitmask of active qcs
 * @ap: port in question
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Bitmask of active qcs
 */
u64 ata_qc_get_active(struct ata_port *ap)
{
	u64 qc_active = ap->qc_active;

	/* ATA_TAG_INTERNAL is sent to hw as tag 0 */
	if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
		qc_active |= (1 << 0);
		qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
	}

	return qc_active;
}
EXPORT_SYMBOL_GPL(ata_qc_get_active);
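
/*
 * Illustrative sketch (assumed done_mask computed by the driver from
 * its hardware's completion register): NCQ-capable drivers commonly
 * pair this helper with ata_qc_complete_multiple() to complete all
 * commands that are no longer pending:
 *
 *	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 */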
/**
 * ata_qc_issue - issue taskfile to device
 * @qc: command to issue to device
 *
 * Prepare an ATA command for submission to device.
 * This includes mapping the data into a DMA-able
 * area, filling in the S/G table, and finally
 * writing the taskfile to hardware, starting the command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->hw_tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1ULL << qc->tag;

	/*
	 * We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
		goto sys_err;

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	trace_ata_qc_prep(qc);
	qc->err_mask |= ap->ops->qc_prep(qc);
	if (unlikely(qc->err_mask))
		goto err;
	trace_ata_qc_issue(qc);
	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
/**
 * ata_phys_link_online - test whether the given link is online
 * @link: ATA link to test
 *
 * Test whether @link is online.  Note that this function returns
 * 0 if online status of @link cannot be obtained, so
 * ata_link_online(link) != !ata_link_offline(link).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * True if the port online status is available and online.
 */
bool ata_phys_link_online(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    ata_sstatus_online(sstatus))
		return true;
	return false;
}
/**
 * ata_phys_link_offline - test whether the given link is offline
 * @link: ATA link to test
 *
 * Test whether @link is offline.  Note that this function
 * returns 0 if offline status of @link cannot be obtained, so
 * ata_link_online(link) != !ata_link_offline(link).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * True if the port offline status is available and offline.
 */
bool ata_phys_link_offline(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    !ata_sstatus_online(sstatus))
		return true;
	return false;
}
/**
 * ata_link_online - test whether the given link is online
 * @link: ATA link to test
 *
 * Test whether @link is online.  This is identical to
 * ata_phys_link_online() when there's no slave link.  When
 * there's a slave link, this function should only be called on
 * the master link and will return true if any of M/S links is
 * online.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * True if the port online status is available and online.
 */
bool ata_link_online(struct ata_link *link)
{
	struct ata_link *slave = link->ap->slave_link;

	WARN_ON(link == slave); /* shouldn't be called on slave link */

	return ata_phys_link_online(link) ||
		(slave && ata_phys_link_online(slave));
}
EXPORT_SYMBOL_GPL(ata_link_online);
/**
 * ata_link_offline - test whether the given link is offline
 * @link: ATA link to test
 *
 * Test whether @link is offline.  This is identical to
 * ata_phys_link_offline() when there's no slave link.  When
 * there's a slave link, this function should only be called on
 * the master link and will return true if both M/S links are
 * offline.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * True if the port offline status is available and offline.
 */
bool ata_link_offline(struct ata_link *link)
{
	struct ata_link *slave = link->ap->slave_link;

	WARN_ON(link == slave); /* shouldn't be called on slave link */

	return ata_phys_link_offline(link) &&
		(!slave || ata_phys_link_offline(slave));
}
EXPORT_SYMBOL_GPL(ata_link_offline);
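
/*
 * Note the asymmetry spelled out in the comments above: when the SCR
 * read fails, both ata_link_online() and ata_link_offline() return
 * false.  "Status unknown" is therefore reported as neither online
 * nor offline, which is why the two helpers are not each other's
 * negation.
 */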
#ifdef CONFIG_PM
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
				unsigned int action, unsigned int ehi_flags,
				bool async)
{
	struct ata_link *link;
	unsigned long flags;

	/* Previous resume operation might still be in
	 * progress.  Wait for PM_PENDING to clear.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* request PM ops to EH */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}
}
/*
 * On some hardware, device fails to respond after spun down for suspend.  As
 * the device won't be used before being resumed, we don't need to touch the
 * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
 *
 * http://thread.gmane.org/gmane.linux.ide/46764
 */
static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
						 | ATA_EHI_NO_AUTOPSY
						 | ATA_EHI_NO_RECOVERY;

static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}

static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
static int ata_port_pm_suspend(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	ata_port_suspend(ap, PMSG_SUSPEND);
	return 0;
}

static int ata_port_pm_freeze(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	ata_port_suspend(ap, PMSG_FREEZE);
	return 0;
}

static int ata_port_pm_poweroff(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
	return 0;
}
static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
						| ATA_EHI_QUIET;

static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}

static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}

static int ata_port_pm_resume(struct device *dev)
{
	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
/*
 * For ODDs, the upper layer will poll for media change every few seconds,
 * which will make it enter and leave suspend state every few seconds.  And
 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
 * is very little and the ODD may malfunction after constantly being reset.
 * So the idle callback here will not proceed to suspend if a non-ZPODD
 * capable ODD is attached to the port.
 */
static int ata_port_runtime_idle(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);
	struct ata_link *link;
	struct ata_device *adev;

	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(adev, link, ENABLED)
			if (adev->class == ATA_DEV_ATAPI &&
			    !zpodd_dev_enabled(adev))
				return -EBUSY;
	}

	return 0;
}
static int ata_port_runtime_suspend(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
	return 0;
}

static int ata_port_runtime_resume(struct device *dev)
{
	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
	return 0;
}
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_pm_suspend,
	.resume = ata_port_pm_resume,
	.freeze = ata_port_pm_freeze,
	.thaw = ata_port_pm_resume,
	.poweroff = ata_port_pm_poweroff,
	.restore = ata_port_pm_resume,

	.runtime_suspend = ata_port_runtime_suspend,
	.runtime_resume = ata_port_runtime_resume,
	.runtime_idle = ata_port_runtime_idle,
};
/* sas ports don't participate in pm runtime management of ata_ports,
 * and need to resume ata devices at the domain level, not the per-port
 * level. sas suspend/resume is async to allow parallel port recovery
 * since sas has multiple ata_port instances per Scsi_Host.
 */
void ata_sas_port_suspend(struct ata_port *ap)
{
	ata_port_suspend_async(ap, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);

void ata_sas_port_resume(struct ata_port *ap)
{
	ata_port_resume_async(ap, PMSG_RESUME);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
/**
 * ata_host_suspend - suspend host
 * @host: host to suspend
 * @mesg: PM message
 *
 * Suspend @host.  Actual operation is performed by port suspend.
 */
void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
}
EXPORT_SYMBOL_GPL(ata_host_suspend);

/**
 * ata_host_resume - resume host
 * @host: host to resume
 *
 * Resume @host.  Actual operation is performed by port resume.
 */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
const struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
/**
 * ata_dev_init - Initialize an ata_device structure
 * @dev: Device structure to initialize
 *
 * Initialize @dev in preparation for probing.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
/**
 * ata_link_init - Initialize an ata_link structure
 * @ap: ATA port link is attached to
 * @link: Link structure to initialize
 * @pmp: Port multiplier port number
 *
 * Initialize @link.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
/**
 * sata_link_init_spd - Initialize link->sata_spd_limit
 * @link: Link to configure sata_spd_limit for
 *
 * Initialize ``link->[hw_]sata_spd_limit`` to the currently
 * configured value.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
/**
 * ata_port_alloc - allocate and initialize basic ATA port resources
 * @host: ATA host this allocated port belongs to
 *
 * Allocate and initialize basic ATA port resources.
 *
 * RETURNS:
 * Allocate ATA port on success, NULL on failure.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	ap->print_id = -1;
	ap->local_port_no = -1;
	ap->host = host;
	ap->dev = host->dev;

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
		    TIMER_DEFERRABLE);

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
static void ata_devres_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);
	}

	dev_set_drvdata(gendev, NULL);
	ata_host_put(host);
}
static void ata_host_release(struct kref *kref)
{
	struct ata_host *host = container_of(kref, struct ata_host, kref);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		kfree(ap->pmp_link);
		kfree(ap->slave_link);
		kfree(ap);
		host->ports[i] = NULL;
	}
	kfree(host);
}

void ata_host_get(struct ata_host *host)
{
	kref_get(&host->kref);
}

void ata_host_put(struct ata_host *host)
{
	kref_put(&host->kref, ata_host_release);
}
EXPORT_SYMBOL_GPL(ata_host_put);
/**
 * ata_host_alloc - allocate and init basic ATA host resources
 * @dev: generic device this host is associated with
 * @max_ports: maximum number of ATA ports associated with this host
 *
 * Allocate and initialize basic ATA host resources.  LLD calls
 * this function to allocate a host, initializes it fully and
 * attaches it using ata_host_register().
 *
 * @max_ports ports are allocated and host->n_ports is
 * initialized to @max_ports.  The caller is allowed to decrease
 * host->n_ports before calling ata_host_register().  The unused
 * ports will be automatically freed on registration.
 *
 * RETURNS:
 * Allocate ATA host on success, NULL on failure.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;
	void *dr;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = kzalloc(sz, GFP_KERNEL);
	if (!host)
		return NULL;

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		goto err_free;

	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
	if (!dr)
		goto err_out;

	devres_add(dev, dr);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;
	kref_init(&host->kref);

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
 err_free:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
/**
 * ata_host_alloc_pinfo - alloc host and init with port_info array
 * @dev: generic device this host is associated with
 * @ppi: array of ATA port_info to initialize host with
 * @n_ports: number of ATA ports attached to this host
 *
 * Allocate ATA host and initialize with info from @ppi.  If NULL
 * terminated, @ppi may contain fewer entries than @n_ports.  The
 * last entry will be used for the remaining ports.
 *
 * RETURNS:
 * Allocate ATA host on success, NULL on failure.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi = &ata_dummy_port_info;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
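
/*
 * Illustrative sketch (hypothetical driver): a controller with two
 * identical ports can pass a NULL-terminated, single-entry ppi array
 * and let the last entry cover the remaining port:
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 */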
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
/**
 * ata_finalize_port_ops - finalize ata_port_operations
 * @ops: ata_port_operations to finalize
 *
 * An ata_port_operations can inherit from another ops and that
 * ops can again inherit from another.  This can go on as many
 * times as necessary as long as there is no loop in the
 * inheritance chain.
 *
 * Ops tables are finalized when the host is started.  NULL or
 * unspecified entries are inherited from the closest ancestor
 * which has the method and the entry is populated with it.
 * After finalization, the ops table directly points to all the
 * methods and ->inherits is no longer necessary and cleared.
 *
 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
 *
 * LOCKING:
 * None.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
/**
 * ata_host_start - start and freeze ports of an ATA host
 * @host: ATA host to start ports for
 *
 * Start and then freeze ports of @host.  Started status is
 * recorded in host->flags, so this function can be called
 * multiple times.  Ports are guaranteed to get started only
 * once.  If host->ops is not initialized yet, it is set to the
 * first non-dummy port ops.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops && host->ops->host_stop)
		have_stop = 1;

	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_start);
/**
 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
 * @host: host to initialize
 * @dev: device host is attached to
 * @ops: port_ops
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->n_tags = ATA_MAX_QUEUE;
	host->dev = dev;
	host->ops = ops;
	kref_init(&host->kref);
}
EXPORT_SYMBOL_GPL(ata_host_init);
void __ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* kick EH for boot probing */
	spin_lock_irqsave(ap->lock, flags);

	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
int ata_port_probe(struct ata_port *ap)
{
	int rc = 0;

	if (ap->ops->error_handler) {
		__ata_port_probe(ap);
		ata_port_wait_eh(ap);
	} else {
		rc = ata_bus_probe(ap);
	}
	return rc;
}
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.
	 * Jeff Garzik says this is only within a controller, so we
	 * don't need to wait for port 0, only for later ports.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* in order to keep device order, we need to synchronize at this point */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
/**
 * ata_host_register - register initialized ATA host
 * @host: ATA host to register
 * @sht: template for SCSI host
 *
 * Register initialized ATA host.  @host is allocated using
 * ata_host_alloc() and fully initialized by LLD.  This function
 * starts ports, registers @host with ATA and SCSI layers and
 * probes registered devices.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* Create associated sysfs transport objects */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev, host->ports[i]);
		if (rc)
			goto err_tadd;
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned int xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ap->cookie = async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	while (--i >= 0)
		ata_tport_delete(host->ports[i]);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_register);
/**
 * ata_host_activate - start host, request IRQ and register it
 * @host: target ATA host
 * @irq: IRQ to request
 * @irq_handler: irq_handler used when requesting IRQ
 * @irq_flags: irq_flags used when requesting IRQ
 * @sht: scsi_host_template to use when registering the host
 *
 * After allocating an ATA host and initializing it, most libata
 * LLDs perform three steps to activate the host - start host,
 * request IRQ and register it.  This helper takes necessary
 * arguments and performs the three steps in one go.
 *
 * An invalid IRQ skips the IRQ registration and expects the host to
 * have set polling mode on the port.  In this case, @irq_handler
 * should be NULL.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;
	char *irq_desc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
				  dev_driver_string(host->dev),
				  dev_name(host->dev));
	if (!irq_desc)
		return -ENOMEM;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      irq_desc, host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_activate);
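
/*
 * Illustrative sketch (hypothetical PCI driver probe, names assumed):
 * the canonical sequence is allocate, set up controller resources,
 * then activate in one call:
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	... map BARs, initialize host private data ...
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */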
/**
 * ata_port_detach - Detach ATA port in preparation of device removal
 * @ap: ATA port to be detached
 *
 * Detach all ATA devices and the associated SCSI devices of @ap;
 * then, remove the associated SCSI host.  @ap is guaranteed to
 * be quiescent on return from this function.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* clean up zpodd on port removal */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (zpodd_dev_enabled(dev))
				zpodd_exit(dev);
		}
	}
	if (ap->pmp_link) {
		int i;

		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}
/**
 * ata_host_detach - Detach all ports of an ATA host
 * @host: Host to detach
 *
 * Detach all ports of @host.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		/* Ensure ata_port probe has completed */
		async_synchronize_cookie(host->ports[i]->cookie + 1);
		ata_port_detach(host->ports[i]);
	}

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
EXPORT_SYMBOL_GPL(ata_host_detach);
#ifdef CONFIG_PCI

/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that hot-unplug or
 * module unload event has occurred.  Detach all ports.  Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	ata_host_detach(host);
}
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
void ata_pci_shutdown_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ap->pflags |= ATA_PFLAG_FROZEN;

		/* Disable port interrupts */
		if (ap->ops->freeze)
			ap->ops->freeze(ap);

		/* Stop the port DMA engines */
		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
}
EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;

		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;

		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;

		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}
	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(pci_test_config_bits);
#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	ata_host_suspend(host, mesg);

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
#endif /* CONFIG_PCI */
/**
 * ata_platform_remove_one - Platform layer callback for device removal
 * @pdev: Platform device that was removed
 *
 * Platform layer indicates to libata via this hook that hot-unplug or
 * module unload event has occurred.  Detach all ports.  Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from platform layer (may sleep).
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);

	ata_host_detach(host);

	return 0;
}
EXPORT_SYMBOL_GPL(ata_platform_remove_one);
#ifdef CONFIG_ATA_FORCE

#define force_cbl(name, flag)				\
	{ #name, .cbl = (flag) }

#define force_spd_limit(spd, val)			\
	{ #spd, .spd_limit = (val) }

#define force_xfer(mode, shift)				\
	{ #mode, .xfer_mask = (1UL << (shift)) }

#define force_lflag_on(name, flags)			\
	{ #name, .lflags_on = (flags) }

#define force_lflag_onoff(name, flags)			\
	{ "no" #name, .lflags_on = (flags) },		\
	{ #name, .lflags_off = (flags) }

#define force_horkage_on(name, flag)			\
	{ #name, .horkage_on = (flag) }

#define force_horkage_onoff(name, flag)			\
	{ "no" #name, .horkage_on = (flag) },		\
	{ #name, .horkage_off = (flag) }
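
/*
 * For example, force_cbl(40c, ATA_CBL_PATA40) above expands to the
 * table entry { "40c", .cbl = ATA_CBL_PATA40 }, and each _onoff
 * variant emits two entries: a "no"-prefixed name that sets the flag
 * in the *_on mask and the plain name that sets it in the *_off mask.
 */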
static const struct ata_force_param force_tbl[] __initconst = {
	force_cbl(40c, ATA_CBL_PATA40),
	force_cbl(80c, ATA_CBL_PATA80),
	force_cbl(short40c, ATA_CBL_PATA40_SHORT),
	force_cbl(unk, ATA_CBL_PATA_UNK),
	force_cbl(ign, ATA_CBL_PATA_IGN),
	force_cbl(sata, ATA_CBL_SATA),

	force_spd_limit(1.5Gbps, 1),
	force_spd_limit(3.0Gbps, 2),

	force_xfer(pio0, ATA_SHIFT_PIO + 0),
	force_xfer(pio1, ATA_SHIFT_PIO + 1),
	force_xfer(pio2, ATA_SHIFT_PIO + 2),
	force_xfer(pio3, ATA_SHIFT_PIO + 3),
	force_xfer(pio4, ATA_SHIFT_PIO + 4),
	force_xfer(pio5, ATA_SHIFT_PIO + 5),
	force_xfer(pio6, ATA_SHIFT_PIO + 6),
	force_xfer(mwdma0, ATA_SHIFT_MWDMA + 0),
	force_xfer(mwdma1, ATA_SHIFT_MWDMA + 1),
	force_xfer(mwdma2, ATA_SHIFT_MWDMA + 2),
	force_xfer(mwdma3, ATA_SHIFT_MWDMA + 3),
	force_xfer(mwdma4, ATA_SHIFT_MWDMA + 4),
	force_xfer(udma0, ATA_SHIFT_UDMA + 0),
	force_xfer(udma16, ATA_SHIFT_UDMA + 0),
	force_xfer(udma/16, ATA_SHIFT_UDMA + 0),
	force_xfer(udma1, ATA_SHIFT_UDMA + 1),
	force_xfer(udma25, ATA_SHIFT_UDMA + 1),
	force_xfer(udma/25, ATA_SHIFT_UDMA + 1),
	force_xfer(udma2, ATA_SHIFT_UDMA + 2),
	force_xfer(udma33, ATA_SHIFT_UDMA + 2),
	force_xfer(udma/33, ATA_SHIFT_UDMA + 2),
	force_xfer(udma3, ATA_SHIFT_UDMA + 3),
	force_xfer(udma44, ATA_SHIFT_UDMA + 3),
	force_xfer(udma/44, ATA_SHIFT_UDMA + 3),
	force_xfer(udma4, ATA_SHIFT_UDMA + 4),
	force_xfer(udma66, ATA_SHIFT_UDMA + 4),
	force_xfer(udma/66, ATA_SHIFT_UDMA + 4),
	force_xfer(udma5, ATA_SHIFT_UDMA + 5),
	force_xfer(udma100, ATA_SHIFT_UDMA + 5),
	force_xfer(udma/100, ATA_SHIFT_UDMA + 5),
	force_xfer(udma6, ATA_SHIFT_UDMA + 6),
	force_xfer(udma133, ATA_SHIFT_UDMA + 6),
	force_xfer(udma/133, ATA_SHIFT_UDMA + 6),
	force_xfer(udma7, ATA_SHIFT_UDMA + 7),

	force_lflag_on(nohrst, ATA_LFLAG_NO_HRST),
	force_lflag_on(nosrst, ATA_LFLAG_NO_SRST),
	force_lflag_on(norst, ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST),
	force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
	force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),

	force_horkage_onoff(ncq, ATA_HORKAGE_NONCQ),
	force_horkage_onoff(ncqtrim, ATA_HORKAGE_NO_NCQ_TRIM),
	force_horkage_onoff(ncqati, ATA_HORKAGE_NO_NCQ_ON_ATI),

	force_horkage_onoff(trim, ATA_HORKAGE_NOTRIM),
	force_horkage_on(trim_zero, ATA_HORKAGE_ZERO_AFTER_TRIM),
	force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),

	force_horkage_onoff(dma, ATA_HORKAGE_NODMA),
	force_horkage_on(atapi_dmadir, ATA_HORKAGE_ATAPI_DMADIR),
	force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),

	force_horkage_onoff(dmalog, ATA_HORKAGE_NO_DMA_LOG),
	force_horkage_onoff(iddevlog, ATA_HORKAGE_NO_ID_DEV_LOG),
	force_horkage_onoff(logdir, ATA_HORKAGE_NO_LOG_DIR),

	force_horkage_on(max_sec_128, ATA_HORKAGE_MAX_SEC_128),
	force_horkage_on(max_sec_1024, ATA_HORKAGE_MAX_SEC_1024),
	force_horkage_on(max_sec_lba48, ATA_HORKAGE_MAX_SEC_LBA48),

	force_horkage_onoff(lpm, ATA_HORKAGE_NOLPM),
	force_horkage_onoff(setxfer, ATA_HORKAGE_NOSETXFER),
	force_horkage_on(dump_id, ATA_HORKAGE_DUMP_ID),
	force_horkage_onoff(fua, ATA_HORKAGE_NO_FUA),

	force_horkage_on(disable, ATA_HORKAGE_DISABLE),
};
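
/*
 * Usage note (sketch of the documented kernel command line syntax,
 * see Documentation/admin-guide/kernel-parameters.rst): each
 * comma-separated entry is [PORT[.DEVICE]:]VALUE, where VALUE is one
 * of the names in force_tbl above, e.g.:
 *
 *	libata.force=1.5Gbps
 *	libata.force=1:noncq,2.00:udma/33
 */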
static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;

	/* find where this param ends and update *cur */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == '\0')
		*cur = p;
	else
		*cur = p + 1;

	*p = '\0';

	/* parse */
	p = strchr(start, ':');
	if (!p) {
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* parse id */
	p = strchr(id, '.');
	if (p) {
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}
static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* Calculate maximum number of params and allocate ata_force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}
static void ata_free_force_param(void)
{
	kfree(ata_force_tbl);
}
#else
static inline void ata_parse_force_param(void) { }
static inline void ata_free_force_param(void) { }
#endif /* CONFIG_ATA_FORCE */
static int __init ata_init(void)
{
	int rc;

	ata_parse_force_param();

	rc = ata_sff_init();
	if (rc) {
		ata_free_force_param();
		return rc;
	}

	libata_transport_init();
	ata_scsi_transport_template = ata_attach_transport();
	if (!ata_scsi_transport_template) {
		ata_sff_exit();
		rc = -ENOMEM;
		goto err_out;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;

err_out:
	return rc;
}
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	ata_free_force_param();
}

subsys_initcall(ata_init);
module_exit(ata_exit);
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
EXPORT_SYMBOL_GPL(ata_ratelimit);
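
/*
 * Illustrative sketch: callers use this helper to throttle noisy
 * messages on hot paths, along the lines of:
 *
 *	if (ata_ratelimit())
 *		ata_port_warn(ap, "spurious interrupt\n");
 */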
/**
 * ata_msleep - ATA EH owner aware msleep
 * @ap: ATA port to attribute the sleep to
 * @msecs: duration to sleep in milliseconds
 *
 * Sleeps @msecs.  If the current task is owner of @ap's EH, the
 * ownership is released before going to sleep and reacquired
 * after the sleep is complete.  IOW, other ports sharing the
 * @ap->host will be allowed to own the EH while this task is
 * sleeping.
 *
 * LOCKING:
 * Might sleep.
 */
void ata_msleep(struct ata_port *ap, unsigned int msecs)
{
	bool owns_eh = ap && ap->host->eh_owner == current;

	if (owns_eh)
		ata_eh_release(ap);

	if (msecs < 20) {
		unsigned long usecs = msecs * USEC_PER_MSEC;

		usleep_range(usecs, usecs + 50);
	} else {
		msleep(msecs);
	}

	if (owns_eh)
		ata_eh_acquire(ap);
}
EXPORT_SYMBOL_GPL(ata_msleep);
/**
 * ata_wait_register - wait until register value changes
 * @ap: ATA port to wait register for, can be NULL
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
 * @interval: polling interval in milliseconds
 * @timeout: timeout in milliseconds
 *
 * Waiting for some bits of register to change is a common
 * operation for ATA controllers.  This function reads 32bit LE
 * IO-mapped register @reg and tests for the following condition.
 *
 * (*@reg & mask) != val
 *
 * If the condition is met, it returns; otherwise, the process is
 * repeated after @interval until timeout.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * The final register value.
 */
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval, unsigned long timeout)
{
	unsigned long deadline;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	deadline = ata_deadline(jiffies, timeout);

	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

	return tmp;
}
EXPORT_SYMBOL_GPL(ata_wait_register);
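
/*
 * Illustrative sketch (hypothetical register and bit names): poll an
 * MMIO status register every 10 ms for up to 1000 ms, waiting for a
 * busy bit to clear; per the condition above, the wait ends as soon
 * as (reg & MY_BUSY) != MY_BUSY:
 *
 *	u32 status = ata_wait_register(ap, mmio + MY_STATUS, MY_BUSY,
 *				       MY_BUSY, 10, 1000);
 *	if (status & MY_BUSY)
 *		dev_warn(ap->dev, "controller still busy\n");
 */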
/*
 * Dummy port_ops
 */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}

struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.error_handler = ata_dummy_error_handler,
	.sched_eh = ata_std_sched_eh,
	.end_eh = ata_std_end_eh,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);

const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
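
/*
 * Illustrative sketch: a driver marks a nonexistent or disabled port
 * by pointing the corresponding ppi slot at the dummy info, e.g. for
 * a controller whose second channel is fused off:
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &my_port_info, &ata_dummy_port_info };
 */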
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);