libata: don't use ap->ioaddr in non-SFF drivers
[linux-2.6-block.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
1da177e4
LT
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
67846b30 57#include <linux/jiffies.h>
378f058c 58#include <linux/scatterlist.h>
2dcb407e 59#include <linux/io.h>
1da177e4 60#include <scsi/scsi.h>
193515d5 61#include <scsi/scsi_cmnd.h>
1da177e4
LT
62#include <scsi/scsi_host.h>
63#include <linux/libata.h>
1da177e4
LT
64#include <asm/semaphore.h>
65#include <asm/byteorder.h>
140b5e59 66#include <linux/cdrom.h>
1da177e4
LT
67
68#include "libata.h"
69
fda0efc5 70
d7bb4cc7 71/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
72const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
73const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
74const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 75
029cfd6b 76const struct ata_port_operations ata_base_port_ops = {
0aa1113d 77 .prereset = ata_std_prereset,
203c75b8 78 .postreset = ata_std_postreset,
a1efdaba 79 .error_handler = ata_std_error_handler,
029cfd6b
TH
80};
81
82const struct ata_port_operations sata_port_ops = {
83 .inherits = &ata_base_port_ops,
84
85 .qc_defer = ata_std_qc_defer,
57c9efdf 86 .hardreset = sata_std_hardreset,
029cfd6b
TH
87};
88
89const struct ata_port_operations sata_pmp_port_ops = {
90 .inherits = &sata_port_ops,
a1efdaba 91
ac371987 92 .pmp_prereset = ata_std_prereset,
5958e302 93 .pmp_hardreset = sata_std_hardreset,
ac371987 94 .pmp_postreset = ata_std_postreset,
a1efdaba 95 .error_handler = sata_pmp_error_handler,
029cfd6b
TH
96};
97
3373efd8
TH
98static unsigned int ata_dev_init_params(struct ata_device *dev,
99 u16 heads, u16 sectors);
100static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
218f3d30
JG
101static unsigned int ata_dev_set_feature(struct ata_device *dev,
102 u8 enable, u8 feature);
3373efd8 103static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 104static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 105
f3187195 106unsigned int ata_print_id = 1;
1da177e4
LT
107static struct workqueue_struct *ata_wq;
108
453b07ac
TH
109struct workqueue_struct *ata_aux_wq;
110
33267325
TH
111struct ata_force_param {
112 const char *name;
113 unsigned int cbl;
114 int spd_limit;
115 unsigned long xfer_mask;
116 unsigned int horkage_on;
117 unsigned int horkage_off;
118};
119
120struct ata_force_ent {
121 int port;
122 int device;
123 struct ata_force_param param;
124};
125
126static struct ata_force_ent *ata_force_tbl;
127static int ata_force_tbl_size;
128
129static char ata_force_param_buf[PAGE_SIZE] __initdata;
7afb4222
TH
130/* param_buf is thrown away after initialization, disallow read */
131module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
33267325
TH
132MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
133
418dc1f5 134int atapi_enabled = 1;
1623c81e
JG
135module_param(atapi_enabled, int, 0444);
136MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
137
c5c61bda 138static int atapi_dmadir = 0;
95de719a
AL
139module_param(atapi_dmadir, int, 0444);
140MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
141
baf4fdfa
ML
142int atapi_passthru16 = 1;
143module_param(atapi_passthru16, int, 0444);
144MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
145
c3c013a2
JG
146int libata_fua = 0;
147module_param_named(fua, libata_fua, int, 0444);
148MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
149
2dcb407e 150static int ata_ignore_hpa;
1e999736
AC
151module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
152MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
153
b3a70601
AC
154static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
155module_param_named(dma, libata_dma_mask, int, 0444);
156MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
157
a8601e5f
AM
158static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
159module_param(ata_probe_timeout, int, 0444);
160MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
161
6ebe9d86 162int libata_noacpi = 0;
d7d0dad6 163module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 164MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 165
ae8d4ee7
AC
166int libata_allow_tpm = 0;
167module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
168MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
169
1da177e4
LT
170MODULE_AUTHOR("Jeff Garzik");
171MODULE_DESCRIPTION("Library module for ATA devices");
172MODULE_LICENSE("GPL");
173MODULE_VERSION(DRV_VERSION);
174
0baab86b 175
33267325
TH
176/**
177 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 178 * @ap: ATA port of interest
33267325
TH
179 *
180 * Force cable type according to libata.force and whine about it.
181 * The last entry which has matching port number is used, so it
182 * can be specified as part of device force parameters. For
183 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
184 * same effect.
185 *
186 * LOCKING:
187 * EH context.
188 */
189void ata_force_cbl(struct ata_port *ap)
190{
191 int i;
192
193 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
194 const struct ata_force_ent *fe = &ata_force_tbl[i];
195
196 if (fe->port != -1 && fe->port != ap->print_id)
197 continue;
198
199 if (fe->param.cbl == ATA_CBL_NONE)
200 continue;
201
202 ap->cbl = fe->param.cbl;
203 ata_port_printk(ap, KERN_NOTICE,
204 "FORCE: cable set to %s\n", fe->param.name);
205 return;
206 }
207}
208
209/**
210 * ata_force_spd_limit - force SATA spd limit according to libata.force
211 * @link: ATA link of interest
212 *
213 * Force SATA spd limit according to libata.force and whine about
214 * it. When only the port part is specified (e.g. 1:), the limit
215 * applies to all links connected to both the host link and all
216 * fan-out ports connected via PMP. If the device part is
217 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
218 * link not the host link. Device number 15 always points to the
219 * host link whether PMP is attached or not.
220 *
221 * LOCKING:
222 * EH context.
223 */
224static void ata_force_spd_limit(struct ata_link *link)
225{
226 int linkno, i;
227
228 if (ata_is_host_link(link))
229 linkno = 15;
230 else
231 linkno = link->pmp;
232
233 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
234 const struct ata_force_ent *fe = &ata_force_tbl[i];
235
236 if (fe->port != -1 && fe->port != link->ap->print_id)
237 continue;
238
239 if (fe->device != -1 && fe->device != linkno)
240 continue;
241
242 if (!fe->param.spd_limit)
243 continue;
244
245 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
246 ata_link_printk(link, KERN_NOTICE,
247 "FORCE: PHY spd limit set to %s\n", fe->param.name);
248 return;
249 }
250}
251
252/**
253 * ata_force_xfermask - force xfermask according to libata.force
254 * @dev: ATA device of interest
255 *
256 * Force xfer_mask according to libata.force and whine about it.
257 * For consistency with link selection, device number 15 selects
258 * the first device connected to the host link.
259 *
260 * LOCKING:
261 * EH context.
262 */
263static void ata_force_xfermask(struct ata_device *dev)
264{
265 int devno = dev->link->pmp + dev->devno;
266 int alt_devno = devno;
267 int i;
268
269 /* allow n.15 for the first device attached to host port */
270 if (ata_is_host_link(dev->link) && devno == 0)
271 alt_devno = 15;
272
273 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
274 const struct ata_force_ent *fe = &ata_force_tbl[i];
275 unsigned long pio_mask, mwdma_mask, udma_mask;
276
277 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
278 continue;
279
280 if (fe->device != -1 && fe->device != devno &&
281 fe->device != alt_devno)
282 continue;
283
284 if (!fe->param.xfer_mask)
285 continue;
286
287 ata_unpack_xfermask(fe->param.xfer_mask,
288 &pio_mask, &mwdma_mask, &udma_mask);
289 if (udma_mask)
290 dev->udma_mask = udma_mask;
291 else if (mwdma_mask) {
292 dev->udma_mask = 0;
293 dev->mwdma_mask = mwdma_mask;
294 } else {
295 dev->udma_mask = 0;
296 dev->mwdma_mask = 0;
297 dev->pio_mask = pio_mask;
298 }
299
300 ata_dev_printk(dev, KERN_NOTICE,
301 "FORCE: xfer_mask set to %s\n", fe->param.name);
302 return;
303 }
304}
305
306/**
307 * ata_force_horkage - force horkage according to libata.force
308 * @dev: ATA device of interest
309 *
310 * Force horkage according to libata.force and whine about it.
311 * For consistency with link selection, device number 15 selects
312 * the first device connected to the host link.
313 *
314 * LOCKING:
315 * EH context.
316 */
317static void ata_force_horkage(struct ata_device *dev)
318{
319 int devno = dev->link->pmp + dev->devno;
320 int alt_devno = devno;
321 int i;
322
323 /* allow n.15 for the first device attached to host port */
324 if (ata_is_host_link(dev->link) && devno == 0)
325 alt_devno = 15;
326
327 for (i = 0; i < ata_force_tbl_size; i++) {
328 const struct ata_force_ent *fe = &ata_force_tbl[i];
329
330 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
331 continue;
332
333 if (fe->device != -1 && fe->device != devno &&
334 fe->device != alt_devno)
335 continue;
336
337 if (!(~dev->horkage & fe->param.horkage_on) &&
338 !(dev->horkage & fe->param.horkage_off))
339 continue;
340
341 dev->horkage |= fe->param.horkage_on;
342 dev->horkage &= ~fe->param.horkage_off;
343
344 ata_dev_printk(dev, KERN_NOTICE,
345 "FORCE: horkage modified (%s)\n", fe->param.name);
346 }
347}
348
436d34b3
TH
349/**
350 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
351 * @opcode: SCSI opcode
352 *
353 * Determine ATAPI command type from @opcode.
354 *
355 * LOCKING:
356 * None.
357 *
358 * RETURNS:
359 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
360 */
361int atapi_cmd_type(u8 opcode)
362{
363 switch (opcode) {
364 case GPCMD_READ_10:
365 case GPCMD_READ_12:
366 return ATAPI_READ;
367
368 case GPCMD_WRITE_10:
369 case GPCMD_WRITE_12:
370 case GPCMD_WRITE_AND_VERIFY_10:
371 return ATAPI_WRITE;
372
373 case GPCMD_READ_CD:
374 case GPCMD_READ_CD_MSF:
375 return ATAPI_READ_CD;
376
e52dcc48
TH
377 case ATA_16:
378 case ATA_12:
379 if (atapi_passthru16)
380 return ATAPI_PASS_THRU;
381 /* fall thru */
436d34b3
TH
382 default:
383 return ATAPI_MISC;
384 }
385}
386
1da177e4
LT
387/**
388 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
389 * @tf: Taskfile to convert
1da177e4 390 * @pmp: Port multiplier port
9977126c
TH
391 * @is_cmd: This FIS is for command
392 * @fis: Buffer into which data will output
1da177e4
LT
393 *
394 * Converts a standard ATA taskfile to a Serial ATA
395 * FIS structure (Register - Host to Device).
396 *
397 * LOCKING:
398 * Inherited from caller.
399 */
9977126c 400void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 401{
9977126c
TH
402 fis[0] = 0x27; /* Register - Host to Device FIS */
403 fis[1] = pmp & 0xf; /* Port multiplier number*/
404 if (is_cmd)
405 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
406
1da177e4
LT
407 fis[2] = tf->command;
408 fis[3] = tf->feature;
409
410 fis[4] = tf->lbal;
411 fis[5] = tf->lbam;
412 fis[6] = tf->lbah;
413 fis[7] = tf->device;
414
415 fis[8] = tf->hob_lbal;
416 fis[9] = tf->hob_lbam;
417 fis[10] = tf->hob_lbah;
418 fis[11] = tf->hob_feature;
419
420 fis[12] = tf->nsect;
421 fis[13] = tf->hob_nsect;
422 fis[14] = 0;
423 fis[15] = tf->ctl;
424
425 fis[16] = 0;
426 fis[17] = 0;
427 fis[18] = 0;
428 fis[19] = 0;
429}
430
431/**
432 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
433 * @fis: Buffer from which data will be input
434 * @tf: Taskfile to output
435 *
e12a1be6 436 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
437 *
438 * LOCKING:
439 * Inherited from caller.
440 */
441
057ace5e 442void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
443{
444 tf->command = fis[2]; /* status */
445 tf->feature = fis[3]; /* error */
446
447 tf->lbal = fis[4];
448 tf->lbam = fis[5];
449 tf->lbah = fis[6];
450 tf->device = fis[7];
451
452 tf->hob_lbal = fis[8];
453 tf->hob_lbam = fis[9];
454 tf->hob_lbah = fis[10];
455
456 tf->nsect = fis[12];
457 tf->hob_nsect = fis[13];
458}
459
8cbd6df1
AL
460static const u8 ata_rw_cmds[] = {
461 /* pio multi */
462 ATA_CMD_READ_MULTI,
463 ATA_CMD_WRITE_MULTI,
464 ATA_CMD_READ_MULTI_EXT,
465 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
466 0,
467 0,
468 0,
469 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
470 /* pio */
471 ATA_CMD_PIO_READ,
472 ATA_CMD_PIO_WRITE,
473 ATA_CMD_PIO_READ_EXT,
474 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
475 0,
476 0,
477 0,
478 0,
8cbd6df1
AL
479 /* dma */
480 ATA_CMD_READ,
481 ATA_CMD_WRITE,
482 ATA_CMD_READ_EXT,
9a3dccc4
TH
483 ATA_CMD_WRITE_EXT,
484 0,
485 0,
486 0,
487 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 488};
1da177e4
LT
489
490/**
8cbd6df1 491 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
492 * @tf: command to examine and configure
493 * @dev: device tf belongs to
1da177e4 494 *
2e9edbf8 495 * Examine the device configuration and tf->flags to calculate
8cbd6df1 496 * the proper read/write commands and protocol to use.
1da177e4
LT
497 *
498 * LOCKING:
499 * caller.
500 */
bd056d7e 501static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 502{
9a3dccc4 503 u8 cmd;
1da177e4 504
9a3dccc4 505 int index, fua, lba48, write;
2e9edbf8 506
9a3dccc4 507 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
508 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
509 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 510
8cbd6df1
AL
511 if (dev->flags & ATA_DFLAG_PIO) {
512 tf->protocol = ATA_PROT_PIO;
9a3dccc4 513 index = dev->multi_count ? 0 : 8;
9af5c9c9 514 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
515 /* Unable to use DMA due to host limitation */
516 tf->protocol = ATA_PROT_PIO;
0565c26d 517 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
518 } else {
519 tf->protocol = ATA_PROT_DMA;
9a3dccc4 520 index = 16;
8cbd6df1 521 }
1da177e4 522
9a3dccc4
TH
523 cmd = ata_rw_cmds[index + fua + lba48 + write];
524 if (cmd) {
525 tf->command = cmd;
526 return 0;
527 }
528 return -1;
1da177e4
LT
529}
530
35b649fe
TH
531/**
532 * ata_tf_read_block - Read block address from ATA taskfile
533 * @tf: ATA taskfile of interest
534 * @dev: ATA device @tf belongs to
535 *
536 * LOCKING:
537 * None.
538 *
539 * Read block address from @tf. This function can handle all
540 * three address formats - LBA, LBA48 and CHS. tf->protocol and
541 * flags select the address format to use.
542 *
543 * RETURNS:
544 * Block address read from @tf.
545 */
546u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
547{
548 u64 block = 0;
549
550 if (tf->flags & ATA_TFLAG_LBA) {
551 if (tf->flags & ATA_TFLAG_LBA48) {
552 block |= (u64)tf->hob_lbah << 40;
553 block |= (u64)tf->hob_lbam << 32;
554 block |= tf->hob_lbal << 24;
555 } else
556 block |= (tf->device & 0xf) << 24;
557
558 block |= tf->lbah << 16;
559 block |= tf->lbam << 8;
560 block |= tf->lbal;
561 } else {
562 u32 cyl, head, sect;
563
564 cyl = tf->lbam | (tf->lbah << 8);
565 head = tf->device & 0xf;
566 sect = tf->lbal;
567
568 block = (cyl * dev->heads + head) * dev->sectors + sect;
569 }
570
571 return block;
572}
573
bd056d7e
TH
574/**
575 * ata_build_rw_tf - Build ATA taskfile for given read/write request
576 * @tf: Target ATA taskfile
577 * @dev: ATA device @tf belongs to
578 * @block: Block address
579 * @n_block: Number of blocks
580 * @tf_flags: RW/FUA etc...
581 * @tag: tag
582 *
583 * LOCKING:
584 * None.
585 *
586 * Build ATA taskfile @tf for read/write request described by
587 * @block, @n_block, @tf_flags and @tag on @dev.
588 *
589 * RETURNS:
590 *
591 * 0 on success, -ERANGE if the request is too large for @dev,
592 * -EINVAL if the request is invalid.
593 */
594int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
595 u64 block, u32 n_block, unsigned int tf_flags,
596 unsigned int tag)
597{
598 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
599 tf->flags |= tf_flags;
600
6d1245bf 601 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
602 /* yay, NCQ */
603 if (!lba_48_ok(block, n_block))
604 return -ERANGE;
605
606 tf->protocol = ATA_PROT_NCQ;
607 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
608
609 if (tf->flags & ATA_TFLAG_WRITE)
610 tf->command = ATA_CMD_FPDMA_WRITE;
611 else
612 tf->command = ATA_CMD_FPDMA_READ;
613
614 tf->nsect = tag << 3;
615 tf->hob_feature = (n_block >> 8) & 0xff;
616 tf->feature = n_block & 0xff;
617
618 tf->hob_lbah = (block >> 40) & 0xff;
619 tf->hob_lbam = (block >> 32) & 0xff;
620 tf->hob_lbal = (block >> 24) & 0xff;
621 tf->lbah = (block >> 16) & 0xff;
622 tf->lbam = (block >> 8) & 0xff;
623 tf->lbal = block & 0xff;
624
625 tf->device = 1 << 6;
626 if (tf->flags & ATA_TFLAG_FUA)
627 tf->device |= 1 << 7;
628 } else if (dev->flags & ATA_DFLAG_LBA) {
629 tf->flags |= ATA_TFLAG_LBA;
630
631 if (lba_28_ok(block, n_block)) {
632 /* use LBA28 */
633 tf->device |= (block >> 24) & 0xf;
634 } else if (lba_48_ok(block, n_block)) {
635 if (!(dev->flags & ATA_DFLAG_LBA48))
636 return -ERANGE;
637
638 /* use LBA48 */
639 tf->flags |= ATA_TFLAG_LBA48;
640
641 tf->hob_nsect = (n_block >> 8) & 0xff;
642
643 tf->hob_lbah = (block >> 40) & 0xff;
644 tf->hob_lbam = (block >> 32) & 0xff;
645 tf->hob_lbal = (block >> 24) & 0xff;
646 } else
647 /* request too large even for LBA48 */
648 return -ERANGE;
649
650 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
651 return -EINVAL;
652
653 tf->nsect = n_block & 0xff;
654
655 tf->lbah = (block >> 16) & 0xff;
656 tf->lbam = (block >> 8) & 0xff;
657 tf->lbal = block & 0xff;
658
659 tf->device |= ATA_LBA;
660 } else {
661 /* CHS */
662 u32 sect, head, cyl, track;
663
664 /* The request -may- be too large for CHS addressing. */
665 if (!lba_28_ok(block, n_block))
666 return -ERANGE;
667
668 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
669 return -EINVAL;
670
671 /* Convert LBA to CHS */
672 track = (u32)block / dev->sectors;
673 cyl = track / dev->heads;
674 head = track % dev->heads;
675 sect = (u32)block % dev->sectors + 1;
676
677 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
678 (u32)block, track, cyl, head, sect);
679
680 /* Check whether the converted CHS can fit.
681 Cylinder: 0-65535
682 Head: 0-15
683 Sector: 1-255*/
684 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
685 return -ERANGE;
686
687 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
688 tf->lbal = sect;
689 tf->lbam = cyl;
690 tf->lbah = cyl >> 8;
691 tf->device |= head;
692 }
693
694 return 0;
695}
696
cb95d562
TH
697/**
698 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
699 * @pio_mask: pio_mask
700 * @mwdma_mask: mwdma_mask
701 * @udma_mask: udma_mask
702 *
703 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
704 * unsigned int xfer_mask.
705 *
706 * LOCKING:
707 * None.
708 *
709 * RETURNS:
710 * Packed xfer_mask.
711 */
7dc951ae
TH
712unsigned long ata_pack_xfermask(unsigned long pio_mask,
713 unsigned long mwdma_mask,
714 unsigned long udma_mask)
cb95d562
TH
715{
716 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
717 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
718 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
719}
720
c0489e4e
TH
721/**
722 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
723 * @xfer_mask: xfer_mask to unpack
724 * @pio_mask: resulting pio_mask
725 * @mwdma_mask: resulting mwdma_mask
726 * @udma_mask: resulting udma_mask
727 *
728 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
729 * Any NULL distination masks will be ignored.
730 */
7dc951ae
TH
731void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
732 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
733{
734 if (pio_mask)
735 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
736 if (mwdma_mask)
737 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
738 if (udma_mask)
739 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
740}
741
cb95d562 742static const struct ata_xfer_ent {
be9a50c8 743 int shift, bits;
cb95d562
TH
744 u8 base;
745} ata_xfer_tbl[] = {
70cd071e
TH
746 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
747 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
748 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
cb95d562
TH
749 { -1, },
750};
751
752/**
753 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
754 * @xfer_mask: xfer_mask of interest
755 *
756 * Return matching XFER_* value for @xfer_mask. Only the highest
757 * bit of @xfer_mask is considered.
758 *
759 * LOCKING:
760 * None.
761 *
762 * RETURNS:
70cd071e 763 * Matching XFER_* value, 0xff if no match found.
cb95d562 764 */
7dc951ae 765u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
766{
767 int highbit = fls(xfer_mask) - 1;
768 const struct ata_xfer_ent *ent;
769
770 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
771 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
772 return ent->base + highbit - ent->shift;
70cd071e 773 return 0xff;
cb95d562
TH
774}
775
776/**
777 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
778 * @xfer_mode: XFER_* of interest
779 *
780 * Return matching xfer_mask for @xfer_mode.
781 *
782 * LOCKING:
783 * None.
784 *
785 * RETURNS:
786 * Matching xfer_mask, 0 if no match found.
787 */
7dc951ae 788unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
789{
790 const struct ata_xfer_ent *ent;
791
792 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
793 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
794 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
795 & ~((1 << ent->shift) - 1);
cb95d562
TH
796 return 0;
797}
798
799/**
800 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
801 * @xfer_mode: XFER_* of interest
802 *
803 * Return matching xfer_shift for @xfer_mode.
804 *
805 * LOCKING:
806 * None.
807 *
808 * RETURNS:
809 * Matching xfer_shift, -1 if no match found.
810 */
7dc951ae 811int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
812{
813 const struct ata_xfer_ent *ent;
814
815 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
816 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
817 return ent->shift;
818 return -1;
819}
820
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* indexed by bit position in the packed xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];

	return "<n/a>";
}
866
4c360c81
TH
/* Map a SATA link speed number (1-based) to a printable string. */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[spd - 1];
}
878
3373efd8 879void ata_dev_disable(struct ata_device *dev)
0b8efb0a 880{
09d7f9b0 881 if (ata_dev_enabled(dev)) {
9af5c9c9 882 if (ata_msg_drv(dev->link->ap))
09d7f9b0 883 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
562f0c2d 884 ata_acpi_on_disable(dev);
4ae72a1e
TH
885 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
886 ATA_DNXFER_QUIET);
0b8efb0a
TH
887 dev->class++;
888 }
889}
890
ca77329f
KCA
891static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
892{
893 struct ata_link *link = dev->link;
894 struct ata_port *ap = link->ap;
895 u32 scontrol;
896 unsigned int err_mask;
897 int rc;
898
899 /*
900 * disallow DIPM for drivers which haven't set
901 * ATA_FLAG_IPM. This is because when DIPM is enabled,
902 * phy ready will be set in the interrupt status on
903 * state changes, which will cause some drivers to
904 * think there are errors - additionally drivers will
905 * need to disable hot plug.
906 */
907 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
908 ap->pm_policy = NOT_AVAILABLE;
909 return -EINVAL;
910 }
911
912 /*
913 * For DIPM, we will only enable it for the
914 * min_power setting.
915 *
916 * Why? Because Disks are too stupid to know that
917 * If the host rejects a request to go to SLUMBER
918 * they should retry at PARTIAL, and instead it
919 * just would give up. So, for medium_power to
920 * work at all, we need to only allow HIPM.
921 */
922 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
923 if (rc)
924 return rc;
925
926 switch (policy) {
927 case MIN_POWER:
928 /* no restrictions on IPM transitions */
929 scontrol &= ~(0x3 << 8);
930 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
931 if (rc)
932 return rc;
933
934 /* enable DIPM */
935 if (dev->flags & ATA_DFLAG_DIPM)
936 err_mask = ata_dev_set_feature(dev,
937 SETFEATURES_SATA_ENABLE, SATA_DIPM);
938 break;
939 case MEDIUM_POWER:
940 /* allow IPM to PARTIAL */
941 scontrol &= ~(0x1 << 8);
942 scontrol |= (0x2 << 8);
943 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
944 if (rc)
945 return rc;
946
f5456b63
KCA
947 /*
948 * we don't have to disable DIPM since IPM flags
949 * disallow transitions to SLUMBER, which effectively
950 * disable DIPM if it does not support PARTIAL
951 */
ca77329f
KCA
952 break;
953 case NOT_AVAILABLE:
954 case MAX_PERFORMANCE:
955 /* disable all IPM transitions */
956 scontrol |= (0x3 << 8);
957 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
958 if (rc)
959 return rc;
960
f5456b63
KCA
961 /*
962 * we don't have to disable DIPM since IPM flags
963 * disallow all transitions which effectively
964 * disable DIPM anyway.
965 */
ca77329f
KCA
966 break;
967 }
968
969 /* FIXME: handle SET FEATURES failure */
970 (void) err_mask;
971
972 return 0;
973}
974
975/**
976 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
977 * @dev: device to enable power management
978 * @policy: the link power management policy
ca77329f
KCA
979 *
980 * Enable SATA Interface power management. This will enable
981 * Device Interface Power Management (DIPM) for min_power
982 * policy, and then call driver specific callbacks for
983 * enabling Host Initiated Power management.
984 *
985 * Locking: Caller.
986 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
987 */
988void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
989{
990 int rc = 0;
991 struct ata_port *ap = dev->link->ap;
992
993 /* set HIPM first, then DIPM */
994 if (ap->ops->enable_pm)
995 rc = ap->ops->enable_pm(ap, policy);
996 if (rc)
997 goto enable_pm_out;
998 rc = ata_dev_set_dipm(dev, policy);
999
1000enable_pm_out:
1001 if (rc)
1002 ap->pm_policy = MAX_PERFORMANCE;
1003 else
1004 ap->pm_policy = policy;
1005 return /* rc */; /* hopefully we can use 'rc' eventually */
1006}
1007
#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, call driver specific callbacks for disabling Host
 *	Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	/* turn off DIPM first, then let the driver undo HIPM */
	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */
ca77329f
KCA
1030
1031void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1032{
1033 ap->pm_policy = policy;
3ec25ebd 1034 ap->link.eh_info.action |= ATA_EH_LPM;
ca77329f
KCA
1035 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1036 ata_port_schedule_eh(ap);
1037}
1038
#ifdef CONFIG_PM
/*
 * Disable link power management on every device of every port of
 * @host.  Despite the name, this is the pre-suspend "quiesce" step:
 * PM must be off while the host is being suspended.
 */
static void ata_lpm_enable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;
		struct ata_device *dev;

		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

/*
 * Re-apply each port's stored pm_policy via EH - the resume
 * counterpart of ata_lpm_enable().
 */
static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_lpm_schedule(host->ports[i], host->ports[i]->pm_policy);
}
#endif	/* CONFIG_PM */
ca77329f 1066
1da177e4
LT
1067/**
1068 * ata_dev_classify - determine device type based on ATA-spec signature
1069 * @tf: ATA taskfile register set for device to be identified
1070 *
1071 * Determine from taskfile register contents whether a device is
1072 * ATA or ATAPI, as per "Signature and persistence" section
1073 * of ATA/PI spec (volume 1, sect 5.14).
1074 *
1075 * LOCKING:
1076 * None.
1077 *
1078 * RETURNS:
633273a3
TH
1079 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1080 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 1081 */
057ace5e 1082unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
1083{
1084 /* Apple's open source Darwin code hints that some devices only
1085 * put a proper signature into the LBA mid/high registers,
1086 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
1087 *
1088 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1089 * signatures for ATA and ATAPI devices attached on SerialATA,
1090 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1091 * spec has never mentioned about using different signatures
1092 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1093 * Multiplier specification began to use 0x69/0x96 to identify
1094 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1095 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1096 * 0x69/0x96 shortly and described them as reserved for
1097 * SerialATA.
1098 *
1099 * We follow the current spec and consider that 0x69/0x96
1100 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 1101 */
633273a3 1102 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
1103 DPRINTK("found ATA device by sig\n");
1104 return ATA_DEV_ATA;
1105 }
1106
633273a3 1107 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
1108 DPRINTK("found ATAPI device by sig\n");
1109 return ATA_DEV_ATAPI;
1110 }
1111
633273a3
TH
1112 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1113 DPRINTK("found PMP device by sig\n");
1114 return ATA_DEV_PMP;
1115 }
1116
1117 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 1118 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
1119 return ATA_DEV_SEMB_UNSUP; /* not yet */
1120 }
1121
1da177e4
LT
1122 DPRINTK("unknown device\n");
1123 return ATA_DEV_UNKNOWN;
1124}
1125
1da177e4 1126/**
6a62a04d 1127 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1128 * @id: IDENTIFY DEVICE results we will examine
1129 * @s: string into which data is output
1130 * @ofs: offset into identify device page
1131 * @len: length of string to return. must be an even number.
1132 *
1133 * The strings in the IDENTIFY DEVICE page are broken up into
1134 * 16-bit chunks. Run through the string, and output each
1135 * 8-bit chunk linearly, regardless of platform.
1136 *
1137 * LOCKING:
1138 * caller.
1139 */
1140
6a62a04d
TH
1141void ata_id_string(const u16 *id, unsigned char *s,
1142 unsigned int ofs, unsigned int len)
1da177e4
LT
1143{
1144 unsigned int c;
1145
1146 while (len > 0) {
1147 c = id[ofs] >> 8;
1148 *s = c;
1149 s++;
1150
1151 c = id[ofs] & 0xff;
1152 *s = c;
1153 s++;
1154
1155 ofs++;
1156 len -= 2;
1157 }
1158}
1159
0e949ff3 1160/**
6a62a04d 1161 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1162 * @id: IDENTIFY DEVICE results we will examine
1163 * @s: string into which data is output
1164 * @ofs: offset into identify device page
1165 * @len: length of string to return. must be an odd number.
1166 *
6a62a04d 1167 * This function is identical to ata_id_string except that it
0e949ff3
TH
1168 * trims trailing spaces and terminates the resulting string with
1169 * null. @len must be actual maximum length (even number) + 1.
1170 *
1171 * LOCKING:
1172 * caller.
1173 */
6a62a04d
TH
1174void ata_id_c_string(const u16 *id, unsigned char *s,
1175 unsigned int ofs, unsigned int len)
0e949ff3
TH
1176{
1177 unsigned char *p;
1178
1179 WARN_ON(!(len & 1));
1180
6a62a04d 1181 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1182
1183 p = s + strnlen(s, len - 1);
1184 while (p > s && p[-1] == ' ')
1185 p--;
1186 *p = '\0';
1187}
0baab86b 1188
db6f8759
TH
1189static u64 ata_id_n_sectors(const u16 *id)
1190{
1191 if (ata_id_has_lba(id)) {
1192 if (ata_id_has_lba48(id))
1193 return ata_id_u64(id, 100);
1194 else
1195 return ata_id_u32(id, 60);
1196 } else {
1197 if (ata_id_current_chs_valid(id))
1198 return ata_id_u32(id, 57);
1199 else
1200 return id[1] * id[3] * id[6];
1201 }
1202}
1203
a5987e0a 1204u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1e999736
AC
1205{
1206 u64 sectors = 0;
1207
1208 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1209 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1210 sectors |= (tf->hob_lbal & 0xff) << 24;
1211 sectors |= (tf->lbah & 0xff) << 16;
1212 sectors |= (tf->lbam & 0xff) << 8;
1213 sectors |= (tf->lbal & 0xff);
1214
a5987e0a 1215 return sectors;
1e999736
AC
1216}
1217
a5987e0a 1218u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1e999736
AC
1219{
1220 u64 sectors = 0;
1221
1222 sectors |= (tf->device & 0x0f) << 24;
1223 sectors |= (tf->lbah & 0xff) << 16;
1224 sectors |= (tf->lbam & 0xff) << 8;
1225 sectors |= (tf->lbal & 0xff);
1226
a5987e0a 1227 return sectors;
1e999736
AC
1228}
1229
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	/* Choose the LBA48 or LBA28 flavor of READ NATIVE MAX ADDRESS. */
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	/* no data transfer; the answer comes back in the taskfile */
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* device-aborted command is reported distinctly so the
		 * caller can treat it as "HPA not supported" */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result registers hold the last addressable sector; +1 = count */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	/* some drives report one sector too many; see ATA_HORKAGE_HPA_SIZE */
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1279
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the last addressable LBA, i.e. count - 1 */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		/* bits 24-47 go into the HOB registers */
		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 24-27 live in the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	/* low 24 bits are common to both flavors */
	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* ABORTED or IDNF from the drive maps to -EACCES so the
		 * caller can skip HPA handling instead of failing hard */
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1336
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  (not unlocking, or sizes already agree) */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		/* only report the discrepancy, don't change anything */
		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so dev->id reflects the new capacity */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1432
1da177e4
LT
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.  Debug-only output (DPRINTK); compiled away unless
 *	debugging is enabled.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* words 49/53/63/64/75: capabilities, field validity,
	 * MWDMA/PIO modes, queue depth */
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	/* words 80-84: version and command set support */
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	/* word 88: UDMA modes, word 93: hardware reset result */
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1471
cb95d562
TH
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 are always supported */
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes (word 163 per
		 * the CF spec: advanced PIO in bits 0-2, advanced DMA
		 * in bits 3-5)
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* UDMA modes (word 88) are only valid if word 53 bit 2 is set */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1540
/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@data: data for the port_task function to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule the port_task work for execution after @delay jiffies.
 *	There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
{
	/* stash the argument for the work function to pick up */
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1567
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancels the delayed work and waits out a re-arming instance */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}
1587
7102d230 1588static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1589{
77853bf2 1590 struct completion *waiting = qc->private_data;
a2a7a662 1591
a2a7a662 1592 complete(waiting);
a2a7a662
TH
1593}
1594
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the tag must be free; anything else is a bookkeeping bug */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the port's active-command bookkeeping so the
	 * internal command has the port to itself; restored below */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total transfer length is the sum of all sg entries */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit once a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the preempted command state saved above */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1776
2432697b 1777/**
33480a0e 1778 * ata_exec_internal - execute libata internal command
2432697b
TH
1779 * @dev: Device to which the command is sent
1780 * @tf: Taskfile registers for the command and the result
1781 * @cdb: CDB for packet command
1782 * @dma_dir: Data tranfer direction of the command
1783 * @buf: Data buffer of the command
1784 * @buflen: Length of data buffer
2b789108 1785 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1786 *
1787 * Wrapper around ata_exec_internal_sg() which takes simple
1788 * buffer instead of sg list.
1789 *
1790 * LOCKING:
1791 * None. Should be called with kernel context, might sleep.
1792 *
1793 * RETURNS:
1794 * Zero on success, AC_ERR_* mask on failure
1795 */
1796unsigned ata_exec_internal(struct ata_device *dev,
1797 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1798 int dma_dir, void *buf, unsigned int buflen,
1799 unsigned long timeout)
2432697b 1800{
33480a0e
TH
1801 struct scatterlist *psg = NULL, sg;
1802 unsigned int n_elem = 0;
2432697b 1803
33480a0e
TH
1804 if (dma_dir != DMA_NONE) {
1805 WARN_ON(!buf);
1806 sg_init_one(&sg, buf, buflen);
1807 psg = &sg;
1808 n_elem++;
1809 }
2432697b 1810
2b789108
TH
1811 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1812 timeout);
2432697b
TH
1813}
1814
977e6b9f
TH
1815/**
1816 * ata_do_simple_cmd - execute simple internal command
1817 * @dev: Device to which the command is sent
1818 * @cmd: Opcode to execute
1819 *
1820 * Execute a 'simple' command, that only consists of the opcode
1821 * 'cmd' itself, without filling any other registers
1822 *
1823 * LOCKING:
1824 * Kernel thread context (may sleep).
1825 *
1826 * RETURNS:
1827 * Zero on success, AC_ERR_* mask on failure
e58eb583 1828 */
77b08fb5 1829unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1830{
1831 struct ata_taskfile tf;
e58eb583
TH
1832
1833 ata_tf_init(dev, &tf);
1834
1835 tf.command = cmd;
1836 tf.flags |= ATA_TFLAG_DEVICE;
1837 tf.protocol = ATA_PROT_NODATA;
1838
2b789108 1839 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1840}
1841
1bc4ccff
AC
1842/**
1843 * ata_pio_need_iordy - check if iordy needed
1844 * @adev: ATA device
1845 *
1846 * Check if the current speed of the device requires IORDY. Used
1847 * by various controllers for chip configuration.
1848 */
a617c09f 1849
1bc4ccff
AC
1850unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1851{
432729f0
AC
1852 /* Controller doesn't support IORDY. Probably a pointless check
1853 as the caller should know this */
9af5c9c9 1854 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1855 return 0;
432729f0
AC
1856 /* PIO3 and higher it is mandatory */
1857 if (adev->pio_mode > XFER_PIO_2)
1858 return 1;
1859 /* We turn it on when possible */
1860 if (ata_id_has_iordy(adev->id))
1bc4ccff 1861 return 1;
432729f0
AC
1862 return 0;
1863}
2e9edbf8 1864
432729f0
AC
1865/**
1866 * ata_pio_mask_no_iordy - Return the non IORDY mask
1867 * @adev: ATA device
1868 *
1869 * Compute the highest mode possible if we are not using iordy. Return
1870 * -1 if no iordy mode is available.
1871 */
a617c09f 1872
432729f0
AC
1873static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1874{
1bc4ccff 1875 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1876 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1877 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1878 /* Is the speed faster than the drive allows non IORDY ? */
1879 if (pio) {
1880 /* This is cycle times not frequency - watch the logic! */
1881 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1882 return 3 << ATA_SHIFT_PIO;
1883 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1884 }
1885 }
432729f0 1886 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1887}
1888
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	/* one fallback to the other IDENTIFY flavor; one spin-up attempt */
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY opcode matching the (assumed) class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_printk(dev, KERN_DEBUG,
				       "NODEV after polling detection\n");
			return -ENOENT;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_printk(dev, KERN_DEBUG,
				       "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the data must agree with the class we asked for */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* word 2: 0x37c8/0x738c signal standby power-up requiring
	 * SET_FEATURES spin-up (see comment below) */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * shoud never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
2069
3373efd8 2070static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2071{
9af5c9c9
TH
2072 struct ata_port *ap = dev->link->ap;
2073 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2074}
2075
a6e6ce8e
TH
2076static void ata_dev_config_ncq(struct ata_device *dev,
2077 char *desc, size_t desc_sz)
2078{
9af5c9c9 2079 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2080 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2081
2082 if (!ata_id_has_ncq(dev->id)) {
2083 desc[0] = '\0';
2084 return;
2085 }
75683fe7 2086 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2087 snprintf(desc, desc_sz, "NCQ (not used)");
2088 return;
2089 }
a6e6ce8e 2090 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2091 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2092 dev->flags |= ATA_DFLAG_NCQ;
2093 }
2094
2095 if (hdepth >= ddepth)
2096 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2097 else
2098 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2099}
2100
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	/* Nothing to do for a disabled device; not an error. */
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage (quirk flags) from the blacklist and "libata.force" */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __func__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters so reconfiguration
	 * after e.g. a revalidation starts from a clean slate
	 */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* word 59: bit 8 set => multi-sector count in low byte valid */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads    = id[55];
				dev->sectors  = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
				       dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	/* give the LLD a chance to apply controller specific fixups last */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __func__);
	return rc;
}
2392
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40 unconditionally; @ap is not examined.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2405
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80 unconditionally; @ap is not examined.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2418
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK unconditionally; @ap is not examined.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2430
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_IGN unconditionally; @ap is not examined.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2442
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables
 *
 *	RETURNS:
 *	ATA_CBL_SATA unconditionally; @ap is not examined.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2454
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	/* per-device retry budget; a device with 0 tries left is disabled */
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev_reverse(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev points at the device whose probe step failed */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough -- -ENODEV also gets the last-chance slowdown */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2603
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing DISABLED re-enables probing for this port */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2619
3be680b7
TH
2620/**
2621 * sata_print_link_status - Print SATA link status
936fd732 2622 * @link: SATA link to printk link status about
3be680b7
TH
2623 *
2624 * This function prints link speed and status of a SATA link.
2625 *
2626 * LOCKING:
2627 * None.
2628 */
936fd732 2629void sata_print_link_status(struct ata_link *link)
3be680b7 2630{
6d5f9732 2631 u32 sstatus, scontrol, tmp;
3be680b7 2632
936fd732 2633 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2634 return;
936fd732 2635 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2636
936fd732 2637 if (ata_link_online(link)) {
3be680b7 2638 tmp = (sstatus >> 4) & 0xf;
936fd732 2639 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2640 "SATA link up %s (SStatus %X SControl %X)\n",
2641 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2642 } else {
936fd732 2643 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2644 "SATA link down (SStatus %X SControl %X)\n",
2645 sstatus, scontrol);
3be680b7
TH
2646 }
2647}
2648
ebdfca6e
AC
2649/**
2650 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2651 * @adev: device
2652 *
2653 * Obtain the other device on the same cable, or if none is
2654 * present NULL is returned
2655 */
2e9edbf8 2656
3373efd8 2657struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2658{
9af5c9c9
TH
2659 struct ata_link *link = adev->link;
2660 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2661 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2662 return NULL;
2663 return pair;
2664}
2665
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* mark both possible devices on the link absent */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2685
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	/* mask <= 1 means we are already limited to the lowest speed */
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2744
936fd732 2745static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2746{
5270222f
TH
2747 struct ata_link *host_link = &link->ap->link;
2748 u32 limit, target, spd;
1c3fae4d 2749
5270222f
TH
2750 limit = link->sata_spd_limit;
2751
2752 /* Don't configure downstream link faster than upstream link.
2753 * It doesn't speed up anything and some PMPs choke on such
2754 * configuration.
2755 */
2756 if (!ata_is_host_link(link) && host_link->sata_spd)
2757 limit &= (1 << host_link->sata_spd) - 1;
2758
2759 if (limit == UINT_MAX)
2760 target = 0;
1c3fae4d 2761 else
5270222f 2762 target = fls(limit);
1c3fae4d
TH
2763
2764 spd = (*scontrol >> 4) & 0xf;
5270222f 2765 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2766
5270222f 2767 return spd != target;
1c3fae4d
TH
2768}
2769
2770/**
3c567b7d 2771 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2772 * @link: Link in question
1c3fae4d
TH
2773 *
2774 * Test whether the spd limit in SControl matches
936fd732 2775 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2776 * whether hardreset is necessary to apply SATA spd
2777 * configuration.
2778 *
2779 * LOCKING:
2780 * Inherited from caller.
2781 *
2782 * RETURNS:
2783 * 1 if SATA spd configuration is needed, 0 otherwise.
2784 */
936fd732 2785int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2786{
2787 u32 scontrol;
2788
936fd732 2789 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2790 return 1;
1c3fae4d 2791
936fd732 2792 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2793}
2794
2795/**
3c567b7d 2796 * sata_set_spd - set SATA spd according to spd limit
936fd732 2797 * @link: Link to set SATA spd for
1c3fae4d 2798 *
936fd732 2799 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2800 *
2801 * LOCKING:
2802 * Inherited from caller.
2803 *
2804 * RETURNS:
2805 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2806 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2807 */
936fd732 2808int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2809{
2810 u32 scontrol;
81952c54 2811 int rc;
1c3fae4d 2812
936fd732 2813 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2814 return rc;
1c3fae4d 2815
936fd732 2816 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2817 return 0;
2818
936fd732 2819 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2820 return rc;
2821
1c3fae4d
TH
2822 return 1;
2823}
2824
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

/* Entries are sorted by mode value; lookups depend on that ordering
 * and on the trailing 0xFF sentinel (see ata_timing_find_mode()).
 */
static const struct ata_timing ata_timing[] = {
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },

	{ 0xFF }	/* sentinel -- must stay last */
};

/* Round @v up to a whole number of @unit; @v must be > 0 here */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
/* NOTE(review): EZ() evaluates @v twice -- keep arguments free of
 * side effects.  Zero input maps to zero output.
 */
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
/* Quantize timing @t (nanoseconds) into bus clock counts in @q.
 * PIO/MWDMA fields are divided by clock period @T, the UDMA field by
 * @UT; nonzero values round up, zero stays zero (see EZ()).  @q may
 * alias @t -- each field is read before it is written.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2883
/* Merge timings @a and @b into @m, taking the slower (larger) value of
 * each field selected by the ATA_TIMING_* bits in @what.  Fields not
 * selected are left untouched in @m.  @m may alias @a or @b.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2896
6357357c 2897const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 2898{
70cd071e
TH
2899 const struct ata_timing *t = ata_timing;
2900
2901 while (xfer_mode > t->mode)
2902 t++;
452503f9 2903
70cd071e
TH
2904 if (xfer_mode == t->mode)
2905 return t;
2906 return NULL;
452503f9
AC
2907}
2908
/**
 *	ata_timing_compute - compute complete timing for a transfer mode
 *	@adev: device the timing is for; its IDENTIFY data supplies
 *	       optional EIDE cycle-time overrides
 *	@speed: XFER_* mode to compute timing for
 *	@t: resulting timing, in bus clock counts
 *	@T: bus clock period in ns (PIO/MWDMA quantisation unit)
 *	@UT: UDMA clock period in ns (UDMA quantisation unit)
 *
 *	RETURNS:
 *	0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */
	/* note: recursion depth is 1 -- pio_mode is <= XFER_PIO_6 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2979
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fasted mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base mode (slowest mode) of the requested transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk faster modes of the same type while they still fit @cycle */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		/* pick the field that represents cycle time for this type */
		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3030
cf176e1a
TH
3031/**
3032 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 3033 * @dev: Device to adjust xfer masks
458337db 3034 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
3035 *
3036 * Adjust xfer masks of @dev downward. Note that this function
3037 * does not apply the change. Invoking ata_set_mode() afterwards
3038 * will apply the limit.
3039 *
3040 * LOCKING:
3041 * Inherited from caller.
3042 *
3043 * RETURNS:
3044 * 0 on success, negative errno on failure
3045 */
458337db 3046int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 3047{
458337db 3048 char buf[32];
7dc951ae
TH
3049 unsigned long orig_mask, xfer_mask;
3050 unsigned long pio_mask, mwdma_mask, udma_mask;
458337db 3051 int quiet, highbit;
cf176e1a 3052
458337db
TH
3053 quiet = !!(sel & ATA_DNXFER_QUIET);
3054 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 3055
458337db
TH
3056 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3057 dev->mwdma_mask,
3058 dev->udma_mask);
3059 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 3060
458337db
TH
3061 switch (sel) {
3062 case ATA_DNXFER_PIO:
3063 highbit = fls(pio_mask) - 1;
3064 pio_mask &= ~(1 << highbit);
3065 break;
3066
3067 case ATA_DNXFER_DMA:
3068 if (udma_mask) {
3069 highbit = fls(udma_mask) - 1;
3070 udma_mask &= ~(1 << highbit);
3071 if (!udma_mask)
3072 return -ENOENT;
3073 } else if (mwdma_mask) {
3074 highbit = fls(mwdma_mask) - 1;
3075 mwdma_mask &= ~(1 << highbit);
3076 if (!mwdma_mask)
3077 return -ENOENT;
3078 }
3079 break;
3080
3081 case ATA_DNXFER_40C:
3082 udma_mask &= ATA_UDMA_MASK_40C;
3083 break;
3084
3085 case ATA_DNXFER_FORCE_PIO0:
3086 pio_mask &= 1;
3087 case ATA_DNXFER_FORCE_PIO:
3088 mwdma_mask = 0;
3089 udma_mask = 0;
3090 break;
3091
458337db
TH
3092 default:
3093 BUG();
3094 }
3095
3096 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3097
3098 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3099 return -ENOENT;
3100
3101 if (!quiet) {
3102 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3103 snprintf(buf, sizeof(buf), "%s:%s",
3104 ata_mode_string(xfer_mask),
3105 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3106 else
3107 snprintf(buf, sizeof(buf), "%s",
3108 ata_mode_string(xfer_mask));
3109
3110 ata_dev_printk(dev, KERN_WARNING,
3111 "limiting speed to %s\n", buf);
3112 }
cf176e1a
TH
3113
3114 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3115 &dev->udma_mask);
3116
cf176e1a 3117 return 0;
cf176e1a
TH
3118}
3119
3373efd8 3120static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 3121{
9af5c9c9 3122 struct ata_eh_context *ehc = &dev->link->eh_context;
4055dee7
TH
3123 const char *dev_err_whine = "";
3124 int ign_dev_err = 0;
83206a29
TH
3125 unsigned int err_mask;
3126 int rc;
1da177e4 3127
e8384607 3128 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
3129 if (dev->xfer_shift == ATA_SHIFT_PIO)
3130 dev->flags |= ATA_DFLAG_PIO;
3131
3373efd8 3132 err_mask = ata_dev_set_xfermode(dev);
2dcb407e 3133
4055dee7
TH
3134 if (err_mask & ~AC_ERR_DEV)
3135 goto fail;
3136
3137 /* revalidate */
3138 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3139 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3140 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3141 if (rc)
3142 return rc;
3143
11750a40
A
3144 /* Old CFA may refuse this command, which is just fine */
3145 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
4055dee7 3146 ign_dev_err = 1;
2dcb407e 3147
0bc2a79a
AC
3148 /* Some very old devices and some bad newer ones fail any kind of
3149 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3150 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3151 dev->pio_mode <= XFER_PIO_2)
4055dee7 3152 ign_dev_err = 1;
2dcb407e 3153
3acaf94b
AC
3154 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3155 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
c5038fc0 3156 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3acaf94b
AC
3157 dev->dma_mode == XFER_MW_DMA_0 &&
3158 (dev->id[63] >> 8) & 1)
4055dee7 3159 ign_dev_err = 1;
3acaf94b 3160
4055dee7
TH
3161 /* if the device is actually configured correctly, ignore dev err */
3162 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3163 ign_dev_err = 1;
1da177e4 3164
4055dee7
TH
3165 if (err_mask & AC_ERR_DEV) {
3166 if (!ign_dev_err)
3167 goto fail;
3168 else
3169 dev_err_whine = " (device error ignored)";
3170 }
48a8a14f 3171
23e71c3d
TH
3172 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3173 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 3174
4055dee7
TH
3175 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3176 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3177 dev_err_whine);
3178
83206a29 3179 return 0;
4055dee7
TH
3180
3181 fail:
3182 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3183 "(err_mask=0x%x)\n", err_mask);
3184 return -EIO;
1da177e4
LT
3185}
3186
1da177e4 3187/**
04351821 3188 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3189 * @link: link on which timings will be programmed
1967b7ff 3190 * @r_failed_dev: out parameter for failed device
1da177e4 3191 *
04351821
A
3192 * Standard implementation of the function used to tune and set
3193 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3194 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3195 * returned in @r_failed_dev.
780a87f7 3196 *
1da177e4 3197 * LOCKING:
0cba632b 3198 * PCI/etc. bus probe sem.
e82cbdb9
TH
3199 *
3200 * RETURNS:
3201 * 0 on success, negative errno otherwise
1da177e4 3202 */
04351821 3203
0260731f 3204int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3205{
0260731f 3206 struct ata_port *ap = link->ap;
e8e0619f 3207 struct ata_device *dev;
f58229f8 3208 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3209
a6d5a51c 3210 /* step 1: calculate xfer_mask */
f58229f8 3211 ata_link_for_each_dev(dev, link) {
7dc951ae 3212 unsigned long pio_mask, dma_mask;
b3a70601 3213 unsigned int mode_mask;
a6d5a51c 3214
e1211e3f 3215 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3216 continue;
3217
b3a70601
AC
3218 mode_mask = ATA_DMA_MASK_ATA;
3219 if (dev->class == ATA_DEV_ATAPI)
3220 mode_mask = ATA_DMA_MASK_ATAPI;
3221 else if (ata_id_is_cfa(dev->id))
3222 mode_mask = ATA_DMA_MASK_CFA;
3223
3373efd8 3224 ata_dev_xfermask(dev);
33267325 3225 ata_force_xfermask(dev);
1da177e4 3226
acf356b1
TH
3227 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3228 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3229
3230 if (libata_dma_mask & mode_mask)
3231 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3232 else
3233 dma_mask = 0;
3234
acf356b1
TH
3235 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3236 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3237
4f65977d 3238 found = 1;
70cd071e 3239 if (dev->dma_mode != 0xff)
5444a6f4 3240 used_dma = 1;
a6d5a51c 3241 }
4f65977d 3242 if (!found)
e82cbdb9 3243 goto out;
a6d5a51c
TH
3244
3245 /* step 2: always set host PIO timings */
f58229f8 3246 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3247 if (!ata_dev_enabled(dev))
3248 continue;
3249
70cd071e 3250 if (dev->pio_mode == 0xff) {
f15a1daf 3251 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3252 rc = -EINVAL;
e82cbdb9 3253 goto out;
e8e0619f
TH
3254 }
3255
3256 dev->xfer_mode = dev->pio_mode;
3257 dev->xfer_shift = ATA_SHIFT_PIO;
3258 if (ap->ops->set_piomode)
3259 ap->ops->set_piomode(ap, dev);
3260 }
1da177e4 3261
a6d5a51c 3262 /* step 3: set host DMA timings */
f58229f8 3263 ata_link_for_each_dev(dev, link) {
70cd071e 3264 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
e8e0619f
TH
3265 continue;
3266
3267 dev->xfer_mode = dev->dma_mode;
3268 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3269 if (ap->ops->set_dmamode)
3270 ap->ops->set_dmamode(ap, dev);
3271 }
1da177e4
LT
3272
3273 /* step 4: update devices' xfer mode */
f58229f8 3274 ata_link_for_each_dev(dev, link) {
18d90deb 3275 /* don't update suspended devices' xfer mode */
9666f400 3276 if (!ata_dev_enabled(dev))
83206a29
TH
3277 continue;
3278
3373efd8 3279 rc = ata_dev_set_mode(dev);
5bbc53f4 3280 if (rc)
e82cbdb9 3281 goto out;
83206a29 3282 }
1da177e4 3283
e8e0619f
TH
3284 /* Record simplex status. If we selected DMA then the other
3285 * host channels are not permitted to do so.
5444a6f4 3286 */
cca3974e 3287 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3288 ap->host->simplex_claimed = ap;
5444a6f4 3289
e82cbdb9
TH
3290 out:
3291 if (rc)
3292 *r_failed_dev = dev;
3293 return rc;
1da177e4
LT
3294}
3295
aa2731ad
TH
3296/**
3297 * ata_wait_ready - wait for link to become ready
3298 * @link: link to be waited on
3299 * @deadline: deadline jiffies for the operation
3300 * @check_ready: callback to check link readiness
3301 *
3302 * Wait for @link to become ready. @check_ready should return
3303 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3304 * link doesn't seem to be occupied, other errno for other error
3305 * conditions.
3306 *
3307 * Transient -ENODEV conditions are allowed for
3308 * ATA_TMOUT_FF_WAIT.
3309 *
3310 * LOCKING:
3311 * EH context.
3312 *
3313 * RETURNS:
3314 * 0 if @linke is ready before @deadline; otherwise, -errno.
3315 */
3316int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3317 int (*check_ready)(struct ata_link *link))
3318{
3319 unsigned long start = jiffies;
3320 unsigned long nodev_deadline = start + ATA_TMOUT_FF_WAIT;
3321 int warned = 0;
3322
3323 if (time_after(nodev_deadline, deadline))
3324 nodev_deadline = deadline;
3325
3326 while (1) {
3327 unsigned long now = jiffies;
3328 int ready, tmp;
3329
3330 ready = tmp = check_ready(link);
3331 if (ready > 0)
3332 return 0;
3333
3334 /* -ENODEV could be transient. Ignore -ENODEV if link
3335 * is online. Also, some SATA devices take a long
3336 * time to clear 0xff after reset. For example,
3337 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3338 * GoVault needs even more than that. Wait for
3339 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3340 *
3341 * Note that some PATA controllers (pata_ali) explode
3342 * if status register is read more than once when
3343 * there's no device attached.
3344 */
3345 if (ready == -ENODEV) {
3346 if (ata_link_online(link))
3347 ready = 0;
3348 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3349 !ata_link_offline(link) &&
3350 time_before(now, nodev_deadline))
3351 ready = 0;
3352 }
3353
3354 if (ready)
3355 return ready;
3356 if (time_after(now, deadline))
3357 return -EBUSY;
3358
3359 if (!warned && time_after(now, start + 5 * HZ) &&
3360 (deadline - now > 3 * HZ)) {
3361 ata_link_printk(link, KERN_WARNING,
3362 "link is slow to respond, please be patient "
3363 "(ready=%d)\n", tmp);
3364 warned = 1;
3365 }
3366
3367 msleep(50);
3368 }
3369}
3370
3371/**
3372 * ata_wait_after_reset - wait for link to become ready after reset
3373 * @link: link to be waited on
3374 * @deadline: deadline jiffies for the operation
3375 * @check_ready: callback to check link readiness
3376 *
3377 * Wait for @link to become ready after reset.
3378 *
3379 * LOCKING:
3380 * EH context.
3381 *
3382 * RETURNS:
3383 * 0 if @linke is ready before @deadline; otherwise, -errno.
3384 */
3385extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3386 int (*check_ready)(struct ata_link *link))
3387{
3388 msleep(ATA_WAIT_AFTER_RESET_MSECS);
3389
3390 return ata_wait_ready(link, deadline, check_ready);
3391}
3392
d7bb4cc7 3393/**
936fd732
TH
3394 * sata_link_debounce - debounce SATA phy status
3395 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3396 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3397 * @deadline: deadline jiffies for the operation
d7bb4cc7 3398 *
936fd732 3399* Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3400 * holding the same value where DET is not 1 for @duration polled
3401 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3402 * beginning of the stable state. Because DET gets stuck at 1 on
3403 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3404 * until timeout then returns 0 if DET is stable at 1.
3405 *
d4b2bab4
TH
3406 * @timeout is further limited by @deadline. The sooner of the
3407 * two is used.
3408 *
d7bb4cc7
TH
3409 * LOCKING:
3410 * Kernel thread context (may sleep)
3411 *
3412 * RETURNS:
3413 * 0 on success, -errno on failure.
3414 */
936fd732
TH
3415int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3416 unsigned long deadline)
7a7921e8 3417{
d7bb4cc7 3418 unsigned long interval_msec = params[0];
d4b2bab4
TH
3419 unsigned long duration = msecs_to_jiffies(params[1]);
3420 unsigned long last_jiffies, t;
d7bb4cc7
TH
3421 u32 last, cur;
3422 int rc;
3423
d4b2bab4
TH
3424 t = jiffies + msecs_to_jiffies(params[2]);
3425 if (time_before(t, deadline))
3426 deadline = t;
3427
936fd732 3428 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3429 return rc;
3430 cur &= 0xf;
3431
3432 last = cur;
3433 last_jiffies = jiffies;
3434
3435 while (1) {
3436 msleep(interval_msec);
936fd732 3437 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3438 return rc;
3439 cur &= 0xf;
3440
3441 /* DET stable? */
3442 if (cur == last) {
d4b2bab4 3443 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3444 continue;
3445 if (time_after(jiffies, last_jiffies + duration))
3446 return 0;
3447 continue;
3448 }
3449
3450 /* unstable, start over */
3451 last = cur;
3452 last_jiffies = jiffies;
3453
f1545154
TH
3454 /* Check deadline. If debouncing failed, return
3455 * -EPIPE to tell upper layer to lower link speed.
3456 */
d4b2bab4 3457 if (time_after(jiffies, deadline))
f1545154 3458 return -EPIPE;
d7bb4cc7
TH
3459 }
3460}
3461
3462/**
936fd732
TH
3463 * sata_link_resume - resume SATA link
3464 * @link: ATA link to resume SATA
d7bb4cc7 3465 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3466 * @deadline: deadline jiffies for the operation
d7bb4cc7 3467 *
936fd732 3468 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3469 *
3470 * LOCKING:
3471 * Kernel thread context (may sleep)
3472 *
3473 * RETURNS:
3474 * 0 on success, -errno on failure.
3475 */
936fd732
TH
3476int sata_link_resume(struct ata_link *link, const unsigned long *params,
3477 unsigned long deadline)
d7bb4cc7 3478{
ac371987 3479 u32 scontrol, serror;
81952c54
TH
3480 int rc;
3481
936fd732 3482 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3483 return rc;
7a7921e8 3484
852ee16a 3485 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3486
936fd732 3487 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3488 return rc;
7a7921e8 3489
d7bb4cc7
TH
3490 /* Some PHYs react badly if SStatus is pounded immediately
3491 * after resuming. Delay 200ms before debouncing.
3492 */
3493 msleep(200);
7a7921e8 3494
ac371987
TH
3495 if ((rc = sata_link_debounce(link, params, deadline)))
3496 return rc;
3497
3498 /* Clear SError. PMP and some host PHYs require this to
3499 * operate and clearing should be done before checking PHY
3500 * online status to avoid race condition (hotplugging between
3501 * link resume and status check).
3502 */
3503 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3504 rc = sata_scr_write(link, SCR_ERROR, serror);
3505 if (rc == 0 || rc == -EINVAL) {
3506 unsigned long flags;
3507
3508 spin_lock_irqsave(link->ap->lock, flags);
3509 link->eh_info.serror = 0;
3510 spin_unlock_irqrestore(link->ap->lock, flags);
3511 rc = 0;
3512 }
3513 return rc;
7a7921e8
TH
3514}
3515
f5914a46 3516/**
0aa1113d 3517 * ata_std_prereset - prepare for reset
cc0680a5 3518 * @link: ATA link to be reset
d4b2bab4 3519 * @deadline: deadline jiffies for the operation
f5914a46 3520 *
cc0680a5 3521 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3522 * prereset makes libata abort whole reset sequence and give up
3523 * that port, so prereset should be best-effort. It does its
3524 * best to prepare for reset sequence but if things go wrong, it
3525 * should just whine, not fail.
f5914a46
TH
3526 *
3527 * LOCKING:
3528 * Kernel thread context (may sleep)
3529 *
3530 * RETURNS:
3531 * 0 on success, -errno otherwise.
3532 */
0aa1113d 3533int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3534{
cc0680a5 3535 struct ata_port *ap = link->ap;
936fd732 3536 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3537 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3538 int rc;
3539
f5914a46
TH
3540 /* if we're about to do hardreset, nothing more to do */
3541 if (ehc->i.action & ATA_EH_HARDRESET)
3542 return 0;
3543
936fd732 3544 /* if SATA, resume link */
a16abc0b 3545 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3546 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3547 /* whine about phy resume failure but proceed */
3548 if (rc && rc != -EOPNOTSUPP)
cc0680a5 3549 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 3550 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3551 }
3552
f5914a46
TH
3553 return 0;
3554}
3555
c2bd5804 3556/**
624d5c51
TH
3557 * sata_link_hardreset - reset link via SATA phy reset
3558 * @link: link to reset
3559 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3560 * @deadline: deadline jiffies for the operation
9dadd45b
TH
3561 * @online: optional out parameter indicating link onlineness
3562 * @check_ready: optional callback to check link readiness
c2bd5804 3563 *
624d5c51 3564 * SATA phy-reset @link using DET bits of SControl register.
9dadd45b
TH
3565 * After hardreset, link readiness is waited upon using
3566 * ata_wait_ready() if @check_ready is specified. LLDs are
3567 * allowed to not specify @check_ready and wait itself after this
3568 * function returns. Device classification is LLD's
3569 * responsibility.
3570 *
3571 * *@online is set to one iff reset succeeded and @link is online
3572 * after reset.
c2bd5804
TH
3573 *
3574 * LOCKING:
3575 * Kernel thread context (may sleep)
3576 *
3577 * RETURNS:
3578 * 0 on success, -errno otherwise.
3579 */
624d5c51 3580int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
9dadd45b
TH
3581 unsigned long deadline,
3582 bool *online, int (*check_ready)(struct ata_link *))
c2bd5804 3583{
624d5c51 3584 u32 scontrol;
81952c54 3585 int rc;
852ee16a 3586
c2bd5804
TH
3587 DPRINTK("ENTER\n");
3588
9dadd45b
TH
3589 if (online)
3590 *online = false;
3591
936fd732 3592 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
3593 /* SATA spec says nothing about how to reconfigure
3594 * spd. To be on the safe side, turn off phy during
3595 * reconfiguration. This works for at least ICH7 AHCI
3596 * and Sil3124.
3597 */
936fd732 3598 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3599 goto out;
81952c54 3600
a34b6fc0 3601 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 3602
936fd732 3603 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 3604 goto out;
1c3fae4d 3605
936fd732 3606 sata_set_spd(link);
1c3fae4d
TH
3607 }
3608
3609 /* issue phy wake/reset */
936fd732 3610 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3611 goto out;
81952c54 3612
852ee16a 3613 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 3614
936fd732 3615 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 3616 goto out;
c2bd5804 3617
1c3fae4d 3618 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3619 * 10.4.2 says at least 1 ms.
3620 */
3621 msleep(1);
3622
936fd732
TH
3623 /* bring link back */
3624 rc = sata_link_resume(link, timing, deadline);
9dadd45b
TH
3625 if (rc)
3626 goto out;
3627 /* if link is offline nothing more to do */
3628 if (ata_link_offline(link))
3629 goto out;
3630
3631 /* Link is online. From this point, -ENODEV too is an error. */
3632 if (online)
3633 *online = true;
3634
3635 if ((link->ap->flags & ATA_FLAG_PMP) && ata_is_host_link(link)) {
3636 /* If PMP is supported, we have to do follow-up SRST.
3637 * Some PMPs don't send D2H Reg FIS after hardreset if
3638 * the first port is empty. Wait only for
3639 * ATA_TMOUT_PMP_SRST_WAIT.
3640 */
3641 if (check_ready) {
3642 unsigned long pmp_deadline;
3643
3644 pmp_deadline = jiffies + ATA_TMOUT_PMP_SRST_WAIT;
3645 if (time_after(pmp_deadline, deadline))
3646 pmp_deadline = deadline;
3647 ata_wait_ready(link, pmp_deadline, check_ready);
3648 }
3649 rc = -EAGAIN;
3650 goto out;
3651 }
3652
3653 rc = 0;
3654 if (check_ready)
3655 rc = ata_wait_ready(link, deadline, check_ready);
b6103f6d 3656 out:
9dadd45b
TH
3657 if (rc && rc != -EAGAIN)
3658 ata_link_printk(link, KERN_ERR,
3659 "COMRESET failed (errno=%d)\n", rc);
b6103f6d
TH
3660 DPRINTK("EXIT, rc=%d\n", rc);
3661 return rc;
3662}
3663
57c9efdf
TH
3664/**
3665 * sata_std_hardreset - COMRESET w/o waiting or classification
3666 * @link: link to reset
3667 * @class: resulting class of attached device
3668 * @deadline: deadline jiffies for the operation
3669 *
3670 * Standard SATA COMRESET w/o waiting or classification.
3671 *
3672 * LOCKING:
3673 * Kernel thread context (may sleep)
3674 *
3675 * RETURNS:
3676 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3677 */
3678int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3679 unsigned long deadline)
3680{
3681 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3682 bool online;
3683 int rc;
3684
3685 /* do hardreset */
3686 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
57c9efdf
TH
3687 return online ? -EAGAIN : rc;
3688}
3689
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	DPRINTK("EXIT\n");
}
3711
623a3128
TH
3712/**
3713 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3714 * @dev: device to compare against
3715 * @new_class: class of the new device
3716 * @new_id: IDENTIFY page of the new device
3717 *
3718 * Compare @new_class and @new_id against @dev and determine
3719 * whether @dev is the device indicated by @new_class and
3720 * @new_id.
3721 *
3722 * LOCKING:
3723 * None.
3724 *
3725 * RETURNS:
3726 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3727 */
3373efd8
TH
3728static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3729 const u16 *new_id)
623a3128
TH
3730{
3731 const u16 *old_id = dev->id;
a0cf733b
TH
3732 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3733 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3734
3735 if (dev->class != new_class) {
f15a1daf
TH
3736 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3737 dev->class, new_class);
623a3128
TH
3738 return 0;
3739 }
3740
a0cf733b
TH
3741 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3742 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3743 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3744 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3745
3746 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3747 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3748 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3749 return 0;
3750 }
3751
3752 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3753 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3754 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3755 return 0;
3756 }
3757
623a3128
TH
3758 return 1;
3759}
3760
3761/**
fe30911b 3762 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3763 * @dev: target ATA device
bff04647 3764 * @readid_flags: read ID flags
623a3128
TH
3765 *
3766 * Re-read IDENTIFY page and make sure @dev is still attached to
3767 * the port.
3768 *
3769 * LOCKING:
3770 * Kernel thread context (may sleep)
3771 *
3772 * RETURNS:
3773 * 0 on success, negative errno otherwise
3774 */
fe30911b 3775int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3776{
5eb45c02 3777 unsigned int class = dev->class;
9af5c9c9 3778 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3779 int rc;
3780
fe635c7e 3781 /* read ID data */
bff04647 3782 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3783 if (rc)
fe30911b 3784 return rc;
623a3128
TH
3785
3786 /* is the device still there? */
fe30911b
TH
3787 if (!ata_dev_same_device(dev, class, id))
3788 return -ENODEV;
623a3128 3789
fe635c7e 3790 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3791 return 0;
3792}
3793
3794/**
3795 * ata_dev_revalidate - Revalidate ATA device
3796 * @dev: device to revalidate
422c9daa 3797 * @new_class: new class code
fe30911b
TH
3798 * @readid_flags: read ID flags
3799 *
3800 * Re-read IDENTIFY page, make sure @dev is still attached to the
3801 * port and reconfigure it according to the new IDENTIFY page.
3802 *
3803 * LOCKING:
3804 * Kernel thread context (may sleep)
3805 *
3806 * RETURNS:
3807 * 0 on success, negative errno otherwise
3808 */
422c9daa
TH
3809int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3810 unsigned int readid_flags)
fe30911b 3811{
6ddcd3b0 3812 u64 n_sectors = dev->n_sectors;
fe30911b
TH
3813 int rc;
3814
3815 if (!ata_dev_enabled(dev))
3816 return -ENODEV;
3817
422c9daa
TH
3818 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3819 if (ata_class_enabled(new_class) &&
3820 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3821 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3822 dev->class, new_class);
3823 rc = -ENODEV;
3824 goto fail;
3825 }
3826
fe30911b
TH
3827 /* re-read ID */
3828 rc = ata_dev_reread_id(dev, readid_flags);
3829 if (rc)
3830 goto fail;
623a3128
TH
3831
3832 /* configure device according to the new ID */
efdaedc4 3833 rc = ata_dev_configure(dev);
6ddcd3b0
TH
3834 if (rc)
3835 goto fail;
3836
3837 /* verify n_sectors hasn't changed */
b54eebd6
TH
3838 if (dev->class == ATA_DEV_ATA && n_sectors &&
3839 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
3840 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3841 "%llu != %llu\n",
3842 (unsigned long long)n_sectors,
3843 (unsigned long long)dev->n_sectors);
8270bec4
TH
3844
3845 /* restore original n_sectors */
3846 dev->n_sectors = n_sectors;
3847
6ddcd3b0
TH
3848 rc = -ENODEV;
3849 goto fail;
3850 }
3851
3852 return 0;
623a3128
TH
3853
3854 fail:
f15a1daf 3855 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3856 return rc;
3857}
3858
6919a0a6
AC
3859struct ata_blacklist_entry {
3860 const char *model_num;
3861 const char *model_rev;
3862 unsigned long horkage;
3863};
3864
3865static const struct ata_blacklist_entry ata_device_blacklist [] = {
3866 /* Devices with DMA related problems under Linux */
3867 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3868 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3869 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3870 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3871 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3872 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3873 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3874 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3875 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3876 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3877 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3878 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3879 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3880 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3881 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3882 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3883 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3884 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3885 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3886 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3887 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3888 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3889 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3890 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3891 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3892 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3893 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3894 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 3895 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 3896 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3af9a77a
TH
3897 /* Odd clown on sil3726/4726 PMPs */
3898 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
3899 ATA_HORKAGE_SKIP_PM },
6919a0a6 3900
18d6e9d5 3901 /* Weird ATAPI devices */
40a1d531 3902 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 3903
6919a0a6
AC
3904 /* Devices we expect to fail diagnostics */
3905
3906 /* Devices where NCQ should be avoided */
3907 /* NCQ is slow */
2dcb407e 3908 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
459ad688 3909 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
09125ea6
TH
3910 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3911 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 3912 /* NCQ is broken */
539cc7c7 3913 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 3914 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
da6f0ec2 3915 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
e41bd3e8 3916 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
539cc7c7 3917
36e337d0
RH
3918 /* Blacklist entries taken from Silicon Image 3124/3132
3919 Windows driver .inf file - also several Linux problem reports */
3920 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3921 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3922 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6 3923
16c55b03
TH
3924 /* devices which puke on READ_NATIVE_MAX */
3925 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3926 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3927 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3928 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 3929
93328e11
AC
3930 /* Devices which report 1 sector over size HPA */
3931 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
3932 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
b152fcd3 3933 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
93328e11 3934
6bbfd53d
AC
3935 /* Devices which get the IVB wrong */
3936 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
3937 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
e9f33406
PM
3938 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
3939 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
3940 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
6bbfd53d 3941
6919a0a6
AC
3942 /* End Marker */
3943 { }
1da177e4 3944};
2e9edbf8 3945
/* Compare @name against pattern @patt, where a trailing @wildchar in
 * @patt matches any suffix.  Without a trailing wildcard only the
 * first strlen(@name) characters of @patt are compared (so @name may
 * be a prefix of @patt).  Returns 0 on match, non-zero otherwise. */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wild;
	int cmp_len;

	/*
	 * check for trailing wildcard: *\0
	 */
	wild = strchr(patt, wildchar);
	if (wild != NULL && wild[1] == '\0') {
		/* compare only the literal prefix before the wildcard */
		cmp_len = wild - patt;
	} else {
		cmp_len = strlen(name);
		if (cmp_len == 0)
			return *patt ? -1 : 0;
	}

	return strncmp(patt, name, cmp_len);
}
3968
/**
 *	ata_dev_blacklisted - look up device in the horkage blacklist
 *	@dev: device whose IDENTIFY data should be matched
 *
 *	Extract the model number and firmware revision strings from
 *	@dev's IDENTIFY data and walk ata_device_blacklist looking for
 *	a matching entry ('*' acts as a trailing wildcard, see
 *	strn_pattern_cmp()).
 *
 *	RETURNS:
 *	ATA_HORKAGE_* flags of the first matching entry, 0 if the
 *	device is not blacklisted.
 */
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	/* table is terminated by an entry with NULL model_num */
	while (ad->model_num) {
		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
			/* NULL model_rev matches any firmware revision */
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
3989
6919a0a6
AC
/* Return 1 if DMA must not be used with @dev, 0 otherwise.  Combines
 * the per-device ATA_HORKAGE_NODMA blacklist flag with the
 * polling-port restriction below.
 */
static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}
4001
6bbfd53d
AC
/**
 *	ata_is_40wire - check drive side detection
 *	@dev: device
 *
 *	Perform drive side detection decoding, allowing for device vendors
 *	who can't follow the documentation.
 *
 *	RETURNS:
 *	1 if the drive reports a 40-wire cable, 0 otherwise.
 */

static int ata_is_40wire(struct ata_device *dev)
{
	/* devices flagged ATA_HORKAGE_IVB get the documented bits wrong;
	 * use the relaxed decode for them
	 */
	if (dev->horkage & ATA_HORKAGE_IVB)
		return ata_drive_40wire_relaxed(dev->id);
	return ata_drive_40wire(dev->id);
}
4016
15a5551c
AC
/**
 *	cable_is_40wire - 40/80/SATA decider
 *	@ap: port to consider
 *
 *	This function encapsulates the policy for speed management
 *	in one place.  At the moment we don't cache the result but
 *	there is a good case for setting ap->cbl to the result when
 *	we are called with unknown cables (and figuring out if it
 *	impacts hotplug at all).
 *
 *	Return 1 if the cable appears to be 40 wire.
 */

static int cable_is_40wire(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* If the controller thinks we are 40 wire, we are */
	if (ap->cbl == ATA_CBL_PATA40)
		return 1;
	/* If the controller thinks we are 80 wire, we are */
	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
		return 0;
	/* If the controller doesn't know, we scan the drives.
	 *
	 * Note: We look for all 40 wire detects at this point.  Any
	 * 80 wire detect is taken to be 80 wire cable because:
	 * - in many setups only the one drive (slave if present) will
	 *   give a valid detect
	 * - if you have a non detect capable drive you don't want it
	 *   to colour the choice
	 */
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link) {
			if (!ata_is_40wire(dev))
				return 0;
		}
	}
	return 1;
}
4059
a6d5a51c
TH
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* a simplex host lets only one port use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to veto modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well.  Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_printk(dev, KERN_WARNING,
				 "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4137
1da177e4
LT
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *	@feature: The sector count represents the feature to set
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count set to @feature.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
					u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;	/* SETFEATURES_SATA_ENABLE / _DISABLE subcommand */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4218
8bf62ece
AL
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4261
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* n_elem == 0 means ata_sg_setup() never mapped anything */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
4287
1da177e4
LT
/**
 *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
 *	a status indicating whether or not it is OK to use DMA for the
 *	supplied PACKET command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *		 nonzero otherwise
 */
int ata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
	 * few ATAPI devices choke on such DMA requests.
	 */
	if (unlikely(qc->nbytes & 15))
		return 1;

	/* let the LLDD apply controller-specific restrictions */
	if (ap->ops->check_atapi_dma)
		return ap->ops->check_atapi_dma(qc);

	return 0;
}
1da177e4 4317
624d5c51
TH
/**
 *	ata_std_qc_defer - Check whether a qc needs to be deferred
 *	@qc: ATA command in question
 *
 *	Non-NCQ commands cannot run with any other command, NCQ or
 *	not.  As upper layer only knows the queue depth, we are
 *	responsible for maintaining exclusion.  This function checks
 *	whether a new command @qc can be issued.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
 */
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* NCQ command: OK unless a non-NCQ command is in flight */
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		/* non-NCQ command: needs the link completely idle */
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;
}
6912ccd5 4347
/* No-op ->qc_prep for controllers that need no per-command preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
1da177e4 4349
624d5c51
TH
/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	/* cursg tracks PIO progress through the table */
	qc->cursg = qc->sg;
}
bb5cb290 4370
624d5c51
TH
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);

	/* the IOMMU may have coalesced entries; record the mapped count */
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}
4402
624d5c51
TH
4403/**
4404 * swap_buf_le16 - swap halves of 16-bit words in place
4405 * @buf: Buffer to swap
4406 * @buf_words: Number of 16-bit words in buffer.
4407 *
4408 * Swap halves of 16-bit words if needed to convert from
4409 * little-endian byte order to native cpu byte order, or
4410 * vice-versa.
4411 *
4412 * LOCKING:
4413 * Inherited from caller.
4414 */
4415void swap_buf_le16(u16 *buf, unsigned int buf_words)
8061f5f0 4416{
624d5c51
TH
4417#ifdef __BIG_ENDIAN
4418 unsigned int i;
8061f5f0 4419
624d5c51
TH
4420 for (i = 0; i < buf_words; i++)
4421 buf[i] = le16_to_cpu(buf[i]);
4422#endif /* __BIG_ENDIAN */
8061f5f0
TH
4423}
4424
1da177e4
LT
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port associated with device @dev
 *	@dev: Device from whom we request an available command structure
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Pointer to a free qc with its tag set, or NULL if the port is
 *	frozen or no tag is available.
 */

static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			/* atomically claimed tag i */
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
4455
/**
 *	ata_qc_new_init - Request an available ATA command, and initialize it
 *	@dev: Device from whom we request an available command structure
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Pointer to an initialized qc, or NULL if none is available.
 */

struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;

	qc = ata_qc_new(ap);
	if (qc) {
		qc->scsicmd = NULL;
		qc->ap = ap;
		qc->dev = dev;

		/* reset all remaining fields to a clean state */
		ata_qc_reinit(qc);
	}

	return qc;
}
4480
1da177e4
LT
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison the tag before releasing it so stale users trip */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
4505
/* Low-level qc completion: unmap DMA if mapped, drop the command from
 * the link/port active bookkeeping and invoke the completion callback.
 * Caller must hold the host lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4542
39599a53
TH
/* Read the result taskfile from hardware into qc->result_tf via the
 * port's ->qc_fill_rtf hook, preserving the original taskfile flags.
 */
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->qc_fill_rtf(qc);
}
4550
00115e0f
TH
/* Called on command completion while ATA_DFLAG_DUBIOUS_XFER is set:
 * if this command actually exercised the configured transfer mode,
 * the mode is proven and the dubious flag is cleared.
 */
static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;

	/* internal commands prove nothing about the normal data path */
	if (ata_tag_internal(qc->tag))
		return;

	/* non-data commands don't exercise a transfer mode */
	if (ata_is_nodata(qc->tf.protocol))
		return;

	/* PIO success doesn't validate a configured DMA mode */
	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
		return;

	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}
4566
f686bcb8
TH
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* new EH path */
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal commands are handled by their waiter,
			 * not by EH scheduling
			 */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH path */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
4653
dedaf2b0
TH
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *
 *	Complete in-flight commands.  This functions is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active is compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	/* bits that changed between the old and new active masks */
	done_mask = ap->qc_active ^ qc_active;

	/* a bit may only go active->inactive here; anything else is a bug */
	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
4698
1da177e4
LT
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* record the command in the link/port active bookkeeping */
	if (ata_is_ncq(prot)) {
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sg_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	/* completes the qc with err_mask set; EH takes over from there */
	ata_qc_complete(qc);
}
4769
34bf2170
TH
/**
 *	sata_scr_valid - test whether SCRs are accessible
 *	@link: ATA link to test SCR accessibility for
 *
 *	Test whether SCRs are accessible for @link.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_link *link)
{
	struct ata_port *ap = link->ap;

	/* need both a SATA port and an scr_read hook */
	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}
4788
/**
 *	sata_scr_read - read SCR register of the specified port
 *	@link: ATA link to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @link into *@val.  This function is
 *	guaranteed to succeed if @link is ap->link, the cable type of
 *	the port is SATA and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_link *link, int reg, u32 *val)
{
	if (ata_is_host_link(link)) {
		struct ata_port *ap = link->ap;

		if (sata_scr_valid(link))
			return ap->ops->scr_read(ap, reg, val);
		return -EOPNOTSUPP;
	}

	/* PMP links go through the port multiplier helpers */
	return sata_pmp_scr_read(link, reg, val);
}
4817
/**
 *	sata_scr_write - write SCR register of the specified port
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @link.  This function is
 *	guaranteed to succeed if @link is ap->link, the cable type of
 *	the port is SATA and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		struct ata_port *ap = link->ap;

		if (sata_scr_valid(link))
			return ap->ops->scr_write(ap, reg, val);
		return -EOPNOTSUPP;
	}

	/* PMP links go through the port multiplier helpers */
	return sata_pmp_scr_write(link, reg, val);
}
4846
/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		struct ata_port *ap = link->ap;
		int rc;

		if (sata_scr_valid(link)) {
			rc = ap->ops->scr_write(ap, reg, val);
			/* read the register back to flush the write */
			if (rc == 0)
				rc = ap->ops->scr_read(ap, reg, &val);
			return rc;
		}
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_write(link, reg, val);
}
4879
4880/**
936fd732
TH
4881 * ata_link_online - test whether the given link is online
4882 * @link: ATA link to test
34bf2170 4883 *
936fd732
TH
4884 * Test whether @link is online. Note that this function returns
4885 * 0 if online status of @link cannot be obtained, so
4886 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4887 *
4888 * LOCKING:
4889 * None.
4890 *
4891 * RETURNS:
4892 * 1 if the port online status is available and online.
4893 */
936fd732 4894int ata_link_online(struct ata_link *link)
34bf2170
TH
4895{
4896 u32 sstatus;
4897
936fd732
TH
4898 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4899 (sstatus & 0xf) == 0x3)
34bf2170
TH
4900 return 1;
4901 return 0;
4902}
4903
4904/**
936fd732
TH
4905 * ata_link_offline - test whether the given link is offline
4906 * @link: ATA link to test
34bf2170 4907 *
936fd732
TH
4908 * Test whether @link is offline. Note that this function
4909 * returns 0 if offline status of @link cannot be obtained, so
4910 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4911 *
4912 * LOCKING:
4913 * None.
4914 *
4915 * RETURNS:
4916 * 1 if the port offline status is available and offline.
4917 */
936fd732 4918int ata_link_offline(struct ata_link *link)
34bf2170
TH
4919{
4920 u32 sstatus;
4921
936fd732
TH
4922 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4923 (sstatus & 0xf) != 0x3)
34bf2170
TH
4924 return 1;
4925 return 0;
4926}
0baab86b 4927
6ffa01d8 4928#ifdef CONFIG_PM
cca3974e
JG
4929static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
4930 unsigned int action, unsigned int ehi_flags,
4931 int wait)
500530f6
TH
4932{
4933 unsigned long flags;
4934 int i, rc;
4935
cca3974e
JG
4936 for (i = 0; i < host->n_ports; i++) {
4937 struct ata_port *ap = host->ports[i];
e3667ebf 4938 struct ata_link *link;
500530f6
TH
4939
4940 /* Previous resume operation might still be in
4941 * progress. Wait for PM_PENDING to clear.
4942 */
4943 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4944 ata_port_wait_eh(ap);
4945 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4946 }
4947
4948 /* request PM ops to EH */
4949 spin_lock_irqsave(ap->lock, flags);
4950
4951 ap->pm_mesg = mesg;
4952 if (wait) {
4953 rc = 0;
4954 ap->pm_result = &rc;
4955 }
4956
4957 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
4958 __ata_port_for_each_link(link, ap) {
4959 link->eh_info.action |= action;
4960 link->eh_info.flags |= ehi_flags;
4961 }
500530f6
TH
4962
4963 ata_port_schedule_eh(ap);
4964
4965 spin_unlock_irqrestore(ap->lock, flags);
4966
4967 /* wait and check result */
4968 if (wait) {
4969 ata_port_wait_eh(ap);
4970 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4971 if (rc)
4972 return rc;
4973 }
4974 }
4975
4976 return 0;
4977}
4978
4979/**
cca3974e
JG
4980 * ata_host_suspend - suspend host
4981 * @host: host to suspend
500530f6
TH
4982 * @mesg: PM message
4983 *
cca3974e 4984 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
4985 * function requests EH to perform PM operations and waits for EH
4986 * to finish.
4987 *
4988 * LOCKING:
4989 * Kernel thread context (may sleep).
4990 *
4991 * RETURNS:
4992 * 0 on success, -errno on failure.
4993 */
cca3974e 4994int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 4995{
9666f400 4996 int rc;
500530f6 4997
ca77329f
KCA
4998 /*
4999 * disable link pm on all ports before requesting
5000 * any pm activity
5001 */
5002 ata_lpm_enable(host);
5003
cca3974e 5004 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
72ad6ec4
JG
5005 if (rc == 0)
5006 host->dev->power.power_state = mesg;
500530f6
TH
5007 return rc;
5008}
5009
5010/**
cca3974e
JG
5011 * ata_host_resume - resume host
5012 * @host: host to resume
500530f6 5013 *
cca3974e 5014 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5015 * function requests EH to perform PM operations and returns.
5016 * Note that all resume operations are performed parallely.
5017 *
5018 * LOCKING:
5019 * Kernel thread context (may sleep).
5020 */
cca3974e 5021void ata_host_resume(struct ata_host *host)
500530f6 5022{
cf480626 5023 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
cca3974e 5024 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
72ad6ec4 5025 host->dev->power.power_state = PMSG_ON;
ca77329f
KCA
5026
5027 /* reenable link pm */
5028 ata_lpm_disable(host);
500530f6 5029}
6ffa01d8 5030#endif
500530f6 5031
c893a3ae
RD
5032/**
5033 * ata_port_start - Set port up for dma.
5034 * @ap: Port to initialize
5035 *
5036 * Called just after data structures for each port are
5037 * initialized. Allocates space for PRD table.
5038 *
5039 * May be used as the port_start() entry in ata_port_operations.
5040 *
5041 * LOCKING:
5042 * Inherited from caller.
5043 */
f0d36efd 5044int ata_port_start(struct ata_port *ap)
1da177e4 5045{
2f1f610b 5046 struct device *dev = ap->dev;
1da177e4 5047
f0d36efd
TH
5048 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5049 GFP_KERNEL);
1da177e4
LT
5050 if (!ap->prd)
5051 return -ENOMEM;
5052
1da177e4
LT
5053 return 0;
5054}
5055
3ef3b43d
TH
5056/**
5057 * ata_dev_init - Initialize an ata_device structure
5058 * @dev: Device structure to initialize
5059 *
5060 * Initialize @dev in preparation for probing.
5061 *
5062 * LOCKING:
5063 * Inherited from caller.
5064 */
5065void ata_dev_init(struct ata_device *dev)
5066{
9af5c9c9
TH
5067 struct ata_link *link = dev->link;
5068 struct ata_port *ap = link->ap;
72fa4b74
TH
5069 unsigned long flags;
5070
5a04bf4b 5071 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
5072 link->sata_spd_limit = link->hw_sata_spd_limit;
5073 link->sata_spd = 0;
5a04bf4b 5074
72fa4b74
TH
5075 /* High bits of dev->flags are used to record warm plug
5076 * requests which occur asynchronously. Synchronize using
cca3974e 5077 * host lock.
72fa4b74 5078 */
ba6a1308 5079 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5080 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 5081 dev->horkage = 0;
ba6a1308 5082 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5083
72fa4b74
TH
5084 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5085 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
5086 dev->pio_mask = UINT_MAX;
5087 dev->mwdma_mask = UINT_MAX;
5088 dev->udma_mask = UINT_MAX;
5089}
5090
4fb37a25
TH
5091/**
5092 * ata_link_init - Initialize an ata_link structure
5093 * @ap: ATA port link is attached to
5094 * @link: Link structure to initialize
8989805d 5095 * @pmp: Port multiplier port number
4fb37a25
TH
5096 *
5097 * Initialize @link.
5098 *
5099 * LOCKING:
5100 * Kernel thread context (may sleep)
5101 */
fb7fd614 5102void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
5103{
5104 int i;
5105
5106 /* clear everything except for devices */
5107 memset(link, 0, offsetof(struct ata_link, device[0]));
5108
5109 link->ap = ap;
8989805d 5110 link->pmp = pmp;
4fb37a25
TH
5111 link->active_tag = ATA_TAG_POISON;
5112 link->hw_sata_spd_limit = UINT_MAX;
5113
5114 /* can't use iterator, ap isn't initialized yet */
5115 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5116 struct ata_device *dev = &link->device[i];
5117
5118 dev->link = link;
5119 dev->devno = dev - link->device;
5120 ata_dev_init(dev);
5121 }
5122}
5123
5124/**
5125 * sata_link_init_spd - Initialize link->sata_spd_limit
5126 * @link: Link to configure sata_spd_limit for
5127 *
5128 * Initialize @link->[hw_]sata_spd_limit to the currently
5129 * configured value.
5130 *
5131 * LOCKING:
5132 * Kernel thread context (may sleep).
5133 *
5134 * RETURNS:
5135 * 0 on success, -errno on failure.
5136 */
fb7fd614 5137int sata_link_init_spd(struct ata_link *link)
4fb37a25 5138{
33267325
TH
5139 u32 scontrol;
5140 u8 spd;
4fb37a25
TH
5141 int rc;
5142
5143 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
5144 if (rc)
5145 return rc;
5146
5147 spd = (scontrol >> 4) & 0xf;
5148 if (spd)
5149 link->hw_sata_spd_limit &= (1 << spd) - 1;
5150
33267325
TH
5151 ata_force_spd_limit(link);
5152
4fb37a25
TH
5153 link->sata_spd_limit = link->hw_sata_spd_limit;
5154
5155 return 0;
5156}
5157
1da177e4 5158/**
f3187195
TH
5159 * ata_port_alloc - allocate and initialize basic ATA port resources
5160 * @host: ATA host this allocated port belongs to
1da177e4 5161 *
f3187195
TH
5162 * Allocate and initialize basic ATA port resources.
5163 *
5164 * RETURNS:
5165 * Allocate ATA port on success, NULL on failure.
0cba632b 5166 *
1da177e4 5167 * LOCKING:
f3187195 5168 * Inherited from calling layer (may sleep).
1da177e4 5169 */
f3187195 5170struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 5171{
f3187195 5172 struct ata_port *ap;
1da177e4 5173
f3187195
TH
5174 DPRINTK("ENTER\n");
5175
5176 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5177 if (!ap)
5178 return NULL;
5179
f4d6d004 5180 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 5181 ap->lock = &host->lock;
198e0fed 5182 ap->flags = ATA_FLAG_DISABLED;
f3187195 5183 ap->print_id = -1;
1da177e4 5184 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 5185 ap->host = host;
f3187195 5186 ap->dev = host->dev;
1da177e4 5187 ap->last_ctl = 0xFF;
bd5d825c
BP
5188
5189#if defined(ATA_VERBOSE_DEBUG)
5190 /* turn on all debugging levels */
5191 ap->msg_enable = 0x00FF;
5192#elif defined(ATA_DEBUG)
5193 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5194#else
0dd4b21f 5195 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5196#endif
1da177e4 5197
442eacc3 5198 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
65f27f38
DH
5199 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5200 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5201 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5202 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
5203 init_timer_deferrable(&ap->fastdrain_timer);
5204 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5205 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 5206
838df628 5207 ap->cbl = ATA_CBL_NONE;
838df628 5208
8989805d 5209 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
5210
5211#ifdef ATA_IRQ_TRAP
5212 ap->stats.unhandled_irq = 1;
5213 ap->stats.idle_irq = 1;
5214#endif
1da177e4 5215 return ap;
1da177e4
LT
5216}
5217
f0d36efd
TH
5218static void ata_host_release(struct device *gendev, void *res)
5219{
5220 struct ata_host *host = dev_get_drvdata(gendev);
5221 int i;
5222
1aa506e4
TH
5223 for (i = 0; i < host->n_ports; i++) {
5224 struct ata_port *ap = host->ports[i];
5225
4911487a
TH
5226 if (!ap)
5227 continue;
5228
5229 if (ap->scsi_host)
1aa506e4
TH
5230 scsi_host_put(ap->scsi_host);
5231
633273a3 5232 kfree(ap->pmp_link);
4911487a 5233 kfree(ap);
1aa506e4
TH
5234 host->ports[i] = NULL;
5235 }
5236
1aa56cca 5237 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
5238}
5239
f3187195
TH
5240/**
5241 * ata_host_alloc - allocate and init basic ATA host resources
5242 * @dev: generic device this host is associated with
5243 * @max_ports: maximum number of ATA ports associated with this host
5244 *
5245 * Allocate and initialize basic ATA host resources. LLD calls
5246 * this function to allocate a host, initializes it fully and
5247 * attaches it using ata_host_register().
5248 *
5249 * @max_ports ports are allocated and host->n_ports is
5250 * initialized to @max_ports. The caller is allowed to decrease
5251 * host->n_ports before calling ata_host_register(). The unused
5252 * ports will be automatically freed on registration.
5253 *
5254 * RETURNS:
5255 * Allocate ATA host on success, NULL on failure.
5256 *
5257 * LOCKING:
5258 * Inherited from calling layer (may sleep).
5259 */
5260struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5261{
5262 struct ata_host *host;
5263 size_t sz;
5264 int i;
5265
5266 DPRINTK("ENTER\n");
5267
5268 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5269 return NULL;
5270
5271 /* alloc a container for our list of ATA ports (buses) */
5272 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5273 /* alloc a container for our list of ATA ports (buses) */
5274 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5275 if (!host)
5276 goto err_out;
5277
5278 devres_add(dev, host);
5279 dev_set_drvdata(dev, host);
5280
5281 spin_lock_init(&host->lock);
5282 host->dev = dev;
5283 host->n_ports = max_ports;
5284
5285 /* allocate ports bound to this host */
5286 for (i = 0; i < max_ports; i++) {
5287 struct ata_port *ap;
5288
5289 ap = ata_port_alloc(host);
5290 if (!ap)
5291 goto err_out;
5292
5293 ap->port_no = i;
5294 host->ports[i] = ap;
5295 }
5296
5297 devres_remove_group(dev, NULL);
5298 return host;
5299
5300 err_out:
5301 devres_release_group(dev, NULL);
5302 return NULL;
5303}
5304
f5cda257
TH
5305/**
5306 * ata_host_alloc_pinfo - alloc host and init with port_info array
5307 * @dev: generic device this host is associated with
5308 * @ppi: array of ATA port_info to initialize host with
5309 * @n_ports: number of ATA ports attached to this host
5310 *
5311 * Allocate ATA host and initialize with info from @ppi. If NULL
5312 * terminated, @ppi may contain fewer entries than @n_ports. The
5313 * last entry will be used for the remaining ports.
5314 *
5315 * RETURNS:
5316 * Allocate ATA host on success, NULL on failure.
5317 *
5318 * LOCKING:
5319 * Inherited from calling layer (may sleep).
5320 */
5321struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5322 const struct ata_port_info * const * ppi,
5323 int n_ports)
5324{
5325 const struct ata_port_info *pi;
5326 struct ata_host *host;
5327 int i, j;
5328
5329 host = ata_host_alloc(dev, n_ports);
5330 if (!host)
5331 return NULL;
5332
5333 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5334 struct ata_port *ap = host->ports[i];
5335
5336 if (ppi[j])
5337 pi = ppi[j++];
5338
5339 ap->pio_mask = pi->pio_mask;
5340 ap->mwdma_mask = pi->mwdma_mask;
5341 ap->udma_mask = pi->udma_mask;
5342 ap->flags |= pi->flags;
0c88758b 5343 ap->link.flags |= pi->link_flags;
f5cda257
TH
5344 ap->ops = pi->port_ops;
5345
5346 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5347 host->ops = pi->port_ops;
f5cda257
TH
5348 }
5349
5350 return host;
5351}
5352
32ebbc0c
TH
5353static void ata_host_stop(struct device *gendev, void *res)
5354{
5355 struct ata_host *host = dev_get_drvdata(gendev);
5356 int i;
5357
5358 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5359
5360 for (i = 0; i < host->n_ports; i++) {
5361 struct ata_port *ap = host->ports[i];
5362
5363 if (ap->ops->port_stop)
5364 ap->ops->port_stop(ap);
5365 }
5366
5367 if (host->ops->host_stop)
5368 host->ops->host_stop(host);
5369}
5370
029cfd6b
TH
5371/**
5372 * ata_finalize_port_ops - finalize ata_port_operations
5373 * @ops: ata_port_operations to finalize
5374 *
5375 * An ata_port_operations can inherit from another ops and that
5376 * ops can again inherit from another. This can go on as many
5377 * times as necessary as long as there is no loop in the
5378 * inheritance chain.
5379 *
5380 * Ops tables are finalized when the host is started. NULL or
5381 * unspecified entries are inherited from the closet ancestor
5382 * which has the method and the entry is populated with it.
5383 * After finalization, the ops table directly points to all the
5384 * methods and ->inherits is no longer necessary and cleared.
5385 *
5386 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5387 *
5388 * LOCKING:
5389 * None.
5390 */
5391static void ata_finalize_port_ops(struct ata_port_operations *ops)
5392{
5393 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
5394 const struct ata_port_operations *cur;
5395 void **begin = (void **)ops;
5396 void **end = (void **)&ops->inherits;
5397 void **pp;
5398
5399 if (!ops || !ops->inherits)
5400 return;
5401
5402 spin_lock(&lock);
5403
5404 for (cur = ops->inherits; cur; cur = cur->inherits) {
5405 void **inherit = (void **)cur;
5406
5407 for (pp = begin; pp < end; pp++, inherit++)
5408 if (!*pp)
5409 *pp = *inherit;
5410 }
5411
5412 for (pp = begin; pp < end; pp++)
5413 if (IS_ERR(*pp))
5414 *pp = NULL;
5415
5416 ops->inherits = NULL;
5417
5418 spin_unlock(&lock);
5419}
5420
ecef7253
TH
5421/**
5422 * ata_host_start - start and freeze ports of an ATA host
5423 * @host: ATA host to start ports for
5424 *
5425 * Start and then freeze ports of @host. Started status is
5426 * recorded in host->flags, so this function can be called
5427 * multiple times. Ports are guaranteed to get started only
f3187195
TH
5428 * once. If host->ops isn't initialized yet, its set to the
5429 * first non-dummy port ops.
ecef7253
TH
5430 *
5431 * LOCKING:
5432 * Inherited from calling layer (may sleep).
5433 *
5434 * RETURNS:
5435 * 0 if all ports are started successfully, -errno otherwise.
5436 */
5437int ata_host_start(struct ata_host *host)
5438{
32ebbc0c
TH
5439 int have_stop = 0;
5440 void *start_dr = NULL;
ecef7253
TH
5441 int i, rc;
5442
5443 if (host->flags & ATA_HOST_STARTED)
5444 return 0;
5445
029cfd6b
TH
5446 ata_finalize_port_ops(host->ops);
5447
ecef7253
TH
5448 for (i = 0; i < host->n_ports; i++) {
5449 struct ata_port *ap = host->ports[i];
5450
029cfd6b
TH
5451 ata_finalize_port_ops(ap->ops);
5452
f3187195
TH
5453 if (!host->ops && !ata_port_is_dummy(ap))
5454 host->ops = ap->ops;
5455
32ebbc0c
TH
5456 if (ap->ops->port_stop)
5457 have_stop = 1;
5458 }
5459
5460 if (host->ops->host_stop)
5461 have_stop = 1;
5462
5463 if (have_stop) {
5464 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5465 if (!start_dr)
5466 return -ENOMEM;
5467 }
5468
5469 for (i = 0; i < host->n_ports; i++) {
5470 struct ata_port *ap = host->ports[i];
5471
ecef7253
TH
5472 if (ap->ops->port_start) {
5473 rc = ap->ops->port_start(ap);
5474 if (rc) {
0f9fe9b7 5475 if (rc != -ENODEV)
0f757743
AM
5476 dev_printk(KERN_ERR, host->dev,
5477 "failed to start port %d "
5478 "(errno=%d)\n", i, rc);
ecef7253
TH
5479 goto err_out;
5480 }
5481 }
ecef7253
TH
5482 ata_eh_freeze_port(ap);
5483 }
5484
32ebbc0c
TH
5485 if (start_dr)
5486 devres_add(host->dev, start_dr);
ecef7253
TH
5487 host->flags |= ATA_HOST_STARTED;
5488 return 0;
5489
5490 err_out:
5491 while (--i >= 0) {
5492 struct ata_port *ap = host->ports[i];
5493
5494 if (ap->ops->port_stop)
5495 ap->ops->port_stop(ap);
5496 }
32ebbc0c 5497 devres_free(start_dr);
ecef7253
TH
5498 return rc;
5499}
5500
b03732f0 5501/**
cca3974e
JG
5502 * ata_sas_host_init - Initialize a host struct
5503 * @host: host to initialize
5504 * @dev: device host is attached to
5505 * @flags: host flags
5506 * @ops: port_ops
b03732f0
BK
5507 *
5508 * LOCKING:
5509 * PCI/etc. bus probe sem.
5510 *
5511 */
f3187195 5512/* KILLME - the only user left is ipr */
cca3974e 5513void ata_host_init(struct ata_host *host, struct device *dev,
029cfd6b 5514 unsigned long flags, struct ata_port_operations *ops)
b03732f0 5515{
cca3974e
JG
5516 spin_lock_init(&host->lock);
5517 host->dev = dev;
5518 host->flags = flags;
5519 host->ops = ops;
b03732f0
BK
5520}
5521
f3187195
TH
5522/**
5523 * ata_host_register - register initialized ATA host
5524 * @host: ATA host to register
5525 * @sht: template for SCSI host
5526 *
5527 * Register initialized ATA host. @host is allocated using
5528 * ata_host_alloc() and fully initialized by LLD. This function
5529 * starts ports, registers @host with ATA and SCSI layers and
5530 * probe registered devices.
5531 *
5532 * LOCKING:
5533 * Inherited from calling layer (may sleep).
5534 *
5535 * RETURNS:
5536 * 0 on success, -errno otherwise.
5537 */
5538int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5539{
5540 int i, rc;
5541
5542 /* host must have been started */
5543 if (!(host->flags & ATA_HOST_STARTED)) {
5544 dev_printk(KERN_ERR, host->dev,
5545 "BUG: trying to register unstarted host\n");
5546 WARN_ON(1);
5547 return -EINVAL;
5548 }
5549
5550 /* Blow away unused ports. This happens when LLD can't
5551 * determine the exact number of ports to allocate at
5552 * allocation time.
5553 */
5554 for (i = host->n_ports; host->ports[i]; i++)
5555 kfree(host->ports[i]);
5556
5557 /* give ports names and add SCSI hosts */
5558 for (i = 0; i < host->n_ports; i++)
5559 host->ports[i]->print_id = ata_print_id++;
5560
5561 rc = ata_scsi_add_hosts(host, sht);
5562 if (rc)
5563 return rc;
5564
fafbae87
TH
5565 /* associate with ACPI nodes */
5566 ata_acpi_associate(host);
5567
f3187195
TH
5568 /* set cable, sata_spd_limit and report */
5569 for (i = 0; i < host->n_ports; i++) {
5570 struct ata_port *ap = host->ports[i];
f3187195
TH
5571 unsigned long xfer_mask;
5572
5573 /* set SATA cable type if still unset */
5574 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5575 ap->cbl = ATA_CBL_SATA;
5576
5577 /* init sata_spd_limit to the current value */
4fb37a25 5578 sata_link_init_spd(&ap->link);
f3187195 5579
cbcdd875 5580 /* print per-port info to dmesg */
f3187195
TH
5581 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5582 ap->udma_mask);
5583
abf6e8ed 5584 if (!ata_port_is_dummy(ap)) {
cbcdd875
TH
5585 ata_port_printk(ap, KERN_INFO,
5586 "%cATA max %s %s\n",
a16abc0b 5587 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 5588 ata_mode_string(xfer_mask),
cbcdd875 5589 ap->link.eh_info.desc);
abf6e8ed
TH
5590 ata_ehi_clear_desc(&ap->link.eh_info);
5591 } else
f3187195
TH
5592 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5593 }
5594
5595 /* perform each probe synchronously */
5596 DPRINTK("probe begin\n");
5597 for (i = 0; i < host->n_ports; i++) {
5598 struct ata_port *ap = host->ports[i];
f3187195
TH
5599
5600 /* probe */
5601 if (ap->ops->error_handler) {
9af5c9c9 5602 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
5603 unsigned long flags;
5604
5605 ata_port_probe(ap);
5606
5607 /* kick EH for boot probing */
5608 spin_lock_irqsave(ap->lock, flags);
5609
b558eddd 5610 ehi->probe_mask |= ATA_ALL_DEVICES;
cf480626 5611 ehi->action |= ATA_EH_RESET;
f3187195
TH
5612 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5613
f4d6d004 5614 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
5615 ap->pflags |= ATA_PFLAG_LOADING;
5616 ata_port_schedule_eh(ap);
5617
5618 spin_unlock_irqrestore(ap->lock, flags);
5619
5620 /* wait for EH to finish */
5621 ata_port_wait_eh(ap);
5622 } else {
5623 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5624 rc = ata_bus_probe(ap);
5625 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5626
5627 if (rc) {
5628 /* FIXME: do something useful here?
5629 * Current libata behavior will
5630 * tear down everything when
5631 * the module is removed
5632 * or the h/w is unplugged.
5633 */
5634 }
5635 }
5636 }
5637
5638 /* probes are done, now scan each port's disk(s) */
5639 DPRINTK("host probe begin\n");
5640 for (i = 0; i < host->n_ports; i++) {
5641 struct ata_port *ap = host->ports[i];
5642
1ae46317 5643 ata_scsi_scan_host(ap, 1);
ca77329f 5644 ata_lpm_schedule(ap, ap->pm_policy);
f3187195
TH
5645 }
5646
5647 return 0;
5648}
5649
f5cda257
TH
5650/**
5651 * ata_host_activate - start host, request IRQ and register it
5652 * @host: target ATA host
5653 * @irq: IRQ to request
5654 * @irq_handler: irq_handler used when requesting IRQ
5655 * @irq_flags: irq_flags used when requesting IRQ
5656 * @sht: scsi_host_template to use when registering the host
5657 *
5658 * After allocating an ATA host and initializing it, most libata
5659 * LLDs perform three steps to activate the host - start host,
5660 * request IRQ and register it. This helper takes necessasry
5661 * arguments and performs the three steps in one go.
5662 *
3d46b2e2
PM
5663 * An invalid IRQ skips the IRQ registration and expects the host to
5664 * have set polling mode on the port. In this case, @irq_handler
5665 * should be NULL.
5666 *
f5cda257
TH
5667 * LOCKING:
5668 * Inherited from calling layer (may sleep).
5669 *
5670 * RETURNS:
5671 * 0 on success, -errno otherwise.
5672 */
5673int ata_host_activate(struct ata_host *host, int irq,
5674 irq_handler_t irq_handler, unsigned long irq_flags,
5675 struct scsi_host_template *sht)
5676{
cbcdd875 5677 int i, rc;
f5cda257
TH
5678
5679 rc = ata_host_start(host);
5680 if (rc)
5681 return rc;
5682
3d46b2e2
PM
5683 /* Special case for polling mode */
5684 if (!irq) {
5685 WARN_ON(irq_handler);
5686 return ata_host_register(host, sht);
5687 }
5688
f5cda257
TH
5689 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5690 dev_driver_string(host->dev), host);
5691 if (rc)
5692 return rc;
5693
cbcdd875
TH
5694 for (i = 0; i < host->n_ports; i++)
5695 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 5696
f5cda257
TH
5697 rc = ata_host_register(host, sht);
5698 /* if failed, just free the IRQ and leave ports alone */
5699 if (rc)
5700 devm_free_irq(host->dev, irq, host);
5701
5702 return rc;
5703}
5704
720ba126
TH
5705/**
5706 * ata_port_detach - Detach ATA port in prepration of device removal
5707 * @ap: ATA port to be detached
5708 *
5709 * Detach all ATA devices and the associated SCSI devices of @ap;
5710 * then, remove the associated SCSI host. @ap is guaranteed to
5711 * be quiescent on return from this function.
5712 *
5713 * LOCKING:
5714 * Kernel thread context (may sleep).
5715 */
741b7763 5716static void ata_port_detach(struct ata_port *ap)
720ba126
TH
5717{
5718 unsigned long flags;
41bda9c9 5719 struct ata_link *link;
f58229f8 5720 struct ata_device *dev;
720ba126
TH
5721
5722 if (!ap->ops->error_handler)
c3cf30a9 5723 goto skip_eh;
720ba126
TH
5724
5725 /* tell EH we're leaving & flush EH */
ba6a1308 5726 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 5727 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 5728 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5729
5730 ata_port_wait_eh(ap);
5731
7f9ad9b8
TH
5732 /* EH is now guaranteed to see UNLOADING - EH context belongs
5733 * to us. Disable all existing devices.
720ba126 5734 */
41bda9c9
TH
5735 ata_port_for_each_link(link, ap) {
5736 ata_link_for_each_dev(dev, link)
5737 ata_dev_disable(dev);
5738 }
720ba126 5739
720ba126
TH
5740 /* Final freeze & EH. All in-flight commands are aborted. EH
5741 * will be skipped and retrials will be terminated with bad
5742 * target.
5743 */
ba6a1308 5744 spin_lock_irqsave(ap->lock, flags);
720ba126 5745 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 5746 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5747
5748 ata_port_wait_eh(ap);
45a66c1c 5749 cancel_rearming_delayed_work(&ap->hotplug_task);
720ba126 5750
c3cf30a9 5751 skip_eh:
720ba126 5752 /* remove the associated SCSI host */
cca3974e 5753 scsi_remove_host(ap->scsi_host);
720ba126
TH
5754}
5755
0529c159
TH
5756/**
5757 * ata_host_detach - Detach all ports of an ATA host
5758 * @host: Host to detach
5759 *
5760 * Detach all ports of @host.
5761 *
5762 * LOCKING:
5763 * Kernel thread context (may sleep).
5764 */
5765void ata_host_detach(struct ata_host *host)
5766{
5767 int i;
5768
5769 for (i = 0; i < host->n_ports; i++)
5770 ata_port_detach(host->ports[i]);
562f0c2d
TH
5771
5772 /* the host is dead now, dissociate ACPI */
5773 ata_acpi_dissociate(host);
0529c159
TH
5774}
5775
374b1873
JG
5776#ifdef CONFIG_PCI
5777
1da177e4
LT
5778/**
5779 * ata_pci_remove_one - PCI layer callback for device removal
5780 * @pdev: PCI device that was removed
5781 *
b878ca5d
TH
5782 * PCI layer indicates to libata via this hook that hot-unplug or
5783 * module unload event has occurred. Detach all ports. Resource
5784 * release is handled via devres.
1da177e4
LT
5785 *
5786 * LOCKING:
5787 * Inherited from PCI layer (may sleep).
5788 */
f0d36efd 5789void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 5790{
2855568b 5791 struct device *dev = &pdev->dev;
cca3974e 5792 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 5793
b878ca5d 5794 ata_host_detach(host);
1da177e4
LT
5795}
5796
5797/* move to PCI subsystem */
057ace5e 5798int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
5799{
5800 unsigned long tmp = 0;
5801
5802 switch (bits->width) {
5803 case 1: {
5804 u8 tmp8 = 0;
5805 pci_read_config_byte(pdev, bits->reg, &tmp8);
5806 tmp = tmp8;
5807 break;
5808 }
5809 case 2: {
5810 u16 tmp16 = 0;
5811 pci_read_config_word(pdev, bits->reg, &tmp16);
5812 tmp = tmp16;
5813 break;
5814 }
5815 case 4: {
5816 u32 tmp32 = 0;
5817 pci_read_config_dword(pdev, bits->reg, &tmp32);
5818 tmp = tmp32;
5819 break;
5820 }
5821
5822 default:
5823 return -EINVAL;
5824 }
5825
5826 tmp &= bits->mask;
5827
5828 return (tmp == bits->val) ? 1 : 0;
5829}
9b847548 5830
6ffa01d8 5831#ifdef CONFIG_PM
3c5100c1 5832void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
5833{
5834 pci_save_state(pdev);
4c90d971 5835 pci_disable_device(pdev);
500530f6 5836
3a2d5b70 5837 if (mesg.event & PM_EVENT_SLEEP)
500530f6 5838 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
5839}
5840
553c4aa6 5841int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 5842{
553c4aa6
TH
5843 int rc;
5844
9b847548
JA
5845 pci_set_power_state(pdev, PCI_D0);
5846 pci_restore_state(pdev);
553c4aa6 5847
b878ca5d 5848 rc = pcim_enable_device(pdev);
553c4aa6
TH
5849 if (rc) {
5850 dev_printk(KERN_ERR, &pdev->dev,
5851 "failed to enable device after resume (%d)\n", rc);
5852 return rc;
5853 }
5854
9b847548 5855 pci_set_master(pdev);
553c4aa6 5856 return 0;
500530f6
TH
5857}
5858
3c5100c1 5859int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 5860{
cca3974e 5861 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
5862 int rc = 0;
5863
cca3974e 5864 rc = ata_host_suspend(host, mesg);
500530f6
TH
5865 if (rc)
5866 return rc;
5867
3c5100c1 5868 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
5869
5870 return 0;
5871}
5872
5873int ata_pci_device_resume(struct pci_dev *pdev)
5874{
cca3974e 5875 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 5876 int rc;
500530f6 5877
553c4aa6
TH
5878 rc = ata_pci_device_do_resume(pdev);
5879 if (rc == 0)
5880 ata_host_resume(host);
5881 return rc;
9b847548 5882}
6ffa01d8
TH
5883#endif /* CONFIG_PM */
5884
1da177e4
LT
5885#endif /* CONFIG_PCI */
5886
33267325
TH
5887static int __init ata_parse_force_one(char **cur,
5888 struct ata_force_ent *force_ent,
5889 const char **reason)
5890{
5891 /* FIXME: Currently, there's no way to tag init const data and
5892 * using __initdata causes build failure on some versions of
5893 * gcc. Once __initdataconst is implemented, add const to the
5894 * following structure.
5895 */
5896 static struct ata_force_param force_tbl[] __initdata = {
5897 { "40c", .cbl = ATA_CBL_PATA40 },
5898 { "80c", .cbl = ATA_CBL_PATA80 },
5899 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
5900 { "unk", .cbl = ATA_CBL_PATA_UNK },
5901 { "ign", .cbl = ATA_CBL_PATA_IGN },
5902 { "sata", .cbl = ATA_CBL_SATA },
5903 { "1.5Gbps", .spd_limit = 1 },
5904 { "3.0Gbps", .spd_limit = 2 },
5905 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
5906 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
5907 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
5908 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
5909 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
5910 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
5911 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
5912 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
5913 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
5914 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
5915 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
5916 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
5917 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
5918 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
5919 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5920 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5921 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5922 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5923 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5924 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5925 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5926 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5927 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5928 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5929 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5930 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5931 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5932 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5933 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5934 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5935 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5936 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5937 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5938 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5939 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5940 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
5941 };
5942 char *start = *cur, *p = *cur;
5943 char *id, *val, *endp;
5944 const struct ata_force_param *match_fp = NULL;
5945 int nr_matches = 0, i;
5946
5947 /* find where this param ends and update *cur */
5948 while (*p != '\0' && *p != ',')
5949 p++;
5950
5951 if (*p == '\0')
5952 *cur = p;
5953 else
5954 *cur = p + 1;
5955
5956 *p = '\0';
5957
5958 /* parse */
5959 p = strchr(start, ':');
5960 if (!p) {
5961 val = strstrip(start);
5962 goto parse_val;
5963 }
5964 *p = '\0';
5965
5966 id = strstrip(start);
5967 val = strstrip(p + 1);
5968
5969 /* parse id */
5970 p = strchr(id, '.');
5971 if (p) {
5972 *p++ = '\0';
5973 force_ent->device = simple_strtoul(p, &endp, 10);
5974 if (p == endp || *endp != '\0') {
5975 *reason = "invalid device";
5976 return -EINVAL;
5977 }
5978 }
5979
5980 force_ent->port = simple_strtoul(id, &endp, 10);
5981 if (p == endp || *endp != '\0') {
5982 *reason = "invalid port/link";
5983 return -EINVAL;
5984 }
5985
5986 parse_val:
5987 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
5988 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
5989 const struct ata_force_param *fp = &force_tbl[i];
5990
5991 if (strncasecmp(val, fp->name, strlen(val)))
5992 continue;
5993
5994 nr_matches++;
5995 match_fp = fp;
5996
5997 if (strcasecmp(val, fp->name) == 0) {
5998 nr_matches = 1;
5999 break;
6000 }
6001 }
6002
6003 if (!nr_matches) {
6004 *reason = "unknown value";
6005 return -EINVAL;
6006 }
6007 if (nr_matches > 1) {
6008 *reason = "ambigious value";
6009 return -EINVAL;
6010 }
6011
6012 force_ent->param = *match_fp;
6013
6014 return 0;
6015}
6016
6017static void __init ata_parse_force_param(void)
6018{
6019 int idx = 0, size = 1;
6020 int last_port = -1, last_device = -1;
6021 char *p, *cur, *next;
6022
6023 /* calculate maximum number of params and allocate force_tbl */
6024 for (p = ata_force_param_buf; *p; p++)
6025 if (*p == ',')
6026 size++;
6027
6028 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6029 if (!ata_force_tbl) {
6030 printk(KERN_WARNING "ata: failed to extend force table, "
6031 "libata.force ignored\n");
6032 return;
6033 }
6034
6035 /* parse and populate the table */
6036 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6037 const char *reason = "";
6038 struct ata_force_ent te = { .port = -1, .device = -1 };
6039
6040 next = cur;
6041 if (ata_parse_force_one(&next, &te, &reason)) {
6042 printk(KERN_WARNING "ata: failed to parse force "
6043 "parameter \"%s\" (%s)\n",
6044 cur, reason);
6045 continue;
6046 }
6047
6048 if (te.port == -1) {
6049 te.port = last_port;
6050 te.device = last_device;
6051 }
6052
6053 ata_force_tbl[idx++] = te;
6054
6055 last_port = te.port;
6056 last_device = te.device;
6057 }
6058
6059 ata_force_tbl_size = idx;
6060}
1da177e4 6061
1da177e4
LT
6062static int __init ata_init(void)
6063{
a8601e5f 6064 ata_probe_timeout *= HZ;
33267325
TH
6065
6066 ata_parse_force_param();
6067
1da177e4
LT
6068 ata_wq = create_workqueue("ata");
6069 if (!ata_wq)
6070 return -ENOMEM;
6071
453b07ac
TH
6072 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6073 if (!ata_aux_wq) {
6074 destroy_workqueue(ata_wq);
6075 return -ENOMEM;
6076 }
6077
1da177e4
LT
6078 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6079 return 0;
6080}
6081
/* module exit: free the force table and tear down the workqueues
 * created in ata_init()
 */
static void __exit ata_exit(void)
{
	kfree(ata_force_tbl);
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
6088
/* register as a subsystem initcall so libata is up before the
 * low-level ATA drivers (plain device initcalls) start probing
 */
subsys_initcall(ata_init);
module_exit(ata_exit);
6091
67846b30 6092static unsigned long ratelimit_time;
34af946a 6093static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6094
6095int ata_ratelimit(void)
6096{
6097 int rc;
6098 unsigned long flags;
6099
6100 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6101
6102 if (time_after(jiffies, ratelimit_time)) {
6103 rc = 1;
6104 ratelimit_time = jiffies + (HZ/5);
6105 } else
6106 rc = 0;
6107
6108 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6109
6110 return rc;
6111}
6112
c22daff4
TH
6113/**
6114 * ata_wait_register - wait until register value changes
6115 * @reg: IO-mapped register
6116 * @mask: Mask to apply to read register value
6117 * @val: Wait condition
6118 * @interval_msec: polling interval in milliseconds
6119 * @timeout_msec: timeout in milliseconds
6120 *
6121 * Waiting for some bits of register to change is a common
6122 * operation for ATA controllers. This function reads 32bit LE
6123 * IO-mapped register @reg and tests for the following condition.
6124 *
6125 * (*@reg & mask) != val
6126 *
6127 * If the condition is met, it returns; otherwise, the process is
6128 * repeated after @interval_msec until timeout.
6129 *
6130 * LOCKING:
6131 * Kernel thread context (may sleep)
6132 *
6133 * RETURNS:
6134 * The final register value.
6135 */
6136u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6137 unsigned long interval_msec,
6138 unsigned long timeout_msec)
6139{
6140 unsigned long timeout;
6141 u32 tmp;
6142
6143 tmp = ioread32(reg);
6144
6145 /* Calculate timeout _after_ the first read to make sure
6146 * preceding writes reach the controller before starting to
6147 * eat away the timeout.
6148 */
6149 timeout = jiffies + (timeout_msec * HZ) / 1000;
6150
6151 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6152 msleep(interval_msec);
6153 tmp = ioread32(reg);
6154 }
6155
6156 return tmp;
6157}
6158
dd5b06c4
TH
/*
 * Dummy port_ops
 */
/* issue stub for the dummy port: unconditionally fail every command */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
6166
/* EH stub for the dummy port: nothing can be recovered, so do nothing */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}
6171
/* Port ops for a non-functional port: qc_prep is a no-op, qc_issue
 * fails every command and the error handler does nothing.
 * NOTE(review): presumably installed on ports that exist in a host's
 * port map but are unusable -- confirm against callers.
 */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.error_handler		= ata_dummy_error_handler,
};

/* port_info wrapper so drivers can hand the dummy ops to host allocation */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
6181
1da177e4
LT
6182/*
6183 * libata is essentially a library of internal helper functions for
6184 * low-level ATA host controller drivers. As such, the API/ABI is
6185 * likely to change as new drivers are added and updated.
6186 * Do not depend on ABI/API stability.
6187 */
e9c83914
TH
6188EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6189EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6190EXPORT_SYMBOL_GPL(sata_deb_timing_long);
029cfd6b
TH
6191EXPORT_SYMBOL_GPL(ata_base_port_ops);
6192EXPORT_SYMBOL_GPL(sata_port_ops);
6193EXPORT_SYMBOL_GPL(sata_pmp_port_ops);
dd5b06c4 6194EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 6195EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4 6196EXPORT_SYMBOL_GPL(ata_std_bios_param);
cca3974e 6197EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 6198EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 6199EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 6200EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 6201EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 6202EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 6203EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 6204EXPORT_SYMBOL_GPL(ata_sg_init);
f686bcb8 6205EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6206EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
43727fbc 6207EXPORT_SYMBOL_GPL(sata_print_link_status);
436d34b3 6208EXPORT_SYMBOL_GPL(atapi_cmd_type);
1da177e4
LT
6209EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6210EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
6211EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6212EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6213EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6214EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6215EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6216EXPORT_SYMBOL_GPL(ata_mode_string);
6217EXPORT_SYMBOL_GPL(ata_id_xfermask);
1da177e4 6218EXPORT_SYMBOL_GPL(ata_port_start);
04351821 6219EXPORT_SYMBOL_GPL(ata_do_set_mode);
31cc23b3 6220EXPORT_SYMBOL_GPL(ata_std_qc_defer);
e46834cd 6221EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4 6222EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 6223EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6224EXPORT_SYMBOL_GPL(sata_set_spd);
aa2731ad 6225EXPORT_SYMBOL_GPL(ata_wait_after_reset);
936fd732
TH
6226EXPORT_SYMBOL_GPL(sata_link_debounce);
6227EXPORT_SYMBOL_GPL(sata_link_resume);
0aa1113d 6228EXPORT_SYMBOL_GPL(ata_std_prereset);
cc0680a5 6229EXPORT_SYMBOL_GPL(sata_link_hardreset);
57c9efdf 6230EXPORT_SYMBOL_GPL(sata_std_hardreset);
203c75b8 6231EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6232EXPORT_SYMBOL_GPL(ata_dev_classify);
6233EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6234EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6235EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6236EXPORT_SYMBOL_GPL(ata_wait_register);
1da177e4
LT
6237EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6238EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6239EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6240EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6241EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
34bf2170
TH
6242EXPORT_SYMBOL_GPL(sata_scr_valid);
6243EXPORT_SYMBOL_GPL(sata_scr_read);
6244EXPORT_SYMBOL_GPL(sata_scr_write);
6245EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
6246EXPORT_SYMBOL_GPL(ata_link_online);
6247EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 6248#ifdef CONFIG_PM
cca3974e
JG
6249EXPORT_SYMBOL_GPL(ata_host_suspend);
6250EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6251#endif /* CONFIG_PM */
6a62a04d
TH
6252EXPORT_SYMBOL_GPL(ata_id_string);
6253EXPORT_SYMBOL_GPL(ata_id_c_string);
1da177e4
LT
6254EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6255
1bc4ccff 6256EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 6257EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
6258EXPORT_SYMBOL_GPL(ata_timing_compute);
6259EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 6260EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 6261
1da177e4
LT
6262#ifdef CONFIG_PCI
6263EXPORT_SYMBOL_GPL(pci_test_config_bits);
1da177e4 6264EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6265#ifdef CONFIG_PM
500530f6
TH
6266EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6267EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6268EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6269EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6270#endif /* CONFIG_PM */
1da177e4 6271#endif /* CONFIG_PCI */
9b847548 6272
31f88384 6273EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
a1efdaba 6274EXPORT_SYMBOL_GPL(sata_pmp_error_handler);
3af9a77a 6275
b64bbc39
TH
6276EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6277EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6278EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
6279EXPORT_SYMBOL_GPL(ata_port_desc);
6280#ifdef CONFIG_PCI
6281EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6282#endif /* CONFIG_PCI */
7b70fc03 6283EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 6284EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 6285EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 6286EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 6287EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
6288EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6289EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6290EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6291EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6292EXPORT_SYMBOL_GPL(ata_do_eh);
a1efdaba 6293EXPORT_SYMBOL_GPL(ata_std_error_handler);
be0d18df
AC
6294
6295EXPORT_SYMBOL_GPL(ata_cable_40wire);
6296EXPORT_SYMBOL_GPL(ata_cable_80wire);
6297EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 6298EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 6299EXPORT_SYMBOL_GPL(ata_cable_sata);