libata: implement and use SHT initializers
[linux-2.6-block.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/highmem.h>
50#include <linux/spinlock.h>
51#include <linux/blkdev.h>
52#include <linux/delay.h>
53#include <linux/timer.h>
54#include <linux/interrupt.h>
55#include <linux/completion.h>
56#include <linux/suspend.h>
57#include <linux/workqueue.h>
67846b30 58#include <linux/jiffies.h>
378f058c 59#include <linux/scatterlist.h>
2dcb407e 60#include <linux/io.h>
1da177e4 61#include <scsi/scsi.h>
193515d5 62#include <scsi/scsi_cmnd.h>
1da177e4
LT
63#include <scsi/scsi_host.h>
64#include <linux/libata.h>
1da177e4
LT
65#include <asm/semaphore.h>
66#include <asm/byteorder.h>
140b5e59 67#include <linux/cdrom.h>
1da177e4
LT
68
69#include "libata.h"
70
fda0efc5 71
d7bb4cc7 72/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
73const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
74const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
75const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 76
3373efd8
TH
77static unsigned int ata_dev_init_params(struct ata_device *dev,
78 u16 heads, u16 sectors);
79static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
218f3d30
JG
80static unsigned int ata_dev_set_feature(struct ata_device *dev,
81 u8 enable, u8 feature);
3373efd8 82static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 83static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 84
f3187195 85unsigned int ata_print_id = 1;
1da177e4
LT
86static struct workqueue_struct *ata_wq;
87
453b07ac
TH
88struct workqueue_struct *ata_aux_wq;
89
33267325
TH
90struct ata_force_param {
91 const char *name;
92 unsigned int cbl;
93 int spd_limit;
94 unsigned long xfer_mask;
95 unsigned int horkage_on;
96 unsigned int horkage_off;
97};
98
99struct ata_force_ent {
100 int port;
101 int device;
102 struct ata_force_param param;
103};
104
105static struct ata_force_ent *ata_force_tbl;
106static int ata_force_tbl_size;
107
108static char ata_force_param_buf[PAGE_SIZE] __initdata;
7afb4222
TH
109/* param_buf is thrown away after initialization, disallow read */
110module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
33267325
TH
111MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
112
418dc1f5 113int atapi_enabled = 1;
1623c81e
JG
114module_param(atapi_enabled, int, 0444);
115MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
116
c5c61bda 117static int atapi_dmadir = 0;
95de719a
AL
118module_param(atapi_dmadir, int, 0444);
119MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
120
baf4fdfa
ML
121int atapi_passthru16 = 1;
122module_param(atapi_passthru16, int, 0444);
123MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
124
c3c013a2
JG
125int libata_fua = 0;
126module_param_named(fua, libata_fua, int, 0444);
127MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
128
2dcb407e 129static int ata_ignore_hpa;
1e999736
AC
130module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
131MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
132
b3a70601
AC
133static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
134module_param_named(dma, libata_dma_mask, int, 0444);
135MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
136
a8601e5f
AM
137static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
138module_param(ata_probe_timeout, int, 0444);
139MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
140
6ebe9d86 141int libata_noacpi = 0;
d7d0dad6 142module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 143MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 144
ae8d4ee7
AC
145int libata_allow_tpm = 0;
146module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
147MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
148
1da177e4
LT
149MODULE_AUTHOR("Jeff Garzik");
150MODULE_DESCRIPTION("Library module for ATA devices");
151MODULE_LICENSE("GPL");
152MODULE_VERSION(DRV_VERSION);
153
0baab86b 154
33267325
TH
155/**
156 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 157 * @ap: ATA port of interest
33267325
TH
158 *
159 * Force cable type according to libata.force and whine about it.
160 * The last entry which has matching port number is used, so it
161 * can be specified as part of device force parameters. For
162 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
163 * same effect.
164 *
165 * LOCKING:
166 * EH context.
167 */
168void ata_force_cbl(struct ata_port *ap)
169{
170 int i;
171
172 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
173 const struct ata_force_ent *fe = &ata_force_tbl[i];
174
175 if (fe->port != -1 && fe->port != ap->print_id)
176 continue;
177
178 if (fe->param.cbl == ATA_CBL_NONE)
179 continue;
180
181 ap->cbl = fe->param.cbl;
182 ata_port_printk(ap, KERN_NOTICE,
183 "FORCE: cable set to %s\n", fe->param.name);
184 return;
185 }
186}
187
188/**
189 * ata_force_spd_limit - force SATA spd limit according to libata.force
190 * @link: ATA link of interest
191 *
192 * Force SATA spd limit according to libata.force and whine about
193 * it. When only the port part is specified (e.g. 1:), the limit
194 * applies to all links connected to both the host link and all
195 * fan-out ports connected via PMP. If the device part is
196 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
197 * link not the host link. Device number 15 always points to the
198 * host link whether PMP is attached or not.
199 *
200 * LOCKING:
201 * EH context.
202 */
203static void ata_force_spd_limit(struct ata_link *link)
204{
205 int linkno, i;
206
207 if (ata_is_host_link(link))
208 linkno = 15;
209 else
210 linkno = link->pmp;
211
212 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
213 const struct ata_force_ent *fe = &ata_force_tbl[i];
214
215 if (fe->port != -1 && fe->port != link->ap->print_id)
216 continue;
217
218 if (fe->device != -1 && fe->device != linkno)
219 continue;
220
221 if (!fe->param.spd_limit)
222 continue;
223
224 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
225 ata_link_printk(link, KERN_NOTICE,
226 "FORCE: PHY spd limit set to %s\n", fe->param.name);
227 return;
228 }
229}
230
231/**
232 * ata_force_xfermask - force xfermask according to libata.force
233 * @dev: ATA device of interest
234 *
235 * Force xfer_mask according to libata.force and whine about it.
236 * For consistency with link selection, device number 15 selects
237 * the first device connected to the host link.
238 *
239 * LOCKING:
240 * EH context.
241 */
242static void ata_force_xfermask(struct ata_device *dev)
243{
244 int devno = dev->link->pmp + dev->devno;
245 int alt_devno = devno;
246 int i;
247
248 /* allow n.15 for the first device attached to host port */
249 if (ata_is_host_link(dev->link) && devno == 0)
250 alt_devno = 15;
251
252 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
253 const struct ata_force_ent *fe = &ata_force_tbl[i];
254 unsigned long pio_mask, mwdma_mask, udma_mask;
255
256 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
257 continue;
258
259 if (fe->device != -1 && fe->device != devno &&
260 fe->device != alt_devno)
261 continue;
262
263 if (!fe->param.xfer_mask)
264 continue;
265
266 ata_unpack_xfermask(fe->param.xfer_mask,
267 &pio_mask, &mwdma_mask, &udma_mask);
268 if (udma_mask)
269 dev->udma_mask = udma_mask;
270 else if (mwdma_mask) {
271 dev->udma_mask = 0;
272 dev->mwdma_mask = mwdma_mask;
273 } else {
274 dev->udma_mask = 0;
275 dev->mwdma_mask = 0;
276 dev->pio_mask = pio_mask;
277 }
278
279 ata_dev_printk(dev, KERN_NOTICE,
280 "FORCE: xfer_mask set to %s\n", fe->param.name);
281 return;
282 }
283}
284
285/**
286 * ata_force_horkage - force horkage according to libata.force
287 * @dev: ATA device of interest
288 *
289 * Force horkage according to libata.force and whine about it.
290 * For consistency with link selection, device number 15 selects
291 * the first device connected to the host link.
292 *
293 * LOCKING:
294 * EH context.
295 */
296static void ata_force_horkage(struct ata_device *dev)
297{
298 int devno = dev->link->pmp + dev->devno;
299 int alt_devno = devno;
300 int i;
301
302 /* allow n.15 for the first device attached to host port */
303 if (ata_is_host_link(dev->link) && devno == 0)
304 alt_devno = 15;
305
306 for (i = 0; i < ata_force_tbl_size; i++) {
307 const struct ata_force_ent *fe = &ata_force_tbl[i];
308
309 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
310 continue;
311
312 if (fe->device != -1 && fe->device != devno &&
313 fe->device != alt_devno)
314 continue;
315
316 if (!(~dev->horkage & fe->param.horkage_on) &&
317 !(dev->horkage & fe->param.horkage_off))
318 continue;
319
320 dev->horkage |= fe->param.horkage_on;
321 dev->horkage &= ~fe->param.horkage_off;
322
323 ata_dev_printk(dev, KERN_NOTICE,
324 "FORCE: horkage modified (%s)\n", fe->param.name);
325 }
326}
327
436d34b3
TH
328/**
329 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
330 * @opcode: SCSI opcode
331 *
332 * Determine ATAPI command type from @opcode.
333 *
334 * LOCKING:
335 * None.
336 *
337 * RETURNS:
338 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
339 */
340int atapi_cmd_type(u8 opcode)
341{
342 switch (opcode) {
343 case GPCMD_READ_10:
344 case GPCMD_READ_12:
345 return ATAPI_READ;
346
347 case GPCMD_WRITE_10:
348 case GPCMD_WRITE_12:
349 case GPCMD_WRITE_AND_VERIFY_10:
350 return ATAPI_WRITE;
351
352 case GPCMD_READ_CD:
353 case GPCMD_READ_CD_MSF:
354 return ATAPI_READ_CD;
355
e52dcc48
TH
356 case ATA_16:
357 case ATA_12:
358 if (atapi_passthru16)
359 return ATAPI_PASS_THRU;
360 /* fall thru */
436d34b3
TH
361 default:
362 return ATAPI_MISC;
363 }
364}
365
1da177e4
LT
366/**
367 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
368 * @tf: Taskfile to convert
1da177e4 369 * @pmp: Port multiplier port
9977126c
TH
370 * @is_cmd: This FIS is for command
371 * @fis: Buffer into which data will output
1da177e4
LT
372 *
373 * Converts a standard ATA taskfile to a Serial ATA
374 * FIS structure (Register - Host to Device).
375 *
376 * LOCKING:
377 * Inherited from caller.
378 */
9977126c 379void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 380{
9977126c
TH
381 fis[0] = 0x27; /* Register - Host to Device FIS */
382 fis[1] = pmp & 0xf; /* Port multiplier number*/
383 if (is_cmd)
384 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
385
1da177e4
LT
386 fis[2] = tf->command;
387 fis[3] = tf->feature;
388
389 fis[4] = tf->lbal;
390 fis[5] = tf->lbam;
391 fis[6] = tf->lbah;
392 fis[7] = tf->device;
393
394 fis[8] = tf->hob_lbal;
395 fis[9] = tf->hob_lbam;
396 fis[10] = tf->hob_lbah;
397 fis[11] = tf->hob_feature;
398
399 fis[12] = tf->nsect;
400 fis[13] = tf->hob_nsect;
401 fis[14] = 0;
402 fis[15] = tf->ctl;
403
404 fis[16] = 0;
405 fis[17] = 0;
406 fis[18] = 0;
407 fis[19] = 0;
408}
409
410/**
411 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
412 * @fis: Buffer from which data will be input
413 * @tf: Taskfile to output
414 *
e12a1be6 415 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
416 *
417 * LOCKING:
418 * Inherited from caller.
419 */
420
057ace5e 421void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
422{
423 tf->command = fis[2]; /* status */
424 tf->feature = fis[3]; /* error */
425
426 tf->lbal = fis[4];
427 tf->lbam = fis[5];
428 tf->lbah = fis[6];
429 tf->device = fis[7];
430
431 tf->hob_lbal = fis[8];
432 tf->hob_lbam = fis[9];
433 tf->hob_lbah = fis[10];
434
435 tf->nsect = fis[12];
436 tf->hob_nsect = fis[13];
437}
438
8cbd6df1
AL
439static const u8 ata_rw_cmds[] = {
440 /* pio multi */
441 ATA_CMD_READ_MULTI,
442 ATA_CMD_WRITE_MULTI,
443 ATA_CMD_READ_MULTI_EXT,
444 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
445 0,
446 0,
447 0,
448 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
449 /* pio */
450 ATA_CMD_PIO_READ,
451 ATA_CMD_PIO_WRITE,
452 ATA_CMD_PIO_READ_EXT,
453 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
454 0,
455 0,
456 0,
457 0,
8cbd6df1
AL
458 /* dma */
459 ATA_CMD_READ,
460 ATA_CMD_WRITE,
461 ATA_CMD_READ_EXT,
9a3dccc4
TH
462 ATA_CMD_WRITE_EXT,
463 0,
464 0,
465 0,
466 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 467};
1da177e4
LT
468
469/**
8cbd6df1 470 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
471 * @tf: command to examine and configure
472 * @dev: device tf belongs to
1da177e4 473 *
2e9edbf8 474 * Examine the device configuration and tf->flags to calculate
8cbd6df1 475 * the proper read/write commands and protocol to use.
1da177e4
LT
476 *
477 * LOCKING:
478 * caller.
479 */
bd056d7e 480static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 481{
9a3dccc4 482 u8 cmd;
1da177e4 483
9a3dccc4 484 int index, fua, lba48, write;
2e9edbf8 485
9a3dccc4 486 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
487 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
488 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 489
8cbd6df1
AL
490 if (dev->flags & ATA_DFLAG_PIO) {
491 tf->protocol = ATA_PROT_PIO;
9a3dccc4 492 index = dev->multi_count ? 0 : 8;
9af5c9c9 493 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
494 /* Unable to use DMA due to host limitation */
495 tf->protocol = ATA_PROT_PIO;
0565c26d 496 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
497 } else {
498 tf->protocol = ATA_PROT_DMA;
9a3dccc4 499 index = 16;
8cbd6df1 500 }
1da177e4 501
9a3dccc4
TH
502 cmd = ata_rw_cmds[index + fua + lba48 + write];
503 if (cmd) {
504 tf->command = cmd;
505 return 0;
506 }
507 return -1;
1da177e4
LT
508}
509
35b649fe
TH
510/**
511 * ata_tf_read_block - Read block address from ATA taskfile
512 * @tf: ATA taskfile of interest
513 * @dev: ATA device @tf belongs to
514 *
515 * LOCKING:
516 * None.
517 *
518 * Read block address from @tf. This function can handle all
519 * three address formats - LBA, LBA48 and CHS. tf->protocol and
520 * flags select the address format to use.
521 *
522 * RETURNS:
523 * Block address read from @tf.
524 */
525u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
526{
527 u64 block = 0;
528
529 if (tf->flags & ATA_TFLAG_LBA) {
530 if (tf->flags & ATA_TFLAG_LBA48) {
531 block |= (u64)tf->hob_lbah << 40;
532 block |= (u64)tf->hob_lbam << 32;
533 block |= tf->hob_lbal << 24;
534 } else
535 block |= (tf->device & 0xf) << 24;
536
537 block |= tf->lbah << 16;
538 block |= tf->lbam << 8;
539 block |= tf->lbal;
540 } else {
541 u32 cyl, head, sect;
542
543 cyl = tf->lbam | (tf->lbah << 8);
544 head = tf->device & 0xf;
545 sect = tf->lbal;
546
547 block = (cyl * dev->heads + head) * dev->sectors + sect;
548 }
549
550 return block;
551}
552
bd056d7e
TH
553/**
554 * ata_build_rw_tf - Build ATA taskfile for given read/write request
555 * @tf: Target ATA taskfile
556 * @dev: ATA device @tf belongs to
557 * @block: Block address
558 * @n_block: Number of blocks
559 * @tf_flags: RW/FUA etc...
560 * @tag: tag
561 *
562 * LOCKING:
563 * None.
564 *
565 * Build ATA taskfile @tf for read/write request described by
566 * @block, @n_block, @tf_flags and @tag on @dev.
567 *
568 * RETURNS:
569 *
570 * 0 on success, -ERANGE if the request is too large for @dev,
571 * -EINVAL if the request is invalid.
572 */
573int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
574 u64 block, u32 n_block, unsigned int tf_flags,
575 unsigned int tag)
576{
577 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
578 tf->flags |= tf_flags;
579
6d1245bf 580 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
581 /* yay, NCQ */
582 if (!lba_48_ok(block, n_block))
583 return -ERANGE;
584
585 tf->protocol = ATA_PROT_NCQ;
586 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
587
588 if (tf->flags & ATA_TFLAG_WRITE)
589 tf->command = ATA_CMD_FPDMA_WRITE;
590 else
591 tf->command = ATA_CMD_FPDMA_READ;
592
593 tf->nsect = tag << 3;
594 tf->hob_feature = (n_block >> 8) & 0xff;
595 tf->feature = n_block & 0xff;
596
597 tf->hob_lbah = (block >> 40) & 0xff;
598 tf->hob_lbam = (block >> 32) & 0xff;
599 tf->hob_lbal = (block >> 24) & 0xff;
600 tf->lbah = (block >> 16) & 0xff;
601 tf->lbam = (block >> 8) & 0xff;
602 tf->lbal = block & 0xff;
603
604 tf->device = 1 << 6;
605 if (tf->flags & ATA_TFLAG_FUA)
606 tf->device |= 1 << 7;
607 } else if (dev->flags & ATA_DFLAG_LBA) {
608 tf->flags |= ATA_TFLAG_LBA;
609
610 if (lba_28_ok(block, n_block)) {
611 /* use LBA28 */
612 tf->device |= (block >> 24) & 0xf;
613 } else if (lba_48_ok(block, n_block)) {
614 if (!(dev->flags & ATA_DFLAG_LBA48))
615 return -ERANGE;
616
617 /* use LBA48 */
618 tf->flags |= ATA_TFLAG_LBA48;
619
620 tf->hob_nsect = (n_block >> 8) & 0xff;
621
622 tf->hob_lbah = (block >> 40) & 0xff;
623 tf->hob_lbam = (block >> 32) & 0xff;
624 tf->hob_lbal = (block >> 24) & 0xff;
625 } else
626 /* request too large even for LBA48 */
627 return -ERANGE;
628
629 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
630 return -EINVAL;
631
632 tf->nsect = n_block & 0xff;
633
634 tf->lbah = (block >> 16) & 0xff;
635 tf->lbam = (block >> 8) & 0xff;
636 tf->lbal = block & 0xff;
637
638 tf->device |= ATA_LBA;
639 } else {
640 /* CHS */
641 u32 sect, head, cyl, track;
642
643 /* The request -may- be too large for CHS addressing. */
644 if (!lba_28_ok(block, n_block))
645 return -ERANGE;
646
647 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
648 return -EINVAL;
649
650 /* Convert LBA to CHS */
651 track = (u32)block / dev->sectors;
652 cyl = track / dev->heads;
653 head = track % dev->heads;
654 sect = (u32)block % dev->sectors + 1;
655
656 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
657 (u32)block, track, cyl, head, sect);
658
659 /* Check whether the converted CHS can fit.
660 Cylinder: 0-65535
661 Head: 0-15
662 Sector: 1-255*/
663 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
664 return -ERANGE;
665
666 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
667 tf->lbal = sect;
668 tf->lbam = cyl;
669 tf->lbah = cyl >> 8;
670 tf->device |= head;
671 }
672
673 return 0;
674}
675
cb95d562
TH
676/**
677 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
678 * @pio_mask: pio_mask
679 * @mwdma_mask: mwdma_mask
680 * @udma_mask: udma_mask
681 *
682 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
683 * unsigned int xfer_mask.
684 *
685 * LOCKING:
686 * None.
687 *
688 * RETURNS:
689 * Packed xfer_mask.
690 */
7dc951ae
TH
691unsigned long ata_pack_xfermask(unsigned long pio_mask,
692 unsigned long mwdma_mask,
693 unsigned long udma_mask)
cb95d562
TH
694{
695 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
696 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
697 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
698}
699
c0489e4e
TH
700/**
701 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
702 * @xfer_mask: xfer_mask to unpack
703 * @pio_mask: resulting pio_mask
704 * @mwdma_mask: resulting mwdma_mask
705 * @udma_mask: resulting udma_mask
706 *
707 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
708 * Any NULL distination masks will be ignored.
709 */
7dc951ae
TH
710void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
711 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
712{
713 if (pio_mask)
714 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
715 if (mwdma_mask)
716 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
717 if (udma_mask)
718 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
719}
720
cb95d562 721static const struct ata_xfer_ent {
be9a50c8 722 int shift, bits;
cb95d562
TH
723 u8 base;
724} ata_xfer_tbl[] = {
70cd071e
TH
725 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
726 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
727 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
cb95d562
TH
728 { -1, },
729};
730
731/**
732 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
733 * @xfer_mask: xfer_mask of interest
734 *
735 * Return matching XFER_* value for @xfer_mask. Only the highest
736 * bit of @xfer_mask is considered.
737 *
738 * LOCKING:
739 * None.
740 *
741 * RETURNS:
70cd071e 742 * Matching XFER_* value, 0xff if no match found.
cb95d562 743 */
7dc951ae 744u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
745{
746 int highbit = fls(xfer_mask) - 1;
747 const struct ata_xfer_ent *ent;
748
749 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
750 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
751 return ent->base + highbit - ent->shift;
70cd071e 752 return 0xff;
cb95d562
TH
753}
754
755/**
756 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
757 * @xfer_mode: XFER_* of interest
758 *
759 * Return matching xfer_mask for @xfer_mode.
760 *
761 * LOCKING:
762 * None.
763 *
764 * RETURNS:
765 * Matching xfer_mask, 0 if no match found.
766 */
7dc951ae 767unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
768{
769 const struct ata_xfer_ent *ent;
770
771 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
772 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
773 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
774 & ~((1 << ent->shift) - 1);
cb95d562
TH
775 return 0;
776}
777
778/**
779 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
780 * @xfer_mode: XFER_* of interest
781 *
782 * Return matching xfer_shift for @xfer_mode.
783 *
784 * LOCKING:
785 * None.
786 *
787 * RETURNS:
788 * Matching xfer_shift, -1 if no match found.
789 */
7dc951ae 790int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
791{
792 const struct ata_xfer_ent *ent;
793
794 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
795 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
796 return ent->shift;
797 return -1;
798}
799
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* entry order mirrors the bit layout of the packed xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int bit = fls(xfer_mask) - 1;

	if (bit < 0 || bit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[bit];
}
845
4c360c81
TH
/* return human-readable name for SATA PHY speed @spd (1-based) */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd >= 1 && spd - 1 < ARRAY_SIZE(spd_str))
		return spd_str[spd - 1];
	return "<unknown>";
}
857
3373efd8 858void ata_dev_disable(struct ata_device *dev)
0b8efb0a 859{
09d7f9b0 860 if (ata_dev_enabled(dev)) {
9af5c9c9 861 if (ata_msg_drv(dev->link->ap))
09d7f9b0 862 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
562f0c2d 863 ata_acpi_on_disable(dev);
4ae72a1e
TH
864 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
865 ATA_DNXFER_QUIET);
0b8efb0a
TH
866 dev->class++;
867 }
868}
869
ca77329f
KCA
870static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
871{
872 struct ata_link *link = dev->link;
873 struct ata_port *ap = link->ap;
874 u32 scontrol;
875 unsigned int err_mask;
876 int rc;
877
878 /*
879 * disallow DIPM for drivers which haven't set
880 * ATA_FLAG_IPM. This is because when DIPM is enabled,
881 * phy ready will be set in the interrupt status on
882 * state changes, which will cause some drivers to
883 * think there are errors - additionally drivers will
884 * need to disable hot plug.
885 */
886 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
887 ap->pm_policy = NOT_AVAILABLE;
888 return -EINVAL;
889 }
890
891 /*
892 * For DIPM, we will only enable it for the
893 * min_power setting.
894 *
895 * Why? Because Disks are too stupid to know that
896 * If the host rejects a request to go to SLUMBER
897 * they should retry at PARTIAL, and instead it
898 * just would give up. So, for medium_power to
899 * work at all, we need to only allow HIPM.
900 */
901 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
902 if (rc)
903 return rc;
904
905 switch (policy) {
906 case MIN_POWER:
907 /* no restrictions on IPM transitions */
908 scontrol &= ~(0x3 << 8);
909 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
910 if (rc)
911 return rc;
912
913 /* enable DIPM */
914 if (dev->flags & ATA_DFLAG_DIPM)
915 err_mask = ata_dev_set_feature(dev,
916 SETFEATURES_SATA_ENABLE, SATA_DIPM);
917 break;
918 case MEDIUM_POWER:
919 /* allow IPM to PARTIAL */
920 scontrol &= ~(0x1 << 8);
921 scontrol |= (0x2 << 8);
922 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
923 if (rc)
924 return rc;
925
f5456b63
KCA
926 /*
927 * we don't have to disable DIPM since IPM flags
928 * disallow transitions to SLUMBER, which effectively
929 * disable DIPM if it does not support PARTIAL
930 */
ca77329f
KCA
931 break;
932 case NOT_AVAILABLE:
933 case MAX_PERFORMANCE:
934 /* disable all IPM transitions */
935 scontrol |= (0x3 << 8);
936 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
937 if (rc)
938 return rc;
939
f5456b63
KCA
940 /*
941 * we don't have to disable DIPM since IPM flags
942 * disallow all transitions which effectively
943 * disable DIPM anyway.
944 */
ca77329f
KCA
945 break;
946 }
947
948 /* FIXME: handle SET FEATURES failure */
949 (void) err_mask;
950
951 return 0;
952}
953
954/**
955 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
956 * @dev: device to enable power management
957 * @policy: the link power management policy
ca77329f
KCA
958 *
959 * Enable SATA Interface power management. This will enable
960 * Device Interface Power Management (DIPM) for min_power
961 * policy, and then call driver specific callbacks for
962 * enabling Host Initiated Power management.
963 *
964 * Locking: Caller.
965 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
966 */
967void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
968{
969 int rc = 0;
970 struct ata_port *ap = dev->link->ap;
971
972 /* set HIPM first, then DIPM */
973 if (ap->ops->enable_pm)
974 rc = ap->ops->enable_pm(ap, policy);
975 if (rc)
976 goto enable_pm_out;
977 rc = ata_dev_set_dipm(dev, policy);
978
979enable_pm_out:
980 if (rc)
981 ap->pm_policy = MAX_PERFORMANCE;
982 else
983 ap->pm_policy = policy;
984 return /* rc */; /* hopefully we can use 'rc' eventually */
985}
986
1992a5ed 987#ifdef CONFIG_PM
ca77329f
KCA
988/**
989 * ata_dev_disable_pm - disable SATA interface power management
48166fd9 990 * @dev: device to disable power management
ca77329f
KCA
991 *
992 * Disable SATA Interface power management. This will disable
993 * Device Interface Power Management (DIPM) without changing
994 * policy, call driver specific callbacks for disabling Host
995 * Initiated Power management.
996 *
997 * Locking: Caller.
998 * Returns: void
999 */
1000static void ata_dev_disable_pm(struct ata_device *dev)
1001{
1002 struct ata_port *ap = dev->link->ap;
1003
1004 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1005 if (ap->ops->disable_pm)
1006 ap->ops->disable_pm(ap);
1007}
1992a5ed 1008#endif /* CONFIG_PM */
ca77329f
KCA
1009
1010void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1011{
1012 ap->pm_policy = policy;
3ec25ebd 1013 ap->link.eh_info.action |= ATA_EH_LPM;
ca77329f
KCA
1014 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1015 ata_port_schedule_eh(ap);
1016}
1017
1992a5ed 1018#ifdef CONFIG_PM
ca77329f
KCA
1019static void ata_lpm_enable(struct ata_host *host)
1020{
1021 struct ata_link *link;
1022 struct ata_port *ap;
1023 struct ata_device *dev;
1024 int i;
1025
1026 for (i = 0; i < host->n_ports; i++) {
1027 ap = host->ports[i];
1028 ata_port_for_each_link(link, ap) {
1029 ata_link_for_each_dev(dev, link)
1030 ata_dev_disable_pm(dev);
1031 }
1032 }
1033}
1034
1035static void ata_lpm_disable(struct ata_host *host)
1036{
1037 int i;
1038
1039 for (i = 0; i < host->n_ports; i++) {
1040 struct ata_port *ap = host->ports[i];
1041 ata_lpm_schedule(ap, ap->pm_policy);
1042 }
1043}
1992a5ed 1044#endif /* CONFIG_PM */
ca77329f
KCA
1045
1046
1da177e4 1047/**
0d5ff566 1048 * ata_devchk - PATA device presence detection
1da177e4
LT
1049 * @ap: ATA channel to examine
1050 * @device: Device to examine (starting at zero)
1051 *
1052 * This technique was originally described in
1053 * Hale Landis's ATADRVR (www.ata-atapi.com), and
1054 * later found its way into the ATA/ATAPI spec.
1055 *
1056 * Write a pattern to the ATA shadow registers,
1057 * and if a device is present, it will respond by
1058 * correctly storing and echoing back the
1059 * ATA shadow register contents.
1060 *
1061 * LOCKING:
1062 * caller.
1063 */
1064
0d5ff566 1065static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
1066{
1067 struct ata_ioports *ioaddr = &ap->ioaddr;
1068 u8 nsect, lbal;
1069
1070 ap->ops->dev_select(ap, device);
1071
0d5ff566
TH
1072 iowrite8(0x55, ioaddr->nsect_addr);
1073 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 1074
0d5ff566
TH
1075 iowrite8(0xaa, ioaddr->nsect_addr);
1076 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 1077
0d5ff566
TH
1078 iowrite8(0x55, ioaddr->nsect_addr);
1079 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 1080
0d5ff566
TH
1081 nsect = ioread8(ioaddr->nsect_addr);
1082 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
1083
1084 if ((nsect == 0x55) && (lbal == 0xaa))
1085 return 1; /* we found a device */
1086
1087 return 0; /* nothing found */
1088}
1089
1da177e4
LT
1090/**
1091 * ata_dev_classify - determine device type based on ATA-spec signature
1092 * @tf: ATA taskfile register set for device to be identified
1093 *
1094 * Determine from taskfile register contents whether a device is
1095 * ATA or ATAPI, as per "Signature and persistence" section
1096 * of ATA/PI spec (volume 1, sect 5.14).
1097 *
1098 * LOCKING:
1099 * None.
1100 *
1101 * RETURNS:
633273a3
TH
1102 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1103 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 1104 */
057ace5e 1105unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
1106{
1107 /* Apple's open source Darwin code hints that some devices only
1108 * put a proper signature into the LBA mid/high registers,
1109 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
1110 *
1111 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1112 * signatures for ATA and ATAPI devices attached on SerialATA,
1113 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1114 * spec has never mentioned about using different signatures
1115 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1116 * Multiplier specification began to use 0x69/0x96 to identify
1117 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1118 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1119 * 0x69/0x96 shortly and described them as reserved for
1120 * SerialATA.
1121 *
1122 * We follow the current spec and consider that 0x69/0x96
1123 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 1124 */
633273a3 1125 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
1126 DPRINTK("found ATA device by sig\n");
1127 return ATA_DEV_ATA;
1128 }
1129
633273a3 1130 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
1131 DPRINTK("found ATAPI device by sig\n");
1132 return ATA_DEV_ATAPI;
1133 }
1134
633273a3
TH
1135 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1136 DPRINTK("found PMP device by sig\n");
1137 return ATA_DEV_PMP;
1138 }
1139
1140 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 1141 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
1142 return ATA_DEV_SEMB_UNSUP; /* not yet */
1143 }
1144
1da177e4
LT
1145 DPRINTK("unknown device\n");
1146 return ATA_DEV_UNKNOWN;
1147}
1148
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	/* snapshot the shadow registers; the feature field doubles
	 * as the error register after a reset */
	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		/* all-zero status on a claimed ATA device means no device */
		class = ATA_DEV_NONE;

	return class;
}
1217
1218/**
6a62a04d 1219 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1220 * @id: IDENTIFY DEVICE results we will examine
1221 * @s: string into which data is output
1222 * @ofs: offset into identify device page
1223 * @len: length of string to return. must be an even number.
1224 *
1225 * The strings in the IDENTIFY DEVICE page are broken up into
1226 * 16-bit chunks. Run through the string, and output each
1227 * 8-bit chunk linearly, regardless of platform.
1228 *
1229 * LOCKING:
1230 * caller.
1231 */
1232
6a62a04d
TH
1233void ata_id_string(const u16 *id, unsigned char *s,
1234 unsigned int ofs, unsigned int len)
1da177e4
LT
1235{
1236 unsigned int c;
1237
1238 while (len > 0) {
1239 c = id[ofs] >> 8;
1240 *s = c;
1241 s++;
1242
1243 c = id[ofs] & 0xff;
1244 *s = c;
1245 s++;
1246
1247 ofs++;
1248 len -= 2;
1249 }
1250}
1251
0e949ff3 1252/**
6a62a04d 1253 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1254 * @id: IDENTIFY DEVICE results we will examine
1255 * @s: string into which data is output
1256 * @ofs: offset into identify device page
1257 * @len: length of string to return. must be an odd number.
1258 *
6a62a04d 1259 * This function is identical to ata_id_string except that it
0e949ff3
TH
1260 * trims trailing spaces and terminates the resulting string with
1261 * null. @len must be actual maximum length (even number) + 1.
1262 *
1263 * LOCKING:
1264 * caller.
1265 */
6a62a04d
TH
1266void ata_id_c_string(const u16 *id, unsigned char *s,
1267 unsigned int ofs, unsigned int len)
0e949ff3
TH
1268{
1269 unsigned char *p;
1270
1271 WARN_ON(!(len & 1));
1272
6a62a04d 1273 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1274
1275 p = s + strnlen(s, len - 1);
1276 while (p > s && p[-1] == ' ')
1277 p--;
1278 *p = '\0';
1279}
0baab86b 1280
db6f8759
TH
1281static u64 ata_id_n_sectors(const u16 *id)
1282{
1283 if (ata_id_has_lba(id)) {
1284 if (ata_id_has_lba48(id))
1285 return ata_id_u64(id, 100);
1286 else
1287 return ata_id_u32(id, 60);
1288 } else {
1289 if (ata_id_current_chs_valid(id))
1290 return ata_id_u32(id, 57);
1291 else
1292 return id[1] * id[3] * id[6];
1293 }
1294}
1295
1e999736
AC
1296static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1297{
1298 u64 sectors = 0;
1299
1300 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1301 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1302 sectors |= (tf->hob_lbal & 0xff) << 24;
1303 sectors |= (tf->lbah & 0xff) << 16;
1304 sectors |= (tf->lbam & 0xff) << 8;
1305 sectors |= (tf->lbal & 0xff);
1306
1307 return ++sectors;
1308}
1309
1310static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1311{
1312 u64 sectors = 0;
1313
1314 sectors |= (tf->device & 0x0f) << 24;
1315 sectors |= (tf->lbah & 0xff) << 16;
1316 sectors |= (tf->lbam & 0xff) << 8;
1317 sectors |= (tf->lbal & 0xff);
1318
1319 return ++sectors;
1320}
1321
1322/**
c728a914
TH
1323 * ata_read_native_max_address - Read native max address
1324 * @dev: target device
1325 * @max_sectors: out parameter for the result native max address
1e999736 1326 *
c728a914
TH
1327 * Perform an LBA48 or LBA28 native size query upon the device in
1328 * question.
1e999736 1329 *
c728a914
TH
1330 * RETURNS:
1331 * 0 on success, -EACCES if command is aborted by the drive.
1332 * -EIO on other errors.
1e999736 1333 */
c728a914 1334static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1335{
c728a914 1336 unsigned int err_mask;
1e999736 1337 struct ata_taskfile tf;
c728a914 1338 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1339
1340 ata_tf_init(dev, &tf);
1341
c728a914 1342 /* always clear all address registers */
1e999736 1343 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1344
c728a914
TH
1345 if (lba48) {
1346 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1347 tf.flags |= ATA_TFLAG_LBA48;
1348 } else
1349 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1350
1e999736 1351 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
1352 tf.device |= ATA_LBA;
1353
2b789108 1354 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1355 if (err_mask) {
1356 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1357 "max address (err_mask=0x%x)\n", err_mask);
1358 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1359 return -EACCES;
1360 return -EIO;
1361 }
1e999736 1362
c728a914
TH
1363 if (lba48)
1364 *max_sectors = ata_tf_to_lba48(&tf);
1365 else
1366 *max_sectors = ata_tf_to_lba(&tf);
2dcb407e 1367 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1368 (*max_sectors)--;
c728a914 1369 return 0;
1e999736
AC
1370}
1371
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the highest addressable LBA, not a count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		/* high-order bytes go into the HOB registers */
		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 24-27 live in the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	/* low 24 bits are common to both command variants */
	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* aborted or ID-not-found -> drive refused the resize */
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1428
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? only ATA disks with LBA and an active,
	 * non-blacklisted HPA qualify */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  either no HPA present or unlocking disabled;
	 * just report the discrepancy (once, during probe) and return */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so the new capacity takes effect */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1524
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.  It exists for
 *	controllers where device selection is unnecessary or handled
 *	entirely in hardware.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
	/* intentionally empty */
}
1540
0baab86b 1541
1da177e4
LT
1542/**
1543 * ata_std_dev_select - Select device 0/1 on ATA bus
1544 * @ap: ATA channel to manipulate
1545 * @device: ATA device (numbered from zero) to select
1546 *
1547 * Use the method defined in the ATA specification to
1548 * make either device 0, or device 1, active on the
0baab86b
EF
1549 * ATA channel. Works with both PIO and MMIO.
1550 *
1551 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1552 *
1553 * LOCKING:
1554 * caller.
1555 */
1556
2dcb407e 1557void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1558{
1559 u8 tmp;
1560
1561 if (device == 0)
1562 tmp = ATA_DEVICE_OBS;
1563 else
1564 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1565
0d5ff566 1566 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1567 ata_pause(ap); /* needed; also flushes, for mmio */
1568}
1569
1570/**
1571 * ata_dev_select - Select device 0/1 on ATA bus
1572 * @ap: ATA channel to manipulate
1573 * @device: ATA device (numbered from zero) to select
1574 * @wait: non-zero to wait for Status register BSY bit to clear
1575 * @can_sleep: non-zero if context allows sleeping
1576 *
1577 * Use the method defined in the ATA specification to
1578 * make either device 0, or device 1, active on the
1579 * ATA channel.
1580 *
1581 * This is a high-level version of ata_std_dev_select(),
1582 * which additionally provides the services of inserting
1583 * the proper pauses and status polling, where needed.
1584 *
1585 * LOCKING:
1586 * caller.
1587 */
1588
1589void ata_dev_select(struct ata_port *ap, unsigned int device,
1590 unsigned int wait, unsigned int can_sleep)
1591{
88574551 1592 if (ata_msg_probe(ap))
44877b4e
TH
1593 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1594 "device %u, wait %u\n", device, wait);
1da177e4
LT
1595
1596 if (wait)
1597 ata_wait_idle(ap);
1598
1599 ap->ops->dev_select(ap, device);
1600
1601 if (wait) {
9af5c9c9 1602 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1603 msleep(150);
1604 ata_wait_idle(ap);
1605 }
1606}
1607
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page: capability, field-validity, transfer-mode and
 *	feature-support words.  Compiles to nothing unless DPRINTK
 *	debugging is enabled.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1646
cb95d562
TH
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 reports modes above PIO2; modes 0-2 (0x7)
		 * are always supported */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		/* CFA advanced PIO/DMA modes map onto PIO5/6 and MWDMA3/4 */
		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* word 53 bit 2 validates the UDMA modes word */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1715
/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@data: data for the workqueue function to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule the port_task work for execution after @delay jiffies
 *	with @data stashed in @ap->port_task_data.  There is one
 *	port_task per port and it's the user(low level driver)'s
 *	responsibility to make sure that only one task is active at
 *	any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_queue_task(struct ata_port *ap, void *data,
			       unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1743
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancel and wait; handles the work rearming itself */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}
1763
7102d230 1764static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1765{
77853bf2 1766 struct completion *waiting = qc->private_data;
a2a7a662 1767
a2a7a662 1768 complete(waiting);
a2a7a662
TH
1769}
1770
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the internal tag must be free; anything else is a bug */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the in-flight command state so the internal
	 * command runs alone; restored before returning */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total transfer length is the sum of all sg entries */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit once a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1952
2432697b 1953/**
33480a0e 1954 * ata_exec_internal - execute libata internal command
2432697b
TH
1955 * @dev: Device to which the command is sent
1956 * @tf: Taskfile registers for the command and the result
1957 * @cdb: CDB for packet command
1958 * @dma_dir: Data tranfer direction of the command
1959 * @buf: Data buffer of the command
1960 * @buflen: Length of data buffer
2b789108 1961 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1962 *
1963 * Wrapper around ata_exec_internal_sg() which takes simple
1964 * buffer instead of sg list.
1965 *
1966 * LOCKING:
1967 * None. Should be called with kernel context, might sleep.
1968 *
1969 * RETURNS:
1970 * Zero on success, AC_ERR_* mask on failure
1971 */
1972unsigned ata_exec_internal(struct ata_device *dev,
1973 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1974 int dma_dir, void *buf, unsigned int buflen,
1975 unsigned long timeout)
2432697b 1976{
33480a0e
TH
1977 struct scatterlist *psg = NULL, sg;
1978 unsigned int n_elem = 0;
2432697b 1979
33480a0e
TH
1980 if (dma_dir != DMA_NONE) {
1981 WARN_ON(!buf);
1982 sg_init_one(&sg, buf, buflen);
1983 psg = &sg;
1984 n_elem++;
1985 }
2432697b 1986
2b789108
TH
1987 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1988 timeout);
2432697b
TH
1989}
1990
977e6b9f
TH
1991/**
1992 * ata_do_simple_cmd - execute simple internal command
1993 * @dev: Device to which the command is sent
1994 * @cmd: Opcode to execute
1995 *
1996 * Execute a 'simple' command, that only consists of the opcode
1997 * 'cmd' itself, without filling any other registers
1998 *
1999 * LOCKING:
2000 * Kernel thread context (may sleep).
2001 *
2002 * RETURNS:
2003 * Zero on success, AC_ERR_* mask on failure
e58eb583 2004 */
77b08fb5 2005unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
2006{
2007 struct ata_taskfile tf;
e58eb583
TH
2008
2009 ata_tf_init(dev, &tf);
2010
2011 tf.command = cmd;
2012 tf.flags |= ATA_TFLAG_DEVICE;
2013 tf.protocol = ATA_PROT_NODATA;
2014
2b789108 2015 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
2016}
2017
1bc4ccff
AC
2018/**
2019 * ata_pio_need_iordy - check if iordy needed
2020 * @adev: ATA device
2021 *
2022 * Check if the current speed of the device requires IORDY. Used
2023 * by various controllers for chip configuration.
2024 */
a617c09f 2025
1bc4ccff
AC
2026unsigned int ata_pio_need_iordy(const struct ata_device *adev)
2027{
432729f0
AC
2028 /* Controller doesn't support IORDY. Probably a pointless check
2029 as the caller should know this */
9af5c9c9 2030 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 2031 return 0;
432729f0
AC
2032 /* PIO3 and higher it is mandatory */
2033 if (adev->pio_mode > XFER_PIO_2)
2034 return 1;
2035 /* We turn it on when possible */
2036 if (ata_id_has_iordy(adev->id))
1bc4ccff 2037 return 1;
432729f0
AC
2038 return 0;
2039}
2e9edbf8 2040
432729f0
AC
2041/**
2042 * ata_pio_mask_no_iordy - Return the non IORDY mask
2043 * @adev: ATA device
2044 *
2045 * Compute the highest mode possible if we are not using iordy. Return
2046 * -1 if no iordy mode is available.
2047 */
a617c09f 2048
432729f0
AC
2049static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2050{
1bc4ccff 2051 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 2052 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 2053 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
2054 /* Is the speed faster than the drive allows non IORDY ? */
2055 if (pio) {
2056 /* This is cycle times not frequency - watch the logic! */
2057 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
2058 return 3 << ATA_SHIFT_PIO;
2059 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
2060 }
2061 }
432729f0 2062 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
2063}
2064
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into (ATA_ID_WORDS u16s)
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
	/* May be re-entered with a different @class (ATA<->ATAPI
	 * fallback), after a spin-up, or after INIT_DEV_PARAMS. */
 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_printk(dev, KERN_DEBUG,
				       "NODEV after polling detection\n");
			return -ENOENT;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_printk(dev, KERN_DEBUG,
				       "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* id[2] values 0x37c8/0x738c flag a drive that powered up in
	 * standby; presumably per the spin-up feature set — see the
	 * comment below. */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * shoud never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

	/* Common error exit: @rc and @reason must be set. */
 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
2246
3373efd8 2247static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2248{
9af5c9c9
TH
2249 struct ata_port *ap = dev->link->ap;
2250 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2251}
2252
a6e6ce8e
TH
2253static void ata_dev_config_ncq(struct ata_device *dev,
2254 char *desc, size_t desc_sz)
2255{
9af5c9c9 2256 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2257 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2258
2259 if (!ata_id_has_ncq(dev->id)) {
2260 desc[0] = '\0';
2261 return;
2262 }
75683fe7 2263 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2264 snprintf(desc, desc_sz, "NCQ (not used)");
2265 return;
2266 }
a6e6ce8e 2267 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2268 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2269 dev->flags |= ATA_DFLAG_NCQ;
2270 }
2271
2272 if (hdepth >= ddepth)
2273 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2274 else
2275 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2276}
2277
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	/* Nothing to do for a disabled device. */
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage (per-device quirks): blacklist table first, then
	 * user-forced overrides */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __func__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* word 59 bit 8 set => low byte holds the current
		 * multi-sector transfer count */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH EXT needed only past the 28-bit
				 * boundary and when the drive supports it */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* DMADIR can be forced by module parameter or reported
		 * by the device itself */
		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
				       dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	/* give the LLD a chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __func__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __func__);
	return rc;
}
2572
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40 always.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2585
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80 always.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2598
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK always.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2610
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_IGN always.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2622
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables
 *
 *	RETURNS:
 *	ATA_CBL_SATA always.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2634
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* each device gets a fixed budget of probe attempts */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev_reverse(dev, &ap->link) {
		/* only restore a class for devices with tries remaining */
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device survived probing */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

	/* error recovery: @dev is the device that failed, @rc its error */
 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2783
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing the DISABLED flag is all that "enabled" means here */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2799
3be680b7
TH
2800/**
2801 * sata_print_link_status - Print SATA link status
936fd732 2802 * @link: SATA link to printk link status about
3be680b7
TH
2803 *
2804 * This function prints link speed and status of a SATA link.
2805 *
2806 * LOCKING:
2807 * None.
2808 */
936fd732 2809void sata_print_link_status(struct ata_link *link)
3be680b7 2810{
6d5f9732 2811 u32 sstatus, scontrol, tmp;
3be680b7 2812
936fd732 2813 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2814 return;
936fd732 2815 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2816
936fd732 2817 if (ata_link_online(link)) {
3be680b7 2818 tmp = (sstatus >> 4) & 0xf;
936fd732 2819 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2820 "SATA link up %s (SStatus %X SControl %X)\n",
2821 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2822 } else {
936fd732 2823 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2824 "SATA link down (SStatus %X SControl %X)\n",
2825 sstatus, scontrol);
3be680b7
TH
2826 }
2827}
2828
ebdfca6e
AC
2829/**
2830 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2831 * @adev: device
2832 *
2833 * Obtain the other device on the same cable, or if none is
2834 * present NULL is returned
2835 */
2e9edbf8 2836
3373efd8 2837struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2838{
9af5c9c9
TH
2839 struct ata_link *link = adev->link;
2840 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2841 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2842 return NULL;
2843 return pair;
2844}
2845
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* mark both possible devices absent, then flag the port disabled */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2865
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;	/* SStatus bits 7:4 */
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	/* mask of 1 or 0 means we're already at the lowest speed */
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2924
936fd732 2925static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2926{
5270222f
TH
2927 struct ata_link *host_link = &link->ap->link;
2928 u32 limit, target, spd;
1c3fae4d 2929
5270222f
TH
2930 limit = link->sata_spd_limit;
2931
2932 /* Don't configure downstream link faster than upstream link.
2933 * It doesn't speed up anything and some PMPs choke on such
2934 * configuration.
2935 */
2936 if (!ata_is_host_link(link) && host_link->sata_spd)
2937 limit &= (1 << host_link->sata_spd) - 1;
2938
2939 if (limit == UINT_MAX)
2940 target = 0;
1c3fae4d 2941 else
5270222f 2942 target = fls(limit);
1c3fae4d
TH
2943
2944 spd = (*scontrol >> 4) & 0xf;
5270222f 2945 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2946
5270222f 2947 return spd != target;
1c3fae4d
TH
2948}
2949
2950/**
3c567b7d 2951 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2952 * @link: Link in question
1c3fae4d
TH
2953 *
2954 * Test whether the spd limit in SControl matches
936fd732 2955 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2956 * whether hardreset is necessary to apply SATA spd
2957 * configuration.
2958 *
2959 * LOCKING:
2960 * Inherited from caller.
2961 *
2962 * RETURNS:
2963 * 1 if SATA spd configuration is needed, 0 otherwise.
2964 */
936fd732 2965int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2966{
2967 u32 scontrol;
2968
936fd732 2969 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2970 return 1;
1c3fae4d 2971
936fd732 2972 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2973}
2974
2975/**
3c567b7d 2976 * sata_set_spd - set SATA spd according to spd limit
936fd732 2977 * @link: Link to set SATA spd for
1c3fae4d 2978 *
936fd732 2979 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2980 *
2981 * LOCKING:
2982 * Inherited from caller.
2983 *
2984 * RETURNS:
2985 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2986 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2987 */
936fd732 2988int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2989{
2990 u32 scontrol;
81952c54 2991 int rc;
1c3fae4d 2992
936fd732 2993 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2994 return rc;
1c3fae4d 2995
936fd732 2996 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2997 return 0;
2998
936fd732 2999 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
3000 return rc;
3001
1c3fae4d
TH
3002 return 1;
3003}
3004
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

/* Columns after the mode id follow struct ata_timing's field order
 * (presumably setup/act8b/rec8b/cyc8b/active/recover/cycle/udma, as
 * used by ata_timing_quantize() — verify against the struct decl).
 * UDMA modes only carry the final (udma cycle) value. */
static const struct ata_timing ata_timing[] = {
/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },

	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },

	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },

/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },

	/* sentinel - 0xFF is not a valid XFER_* mode id */
	{ 0xFF }
};
3048
/* ENOUGH(): ceiling division -- smallest count of @unit periods that
 * covers @v nanoseconds (assumes v >= 1). */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
/* EZ(): like ENOUGH() but maps 0 to 0, so absent timings stay absent. */
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
3051
/* Convert nanosecond timings in @t into bus-clock counts in @q,
 * rounding each field up to whole cycles.  @T is the cycle time of
 * the command/data bus clock and @UT the UDMA clock, both in
 * picoseconds (hence the * 1000 on the nanosecond inputs).
 * @t and @q may alias (callers pass the same struct for both). */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);	/* only field on the UDMA clock */
}
3063
/* Merge timings @a and @b into @m, taking the worst case (maximum)
 * of each field selected by the ATA_TIMING_* bits in @what.  @m may
 * alias @a or @b; each field is written independently. */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma, b->udma);
}
3076
6357357c 3077const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 3078{
70cd071e
TH
3079 const struct ata_timing *t = ata_timing;
3080
3081 while (xfer_mode > t->mode)
3082 t++;
452503f9 3083
70cd071e
TH
3084 if (xfer_mode == t->mode)
3085 return t;
3086 return NULL;
452503f9
AC
3087}
3088
/**
 *	ata_timing_compute - compute quantized timings for a device/mode
 *	@adev: target device (its IDENTIFY data may refine the timings)
 *	@speed: XFER_* transfer mode to compute timings for
 *	@t: output timing structure (bus-clock counts)
 *	@T: bus clock cycle time in picoseconds
 *	@UT: UDMA clock cycle time in picoseconds
 *
 *	Look up the base timings for @speed, widen them with any EIDE
 *	cycle-time requirements the drive reports, quantize to bus
 *	clocks, clamp DMA timings to the active PIO timings, and fix up
 *	active/recovery so they fill the cycle.
 *
 *	RETURNS:
 *	0 on success, -EINVAL if @speed has no timing entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* word 67 = min PIO cycle w/o IORDY, word 68 = with IORDY */
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* take the worse (larger) of table vs drive-reported cycles */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse once for the device's current PIO mode */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3159
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base (slowest) mode of this transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk successively faster modes of the same type, remembering
	 * the last one whose cycle requirement still fits in @cycle */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3210
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector (may be ORed with ATA_DNXFER_QUIET)
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* QUIET is a modifier bit, not a selector -- strip it first */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* knock off the highest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* knock off the highest DMA mode, preferring UDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to speeds safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through -- FORCE_PIO0 also clears all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* must keep at least one PIO mode and must actually change something */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3299
/**
 *	ata_dev_set_mode - issue SET FEATURES - XFER MODE and revalidate
 *	@dev: device whose already-chosen xfer_mode/xfer_shift to apply
 *
 *	Issue SET FEATURES - XFER MODE to @dev, revalidate the device,
 *	and ignore device errors from known-quirky configurations where
 *	the command fails but the device nevertheless operates correctly.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask;
	int rc;

	/* keep the PIO flag in sync with the selected transfer shift */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* non-device errors (HSM, timeout, ...) are always fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		ign_dev_err = 1;

	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		ign_dev_err = 1;

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		       dev_err_whine);

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
		       "(err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3366
1da177e4 3367/**
04351821 3368 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3369 * @link: link on which timings will be programmed
1967b7ff 3370 * @r_failed_dev: out parameter for failed device
1da177e4 3371 *
04351821
A
3372 * Standard implementation of the function used to tune and set
3373 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3374 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3375 * returned in @r_failed_dev.
780a87f7 3376 *
1da177e4 3377 * LOCKING:
0cba632b 3378 * PCI/etc. bus probe sem.
e82cbdb9
TH
3379 *
3380 * RETURNS:
3381 * 0 on success, negative errno otherwise
1da177e4 3382 */
04351821 3383
0260731f 3384int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3385{
0260731f 3386 struct ata_port *ap = link->ap;
e8e0619f 3387 struct ata_device *dev;
f58229f8 3388 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3389
a6d5a51c 3390 /* step 1: calculate xfer_mask */
f58229f8 3391 ata_link_for_each_dev(dev, link) {
7dc951ae 3392 unsigned long pio_mask, dma_mask;
b3a70601 3393 unsigned int mode_mask;
a6d5a51c 3394
e1211e3f 3395 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3396 continue;
3397
b3a70601
AC
3398 mode_mask = ATA_DMA_MASK_ATA;
3399 if (dev->class == ATA_DEV_ATAPI)
3400 mode_mask = ATA_DMA_MASK_ATAPI;
3401 else if (ata_id_is_cfa(dev->id))
3402 mode_mask = ATA_DMA_MASK_CFA;
3403
3373efd8 3404 ata_dev_xfermask(dev);
33267325 3405 ata_force_xfermask(dev);
1da177e4 3406
acf356b1
TH
3407 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3408 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3409
3410 if (libata_dma_mask & mode_mask)
3411 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3412 else
3413 dma_mask = 0;
3414
acf356b1
TH
3415 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3416 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3417
4f65977d 3418 found = 1;
70cd071e 3419 if (dev->dma_mode != 0xff)
5444a6f4 3420 used_dma = 1;
a6d5a51c 3421 }
4f65977d 3422 if (!found)
e82cbdb9 3423 goto out;
a6d5a51c
TH
3424
3425 /* step 2: always set host PIO timings */
f58229f8 3426 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3427 if (!ata_dev_enabled(dev))
3428 continue;
3429
70cd071e 3430 if (dev->pio_mode == 0xff) {
f15a1daf 3431 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3432 rc = -EINVAL;
e82cbdb9 3433 goto out;
e8e0619f
TH
3434 }
3435
3436 dev->xfer_mode = dev->pio_mode;
3437 dev->xfer_shift = ATA_SHIFT_PIO;
3438 if (ap->ops->set_piomode)
3439 ap->ops->set_piomode(ap, dev);
3440 }
1da177e4 3441
a6d5a51c 3442 /* step 3: set host DMA timings */
f58229f8 3443 ata_link_for_each_dev(dev, link) {
70cd071e 3444 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
e8e0619f
TH
3445 continue;
3446
3447 dev->xfer_mode = dev->dma_mode;
3448 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3449 if (ap->ops->set_dmamode)
3450 ap->ops->set_dmamode(ap, dev);
3451 }
1da177e4
LT
3452
3453 /* step 4: update devices' xfer mode */
f58229f8 3454 ata_link_for_each_dev(dev, link) {
18d90deb 3455 /* don't update suspended devices' xfer mode */
9666f400 3456 if (!ata_dev_enabled(dev))
83206a29
TH
3457 continue;
3458
3373efd8 3459 rc = ata_dev_set_mode(dev);
5bbc53f4 3460 if (rc)
e82cbdb9 3461 goto out;
83206a29 3462 }
1da177e4 3463
e8e0619f
TH
3464 /* Record simplex status. If we selected DMA then the other
3465 * host channels are not permitted to do so.
5444a6f4 3466 */
cca3974e 3467 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3468 ap->host->simplex_claimed = ap;
5444a6f4 3469
e82cbdb9
TH
3470 out:
3471 if (rc)
3472 *r_failed_dev = dev;
3473 return rc;
1da177e4
LT
3474}
3475
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load registers first, then write the command register */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
3495
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (jiffies before warning is printed)
 *	@tmout: overall timeout (jiffies)
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.  A status of 0xff is treated as
 *	"no device present" and polling stops early.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* first phase: poll quietly until the impatience timeout */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* second phase: keep polling until the overall timeout */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3550
/**
 *	ata_wait_after_reset - wait before checking status after reset
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	After reset, we need to pause a while before reading status.
 *	Also, certain combination of controller and device report 0xff
 *	for some duration (e.g. until SATA PHY is up and running)
 *	which is interpreted as empty port in ATA world.  This
 *	function also waits for such devices to get out of 0xff
 *	status.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
{
	/* cap the 0xff-wait by ATA_TMOUT_FF_WAIT from now */
	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;

	if (time_before(until, deadline))
		deadline = until;

	/* Spec mandates ">= 2ms" before checking status.  We wait
	 * 150ms, because that was the magic delay used for ATAPI
	 * devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready.
	 */
	msleep(150);

	/* Wait for 0xff to clear.  Some SATA devices take a long time
	 * to clear 0xff after reset.  For example, HHD424020F7SV00
	 * iVDR needs >= 800ms while.  Quantum GoVault needs even more
	 * than that.
	 *
	 * Note that some PATA controllers (pata_ali) explode if
	 * status register is read more than once when there's no
	 * device attached.
	 */
	if (ap->flags & ATA_FLAG_SATA) {
		while (1) {
			u8 status = ata_chk_status(ap);

			if (status != 0xff || time_after(jiffies, deadline))
				return;

			msleep(50);
		}
	}
}
3605
d4b2bab4
TH
3606/**
3607 * ata_wait_ready - sleep until BSY clears, or timeout
3608 * @ap: port containing status register to be polled
3609 * @deadline: deadline jiffies for the operation
3610 *
3611 * Sleep until ATA Status register bit BSY clears, or timeout
3612 * occurs.
3613 *
3614 * LOCKING:
3615 * Kernel thread context (may sleep).
3616 *
3617 * RETURNS:
3618 * 0 on success, -errno otherwise.
3619 */
3620int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3621{
3622 unsigned long start = jiffies;
3623 int warned = 0;
3624
3625 while (1) {
3626 u8 status = ata_chk_status(ap);
3627 unsigned long now = jiffies;
3628
3629 if (!(status & ATA_BUSY))
3630 return 0;
936fd732 3631 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3632 return -ENODEV;
3633 if (time_after(now, deadline))
3634 return -EBUSY;
3635
3636 if (!warned && time_after(now, start + 5 * HZ) &&
3637 (deadline - now > 3 * HZ)) {
3638 ata_port_printk(ap, KERN_WARNING,
3639 "port is slow to respond, please be patient "
3640 "(Status 0x%x)\n", status);
3641 warned = 1;
3642 }
3643
3644 msleep(50);
3645 }
3646}
3647
/* Post-SRST settling: wait for each present device to come out of BSY
 * and re-run device selection.  Returns 0 on success; -ENODEV if a
 * device went missing (soft failure the caller may tolerate); other
 * negative errno on hard failure. */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			/* -ENODEV is remembered but not fatal yet */
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3707
/* Perform an ATA software reset (SRST via the device control register)
 * and hand off to ata_bus_post_reset() for settling.  Returns -ENODEV
 * when the bus floats at 0xFF (nothing attached), otherwise whatever
 * ata_bus_post_reset() returns. */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_chk_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3734
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA reset path assumes dev0 exists */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		/* -ENODEV (empty bus) is tolerated here */
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3822
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline = min(params timeout, caller's deadline) */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field of SStatus matters here */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 only counts as stable once past deadline */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3891
3892/**
936fd732
TH
3893 * sata_link_resume - resume SATA link
3894 * @link: ATA link to resume SATA
d7bb4cc7 3895 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3896 * @deadline: deadline jiffies for the operation
d7bb4cc7 3897 *
936fd732 3898 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3899 *
3900 * LOCKING:
3901 * Kernel thread context (may sleep)
3902 *
3903 * RETURNS:
3904 * 0 on success, -errno on failure.
3905 */
936fd732
TH
3906int sata_link_resume(struct ata_link *link, const unsigned long *params,
3907 unsigned long deadline)
d7bb4cc7
TH
3908{
3909 u32 scontrol;
81952c54
TH
3910 int rc;
3911
936fd732 3912 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3913 return rc;
7a7921e8 3914
852ee16a 3915 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3916
936fd732 3917 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3918 return rc;
7a7921e8 3919
d7bb4cc7
TH
3920 /* Some PHYs react badly if SStatus is pounded immediately
3921 * after resuming. Delay 200ms before debouncing.
3922 */
3923 msleep(200);
7a7921e8 3924
936fd732 3925 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3926}
3927
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* wait for !BSY if we don't know that no device is attached */
	if (!ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		/* escalate to hardreset instead of failing prereset */
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3977
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing attached, skip the reset entirely */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
4037
4038/**
cc0680a5
TH
4039 * sata_link_hardreset - reset link via SATA phy reset
4040 * @link: link to reset
b6103f6d 4041 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 4042 * @deadline: deadline jiffies for the operation
c2bd5804 4043 *
cc0680a5 4044 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
4045 *
4046 * LOCKING:
4047 * Kernel thread context (may sleep)
4048 *
4049 * RETURNS:
4050 * 0 on success, -errno otherwise.
4051 */
cc0680a5 4052int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
d4b2bab4 4053 unsigned long deadline)
c2bd5804 4054{
852ee16a 4055 u32 scontrol;
81952c54 4056 int rc;
852ee16a 4057
c2bd5804
TH
4058 DPRINTK("ENTER\n");
4059
936fd732 4060 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
4061 /* SATA spec says nothing about how to reconfigure
4062 * spd. To be on the safe side, turn off phy during
4063 * reconfiguration. This works for at least ICH7 AHCI
4064 * and Sil3124.
4065 */
936fd732 4066 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 4067 goto out;
81952c54 4068
a34b6fc0 4069 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 4070
936fd732 4071 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 4072 goto out;
1c3fae4d 4073
936fd732 4074 sata_set_spd(link);
1c3fae4d
TH
4075 }
4076
4077 /* issue phy wake/reset */
936fd732 4078 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 4079 goto out;
81952c54 4080
852ee16a 4081 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 4082
936fd732 4083 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 4084 goto out;
c2bd5804 4085
1c3fae4d 4086 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
4087 * 10.4.2 says at least 1 ms.
4088 */
4089 msleep(1);
4090
936fd732
TH
4091 /* bring link back */
4092 rc = sata_link_resume(link, timing, deadline);
b6103f6d
TH
4093 out:
4094 DPRINTK("EXIT, rc=%d\n", rc);
4095 return rc;
4096}
4097
4098/**
4099 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 4100 * @link: link to reset
b6103f6d 4101 * @class: resulting class of attached device
d4b2bab4 4102 * @deadline: deadline jiffies for the operation
b6103f6d
TH
4103 *
4104 * SATA phy-reset host port using DET bits of SControl register,
4105 * wait for !BSY and classify the attached device.
4106 *
4107 * LOCKING:
4108 * Kernel thread context (may sleep)
4109 *
4110 * RETURNS:
4111 * 0 on success, -errno otherwise.
4112 */
cc0680a5 4113int sata_std_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 4114 unsigned long deadline)
b6103f6d 4115{
cc0680a5 4116 struct ata_port *ap = link->ap;
936fd732 4117 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
b6103f6d
TH
4118 int rc;
4119
4120 DPRINTK("ENTER\n");
4121
4122 /* do hardreset */
cc0680a5 4123 rc = sata_link_hardreset(link, timing, deadline);
b6103f6d 4124 if (rc) {
cc0680a5 4125 ata_link_printk(link, KERN_ERR,
b6103f6d
TH
4126 "COMRESET failed (errno=%d)\n", rc);
4127 return rc;
4128 }
c2bd5804 4129
c2bd5804 4130 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 4131 if (ata_link_offline(link)) {
c2bd5804
TH
4132 *class = ATA_DEV_NONE;
4133 DPRINTK("EXIT, link offline\n");
4134 return 0;
4135 }
4136
88ff6eaf
TH
4137 /* wait a while before checking status */
4138 ata_wait_after_reset(ap, deadline);
34fee227 4139
633273a3
TH
4140 /* If PMP is supported, we have to do follow-up SRST. Note
4141 * that some PMPs don't send D2H Reg FIS after hardreset at
4142 * all if the first port is empty. Wait for it just for a
4143 * second and request follow-up SRST.
4144 */
4145 if (ap->flags & ATA_FLAG_PMP) {
4146 ata_wait_ready(ap, jiffies + HZ);
4147 return -EAGAIN;
4148 }
4149
d4b2bab4 4150 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
4151 /* link occupied, -ENODEV too is an error */
4152 if (rc) {
cc0680a5 4153 ata_link_printk(link, KERN_ERR,
d4b2bab4
TH
4154 "COMRESET failed (errno=%d)\n", rc);
4155 return rc;
c2bd5804
TH
4156 }
4157
3a39746a
TH
4158 ap->ops->dev_select(ap, 0); /* probably unnecessary */
4159
3f19859e 4160 *class = ata_dev_try_classify(link->device, 1, NULL);
c2bd5804
TH
4161
4162 DPRINTK("EXIT, class=%u\n", *class);
4163 return 0;
4164}
4165
4166/**
4167 * ata_std_postreset - standard postreset callback
cc0680a5 4168 * @link: the target ata_link
c2bd5804
TH
4169 * @classes: classes of attached devices
4170 *
4171 * This function is invoked after a successful reset. Note that
4172 * the device might have been reset more than once using
4173 * different reset methods before postreset is invoked.
c2bd5804 4174 *
c2bd5804
TH
4175 * LOCKING:
4176 * Kernel thread context (may sleep)
4177 */
cc0680a5 4178void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 4179{
cc0680a5 4180 struct ata_port *ap = link->ap;
dc2b3515
TH
4181 u32 serror;
4182
c2bd5804
TH
4183 DPRINTK("ENTER\n");
4184
c2bd5804 4185 /* print link status */
936fd732 4186 sata_print_link_status(link);
c2bd5804 4187
dc2b3515 4188 /* clear SError */
936fd732
TH
4189 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
4190 sata_scr_write(link, SCR_ERROR, serror);
f7fe7ad4 4191 link->eh_info.serror = 0;
dc2b3515 4192
c2bd5804
TH
4193 /* is double-select really necessary? */
4194 if (classes[0] != ATA_DEV_NONE)
4195 ap->ops->dev_select(ap, 1);
4196 if (classes[1] != ATA_DEV_NONE)
4197 ap->ops->dev_select(ap, 0);
4198
3a39746a
TH
4199 /* bail out if no device is present */
4200 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
4201 DPRINTK("EXIT, no device\n");
4202 return;
4203 }
4204
4205 /* set up device control */
0d5ff566
TH
4206 if (ap->ioaddr.ctl_addr)
4207 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
4208
4209 DPRINTK("EXIT\n");
4210}
4211
623a3128
TH
4212/**
4213 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
4214 * @dev: device to compare against
4215 * @new_class: class of the new device
4216 * @new_id: IDENTIFY page of the new device
4217 *
4218 * Compare @new_class and @new_id against @dev and determine
4219 * whether @dev is the device indicated by @new_class and
4220 * @new_id.
4221 *
4222 * LOCKING:
4223 * None.
4224 *
4225 * RETURNS:
4226 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4227 */
3373efd8
TH
4228static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4229 const u16 *new_id)
623a3128
TH
4230{
4231 const u16 *old_id = dev->id;
a0cf733b
TH
4232 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4233 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
4234
4235 if (dev->class != new_class) {
f15a1daf
TH
4236 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4237 dev->class, new_class);
623a3128
TH
4238 return 0;
4239 }
4240
a0cf733b
TH
4241 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4242 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4243 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4244 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
4245
4246 if (strcmp(model[0], model[1])) {
f15a1daf
TH
4247 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4248 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
4249 return 0;
4250 }
4251
4252 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
4253 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4254 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
4255 return 0;
4256 }
4257
623a3128
TH
4258 return 1;
4259}
4260
4261/**
fe30911b 4262 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 4263 * @dev: target ATA device
bff04647 4264 * @readid_flags: read ID flags
623a3128
TH
4265 *
4266 * Re-read IDENTIFY page and make sure @dev is still attached to
4267 * the port.
4268 *
4269 * LOCKING:
4270 * Kernel thread context (may sleep)
4271 *
4272 * RETURNS:
4273 * 0 on success, negative errno otherwise
4274 */
fe30911b 4275int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 4276{
5eb45c02 4277 unsigned int class = dev->class;
9af5c9c9 4278 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
4279 int rc;
4280
fe635c7e 4281 /* read ID data */
bff04647 4282 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 4283 if (rc)
fe30911b 4284 return rc;
623a3128
TH
4285
4286 /* is the device still there? */
fe30911b
TH
4287 if (!ata_dev_same_device(dev, class, id))
4288 return -ENODEV;
623a3128 4289
fe635c7e 4290 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4291 return 0;
4292}
4293
4294/**
4295 * ata_dev_revalidate - Revalidate ATA device
4296 * @dev: device to revalidate
422c9daa 4297 * @new_class: new class code
fe30911b
TH
4298 * @readid_flags: read ID flags
4299 *
4300 * Re-read IDENTIFY page, make sure @dev is still attached to the
4301 * port and reconfigure it according to the new IDENTIFY page.
4302 *
4303 * LOCKING:
4304 * Kernel thread context (may sleep)
4305 *
4306 * RETURNS:
4307 * 0 on success, negative errno otherwise
4308 */
422c9daa
TH
4309int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4310 unsigned int readid_flags)
fe30911b 4311{
6ddcd3b0 4312 u64 n_sectors = dev->n_sectors;
fe30911b
TH
4313 int rc;
4314
4315 if (!ata_dev_enabled(dev))
4316 return -ENODEV;
4317
422c9daa
TH
4318 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4319 if (ata_class_enabled(new_class) &&
4320 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4321 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4322 dev->class, new_class);
4323 rc = -ENODEV;
4324 goto fail;
4325 }
4326
fe30911b
TH
4327 /* re-read ID */
4328 rc = ata_dev_reread_id(dev, readid_flags);
4329 if (rc)
4330 goto fail;
623a3128
TH
4331
4332 /* configure device according to the new ID */
efdaedc4 4333 rc = ata_dev_configure(dev);
6ddcd3b0
TH
4334 if (rc)
4335 goto fail;
4336
4337 /* verify n_sectors hasn't changed */
b54eebd6
TH
4338 if (dev->class == ATA_DEV_ATA && n_sectors &&
4339 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
4340 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4341 "%llu != %llu\n",
4342 (unsigned long long)n_sectors,
4343 (unsigned long long)dev->n_sectors);
8270bec4
TH
4344
4345 /* restore original n_sectors */
4346 dev->n_sectors = n_sectors;
4347
6ddcd3b0
TH
4348 rc = -ENODEV;
4349 goto fail;
4350 }
4351
4352 return 0;
623a3128
TH
4353
4354 fail:
f15a1daf 4355 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
4356 return rc;
4357}
4358
6919a0a6
AC
/* One quirk table entry: a device model (and optionally a firmware
 * revision) paired with the ATA_HORKAGE_* flags to apply to it.
 */
struct ata_blacklist_entry {
	const char *model_num;	/* model string, trailing '*' acts as wildcard */
	const char *model_rev;	/* firmware revision, NULL matches any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply */
};
4364
4365static const struct ata_blacklist_entry ata_device_blacklist [] = {
4366 /* Devices with DMA related problems under Linux */
4367 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4368 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4369 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4370 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4371 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4372 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4373 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4374 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4375 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4376 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4377 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4378 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4379 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4380 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4381 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4382 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4383 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4384 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4385 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4386 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4387 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4388 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4389 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4390 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4391 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4392 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4393 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4394 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 4395 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 4396 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3af9a77a
TH
4397 /* Odd clown on sil3726/4726 PMPs */
4398 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
4399 ATA_HORKAGE_SKIP_PM },
6919a0a6 4400
18d6e9d5 4401 /* Weird ATAPI devices */
40a1d531 4402 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 4403
6919a0a6
AC
4404 /* Devices we expect to fail diagnostics */
4405
4406 /* Devices where NCQ should be avoided */
4407 /* NCQ is slow */
2dcb407e 4408 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
459ad688 4409 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
09125ea6
TH
4410 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4411 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 4412 /* NCQ is broken */
539cc7c7 4413 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 4414 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
da6f0ec2 4415 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
e41bd3e8 4416 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
539cc7c7 4417
36e337d0
RH
4418 /* Blacklist entries taken from Silicon Image 3124/3132
4419 Windows driver .inf file - also several Linux problem reports */
4420 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4421 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4422 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6 4423
16c55b03
TH
4424 /* devices which puke on READ_NATIVE_MAX */
4425 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4426 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4427 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4428 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 4429
93328e11
AC
4430 /* Devices which report 1 sector over size HPA */
4431 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4432 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
b152fcd3 4433 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
93328e11 4434
6bbfd53d
AC
4435 /* Devices which get the IVB wrong */
4436 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4437 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
e9f33406
PM
4438 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4439 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4440 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
6bbfd53d 4441
6919a0a6
AC
4442 /* End Marker */
4443 { }
1da177e4 4444};
2e9edbf8 4445
/**
 *	strn_pattern_cmp - match a device string against a blacklist pattern
 *	@patt: pattern, optionally ending in the wildcard character
 *	@name: device-reported string to test
 *	@wildchar: wildcard character (honored only as the last pattern char)
 *
 *	A trailing wildcard turns the match into a prefix comparison;
 *	otherwise the strings must be identical.
 *
 *	RETURNS:
 *	0 on match, non-zero otherwise (strcmp/strncmp convention).
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;

	/* trailing wildcard: *\0 -- compare only the prefix before it */
	p = strchr(patt, wildchar);
	if (p && p[1] == '\0')
		return strncmp(patt, name, p - patt);

	/* No wildcard: require an exact match.  The previous code
	 * compared only strlen(name) characters, which falsely matched
	 * any @name that was a strict prefix of @patt (e.g. a device
	 * reporting "ST340823" matched the "ST340823A" entry).
	 */
	return strcmp(patt, name);
}
4468
75683fe7 4469static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4470{
8bfa79fc
TH
4471 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4472 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4473 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4474
8bfa79fc
TH
4475 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4476 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4477
6919a0a6 4478 while (ad->model_num) {
539cc7c7 4479 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4480 if (ad->model_rev == NULL)
4481 return ad->horkage;
539cc7c7 4482 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4483 return ad->horkage;
f4b15fef 4484 }
6919a0a6 4485 ad++;
f4b15fef 4486 }
1da177e4
LT
4487 return 0;
4488}
4489
6919a0a6
AC
4490static int ata_dma_blacklisted(const struct ata_device *dev)
4491{
4492 /* We don't support polling DMA.
4493 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4494 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4495 */
9af5c9c9 4496 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4497 (dev->flags & ATA_DFLAG_CDB_INTR))
4498 return 1;
75683fe7 4499 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4500}
4501
6bbfd53d
AC
4502/**
4503 * ata_is_40wire - check drive side detection
4504 * @dev: device
4505 *
4506 * Perform drive side detection decoding, allowing for device vendors
4507 * who can't follow the documentation.
4508 */
4509
4510static int ata_is_40wire(struct ata_device *dev)
4511{
4512 if (dev->horkage & ATA_HORKAGE_IVB)
4513 return ata_drive_40wire_relaxed(dev->id);
4514 return ata_drive_40wire(dev->id);
4515}
4516
a6d5a51c
TH
4517/**
4518 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4519 * @dev: Device to compute xfermask for
4520 *
acf356b1
TH
4521 * Compute supported xfermask of @dev and store it in
4522 * dev->*_mask. This function is responsible for applying all
4523 * known limits including host controller limits, device
4524 * blacklist, etc...
a6d5a51c
TH
4525 *
4526 * LOCKING:
4527 * None.
a6d5a51c 4528 */
3373efd8 4529static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4530{
9af5c9c9
TH
4531 struct ata_link *link = dev->link;
4532 struct ata_port *ap = link->ap;
cca3974e 4533 struct ata_host *host = ap->host;
a6d5a51c 4534 unsigned long xfer_mask;
1da177e4 4535
37deecb5 4536 /* controller modes available */
565083e1
TH
4537 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4538 ap->mwdma_mask, ap->udma_mask);
4539
8343f889 4540 /* drive modes available */
37deecb5
TH
4541 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4542 dev->mwdma_mask, dev->udma_mask);
4543 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4544
b352e57d
AC
4545 /*
4546 * CFA Advanced TrueIDE timings are not allowed on a shared
4547 * cable
4548 */
4549 if (ata_dev_pair(dev)) {
4550 /* No PIO5 or PIO6 */
4551 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4552 /* No MWDMA3 or MWDMA 4 */
4553 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4554 }
4555
37deecb5
TH
4556 if (ata_dma_blacklisted(dev)) {
4557 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
4558 ata_dev_printk(dev, KERN_WARNING,
4559 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4560 }
a6d5a51c 4561
14d66ab7 4562 if ((host->flags & ATA_HOST_SIMPLEX) &&
2dcb407e 4563 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
4564 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4565 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4566 "other device, disabling DMA\n");
5444a6f4 4567 }
565083e1 4568
e424675f
JG
4569 if (ap->flags & ATA_FLAG_NO_IORDY)
4570 xfer_mask &= ata_pio_mask_no_iordy(dev);
4571
5444a6f4 4572 if (ap->ops->mode_filter)
a76b62ca 4573 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4574
8343f889
RH
4575 /* Apply cable rule here. Don't apply it early because when
4576 * we handle hot plug the cable type can itself change.
4577 * Check this last so that we know if the transfer rate was
4578 * solely limited by the cable.
4579 * Unknown or 80 wire cables reported host side are checked
4580 * drive side as well. Cases where we know a 40wire cable
4581 * is used safely for 80 are not checked here.
4582 */
4583 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4584 /* UDMA/44 or higher would be available */
2dcb407e 4585 if ((ap->cbl == ATA_CBL_PATA40) ||
6bbfd53d 4586 (ata_is_40wire(dev) &&
2dcb407e
JG
4587 (ap->cbl == ATA_CBL_PATA_UNK ||
4588 ap->cbl == ATA_CBL_PATA80))) {
4589 ata_dev_printk(dev, KERN_WARNING,
8343f889
RH
4590 "limited to UDMA/33 due to 40-wire cable\n");
4591 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4592 }
4593
565083e1
TH
4594 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4595 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4596}
4597
1da177e4
LT
4598/**
4599 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4600 * @dev: Device to which command will be sent
4601 *
780a87f7
JG
4602 * Issue SET FEATURES - XFER MODE command to device @dev
4603 * on port @ap.
4604 *
1da177e4 4605 * LOCKING:
0cba632b 4606 * PCI/etc. bus probe sem.
83206a29
TH
4607 *
4608 * RETURNS:
4609 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4610 */
4611
3373efd8 4612static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4613{
a0123703 4614 struct ata_taskfile tf;
83206a29 4615 unsigned int err_mask;
1da177e4
LT
4616
4617 /* set up set-features taskfile */
4618 DPRINTK("set features - xfer mode\n");
4619
464cf177
TH
4620 /* Some controllers and ATAPI devices show flaky interrupt
4621 * behavior after setting xfer mode. Use polling instead.
4622 */
3373efd8 4623 ata_tf_init(dev, &tf);
a0123703
TH
4624 tf.command = ATA_CMD_SET_FEATURES;
4625 tf.feature = SETFEATURES_XFER;
464cf177 4626 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703 4627 tf.protocol = ATA_PROT_NODATA;
b9f8ab2d 4628 /* If we are using IORDY we must send the mode setting command */
11b7becc
JG
4629 if (ata_pio_need_iordy(dev))
4630 tf.nsect = dev->xfer_mode;
b9f8ab2d
AC
4631 /* If the device has IORDY and the controller does not - turn it off */
4632 else if (ata_id_has_iordy(dev->id))
11b7becc 4633 tf.nsect = 0x01;
b9f8ab2d
AC
4634 else /* In the ancient relic department - skip all of this */
4635 return 0;
1da177e4 4636
2b789108 4637 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4638
4639 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4640 return err_mask;
4641}
9f45cbd3 4642/**
218f3d30 4643 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4644 * @dev: Device to which command will be sent
4645 * @enable: Whether to enable or disable the feature
218f3d30 4646 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4647 *
4648 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4649 * on port @ap with sector count
9f45cbd3
KCA
4650 *
4651 * LOCKING:
4652 * PCI/etc. bus probe sem.
4653 *
4654 * RETURNS:
4655 * 0 on success, AC_ERR_* mask otherwise.
4656 */
218f3d30
JG
4657static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4658 u8 feature)
9f45cbd3
KCA
4659{
4660 struct ata_taskfile tf;
4661 unsigned int err_mask;
4662
4663 /* set up set-features taskfile */
4664 DPRINTK("set features - SATA features\n");
4665
4666 ata_tf_init(dev, &tf);
4667 tf.command = ATA_CMD_SET_FEATURES;
4668 tf.feature = enable;
4669 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4670 tf.protocol = ATA_PROT_NODATA;
218f3d30 4671 tf.nsect = feature;
9f45cbd3 4672
2b789108 4673 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4674
83206a29
TH
4675 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4676 return err_mask;
1da177e4
LT
4677}
4678
8bf62ece
AL
4679/**
4680 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4681 * @dev: Device to which command will be sent
e2a7f77a
RD
4682 * @heads: Number of heads (taskfile parameter)
4683 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4684 *
4685 * LOCKING:
6aff8f1f
TH
4686 * Kernel thread context (may sleep)
4687 *
4688 * RETURNS:
4689 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4690 */
3373efd8
TH
4691static unsigned int ata_dev_init_params(struct ata_device *dev,
4692 u16 heads, u16 sectors)
8bf62ece 4693{
a0123703 4694 struct ata_taskfile tf;
6aff8f1f 4695 unsigned int err_mask;
8bf62ece
AL
4696
4697 /* Number of sectors per track 1-255. Number of heads 1-16 */
4698 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4699 return AC_ERR_INVALID;
8bf62ece
AL
4700
4701 /* set up init dev params taskfile */
4702 DPRINTK("init dev params \n");
4703
3373efd8 4704 ata_tf_init(dev, &tf);
a0123703
TH
4705 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4706 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4707 tf.protocol = ATA_PROT_NODATA;
4708 tf.nsect = sectors;
4709 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4710
2b789108 4711 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4712 /* A clean abort indicates an original or just out of spec drive
4713 and we should continue as we issue the setup based on the
4714 drive reported working geometry */
4715 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4716 err_mask = 0;
8bf62ece 4717
6aff8f1f
TH
4718 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4719 return err_mask;
8bf62ece
AL
4720}
4721
1da177e4 4722/**
0cba632b
JG
4723 * ata_sg_clean - Unmap DMA memory associated with command
4724 * @qc: Command containing DMA memory to be released
4725 *
4726 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4727 *
4728 * LOCKING:
cca3974e 4729 * spin_lock_irqsave(host lock)
1da177e4 4730 */
70e6ad0c 4731void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4732{
4733 struct ata_port *ap = qc->ap;
ff2aeb1e 4734 struct scatterlist *sg = qc->sg;
1da177e4
LT
4735 int dir = qc->dma_dir;
4736
a4631474 4737 WARN_ON(sg == NULL);
1da177e4 4738
dde20207 4739 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4740
dde20207
JB
4741 if (qc->n_elem)
4742 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
1da177e4
LT
4743
4744 qc->flags &= ~ATA_QCFLAG_DMAMAP;
ff2aeb1e 4745 qc->sg = NULL;
1da177e4
LT
4746}
4747
4748/**
4749 * ata_fill_sg - Fill PCI IDE PRD table
4750 * @qc: Metadata associated with taskfile to be transferred
4751 *
780a87f7
JG
4752 * Fill PCI IDE PRD (scatter-gather) table with segments
4753 * associated with the current disk command.
4754 *
1da177e4 4755 * LOCKING:
cca3974e 4756 * spin_lock_irqsave(host lock)
1da177e4
LT
4757 *
4758 */
4759static void ata_fill_sg(struct ata_queued_cmd *qc)
4760{
1da177e4 4761 struct ata_port *ap = qc->ap;
cedc9a47 4762 struct scatterlist *sg;
ff2aeb1e 4763 unsigned int si, pi;
1da177e4 4764
ff2aeb1e
TH
4765 pi = 0;
4766 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1da177e4
LT
4767 u32 addr, offset;
4768 u32 sg_len, len;
4769
4770 /* determine if physical DMA addr spans 64K boundary.
4771 * Note h/w doesn't support 64-bit, so we unconditionally
4772 * truncate dma_addr_t to u32.
4773 */
4774 addr = (u32) sg_dma_address(sg);
4775 sg_len = sg_dma_len(sg);
4776
4777 while (sg_len) {
4778 offset = addr & 0xffff;
4779 len = sg_len;
4780 if ((offset + sg_len) > 0x10000)
4781 len = 0x10000 - offset;
4782
ff2aeb1e
TH
4783 ap->prd[pi].addr = cpu_to_le32(addr);
4784 ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
4785 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
1da177e4 4786
ff2aeb1e 4787 pi++;
1da177e4
LT
4788 sg_len -= len;
4789 addr += len;
4790 }
4791 }
4792
ff2aeb1e 4793 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1da177e4 4794}
b9a4197e 4795
d26fc955
AC
4796/**
4797 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4798 * @qc: Metadata associated with taskfile to be transferred
4799 *
4800 * Fill PCI IDE PRD (scatter-gather) table with segments
4801 * associated with the current disk command. Perform the fill
4802 * so that we avoid writing any length 64K records for
4803 * controllers that don't follow the spec.
4804 *
4805 * LOCKING:
4806 * spin_lock_irqsave(host lock)
4807 *
4808 */
4809static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4810{
4811 struct ata_port *ap = qc->ap;
4812 struct scatterlist *sg;
ff2aeb1e 4813 unsigned int si, pi;
d26fc955 4814
ff2aeb1e
TH
4815 pi = 0;
4816 for_each_sg(qc->sg, sg, qc->n_elem, si) {
d26fc955
AC
4817 u32 addr, offset;
4818 u32 sg_len, len, blen;
4819
2dcb407e 4820 /* determine if physical DMA addr spans 64K boundary.
d26fc955
AC
4821 * Note h/w doesn't support 64-bit, so we unconditionally
4822 * truncate dma_addr_t to u32.
4823 */
4824 addr = (u32) sg_dma_address(sg);
4825 sg_len = sg_dma_len(sg);
4826
4827 while (sg_len) {
4828 offset = addr & 0xffff;
4829 len = sg_len;
4830 if ((offset + sg_len) > 0x10000)
4831 len = 0x10000 - offset;
4832
4833 blen = len & 0xffff;
ff2aeb1e 4834 ap->prd[pi].addr = cpu_to_le32(addr);
d26fc955
AC
4835 if (blen == 0) {
4836 /* Some PATA chipsets like the CS5530 can't
4837 cope with 0x0000 meaning 64K as the spec says */
ff2aeb1e 4838 ap->prd[pi].flags_len = cpu_to_le32(0x8000);
d26fc955 4839 blen = 0x8000;
ff2aeb1e 4840 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
d26fc955 4841 }
ff2aeb1e
TH
4842 ap->prd[pi].flags_len = cpu_to_le32(blen);
4843 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
d26fc955 4844
ff2aeb1e 4845 pi++;
d26fc955
AC
4846 sg_len -= len;
4847 addr += len;
4848 }
4849 }
4850
ff2aeb1e 4851 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
d26fc955
AC
4852}
4853
1da177e4
LT
4854/**
4855 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4856 * @qc: Metadata associated with taskfile to check
4857 *
780a87f7
JG
4858 * Allow low-level driver to filter ATA PACKET commands, returning
4859 * a status indicating whether or not it is OK to use DMA for the
4860 * supplied PACKET command.
4861 *
1da177e4 4862 * LOCKING:
cca3974e 4863 * spin_lock_irqsave(host lock)
0cba632b 4864 *
1da177e4
LT
4865 * RETURNS: 0 when ATAPI DMA can be used
4866 * nonzero otherwise
4867 */
4868int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4869{
4870 struct ata_port *ap = qc->ap;
b9a4197e
TH
4871
4872 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4873 * few ATAPI devices choke on such DMA requests.
4874 */
4875 if (unlikely(qc->nbytes & 15))
4876 return 1;
6f23a31d 4877
1da177e4 4878 if (ap->ops->check_atapi_dma)
b9a4197e 4879 return ap->ops->check_atapi_dma(qc);
1da177e4 4880
b9a4197e 4881 return 0;
1da177e4 4882}
b9a4197e 4883
31cc23b3
TH
4884/**
4885 * ata_std_qc_defer - Check whether a qc needs to be deferred
4886 * @qc: ATA command in question
4887 *
4888 * Non-NCQ commands cannot run with any other command, NCQ or
4889 * not. As upper layer only knows the queue depth, we are
4890 * responsible for maintaining exclusion. This function checks
4891 * whether a new command @qc can be issued.
4892 *
4893 * LOCKING:
4894 * spin_lock_irqsave(host lock)
4895 *
4896 * RETURNS:
4897 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4898 */
4899int ata_std_qc_defer(struct ata_queued_cmd *qc)
4900{
4901 struct ata_link *link = qc->dev->link;
4902
4903 if (qc->tf.protocol == ATA_PROT_NCQ) {
4904 if (!ata_tag_valid(link->active_tag))
4905 return 0;
4906 } else {
4907 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4908 return 0;
4909 }
4910
4911 return ATA_DEFER_LINK;
4912}
4913
1da177e4
LT
4914/**
4915 * ata_qc_prep - Prepare taskfile for submission
4916 * @qc: Metadata associated with taskfile to be prepared
4917 *
780a87f7
JG
4918 * Prepare ATA taskfile for submission.
4919 *
1da177e4 4920 * LOCKING:
cca3974e 4921 * spin_lock_irqsave(host lock)
1da177e4
LT
4922 */
4923void ata_qc_prep(struct ata_queued_cmd *qc)
4924{
4925 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4926 return;
4927
4928 ata_fill_sg(qc);
4929}
4930
d26fc955
AC
4931/**
4932 * ata_dumb_qc_prep - Prepare taskfile for submission
4933 * @qc: Metadata associated with taskfile to be prepared
4934 *
4935 * Prepare ATA taskfile for submission.
4936 *
4937 * LOCKING:
4938 * spin_lock_irqsave(host lock)
4939 */
4940void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4941{
4942 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4943 return;
4944
4945 ata_fill_sg_dumb(qc);
4946}
4947
/* No-op ->qc_prep for controllers that need no PRD/S-G preparation. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4949
0cba632b
JG
4950/**
4951 * ata_sg_init - Associate command with scatter-gather table.
4952 * @qc: Command to be associated
4953 * @sg: Scatter-gather table.
4954 * @n_elem: Number of elements in s/g table.
4955 *
4956 * Initialize the data-related elements of queued_cmd @qc
4957 * to point to a scatter-gather table @sg, containing @n_elem
4958 * elements.
4959 *
4960 * LOCKING:
cca3974e 4961 * spin_lock_irqsave(host lock)
0cba632b 4962 */
1da177e4
LT
4963void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4964 unsigned int n_elem)
4965{
ff2aeb1e 4966 qc->sg = sg;
1da177e4 4967 qc->n_elem = n_elem;
ff2aeb1e 4968 qc->cursg = qc->sg;
1da177e4
LT
4969}
4970
ff2aeb1e
TH
4971/**
4972 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4973 * @qc: Command with scatter-gather table to be mapped.
4974 *
4975 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4976 *
4977 * LOCKING:
4978 * spin_lock_irqsave(host lock)
4979 *
4980 * RETURNS:
4981 * Zero on success, negative on error.
4982 *
4983 */
4984static int ata_sg_setup(struct ata_queued_cmd *qc)
4985{
4986 struct ata_port *ap = qc->ap;
dde20207 4987 unsigned int n_elem;
ff2aeb1e
TH
4988
4989 VPRINTK("ENTER, ata%u\n", ap->print_id);
4990
dde20207
JB
4991 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4992 if (n_elem < 1)
4993 return -1;
ff2aeb1e 4994
dde20207 4995 DPRINTK("%d sg elements mapped\n", n_elem);
1da177e4 4996
dde20207 4997 qc->n_elem = n_elem;
f92a2636 4998 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4
LT
4999
5000 return 0;
5001}
5002
0baab86b 5003/**
c893a3ae 5004 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
5005 * @buf: Buffer to swap
5006 * @buf_words: Number of 16-bit words in buffer.
5007 *
5008 * Swap halves of 16-bit words if needed to convert from
5009 * little-endian byte order to native cpu byte order, or
5010 * vice-versa.
5011 *
5012 * LOCKING:
6f0ef4fa 5013 * Inherited from caller.
0baab86b 5014 */
1da177e4
LT
5015void swap_buf_le16(u16 *buf, unsigned int buf_words)
5016{
5017#ifdef __BIG_ENDIAN
5018 unsigned int i;
5019
5020 for (i = 0; i < buf_words; i++)
5021 buf[i] = le16_to_cpu(buf[i]);
5022#endif /* __BIG_ENDIAN */
5023}
5024
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write (READ or WRITE)
 *
 *	Transfer data from/to the device data register by PIO.
 *	The bulk of the buffer moves as 16-bit words; a trailing odd
 *	byte is handled with a single padded word access.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed (rounded up to a word when @buflen is odd).
 */
unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
			   unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		/* bounce through a little-endian word so the device
		 * always sees the byte in the low half
		 */
		__le16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (rw == READ) {
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(trailing_buf, align_buf, 1);
		} else {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		words++;
	}

	return words << 1;
}
5070
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	unsigned long irq_flags;
	unsigned int xfered;

	/* Mask local interrupts for the duration of the PIO transfer. */
	local_irq_save(irq_flags);
	xfered = ata_data_xfer(dev, buf, buflen, rw);
	local_irq_restore(irq_flags);

	return xfered;
}
5099
5100
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* if this is the last sector, the HSM moves to its final state */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		/* highmem pages must be mapped; irqs are masked so the
		 * atomic kmap slot cannot be reentered from interrupt
		 */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is permanently mapped; no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	/* advance the transfer cursor by one sector */
	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* step to the next s/g entry when the current one is exhausted */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
1da177e4 5156
07f6f7d0 5157/**
5a5dbd18 5158 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
5159 * @qc: Command on going
5160 *
5a5dbd18 5161 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
5162 * ATA device for the DRQ request.
5163 *
5164 * LOCKING:
5165 * Inherited from caller.
5166 */
1da177e4 5167
07f6f7d0
AL
5168static void ata_pio_sectors(struct ata_queued_cmd *qc)
5169{
5170 if (is_multi_taskfile(&qc->tf)) {
5171 /* READ/WRITE MULTIPLE */
5172 unsigned int nsect;
5173
587005de 5174 WARN_ON(qc->dev->multi_count == 0);
1da177e4 5175
5a5dbd18 5176 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 5177 qc->dev->multi_count);
07f6f7d0
AL
5178 while (nsect--)
5179 ata_pio_sector(qc);
5180 } else
5181 ata_pio_sector(qc);
4cc980b3
AL
5182
5183 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
5184}
5185
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	/* the shortest legal ATAPI packet is 12 bytes */
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* advance the HSM according to how the data phase will proceed */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		/* data moves by PIO; wait for the next DRQ */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		/* no data phase; next event is command completion */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
5221
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the
 *	scatter-gather list one page-bounded chunk at a time.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -1 if the device requested more data than the
 *	s/g list can hold (overrun).
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		/* device asked for more data than the command mapped */
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		/* highmem pages need an atomic kmap with irqs masked */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
	}

	/* data_xfer may round an odd tail up to a full word */
	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/* consumed can be larger than count only for the last transfer */
	WARN_ON(qc->cursg && count != consumed);

	if (bytes)
		goto next_sg;
	return 0;
}
5301
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the byte count and interrupt reason the device reported,
 *	sanity-check them, then transfer that many bytes by PIO.
 *	On any protocol violation the HSM is moved to HSM_ST_ERR.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count is split across the LBA mid/high registers */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & (1 << 0)))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	/* a data-phase DRQ with a zero byte count is a protocol error */
	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_altstatus(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
5359
5360/**
c234fb00
AL
5361 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5362 * @ap: the target ata_port
5363 * @qc: qc on going
1da177e4 5364 *
c234fb00
AL
5365 * RETURNS:
5366 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5367 */
c234fb00
AL
5368
5369static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5370{
c234fb00
AL
5371 if (qc->tf.flags & ATA_TFLAG_POLLING)
5372 return 1;
1da177e4 5373
c234fb00
AL
5374 if (ap->hsm_task_state == HSM_ST_FIRST) {
5375 if (qc->tf.protocol == ATA_PROT_PIO &&
5376 (qc->tf.flags & ATA_TFLAG_WRITE))
5377 return 1;
1da177e4 5378
405e66b3 5379 if (ata_is_atapi(qc->tf.protocol) &&
c234fb00
AL
5380 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5381 return 1;
fe79e683
AL
5382 }
5383
c234fb00
AL
5384 return 0;
5385}
1da177e4 5386
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new-style EH: freeze the port on HSM violations */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable irq before completion */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			/* caller already holds host lock */
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: just complete, no freezing support */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5436
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Drive the PIO/ATAPI host state machine one step based on the
 *	device @status, possibly looping via fsm_start when one event
 *	triggers an immediate follow-on transition.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	/* the qc must still be owned by the regular execution path */
	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_port_printk(ap, KERN_WARNING,
						"DRQ=1 with device error, "
						"dev_stat 0x%X\n", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrputed */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				/* device still busy/DRQ after the drain
				 * is a protocol violation on top
				 */
				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5678
/* Workqueue entry point driving the PIO HSM for the port's current qc;
 * keeps re-running the FSM while ata_hsm_move() asks for further polling.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy -- reschedule ourselves after a pause */
			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5716
1da177e4
LT
5717/**
5718 * ata_qc_new - Request an available ATA command, for queueing
5719 * @ap: Port associated with device @dev
5720 * @dev: Device from whom we request an available command structure
5721 *
5722 * LOCKING:
0cba632b 5723 * None.
1da177e4
LT
5724 */
5725
5726static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5727{
5728 struct ata_queued_cmd *qc = NULL;
5729 unsigned int i;
5730
e3180499 5731 /* no command while frozen */
b51e9e5d 5732 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5733 return NULL;
5734
2ab7db1f
TH
5735 /* the last tag is reserved for internal command. */
5736 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5737 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5738 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5739 break;
5740 }
5741
5742 if (qc)
5743 qc->tag = i;
5744
5745 return qc;
5746}
5747
5748/**
5749 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5750 * @dev: Device from whom we request an available command structure
5751 *
5752 * LOCKING:
0cba632b 5753 * None.
1da177e4
LT
5754 */
5755
3373efd8 5756struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5757{
9af5c9c9 5758 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5759 struct ata_queued_cmd *qc;
5760
5761 qc = ata_qc_new(ap);
5762 if (qc) {
1da177e4
LT
5763 qc->scsicmd = NULL;
5764 qc->ap = ap;
5765 qc->dev = dev;
1da177e4 5766
2c13b7ce 5767 ata_qc_reinit(qc);
1da177e4
LT
5768 }
5769
5770 return qc;
5771}
5772
1da177e4
LT
5773/**
5774 * ata_qc_free - free unused ata_queued_cmd
5775 * @qc: Command to complete
5776 *
5777 * Designed to free unused ata_queued_cmd object
5778 * in case something prevents using it.
5779 *
5780 * LOCKING:
cca3974e 5781 * spin_lock_irqsave(host lock)
1da177e4
LT
5782 */
5783void ata_qc_free(struct ata_queued_cmd *qc)
5784{
4ba946e9
TH
5785 struct ata_port *ap = qc->ap;
5786 unsigned int tag;
5787
a4631474 5788 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5789
4ba946e9
TH
5790 qc->flags = 0;
5791 tag = qc->tag;
5792 if (likely(ata_tag_valid(tag))) {
4ba946e9 5793 qc->tag = ATA_TAG_POISON;
6cec4a39 5794 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5795 }
1da177e4
LT
5796}
5797
/* Core completion: unmap DMA, retire the tag from link/port accounting
 * and invoke the qc's completion callback.  Callers must hold host lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		/* last outstanding NCQ command on this link just finished */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5834
39599a53
TH
5835static void fill_result_tf(struct ata_queued_cmd *qc)
5836{
5837 struct ata_port *ap = qc->ap;
5838
39599a53 5839 qc->result_tf.flags = qc->tf.flags;
4742d54f 5840 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5841}
5842
00115e0f
TH
5843static void ata_verify_xfer(struct ata_queued_cmd *qc)
5844{
5845 struct ata_device *dev = qc->dev;
5846
5847 if (ata_tag_internal(qc->tag))
5848 return;
5849
5850 if (ata_is_nodata(qc->tf.protocol))
5851 return;
5852
5853 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5854 return;
5855
5856 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5857}
5858
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		/* frozen ports must not see completions */
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal commands are completed by their issuer */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		/* a successful data transfer vets the configured mode */
		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5945
dedaf2b0
TH
5946/**
5947 * ata_qc_complete_multiple - Complete multiple qcs successfully
5948 * @ap: port in question
5949 * @qc_active: new qc_active mask
5950 * @finish_qc: LLDD callback invoked before completing a qc
5951 *
5952 * Complete in-flight commands. This functions is meant to be
5953 * called from low-level driver's interrupt routine to complete
5954 * requests normally. ap->qc_active and @qc_active is compared
5955 * and commands are completed accordingly.
5956 *
5957 * LOCKING:
cca3974e 5958 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5959 *
5960 * RETURNS:
5961 * Number of completed commands on success, -errno otherwise.
5962 */
5963int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5964 void (*finish_qc)(struct ata_queued_cmd *))
5965{
5966 int nr_done = 0;
5967 u32 done_mask;
5968 int i;
5969
5970 done_mask = ap->qc_active ^ qc_active;
5971
5972 if (unlikely(done_mask & qc_active)) {
5973 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5974 "(%08x->%08x)\n", ap->qc_active, qc_active);
5975 return -EINVAL;
5976 }
5977
5978 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5979 struct ata_queued_cmd *qc;
5980
5981 if (!(done_mask & (1 << i)))
5982 continue;
5983
5984 if ((qc = ata_qc_from_tag(ap, i))) {
5985 if (finish_qc)
5986 finish_qc(qc);
5987 ata_qc_complete(qc);
5988 nr_done++;
5989 }
5990 }
5991
5992 return nr_done;
5993}
5994
1da177e4
LT
5995/**
5996 * ata_qc_issue - issue taskfile to device
5997 * @qc: command to issue to device
5998 *
5999 * Prepare an ATA command to submission to device.
6000 * This includes mapping the data into a DMA-able
6001 * area, filling in the S/G table, and finally
6002 * writing the taskfile to hardware, starting the command.
6003 *
6004 * LOCKING:
cca3974e 6005 * spin_lock_irqsave(host lock)
1da177e4 6006 */
8e0e694a 6007void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
6008{
6009 struct ata_port *ap = qc->ap;
9af5c9c9 6010 struct ata_link *link = qc->dev->link;
405e66b3 6011 u8 prot = qc->tf.protocol;
1da177e4 6012
dedaf2b0
TH
6013 /* Make sure only one non-NCQ command is outstanding. The
6014 * check is skipped for old EH because it reuses active qc to
6015 * request ATAPI sense.
6016 */
9af5c9c9 6017 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0 6018
1973a023 6019 if (ata_is_ncq(prot)) {
9af5c9c9 6020 WARN_ON(link->sactive & (1 << qc->tag));
da917d69
TH
6021
6022 if (!link->sactive)
6023 ap->nr_active_links++;
9af5c9c9 6024 link->sactive |= 1 << qc->tag;
dedaf2b0 6025 } else {
9af5c9c9 6026 WARN_ON(link->sactive);
da917d69
TH
6027
6028 ap->nr_active_links++;
9af5c9c9 6029 link->active_tag = qc->tag;
dedaf2b0
TH
6030 }
6031
e4a70e76 6032 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 6033 ap->qc_active |= 1 << qc->tag;
e4a70e76 6034
f92a2636
TH
6035 /* We guarantee to LLDs that they will have at least one
6036 * non-zero sg if the command is a data command.
6037 */
ff2aeb1e 6038 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
f92a2636 6039
405e66b3 6040 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
f92a2636 6041 (ap->flags & ATA_FLAG_PIO_DMA)))
001102d7
TH
6042 if (ata_sg_setup(qc))
6043 goto sg_err;
1da177e4 6044
cf480626 6045 /* if device is sleeping, schedule reset and abort the link */
054a5fba 6046 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
cf480626 6047 link->eh_info.action |= ATA_EH_RESET;
054a5fba
TH
6048 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
6049 ata_link_abort(link);
6050 return;
6051 }
6052
1da177e4
LT
6053 ap->ops->qc_prep(qc);
6054
8e0e694a
TH
6055 qc->err_mask |= ap->ops->qc_issue(qc);
6056 if (unlikely(qc->err_mask))
6057 goto err;
6058 return;
1da177e4 6059
8e436af9 6060sg_err:
8e0e694a
TH
6061 qc->err_mask |= AC_ERR_SYSTEM;
6062err:
6063 ata_qc_complete(qc);
1da177e4
LT
6064}
6065
6066/**
6067 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6068 * @qc: command to issue to device
6069 *
6070 * Using various libata functions and hooks, this function
6071 * starts an ATA command. ATA commands are grouped into
6072 * classes called "protocols", and issuing each type of protocol
6073 * is slightly different.
6074 *
0baab86b
EF
6075 * May be used as the qc_issue() entry in ata_port_operations.
6076 *
1da177e4 6077 * LOCKING:
cca3974e 6078 * spin_lock_irqsave(host lock)
1da177e4
LT
6079 *
6080 * RETURNS:
9a3d9eb0 6081 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
6082 */
6083
9a3d9eb0 6084unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
6085{
6086 struct ata_port *ap = qc->ap;
6087
e50362ec
AL
6088 /* Use polling pio if the LLD doesn't handle
6089 * interrupt driven pio and atapi CDB interrupt.
6090 */
6091 if (ap->flags & ATA_FLAG_PIO_POLLING) {
6092 switch (qc->tf.protocol) {
6093 case ATA_PROT_PIO:
e3472cbe 6094 case ATA_PROT_NODATA:
0dc36888
TH
6095 case ATAPI_PROT_PIO:
6096 case ATAPI_PROT_NODATA:
e50362ec
AL
6097 qc->tf.flags |= ATA_TFLAG_POLLING;
6098 break;
0dc36888 6099 case ATAPI_PROT_DMA:
e50362ec 6100 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 6101 /* see ata_dma_blacklisted() */
e50362ec
AL
6102 BUG();
6103 break;
6104 default:
6105 break;
6106 }
6107 }
6108
312f7da2 6109 /* select the device */
1da177e4
LT
6110 ata_dev_select(ap, qc->dev->devno, 1, 0);
6111
312f7da2 6112 /* start the command */
1da177e4
LT
6113 switch (qc->tf.protocol) {
6114 case ATA_PROT_NODATA:
312f7da2
AL
6115 if (qc->tf.flags & ATA_TFLAG_POLLING)
6116 ata_qc_set_polling(qc);
6117
e5338254 6118 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
6119 ap->hsm_task_state = HSM_ST_LAST;
6120
6121 if (qc->tf.flags & ATA_TFLAG_POLLING)
442eacc3 6122 ata_pio_queue_task(ap, qc, 0);
312f7da2 6123
1da177e4
LT
6124 break;
6125
6126 case ATA_PROT_DMA:
587005de 6127 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6128
1da177e4
LT
6129 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6130 ap->ops->bmdma_setup(qc); /* set up bmdma */
6131 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 6132 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
6133 break;
6134
312f7da2
AL
6135 case ATA_PROT_PIO:
6136 if (qc->tf.flags & ATA_TFLAG_POLLING)
6137 ata_qc_set_polling(qc);
1da177e4 6138
e5338254 6139 ata_tf_to_host(ap, &qc->tf);
312f7da2 6140
54f00389
AL
6141 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6142 /* PIO data out protocol */
6143 ap->hsm_task_state = HSM_ST_FIRST;
442eacc3 6144 ata_pio_queue_task(ap, qc, 0);
54f00389
AL
6145
6146 /* always send first data block using
e27486db 6147 * the ata_pio_task() codepath.
54f00389 6148 */
312f7da2 6149 } else {
54f00389
AL
6150 /* PIO data in protocol */
6151 ap->hsm_task_state = HSM_ST;
6152
6153 if (qc->tf.flags & ATA_TFLAG_POLLING)
442eacc3 6154 ata_pio_queue_task(ap, qc, 0);
54f00389
AL
6155
6156 /* if polling, ata_pio_task() handles the rest.
6157 * otherwise, interrupt handler takes over from here.
6158 */
312f7da2
AL
6159 }
6160
1da177e4
LT
6161 break;
6162
0dc36888
TH
6163 case ATAPI_PROT_PIO:
6164 case ATAPI_PROT_NODATA:
312f7da2
AL
6165 if (qc->tf.flags & ATA_TFLAG_POLLING)
6166 ata_qc_set_polling(qc);
6167
e5338254 6168 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 6169
312f7da2
AL
6170 ap->hsm_task_state = HSM_ST_FIRST;
6171
6172 /* send cdb by polling if no cdb interrupt */
6173 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6174 (qc->tf.flags & ATA_TFLAG_POLLING))
442eacc3 6175 ata_pio_queue_task(ap, qc, 0);
1da177e4
LT
6176 break;
6177
0dc36888 6178 case ATAPI_PROT_DMA:
587005de 6179 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6180
1da177e4
LT
6181 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6182 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
6183 ap->hsm_task_state = HSM_ST_FIRST;
6184
6185 /* send cdb by polling if no cdb interrupt */
6186 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
442eacc3 6187 ata_pio_queue_task(ap, qc, 0);
1da177e4
LT
6188 break;
6189
6190 default:
6191 WARN_ON(1);
9a3d9eb0 6192 return AC_ERR_SYSTEM;
1da177e4
LT
6193 }
6194
6195 return 0;
6196}
6197
1da177e4
LT
6198/**
6199 * ata_host_intr - Handle host interrupt for given (port, task)
6200 * @ap: Port on which interrupt arrived (possibly...)
6201 * @qc: Taskfile currently active in engine
6202 *
6203 * Handle host interrupt for given queued command. Currently,
6204 * only DMA interrupts are handled. All other commands are
6205 * handled via polling with interrupts disabled (nIEN bit).
6206 *
6207 * LOCKING:
cca3974e 6208 * spin_lock_irqsave(host lock)
1da177e4
LT
6209 *
6210 * RETURNS:
6211 * One if interrupt was handled, zero if not (shared irq).
6212 */
6213
2dcb407e
JG
6214inline unsigned int ata_host_intr(struct ata_port *ap,
6215 struct ata_queued_cmd *qc)
1da177e4 6216{
9af5c9c9 6217 struct ata_eh_info *ehi = &ap->link.eh_info;
312f7da2 6218 u8 status, host_stat = 0;
1da177e4 6219
312f7da2 6220 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 6221 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 6222
312f7da2
AL
6223 /* Check whether we are expecting interrupt in this state */
6224 switch (ap->hsm_task_state) {
6225 case HSM_ST_FIRST:
6912ccd5
AL
6226 /* Some pre-ATAPI-4 devices assert INTRQ
6227 * at this state when ready to receive CDB.
6228 */
1da177e4 6229
312f7da2 6230 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
405e66b3
TH
6231 * The flag was turned on only for atapi devices. No
6232 * need to check ata_is_atapi(qc->tf.protocol) again.
312f7da2
AL
6233 */
6234 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 6235 goto idle_irq;
1da177e4 6236 break;
312f7da2
AL
6237 case HSM_ST_LAST:
6238 if (qc->tf.protocol == ATA_PROT_DMA ||
0dc36888 6239 qc->tf.protocol == ATAPI_PROT_DMA) {
312f7da2
AL
6240 /* check status of DMA engine */
6241 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
6242 VPRINTK("ata%u: host_stat 0x%X\n",
6243 ap->print_id, host_stat);
312f7da2
AL
6244
6245 /* if it's not our irq... */
6246 if (!(host_stat & ATA_DMA_INTR))
6247 goto idle_irq;
6248
6249 /* before we do anything else, clear DMA-Start bit */
6250 ap->ops->bmdma_stop(qc);
a4f16610
AL
6251
6252 if (unlikely(host_stat & ATA_DMA_ERR)) {
6253 /* error when transfering data to/from memory */
6254 qc->err_mask |= AC_ERR_HOST_BUS;
6255 ap->hsm_task_state = HSM_ST_ERR;
6256 }
312f7da2
AL
6257 }
6258 break;
6259 case HSM_ST:
6260 break;
1da177e4
LT
6261 default:
6262 goto idle_irq;
6263 }
6264
312f7da2
AL
6265 /* check altstatus */
6266 status = ata_altstatus(ap);
6267 if (status & ATA_BUSY)
6268 goto idle_irq;
1da177e4 6269
312f7da2
AL
6270 /* check main status, clearing INTRQ */
6271 status = ata_chk_status(ap);
6272 if (unlikely(status & ATA_BUSY))
6273 goto idle_irq;
1da177e4 6274
312f7da2
AL
6275 /* ack bmdma irq events */
6276 ap->ops->irq_clear(ap);
1da177e4 6277
bb5cb290 6278 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
6279
6280 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
0dc36888 6281 qc->tf.protocol == ATAPI_PROT_DMA))
ea54763f
TH
6282 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6283
1da177e4
LT
6284 return 1; /* irq handled */
6285
6286idle_irq:
6287 ap->stats.idle_irq++;
6288
6289#ifdef ATA_IRQ_TRAP
6290 if ((ap->stats.idle_irq % 1000) == 0) {
6d32d30f
JG
6291 ata_chk_status(ap);
6292 ap->ops->irq_clear(ap);
f15a1daf 6293 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 6294 return 1;
1da177e4
LT
6295 }
6296#endif
6297 return 0; /* irq not handled */
6298}
6299
6300/**
6301 * ata_interrupt - Default ATA host interrupt handler
0cba632b 6302 * @irq: irq line (unused)
cca3974e 6303 * @dev_instance: pointer to our ata_host information structure
1da177e4 6304 *
0cba632b
JG
6305 * Default interrupt handler for PCI IDE devices. Calls
6306 * ata_host_intr() for each port that is not disabled.
6307 *
1da177e4 6308 * LOCKING:
cca3974e 6309 * Obtains host lock during operation.
1da177e4
LT
6310 *
6311 * RETURNS:
0cba632b 6312 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
6313 */
6314
2dcb407e 6315irqreturn_t ata_interrupt(int irq, void *dev_instance)
1da177e4 6316{
cca3974e 6317 struct ata_host *host = dev_instance;
1da177e4
LT
6318 unsigned int i;
6319 unsigned int handled = 0;
6320 unsigned long flags;
6321
6322 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 6323 spin_lock_irqsave(&host->lock, flags);
1da177e4 6324
cca3974e 6325 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
6326 struct ata_port *ap;
6327
cca3974e 6328 ap = host->ports[i];
c1389503 6329 if (ap &&
029f5468 6330 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
6331 struct ata_queued_cmd *qc;
6332
9af5c9c9 6333 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 6334 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 6335 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
6336 handled |= ata_host_intr(ap, qc);
6337 }
6338 }
6339
cca3974e 6340 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
6341
6342 return IRQ_RETVAL(handled);
6343}
6344
34bf2170
TH
6345/**
6346 * sata_scr_valid - test whether SCRs are accessible
936fd732 6347 * @link: ATA link to test SCR accessibility for
34bf2170 6348 *
936fd732 6349 * Test whether SCRs are accessible for @link.
34bf2170
TH
6350 *
6351 * LOCKING:
6352 * None.
6353 *
6354 * RETURNS:
6355 * 1 if SCRs are accessible, 0 otherwise.
6356 */
936fd732 6357int sata_scr_valid(struct ata_link *link)
34bf2170 6358{
936fd732
TH
6359 struct ata_port *ap = link->ap;
6360
a16abc0b 6361 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6362}
6363
6364/**
6365 * sata_scr_read - read SCR register of the specified port
936fd732 6366 * @link: ATA link to read SCR for
34bf2170
TH
6367 * @reg: SCR to read
6368 * @val: Place to store read value
6369 *
936fd732 6370 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6371 * guaranteed to succeed if @link is ap->link, the cable type of
6372 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6373 *
6374 * LOCKING:
633273a3 6375 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6376 *
6377 * RETURNS:
6378 * 0 on success, negative errno on failure.
6379 */
936fd732 6380int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6381{
633273a3
TH
6382 if (ata_is_host_link(link)) {
6383 struct ata_port *ap = link->ap;
936fd732 6384
633273a3
TH
6385 if (sata_scr_valid(link))
6386 return ap->ops->scr_read(ap, reg, val);
6387 return -EOPNOTSUPP;
6388 }
6389
6390 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6391}
6392
6393/**
6394 * sata_scr_write - write SCR register of the specified port
936fd732 6395 * @link: ATA link to write SCR for
34bf2170
TH
6396 * @reg: SCR to write
6397 * @val: value to write
6398 *
936fd732 6399 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6400 * guaranteed to succeed if @link is ap->link, the cable type of
6401 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6402 *
6403 * LOCKING:
633273a3 6404 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6405 *
6406 * RETURNS:
6407 * 0 on success, negative errno on failure.
6408 */
936fd732 6409int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6410{
633273a3
TH
6411 if (ata_is_host_link(link)) {
6412 struct ata_port *ap = link->ap;
6413
6414 if (sata_scr_valid(link))
6415 return ap->ops->scr_write(ap, reg, val);
6416 return -EOPNOTSUPP;
6417 }
936fd732 6418
633273a3 6419 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6420}
6421
6422/**
6423 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6424 * @link: ATA link to write SCR for
34bf2170
TH
6425 * @reg: SCR to write
6426 * @val: value to write
6427 *
6428 * This function is identical to sata_scr_write() except that this
6429 * function performs flush after writing to the register.
6430 *
6431 * LOCKING:
633273a3 6432 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6433 *
6434 * RETURNS:
6435 * 0 on success, negative errno on failure.
6436 */
936fd732 6437int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6438{
633273a3
TH
6439 if (ata_is_host_link(link)) {
6440 struct ata_port *ap = link->ap;
6441 int rc;
da3dbb17 6442
633273a3
TH
6443 if (sata_scr_valid(link)) {
6444 rc = ap->ops->scr_write(ap, reg, val);
6445 if (rc == 0)
6446 rc = ap->ops->scr_read(ap, reg, &val);
6447 return rc;
6448 }
6449 return -EOPNOTSUPP;
34bf2170 6450 }
633273a3
TH
6451
6452 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6453}
6454
6455/**
936fd732
TH
6456 * ata_link_online - test whether the given link is online
6457 * @link: ATA link to test
34bf2170 6458 *
936fd732
TH
6459 * Test whether @link is online. Note that this function returns
6460 * 0 if online status of @link cannot be obtained, so
6461 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6462 *
6463 * LOCKING:
6464 * None.
6465 *
6466 * RETURNS:
6467 * 1 if the port online status is available and online.
6468 */
936fd732 6469int ata_link_online(struct ata_link *link)
34bf2170
TH
6470{
6471 u32 sstatus;
6472
936fd732
TH
6473 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6474 (sstatus & 0xf) == 0x3)
34bf2170
TH
6475 return 1;
6476 return 0;
6477}
6478
6479/**
936fd732
TH
6480 * ata_link_offline - test whether the given link is offline
6481 * @link: ATA link to test
34bf2170 6482 *
936fd732
TH
6483 * Test whether @link is offline. Note that this function
6484 * returns 0 if offline status of @link cannot be obtained, so
6485 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6486 *
6487 * LOCKING:
6488 * None.
6489 *
6490 * RETURNS:
6491 * 1 if the port offline status is available and offline.
6492 */
936fd732 6493int ata_link_offline(struct ata_link *link)
34bf2170
TH
6494{
6495 u32 sstatus;
6496
936fd732
TH
6497 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6498 (sstatus & 0xf) != 0x3)
34bf2170
TH
6499 return 1;
6500 return 0;
6501}
0baab86b 6502
77b08fb5 6503int ata_flush_cache(struct ata_device *dev)
9b847548 6504{
977e6b9f 6505 unsigned int err_mask;
9b847548
JA
6506 u8 cmd;
6507
6508 if (!ata_try_flush_cache(dev))
6509 return 0;
6510
6fc49adb 6511 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6512 cmd = ATA_CMD_FLUSH_EXT;
6513 else
6514 cmd = ATA_CMD_FLUSH;
6515
4f34337b
AC
6516 /* This is wrong. On a failed flush we get back the LBA of the lost
6517 sector and we should (assuming it wasn't aborted as unknown) issue
2dcb407e 6518 a further flush command to continue the writeback until it
4f34337b 6519 does not error */
977e6b9f
TH
6520 err_mask = ata_do_simple_cmd(dev, cmd);
6521 if (err_mask) {
6522 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6523 return -EIO;
6524 }
6525
6526 return 0;
9b847548
JA
6527}
6528
6ffa01d8 6529#ifdef CONFIG_PM
cca3974e
JG
6530static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6531 unsigned int action, unsigned int ehi_flags,
6532 int wait)
500530f6
TH
6533{
6534 unsigned long flags;
6535 int i, rc;
6536
cca3974e
JG
6537 for (i = 0; i < host->n_ports; i++) {
6538 struct ata_port *ap = host->ports[i];
e3667ebf 6539 struct ata_link *link;
500530f6
TH
6540
6541 /* Previous resume operation might still be in
6542 * progress. Wait for PM_PENDING to clear.
6543 */
6544 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6545 ata_port_wait_eh(ap);
6546 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6547 }
6548
6549 /* request PM ops to EH */
6550 spin_lock_irqsave(ap->lock, flags);
6551
6552 ap->pm_mesg = mesg;
6553 if (wait) {
6554 rc = 0;
6555 ap->pm_result = &rc;
6556 }
6557
6558 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
6559 __ata_port_for_each_link(link, ap) {
6560 link->eh_info.action |= action;
6561 link->eh_info.flags |= ehi_flags;
6562 }
500530f6
TH
6563
6564 ata_port_schedule_eh(ap);
6565
6566 spin_unlock_irqrestore(ap->lock, flags);
6567
6568 /* wait and check result */
6569 if (wait) {
6570 ata_port_wait_eh(ap);
6571 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6572 if (rc)
6573 return rc;
6574 }
6575 }
6576
6577 return 0;
6578}
6579
6580/**
cca3974e
JG
6581 * ata_host_suspend - suspend host
6582 * @host: host to suspend
500530f6
TH
6583 * @mesg: PM message
6584 *
cca3974e 6585 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6586 * function requests EH to perform PM operations and waits for EH
6587 * to finish.
6588 *
6589 * LOCKING:
6590 * Kernel thread context (may sleep).
6591 *
6592 * RETURNS:
6593 * 0 on success, -errno on failure.
6594 */
cca3974e 6595int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6596{
9666f400 6597 int rc;
500530f6 6598
ca77329f
KCA
6599 /*
6600 * disable link pm on all ports before requesting
6601 * any pm activity
6602 */
6603 ata_lpm_enable(host);
6604
cca3974e 6605 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
72ad6ec4
JG
6606 if (rc == 0)
6607 host->dev->power.power_state = mesg;
500530f6
TH
6608 return rc;
6609}
6610
6611/**
cca3974e
JG
6612 * ata_host_resume - resume host
6613 * @host: host to resume
500530f6 6614 *
cca3974e 6615 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6616 * function requests EH to perform PM operations and returns.
6617 * Note that all resume operations are performed parallely.
6618 *
6619 * LOCKING:
6620 * Kernel thread context (may sleep).
6621 */
cca3974e 6622void ata_host_resume(struct ata_host *host)
500530f6 6623{
cf480626 6624 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
cca3974e 6625 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
72ad6ec4 6626 host->dev->power.power_state = PMSG_ON;
ca77329f
KCA
6627
6628 /* reenable link pm */
6629 ata_lpm_disable(host);
500530f6 6630}
6ffa01d8 6631#endif
500530f6 6632
c893a3ae
RD
6633/**
6634 * ata_port_start - Set port up for dma.
6635 * @ap: Port to initialize
6636 *
6637 * Called just after data structures for each port are
6638 * initialized. Allocates space for PRD table.
6639 *
6640 * May be used as the port_start() entry in ata_port_operations.
6641 *
6642 * LOCKING:
6643 * Inherited from caller.
6644 */
f0d36efd 6645int ata_port_start(struct ata_port *ap)
1da177e4 6646{
2f1f610b 6647 struct device *dev = ap->dev;
1da177e4 6648
f0d36efd
TH
6649 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6650 GFP_KERNEL);
1da177e4
LT
6651 if (!ap->prd)
6652 return -ENOMEM;
6653
1da177e4
LT
6654 return 0;
6655}
6656
3ef3b43d
TH
6657/**
6658 * ata_dev_init - Initialize an ata_device structure
6659 * @dev: Device structure to initialize
6660 *
6661 * Initialize @dev in preparation for probing.
6662 *
6663 * LOCKING:
6664 * Inherited from caller.
6665 */
6666void ata_dev_init(struct ata_device *dev)
6667{
9af5c9c9
TH
6668 struct ata_link *link = dev->link;
6669 struct ata_port *ap = link->ap;
72fa4b74
TH
6670 unsigned long flags;
6671
5a04bf4b 6672 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
6673 link->sata_spd_limit = link->hw_sata_spd_limit;
6674 link->sata_spd = 0;
5a04bf4b 6675
72fa4b74
TH
6676 /* High bits of dev->flags are used to record warm plug
6677 * requests which occur asynchronously. Synchronize using
cca3974e 6678 * host lock.
72fa4b74 6679 */
ba6a1308 6680 spin_lock_irqsave(ap->lock, flags);
72fa4b74 6681 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 6682 dev->horkage = 0;
ba6a1308 6683 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 6684
72fa4b74
TH
6685 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6686 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
6687 dev->pio_mask = UINT_MAX;
6688 dev->mwdma_mask = UINT_MAX;
6689 dev->udma_mask = UINT_MAX;
6690}
6691
4fb37a25
TH
6692/**
6693 * ata_link_init - Initialize an ata_link structure
6694 * @ap: ATA port link is attached to
6695 * @link: Link structure to initialize
8989805d 6696 * @pmp: Port multiplier port number
4fb37a25
TH
6697 *
6698 * Initialize @link.
6699 *
6700 * LOCKING:
6701 * Kernel thread context (may sleep)
6702 */
fb7fd614 6703void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6704{
6705 int i;
6706
6707 /* clear everything except for devices */
6708 memset(link, 0, offsetof(struct ata_link, device[0]));
6709
6710 link->ap = ap;
8989805d 6711 link->pmp = pmp;
4fb37a25
TH
6712 link->active_tag = ATA_TAG_POISON;
6713 link->hw_sata_spd_limit = UINT_MAX;
6714
6715 /* can't use iterator, ap isn't initialized yet */
6716 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6717 struct ata_device *dev = &link->device[i];
6718
6719 dev->link = link;
6720 dev->devno = dev - link->device;
6721 ata_dev_init(dev);
6722 }
6723}
6724
6725/**
6726 * sata_link_init_spd - Initialize link->sata_spd_limit
6727 * @link: Link to configure sata_spd_limit for
6728 *
6729 * Initialize @link->[hw_]sata_spd_limit to the currently
6730 * configured value.
6731 *
6732 * LOCKING:
6733 * Kernel thread context (may sleep).
6734 *
6735 * RETURNS:
6736 * 0 on success, -errno on failure.
6737 */
fb7fd614 6738int sata_link_init_spd(struct ata_link *link)
4fb37a25 6739{
33267325
TH
6740 u32 scontrol;
6741 u8 spd;
4fb37a25
TH
6742 int rc;
6743
6744 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6745 if (rc)
6746 return rc;
6747
6748 spd = (scontrol >> 4) & 0xf;
6749 if (spd)
6750 link->hw_sata_spd_limit &= (1 << spd) - 1;
6751
33267325
TH
6752 ata_force_spd_limit(link);
6753
4fb37a25
TH
6754 link->sata_spd_limit = link->hw_sata_spd_limit;
6755
6756 return 0;
6757}
6758
1da177e4 6759/**
f3187195
TH
6760 * ata_port_alloc - allocate and initialize basic ATA port resources
6761 * @host: ATA host this allocated port belongs to
1da177e4 6762 *
f3187195
TH
6763 * Allocate and initialize basic ATA port resources.
6764 *
6765 * RETURNS:
6766 * Allocate ATA port on success, NULL on failure.
0cba632b 6767 *
1da177e4 6768 * LOCKING:
f3187195 6769 * Inherited from calling layer (may sleep).
1da177e4 6770 */
f3187195 6771struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 6772{
f3187195 6773 struct ata_port *ap;
1da177e4 6774
f3187195
TH
6775 DPRINTK("ENTER\n");
6776
6777 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6778 if (!ap)
6779 return NULL;
6780
f4d6d004 6781 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6782 ap->lock = &host->lock;
198e0fed 6783 ap->flags = ATA_FLAG_DISABLED;
f3187195 6784 ap->print_id = -1;
1da177e4 6785 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6786 ap->host = host;
f3187195 6787 ap->dev = host->dev;
1da177e4 6788 ap->last_ctl = 0xFF;
bd5d825c
BP
6789
6790#if defined(ATA_VERBOSE_DEBUG)
6791 /* turn on all debugging levels */
6792 ap->msg_enable = 0x00FF;
6793#elif defined(ATA_DEBUG)
6794 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6795#else
0dd4b21f 6796 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6797#endif
1da177e4 6798
442eacc3 6799 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
65f27f38
DH
6800 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6801 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6802 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6803 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
6804 init_timer_deferrable(&ap->fastdrain_timer);
6805 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6806 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 6807
838df628 6808 ap->cbl = ATA_CBL_NONE;
838df628 6809
8989805d 6810 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
6811
6812#ifdef ATA_IRQ_TRAP
6813 ap->stats.unhandled_irq = 1;
6814 ap->stats.idle_irq = 1;
6815#endif
1da177e4 6816 return ap;
1da177e4
LT
6817}
6818
f0d36efd
TH
6819static void ata_host_release(struct device *gendev, void *res)
6820{
6821 struct ata_host *host = dev_get_drvdata(gendev);
6822 int i;
6823
1aa506e4
TH
6824 for (i = 0; i < host->n_ports; i++) {
6825 struct ata_port *ap = host->ports[i];
6826
4911487a
TH
6827 if (!ap)
6828 continue;
6829
6830 if (ap->scsi_host)
1aa506e4
TH
6831 scsi_host_put(ap->scsi_host);
6832
633273a3 6833 kfree(ap->pmp_link);
4911487a 6834 kfree(ap);
1aa506e4
TH
6835 host->ports[i] = NULL;
6836 }
6837
1aa56cca 6838 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6839}
6840
f3187195
TH
6841/**
6842 * ata_host_alloc - allocate and init basic ATA host resources
6843 * @dev: generic device this host is associated with
6844 * @max_ports: maximum number of ATA ports associated with this host
6845 *
6846 * Allocate and initialize basic ATA host resources. LLD calls
6847 * this function to allocate a host, initializes it fully and
6848 * attaches it using ata_host_register().
6849 *
6850 * @max_ports ports are allocated and host->n_ports is
6851 * initialized to @max_ports. The caller is allowed to decrease
6852 * host->n_ports before calling ata_host_register(). The unused
6853 * ports will be automatically freed on registration.
6854 *
6855 * RETURNS:
6856 * Allocate ATA host on success, NULL on failure.
6857 *
6858 * LOCKING:
6859 * Inherited from calling layer (may sleep).
6860 */
6861struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6862{
6863 struct ata_host *host;
6864 size_t sz;
6865 int i;
6866
6867 DPRINTK("ENTER\n");
6868
6869 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6870 return NULL;
6871
6872 /* alloc a container for our list of ATA ports (buses) */
6873 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6874 /* alloc a container for our list of ATA ports (buses) */
6875 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6876 if (!host)
6877 goto err_out;
6878
6879 devres_add(dev, host);
6880 dev_set_drvdata(dev, host);
6881
6882 spin_lock_init(&host->lock);
6883 host->dev = dev;
6884 host->n_ports = max_ports;
6885
6886 /* allocate ports bound to this host */
6887 for (i = 0; i < max_ports; i++) {
6888 struct ata_port *ap;
6889
6890 ap = ata_port_alloc(host);
6891 if (!ap)
6892 goto err_out;
6893
6894 ap->port_no = i;
6895 host->ports[i] = ap;
6896 }
6897
6898 devres_remove_group(dev, NULL);
6899 return host;
6900
6901 err_out:
6902 devres_release_group(dev, NULL);
6903 return NULL;
6904}
6905
f5cda257
TH
6906/**
6907 * ata_host_alloc_pinfo - alloc host and init with port_info array
6908 * @dev: generic device this host is associated with
6909 * @ppi: array of ATA port_info to initialize host with
6910 * @n_ports: number of ATA ports attached to this host
6911 *
6912 * Allocate ATA host and initialize with info from @ppi. If NULL
6913 * terminated, @ppi may contain fewer entries than @n_ports. The
6914 * last entry will be used for the remaining ports.
6915 *
6916 * RETURNS:
6917 * Allocate ATA host on success, NULL on failure.
6918 *
6919 * LOCKING:
6920 * Inherited from calling layer (may sleep).
6921 */
6922struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6923 const struct ata_port_info * const * ppi,
6924 int n_ports)
6925{
6926 const struct ata_port_info *pi;
6927 struct ata_host *host;
6928 int i, j;
6929
6930 host = ata_host_alloc(dev, n_ports);
6931 if (!host)
6932 return NULL;
6933
6934 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6935 struct ata_port *ap = host->ports[i];
6936
6937 if (ppi[j])
6938 pi = ppi[j++];
6939
6940 ap->pio_mask = pi->pio_mask;
6941 ap->mwdma_mask = pi->mwdma_mask;
6942 ap->udma_mask = pi->udma_mask;
6943 ap->flags |= pi->flags;
0c88758b 6944 ap->link.flags |= pi->link_flags;
f5cda257
TH
6945 ap->ops = pi->port_ops;
6946
6947 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6948 host->ops = pi->port_ops;
6949 if (!host->private_data && pi->private_data)
6950 host->private_data = pi->private_data;
6951 }
6952
6953 return host;
6954}
6955
32ebbc0c
TH
6956static void ata_host_stop(struct device *gendev, void *res)
6957{
6958 struct ata_host *host = dev_get_drvdata(gendev);
6959 int i;
6960
6961 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6962
6963 for (i = 0; i < host->n_ports; i++) {
6964 struct ata_port *ap = host->ports[i];
6965
6966 if (ap->ops->port_stop)
6967 ap->ops->port_stop(ap);
6968 }
6969
6970 if (host->ops->host_stop)
6971 host->ops->host_stop(host);
6972}
6973
ecef7253
TH
6974/**
6975 * ata_host_start - start and freeze ports of an ATA host
6976 * @host: ATA host to start ports for
6977 *
6978 * Start and then freeze ports of @host. Started status is
6979 * recorded in host->flags, so this function can be called
6980 * multiple times. Ports are guaranteed to get started only
f3187195
TH
6981 * once. If host->ops isn't initialized yet, its set to the
6982 * first non-dummy port ops.
ecef7253
TH
6983 *
6984 * LOCKING:
6985 * Inherited from calling layer (may sleep).
6986 *
6987 * RETURNS:
6988 * 0 if all ports are started successfully, -errno otherwise.
6989 */
6990int ata_host_start(struct ata_host *host)
6991{
32ebbc0c
TH
6992 int have_stop = 0;
6993 void *start_dr = NULL;
ecef7253
TH
6994 int i, rc;
6995
6996 if (host->flags & ATA_HOST_STARTED)
6997 return 0;
6998
6999 for (i = 0; i < host->n_ports; i++) {
7000 struct ata_port *ap = host->ports[i];
7001
f3187195
TH
7002 if (!host->ops && !ata_port_is_dummy(ap))
7003 host->ops = ap->ops;
7004
32ebbc0c
TH
7005 if (ap->ops->port_stop)
7006 have_stop = 1;
7007 }
7008
7009 if (host->ops->host_stop)
7010 have_stop = 1;
7011
7012 if (have_stop) {
7013 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
7014 if (!start_dr)
7015 return -ENOMEM;
7016 }
7017
7018 for (i = 0; i < host->n_ports; i++) {
7019 struct ata_port *ap = host->ports[i];
7020
ecef7253
TH
7021 if (ap->ops->port_start) {
7022 rc = ap->ops->port_start(ap);
7023 if (rc) {
0f9fe9b7 7024 if (rc != -ENODEV)
0f757743
AM
7025 dev_printk(KERN_ERR, host->dev,
7026 "failed to start port %d "
7027 "(errno=%d)\n", i, rc);
ecef7253
TH
7028 goto err_out;
7029 }
7030 }
ecef7253
TH
7031 ata_eh_freeze_port(ap);
7032 }
7033
32ebbc0c
TH
7034 if (start_dr)
7035 devres_add(host->dev, start_dr);
ecef7253
TH
7036 host->flags |= ATA_HOST_STARTED;
7037 return 0;
7038
7039 err_out:
7040 while (--i >= 0) {
7041 struct ata_port *ap = host->ports[i];
7042
7043 if (ap->ops->port_stop)
7044 ap->ops->port_stop(ap);
7045 }
32ebbc0c 7046 devres_free(start_dr);
ecef7253
TH
7047 return rc;
7048}
7049
b03732f0 7050/**
cca3974e
JG
7051 * ata_sas_host_init - Initialize a host struct
7052 * @host: host to initialize
7053 * @dev: device host is attached to
7054 * @flags: host flags
7055 * @ops: port_ops
b03732f0
BK
7056 *
7057 * LOCKING:
7058 * PCI/etc. bus probe sem.
7059 *
7060 */
f3187195 7061/* KILLME - the only user left is ipr */
cca3974e
JG
7062void ata_host_init(struct ata_host *host, struct device *dev,
7063 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 7064{
cca3974e
JG
7065 spin_lock_init(&host->lock);
7066 host->dev = dev;
7067 host->flags = flags;
7068 host->ops = ops;
b03732f0
BK
7069}
7070
f3187195
TH
7071/**
7072 * ata_host_register - register initialized ATA host
7073 * @host: ATA host to register
7074 * @sht: template for SCSI host
7075 *
7076 * Register initialized ATA host. @host is allocated using
7077 * ata_host_alloc() and fully initialized by LLD. This function
7078 * starts ports, registers @host with ATA and SCSI layers and
7079 * probe registered devices.
7080 *
7081 * LOCKING:
7082 * Inherited from calling layer (may sleep).
7083 *
7084 * RETURNS:
7085 * 0 on success, -errno otherwise.
7086 */
7087int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7088{
7089 int i, rc;
7090
7091 /* host must have been started */
7092 if (!(host->flags & ATA_HOST_STARTED)) {
7093 dev_printk(KERN_ERR, host->dev,
7094 "BUG: trying to register unstarted host\n");
7095 WARN_ON(1);
7096 return -EINVAL;
7097 }
7098
7099 /* Blow away unused ports. This happens when LLD can't
7100 * determine the exact number of ports to allocate at
7101 * allocation time.
7102 */
7103 for (i = host->n_ports; host->ports[i]; i++)
7104 kfree(host->ports[i]);
7105
7106 /* give ports names and add SCSI hosts */
7107 for (i = 0; i < host->n_ports; i++)
7108 host->ports[i]->print_id = ata_print_id++;
7109
7110 rc = ata_scsi_add_hosts(host, sht);
7111 if (rc)
7112 return rc;
7113
fafbae87
TH
7114 /* associate with ACPI nodes */
7115 ata_acpi_associate(host);
7116
f3187195
TH
7117 /* set cable, sata_spd_limit and report */
7118 for (i = 0; i < host->n_ports; i++) {
7119 struct ata_port *ap = host->ports[i];
f3187195
TH
7120 unsigned long xfer_mask;
7121
7122 /* set SATA cable type if still unset */
7123 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7124 ap->cbl = ATA_CBL_SATA;
7125
7126 /* init sata_spd_limit to the current value */
4fb37a25 7127 sata_link_init_spd(&ap->link);
f3187195 7128
cbcdd875 7129 /* print per-port info to dmesg */
f3187195
TH
7130 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7131 ap->udma_mask);
7132
abf6e8ed 7133 if (!ata_port_is_dummy(ap)) {
cbcdd875
TH
7134 ata_port_printk(ap, KERN_INFO,
7135 "%cATA max %s %s\n",
a16abc0b 7136 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 7137 ata_mode_string(xfer_mask),
cbcdd875 7138 ap->link.eh_info.desc);
abf6e8ed
TH
7139 ata_ehi_clear_desc(&ap->link.eh_info);
7140 } else
f3187195
TH
7141 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7142 }
7143
7144 /* perform each probe synchronously */
7145 DPRINTK("probe begin\n");
7146 for (i = 0; i < host->n_ports; i++) {
7147 struct ata_port *ap = host->ports[i];
f3187195
TH
7148
7149 /* probe */
7150 if (ap->ops->error_handler) {
9af5c9c9 7151 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
7152 unsigned long flags;
7153
7154 ata_port_probe(ap);
7155
7156 /* kick EH for boot probing */
7157 spin_lock_irqsave(ap->lock, flags);
7158
b558eddd 7159 ehi->probe_mask |= ATA_ALL_DEVICES;
cf480626 7160 ehi->action |= ATA_EH_RESET;
f3187195
TH
7161 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7162
f4d6d004 7163 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
7164 ap->pflags |= ATA_PFLAG_LOADING;
7165 ata_port_schedule_eh(ap);
7166
7167 spin_unlock_irqrestore(ap->lock, flags);
7168
7169 /* wait for EH to finish */
7170 ata_port_wait_eh(ap);
7171 } else {
7172 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7173 rc = ata_bus_probe(ap);
7174 DPRINTK("ata%u: bus probe end\n", ap->print_id);
7175
7176 if (rc) {
7177 /* FIXME: do something useful here?
7178 * Current libata behavior will
7179 * tear down everything when
7180 * the module is removed
7181 * or the h/w is unplugged.
7182 */
7183 }
7184 }
7185 }
7186
7187 /* probes are done, now scan each port's disk(s) */
7188 DPRINTK("host probe begin\n");
7189 for (i = 0; i < host->n_ports; i++) {
7190 struct ata_port *ap = host->ports[i];
7191
1ae46317 7192 ata_scsi_scan_host(ap, 1);
ca77329f 7193 ata_lpm_schedule(ap, ap->pm_policy);
f3187195
TH
7194 }
7195
7196 return 0;
7197}
7198
f5cda257
TH
7199/**
7200 * ata_host_activate - start host, request IRQ and register it
7201 * @host: target ATA host
7202 * @irq: IRQ to request
7203 * @irq_handler: irq_handler used when requesting IRQ
7204 * @irq_flags: irq_flags used when requesting IRQ
7205 * @sht: scsi_host_template to use when registering the host
7206 *
7207 * After allocating an ATA host and initializing it, most libata
7208 * LLDs perform three steps to activate the host - start host,
7209 * request IRQ and register it. This helper takes necessasry
7210 * arguments and performs the three steps in one go.
7211 *
3d46b2e2
PM
7212 * An invalid IRQ skips the IRQ registration and expects the host to
7213 * have set polling mode on the port. In this case, @irq_handler
7214 * should be NULL.
7215 *
f5cda257
TH
7216 * LOCKING:
7217 * Inherited from calling layer (may sleep).
7218 *
7219 * RETURNS:
7220 * 0 on success, -errno otherwise.
7221 */
7222int ata_host_activate(struct ata_host *host, int irq,
7223 irq_handler_t irq_handler, unsigned long irq_flags,
7224 struct scsi_host_template *sht)
7225{
cbcdd875 7226 int i, rc;
f5cda257
TH
7227
7228 rc = ata_host_start(host);
7229 if (rc)
7230 return rc;
7231
3d46b2e2
PM
7232 /* Special case for polling mode */
7233 if (!irq) {
7234 WARN_ON(irq_handler);
7235 return ata_host_register(host, sht);
7236 }
7237
f5cda257
TH
7238 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7239 dev_driver_string(host->dev), host);
7240 if (rc)
7241 return rc;
7242
cbcdd875
TH
7243 for (i = 0; i < host->n_ports; i++)
7244 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 7245
f5cda257
TH
7246 rc = ata_host_register(host, sht);
7247 /* if failed, just free the IRQ and leave ports alone */
7248 if (rc)
7249 devm_free_irq(host->dev, irq, host);
7250
7251 return rc;
7252}
7253
720ba126
TH
7254/**
7255 * ata_port_detach - Detach ATA port in prepration of device removal
7256 * @ap: ATA port to be detached
7257 *
7258 * Detach all ATA devices and the associated SCSI devices of @ap;
7259 * then, remove the associated SCSI host. @ap is guaranteed to
7260 * be quiescent on return from this function.
7261 *
7262 * LOCKING:
7263 * Kernel thread context (may sleep).
7264 */
741b7763 7265static void ata_port_detach(struct ata_port *ap)
720ba126
TH
7266{
7267 unsigned long flags;
41bda9c9 7268 struct ata_link *link;
f58229f8 7269 struct ata_device *dev;
720ba126
TH
7270
7271 if (!ap->ops->error_handler)
c3cf30a9 7272 goto skip_eh;
720ba126
TH
7273
7274 /* tell EH we're leaving & flush EH */
ba6a1308 7275 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 7276 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 7277 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
7278
7279 ata_port_wait_eh(ap);
7280
7f9ad9b8
TH
7281 /* EH is now guaranteed to see UNLOADING - EH context belongs
7282 * to us. Disable all existing devices.
720ba126 7283 */
41bda9c9
TH
7284 ata_port_for_each_link(link, ap) {
7285 ata_link_for_each_dev(dev, link)
7286 ata_dev_disable(dev);
7287 }
720ba126 7288
720ba126
TH
7289 /* Final freeze & EH. All in-flight commands are aborted. EH
7290 * will be skipped and retrials will be terminated with bad
7291 * target.
7292 */
ba6a1308 7293 spin_lock_irqsave(ap->lock, flags);
720ba126 7294 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 7295 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
7296
7297 ata_port_wait_eh(ap);
45a66c1c 7298 cancel_rearming_delayed_work(&ap->hotplug_task);
720ba126 7299
c3cf30a9 7300 skip_eh:
720ba126 7301 /* remove the associated SCSI host */
cca3974e 7302 scsi_remove_host(ap->scsi_host);
720ba126
TH
7303}
7304
0529c159
TH
7305/**
7306 * ata_host_detach - Detach all ports of an ATA host
7307 * @host: Host to detach
7308 *
7309 * Detach all ports of @host.
7310 *
7311 * LOCKING:
7312 * Kernel thread context (may sleep).
7313 */
7314void ata_host_detach(struct ata_host *host)
7315{
7316 int i;
7317
7318 for (i = 0; i < host->n_ports; i++)
7319 ata_port_detach(host->ports[i]);
562f0c2d
TH
7320
7321 /* the host is dead now, dissociate ACPI */
7322 ata_acpi_dissociate(host);
0529c159
TH
7323}
7324
1da177e4
LT
7325/**
7326 * ata_std_ports - initialize ioaddr with standard port offsets.
7327 * @ioaddr: IO address structure to be initialized
0baab86b
EF
7328 *
7329 * Utility function which initializes data_addr, error_addr,
7330 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7331 * device_addr, status_addr, and command_addr to standard offsets
7332 * relative to cmd_addr.
7333 *
7334 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 7335 */
0baab86b 7336
1da177e4
LT
7337void ata_std_ports(struct ata_ioports *ioaddr)
7338{
7339 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7340 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7341 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7342 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7343 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7344 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7345 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7346 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7347 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7348 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7349}
7350
0baab86b 7351
374b1873
JG
7352#ifdef CONFIG_PCI
7353
1da177e4
LT
7354/**
7355 * ata_pci_remove_one - PCI layer callback for device removal
7356 * @pdev: PCI device that was removed
7357 *
b878ca5d
TH
7358 * PCI layer indicates to libata via this hook that hot-unplug or
7359 * module unload event has occurred. Detach all ports. Resource
7360 * release is handled via devres.
1da177e4
LT
7361 *
7362 * LOCKING:
7363 * Inherited from PCI layer (may sleep).
7364 */
f0d36efd 7365void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7366{
2855568b 7367 struct device *dev = &pdev->dev;
cca3974e 7368 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7369
b878ca5d 7370 ata_host_detach(host);
1da177e4
LT
7371}
7372
7373/* move to PCI subsystem */
057ace5e 7374int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7375{
7376 unsigned long tmp = 0;
7377
7378 switch (bits->width) {
7379 case 1: {
7380 u8 tmp8 = 0;
7381 pci_read_config_byte(pdev, bits->reg, &tmp8);
7382 tmp = tmp8;
7383 break;
7384 }
7385 case 2: {
7386 u16 tmp16 = 0;
7387 pci_read_config_word(pdev, bits->reg, &tmp16);
7388 tmp = tmp16;
7389 break;
7390 }
7391 case 4: {
7392 u32 tmp32 = 0;
7393 pci_read_config_dword(pdev, bits->reg, &tmp32);
7394 tmp = tmp32;
7395 break;
7396 }
7397
7398 default:
7399 return -EINVAL;
7400 }
7401
7402 tmp &= bits->mask;
7403
7404 return (tmp == bits->val) ? 1 : 0;
7405}
9b847548 7406
6ffa01d8 7407#ifdef CONFIG_PM
3c5100c1 7408void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
7409{
7410 pci_save_state(pdev);
4c90d971 7411 pci_disable_device(pdev);
500530f6 7412
3a2d5b70 7413 if (mesg.event & PM_EVENT_SLEEP)
500530f6 7414 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
7415}
7416
553c4aa6 7417int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 7418{
553c4aa6
TH
7419 int rc;
7420
9b847548
JA
7421 pci_set_power_state(pdev, PCI_D0);
7422 pci_restore_state(pdev);
553c4aa6 7423
b878ca5d 7424 rc = pcim_enable_device(pdev);
553c4aa6
TH
7425 if (rc) {
7426 dev_printk(KERN_ERR, &pdev->dev,
7427 "failed to enable device after resume (%d)\n", rc);
7428 return rc;
7429 }
7430
9b847548 7431 pci_set_master(pdev);
553c4aa6 7432 return 0;
500530f6
TH
7433}
7434
3c5100c1 7435int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7436{
cca3974e 7437 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7438 int rc = 0;
7439
cca3974e 7440 rc = ata_host_suspend(host, mesg);
500530f6
TH
7441 if (rc)
7442 return rc;
7443
3c5100c1 7444 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7445
7446 return 0;
7447}
7448
7449int ata_pci_device_resume(struct pci_dev *pdev)
7450{
cca3974e 7451 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7452 int rc;
500530f6 7453
553c4aa6
TH
7454 rc = ata_pci_device_do_resume(pdev);
7455 if (rc == 0)
7456 ata_host_resume(host);
7457 return rc;
9b847548 7458}
6ffa01d8
TH
7459#endif /* CONFIG_PM */
7460
1da177e4
LT
7461#endif /* CONFIG_PCI */
7462
33267325
TH
7463static int __init ata_parse_force_one(char **cur,
7464 struct ata_force_ent *force_ent,
7465 const char **reason)
7466{
7467 /* FIXME: Currently, there's no way to tag init const data and
7468 * using __initdata causes build failure on some versions of
7469 * gcc. Once __initdataconst is implemented, add const to the
7470 * following structure.
7471 */
7472 static struct ata_force_param force_tbl[] __initdata = {
7473 { "40c", .cbl = ATA_CBL_PATA40 },
7474 { "80c", .cbl = ATA_CBL_PATA80 },
7475 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
7476 { "unk", .cbl = ATA_CBL_PATA_UNK },
7477 { "ign", .cbl = ATA_CBL_PATA_IGN },
7478 { "sata", .cbl = ATA_CBL_SATA },
7479 { "1.5Gbps", .spd_limit = 1 },
7480 { "3.0Gbps", .spd_limit = 2 },
7481 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
7482 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
7483 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
7484 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
7485 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
7486 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
7487 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
7488 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
7489 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
7490 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
7491 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
7492 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
7493 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
7494 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
7495 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7496 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7497 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7498 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7499 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7500 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7501 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7502 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7503 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7504 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7505 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7506 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7507 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7508 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7509 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7510 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7511 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7512 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7513 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7514 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7515 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7516 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
7517 };
7518 char *start = *cur, *p = *cur;
7519 char *id, *val, *endp;
7520 const struct ata_force_param *match_fp = NULL;
7521 int nr_matches = 0, i;
7522
7523 /* find where this param ends and update *cur */
7524 while (*p != '\0' && *p != ',')
7525 p++;
7526
7527 if (*p == '\0')
7528 *cur = p;
7529 else
7530 *cur = p + 1;
7531
7532 *p = '\0';
7533
7534 /* parse */
7535 p = strchr(start, ':');
7536 if (!p) {
7537 val = strstrip(start);
7538 goto parse_val;
7539 }
7540 *p = '\0';
7541
7542 id = strstrip(start);
7543 val = strstrip(p + 1);
7544
7545 /* parse id */
7546 p = strchr(id, '.');
7547 if (p) {
7548 *p++ = '\0';
7549 force_ent->device = simple_strtoul(p, &endp, 10);
7550 if (p == endp || *endp != '\0') {
7551 *reason = "invalid device";
7552 return -EINVAL;
7553 }
7554 }
7555
7556 force_ent->port = simple_strtoul(id, &endp, 10);
7557 if (p == endp || *endp != '\0') {
7558 *reason = "invalid port/link";
7559 return -EINVAL;
7560 }
7561
7562 parse_val:
7563 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
7564 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
7565 const struct ata_force_param *fp = &force_tbl[i];
7566
7567 if (strncasecmp(val, fp->name, strlen(val)))
7568 continue;
7569
7570 nr_matches++;
7571 match_fp = fp;
7572
7573 if (strcasecmp(val, fp->name) == 0) {
7574 nr_matches = 1;
7575 break;
7576 }
7577 }
7578
7579 if (!nr_matches) {
7580 *reason = "unknown value";
7581 return -EINVAL;
7582 }
7583 if (nr_matches > 1) {
7584 *reason = "ambigious value";
7585 return -EINVAL;
7586 }
7587
7588 force_ent->param = *match_fp;
7589
7590 return 0;
7591}
7592
7593static void __init ata_parse_force_param(void)
7594{
7595 int idx = 0, size = 1;
7596 int last_port = -1, last_device = -1;
7597 char *p, *cur, *next;
7598
7599 /* calculate maximum number of params and allocate force_tbl */
7600 for (p = ata_force_param_buf; *p; p++)
7601 if (*p == ',')
7602 size++;
7603
7604 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
7605 if (!ata_force_tbl) {
7606 printk(KERN_WARNING "ata: failed to extend force table, "
7607 "libata.force ignored\n");
7608 return;
7609 }
7610
7611 /* parse and populate the table */
7612 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7613 const char *reason = "";
7614 struct ata_force_ent te = { .port = -1, .device = -1 };
7615
7616 next = cur;
7617 if (ata_parse_force_one(&next, &te, &reason)) {
7618 printk(KERN_WARNING "ata: failed to parse force "
7619 "parameter \"%s\" (%s)\n",
7620 cur, reason);
7621 continue;
7622 }
7623
7624 if (te.port == -1) {
7625 te.port = last_port;
7626 te.device = last_device;
7627 }
7628
7629 ata_force_tbl[idx++] = te;
7630
7631 last_port = te.port;
7632 last_device = te.device;
7633 }
7634
7635 ata_force_tbl_size = idx;
7636}
1da177e4 7637
1da177e4
LT
7638static int __init ata_init(void)
7639{
a8601e5f 7640 ata_probe_timeout *= HZ;
33267325
TH
7641
7642 ata_parse_force_param();
7643
1da177e4
LT
7644 ata_wq = create_workqueue("ata");
7645 if (!ata_wq)
7646 return -ENOMEM;
7647
453b07ac
TH
7648 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7649 if (!ata_aux_wq) {
7650 destroy_workqueue(ata_wq);
7651 return -ENOMEM;
7652 }
7653
1da177e4
LT
7654 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7655 return 0;
7656}
7657
7658static void __exit ata_exit(void)
7659{
33267325 7660 kfree(ata_force_tbl);
1da177e4 7661 destroy_workqueue(ata_wq);
453b07ac 7662 destroy_workqueue(ata_aux_wq);
1da177e4
LT
7663}
7664
a4625085 7665subsys_initcall(ata_init);
1da177e4
LT
7666module_exit(ata_exit);
7667
67846b30 7668static unsigned long ratelimit_time;
34af946a 7669static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7670
7671int ata_ratelimit(void)
7672{
7673 int rc;
7674 unsigned long flags;
7675
7676 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7677
7678 if (time_after(jiffies, ratelimit_time)) {
7679 rc = 1;
7680 ratelimit_time = jiffies + (HZ/5);
7681 } else
7682 rc = 0;
7683
7684 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7685
7686 return rc;
7687}
7688
c22daff4
TH
7689/**
7690 * ata_wait_register - wait until register value changes
7691 * @reg: IO-mapped register
7692 * @mask: Mask to apply to read register value
7693 * @val: Wait condition
7694 * @interval_msec: polling interval in milliseconds
7695 * @timeout_msec: timeout in milliseconds
7696 *
7697 * Waiting for some bits of register to change is a common
7698 * operation for ATA controllers. This function reads 32bit LE
7699 * IO-mapped register @reg and tests for the following condition.
7700 *
7701 * (*@reg & mask) != val
7702 *
7703 * If the condition is met, it returns; otherwise, the process is
7704 * repeated after @interval_msec until timeout.
7705 *
7706 * LOCKING:
7707 * Kernel thread context (may sleep)
7708 *
7709 * RETURNS:
7710 * The final register value.
7711 */
7712u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7713 unsigned long interval_msec,
7714 unsigned long timeout_msec)
7715{
7716 unsigned long timeout;
7717 u32 tmp;
7718
7719 tmp = ioread32(reg);
7720
7721 /* Calculate timeout _after_ the first read to make sure
7722 * preceding writes reach the controller before starting to
7723 * eat away the timeout.
7724 */
7725 timeout = jiffies + (timeout_msec * HZ) / 1000;
7726
7727 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7728 msleep(interval_msec);
7729 tmp = ioread32(reg);
7730 }
7731
7732 return tmp;
7733}
7734
dd5b06c4
TH
7735/*
7736 * Dummy port_ops
7737 */
7738static void ata_dummy_noret(struct ata_port *ap) { }
7739static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7740static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7741
7742static u8 ata_dummy_check_status(struct ata_port *ap)
7743{
7744 return ATA_DRDY;
7745}
7746
7747static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7748{
7749 return AC_ERR_SYSTEM;
7750}
7751
7752const struct ata_port_operations ata_dummy_port_ops = {
dd5b06c4
TH
7753 .check_status = ata_dummy_check_status,
7754 .check_altstatus = ata_dummy_check_status,
7755 .dev_select = ata_noop_dev_select,
7756 .qc_prep = ata_noop_qc_prep,
7757 .qc_issue = ata_dummy_qc_issue,
7758 .freeze = ata_dummy_noret,
7759 .thaw = ata_dummy_noret,
7760 .error_handler = ata_dummy_noret,
7761 .post_internal_cmd = ata_dummy_qc_noret,
7762 .irq_clear = ata_dummy_noret,
7763 .port_start = ata_dummy_ret0,
7764 .port_stop = ata_dummy_noret,
7765};
7766
21b0ad4f
TH
7767const struct ata_port_info ata_dummy_port_info = {
7768 .port_ops = &ata_dummy_port_ops,
7769};
7770
1da177e4
LT
7771/*
7772 * libata is essentially a library of internal helper functions for
7773 * low-level ATA host controller drivers. As such, the API/ABI is
7774 * likely to change as new drivers are added and updated.
7775 * Do not depend on ABI/API stability.
7776 */
e9c83914
TH
7777EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7778EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7779EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 7780EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7781EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
7782EXPORT_SYMBOL_GPL(ata_std_bios_param);
7783EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 7784EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7785EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7786EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 7787EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7788EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7789EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7790EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 7791EXPORT_SYMBOL_GPL(ata_sg_init);
9a1004d0 7792EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 7793EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7794EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 7795EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
7796EXPORT_SYMBOL_GPL(ata_tf_load);
7797EXPORT_SYMBOL_GPL(ata_tf_read);
7798EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7799EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 7800EXPORT_SYMBOL_GPL(sata_print_link_status);
436d34b3 7801EXPORT_SYMBOL_GPL(atapi_cmd_type);
1da177e4
LT
7802EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7803EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
7804EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7805EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7806EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7807EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7808EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7809EXPORT_SYMBOL_GPL(ata_mode_string);
7810EXPORT_SYMBOL_GPL(ata_id_xfermask);
1da177e4
LT
7811EXPORT_SYMBOL_GPL(ata_check_status);
7812EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
7813EXPORT_SYMBOL_GPL(ata_exec_command);
7814EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 7815EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 7816EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 7817EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
7818EXPORT_SYMBOL_GPL(ata_data_xfer);
7819EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
31cc23b3 7820EXPORT_SYMBOL_GPL(ata_std_qc_defer);
1da177e4 7821EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 7822EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 7823EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
7824EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7825EXPORT_SYMBOL_GPL(ata_bmdma_start);
7826EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
358f9a77 7827EXPORT_SYMBOL_GPL(ata_noop_irq_clear);
1da177e4
LT
7828EXPORT_SYMBOL_GPL(ata_bmdma_status);
7829EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7830EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7831EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7832EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7833EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7834EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7835EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7836EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7837EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7838EXPORT_SYMBOL_GPL(sata_link_debounce);
7839EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4 7840EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7841EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7842EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7843EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7844EXPORT_SYMBOL_GPL(sata_std_hardreset);
7845EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7846EXPORT_SYMBOL_GPL(ata_dev_classify);
7847EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7848EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7849EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7850EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7851EXPORT_SYMBOL_GPL(ata_busy_sleep);
88ff6eaf 7852EXPORT_SYMBOL_GPL(ata_wait_after_reset);
d4b2bab4 7853EXPORT_SYMBOL_GPL(ata_wait_ready);
1da177e4
LT
7854EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7855EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7856EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7857EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7858EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7859EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7860EXPORT_SYMBOL_GPL(sata_scr_valid);
7861EXPORT_SYMBOL_GPL(sata_scr_read);
7862EXPORT_SYMBOL_GPL(sata_scr_write);
7863EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7864EXPORT_SYMBOL_GPL(ata_link_online);
7865EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7866#ifdef CONFIG_PM
cca3974e
JG
7867EXPORT_SYMBOL_GPL(ata_host_suspend);
7868EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7869#endif /* CONFIG_PM */
6a62a04d
TH
7870EXPORT_SYMBOL_GPL(ata_id_string);
7871EXPORT_SYMBOL_GPL(ata_id_c_string);
1da177e4
LT
7872EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7873
1bc4ccff 7874EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 7875EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
7876EXPORT_SYMBOL_GPL(ata_timing_compute);
7877EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 7878EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 7879
1da177e4
LT
7880#ifdef CONFIG_PCI
7881EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7882EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7883EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7884EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
4e6b79fa 7885EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
1da177e4
LT
7886EXPORT_SYMBOL_GPL(ata_pci_init_one);
7887EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7888#ifdef CONFIG_PM
500530f6
TH
7889EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7890EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7891EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7892EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7893#endif /* CONFIG_PM */
67951ade
AC
7894EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7895EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 7896#endif /* CONFIG_PCI */
9b847548 7897
31f88384 7898EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
3af9a77a
TH
7899EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7900EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7901EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7902EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7903
b64bbc39
TH
7904EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7905EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7906EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7907EXPORT_SYMBOL_GPL(ata_port_desc);
7908#ifdef CONFIG_PCI
7909EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7910#endif /* CONFIG_PCI */
7b70fc03 7911EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7912EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7913EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 7914EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 7915EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
7916EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7917EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7918EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7919EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 7920EXPORT_SYMBOL_GPL(ata_do_eh);
83625006 7921EXPORT_SYMBOL_GPL(ata_irq_on);
a619f981 7922EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
7923
7924EXPORT_SYMBOL_GPL(ata_cable_40wire);
7925EXPORT_SYMBOL_GPL(ata_cable_80wire);
7926EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 7927EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 7928EXPORT_SYMBOL_GPL(ata_cable_sata);