ide/libata: ST310211A has buggy HPA too
[linux-block.git] drivers/ata/libata-core.c
/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		   Please ALWAYS copy linux-ide@vger.kernel.org
 *		   on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 * Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>

#include "libata.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = {   5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = {  25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

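/*
 * Illustrative note (semantics inferred from sata_link_debounce() later
 * in this file): link status is sampled every `interval' ms and must
 * remain stable for `duration' ms before the link is considered
 * settled; the whole operation is abandoned after `timeout' ms.
 */
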
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for a command
 * @fis: Buffer into which data will be output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
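
/*
 * Illustrative sketch (not taken from the original source): issuing
 * IDENTIFY DEVICE through a FIS built by the helper above would look
 * roughly like
 *
 *	u8 fis[20];
 *	tf.command = ATA_CMD_ID_ATA;
 *	ata_tf_to_fis(&tf, 0, 1, fis);
 *
 * after which fis[0] == 0x27 (Register - Host to Device), fis[1] == 0x80
 * (Command bit set, PMP port 0) and fis[2] == ATA_CMD_ID_ATA.
 */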

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
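
/*
 * ata_rw_cmds[] is indexed as base + fua + lba48 + write: base selects
 * the group (0 = PIO multi, 8 = PIO, 16 = DMA), fua is 4 when
 * ATA_TFLAG_FUA is set, lba48 is 2 for LBA48 commands and write is 1
 * for writes -- see ata_rwcmd_protocol() below.  A zero entry marks an
 * unsupported combination.
 */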

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

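/*
 * Usage sketch (illustrative; the real caller is the SCSI translation
 * layer): a read/write request is converted roughly as
 *
 *	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block,
 *			     tf_flags, qc->tag);
 *
 * where -ERANGE means the address is out of range for the device and
 * -EINVAL means the request itself is invalid.
 */
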
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

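/*
 * For example (illustrative values): a device supporting PIO modes 0-4,
 * MWDMA modes 0-2 and UDMA modes 0-5 would be described by
 *
 *	ata_pack_xfermask(0x1f, 0x07, 0x3f)
 *
 * with each sub-mask shifted into its ATA_SHIFT_* position of the
 * packed mask.
 */
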
/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_acpi_on_disable(dev);
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		dev->class++;
	}
}

static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because disks are too stupid to know that
	 * if the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL; instead they just
	 * give up.  So, for medium_power to work at all,
	 * we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disables DIPM if the device does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions, which effectively
		 * disables DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void) err_mask;

	return 0;
}

/**
 * ata_dev_enable_pm - enable SATA interface power management
 * @dev: device to enable power management
 * @policy: the link power management policy
 *
 * Enable SATA Interface power management.  This will enable
 * Device Interface Power Management (DIPM) for min_power
 * policy, and then call driver specific callbacks for
 * enabling Host Initiated Power management.
 *
 * Locking: Caller.
 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return /* rc */;	/* hopefully we can use 'rc' eventually */
}

#ifdef CONFIG_PM
/**
 * ata_dev_disable_pm - disable SATA interface power management
 * @dev: device to disable power management
 *
 * Disable SATA Interface power management.  This will disable
 * Device Interface Power Management (DIPM) without changing
 * policy, and call driver specific callbacks for disabling Host
 * Initiated Power management.
 *
 * Locking: Caller.
 * Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */

void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EHI_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}

#ifdef CONFIG_PM
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */


/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 * %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 shortly afterwards and described them as reserved
	 * for SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk(KERN_INFO "ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @dev: ATA device to classify (starting at zero)
 * @present: device seems present
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

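/*
 * Example (a sketch mirroring how the probe code in this file extracts
 * the model string from IDENTIFY data):
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */
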
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

/**
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result native max address
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive.  -EIO on other
 * errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}


/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds, not the modes, that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/**
 * ata_pio_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user (low level driver)'s responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_pio_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_queue_task(struct ata_port *ap, void *data,
			       unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's the caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 * @timeout: Timeout in msecs (0 for default)
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

432729f0
AC
1807/**
1808 * ata_pio_mask_no_iordy - Return the non IORDY mask
1809 * @adev: ATA device
1810 *
1811 * Compute the highest mode possible if we are not using iordy. Return
1812 * -1 if no iordy mode is available.
1813 */
a617c09f 1814
432729f0
AC
1815static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1816{
1bc4ccff 1817 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1818 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1819 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
 1820 /* Is the speed faster than the drive allows without IORDY? */
1821 if (pio) {
 1822 /* These are cycle times, not frequencies - watch the logic! */
1823 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1824 return 3 << ATA_SHIFT_PIO;
1825 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1826 }
1827 }
432729f0 1828 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1829}
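
/*
 * Worked example (added annotation): a drive whose ATA_ID_EIDE_PIO
 * word reports a 300ns minimum non-IORDY cycle is slower than
 * PIO2's 240ns, so the function returns 3 << ATA_SHIFT_PIO (PIO0-1
 * only); a non-zero report of 240ns or less yields
 * 7 << ATA_SHIFT_PIO (PIO0-2).
 */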
1830
1da177e4 1831/**
49016aca 1832 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1833 * @dev: target device
1834 * @p_class: pointer to class of the target device (may be changed)
bff04647 1835 * @flags: ATA_READID_* flags
fe635c7e 1836 * @id: buffer to read IDENTIFY data into
1da177e4 1837 *
49016aca
TH
1838 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1839 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1840 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1841 * for pre-ATA4 drives.
1da177e4 1842 *
50a99018 1843 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1844 * now we abort if we hit that case.
50a99018 1845 *
1da177e4 1846 * LOCKING:
49016aca
TH
1847 * Kernel thread context (may sleep)
1848 *
1849 * RETURNS:
1850 * 0 on success, -errno otherwise.
1da177e4 1851 */
a9beec95 1852int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1853 unsigned int flags, u16 *id)
1da177e4 1854{
9af5c9c9 1855 struct ata_port *ap = dev->link->ap;
49016aca 1856 unsigned int class = *p_class;
a0123703 1857 struct ata_taskfile tf;
49016aca
TH
1858 unsigned int err_mask = 0;
1859 const char *reason;
54936f8b 1860 int may_fallback = 1, tried_spinup = 0;
49016aca 1861 int rc;
1da177e4 1862
0dd4b21f 1863 if (ata_msg_ctl(ap))
44877b4e 1864 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1865
49016aca 1866 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
49016aca 1867 retry:
3373efd8 1868 ata_tf_init(dev, &tf);
a0123703 1869
49016aca
TH
1870 switch (class) {
1871 case ATA_DEV_ATA:
a0123703 1872 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1873 break;
1874 case ATA_DEV_ATAPI:
a0123703 1875 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1876 break;
1877 default:
1878 rc = -ENODEV;
1879 reason = "unsupported class";
1880 goto err_out;
1da177e4
LT
1881 }
1882
a0123703 1883 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1884
1885 /* Some devices choke if TF registers contain garbage. Make
1886 * sure those are properly initialized.
1887 */
1888 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1889
1890 /* Device presence detection is unreliable on some
1891 * controllers. Always poll IDENTIFY if available.
1892 */
1893 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1894
3373efd8 1895 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2b789108 1896 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
a0123703 1897 if (err_mask) {
800b3996 1898 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8 1899 DPRINTK("ata%u.%d: NODEV after polling detection\n",
44877b4e 1900 ap->print_id, dev->devno);
55a8e2c8
TH
1901 return -ENOENT;
1902 }
1903
54936f8b
TH
1904 /* Device or controller might have reported the wrong
1905 * device class. Give a shot at the other IDENTIFY if
1906 * the current one is aborted by the device.
1907 */
1908 if (may_fallback &&
1909 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1910 may_fallback = 0;
1911
1912 if (class == ATA_DEV_ATA)
1913 class = ATA_DEV_ATAPI;
1914 else
1915 class = ATA_DEV_ATA;
1916 goto retry;
1917 }
1918
49016aca
TH
1919 rc = -EIO;
1920 reason = "I/O error";
1da177e4
LT
1921 goto err_out;
1922 }
1923
54936f8b
TH
1924 /* Falling back doesn't make sense if ID data was read
1925 * successfully at least once.
1926 */
1927 may_fallback = 0;
1928
49016aca 1929 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1930
49016aca 1931 /* sanity check */
a4f5749b 1932 rc = -EINVAL;
6070068b 1933 reason = "device reports invalid type";
a4f5749b
TH
1934
1935 if (class == ATA_DEV_ATA) {
1936 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1937 goto err_out;
1938 } else {
1939 if (ata_id_is_ata(id))
1940 goto err_out;
49016aca
TH
1941 }
1942
169439c2
ML
1943 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1944 tried_spinup = 1;
1945 /*
1946 * Drive powered-up in standby mode, and requires a specific
1947 * SET_FEATURES spin-up subcommand before it will accept
1948 * anything other than the original IDENTIFY command.
1949 */
218f3d30 1950 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 1951 if (err_mask && id[2] != 0x738c) {
169439c2
ML
1952 rc = -EIO;
1953 reason = "SPINUP failed";
1954 goto err_out;
1955 }
1956 /*
1957 * If the drive initially returned incomplete IDENTIFY info,
1958 * we now must reissue the IDENTIFY command.
1959 */
1960 if (id[2] == 0x37c8)
1961 goto retry;
1962 }
1963
bff04647 1964 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
1965 /*
1966 * The exact sequence expected by certain pre-ATA4 drives is:
1967 * SRST RESET
50a99018
AC
1968 * IDENTIFY (optional in early ATA)
1969 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
1970 * anything else..
1971 * Some drives were very specific about that exact sequence.
50a99018
AC
1972 *
 1973 * Note that ATA4 says LBA is mandatory, so the second check
 1974 * should never trigger.
49016aca
TH
1975 */
1976 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 1977 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
1978 if (err_mask) {
1979 rc = -EIO;
1980 reason = "INIT_DEV_PARAMS failed";
1981 goto err_out;
1982 }
1983
1984 /* current CHS translation info (id[53-58]) might be
1985 * changed. reread the identify device info.
1986 */
bff04647 1987 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
1988 goto retry;
1989 }
1990 }
1991
1992 *p_class = class;
fe635c7e 1993
49016aca
TH
1994 return 0;
1995
1996 err_out:
88574551 1997 if (ata_msg_warn(ap))
0dd4b21f 1998 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 1999 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
2000 return rc;
2001}
2002
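/* Added annotation: ata_dev_knobble() flags a device that sits on a
 * SATA cable but whose IDENTIFY data does not claim SATA support -
 * almost certainly a PATA drive behind a SATA bridge, so
 * ata_dev_configure() applies the bridge limits below.
 */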
3373efd8 2003static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2004{
9af5c9c9
TH
2005 struct ata_port *ap = dev->link->ap;
2006 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2007}
2008
a6e6ce8e
TH
2009static void ata_dev_config_ncq(struct ata_device *dev,
2010 char *desc, size_t desc_sz)
2011{
9af5c9c9 2012 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2013 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2014
2015 if (!ata_id_has_ncq(dev->id)) {
2016 desc[0] = '\0';
2017 return;
2018 }
75683fe7 2019 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2020 snprintf(desc, desc_sz, "NCQ (not used)");
2021 return;
2022 }
a6e6ce8e 2023 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2024 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2025 dev->flags |= ATA_DFLAG_NCQ;
2026 }
2027
2028 if (hdepth >= ddepth)
2029 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2030 else
2031 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2032}
2033
49016aca 2034/**
ffeae418 2035 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
2036 * @dev: Target device to configure
2037 *
2038 * Configure @dev according to @dev->id. Generic and low-level
2039 * driver specific fixups are also applied.
49016aca
TH
2040 *
2041 * LOCKING:
ffeae418
TH
2042 * Kernel thread context (may sleep)
2043 *
2044 * RETURNS:
2045 * 0 on success, -errno otherwise
49016aca 2046 */
efdaedc4 2047int ata_dev_configure(struct ata_device *dev)
49016aca 2048{
9af5c9c9
TH
2049 struct ata_port *ap = dev->link->ap;
2050 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 2051 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 2052 const u16 *id = dev->id;
7dc951ae 2053 unsigned long xfer_mask;
b352e57d 2054 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
2055 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2056 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 2057 int rc;
49016aca 2058
0dd4b21f 2059 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
2060 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2061 __FUNCTION__);
ffeae418 2062 return 0;
49016aca
TH
2063 }
2064
0dd4b21f 2065 if (ata_msg_probe(ap))
44877b4e 2066 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 2067
75683fe7
TH
2068 /* set horkage */
2069 dev->horkage |= ata_dev_blacklisted(dev);
2070
6746544c
TH
2071 /* let ACPI work its magic */
2072 rc = ata_acpi_on_devcfg(dev);
2073 if (rc)
2074 return rc;
08573a86 2075
05027adc
TH
2076 /* massage HPA, do it early as it might change IDENTIFY data */
2077 rc = ata_hpa_resize(dev);
2078 if (rc)
2079 return rc;
2080
c39f5ebe 2081 /* print device capabilities */
0dd4b21f 2082 if (ata_msg_probe(ap))
88574551
TH
2083 ata_dev_printk(dev, KERN_DEBUG,
2084 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2085 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 2086 __FUNCTION__,
f15a1daf
TH
2087 id[49], id[82], id[83], id[84],
2088 id[85], id[86], id[87], id[88]);
c39f5ebe 2089
208a9933 2090 /* initialize to-be-configured parameters */
ea1dd4e1 2091 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
2092 dev->max_sectors = 0;
2093 dev->cdb_len = 0;
2094 dev->n_sectors = 0;
2095 dev->cylinders = 0;
2096 dev->heads = 0;
2097 dev->sectors = 0;
2098
1da177e4
LT
2099 /*
2100 * common ATA, ATAPI feature tests
2101 */
2102
ff8854b2 2103 /* find max transfer mode; for printk only */
1148c3a7 2104 xfer_mask = ata_id_xfermask(id);
1da177e4 2105
0dd4b21f
BP
2106 if (ata_msg_probe(ap))
2107 ata_dump_id(id);
1da177e4 2108
ef143d57
AL
2109 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2110 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2111 sizeof(fwrevbuf));
2112
2113 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2114 sizeof(modelbuf));
2115
1da177e4
LT
2116 /* ATA-specific feature tests */
2117 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
2118 if (ata_id_is_cfa(id)) {
2119 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
2120 ata_dev_printk(dev, KERN_WARNING,
2121 "supports DRM functions and may "
2122 "not be fully accessable.\n");
b352e57d 2123 snprintf(revbuf, 7, "CFA");
ae8d4ee7 2124 } else {
2dcb407e 2125 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
ae8d4ee7
AC
2126 /* Warn the user if the device has TPM extensions */
2127 if (ata_id_has_tpm(id))
2128 ata_dev_printk(dev, KERN_WARNING,
2129 "supports DRM functions and may "
2130 "not be fully accessable.\n");
2131 }
b352e57d 2132
1148c3a7 2133 dev->n_sectors = ata_id_n_sectors(id);
2940740b 2134
3f64f565
EM
2135 if (dev->id[59] & 0x100)
2136 dev->multi_count = dev->id[59] & 0xff;
2137
1148c3a7 2138 if (ata_id_has_lba(id)) {
4c2d721a 2139 const char *lba_desc;
a6e6ce8e 2140 char ncq_desc[20];
8bf62ece 2141
4c2d721a
TH
2142 lba_desc = "LBA";
2143 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 2144 if (ata_id_has_lba48(id)) {
8bf62ece 2145 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 2146 lba_desc = "LBA48";
6fc49adb
TH
2147
2148 if (dev->n_sectors >= (1UL << 28) &&
2149 ata_id_has_flush_ext(id))
2150 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 2151 }
8bf62ece 2152
a6e6ce8e
TH
2153 /* config NCQ */
2154 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2155
8bf62ece 2156 /* print device info to dmesg */
3f64f565
EM
2157 if (ata_msg_drv(ap) && print_info) {
2158 ata_dev_printk(dev, KERN_INFO,
2159 "%s: %s, %s, max %s\n",
2160 revbuf, modelbuf, fwrevbuf,
2161 ata_mode_string(xfer_mask));
2162 ata_dev_printk(dev, KERN_INFO,
2163 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 2164 (unsigned long long)dev->n_sectors,
3f64f565
EM
2165 dev->multi_count, lba_desc, ncq_desc);
2166 }
ffeae418 2167 } else {
8bf62ece
AL
2168 /* CHS */
2169
2170 /* Default translation */
1148c3a7
TH
2171 dev->cylinders = id[1];
2172 dev->heads = id[3];
2173 dev->sectors = id[6];
8bf62ece 2174
1148c3a7 2175 if (ata_id_current_chs_valid(id)) {
8bf62ece 2176 /* Current CHS translation is valid. */
1148c3a7
TH
2177 dev->cylinders = id[54];
2178 dev->heads = id[55];
2179 dev->sectors = id[56];
8bf62ece
AL
2180 }
2181
2182 /* print device info to dmesg */
3f64f565 2183 if (ata_msg_drv(ap) && print_info) {
88574551 2184 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2185 "%s: %s, %s, max %s\n",
2186 revbuf, modelbuf, fwrevbuf,
2187 ata_mode_string(xfer_mask));
a84471fe 2188 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2189 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2190 (unsigned long long)dev->n_sectors,
2191 dev->multi_count, dev->cylinders,
2192 dev->heads, dev->sectors);
2193 }
07f6f7d0
AL
2194 }
2195
6e7846e9 2196 dev->cdb_len = 16;
1da177e4
LT
2197 }
2198
2199 /* ATAPI-specific feature tests */
2c13b7ce 2200 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2201 const char *cdb_intr_string = "";
2202 const char *atapi_an_string = "";
7d77b247 2203 u32 sntf;
08a556db 2204
1148c3a7 2205 rc = atapi_cdb_len(id);
1da177e4 2206 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2207 if (ata_msg_warn(ap))
88574551
TH
2208 ata_dev_printk(dev, KERN_WARNING,
2209 "unsupported CDB len\n");
ffeae418 2210 rc = -EINVAL;
1da177e4
LT
2211 goto err_out_nosup;
2212 }
6e7846e9 2213 dev->cdb_len = (unsigned int) rc;
1da177e4 2214
7d77b247
TH
2215 /* Enable ATAPI AN if both the host and device have
2216 * the support. If PMP is attached, SNTF is required
2217 * to enable ATAPI AN to discern between PHY status
2218 * changed notifications and ATAPI ANs.
9f45cbd3 2219 */
7d77b247
TH
2220 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2221 (!ap->nr_pmp_links ||
2222 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2223 unsigned int err_mask;
2224
9f45cbd3 2225 /* issue SET feature command to turn this on */
218f3d30
JG
2226 err_mask = ata_dev_set_feature(dev,
2227 SETFEATURES_SATA_ENABLE, SATA_AN);
854c73a2 2228 if (err_mask)
9f45cbd3 2229 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2230 "failed to enable ATAPI AN "
2231 "(err_mask=0x%x)\n", err_mask);
2232 else {
9f45cbd3 2233 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2234 atapi_an_string = ", ATAPI AN";
2235 }
9f45cbd3
KCA
2236 }
2237
08a556db 2238 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2239 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2240 cdb_intr_string = ", CDB intr";
2241 }
312f7da2 2242
1da177e4 2243 /* print device info to dmesg */
5afc8142 2244 if (ata_msg_drv(ap) && print_info)
ef143d57 2245 ata_dev_printk(dev, KERN_INFO,
854c73a2 2246 "ATAPI: %s, %s, max %s%s%s\n",
ef143d57 2247 modelbuf, fwrevbuf,
12436c30 2248 ata_mode_string(xfer_mask),
854c73a2 2249 cdb_intr_string, atapi_an_string);
1da177e4
LT
2250 }
2251
914ed354
TH
2252 /* determine max_sectors */
2253 dev->max_sectors = ATA_MAX_SECTORS;
2254 if (dev->flags & ATA_DFLAG_LBA48)
2255 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2256
ca77329f
KCA
2257 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2258 if (ata_id_has_hipm(dev->id))
2259 dev->flags |= ATA_DFLAG_HIPM;
2260 if (ata_id_has_dipm(dev->id))
2261 dev->flags |= ATA_DFLAG_DIPM;
2262 }
2263
c5038fc0
AC
2264 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2265 200 sectors */
3373efd8 2266 if (ata_dev_knobble(dev)) {
5afc8142 2267 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2268 ata_dev_printk(dev, KERN_INFO,
2269 "applying bridge limits\n");
5a529139 2270 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2271 dev->max_sectors = ATA_MAX_SECTORS;
2272 }
2273
f8d8e579 2274 if ((dev->class == ATA_DEV_ATAPI) &&
f442cd86 2275 (atapi_command_packet_set(id) == TYPE_TAPE)) {
f8d8e579 2276 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
f442cd86
AL
2277 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2278 }
f8d8e579 2279
75683fe7 2280 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2281 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2282 dev->max_sectors);
18d6e9d5 2283
ca77329f
KCA
2284 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2285 dev->horkage |= ATA_HORKAGE_IPM;
2286
2287 /* reset link pm_policy for this port to no pm */
2288 ap->pm_policy = MAX_PERFORMANCE;
2289 }
2290
4b2f3ede 2291 if (ap->ops->dev_config)
cd0d3bbc 2292 ap->ops->dev_config(dev);
4b2f3ede 2293
c5038fc0
AC
2294 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2295 /* Let the user know. We don't want to disallow opens for
2296 rescue purposes, or in case the vendor is just a blithering
2297 idiot. Do this after the dev_config call as some controllers
2298 with buggy firmware may want to avoid reporting false device
2299 bugs */
2300
2301 if (print_info) {
2302 ata_dev_printk(dev, KERN_WARNING,
2303"Drive reports diagnostics failure. This may indicate a drive\n");
2304 ata_dev_printk(dev, KERN_WARNING,
2305"fault or invalid emulation. Contact drive vendor for information.\n");
2306 }
2307 }
2308
0dd4b21f
BP
2309 if (ata_msg_probe(ap))
2310 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2311 __FUNCTION__, ata_chk_status(ap));
ffeae418 2312 return 0;
1da177e4
LT
2313
2314err_out_nosup:
0dd4b21f 2315 if (ata_msg_probe(ap))
88574551
TH
2316 ata_dev_printk(dev, KERN_DEBUG,
2317 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2318 return rc;
1da177e4
LT
2319}
2320
be0d18df 2321/**
2e41e8e6 2322 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2323 * @ap: port
2324 *
2e41e8e6 2325 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2326 * detection.
2327 */
2328
2329int ata_cable_40wire(struct ata_port *ap)
2330{
2331 return ATA_CBL_PATA40;
2332}
2333
2334/**
2e41e8e6 2335 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2336 * @ap: port
2337 *
2e41e8e6 2338 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2339 * detection.
2340 */
2341
2342int ata_cable_80wire(struct ata_port *ap)
2343{
2344 return ATA_CBL_PATA80;
2345}
2346
2347/**
2348 * ata_cable_unknown - return unknown PATA cable.
2349 * @ap: port
2350 *
2351 * Helper method for drivers which have no PATA cable detection.
2352 */
2353
2354int ata_cable_unknown(struct ata_port *ap)
2355{
2356 return ATA_CBL_PATA_UNK;
2357}
2358
c88f90c3
TH
2359/**
2360 * ata_cable_ignore - return ignored PATA cable.
2361 * @ap: port
2362 *
2363 * Helper method for drivers which don't use cable type to limit
2364 * transfer mode.
2365 */
2366int ata_cable_ignore(struct ata_port *ap)
2367{
2368 return ATA_CBL_PATA_IGN;
2369}
2370
be0d18df
AC
2371/**
2372 * ata_cable_sata - return SATA cable type
2373 * @ap: port
2374 *
2375 * Helper method for drivers which have SATA cables
2376 */
2377
2378int ata_cable_sata(struct ata_port *ap)
2379{
2380 return ATA_CBL_SATA;
2381}
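
/*
 * Hedged example (added): drivers wire these helpers straight into
 * the ->cable_detect slot of their port operations; the struct name
 * is hypothetical and the remaining methods are elided.
 */
static struct ata_port_operations example_port_ops = {
	.cable_detect	= ata_cable_40wire,
	/* ... remaining methods elided ... */
};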
2382
1da177e4
LT
2383/**
2384 * ata_bus_probe - Reset and probe ATA bus
2385 * @ap: Bus to probe
2386 *
0cba632b
JG
2387 * Master ATA bus probing function. Initiates a hardware-dependent
2388 * bus reset, then attempts to identify any devices found on
2389 * the bus.
2390 *
1da177e4 2391 * LOCKING:
0cba632b 2392 * PCI/etc. bus probe sem.
1da177e4
LT
2393 *
2394 * RETURNS:
96072e69 2395 * Zero on success, negative errno otherwise.
1da177e4
LT
2396 */
2397
80289167 2398int ata_bus_probe(struct ata_port *ap)
1da177e4 2399{
28ca5c57 2400 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2401 int tries[ATA_MAX_DEVICES];
f58229f8 2402 int rc;
e82cbdb9 2403 struct ata_device *dev;
1da177e4 2404
28ca5c57 2405 ata_port_probe(ap);
c19ba8af 2406
f58229f8
TH
2407 ata_link_for_each_dev(dev, &ap->link)
2408 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2409
2410 retry:
cdeab114
TH
2411 ata_link_for_each_dev(dev, &ap->link) {
2412 /* If we issue an SRST then an ATA drive (not ATAPI)
2413 * may change configuration and be in PIO0 timing. If
2414 * we do a hard reset (or are coming from power on)
2415 * this is true for ATA or ATAPI. Until we've set a
2416 * suitable controller mode we should not touch the
2417 * bus as we may be talking too fast.
2418 */
2419 dev->pio_mode = XFER_PIO_0;
2420
2421 /* If the controller has a pio mode setup function
2422 * then use it to set the chipset to rights. Don't
2423 * touch the DMA setup as that will be dealt with when
2424 * configuring devices.
2425 */
2426 if (ap->ops->set_piomode)
2427 ap->ops->set_piomode(ap, dev);
2428 }
2429
2044470c 2430 /* reset and determine device classes */
52783c5d 2431 ap->ops->phy_reset(ap);
2061a47a 2432
f58229f8 2433 ata_link_for_each_dev(dev, &ap->link) {
52783c5d
TH
2434 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2435 dev->class != ATA_DEV_UNKNOWN)
2436 classes[dev->devno] = dev->class;
2437 else
2438 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2439
52783c5d 2440 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2441 }
1da177e4 2442
52783c5d 2443 ata_port_probe(ap);
2044470c 2444
f31f0cc2
JG
2445 /* read IDENTIFY page and configure devices. We have to do the identify
2446 specific sequence bass-ackwards so that PDIAG- is released by
2447 the slave device */
2448
f58229f8
TH
2449 ata_link_for_each_dev(dev, &ap->link) {
2450 if (tries[dev->devno])
2451 dev->class = classes[dev->devno];
ffeae418 2452
14d2bac1 2453 if (!ata_dev_enabled(dev))
ffeae418 2454 continue;
ffeae418 2455
bff04647
TH
2456 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2457 dev->id);
14d2bac1
TH
2458 if (rc)
2459 goto fail;
f31f0cc2
JG
2460 }
2461
be0d18df
AC
2462 /* Now ask for the cable type as PDIAG- should have been released */
2463 if (ap->ops->cable_detect)
2464 ap->cbl = ap->ops->cable_detect(ap);
2465
614fe29b
AC
2466 /* We may have SATA bridge glue hiding here irrespective of the
2467 reported cable types and sensed types */
2468 ata_link_for_each_dev(dev, &ap->link) {
2469 if (!ata_dev_enabled(dev))
2470 continue;
2471 /* SATA drives indicate we have a bridge. We don't know which
 2472 end of the link the bridge is on, which is a problem */
2473 if (ata_id_is_sata(dev->id))
2474 ap->cbl = ATA_CBL_SATA;
2475 }
2476
f31f0cc2
JG
2477 /* After the identify sequence we can now set up the devices. We do
2478 this in the normal order so that the user doesn't get confused */
2479
f58229f8 2480 ata_link_for_each_dev(dev, &ap->link) {
f31f0cc2
JG
2481 if (!ata_dev_enabled(dev))
2482 continue;
14d2bac1 2483
9af5c9c9 2484 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2485 rc = ata_dev_configure(dev);
9af5c9c9 2486 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2487 if (rc)
2488 goto fail;
1da177e4
LT
2489 }
2490
e82cbdb9 2491 /* configure transfer mode */
0260731f 2492 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2493 if (rc)
51713d35 2494 goto fail;
1da177e4 2495
f58229f8
TH
2496 ata_link_for_each_dev(dev, &ap->link)
2497 if (ata_dev_enabled(dev))
e82cbdb9 2498 return 0;
1da177e4 2499
e82cbdb9
TH
2500 /* no device present, disable port */
2501 ata_port_disable(ap);
96072e69 2502 return -ENODEV;
14d2bac1
TH
2503
2504 fail:
4ae72a1e
TH
2505 tries[dev->devno]--;
2506
14d2bac1
TH
2507 switch (rc) {
2508 case -EINVAL:
4ae72a1e 2509 /* eeek, something went very wrong, give up */
14d2bac1
TH
2510 tries[dev->devno] = 0;
2511 break;
4ae72a1e
TH
2512
2513 case -ENODEV:
2514 /* give it just one more chance */
2515 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2516 case -EIO:
4ae72a1e
TH
2517 if (tries[dev->devno] == 1) {
2518 /* This is the last chance, better to slow
2519 * down than lose it.
2520 */
936fd732 2521 sata_down_spd_limit(&ap->link);
4ae72a1e
TH
2522 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2523 }
14d2bac1
TH
2524 }
2525
4ae72a1e 2526 if (!tries[dev->devno])
3373efd8 2527 ata_dev_disable(dev);
ec573755 2528
14d2bac1 2529 goto retry;
1da177e4
LT
2530}
2531
2532/**
0cba632b
JG
2533 * ata_port_probe - Mark port as enabled
2534 * @ap: Port for which we indicate enablement
1da177e4 2535 *
0cba632b
JG
2536 * Modify @ap data structure such that the system
2537 * thinks that the entire port is enabled.
2538 *
cca3974e 2539 * LOCKING: host lock, or some other form of
0cba632b 2540 * serialization.
1da177e4
LT
2541 */
2542
2543void ata_port_probe(struct ata_port *ap)
2544{
198e0fed 2545 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2546}
2547
3be680b7
TH
2548/**
2549 * sata_print_link_status - Print SATA link status
936fd732 2550 * @link: SATA link to printk link status about
3be680b7
TH
2551 *
2552 * This function prints link speed and status of a SATA link.
2553 *
2554 * LOCKING:
2555 * None.
2556 */
936fd732 2557void sata_print_link_status(struct ata_link *link)
3be680b7 2558{
6d5f9732 2559 u32 sstatus, scontrol, tmp;
3be680b7 2560
936fd732 2561 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2562 return;
936fd732 2563 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2564
936fd732 2565 if (ata_link_online(link)) {
3be680b7 2566 tmp = (sstatus >> 4) & 0xf;
936fd732 2567 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2568 "SATA link up %s (SStatus %X SControl %X)\n",
2569 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2570 } else {
936fd732 2571 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2572 "SATA link down (SStatus %X SControl %X)\n",
2573 sstatus, scontrol);
3be680b7
TH
2574 }
2575}
2576
ebdfca6e
AC
2577/**
2578 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2579 * @adev: device
2580 *
 2581 * Obtain the other device on the same cable; if none is
 2582 * present, NULL is returned.
2583 */
2e9edbf8 2584
3373efd8 2585struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2586{
9af5c9c9
TH
2587 struct ata_link *link = adev->link;
2588 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2589 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2590 return NULL;
2591 return pair;
2592}
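
/*
 * Hedged example (added): PATA drivers whose two devices share
 * timing registers use ata_dev_pair() to account for the cable-mate
 * when reprogramming a channel.  The clamping policy shown is
 * illustrative only.
 */
static u8 example_channel_pio_mode(struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);

	/* shared timings must suit the slower of the two devices */
	if (pair && pair->pio_mode < adev->pio_mode)
		return pair->pio_mode;
	return adev->pio_mode;
}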
2593
1da177e4 2594/**
780a87f7
JG
2595 * ata_port_disable - Disable port.
2596 * @ap: Port to be disabled.
1da177e4 2597 *
780a87f7
JG
2598 * Modify @ap data structure such that the system
2599 * thinks that the entire port is disabled, and should
2600 * never attempt to probe or communicate with devices
2601 * on this port.
2602 *
cca3974e 2603 * LOCKING: host lock, or some other form of
780a87f7 2604 * serialization.
1da177e4
LT
2605 */
2606
2607void ata_port_disable(struct ata_port *ap)
2608{
9af5c9c9
TH
2609 ap->link.device[0].class = ATA_DEV_NONE;
2610 ap->link.device[1].class = ATA_DEV_NONE;
198e0fed 2611 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2612}
2613
1c3fae4d 2614/**
3c567b7d 2615 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2616 * @link: Link to adjust SATA spd limit for
1c3fae4d 2617 *
936fd732 2618 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2619 * function only adjusts the limit. The change must be applied
3c567b7d 2620 * using sata_set_spd().
1c3fae4d
TH
2621 *
2622 * LOCKING:
2623 * Inherited from caller.
2624 *
2625 * RETURNS:
2626 * 0 on success, negative errno on failure
2627 */
936fd732 2628int sata_down_spd_limit(struct ata_link *link)
1c3fae4d 2629{
81952c54
TH
2630 u32 sstatus, spd, mask;
2631 int rc, highbit;
1c3fae4d 2632
936fd732 2633 if (!sata_scr_valid(link))
008a7896
TH
2634 return -EOPNOTSUPP;
2635
2636 /* If SCR can be read, use it to determine the current SPD.
936fd732 2637 * If not, use cached value in link->sata_spd.
008a7896 2638 */
936fd732 2639 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
008a7896
TH
2640 if (rc == 0)
2641 spd = (sstatus >> 4) & 0xf;
2642 else
936fd732 2643 spd = link->sata_spd;
1c3fae4d 2644
936fd732 2645 mask = link->sata_spd_limit;
1c3fae4d
TH
2646 if (mask <= 1)
2647 return -EINVAL;
008a7896
TH
2648
2649 /* unconditionally mask off the highest bit */
1c3fae4d
TH
2650 highbit = fls(mask) - 1;
2651 mask &= ~(1 << highbit);
2652
008a7896
TH
2653 /* Mask off all speeds higher than or equal to the current
2654 * one. Force 1.5Gbps if current SPD is not available.
2655 */
2656 if (spd > 1)
2657 mask &= (1 << (spd - 1)) - 1;
2658 else
2659 mask &= 1;
2660
2661 /* were we already at the bottom? */
1c3fae4d
TH
2662 if (!mask)
2663 return -EINVAL;
2664
936fd732 2665 link->sata_spd_limit = mask;
1c3fae4d 2666
936fd732 2667 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
f15a1daf 2668 sata_spd_string(fls(mask)));
1c3fae4d
TH
2669
2670 return 0;
2671}
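
/*
 * Worked example (added annotation): with sata_spd_limit == 0x3
 * (1.5 and 3.0 Gbps allowed) and the link currently at 3.0 Gbps
 * (spd == 2), the highest bit is masked off and the "below current
 * SPD" filter leaves mask == 0x1, so the link is limited to
 * 1.5 Gbps on the next hardreset via sata_set_spd().
 */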
2672
936fd732 2673static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2674{
5270222f
TH
2675 struct ata_link *host_link = &link->ap->link;
2676 u32 limit, target, spd;
1c3fae4d 2677
5270222f
TH
2678 limit = link->sata_spd_limit;
2679
2680 /* Don't configure downstream link faster than upstream link.
2681 * It doesn't speed up anything and some PMPs choke on such
2682 * configuration.
2683 */
2684 if (!ata_is_host_link(link) && host_link->sata_spd)
2685 limit &= (1 << host_link->sata_spd) - 1;
2686
2687 if (limit == UINT_MAX)
2688 target = 0;
1c3fae4d 2689 else
5270222f 2690 target = fls(limit);
1c3fae4d
TH
2691
2692 spd = (*scontrol >> 4) & 0xf;
5270222f 2693 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2694
5270222f 2695 return spd != target;
1c3fae4d
TH
2696}
2697
2698/**
3c567b7d 2699 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2700 * @link: Link in question
1c3fae4d
TH
2701 *
2702 * Test whether the spd limit in SControl matches
936fd732 2703 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2704 * whether hardreset is necessary to apply SATA spd
2705 * configuration.
2706 *
2707 * LOCKING:
2708 * Inherited from caller.
2709 *
2710 * RETURNS:
2711 * 1 if SATA spd configuration is needed, 0 otherwise.
2712 */
936fd732 2713int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2714{
2715 u32 scontrol;
2716
936fd732 2717 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2718 return 1;
1c3fae4d 2719
936fd732 2720 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2721}
2722
2723/**
3c567b7d 2724 * sata_set_spd - set SATA spd according to spd limit
936fd732 2725 * @link: Link to set SATA spd for
1c3fae4d 2726 *
936fd732 2727 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2728 *
2729 * LOCKING:
2730 * Inherited from caller.
2731 *
2732 * RETURNS:
2733 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2734 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2735 */
936fd732 2736int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2737{
2738 u32 scontrol;
81952c54 2739 int rc;
1c3fae4d 2740
936fd732 2741 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2742 return rc;
1c3fae4d 2743
936fd732 2744 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2745 return 0;
2746
936fd732 2747 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2748 return rc;
2749
1c3fae4d
TH
2750 return 1;
2751}
2752
452503f9
AC
2753/*
2754 * This mode timing computation functionality is ported over from
2755 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2756 */
2757/*
b352e57d 2758 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2759 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2760 * for UDMA6, which is currently supported only by Maxtor drives.
2761 *
2762 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2763 */
2764
2765static const struct ata_timing ata_timing[] = {
70cd071e
TH
2766/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2767 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2768 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2769 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2770 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2771 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2772 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2773 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
452503f9 2774
70cd071e
TH
2775 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2776 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2777 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
452503f9 2778
70cd071e
TH
2779 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2780 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2781 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
b352e57d 2782 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
70cd071e 2783 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
452503f9
AC
2784
2785/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
70cd071e
TH
2786 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2787 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2788 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2789 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2790 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2791 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2792 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
452503f9
AC
2793
2794 { 0xFF }
2795};
2796
2dcb407e
JG
2797#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2798#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
452503f9
AC
2799
2800static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2801{
2802 q->setup = EZ(t->setup * 1000, T);
2803 q->act8b = EZ(t->act8b * 1000, T);
2804 q->rec8b = EZ(t->rec8b * 1000, T);
2805 q->cyc8b = EZ(t->cyc8b * 1000, T);
2806 q->active = EZ(t->active * 1000, T);
2807 q->recover = EZ(t->recover * 1000, T);
2808 q->cycle = EZ(t->cycle * 1000, T);
2809 q->udma = EZ(t->udma * 1000, UT);
2810}
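
/*
 * Worked example (added annotation, assuming a caller such as a
 * PATA driver that passes the bus clock period T in picoseconds,
 * e.g. T = 1000000000 / 33333 ~= 30000 for a ~33MHz clock): a 25ns
 * active time becomes EZ(25 * 1000, 30000) = ENOUGH(25000, 30000)
 * = 1 clock.  ENOUGH() always rounds up, so quantization can never
 * make a timing faster than requested.
 */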
2811
2812void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2813 struct ata_timing *m, unsigned int what)
2814{
2815 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2816 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2817 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2818 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2819 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2820 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2821 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2822 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2823}
2824
6357357c 2825const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 2826{
70cd071e
TH
2827 const struct ata_timing *t = ata_timing;
2828
2829 while (xfer_mode > t->mode)
2830 t++;
452503f9 2831
70cd071e
TH
2832 if (xfer_mode == t->mode)
2833 return t;
2834 return NULL;
452503f9
AC
2835}
2836
2837int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2838 struct ata_timing *t, int T, int UT)
2839{
2840 const struct ata_timing *s;
2841 struct ata_timing p;
2842
2843 /*
2e9edbf8 2844 * Find the mode.
75b1f2f8 2845 */
452503f9
AC
2846
2847 if (!(s = ata_timing_find_mode(speed)))
2848 return -EINVAL;
2849
75b1f2f8
AL
2850 memcpy(t, s, sizeof(*s));
2851
452503f9
AC
2852 /*
2853 * If the drive is an EIDE drive, it can tell us it needs extended
2854 * PIO/MW_DMA cycle timing.
2855 */
2856
2857 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2858 memset(&p, 0, sizeof(p));
2dcb407e 2859 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
452503f9
AC
2860 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2861 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2dcb407e 2862 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
452503f9
AC
2863 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2864 }
2865 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2866 }
2867
2868 /*
2869 * Convert the timing to bus clock counts.
2870 */
2871
75b1f2f8 2872 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2873
2874 /*
c893a3ae
RD
2875 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
 2876 * S.M.A.R.T. and some other commands. We have to ensure that the
 2877 * DMA cycle timing is slower than or equal to the fastest PIO timing.
452503f9
AC
2878 */
2879
fd3367af 2880 if (speed > XFER_PIO_6) {
452503f9
AC
2881 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2882 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2883 }
2884
2885 /*
c893a3ae 2886 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2887 */
2888
2889 if (t->act8b + t->rec8b < t->cyc8b) {
2890 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2891 t->rec8b = t->cyc8b - t->act8b;
2892 }
2893
2894 if (t->active + t->recover < t->cycle) {
2895 t->active += (t->cycle - (t->active + t->recover)) / 2;
2896 t->recover = t->cycle - t->active;
2897 }
a617c09f 2898
4f701d1e
AC
2899 /* In a few cases quantisation may produce enough errors to
 2900 leave t->cycle too low for the sum of active and recovery;
 2901 if so we must correct this */
2902 if (t->active + t->recover > t->cycle)
2903 t->cycle = t->active + t->recover;
452503f9
AC
2904
2905 return 0;
2906}
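
/*
 * Hedged caller sketch (added, modelled on PATA drivers of this
 * era; the ~33MHz clock and UT = T / 2 are assumptions, and
 * example_apply_timing() is a hypothetical chipset hook).
 */
static int example_program_pio(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;
	int T = 1000000000 / 33333;	/* bus clock period in ps */
	int UT = T / 2;

	if (ata_timing_compute(adev, adev->pio_mode, &t, T, UT))
		return -EINVAL;
	example_apply_timing(ap, adev, &t);	/* hypothetical */
	return 0;
}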
2907
a0f79b92
TH
2908/**
2909 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
2910 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
2911 * @cycle: cycle duration in ns
2912 *
2913 * Return matching xfer mode for @cycle. The returned mode is of
2914 * the transfer type specified by @xfer_shift. If @cycle is too
2915 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
 2917 * than the fastest known mode, the fastest mode is returned.
2917 *
2918 * LOCKING:
2919 * None.
2920 *
2921 * RETURNS:
2922 * Matching xfer_mode, 0xff if no match found.
2923 */
2924u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
2925{
2926 u8 base_mode = 0xff, last_mode = 0xff;
2927 const struct ata_xfer_ent *ent;
2928 const struct ata_timing *t;
2929
2930 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
2931 if (ent->shift == xfer_shift)
2932 base_mode = ent->base;
2933
2934 for (t = ata_timing_find_mode(base_mode);
2935 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
2936 unsigned short this_cycle;
2937
2938 switch (xfer_shift) {
2939 case ATA_SHIFT_PIO:
2940 case ATA_SHIFT_MWDMA:
2941 this_cycle = t->cycle;
2942 break;
2943 case ATA_SHIFT_UDMA:
2944 this_cycle = t->udma;
2945 break;
2946 default:
2947 return 0xff;
2948 }
2949
2950 if (cycle > this_cycle)
2951 break;
2952
2953 last_mode = t->mode;
2954 }
2955
2956 return last_mode;
2957}
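
/*
 * Worked example (added annotation):
 * ata_timing_cycle2mode(ATA_SHIFT_UDMA, 120) walks the table above
 * and returns XFER_UDMA_0 (120ns udma cycle); asking for a 10ns
 * cycle, faster than UDMA6's 15ns, still returns the fastest known
 * mode, XFER_UDMA_6.
 */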
2958
cf176e1a
TH
2959/**
2960 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2961 * @dev: Device to adjust xfer masks
458337db 2962 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2963 *
2964 * Adjust xfer masks of @dev downward. Note that this function
2965 * does not apply the change. Invoking ata_set_mode() afterwards
2966 * will apply the limit.
2967 *
2968 * LOCKING:
2969 * Inherited from caller.
2970 *
2971 * RETURNS:
2972 * 0 on success, negative errno on failure
2973 */
458337db 2974int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2975{
458337db 2976 char buf[32];
7dc951ae
TH
2977 unsigned long orig_mask, xfer_mask;
2978 unsigned long pio_mask, mwdma_mask, udma_mask;
458337db 2979 int quiet, highbit;
cf176e1a 2980
458337db
TH
2981 quiet = !!(sel & ATA_DNXFER_QUIET);
2982 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2983
458337db
TH
2984 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2985 dev->mwdma_mask,
2986 dev->udma_mask);
2987 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2988
458337db
TH
2989 switch (sel) {
2990 case ATA_DNXFER_PIO:
2991 highbit = fls(pio_mask) - 1;
2992 pio_mask &= ~(1 << highbit);
2993 break;
2994
2995 case ATA_DNXFER_DMA:
2996 if (udma_mask) {
2997 highbit = fls(udma_mask) - 1;
2998 udma_mask &= ~(1 << highbit);
2999 if (!udma_mask)
3000 return -ENOENT;
3001 } else if (mwdma_mask) {
3002 highbit = fls(mwdma_mask) - 1;
3003 mwdma_mask &= ~(1 << highbit);
3004 if (!mwdma_mask)
3005 return -ENOENT;
3006 }
3007 break;
3008
3009 case ATA_DNXFER_40C:
3010 udma_mask &= ATA_UDMA_MASK_40C;
3011 break;
3012
3013 case ATA_DNXFER_FORCE_PIO0:
3014 pio_mask &= 1;
3015 case ATA_DNXFER_FORCE_PIO:
3016 mwdma_mask = 0;
3017 udma_mask = 0;
3018 break;
3019
458337db
TH
3020 default:
3021 BUG();
3022 }
3023
3024 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3025
3026 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3027 return -ENOENT;
3028
3029 if (!quiet) {
3030 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3031 snprintf(buf, sizeof(buf), "%s:%s",
3032 ata_mode_string(xfer_mask),
3033 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3034 else
3035 snprintf(buf, sizeof(buf), "%s",
3036 ata_mode_string(xfer_mask));
3037
3038 ata_dev_printk(dev, KERN_WARNING,
3039 "limiting speed to %s\n", buf);
3040 }
cf176e1a
TH
3041
3042 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3043 &dev->udma_mask);
3044
cf176e1a 3045 return 0;
cf176e1a
TH
3046}
3047
3373efd8 3048static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 3049{
9af5c9c9 3050 struct ata_eh_context *ehc = &dev->link->eh_context;
4055dee7
TH
3051 const char *dev_err_whine = "";
3052 int ign_dev_err = 0;
83206a29
TH
3053 unsigned int err_mask;
3054 int rc;
1da177e4 3055
e8384607 3056 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
3057 if (dev->xfer_shift == ATA_SHIFT_PIO)
3058 dev->flags |= ATA_DFLAG_PIO;
3059
3373efd8 3060 err_mask = ata_dev_set_xfermode(dev);
2dcb407e 3061
4055dee7
TH
3062 if (err_mask & ~AC_ERR_DEV)
3063 goto fail;
3064
3065 /* revalidate */
3066 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3067 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3068 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3069 if (rc)
3070 return rc;
3071
11750a40
A
3072 /* Old CFA may refuse this command, which is just fine */
3073 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
4055dee7 3074 ign_dev_err = 1;
2dcb407e 3075
0bc2a79a
AC
3076 /* Some very old devices and some bad newer ones fail any kind of
3077 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3078 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3079 dev->pio_mode <= XFER_PIO_2)
4055dee7 3080 ign_dev_err = 1;
2dcb407e 3081
3acaf94b
AC
3082 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3083 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
c5038fc0 3084 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3acaf94b
AC
3085 dev->dma_mode == XFER_MW_DMA_0 &&
3086 (dev->id[63] >> 8) & 1)
4055dee7 3087 ign_dev_err = 1;
3acaf94b 3088
4055dee7
TH
3089 /* if the device is actually configured correctly, ignore dev err */
3090 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3091 ign_dev_err = 1;
1da177e4 3092
4055dee7
TH
3093 if (err_mask & AC_ERR_DEV) {
3094 if (!ign_dev_err)
3095 goto fail;
3096 else
3097 dev_err_whine = " (device error ignored)";
3098 }
48a8a14f 3099
23e71c3d
TH
3100 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3101 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 3102
4055dee7
TH
3103 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3104 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3105 dev_err_whine);
3106
83206a29 3107 return 0;
4055dee7
TH
3108
3109 fail:
3110 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3111 "(err_mask=0x%x)\n", err_mask);
3112 return -EIO;
1da177e4
LT
3113}
3114
1da177e4 3115/**
04351821 3116 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3117 * @link: link on which timings will be programmed
1967b7ff 3118 * @r_failed_dev: out parameter for failed device
1da177e4 3119 *
04351821
A
3120 * Standard implementation of the function used to tune and set
3121 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3122 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3123 * returned in @r_failed_dev.
780a87f7 3124 *
1da177e4 3125 * LOCKING:
0cba632b 3126 * PCI/etc. bus probe sem.
e82cbdb9
TH
3127 *
3128 * RETURNS:
3129 * 0 on success, negative errno otherwise
1da177e4 3130 */
04351821 3131
0260731f 3132int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3133{
0260731f 3134 struct ata_port *ap = link->ap;
e8e0619f 3135 struct ata_device *dev;
f58229f8 3136 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3137
a6d5a51c 3138 /* step 1: calculate xfer_mask */
f58229f8 3139 ata_link_for_each_dev(dev, link) {
7dc951ae 3140 unsigned long pio_mask, dma_mask;
b3a70601 3141 unsigned int mode_mask;
a6d5a51c 3142
e1211e3f 3143 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3144 continue;
3145
b3a70601
AC
3146 mode_mask = ATA_DMA_MASK_ATA;
3147 if (dev->class == ATA_DEV_ATAPI)
3148 mode_mask = ATA_DMA_MASK_ATAPI;
3149 else if (ata_id_is_cfa(dev->id))
3150 mode_mask = ATA_DMA_MASK_CFA;
3151
3373efd8 3152 ata_dev_xfermask(dev);
1da177e4 3153
acf356b1
TH
3154 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3155 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3156
3157 if (libata_dma_mask & mode_mask)
3158 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3159 else
3160 dma_mask = 0;
3161
acf356b1
TH
3162 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3163 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3164
4f65977d 3165 found = 1;
70cd071e 3166 if (dev->dma_mode != 0xff)
5444a6f4 3167 used_dma = 1;
a6d5a51c 3168 }
4f65977d 3169 if (!found)
e82cbdb9 3170 goto out;
a6d5a51c
TH
3171
3172 /* step 2: always set host PIO timings */
f58229f8 3173 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3174 if (!ata_dev_enabled(dev))
3175 continue;
3176
70cd071e 3177 if (dev->pio_mode == 0xff) {
f15a1daf 3178 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3179 rc = -EINVAL;
e82cbdb9 3180 goto out;
e8e0619f
TH
3181 }
3182
3183 dev->xfer_mode = dev->pio_mode;
3184 dev->xfer_shift = ATA_SHIFT_PIO;
3185 if (ap->ops->set_piomode)
3186 ap->ops->set_piomode(ap, dev);
3187 }
1da177e4 3188
a6d5a51c 3189 /* step 3: set host DMA timings */
f58229f8 3190 ata_link_for_each_dev(dev, link) {
70cd071e 3191 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
e8e0619f
TH
3192 continue;
3193
3194 dev->xfer_mode = dev->dma_mode;
3195 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3196 if (ap->ops->set_dmamode)
3197 ap->ops->set_dmamode(ap, dev);
3198 }
1da177e4
LT
3199
3200 /* step 4: update devices' xfer mode */
f58229f8 3201 ata_link_for_each_dev(dev, link) {
18d90deb 3202 /* don't update suspended devices' xfer mode */
9666f400 3203 if (!ata_dev_enabled(dev))
83206a29
TH
3204 continue;
3205
3373efd8 3206 rc = ata_dev_set_mode(dev);
5bbc53f4 3207 if (rc)
e82cbdb9 3208 goto out;
83206a29 3209 }
1da177e4 3210
e8e0619f
TH
3211 /* Record simplex status. If we selected DMA then the other
3212 * host channels are not permitted to do so.
5444a6f4 3213 */
cca3974e 3214 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3215 ap->host->simplex_claimed = ap;
5444a6f4 3216
e82cbdb9
TH
3217 out:
3218 if (rc)
3219 *r_failed_dev = dev;
3220 return rc;
1da177e4
LT
3221}
3222
1fdffbce
JG
3223/**
3224 * ata_tf_to_host - issue ATA taskfile to host controller
3225 * @ap: port to which command is being issued
3226 * @tf: ATA taskfile register set
3227 *
3228 * Issues ATA taskfile register set to ATA host controller,
3229 * with proper synchronization with interrupt handler and
3230 * other threads.
3231 *
3232 * LOCKING:
cca3974e 3233 * spin_lock_irqsave(host lock)
1fdffbce
JG
3234 */
3235
3236static inline void ata_tf_to_host(struct ata_port *ap,
3237 const struct ata_taskfile *tf)
3238{
3239 ap->ops->tf_load(ap, tf);
3240 ap->ops->exec_command(ap, tf);
3241}
3242
1da177e4
LT
3243/**
3244 * ata_busy_sleep - sleep until BSY clears, or timeout
3245 * @ap: port containing status register to be polled
3246 * @tmout_pat: impatience timeout
3247 * @tmout: overall timeout
3248 *
780a87f7
JG
3249 * Sleep until ATA Status register bit BSY clears,
3250 * or a timeout occurs.
3251 *
d1adc1bb
TH
3252 * LOCKING:
3253 * Kernel thread context (may sleep).
3254 *
3255 * RETURNS:
3256 * 0 on success, -errno otherwise.
1da177e4 3257 */
d1adc1bb
TH
3258int ata_busy_sleep(struct ata_port *ap,
3259 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
3260{
3261 unsigned long timer_start, timeout;
3262 u8 status;
3263
3264 status = ata_busy_wait(ap, ATA_BUSY, 300);
3265 timer_start = jiffies;
3266 timeout = timer_start + tmout_pat;
d1adc1bb
TH
3267 while (status != 0xff && (status & ATA_BUSY) &&
3268 time_before(jiffies, timeout)) {
1da177e4
LT
3269 msleep(50);
3270 status = ata_busy_wait(ap, ATA_BUSY, 3);
3271 }
3272
d1adc1bb 3273 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 3274 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
3275 "port is slow to respond, please be patient "
3276 "(Status 0x%x)\n", status);
1da177e4
LT
3277
3278 timeout = timer_start + tmout;
d1adc1bb
TH
3279 while (status != 0xff && (status & ATA_BUSY) &&
3280 time_before(jiffies, timeout)) {
1da177e4
LT
3281 msleep(50);
3282 status = ata_chk_status(ap);
3283 }
3284
d1adc1bb
TH
3285 if (status == 0xff)
3286 return -ENODEV;
3287
1da177e4 3288 if (status & ATA_BUSY) {
f15a1daf 3289 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
3290 "(%lu secs, Status 0x%x)\n",
3291 tmout / HZ, status);
d1adc1bb 3292 return -EBUSY;
1da177e4
LT
3293 }
3294
3295 return 0;
3296}
3297
88ff6eaf
TH
3298/**
3299 * ata_wait_after_reset - wait before checking status after reset
3300 * @ap: port containing status register to be polled
3301 * @deadline: deadline jiffies for the operation
3302 *
3303 * After reset, we need to pause a while before reading status.
 3304 * Also, certain combinations of controller and device report 0xff
3305 * for some duration (e.g. until SATA PHY is up and running)
3306 * which is interpreted as empty port in ATA world. This
3307 * function also waits for such devices to get out of 0xff
3308 * status.
3309 *
3310 * LOCKING:
3311 * Kernel thread context (may sleep).
3312 */
3313void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
3314{
3315 unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
3316
3317 if (time_before(until, deadline))
3318 deadline = until;
3319
3320 /* Spec mandates ">= 2ms" before checking status. We wait
3321 * 150ms, because that was the magic delay used for ATAPI
3322 * devices in Hale Landis's ATADRVR, for the period of time
3323 * between when the ATA command register is written, and then
3324 * status is checked. Because waiting for "a while" before
3325 * checking status is fine, post SRST, we perform this magic
3326 * delay here as well.
3327 *
3328 * Old drivers/ide uses the 2mS rule and then waits for ready.
3329 */
3330 msleep(150);
3331
3332 /* Wait for 0xff to clear. Some SATA devices take a long time
3333 * to clear 0xff after reset. For example, HHD424020F7SV00
 3334 * iVDR needs >= 800ms, while Quantum GoVault needs even more
3335 * than that.
1974e201
TH
3336 *
3337 * Note that some PATA controllers (pata_ali) explode if
3338 * status register is read more than once when there's no
3339 * device attached.
88ff6eaf 3340 */
1974e201
TH
3341 if (ap->flags & ATA_FLAG_SATA) {
3342 while (1) {
3343 u8 status = ata_chk_status(ap);
88ff6eaf 3344
1974e201
TH
3345 if (status != 0xff || time_after(jiffies, deadline))
3346 return;
88ff6eaf 3347
1974e201
TH
3348 msleep(50);
3349 }
88ff6eaf
TH
3350 }
3351}
3352
d4b2bab4
TH
3353/**
3354 * ata_wait_ready - sleep until BSY clears, or timeout
3355 * @ap: port containing status register to be polled
3356 * @deadline: deadline jiffies for the operation
3357 *
3358 * Sleep until ATA Status register bit BSY clears, or timeout
3359 * occurs.
3360 *
3361 * LOCKING:
3362 * Kernel thread context (may sleep).
3363 *
3364 * RETURNS:
3365 * 0 on success, -errno otherwise.
3366 */
3367int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3368{
3369 unsigned long start = jiffies;
3370 int warned = 0;
3371
3372 while (1) {
3373 u8 status = ata_chk_status(ap);
3374 unsigned long now = jiffies;
3375
3376 if (!(status & ATA_BUSY))
3377 return 0;
936fd732 3378 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3379 return -ENODEV;
3380 if (time_after(now, deadline))
3381 return -EBUSY;
3382
3383 if (!warned && time_after(now, start + 5 * HZ) &&
3384 (deadline - now > 3 * HZ)) {
3385 ata_port_printk(ap, KERN_WARNING,
3386 "port is slow to respond, please be patient "
3387 "(Status 0x%x)\n", status);
3388 warned = 1;
3389 }
3390
3391 msleep(50);
3392 }
3393}
3394
3395static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3396 unsigned long deadline)
1da177e4
LT
3397{
3398 struct ata_ioports *ioaddr = &ap->ioaddr;
3399 unsigned int dev0 = devmask & (1 << 0);
3400 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3401 int rc, ret = 0;
1da177e4
LT
3402
3403 /* if device 0 was found in ata_devchk, wait for its
3404 * BSY bit to clear
3405 */
d4b2bab4
TH
3406 if (dev0) {
3407 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3408 if (rc) {
3409 if (rc != -ENODEV)
3410 return rc;
3411 ret = rc;
3412 }
d4b2bab4 3413 }
1da177e4 3414
e141d999
TH
3415 /* if device 1 was found in ata_devchk, wait for register
3416 * access briefly, then wait for BSY to clear.
1da177e4 3417 */
e141d999
TH
3418 if (dev1) {
3419 int i;
1da177e4
LT
3420
3421 ap->ops->dev_select(ap, 1);
e141d999
TH
3422
3423 /* Wait for register access. Some ATAPI devices fail
3424 * to set nsect/lbal after reset, so don't waste too
3425 * much time on it. We're gonna wait for !BSY anyway.
3426 */
3427 for (i = 0; i < 2; i++) {
3428 u8 nsect, lbal;
3429
3430 nsect = ioread8(ioaddr->nsect_addr);
3431 lbal = ioread8(ioaddr->lbal_addr);
3432 if ((nsect == 1) && (lbal == 1))
3433 break;
3434 msleep(50); /* give drive a breather */
3435 }
3436
d4b2bab4 3437 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3438 if (rc) {
3439 if (rc != -ENODEV)
3440 return rc;
3441 ret = rc;
3442 }
d4b2bab4 3443 }
1da177e4
LT
3444
3445 /* is all this really necessary? */
3446 ap->ops->dev_select(ap, 0);
3447 if (dev1)
3448 ap->ops->dev_select(ap, 1);
3449 if (dev0)
3450 ap->ops->dev_select(ap, 0);
d4b2bab4 3451
9b89391c 3452 return ret;
1da177e4
LT
3453}
3454
d4b2bab4
TH
3455static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3456 unsigned long deadline)
1da177e4
LT
3457{
3458 struct ata_ioports *ioaddr = &ap->ioaddr;
3459
44877b4e 3460 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3461
3462 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3463 iowrite8(ap->ctl, ioaddr->ctl_addr);
3464 udelay(20); /* FIXME: flush */
3465 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3466 udelay(20); /* FIXME: flush */
3467 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4 3468
88ff6eaf
TH
3469 /* wait a while before checking status */
3470 ata_wait_after_reset(ap, deadline);
1da177e4 3471
2e9edbf8 3472 /* Before we perform post reset processing we want to see if
298a41ca
TH
3473 * the bus shows 0xFF because the odd clown forgets the D7
3474 * pulldown resistor.
3475 */
150981b0 3476 if (ata_chk_status(ap) == 0xFF)
9b89391c 3477 return -ENODEV;
09c7ad79 3478
d4b2bab4 3479 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3480}
3481
3482/**
3483 * ata_bus_reset - reset host port and associated ATA channel
3484 * @ap: port to reset
3485 *
3486 * This is typically the first time we actually start issuing
3487 * commands to the ATA channel. We wait for BSY to clear, then
3488 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3489 * result. Determine what devices, if any, are on the channel
3490 * by looking at the device 0/1 error register. Look at the signature
3491 * stored in each device's taskfile registers, to determine if
3492 * the device is ATA or ATAPI.
3493 *
3494 * LOCKING:
0cba632b 3495 * PCI/etc. bus probe sem.
cca3974e 3496 * Obtains host lock.
1da177e4
LT
3497 *
3498 * SIDE EFFECTS:
198e0fed 3499 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3500 */
3501
3502void ata_bus_reset(struct ata_port *ap)
3503{
9af5c9c9 3504 struct ata_device *device = ap->link.device;
1da177e4
LT
3505 struct ata_ioports *ioaddr = &ap->ioaddr;
3506 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3507 u8 err;
aec5c3c1 3508 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3509 int rc;
1da177e4 3510
44877b4e 3511 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3512
3513 /* determine if device 0/1 are present */
3514 if (ap->flags & ATA_FLAG_SATA_RESET)
3515 dev0 = 1;
3516 else {
3517 dev0 = ata_devchk(ap, 0);
3518 if (slave_possible)
3519 dev1 = ata_devchk(ap, 1);
3520 }
3521
3522 if (dev0)
3523 devmask |= (1 << 0);
3524 if (dev1)
3525 devmask |= (1 << 1);
3526
3527 /* select device 0 again */
3528 ap->ops->dev_select(ap, 0);
3529
3530 /* issue bus reset */
9b89391c
TH
3531 if (ap->flags & ATA_FLAG_SRST) {
3532 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3533 if (rc && rc != -ENODEV)
aec5c3c1 3534 goto err_out;
9b89391c 3535 }
1da177e4
LT
3536
3537 /*
3538 * determine by signature whether we have ATA or ATAPI devices
3539 */
3f19859e 3540 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
1da177e4 3541 if ((slave_possible) && (err != 0x81))
3f19859e 3542 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
1da177e4 3543
1da177e4 3544 /* is double-select really necessary? */
9af5c9c9 3545 if (device[1].class != ATA_DEV_NONE)
1da177e4 3546 ap->ops->dev_select(ap, 1);
9af5c9c9 3547 if (device[0].class != ATA_DEV_NONE)
1da177e4
LT
3548 ap->ops->dev_select(ap, 0);
3549
3550 /* if no devices were detected, disable this port */
9af5c9c9
TH
3551 if ((device[0].class == ATA_DEV_NONE) &&
3552 (device[1].class == ATA_DEV_NONE))
1da177e4
LT
3553 goto err_out;
3554
3555 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3556 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3557 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3558 }
3559
3560 DPRINTK("EXIT\n");
3561 return;
3562
3563err_out:
f15a1daf 3564 ata_port_printk(ap, KERN_ERR, "disabling port\n");
ac8869d5 3565 ata_port_disable(ap);
1da177e4
LT
3566
3567 DPRINTK("EXIT\n");
3568}
3569
d7bb4cc7 3570/**
936fd732
TH
3571 * sata_link_debounce - debounce SATA phy status
3572 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3573 * @params: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3574 * @deadline: deadline jiffies for the operation
d7bb4cc7 3575 *
936fd732 3576 * Make sure SStatus of @link reaches a stable state, determined by
d7bb4cc7
TH
3577 * holding the same value where DET is not 1 for @duration polled
3578 * every @interval, before @timeout. Timeout constrains the
d4b2bab4
TH
3579 * beginning of the stable state. Because DET gets stuck at 1 on
3580 * some controllers after hot unplugging, this function waits
d7bb4cc7
TH
3581 * until timeout and then returns 0 if DET is stable at 1.
3582 *
d4b2bab4
TH
3583 * @timeout is further limited by @deadline. The sooner of the
3584 * two is used.
3585 *
d7bb4cc7
TH
3586 * LOCKING:
3587 * Kernel thread context (may sleep)
3588 *
3589 * RETURNS:
3590 * 0 on success, -errno on failure.
3591 */
936fd732
TH
3592int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3593 unsigned long deadline)
7a7921e8 3594{
d7bb4cc7 3595 unsigned long interval_msec = params[0];
d4b2bab4
TH
3596 unsigned long duration = msecs_to_jiffies(params[1]);
3597 unsigned long last_jiffies, t;
d7bb4cc7
TH
3598 u32 last, cur;
3599 int rc;
3600
d4b2bab4
TH
3601 t = jiffies + msecs_to_jiffies(params[2]);
3602 if (time_before(t, deadline))
3603 deadline = t;
3604
936fd732 3605 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3606 return rc;
3607 cur &= 0xf;
3608
3609 last = cur;
3610 last_jiffies = jiffies;
3611
3612 while (1) {
3613 msleep(interval_msec);
936fd732 3614 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3615 return rc;
3616 cur &= 0xf;
3617
3618 /* DET stable? */
3619 if (cur == last) {
d4b2bab4 3620 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3621 continue;
3622 if (time_after(jiffies, last_jiffies + duration))
3623 return 0;
3624 continue;
3625 }
3626
3627 /* unstable, start over */
3628 last = cur;
3629 last_jiffies = jiffies;
3630
f1545154
TH
3631 /* Check deadline. If debouncing failed, return
3632 * -EPIPE to tell upper layer to lower link speed.
3633 */
d4b2bab4 3634 if (time_after(jiffies, deadline))
f1545154 3635 return -EPIPE;
d7bb4cc7
TH
3636 }
3637}
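/* Illustrative sketch (not part of libata): an EH path might debounce a
 * link by pairing one of the sata_deb_timing_* tables with a deadline a
 * few seconds out.  The 5000 ms figure is an assumption chosen for the
 * example, not a libata constant.
 */
static int example_debounce_link(struct ata_link *link)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(5000);

	return sata_link_debounce(link, sata_deb_timing_hotplug, deadline);
}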
3638
3639/**
936fd732
TH
3640 * sata_link_resume - resume SATA link
3641 * @link: ATA link to resume SATA
d7bb4cc7 3642 * @params: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3643 * @deadline: deadline jiffies for the operation
d7bb4cc7 3644 *
936fd732 3645 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3646 *
3647 * LOCKING:
3648 * Kernel thread context (may sleep)
3649 *
3650 * RETURNS:
3651 * 0 on success, -errno on failure.
3652 */
936fd732
TH
3653int sata_link_resume(struct ata_link *link, const unsigned long *params,
3654 unsigned long deadline)
d7bb4cc7
TH
3655{
3656 u32 scontrol;
81952c54
TH
3657 int rc;
3658
936fd732 3659 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3660 return rc;
7a7921e8 3661
852ee16a 3662 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3663
936fd732 3664 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3665 return rc;
7a7921e8 3666
d7bb4cc7
TH
3667 /* Some PHYs react badly if SStatus is pounded immediately
3668 * after resuming. Delay 200ms before debouncing.
3669 */
3670 msleep(200);
7a7921e8 3671
936fd732 3672 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3673}
3674
f5914a46
TH
3675/**
3676 * ata_std_prereset - prepare for reset
cc0680a5 3677 * @link: ATA link to be reset
d4b2bab4 3678 * @deadline: deadline jiffies for the operation
f5914a46 3679 *
cc0680a5 3680 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3681 * prereset makes libata abort the whole reset sequence and give up
3682 * that port, so prereset should be best-effort. It does its
3683 * best to prepare for the reset sequence but if things go wrong, it
3684 * should just whine, not fail.
f5914a46
TH
3685 *
3686 * LOCKING:
3687 * Kernel thread context (may sleep)
3688 *
3689 * RETURNS:
3690 * 0 on success, -errno otherwise.
3691 */
cc0680a5 3692int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3693{
cc0680a5 3694 struct ata_port *ap = link->ap;
936fd732 3695 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3696 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3697 int rc;
3698
31daabda 3699 /* handle link resume */
28324304 3700 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
0c88758b 3701 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
28324304
TH
3702 ehc->i.action |= ATA_EH_HARDRESET;
3703
633273a3
TH
3704 /* Some PMPs don't work with only SRST, force hardreset if PMP
3705 * is supported.
3706 */
3707 if (ap->flags & ATA_FLAG_PMP)
3708 ehc->i.action |= ATA_EH_HARDRESET;
3709
f5914a46
TH
3710 /* if we're about to do hardreset, nothing more to do */
3711 if (ehc->i.action & ATA_EH_HARDRESET)
3712 return 0;
3713
936fd732 3714 /* if SATA, resume link */
a16abc0b 3715 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3716 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3717 /* whine about phy resume failure but proceed */
3718 if (rc && rc != -EOPNOTSUPP)
cc0680a5 3719 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 3720 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3721 }
3722
3723 /* Wait for !BSY if the controller can wait for the first D2H
3724 * Reg FIS and we don't know that no device is attached.
3725 */
0c88758b 3726 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
b8cffc6a 3727 rc = ata_wait_ready(ap, deadline);
6dffaf61 3728 if (rc && rc != -ENODEV) {
cc0680a5 3729 ata_link_printk(link, KERN_WARNING, "device not ready "
b8cffc6a
TH
3730 "(errno=%d), forcing hardreset\n", rc);
3731 ehc->i.action |= ATA_EH_HARDRESET;
3732 }
3733 }
f5914a46
TH
3734
3735 return 0;
3736}
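/* Illustrative sketch (hypothetical LLDD callback, not from this file):
 * a driver-specific prereset typically performs its own preparation and
 * then falls through to the standard helper above.
 */
static int example_prereset(struct ata_link *link, unsigned long deadline)
{
	/* driver-specific preparation would go here */
	return ata_std_prereset(link, deadline);
}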
3737
c2bd5804
TH
3738/**
3739 * ata_std_softreset - reset host port via ATA SRST
cc0680a5 3740 * @link: ATA link to reset
c2bd5804 3741 * @classes: resulting classes of attached devices
d4b2bab4 3742 * @deadline: deadline jiffies for the operation
c2bd5804 3743 *
52783c5d 3744 * Reset host port using ATA SRST.
c2bd5804
TH
3745 *
3746 * LOCKING:
3747 * Kernel thread context (may sleep)
3748 *
3749 * RETURNS:
3750 * 0 on success, -errno otherwise.
3751 */
cc0680a5 3752int ata_std_softreset(struct ata_link *link, unsigned int *classes,
d4b2bab4 3753 unsigned long deadline)
c2bd5804 3754{
cc0680a5 3755 struct ata_port *ap = link->ap;
c2bd5804 3756 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3757 unsigned int devmask = 0;
3758 int rc;
c2bd5804
TH
3759 u8 err;
3760
3761 DPRINTK("ENTER\n");
3762
936fd732 3763 if (ata_link_offline(link)) {
3a39746a
TH
3764 classes[0] = ATA_DEV_NONE;
3765 goto out;
3766 }
3767
c2bd5804
TH
3768 /* determine if device 0/1 are present */
3769 if (ata_devchk(ap, 0))
3770 devmask |= (1 << 0);
3771 if (slave_possible && ata_devchk(ap, 1))
3772 devmask |= (1 << 1);
3773
c2bd5804
TH
3774 /* select device 0 again */
3775 ap->ops->dev_select(ap, 0);
3776
3777 /* issue bus reset */
3778 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3779 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c 3780 /* if link is occupied, -ENODEV too is an error */
936fd732 3781 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
cc0680a5 3782 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
d4b2bab4 3783 return rc;
c2bd5804
TH
3784 }
3785
3786 /* determine by signature whether we have ATA or ATAPI devices */
3f19859e
TH
3787 classes[0] = ata_dev_try_classify(&link->device[0],
3788 devmask & (1 << 0), &err);
c2bd5804 3789 if (slave_possible && err != 0x81)
3f19859e
TH
3790 classes[1] = ata_dev_try_classify(&link->device[1],
3791 devmask & (1 << 1), &err);
c2bd5804 3792
3a39746a 3793 out:
c2bd5804
TH
3794 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3795 return 0;
3796}
3797
3798/**
cc0680a5
TH
3799 * sata_link_hardreset - reset link via SATA phy reset
3800 * @link: link to reset
b6103f6d 3801 * @timing: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3802 * @deadline: deadline jiffies for the operation
c2bd5804 3803 *
cc0680a5 3804 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
3805 *
3806 * LOCKING:
3807 * Kernel thread context (may sleep)
3808 *
3809 * RETURNS:
3810 * 0 on success, -errno otherwise.
3811 */
cc0680a5 3812int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
d4b2bab4 3813 unsigned long deadline)
c2bd5804 3814{
852ee16a 3815 u32 scontrol;
81952c54 3816 int rc;
852ee16a 3817
c2bd5804
TH
3818 DPRINTK("ENTER\n");
3819
936fd732 3820 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
3821 /* SATA spec says nothing about how to reconfigure
3822 * spd. To be on the safe side, turn off phy during
3823 * reconfiguration. This works for at least ICH7 AHCI
3824 * and Sil3124.
3825 */
936fd732 3826 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3827 goto out;
81952c54 3828
a34b6fc0 3829 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 3830
936fd732 3831 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 3832 goto out;
1c3fae4d 3833
936fd732 3834 sata_set_spd(link);
1c3fae4d
TH
3835 }
3836
3837 /* issue phy wake/reset */
936fd732 3838 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3839 goto out;
81952c54 3840
852ee16a 3841 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 3842
936fd732 3843 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 3844 goto out;
c2bd5804 3845
1c3fae4d 3846 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3847 * 10.4.2 says at least 1 ms.
3848 */
3849 msleep(1);
3850
936fd732
TH
3851 /* bring link back */
3852 rc = sata_link_resume(link, timing, deadline);
b6103f6d
TH
3853 out:
3854 DPRINTK("EXIT, rc=%d\n", rc);
3855 return rc;
3856}
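/* Illustrative sketch of the SControl arithmetic used above: DET lives
 * in bits 3:0 and IPM in bits 11:8, so 0x301 means "issue COMRESET,
 * disallow partial/slumber" while preserving the SPD limit in bits 7:4.
 */
static u32 example_scontrol_comreset(u32 scontrol)
{
	/* keep SPD, request COMRESET (DET = 1), IPM = 3 */
	return (scontrol & 0x0f0) | 0x301;
}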
3857
3858/**
3859 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 3860 * @link: link to reset
b6103f6d 3861 * @class: resulting class of attached device
d4b2bab4 3862 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3863 *
3864 * SATA phy-reset host port using DET bits of SControl register,
3865 * wait for !BSY and classify the attached device.
3866 *
3867 * LOCKING:
3868 * Kernel thread context (may sleep)
3869 *
3870 * RETURNS:
3871 * 0 on success, -errno otherwise.
3872 */
cc0680a5 3873int sata_std_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 3874 unsigned long deadline)
b6103f6d 3875{
cc0680a5 3876 struct ata_port *ap = link->ap;
936fd732 3877 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
b6103f6d
TH
3878 int rc;
3879
3880 DPRINTK("ENTER\n");
3881
3882 /* do hardreset */
cc0680a5 3883 rc = sata_link_hardreset(link, timing, deadline);
b6103f6d 3884 if (rc) {
cc0680a5 3885 ata_link_printk(link, KERN_ERR,
b6103f6d
TH
3886 "COMRESET failed (errno=%d)\n", rc);
3887 return rc;
3888 }
c2bd5804 3889
c2bd5804 3890 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 3891 if (ata_link_offline(link)) {
c2bd5804
TH
3892 *class = ATA_DEV_NONE;
3893 DPRINTK("EXIT, link offline\n");
3894 return 0;
3895 }
3896
88ff6eaf
TH
3897 /* wait a while before checking status */
3898 ata_wait_after_reset(ap, deadline);
34fee227 3899
633273a3
TH
3900 /* If PMP is supported, we have to do follow-up SRST. Note
3901 * that some PMPs don't send D2H Reg FIS after hardreset at
3902 * all if the first port is empty. Wait for it just for a
3903 * second and request follow-up SRST.
3904 */
3905 if (ap->flags & ATA_FLAG_PMP) {
3906 ata_wait_ready(ap, jiffies + HZ);
3907 return -EAGAIN;
3908 }
3909
d4b2bab4 3910 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3911 /* link occupied, -ENODEV too is an error */
3912 if (rc) {
cc0680a5 3913 ata_link_printk(link, KERN_ERR,
d4b2bab4
TH
3914 "COMRESET failed (errno=%d)\n", rc);
3915 return rc;
c2bd5804
TH
3916 }
3917
3a39746a
TH
3918 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3919
3f19859e 3920 *class = ata_dev_try_classify(link->device, 1, NULL);
c2bd5804
TH
3921
3922 DPRINTK("EXIT, class=%u\n", *class);
3923 return 0;
3924}
3925
3926/**
3927 * ata_std_postreset - standard postreset callback
cc0680a5 3928 * @link: the target ata_link
c2bd5804
TH
3929 * @classes: classes of attached devices
3930 *
3931 * This function is invoked after a successful reset. Note that
3932 * the device might have been reset more than once using
3933 * different reset methods before postreset is invoked.
c2bd5804 3934 *
c2bd5804
TH
3935 * LOCKING:
3936 * Kernel thread context (may sleep)
3937 */
cc0680a5 3938void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3939{
cc0680a5 3940 struct ata_port *ap = link->ap;
dc2b3515
TH
3941 u32 serror;
3942
c2bd5804
TH
3943 DPRINTK("ENTER\n");
3944
c2bd5804 3945 /* print link status */
936fd732 3946 sata_print_link_status(link);
c2bd5804 3947
dc2b3515 3948 /* clear SError */
936fd732
TH
3949 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3950 sata_scr_write(link, SCR_ERROR, serror);
f7fe7ad4 3951 link->eh_info.serror = 0;
dc2b3515 3952
c2bd5804
TH
3953 /* is double-select really necessary? */
3954 if (classes[0] != ATA_DEV_NONE)
3955 ap->ops->dev_select(ap, 1);
3956 if (classes[1] != ATA_DEV_NONE)
3957 ap->ops->dev_select(ap, 0);
3958
3a39746a
TH
3959 /* bail out if no device is present */
3960 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3961 DPRINTK("EXIT, no device\n");
3962 return;
3963 }
3964
3965 /* set up device control */
0d5ff566
TH
3966 if (ap->ioaddr.ctl_addr)
3967 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3968
3969 DPRINTK("EXIT\n");
3970}
3971
623a3128
TH
3972/**
3973 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3974 * @dev: device to compare against
3975 * @new_class: class of the new device
3976 * @new_id: IDENTIFY page of the new device
3977 *
3978 * Compare @new_class and @new_id against @dev and determine
3979 * whether @dev is the device indicated by @new_class and
3980 * @new_id.
3981 *
3982 * LOCKING:
3983 * None.
3984 *
3985 * RETURNS:
3986 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3987 */
3373efd8
TH
3988static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3989 const u16 *new_id)
623a3128
TH
3990{
3991 const u16 *old_id = dev->id;
a0cf733b
TH
3992 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3993 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3994
3995 if (dev->class != new_class) {
f15a1daf
TH
3996 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3997 dev->class, new_class);
623a3128
TH
3998 return 0;
3999 }
4000
a0cf733b
TH
4001 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4002 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4003 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4004 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
4005
4006 if (strcmp(model[0], model[1])) {
f15a1daf
TH
4007 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4008 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
4009 return 0;
4010 }
4011
4012 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
4013 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4014 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
4015 return 0;
4016 }
4017
623a3128
TH
4018 return 1;
4019}
4020
4021/**
fe30911b 4022 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 4023 * @dev: target ATA device
bff04647 4024 * @readid_flags: read ID flags
623a3128
TH
4025 *
4026 * Re-read IDENTIFY page and make sure @dev is still attached to
4027 * the port.
4028 *
4029 * LOCKING:
4030 * Kernel thread context (may sleep)
4031 *
4032 * RETURNS:
4033 * 0 on success, negative errno otherwise
4034 */
fe30911b 4035int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 4036{
5eb45c02 4037 unsigned int class = dev->class;
9af5c9c9 4038 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
4039 int rc;
4040
fe635c7e 4041 /* read ID data */
bff04647 4042 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 4043 if (rc)
fe30911b 4044 return rc;
623a3128
TH
4045
4046 /* is the device still there? */
fe30911b
TH
4047 if (!ata_dev_same_device(dev, class, id))
4048 return -ENODEV;
623a3128 4049
fe635c7e 4050 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4051 return 0;
4052}
4053
4054/**
4055 * ata_dev_revalidate - Revalidate ATA device
4056 * @dev: device to revalidate
422c9daa 4057 * @new_class: new class code
fe30911b
TH
4058 * @readid_flags: read ID flags
4059 *
4060 * Re-read IDENTIFY page, make sure @dev is still attached to the
4061 * port and reconfigure it according to the new IDENTIFY page.
4062 *
4063 * LOCKING:
4064 * Kernel thread context (may sleep)
4065 *
4066 * RETURNS:
4067 * 0 on success, negative errno otherwise
4068 */
422c9daa
TH
4069int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4070 unsigned int readid_flags)
fe30911b 4071{
6ddcd3b0 4072 u64 n_sectors = dev->n_sectors;
fe30911b
TH
4073 int rc;
4074
4075 if (!ata_dev_enabled(dev))
4076 return -ENODEV;
4077
422c9daa
TH
4078 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4079 if (ata_class_enabled(new_class) &&
4080 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4081 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4082 dev->class, new_class);
4083 rc = -ENODEV;
4084 goto fail;
4085 }
4086
fe30911b
TH
4087 /* re-read ID */
4088 rc = ata_dev_reread_id(dev, readid_flags);
4089 if (rc)
4090 goto fail;
623a3128
TH
4091
4092 /* configure device according to the new ID */
efdaedc4 4093 rc = ata_dev_configure(dev);
6ddcd3b0
TH
4094 if (rc)
4095 goto fail;
4096
4097 /* verify n_sectors hasn't changed */
b54eebd6
TH
4098 if (dev->class == ATA_DEV_ATA && n_sectors &&
4099 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
4100 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4101 "%llu != %llu\n",
4102 (unsigned long long)n_sectors,
4103 (unsigned long long)dev->n_sectors);
8270bec4
TH
4104
4105 /* restore original n_sectors */
4106 dev->n_sectors = n_sectors;
4107
6ddcd3b0
TH
4108 rc = -ENODEV;
4109 goto fail;
4110 }
4111
4112 return 0;
623a3128
TH
4113
4114 fail:
f15a1daf 4115 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
4116 return rc;
4117}
4118
6919a0a6
AC
4119struct ata_blacklist_entry {
4120 const char *model_num;
4121 const char *model_rev;
4122 unsigned long horkage;
4123};
4124
4125static const struct ata_blacklist_entry ata_device_blacklist [] = {
4126 /* Devices with DMA related problems under Linux */
4127 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4128 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4129 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4130 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4131 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4132 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4133 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4134 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4135 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4136 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4137 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4138 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4139 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4140 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4141 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4142 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4143 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4144 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4145 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4146 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4147 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4148 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4149 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4150 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4151 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4152 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4153 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4154 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 4155 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 4156 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3af9a77a
TH
4157 /* Odd clown on sil3726/4726 PMPs */
4158 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
4159 ATA_HORKAGE_SKIP_PM },
6919a0a6 4160
18d6e9d5 4161 /* Weird ATAPI devices */
40a1d531 4162 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 4163
6919a0a6
AC
4164 /* Devices we expect to fail diagnostics */
4165
4166 /* Devices where NCQ should be avoided */
4167 /* NCQ is slow */
2dcb407e 4168 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
459ad688 4169 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
09125ea6
TH
4170 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4171 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 4172 /* NCQ is broken */
539cc7c7 4173 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 4174 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
da6f0ec2 4175 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
e41bd3e8 4176 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
539cc7c7 4177
36e337d0
RH
4178 /* Blacklist entries taken from Silicon Image 3124/3132
4179 Windows driver .inf file - also several Linux problem reports */
4180 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4181 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4182 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6 4183
16c55b03
TH
4184 /* devices which puke on READ_NATIVE_MAX */
4185 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4186 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4187 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4188 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 4189
93328e11
AC
4190 /* Devices which report 1 sector over size HPA */
4191 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4192 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
b152fcd3 4193 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
93328e11 4194
6bbfd53d
AC
4195 /* Devices which get the IVB wrong */
4196 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4197 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
e9f33406
PM
4198 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4199 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4200 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
6bbfd53d 4201
6919a0a6
AC
4202 /* End Marker */
4203 { }
1da177e4 4204};
2e9edbf8 4205
741b7763 4206static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
539cc7c7
JG
4207{
4208 const char *p;
4209 int len;
4210
4211 /*
4212 * check for trailing wildcard: *\0
4213 */
4214 p = strchr(patt, wildchar);
4215 if (p && ((*(p + 1)) == 0))
4216 len = p - patt;
317b50b8 4217 else {
539cc7c7 4218 len = strlen(name);
317b50b8
AP
4219 if (!len) {
4220 if (!*patt)
4221 return 0;
4222 return -1;
4223 }
4224 }
539cc7c7
JG
4225
4226 return strncmp(patt, name, len);
4227}
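/* Illustrative usage (hypothetical values): a trailing '*' makes the
 * comparison a prefix match, so the blacklist pattern "Maxtor *"
 * matches any model string beginning with "Maxtor ".
 */
static int example_pattern_match(void)
{
	/* returns 0 (match): only the 7 bytes before '*' are compared */
	return strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*');
}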
4228
75683fe7 4229static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4230{
8bfa79fc
TH
4231 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4232 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4233 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4234
8bfa79fc
TH
4235 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4236 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4237
6919a0a6 4238 while (ad->model_num) {
539cc7c7 4239 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4240 if (ad->model_rev == NULL)
4241 return ad->horkage;
539cc7c7 4242 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4243 return ad->horkage;
f4b15fef 4244 }
6919a0a6 4245 ad++;
f4b15fef 4246 }
1da177e4
LT
4247 return 0;
4248}
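/* Illustrative sketch: callers cache the returned horkage bits in
 * dev->horkage and test them with simple mask operations, e.g. to see
 * whether NCQ must be avoided for a device.
 */
static int example_device_noncq(const struct ata_device *dev)
{
	return (dev->horkage & ATA_HORKAGE_NONCQ) != 0;
}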
4249
6919a0a6
AC
4250static int ata_dma_blacklisted(const struct ata_device *dev)
4251{
4252 /* We don't support polling DMA.
4253 * DMA-blacklist ATAPI devices with CDB-intr (and use PIO)
4254 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4255 */
9af5c9c9 4256 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4257 (dev->flags & ATA_DFLAG_CDB_INTR))
4258 return 1;
75683fe7 4259 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4260}
4261
6bbfd53d
AC
4262/**
4263 * ata_is_40wire - check drive side detection
4264 * @dev: device
4265 *
4266 * Perform drive side detection decoding, allowing for device vendors
4267 * who can't follow the documentation.
4268 */
4269
4270static int ata_is_40wire(struct ata_device *dev)
4271{
4272 if (dev->horkage & ATA_HORKAGE_IVB)
4273 return ata_drive_40wire_relaxed(dev->id);
4274 return ata_drive_40wire(dev->id);
4275}
4276
a6d5a51c
TH
4277/**
4278 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4279 * @dev: Device to compute xfermask for
4280 *
acf356b1
TH
4281 * Compute supported xfermask of @dev and store it in
4282 * dev->*_mask. This function is responsible for applying all
4283 * known limits including host controller limits, device
4284 * blacklist, etc...
a6d5a51c
TH
4285 *
4286 * LOCKING:
4287 * None.
a6d5a51c 4288 */
3373efd8 4289static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4290{
9af5c9c9
TH
4291 struct ata_link *link = dev->link;
4292 struct ata_port *ap = link->ap;
cca3974e 4293 struct ata_host *host = ap->host;
a6d5a51c 4294 unsigned long xfer_mask;
1da177e4 4295
37deecb5 4296 /* controller modes available */
565083e1
TH
4297 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4298 ap->mwdma_mask, ap->udma_mask);
4299
8343f889 4300 /* drive modes available */
37deecb5
TH
4301 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4302 dev->mwdma_mask, dev->udma_mask);
4303 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4304
b352e57d
AC
4305 /*
4306 * CFA Advanced TrueIDE timings are not allowed on a shared
4307 * cable
4308 */
4309 if (ata_dev_pair(dev)) {
4310 /* No PIO5 or PIO6 */
4311 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4312 /* No MWDMA3 or MWDMA 4 */
4313 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4314 }
4315
37deecb5
TH
4316 if (ata_dma_blacklisted(dev)) {
4317 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
4318 ata_dev_printk(dev, KERN_WARNING,
4319 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4320 }
a6d5a51c 4321
14d66ab7 4322 if ((host->flags & ATA_HOST_SIMPLEX) &&
2dcb407e 4323 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
4324 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4325 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4326 "other device, disabling DMA\n");
5444a6f4 4327 }
565083e1 4328
e424675f
JG
4329 if (ap->flags & ATA_FLAG_NO_IORDY)
4330 xfer_mask &= ata_pio_mask_no_iordy(dev);
4331
5444a6f4 4332 if (ap->ops->mode_filter)
a76b62ca 4333 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4334
8343f889
RH
4335 /* Apply cable rule here. Don't apply it early because when
4336 * we handle hot plug the cable type can itself change.
4337 * Check this last so that we know if the transfer rate was
4338 * solely limited by the cable.
4339 * Unknown or 80 wire cables reported host side are checked
4340 * drive side as well. Cases where we know a 40wire cable
4341 * is used safely for 80 are not checked here.
4342 */
4343 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4344 /* UDMA/44 or higher would be available */
2dcb407e 4345 if ((ap->cbl == ATA_CBL_PATA40) ||
6bbfd53d 4346 (ata_is_40wire(dev) &&
2dcb407e
JG
4347 (ap->cbl == ATA_CBL_PATA_UNK ||
4348 ap->cbl == ATA_CBL_PATA80))) {
4349 ata_dev_printk(dev, KERN_WARNING,
8343f889
RH
4350 "limited to UDMA/33 due to 40-wire cable\n");
4351 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4352 }
4353
565083e1
TH
4354 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4355 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4356}
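/* Illustrative sketch of the packed-mask idiom above: each transfer
 * mode is one bit, shifted into a per-type range, so limiting a device
 * to UDMA/33 means clearing UDMA3 (UDMA/44) and above.
 */
static unsigned long example_cap_udma33(unsigned long xfer_mask)
{
	return xfer_mask & ~(0xF8 << ATA_SHIFT_UDMA);
}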
4357
1da177e4
LT
4358/**
4359 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4360 * @dev: Device to which command will be sent
4361 *
780a87f7
JG
4362 * Issue SET FEATURES - XFER MODE command to device @dev
4363 * on port @ap.
4364 *
1da177e4 4365 * LOCKING:
0cba632b 4366 * PCI/etc. bus probe sem.
83206a29
TH
4367 *
4368 * RETURNS:
4369 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4370 */
4371
3373efd8 4372static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4373{
a0123703 4374 struct ata_taskfile tf;
83206a29 4375 unsigned int err_mask;
1da177e4
LT
4376
4377 /* set up set-features taskfile */
4378 DPRINTK("set features - xfer mode\n");
4379
464cf177
TH
4380 /* Some controllers and ATAPI devices show flaky interrupt
4381 * behavior after setting xfer mode. Use polling instead.
4382 */
3373efd8 4383 ata_tf_init(dev, &tf);
a0123703
TH
4384 tf.command = ATA_CMD_SET_FEATURES;
4385 tf.feature = SETFEATURES_XFER;
464cf177 4386 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703 4387 tf.protocol = ATA_PROT_NODATA;
b9f8ab2d 4388 /* If we are using IORDY we must send the mode setting command */
11b7becc
JG
4389 if (ata_pio_need_iordy(dev))
4390 tf.nsect = dev->xfer_mode;
b9f8ab2d
AC
4391 /* If the device has IORDY and the controller does not - turn it off */
4392 else if (ata_id_has_iordy(dev->id))
11b7becc 4393 tf.nsect = 0x01;
b9f8ab2d
AC
4394 else /* In the ancient relic department - skip all of this */
4395 return 0;
1da177e4 4396
2b789108 4397 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4398
4399 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4400 return err_mask;
4401}
9f45cbd3 4402/**
218f3d30 4403 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4404 * @dev: Device to which command will be sent
4405 * @enable: Whether to enable or disable the feature
218f3d30 4406 * @feature: The sector count value that selects the feature to set
9f45cbd3
KCA
4407 *
4408 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4409 * on port @ap, with the sector count set to @feature
9f45cbd3
KCA
4410 *
4411 * LOCKING:
4412 * PCI/etc. bus probe sem.
4413 *
4414 * RETURNS:
4415 * 0 on success, AC_ERR_* mask otherwise.
4416 */
218f3d30
JG
4417static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4418 u8 feature)
9f45cbd3
KCA
4419{
4420 struct ata_taskfile tf;
4421 unsigned int err_mask;
4422
4423 /* set up set-features taskfile */
4424 DPRINTK("set features - SATA features\n");
4425
4426 ata_tf_init(dev, &tf);
4427 tf.command = ATA_CMD_SET_FEATURES;
4428 tf.feature = enable;
4429 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4430 tf.protocol = ATA_PROT_NODATA;
218f3d30 4431 tf.nsect = feature;
9f45cbd3 4432
2b789108 4433 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4434
83206a29
TH
4435 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4436 return err_mask;
1da177e4
LT
4437}
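/* Illustrative sketch (assumption: SETFEATURES_SATA_ENABLE and SATA_AN
 * are the <linux/ata.h> constants of this era): enabling SATA
 * Asynchronous Notification would be a single call to the helper above.
 */
static unsigned int example_enable_an(struct ata_device *dev)
{
	return ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);
}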
4438
8bf62ece
AL
4439/**
4440 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4441 * @dev: Device to which command will be sent
e2a7f77a
RD
4442 * @heads: Number of heads (taskfile parameter)
4443 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4444 *
4445 * LOCKING:
6aff8f1f
TH
4446 * Kernel thread context (may sleep)
4447 *
4448 * RETURNS:
4449 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4450 */
3373efd8
TH
4451static unsigned int ata_dev_init_params(struct ata_device *dev,
4452 u16 heads, u16 sectors)
8bf62ece 4453{
a0123703 4454 struct ata_taskfile tf;
6aff8f1f 4455 unsigned int err_mask;
8bf62ece
AL
4456
4457 /* Number of sectors per track 1-255. Number of heads 1-16 */
4458 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4459 return AC_ERR_INVALID;
8bf62ece
AL
4460
4461 /* set up init dev params taskfile */
4462 DPRINTK("init dev params \n");
4463
3373efd8 4464 ata_tf_init(dev, &tf);
a0123703
TH
4465 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4466 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4467 tf.protocol = ATA_PROT_NODATA;
4468 tf.nsect = sectors;
4469 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4470
2b789108 4471 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4472 /* A clean abort indicates an original or just out of spec drive
4473 and we should continue as we issue the setup based on the
4474 drive reported working geometry */
4475 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4476 err_mask = 0;
8bf62ece 4477
6aff8f1f
TH
4478 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4479 return err_mask;
8bf62ece
AL
4480}
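/* Illustrative: the device register encodes "heads - 1" in its low
 * nibble, so the legacy CHS maximum of 16 heads is programmed as 0x0f.
 */
static u8 example_init_params_head_field(u16 heads)
{
	return (heads - 1) & 0x0f;
}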
4481
1da177e4 4482/**
0cba632b
JG
4483 * ata_sg_clean - Unmap DMA memory associated with command
4484 * @qc: Command containing DMA memory to be released
4485 *
4486 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4487 *
4488 * LOCKING:
cca3974e 4489 * spin_lock_irqsave(host lock)
1da177e4 4490 */
70e6ad0c 4491void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4492{
4493 struct ata_port *ap = qc->ap;
ff2aeb1e 4494 struct scatterlist *sg = qc->sg;
1da177e4 4495 int dir = qc->dma_dir;
cedc9a47 4496 void *pad_buf = NULL;
1da177e4 4497
a4631474 4498 WARN_ON(sg == NULL);
1da177e4 4499
ff2aeb1e 4500 VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem);
1da177e4 4501
cedc9a47
JG
4502 /* if we padded the buffer out to 32-bit bound, and data
4503 * xfer direction is from-device, we must copy from the
4504 * pad buffer back into the supplied buffer
4505 */
4506 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4507 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4508
ff2aeb1e
TH
4509 if (qc->mapped_n_elem)
4510 dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
001102d7 4511 /* restore last sg */
ff2aeb1e
TH
4512 if (qc->last_sg)
4513 *qc->last_sg = qc->saved_last_sg;
001102d7 4514 if (pad_buf) {
ff2aeb1e 4515 struct scatterlist *psg = &qc->extra_sg[1];
001102d7
TH
4516 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4517 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4518 kunmap_atomic(addr, KM_IRQ0);
cedc9a47 4519 }
1da177e4
LT
4520
4521 qc->flags &= ~ATA_QCFLAG_DMAMAP;
ff2aeb1e 4522 qc->sg = NULL;
1da177e4
LT
4523}
4524
4525/**
4526 * ata_fill_sg - Fill PCI IDE PRD table
4527 * @qc: Metadata associated with taskfile to be transferred
4528 *
780a87f7
JG
4529 * Fill PCI IDE PRD (scatter-gather) table with segments
4530 * associated with the current disk command.
4531 *
1da177e4 4532 * LOCKING:
cca3974e 4533 * spin_lock_irqsave(host lock)
1da177e4
LT
4534 *
4535 */
4536static void ata_fill_sg(struct ata_queued_cmd *qc)
4537{
1da177e4 4538 struct ata_port *ap = qc->ap;
cedc9a47 4539 struct scatterlist *sg;
ff2aeb1e 4540 unsigned int si, pi;
1da177e4 4541
ff2aeb1e
TH
4542 pi = 0;
4543 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1da177e4
LT
4544 u32 addr, offset;
4545 u32 sg_len, len;
4546
4547 /* determine if physical DMA addr spans 64K boundary.
4548 * Note h/w doesn't support 64-bit, so we unconditionally
4549 * truncate dma_addr_t to u32.
4550 */
4551 addr = (u32) sg_dma_address(sg);
4552 sg_len = sg_dma_len(sg);
4553
4554 while (sg_len) {
4555 offset = addr & 0xffff;
4556 len = sg_len;
4557 if ((offset + sg_len) > 0x10000)
4558 len = 0x10000 - offset;
4559
ff2aeb1e
TH
4560 ap->prd[pi].addr = cpu_to_le32(addr);
4561 ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
4562 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
1da177e4 4563
ff2aeb1e 4564 pi++;
1da177e4
LT
4565 sg_len -= len;
4566 addr += len;
4567 }
4568 }
4569
ff2aeb1e 4570 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1da177e4 4571}
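/* Illustrative sketch of the 64K-boundary split above: a PRD entry may
 * not cross a 64K boundary, so a segment starting at 0xFFF0 with length
 * 0x40 is emitted as two PRDs of 0x10 and 0x30 bytes.
 */
static u32 example_prd_first_chunk(u32 addr, u32 sg_len)
{
	u32 offset = addr & 0xffff;

	return (offset + sg_len) > 0x10000 ? 0x10000 - offset : sg_len;
}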
b9a4197e 4572
d26fc955
AC
4573/**
4574 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4575 * @qc: Metadata associated with taskfile to be transferred
4576 *
4577 * Fill PCI IDE PRD (scatter-gather) table with segments
4578 * associated with the current disk command. Perform the fill
4579 * so that we avoid writing any length 64K records for
4580 * controllers that don't follow the spec.
4581 *
4582 * LOCKING:
4583 * spin_lock_irqsave(host lock)
4584 *
4585 */
4586static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4587{
4588 struct ata_port *ap = qc->ap;
4589 struct scatterlist *sg;
ff2aeb1e 4590 unsigned int si, pi;
d26fc955 4591
ff2aeb1e
TH
4592 pi = 0;
4593 for_each_sg(qc->sg, sg, qc->n_elem, si) {
d26fc955
AC
4594 u32 addr, offset;
4595 u32 sg_len, len, blen;
4596
2dcb407e 4597 /* determine if physical DMA addr spans 64K boundary.
d26fc955
AC
4598 * Note h/w doesn't support 64-bit, so we unconditionally
4599 * truncate dma_addr_t to u32.
4600 */
4601 addr = (u32) sg_dma_address(sg);
4602 sg_len = sg_dma_len(sg);
4603
4604 while (sg_len) {
4605 offset = addr & 0xffff;
4606 len = sg_len;
4607 if ((offset + sg_len) > 0x10000)
4608 len = 0x10000 - offset;
4609
4610 blen = len & 0xffff;
ff2aeb1e 4611 ap->prd[pi].addr = cpu_to_le32(addr);
d26fc955
AC
4612 if (blen == 0) {
4613 /* Some PATA chipsets like the CS5530 can't
4614 cope with 0x0000 meaning 64K as the spec says */
ff2aeb1e 4615 ap->prd[pi].flags_len = cpu_to_le32(0x8000);
d26fc955 4616 blen = 0x8000;
ff2aeb1e 4617 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
d26fc955 4618 }
ff2aeb1e
TH
4619 ap->prd[pi].flags_len = cpu_to_le32(blen);
4620 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
d26fc955 4621
ff2aeb1e 4622 pi++;
d26fc955
AC
4623 sg_len -= len;
4624 addr += len;
4625 }
4626 }
4627
ff2aeb1e 4628 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
d26fc955
AC
4629}
4630
1da177e4
LT
4631/**
4632 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4633 * @qc: Metadata associated with taskfile to check
4634 *
780a87f7
JG
4635 * Allow low-level driver to filter ATA PACKET commands, returning
4636 * a status indicating whether or not it is OK to use DMA for the
4637 * supplied PACKET command.
4638 *
1da177e4 4639 * LOCKING:
cca3974e 4640 * spin_lock_irqsave(host lock)
0cba632b 4641 *
1da177e4
LT
4642 * RETURNS: 0 when ATAPI DMA can be used
4643 * nonzero otherwise
4644 */
4645int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4646{
4647 struct ata_port *ap = qc->ap;
b9a4197e
TH
4648
4649 /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4650 * few ATAPI devices choke on such DMA requests.
4651 */
4652 if (unlikely(qc->nbytes & 15))
4653 return 1;
6f23a31d 4654
1da177e4 4655 if (ap->ops->check_atapi_dma)
b9a4197e 4656 return ap->ops->check_atapi_dma(qc);
1da177e4 4657
b9a4197e 4658 return 0;
1da177e4 4659}
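/* Illustrative: "multiple of 16 bytes" is tested above with a cheap
 * mask rather than a modulo; any of the low four bits set means a
 * misaligned length.
 */
static int example_atapi_dma_len_ok(unsigned int nbytes)
{
	return (nbytes & 15) == 0;
}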
b9a4197e 4660
140b5e59
TH
4661/**
4662 * atapi_qc_may_overflow - Check whether data transfer may overflow
4663 * @qc: ATA command in question
4664 *
4665 * ATAPI commands which transfer variable length data to the host
4666 * might overflow due to application error or hardware bug. This
4667 * function checks whether overflow should be drained and ignored
4668 * for @qc.
4669 *
4670 * LOCKING:
4671 * None.
4672 *
4673 * RETURNS:
4674 * 1 if @qc may overflow; otherwise, 0.
4675 */
4676static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
4677{
0dc36888
TH
4678 if (qc->tf.protocol != ATAPI_PROT_PIO &&
4679 qc->tf.protocol != ATAPI_PROT_DMA)
140b5e59
TH
4680 return 0;
4681
4682 if (qc->tf.flags & ATA_TFLAG_WRITE)
4683 return 0;
4684
4685 switch (qc->cdb[0]) {
4686 case READ_10:
4687 case READ_12:
4688 case WRITE_10:
4689 case WRITE_12:
4690 case GPCMD_READ_CD:
4691 case GPCMD_READ_CD_MSF:
4692 return 0;
4693 }
4694
4695 return 1;
4696}
4697
31cc23b3
TH
4698/**
4699 * ata_std_qc_defer - Check whether a qc needs to be deferred
4700 * @qc: ATA command in question
4701 *
4702 * Non-NCQ commands cannot run with any other command, NCQ or
4703 * not. As the upper layer only knows the queue depth, we are
4704 * responsible for maintaining exclusion. This function checks
4705 * whether a new command @qc can be issued.
4706 *
4707 * LOCKING:
4708 * spin_lock_irqsave(host lock)
4709 *
4710 * RETURNS:
4711 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4712 */
4713int ata_std_qc_defer(struct ata_queued_cmd *qc)
4714{
4715 struct ata_link *link = qc->dev->link;
4716
4717 if (qc->tf.protocol == ATA_PROT_NCQ) {
4718 if (!ata_tag_valid(link->active_tag))
4719 return 0;
4720 } else {
4721 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4722 return 0;
4723 }
4724
4725 return ATA_DEFER_LINK;
4726}
4727
1da177e4
LT
4728/**
4729 * ata_qc_prep - Prepare taskfile for submission
4730 * @qc: Metadata associated with taskfile to be prepared
4731 *
780a87f7
JG
4732 * Prepare ATA taskfile for submission.
4733 *
1da177e4 4734 * LOCKING:
cca3974e 4735 * spin_lock_irqsave(host lock)
1da177e4
LT
4736 */
4737void ata_qc_prep(struct ata_queued_cmd *qc)
4738{
4739 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4740 return;
4741
4742 ata_fill_sg(qc);
4743}
4744
d26fc955
AC
4745/**
4746 * ata_dumb_qc_prep - Prepare taskfile for submission
4747 * @qc: Metadata associated with taskfile to be prepared
4748 *
4749 * Prepare ATA taskfile for submission.
4750 *
4751 * LOCKING:
4752 * spin_lock_irqsave(host lock)
4753 */
4754void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4755{
4756 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4757 return;
4758
4759 ata_fill_sg_dumb(qc);
4760}
4761
e46834cd
BK
4762void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4763
0cba632b
JG
4764/**
4765 * ata_sg_init - Associate command with scatter-gather table.
4766 * @qc: Command to be associated
4767 * @sg: Scatter-gather table.
4768 * @n_elem: Number of elements in s/g table.
4769 *
4770 * Initialize the data-related elements of queued_cmd @qc
4771 * to point to a scatter-gather table @sg, containing @n_elem
4772 * elements.
4773 *
4774 * LOCKING:
cca3974e 4775 * spin_lock_irqsave(host lock)
0cba632b 4776 */
1da177e4
LT
4777void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4778 unsigned int n_elem)
4779{
ff2aeb1e 4780 qc->sg = sg;
1da177e4 4781 qc->n_elem = n_elem;
ff2aeb1e 4782 qc->cursg = qc->sg;
1da177e4
LT
4783}
4784
ff2aeb1e 4785static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
0bcc65ad
TH
4786 unsigned int *n_elem_extra,
4787 unsigned int *nbytes_extra)
1da177e4
LT
4788{
4789 struct ata_port *ap = qc->ap;
ff2aeb1e
TH
4790 unsigned int n_elem = qc->n_elem;
4791 struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
1da177e4 4792
ff2aeb1e 4793 *n_elem_extra = 0;
0bcc65ad 4794 *nbytes_extra = 0;
ff2aeb1e
TH
4795
4796 /* needs padding? */
4797 qc->pad_len = qc->nbytes & 3;
4798
4799 if (likely(!qc->pad_len))
4800 return n_elem;
4801
4802 /* locate last sg and save it */
4803 lsg = sg_last(qc->sg, n_elem);
4804 qc->last_sg = lsg;
4805 qc->saved_last_sg = *lsg;
4806
4807 sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));
1da177e4 4808
cedc9a47 4809 if (qc->pad_len) {
ff2aeb1e 4810 struct scatterlist *psg = &qc->extra_sg[1];
cedc9a47 4811 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
cedc9a47
JG
4812 unsigned int offset;
4813
a4631474 4814 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4815
4816 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4817
ff2aeb1e 4818 /* psg->page/offset are used to copy to-be-written
cedc9a47
JG
4819 * data in this function or read data in ata_sg_clean.
4820 */
4821 offset = lsg->offset + lsg->length - qc->pad_len;
642f1490 4822 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
ff2aeb1e 4823 qc->pad_len, offset_in_page(offset));
cedc9a47
JG
4824
4825 if (qc->tf.flags & ATA_TFLAG_WRITE) {
45711f1a 4826 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
cedc9a47 4827 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 4828 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4829 }
4830
4831 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4832 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
ff2aeb1e
TH
4833
4834 /* Trim the last sg entry and chain the original and
4835 * padding sg lists.
4836 *
4837 * Because chaining consumes one sg entry, one extra
4838 * sg entry is allocated and the last sg entry is
4839 * copied to it if the length isn't zero after the padded
4840 * amount is removed.
4841 *
4842 * If the last sg entry is completely replaced by
4843 * padding sg entry, the first sg entry is skipped
4844 * while chaining.
4845 */
cedc9a47 4846 lsg->length -= qc->pad_len;
ff2aeb1e
TH
4847 if (lsg->length) {
4848 copy_lsg = &qc->extra_sg[0];
4849 tsg = &qc->extra_sg[0];
4850 } else {
4851 n_elem--;
4852 tsg = &qc->extra_sg[1];
4853 }
4854
4855 esg = &qc->extra_sg[1];
cedc9a47 4856
ff2aeb1e 4857 (*n_elem_extra)++;
0bcc65ad 4858 (*nbytes_extra) += 4 - qc->pad_len;
cedc9a47
JG
4859 }
4860
ff2aeb1e
TH
4861 if (copy_lsg)
4862 sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);
e1410f2d 4863
ff2aeb1e
TH
4864 sg_chain(lsg, 1, tsg);
4865 sg_mark_end(esg);
4866
4867 /* sglist can't start with chaining sg entry, fast forward */
4868 if (qc->sg == lsg) {
4869 qc->sg = tsg;
4870 qc->cursg = tsg;
e1410f2d
JG
4871 }
4872
ff2aeb1e
TH
4873 return n_elem;
4874}
4875
4876/**
4877 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4878 * @qc: Command with scatter-gather table to be mapped.
4879 *
4880 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4881 *
4882 * LOCKING:
4883 * spin_lock_irqsave(host lock)
4884 *
4885 * RETURNS:
4886 * Zero on success, negative on error.
4887 *
4888 */
4889static int ata_sg_setup(struct ata_queued_cmd *qc)
4890{
4891 struct ata_port *ap = qc->ap;
0bcc65ad 4892 unsigned int n_elem, n_elem_extra, nbytes_extra;
ff2aeb1e
TH
4893
4894 VPRINTK("ENTER, ata%u\n", ap->print_id);
4895
0bcc65ad 4896 n_elem = ata_sg_setup_extra(qc, &n_elem_extra, &nbytes_extra);
ff2aeb1e
TH
4897
4898 if (n_elem) {
4899 n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
4900 if (n_elem < 1) {
4901 /* restore last sg */
4902 if (qc->last_sg)
4903 *qc->last_sg = qc->saved_last_sg;
4904 return -1;
4905 }
4906 DPRINTK("%d sg elements mapped\n", n_elem);
537a95d9 4907 }
1da177e4 4908
ff2aeb1e
TH
4909 qc->n_elem = qc->mapped_n_elem = n_elem;
4910 qc->n_elem += n_elem_extra;
0bcc65ad 4911 qc->nbytes += nbytes_extra;
f92a2636 4912 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4
LT
4913
4914 return 0;
4915}
4916
0baab86b 4917/**
c893a3ae 4918 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4919 * @buf: Buffer to swap
4920 * @buf_words: Number of 16-bit words in buffer.
4921 *
4922 * Swap halves of 16-bit words if needed to convert from
4923 * little-endian byte order to native cpu byte order, or
4924 * vice-versa.
4925 *
4926 * LOCKING:
6f0ef4fa 4927 * Inherited from caller.
0baab86b 4928 */
1da177e4
LT
4929void swap_buf_le16(u16 *buf, unsigned int buf_words)
4930{
4931#ifdef __BIG_ENDIAN
4932 unsigned int i;
4933
4934 for (i = 0; i < buf_words; i++)
4935 buf[i] = le16_to_cpu(buf[i]);
4936#endif /* __BIG_ENDIAN */
4937}
4938
6ae4cfb5 4939/**
0d5ff566 4940 * ata_data_xfer - Transfer data by PIO
55dba312 4941 * @dev: device to target
6ae4cfb5
AL
4942 * @buf: data buffer
4943 * @buflen: buffer length
0affa456 4944 * @rw: read/write
6ae4cfb5
AL
4945 *
4946 * Transfer data from/to the device data register by PIO.
4947 *
4948 * LOCKING:
4949 * Inherited from caller.
55dba312
TH
4950 *
4951 * RETURNS:
4952 * Bytes consumed.
6ae4cfb5 4953 */
55dba312
TH
4954unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
4955 unsigned int buflen, int rw)
1da177e4 4956{
55dba312
TH
4957 struct ata_port *ap = dev->link->ap;
4958 void __iomem *data_addr = ap->ioaddr.data_addr;
6ae4cfb5 4959 unsigned int words = buflen >> 1;
1da177e4 4960
6ae4cfb5 4961 /* Transfer multiple of 2 bytes */
55dba312
TH
4962 if (rw == READ)
4963 ioread16_rep(data_addr, buf, words);
1da177e4 4964 else
55dba312 4965 iowrite16_rep(data_addr, buf, words);
6ae4cfb5
AL
4966
4967 /* Transfer trailing 1 byte, if any. */
4968 if (unlikely(buflen & 0x01)) {
4ca4e439 4969 __le16 align_buf[1] = { 0 };
6ae4cfb5
AL
4970 unsigned char *trailing_buf = buf + buflen - 1;
4971
55dba312
TH
4972 if (rw == READ) {
4973 align_buf[0] = cpu_to_le16(ioread16(data_addr));
6ae4cfb5 4974 memcpy(trailing_buf, align_buf, 1);
55dba312
TH
4975 } else {
4976 memcpy(align_buf, trailing_buf, 1);
4977 iowrite16(le16_to_cpu(align_buf[0]), data_addr);
6ae4cfb5 4978 }
55dba312 4979 words++;
6ae4cfb5 4980 }
55dba312
TH
4981
4982 return words << 1;
1da177e4
LT
4983}
4984
75e99585 4985/**
0d5ff566 4986 * ata_data_xfer_noirq - Transfer data by PIO
55dba312 4987 * @dev: device to target
75e99585
AC
4988 * @buf: data buffer
4989 * @buflen: buffer length
0affa456 4990 * @rw: read/write
75e99585 4991 *
88574551 4992 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
4993 * transfer with interrupts disabled.
4994 *
4995 * LOCKING:
4996 * Inherited from caller.
55dba312
TH
4997 *
4998 * RETURNS:
4999 * Bytes consumed.
75e99585 5000 */
55dba312
TH
5001unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
5002 unsigned int buflen, int rw)
75e99585
AC
5003{
5004 unsigned long flags;
55dba312
TH
5005 unsigned int consumed;
5006
75e99585 5007 local_irq_save(flags);
55dba312 5008 consumed = ata_data_xfer(dev, buf, buflen, rw);
75e99585 5009 local_irq_restore(flags);
55dba312
TH
5010
5011 return consumed;
75e99585
AC
5012}
5013
5014
6ae4cfb5 5015/**
5a5dbd18 5016 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
5017 * @qc: Command in progress
5018 *
5a5dbd18 5019 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
5020 *
5021 * LOCKING:
5022 * Inherited from caller.
5023 */
5024
1da177e4
LT
5025static void ata_pio_sector(struct ata_queued_cmd *qc)
5026{
5027 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
1da177e4
LT
5028 struct ata_port *ap = qc->ap;
5029 struct page *page;
5030 unsigned int offset;
5031 unsigned char *buf;
5032
5a5dbd18 5033 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 5034 ap->hsm_task_state = HSM_ST_LAST;
1da177e4 5035
45711f1a 5036 page = sg_page(qc->cursg);
87260216 5037 offset = qc->cursg->offset + qc->cursg_ofs;
1da177e4
LT
5038
5039 /* get the current page and offset */
5040 page = nth_page(page, (offset >> PAGE_SHIFT));
5041 offset %= PAGE_SIZE;
5042
1da177e4
LT
5043 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5044
91b8b313
AL
5045 if (PageHighMem(page)) {
5046 unsigned long flags;
5047
a6b2c5d4 5048 /* FIXME: use a bounce buffer */
91b8b313
AL
5049 local_irq_save(flags);
5050 buf = kmap_atomic(page, KM_IRQ0);
083958d3 5051
91b8b313 5052 /* do the actual data transfer */
5a5dbd18 5053 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 5054
91b8b313
AL
5055 kunmap_atomic(buf, KM_IRQ0);
5056 local_irq_restore(flags);
5057 } else {
5058 buf = page_address(page);
5a5dbd18 5059 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 5060 }
1da177e4 5061
5a5dbd18
ML
5062 qc->curbytes += qc->sect_size;
5063 qc->cursg_ofs += qc->sect_size;
1da177e4 5064
87260216
JA
5065 if (qc->cursg_ofs == qc->cursg->length) {
5066 qc->cursg = sg_next(qc->cursg);
1da177e4
LT
5067 qc->cursg_ofs = 0;
5068 }
1da177e4 5069}
1da177e4 5070
07f6f7d0 5071/**
5a5dbd18 5072 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
5073 * @qc: Command in progress
5074 *
5a5dbd18 5075 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
5076 * ATA device for the DRQ request.
5077 *
5078 * LOCKING:
5079 * Inherited from caller.
5080 */
1da177e4 5081
07f6f7d0
AL
5082static void ata_pio_sectors(struct ata_queued_cmd *qc)
5083{
5084 if (is_multi_taskfile(&qc->tf)) {
5085 /* READ/WRITE MULTIPLE */
5086 unsigned int nsect;
5087
587005de 5088 WARN_ON(qc->dev->multi_count == 0);
1da177e4 5089
5a5dbd18 5090 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 5091 qc->dev->multi_count);
07f6f7d0
AL
5092 while (nsect--)
5093 ata_pio_sector(qc);
5094 } else
5095 ata_pio_sector(qc);
4cc980b3
AL
5096
5097 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
5098}
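/* Illustrative arithmetic for the multi-sector path above: with 48
 * sectors' worth of bytes remaining and multi_count == 16, each DRQ
 * services 16 sectors.  Argument values are assumptions for the
 * example.
 */
static unsigned int example_drq_sectors(unsigned int bytes_left,
					unsigned int sect_size,
					unsigned int multi_count)
{
	return min(bytes_left / sect_size, multi_count);
}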
5099
c71c1857
AL
5100/**
5101 * atapi_send_cdb - Write CDB bytes to hardware
5102 * @ap: Port to which ATAPI device is attached.
5103 * @qc: Taskfile currently active
5104 *
5105 * When the device has indicated its readiness to accept
5106 * a CDB, this function is called. Send the CDB.
5107 *
5108 * LOCKING:
5109 * caller.
5110 */
5111
5112static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5113{
5114 /* send SCSI cdb */
5115 DPRINTK("send cdb\n");
db024d53 5116 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 5117
a6b2c5d4 5118 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
5119 ata_altstatus(ap); /* flush */
5120
5121 switch (qc->tf.protocol) {
0dc36888 5122 case ATAPI_PROT_PIO:
c71c1857
AL
5123 ap->hsm_task_state = HSM_ST;
5124 break;
0dc36888 5125 case ATAPI_PROT_NODATA:
c71c1857
AL
5126 ap->hsm_task_state = HSM_ST_LAST;
5127 break;
0dc36888 5128 case ATAPI_PROT_DMA:
c71c1857
AL
5129 ap->hsm_task_state = HSM_ST_LAST;
5130 /* initiate bmdma */
5131 ap->ops->bmdma_start(qc);
5132 break;
5133 }
1da177e4
LT
5134}
5135
6ae4cfb5
AL
5136/**
5137 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
5138 * @qc: Command in progress
5139 * @bytes: number of bytes
5140 *
5141 * Transfer data from/to the ATAPI device.
5142 *
5143 * LOCKING:
5144 * Inherited from caller.
5145 *
5146 */
140b5e59 5147static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
1da177e4
LT
5148{
5149 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
1da177e4 5150 struct ata_port *ap = qc->ap;
140b5e59
TH
5151 struct ata_eh_info *ehi = &qc->dev->link->eh_info;
5152 struct scatterlist *sg;
1da177e4
LT
5153 struct page *page;
5154 unsigned char *buf;
5155 unsigned int offset, count;
1da177e4
LT
5156
5157next_sg:
140b5e59
TH
5158 sg = qc->cursg;
5159 if (unlikely(!sg)) {
7fb6ec28 5160 /*
563a6e1f
AL
5161 * The end of qc->sg is reached and the device expects
5162 * more data to transfer. In order not to overrun qc->sg
5163 * and to fulfill the length specified in the byte count register,
5164 * - for read case, discard trailing data from the device
5165 * - for write case, pad zero data to the device
5166 */
5167 u16 pad_buf[1] = { 0 };
563a6e1f
AL
5168 unsigned int i;
5169
140b5e59
TH
5170 if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
5171 ata_ehi_push_desc(ehi, "too much trailing data "
5172 "buf=%u cur=%u bytes=%u",
5173 qc->nbytes, qc->curbytes, bytes);
5174 return -1;
5175 }
5176
5177 /* overflow is expected for misc ATAPI commands */
5178 if (bytes && !atapi_qc_may_overflow(qc))
5179 ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
5180 "trailing data (cdb=%02x nbytes=%u)\n",
5181 bytes, qc->cdb[0], qc->nbytes);
563a6e1f 5182
140b5e59 5183 for (i = 0; i < (bytes + 1) / 2; i++)
2dcb407e 5184 ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
563a6e1f 5185
140b5e59 5186 qc->curbytes += bytes;
563a6e1f 5187
140b5e59
TH
5188 return 0;
5189 }
1da177e4 5190
45711f1a 5191 page = sg_page(sg);
1da177e4
LT
5192 offset = sg->offset + qc->cursg_ofs;
5193
5194 /* get the current page and offset */
5195 page = nth_page(page, (offset >> PAGE_SHIFT));
5196 offset %= PAGE_SIZE;
5197
6952df03 5198 /* don't overrun current sg */
32529e01 5199 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
5200
5201 /* don't cross page boundaries */
5202 count = min(count, (unsigned int)PAGE_SIZE - offset);
5203
7282aa4b
AL
5204 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5205
91b8b313
AL
5206 if (PageHighMem(page)) {
5207 unsigned long flags;
5208
a6b2c5d4 5209 /* FIXME: use bounce buffer */
91b8b313
AL
5210 local_irq_save(flags);
5211 buf = kmap_atomic(page, KM_IRQ0);
083958d3 5212
91b8b313 5213 /* do the actual data transfer */
a6b2c5d4 5214 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 5215
91b8b313
AL
5216 kunmap_atomic(buf, KM_IRQ0);
5217 local_irq_restore(flags);
5218 } else {
5219 buf = page_address(page);
a6b2c5d4 5220 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 5221 }
1da177e4
LT
5222
5223 bytes -= count;
140b5e59
TH
5224 if ((count & 1) && bytes)
5225 bytes--;
1da177e4
LT
5226 qc->curbytes += count;
5227 qc->cursg_ofs += count;
5228
32529e01 5229 if (qc->cursg_ofs == sg->length) {
87260216 5230 qc->cursg = sg_next(qc->cursg);
1da177e4
LT
5231 qc->cursg_ofs = 0;
5232 }
5233
563a6e1f 5234 if (bytes)
1da177e4 5235 goto next_sg;
140b5e59
TH
5236
5237 return 0;
1da177e4
LT
5238}
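
/*
 * Note on the "(count & 1) && bytes" trim above (a reading of the code,
 * not authoritative): PIO data moves as 16-bit words, so an odd-sized
 * chunk drags one extra byte across the bus.  Dropping that byte from
 * the remaining count keeps the device's byte-count register and
 * qc->curbytes in agreement on the next next_sg pass.
 */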
5239
6ae4cfb5
AL
5240/**
5241 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5242 * @qc: Ongoing command
5243 *
5244 * Transfer data from/to the ATAPI device.
5245 *
5246 * LOCKING:
5247 * Inherited from caller.
6ae4cfb5
AL
5248 */
5249
1da177e4
LT
5250static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5251{
5252 struct ata_port *ap = qc->ap;
5253 struct ata_device *dev = qc->dev;
5254 unsigned int ireason, bc_lo, bc_hi, bytes;
5255 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5256
eec4c3f3
AL
5257 /* Abuse qc->result_tf for temp storage of intermediate TF
5258 * here to save some kernel stack usage.
5259 * For normal completion, qc->result_tf is not relevant. For
5260 * error, qc->result_tf is later overwritten by ata_qc_complete().
5261 * So, the correctness of qc->result_tf is not affected.
5262 */
5263 ap->ops->tf_read(ap, &qc->result_tf);
5264 ireason = qc->result_tf.nsect;
5265 bc_lo = qc->result_tf.lbam;
5266 bc_hi = qc->result_tf.lbah;
1da177e4
LT
5267 bytes = (bc_hi << 8) | bc_lo;
5268
5269 /* shall be cleared to zero, indicating xfer of data */
0106372d 5270 if (unlikely(ireason & (1 << 0)))
1da177e4
LT
5271 goto err_out;
5272
5273 /* make sure transfer direction matches expected */
5274 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
0106372d
AL
5275 if (unlikely(do_write != i_write))
5276 goto err_out;
5277
5278 if (unlikely(!bytes))
1da177e4
LT
5279 goto err_out;
5280
44877b4e 5281 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 5282
140b5e59
TH
5283 if (__atapi_pio_bytes(qc, bytes))
5284 goto err_out;
4cc980b3 5285 ata_altstatus(ap); /* flush */
1da177e4
LT
5286
5287 return;
5288
5289err_out:
f15a1daf 5290 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 5291 qc->err_mask |= AC_ERR_HSM;
14be71f4 5292 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
5293}
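
/*
 * Illustrative sketch (not part of the driver): the ireason checks in
 * atapi_pio_bytes() above decode two taskfile bits.  Bit 0 (CoD) set
 * means a command packet is expected, clear means data; bit 1 (IO) set
 * means the transfer is device-to-host.  A data-in phase therefore
 * looks like this:
 */
static inline int example_ireason_is_data_in(u8 ireason)
{
	return !(ireason & (1 << 0)) &&	/* CoD clear: data, not CDB */
	       (ireason & (1 << 1));	/* IO set: device to host */
}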
5294
5295/**
c234fb00
AL
5296 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5297 * @ap: the target ata_port
5298 * @qc: Ongoing qc
1da177e4 5299 *
c234fb00
AL
5300 * RETURNS:
5301 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5302 */
c234fb00
AL
5303
5304static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5305{
c234fb00
AL
5306 if (qc->tf.flags & ATA_TFLAG_POLLING)
5307 return 1;
1da177e4 5308
c234fb00
AL
5309 if (ap->hsm_task_state == HSM_ST_FIRST) {
5310 if (qc->tf.protocol == ATA_PROT_PIO &&
5311 (qc->tf.flags & ATA_TFLAG_WRITE))
5312 return 1;
1da177e4 5313
405e66b3 5314 if (ata_is_atapi(qc->tf.protocol) &&
c234fb00
AL
5315 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5316 return 1;
fe79e683
AL
5317 }
5318
c234fb00
AL
5319 return 0;
5320}
1da177e4 5321
c17ea20d
TH
5322/**
5323 * ata_hsm_qc_complete - finish a qc running on standard HSM
5324 * @qc: Command to complete
5325 * @in_wq: 1 if called from workqueue, 0 otherwise
5326 *
5327 * Finish @qc which is running on standard HSM.
5328 *
5329 * LOCKING:
cca3974e 5330 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
5331 * Otherwise, none on entry and grabs host lock.
5332 */
5333static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5334{
5335 struct ata_port *ap = qc->ap;
5336 unsigned long flags;
5337
5338 if (ap->ops->error_handler) {
5339 if (in_wq) {
ba6a1308 5340 spin_lock_irqsave(ap->lock, flags);
c17ea20d 5341
cca3974e
JG
5342 /* EH might have kicked in while host lock is
5343 * released.
c17ea20d
TH
5344 */
5345 qc = ata_qc_from_tag(ap, qc->tag);
5346 if (qc) {
5347 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 5348 ap->ops->irq_on(ap);
c17ea20d
TH
5349 ata_qc_complete(qc);
5350 } else
5351 ata_port_freeze(ap);
5352 }
5353
ba6a1308 5354 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
5355 } else {
5356 if (likely(!(qc->err_mask & AC_ERR_HSM)))
5357 ata_qc_complete(qc);
5358 else
5359 ata_port_freeze(ap);
5360 }
5361 } else {
5362 if (in_wq) {
ba6a1308 5363 spin_lock_irqsave(ap->lock, flags);
83625006 5364 ap->ops->irq_on(ap);
c17ea20d 5365 ata_qc_complete(qc);
ba6a1308 5366 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
5367 } else
5368 ata_qc_complete(qc);
5369 }
5370}
5371
bb5cb290
AL
5372/**
5373 * ata_hsm_move - move the HSM to the next state.
5374 * @ap: the target ata_port
5375 * @qc: Ongoing qc
5376 * @status: current device status
5377 * @in_wq: 1 if called from workqueue, 0 otherwise
5378 *
5379 * RETURNS:
5380 * 1 when poll next status needed, 0 otherwise.
5381 */
9a1004d0
TH
5382int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5383 u8 status, int in_wq)
e2cec771 5384{
bb5cb290
AL
5385 unsigned long flags = 0;
5386 int poll_next;
5387
6912ccd5
AL
5388 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5389
bb5cb290
AL
5390 /* Make sure ata_qc_issue_prot() does not throw things
5391 * like DMA polling into the workqueue. Notice that
5392 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5393 */
c234fb00 5394 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 5395
e2cec771 5396fsm_start:
999bb6f4 5397 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 5398 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 5399
e2cec771
AL
5400 switch (ap->hsm_task_state) {
5401 case HSM_ST_FIRST:
bb5cb290
AL
5402 /* Send first data block or PACKET CDB */
5403
5404 /* If polling, we will stay in the work queue after
5405 * sending the data. Otherwise, interrupt handler
5406 * takes over after sending the data.
5407 */
5408 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5409
e2cec771 5410 /* check device status */
3655d1d3
AL
5411 if (unlikely((status & ATA_DRQ) == 0)) {
5412 /* handle BSY=0, DRQ=0 as error */
5413 if (likely(status & (ATA_ERR | ATA_DF)))
5414 /* device stops HSM for abort/error */
5415 qc->err_mask |= AC_ERR_DEV;
5416 else
5417 /* HSM violation. Let EH handle this */
5418 qc->err_mask |= AC_ERR_HSM;
5419
14be71f4 5420 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 5421 goto fsm_start;
1da177e4
LT
5422 }
5423
71601958
AL
5424 /* Device should not ask for data transfer (DRQ=1)
5425 * when it finds something wrong.
eee6c32f
AL
5426 * We ignore DRQ here and stop the HSM by
5427 * changing hsm_task_state to HSM_ST_ERR and
5428 * let the EH abort the command or reset the device.
71601958
AL
5429 */
5430 if (unlikely(status & (ATA_ERR | ATA_DF))) {
2d3b8eea
AL
5431 /* Some ATAPI tape drives forget to clear the ERR bit
5432 * when doing the next command (mostly request sense).
5433 * We ignore ERR here to workaround and proceed sending
5434 * the CDB.
5435 */
5436 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
5437 ata_port_printk(ap, KERN_WARNING,
5438 "DRQ=1 with device error, "
5439 "dev_stat 0x%X\n", status);
5440 qc->err_mask |= AC_ERR_HSM;
5441 ap->hsm_task_state = HSM_ST_ERR;
5442 goto fsm_start;
5443 }
71601958 5444 }
1da177e4 5445
bb5cb290
AL
5446 /* Send the CDB (atapi) or the first data block (ata pio out).
5447 * During the state transition, interrupt handler shouldn't
5448 * be invoked before the data transfer is complete and
5449 * hsm_task_state is changed. Hence, the following locking.
5450 */
5451 if (in_wq)
ba6a1308 5452 spin_lock_irqsave(ap->lock, flags);
1da177e4 5453
bb5cb290
AL
5454 if (qc->tf.protocol == ATA_PROT_PIO) {
5455 /* PIO data out protocol.
5456 * send first data block.
5457 */
0565c26d 5458
bb5cb290
AL
5459 /* ata_pio_sectors() might change the state
5460 * to HSM_ST_LAST. so, the state is changed here
5461 * before ata_pio_sectors().
5462 */
5463 ap->hsm_task_state = HSM_ST;
5464 ata_pio_sectors(qc);
bb5cb290
AL
5465 } else
5466 /* send CDB */
5467 atapi_send_cdb(ap, qc);
5468
5469 if (in_wq)
ba6a1308 5470 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
5471
5472 /* if polling, ata_pio_task() handles the rest.
5473 * otherwise, interrupt handler takes over from here.
5474 */
e2cec771 5475 break;
1c848984 5476
e2cec771
AL
5477 case HSM_ST:
5478 /* complete command or read/write the data register */
0dc36888 5479 if (qc->tf.protocol == ATAPI_PROT_PIO) {
e2cec771
AL
5480 /* ATAPI PIO protocol */
5481 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
5482 /* No more data to transfer or device error.
5483 * Device error will be tagged in HSM_ST_LAST.
5484 */
e2cec771
AL
5485 ap->hsm_task_state = HSM_ST_LAST;
5486 goto fsm_start;
5487 }
1da177e4 5488
71601958
AL
5489 /* Device should not ask for data transfer (DRQ=1)
5490 * when it finds something wrong.
eee6c32f
AL
5491 * We ignore DRQ here and stop the HSM by
5492 * changing hsm_task_state to HSM_ST_ERR and
5493 * let the EH abort the command or reset the device.
71601958
AL
5494 */
5495 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
5496 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5497 "device error, dev_stat 0x%X\n",
5498 status);
3655d1d3 5499 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
5500 ap->hsm_task_state = HSM_ST_ERR;
5501 goto fsm_start;
71601958 5502 }
1da177e4 5503
e2cec771 5504 atapi_pio_bytes(qc);
7fb6ec28 5505
e2cec771
AL
5506 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5507 /* bad ireason reported by device */
5508 goto fsm_start;
1da177e4 5509
e2cec771
AL
5510 } else {
5511 /* ATA PIO protocol */
5512 if (unlikely((status & ATA_DRQ) == 0)) {
5513 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
5514 if (likely(status & (ATA_ERR | ATA_DF)))
5515 /* device stops HSM for abort/error */
5516 qc->err_mask |= AC_ERR_DEV;
5517 else
55a8e2c8
TH
5518 /* HSM violation. Let EH handle this.
5519 * Phantom devices also trigger this
5520 * condition. Mark hint.
5521 */
5522 qc->err_mask |= AC_ERR_HSM |
5523 AC_ERR_NODEV_HINT;
3655d1d3 5524
e2cec771
AL
5525 ap->hsm_task_state = HSM_ST_ERR;
5526 goto fsm_start;
5527 }
1da177e4 5528
eee6c32f
AL
5529 /* For PIO reads, some devices may ask for
5530 * data transfer (DRQ=1) along with ERR=1.
5531 * We respect DRQ here and transfer one
5532 * block of junk data before changing the
5533 * hsm_task_state to HSM_ST_ERR.
5534 *
5535 * For PIO writes, ERR=1 DRQ=1 doesn't make
5536 * sense since the data block has been
5537 * transferred to the device.
71601958
AL
5538 */
5539 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
5540 /* data might be corrupted */
5541 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
5542
5543 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5544 ata_pio_sectors(qc);
eee6c32f
AL
5545 status = ata_wait_idle(ap);
5546 }
5547
3655d1d3
AL
5548 if (status & (ATA_BUSY | ATA_DRQ))
5549 qc->err_mask |= AC_ERR_HSM;
5550
eee6c32f
AL
5551 /* ata_pio_sectors() might change the
5552 * state to HSM_ST_LAST. so, the state
5553 * is changed after ata_pio_sectors().
5554 */
5555 ap->hsm_task_state = HSM_ST_ERR;
5556 goto fsm_start;
71601958
AL
5557 }
5558
e2cec771
AL
5559 ata_pio_sectors(qc);
5560
5561 if (ap->hsm_task_state == HSM_ST_LAST &&
5562 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5563 /* all data read */
52a32205 5564 status = ata_wait_idle(ap);
e2cec771
AL
5565 goto fsm_start;
5566 }
5567 }
5568
bb5cb290 5569 poll_next = 1;
1da177e4
LT
5570 break;
5571
14be71f4 5572 case HSM_ST_LAST:
6912ccd5
AL
5573 if (unlikely(!ata_ok(status))) {
5574 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
5575 ap->hsm_task_state = HSM_ST_ERR;
5576 goto fsm_start;
5577 }
5578
5579 /* no more data to transfer */
4332a771 5580 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 5581 ap->print_id, qc->dev->devno, status);
e2cec771 5582
6912ccd5
AL
5583 WARN_ON(qc->err_mask);
5584
e2cec771 5585 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 5586
e2cec771 5587 /* complete taskfile transaction */
c17ea20d 5588 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5589
5590 poll_next = 0;
1da177e4
LT
5591 break;
5592
14be71f4 5593 case HSM_ST_ERR:
e2cec771
AL
5594 /* make sure qc->err_mask is available to
5595 * know what's wrong and recover
5596 */
5597 WARN_ON(qc->err_mask == 0);
5598
5599 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 5600
999bb6f4 5601 /* complete taskfile transaction */
c17ea20d 5602 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5603
5604 poll_next = 0;
e2cec771
AL
5605 break;
5606 default:
bb5cb290 5607 poll_next = 0;
6912ccd5 5608 BUG();
1da177e4
LT
5609 }
5610
bb5cb290 5611 return poll_next;
1da177e4
LT
5612}
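
/*
 * Quick reference, derived from the switch above (PIO-out shown; the
 * ATAPI variants insert the CDB phase at HSM_ST_FIRST):
 *
 *   HSM_ST_FIRST -> send CDB or first data block, then HSM_ST
 *   HSM_ST       -> transfer data blockwise until done, then HSM_ST_LAST
 *   HSM_ST_LAST  -> check final status, complete qc, then HSM_ST_IDLE
 *   HSM_ST_ERR   -> complete qc with err_mask set, then HSM_ST_IDLE
 */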
5613
65f27f38 5614static void ata_pio_task(struct work_struct *work)
8061f5f0 5615{
65f27f38
DH
5616 struct ata_port *ap =
5617 container_of(work, struct ata_port, port_task.work);
5618 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5619 u8 status;
a1af3734 5620 int poll_next;
8061f5f0 5621
7fb6ec28 5622fsm_start:
a1af3734 5623 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5624
a1af3734
AL
5625 /*
5626 * This is purely heuristic. This is a fast path.
5627 * Sometimes when we enter, BSY will be cleared in
5628 * a chk-status or two. If not, the drive is probably seeking
5629 * or something. Snooze for a couple msecs, then
5630 * chk-status again. If still busy, queue delayed work.
5631 */
5632 status = ata_busy_wait(ap, ATA_BUSY, 5);
5633 if (status & ATA_BUSY) {
5634 msleep(2);
5635 status = ata_busy_wait(ap, ATA_BUSY, 10);
5636 if (status & ATA_BUSY) {
442eacc3 5637 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5638 return;
5639 }
8061f5f0
TH
5640 }
5641
a1af3734
AL
5642 /* move the HSM */
5643 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5644
a1af3734
AL
5645 /* another command or interrupt handler
5646 * may be running at this point.
5647 */
5648 if (poll_next)
7fb6ec28 5649 goto fsm_start;
8061f5f0
TH
5650}
5651
1da177e4
LT
5652/**
5653 * ata_qc_new - Request an available ATA command, for queueing
5654 * @ap: Port associated with device @dev
5655 * @dev: Device from whom we request an available command structure
5656 *
5657 * LOCKING:
0cba632b 5658 * None.
1da177e4
LT
5659 */
5660
5661static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5662{
5663 struct ata_queued_cmd *qc = NULL;
5664 unsigned int i;
5665
e3180499 5666 /* no command while frozen */
b51e9e5d 5667 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5668 return NULL;
5669
2ab7db1f
TH
5670 /* the last tag is reserved for internal command. */
5671 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5672 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5673 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5674 break;
5675 }
5676
5677 if (qc)
5678 qc->tag = i;
5679
5680 return qc;
5681}
5682
5683/**
5684 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5685 * @dev: Device from whom we request an available command structure
5686 *
5687 * LOCKING:
0cba632b 5688 * None.
1da177e4
LT
5689 */
5690
3373efd8 5691struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5692{
9af5c9c9 5693 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5694 struct ata_queued_cmd *qc;
5695
5696 qc = ata_qc_new(ap);
5697 if (qc) {
1da177e4
LT
5698 qc->scsicmd = NULL;
5699 qc->ap = ap;
5700 qc->dev = dev;
1da177e4 5701
2c13b7ce 5702 ata_qc_reinit(qc);
1da177e4
LT
5703 }
5704
5705 return qc;
5706}
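
/*
 * Usage sketch (hypothetical helper, for illustration only): the
 * minimal life cycle of a qc.  Real callers such as the SCSI
 * translation layer also set up data buffers and a completion
 * callback; error handling is elided here.
 */
static void example_issue_nodata_cmd(struct ata_device *dev, u8 command)
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(dev);	/* NULL if frozen or no free tag */
	if (!qc)
		return;

	qc->tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	qc->tf.command = command;
	qc->tf.protocol = ATA_PROT_NODATA;

	/* a real caller also sets qc->complete_fn before issuing */
	ata_qc_issue(qc);		/* host lock must be held */
}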
5707
1da177e4
LT
5708/**
5709 * ata_qc_free - free unused ata_queued_cmd
5710 * @qc: Command to complete
5711 *
5712 * Designed to free unused ata_queued_cmd object
5713 * in case something prevents using it.
5714 *
5715 * LOCKING:
cca3974e 5716 * spin_lock_irqsave(host lock)
1da177e4
LT
5717 */
5718void ata_qc_free(struct ata_queued_cmd *qc)
5719{
4ba946e9
TH
5720 struct ata_port *ap = qc->ap;
5721 unsigned int tag;
5722
a4631474 5723 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5724
4ba946e9
TH
5725 qc->flags = 0;
5726 tag = qc->tag;
5727 if (likely(ata_tag_valid(tag))) {
4ba946e9 5728 qc->tag = ATA_TAG_POISON;
6cec4a39 5729 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5730 }
1da177e4
LT
5731}
5732
76014427 5733void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5734{
dedaf2b0 5735 struct ata_port *ap = qc->ap;
9af5c9c9 5736 struct ata_link *link = qc->dev->link;
dedaf2b0 5737
a4631474
TH
5738 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5739 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
5740
5741 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5742 ata_sg_clean(qc);
5743
7401abf2 5744 /* command should be marked inactive atomically with qc completion */
da917d69 5745 if (qc->tf.protocol == ATA_PROT_NCQ) {
9af5c9c9 5746 link->sactive &= ~(1 << qc->tag);
da917d69
TH
5747 if (!link->sactive)
5748 ap->nr_active_links--;
5749 } else {
9af5c9c9 5750 link->active_tag = ATA_TAG_POISON;
da917d69
TH
5751 ap->nr_active_links--;
5752 }
5753
5754 /* clear exclusive status */
5755 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5756 ap->excl_link == link))
5757 ap->excl_link = NULL;
7401abf2 5758
3f3791d3
AL
5759 /* atapi: mark qc as inactive to prevent the interrupt handler
5760 * from completing the command twice later, before the error handler
5761 * is called. (when rc != 0 and atapi request sense is needed)
5762 */
5763 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 5764 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 5765
1da177e4 5766 /* call completion callback */
77853bf2 5767 qc->complete_fn(qc);
1da177e4
LT
5768}
5769
39599a53
TH
5770static void fill_result_tf(struct ata_queued_cmd *qc)
5771{
5772 struct ata_port *ap = qc->ap;
5773
39599a53 5774 qc->result_tf.flags = qc->tf.flags;
4742d54f 5775 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5776}
5777
00115e0f
TH
5778static void ata_verify_xfer(struct ata_queued_cmd *qc)
5779{
5780 struct ata_device *dev = qc->dev;
5781
5782 if (ata_tag_internal(qc->tag))
5783 return;
5784
5785 if (ata_is_nodata(qc->tf.protocol))
5786 return;
5787
5788 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5789 return;
5790
5791 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5792}
5793
f686bcb8
TH
5794/**
5795 * ata_qc_complete - Complete an active ATA command
5796 * @qc: Command to complete
5798 *
5799 * Indicate to the mid and upper layers that an ATA
5800 * command has completed, with either an ok or not-ok status.
5801 *
5802 * LOCKING:
cca3974e 5803 * spin_lock_irqsave(host lock)
f686bcb8
TH
5804 */
5805void ata_qc_complete(struct ata_queued_cmd *qc)
5806{
5807 struct ata_port *ap = qc->ap;
5808
5809 /* XXX: New EH and old EH use different mechanisms to
5810 * synchronize EH with regular execution path.
5811 *
5812 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5813 * Normal execution path is responsible for not accessing a
5814 * failed qc. libata core enforces the rule by returning NULL
5815 * from ata_qc_from_tag() for failed qcs.
5816 *
5817 * Old EH depends on ata_qc_complete() nullifying completion
5818 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5819 * not synchronize with interrupt handler. Only PIO task is
5820 * taken care of.
5821 */
5822 if (ap->ops->error_handler) {
4dbfa39b
TH
5823 struct ata_device *dev = qc->dev;
5824 struct ata_eh_info *ehi = &dev->link->eh_info;
5825
b51e9e5d 5826 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
5827
5828 if (unlikely(qc->err_mask))
5829 qc->flags |= ATA_QCFLAG_FAILED;
5830
5831 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5832 if (!ata_tag_internal(qc->tag)) {
5833 /* always fill result TF for failed qc */
39599a53 5834 fill_result_tf(qc);
f686bcb8
TH
5835 ata_qc_schedule_eh(qc);
5836 return;
5837 }
5838 }
5839
5840 /* read result TF if requested */
5841 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5842 fill_result_tf(qc);
f686bcb8 5843
4dbfa39b
TH
5844 /* Some commands need post-processing after successful
5845 * completion.
5846 */
5847 switch (qc->tf.command) {
5848 case ATA_CMD_SET_FEATURES:
5849 if (qc->tf.feature != SETFEATURES_WC_ON &&
5850 qc->tf.feature != SETFEATURES_WC_OFF)
5851 break;
5852 /* fall through */
5853 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5854 case ATA_CMD_SET_MULTI: /* multi_count changed */
5855 /* revalidate device */
5856 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5857 ata_port_schedule_eh(ap);
5858 break;
054a5fba
TH
5859
5860 case ATA_CMD_SLEEP:
5861 dev->flags |= ATA_DFLAG_SLEEPING;
5862 break;
4dbfa39b
TH
5863 }
5864
00115e0f
TH
5865 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5866 ata_verify_xfer(qc);
5867
f686bcb8
TH
5868 __ata_qc_complete(qc);
5869 } else {
5870 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5871 return;
5872
5873 /* read result TF if failed or requested */
5874 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5875 fill_result_tf(qc);
f686bcb8
TH
5876
5877 __ata_qc_complete(qc);
5878 }
5879}
5880
dedaf2b0
TH
5881/**
5882 * ata_qc_complete_multiple - Complete multiple qcs successfully
5883 * @ap: port in question
5884 * @qc_active: new qc_active mask
5885 * @finish_qc: LLDD callback invoked before completing a qc
5886 *
5887 * Complete in-flight commands. This function is meant to be
5888 * called from a low-level driver's interrupt routine to complete
5889 * requests normally. ap->qc_active and @qc_active are compared
5890 * and commands are completed accordingly.
5891 *
5892 * LOCKING:
cca3974e 5893 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5894 *
5895 * RETURNS:
5896 * Number of completed commands on success, -errno otherwise.
5897 */
5898int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5899 void (*finish_qc)(struct ata_queued_cmd *))
5900{
5901 int nr_done = 0;
5902 u32 done_mask;
5903 int i;
5904
5905 done_mask = ap->qc_active ^ qc_active;
5906
5907 if (unlikely(done_mask & qc_active)) {
5908 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5909 "(%08x->%08x)\n", ap->qc_active, qc_active);
5910 return -EINVAL;
5911 }
5912
5913 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5914 struct ata_queued_cmd *qc;
5915
5916 if (!(done_mask & (1 << i)))
5917 continue;
5918
5919 if ((qc = ata_qc_from_tag(ap, i))) {
5920 if (finish_qc)
5921 finish_qc(qc);
5922 ata_qc_complete(qc);
5923 nr_done++;
5924 }
5925 }
5926
5927 return nr_done;
5928}
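
/*
 * Usage sketch (hypothetical LLD interrupt path): an NCQ-aware driver
 * typically reads which tags the hardware still considers active --
 * here assumed to be mirrored in the SActive SCR -- and lets
 * ata_qc_complete_multiple() finish everything that completed.
 */
static void example_ncq_host_intr(struct ata_port *ap)
{
	u32 sactive;

	if (sata_scr_read(&ap->link, SCR_ACTIVE, &sactive))
		return;	/* SCRs not accessible on this port */

	/* tags still set in sactive are in flight; the rest are done */
	ata_qc_complete_multiple(ap, sactive, NULL);
}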
5929
1da177e4
LT
5930/**
5931 * ata_qc_issue - issue taskfile to device
5932 * @qc: command to issue to device
5933 *
5934 * Prepare an ATA command for submission to the device.
5935 * This includes mapping the data into a DMA-able
5936 * area, filling in the S/G table, and finally
5937 * writing the taskfile to hardware, starting the command.
5938 *
5939 * LOCKING:
cca3974e 5940 * spin_lock_irqsave(host lock)
1da177e4 5941 */
8e0e694a 5942void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5943{
5944 struct ata_port *ap = qc->ap;
9af5c9c9 5945 struct ata_link *link = qc->dev->link;
405e66b3 5946 u8 prot = qc->tf.protocol;
1da177e4 5947
dedaf2b0
TH
5948 /* Make sure only one non-NCQ command is outstanding. The
5949 * check is skipped for old EH because it reuses active qc to
5950 * request ATAPI sense.
5951 */
9af5c9c9 5952 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0 5953
1973a023 5954 if (ata_is_ncq(prot)) {
9af5c9c9 5955 WARN_ON(link->sactive & (1 << qc->tag));
da917d69
TH
5956
5957 if (!link->sactive)
5958 ap->nr_active_links++;
9af5c9c9 5959 link->sactive |= 1 << qc->tag;
dedaf2b0 5960 } else {
9af5c9c9 5961 WARN_ON(link->sactive);
da917d69
TH
5962
5963 ap->nr_active_links++;
9af5c9c9 5964 link->active_tag = qc->tag;
dedaf2b0
TH
5965 }
5966
e4a70e76 5967 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5968 ap->qc_active |= 1 << qc->tag;
e4a70e76 5969
f92a2636
TH
5970 /* We guarantee to LLDs that they will have at least one
5971 * non-zero sg if the command is a data command.
5972 */
ff2aeb1e 5973 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
f92a2636 5974
0bcc65ad
TH
5975 /* ata_sg_setup() may update nbytes */
5976 qc->raw_nbytes = qc->nbytes;
5977
405e66b3 5978 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
f92a2636 5979 (ap->flags & ATA_FLAG_PIO_DMA)))
001102d7
TH
5980 if (ata_sg_setup(qc))
5981 goto sg_err;
1da177e4 5982
054a5fba
TH
5983 /* if device is sleeping, schedule softreset and abort the link */
5984 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5985 link->eh_info.action |= ATA_EH_SOFTRESET;
5986 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5987 ata_link_abort(link);
5988 return;
5989 }
5990
1da177e4
LT
5991 ap->ops->qc_prep(qc);
5992
8e0e694a
TH
5993 qc->err_mask |= ap->ops->qc_issue(qc);
5994 if (unlikely(qc->err_mask))
5995 goto err;
5996 return;
1da177e4 5997
8e436af9 5998sg_err:
8e0e694a
TH
5999 qc->err_mask |= AC_ERR_SYSTEM;
6000err:
6001 ata_qc_complete(qc);
1da177e4
LT
6002}
6003
6004/**
6005 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6006 * @qc: command to issue to device
6007 *
6008 * Using various libata functions and hooks, this function
6009 * starts an ATA command. ATA commands are grouped into
6010 * classes called "protocols", and issuing each type of protocol
6011 * is slightly different.
6012 *
0baab86b
EF
6013 * May be used as the qc_issue() entry in ata_port_operations.
6014 *
1da177e4 6015 * LOCKING:
cca3974e 6016 * spin_lock_irqsave(host lock)
1da177e4
LT
6017 *
6018 * RETURNS:
9a3d9eb0 6019 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
6020 */
6021
9a3d9eb0 6022unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
6023{
6024 struct ata_port *ap = qc->ap;
6025
e50362ec
AL
6026 /* Use polling pio if the LLD doesn't handle
6027 * interrupt driven pio and atapi CDB interrupt.
6028 */
6029 if (ap->flags & ATA_FLAG_PIO_POLLING) {
6030 switch (qc->tf.protocol) {
6031 case ATA_PROT_PIO:
e3472cbe 6032 case ATA_PROT_NODATA:
0dc36888
TH
6033 case ATAPI_PROT_PIO:
6034 case ATAPI_PROT_NODATA:
e50362ec
AL
6035 qc->tf.flags |= ATA_TFLAG_POLLING;
6036 break;
0dc36888 6037 case ATAPI_PROT_DMA:
e50362ec 6038 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 6039 /* see ata_dma_blacklisted() */
e50362ec
AL
6040 BUG();
6041 break;
6042 default:
6043 break;
6044 }
6045 }
6046
312f7da2 6047 /* select the device */
1da177e4
LT
6048 ata_dev_select(ap, qc->dev->devno, 1, 0);
6049
312f7da2 6050 /* start the command */
1da177e4
LT
6051 switch (qc->tf.protocol) {
6052 case ATA_PROT_NODATA:
312f7da2
AL
6053 if (qc->tf.flags & ATA_TFLAG_POLLING)
6054 ata_qc_set_polling(qc);
6055
e5338254 6056 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
6057 ap->hsm_task_state = HSM_ST_LAST;
6058
6059 if (qc->tf.flags & ATA_TFLAG_POLLING)
442eacc3 6060 ata_pio_queue_task(ap, qc, 0);
312f7da2 6061
1da177e4
LT
6062 break;
6063
6064 case ATA_PROT_DMA:
587005de 6065 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6066
1da177e4
LT
6067 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6068 ap->ops->bmdma_setup(qc); /* set up bmdma */
6069 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 6070 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
6071 break;
6072
312f7da2
AL
6073 case ATA_PROT_PIO:
6074 if (qc->tf.flags & ATA_TFLAG_POLLING)
6075 ata_qc_set_polling(qc);
1da177e4 6076
e5338254 6077 ata_tf_to_host(ap, &qc->tf);
312f7da2 6078
54f00389
AL
6079 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6080 /* PIO data out protocol */
6081 ap->hsm_task_state = HSM_ST_FIRST;
442eacc3 6082 ata_pio_queue_task(ap, qc, 0);
54f00389
AL
6083
6084 /* always send first data block using
e27486db 6085 * the ata_pio_task() codepath.
54f00389 6086 */
312f7da2 6087 } else {
54f00389
AL
6088 /* PIO data in protocol */
6089 ap->hsm_task_state = HSM_ST;
6090
6091 if (qc->tf.flags & ATA_TFLAG_POLLING)
442eacc3 6092 ata_pio_queue_task(ap, qc, 0);
54f00389
AL
6093
6094 /* if polling, ata_pio_task() handles the rest.
6095 * otherwise, interrupt handler takes over from here.
6096 */
312f7da2
AL
6097 }
6098
1da177e4
LT
6099 break;
6100
0dc36888
TH
6101 case ATAPI_PROT_PIO:
6102 case ATAPI_PROT_NODATA:
312f7da2
AL
6103 if (qc->tf.flags & ATA_TFLAG_POLLING)
6104 ata_qc_set_polling(qc);
6105
e5338254 6106 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 6107
312f7da2
AL
6108 ap->hsm_task_state = HSM_ST_FIRST;
6109
6110 /* send cdb by polling if no cdb interrupt */
6111 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6112 (qc->tf.flags & ATA_TFLAG_POLLING))
442eacc3 6113 ata_pio_queue_task(ap, qc, 0);
1da177e4
LT
6114 break;
6115
0dc36888 6116 case ATAPI_PROT_DMA:
587005de 6117 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6118
1da177e4
LT
6119 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6120 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
6121 ap->hsm_task_state = HSM_ST_FIRST;
6122
6123 /* send cdb by polling if no cdb interrupt */
6124 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
442eacc3 6125 ata_pio_queue_task(ap, qc, 0);
1da177e4
LT
6126 break;
6127
6128 default:
6129 WARN_ON(1);
9a3d9eb0 6130 return AC_ERR_SYSTEM;
1da177e4
LT
6131 }
6132
6133 return 0;
6134}
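
/*
 * Dispatch summary, derived from the switch above:
 *
 *   protocol               initial HSM state   first action
 *   ATA_PROT_NODATA        HSM_ST_LAST         write taskfile
 *   ATA_PROT_DMA           HSM_ST_LAST         bmdma_setup + bmdma_start
 *   ATA_PROT_PIO (out)     HSM_ST_FIRST        first block via ata_pio_task()
 *   ATA_PROT_PIO (in)      HSM_ST              wait for DRQ (irq or poll)
 *   ATAPI_PROT_PIO/NODATA  HSM_ST_FIRST        send CDB (irq or poll)
 *   ATAPI_PROT_DMA         HSM_ST_FIRST        bmdma_setup, CDB, then DMA
 */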
6135
1da177e4
LT
6136/**
6137 * ata_host_intr - Handle host interrupt for given (port, task)
6138 * @ap: Port on which interrupt arrived (possibly...)
6139 * @qc: Taskfile currently active in engine
6140 *
6141 * Handle host interrupt for given queued command. Currently,
6142 * only DMA interrupts are handled. All other commands are
6143 * handled via polling with interrupts disabled (nIEN bit).
6144 *
6145 * LOCKING:
cca3974e 6146 * spin_lock_irqsave(host lock)
1da177e4
LT
6147 *
6148 * RETURNS:
6149 * One if interrupt was handled, zero if not (shared irq).
6150 */
6151
2dcb407e
JG
6152inline unsigned int ata_host_intr(struct ata_port *ap,
6153 struct ata_queued_cmd *qc)
1da177e4 6154{
9af5c9c9 6155 struct ata_eh_info *ehi = &ap->link.eh_info;
312f7da2 6156 u8 status, host_stat = 0;
1da177e4 6157
312f7da2 6158 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 6159 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 6160
312f7da2
AL
6161 /* Check whether we are expecting interrupt in this state */
6162 switch (ap->hsm_task_state) {
6163 case HSM_ST_FIRST:
6912ccd5
AL
6164 /* Some pre-ATAPI-4 devices assert INTRQ
6165 * in this state when ready to receive the CDB.
6166 */
1da177e4 6167
312f7da2 6168 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
405e66b3
TH
6169 * The flag was turned on only for atapi devices. No
6170 * need to check ata_is_atapi(qc->tf.protocol) again.
312f7da2
AL
6171 */
6172 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 6173 goto idle_irq;
1da177e4 6174 break;
312f7da2
AL
6175 case HSM_ST_LAST:
6176 if (qc->tf.protocol == ATA_PROT_DMA ||
0dc36888 6177 qc->tf.protocol == ATAPI_PROT_DMA) {
312f7da2
AL
6178 /* check status of DMA engine */
6179 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
6180 VPRINTK("ata%u: host_stat 0x%X\n",
6181 ap->print_id, host_stat);
312f7da2
AL
6182
6183 /* if it's not our irq... */
6184 if (!(host_stat & ATA_DMA_INTR))
6185 goto idle_irq;
6186
6187 /* before we do anything else, clear DMA-Start bit */
6188 ap->ops->bmdma_stop(qc);
a4f16610
AL
6189
6190 if (unlikely(host_stat & ATA_DMA_ERR)) {
6191 /* error when transferring data to/from memory */
6192 qc->err_mask |= AC_ERR_HOST_BUS;
6193 ap->hsm_task_state = HSM_ST_ERR;
6194 }
312f7da2
AL
6195 }
6196 break;
6197 case HSM_ST:
6198 break;
1da177e4
LT
6199 default:
6200 goto idle_irq;
6201 }
6202
312f7da2
AL
6203 /* check altstatus */
6204 status = ata_altstatus(ap);
6205 if (status & ATA_BUSY)
6206 goto idle_irq;
1da177e4 6207
312f7da2
AL
6208 /* check main status, clearing INTRQ */
6209 status = ata_chk_status(ap);
6210 if (unlikely(status & ATA_BUSY))
6211 goto idle_irq;
1da177e4 6212
312f7da2
AL
6213 /* ack bmdma irq events */
6214 ap->ops->irq_clear(ap);
1da177e4 6215
bb5cb290 6216 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
6217
6218 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
0dc36888 6219 qc->tf.protocol == ATAPI_PROT_DMA))
ea54763f
TH
6220 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6221
1da177e4
LT
6222 return 1; /* irq handled */
6223
6224idle_irq:
6225 ap->stats.idle_irq++;
6226
6227#ifdef ATA_IRQ_TRAP
6228 if ((ap->stats.idle_irq % 1000) == 0) {
6d32d30f
JG
6229 ata_chk_status(ap);
6230 ap->ops->irq_clear(ap);
f15a1daf 6231 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 6232 return 1;
1da177e4
LT
6233 }
6234#endif
6235 return 0; /* irq not handled */
6236}
6237
6238/**
6239 * ata_interrupt - Default ATA host interrupt handler
0cba632b 6240 * @irq: irq line (unused)
cca3974e 6241 * @dev_instance: pointer to our ata_host information structure
1da177e4 6242 *
0cba632b
JG
6243 * Default interrupt handler for PCI IDE devices. Calls
6244 * ata_host_intr() for each port that is not disabled.
6245 *
1da177e4 6246 * LOCKING:
cca3974e 6247 * Obtains host lock during operation.
1da177e4
LT
6248 *
6249 * RETURNS:
0cba632b 6250 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
6251 */
6252
2dcb407e 6253irqreturn_t ata_interrupt(int irq, void *dev_instance)
1da177e4 6254{
cca3974e 6255 struct ata_host *host = dev_instance;
1da177e4
LT
6256 unsigned int i;
6257 unsigned int handled = 0;
6258 unsigned long flags;
6259
6260 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 6261 spin_lock_irqsave(&host->lock, flags);
1da177e4 6262
cca3974e 6263 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
6264 struct ata_port *ap;
6265
cca3974e 6266 ap = host->ports[i];
c1389503 6267 if (ap &&
029f5468 6268 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
6269 struct ata_queued_cmd *qc;
6270
9af5c9c9 6271 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 6272 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 6273 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
6274 handled |= ata_host_intr(ap, qc);
6275 }
6276 }
6277
cca3974e 6278 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
6279
6280 return IRQ_RETVAL(handled);
6281}
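
/*
 * Registration sketch (assumption: a simple SFF driver): ata_interrupt
 * is normally passed straight to ata_host_activate(), which forwards
 * it to request_irq() with the ata_host as dev_instance, e.g.
 *
 *	rc = ata_host_activate(host, pdev->irq, ata_interrupt,
 *			       IRQF_SHARED, &example_sht);
 */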
6282
34bf2170
TH
6283/**
6284 * sata_scr_valid - test whether SCRs are accessible
936fd732 6285 * @link: ATA link to test SCR accessibility for
34bf2170 6286 *
936fd732 6287 * Test whether SCRs are accessible for @link.
34bf2170
TH
6288 *
6289 * LOCKING:
6290 * None.
6291 *
6292 * RETURNS:
6293 * 1 if SCRs are accessible, 0 otherwise.
6294 */
936fd732 6295int sata_scr_valid(struct ata_link *link)
34bf2170 6296{
936fd732
TH
6297 struct ata_port *ap = link->ap;
6298
a16abc0b 6299 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6300}
6301
6302/**
6303 * sata_scr_read - read SCR register of the specified port
936fd732 6304 * @link: ATA link to read SCR for
34bf2170
TH
6305 * @reg: SCR to read
6306 * @val: Place to store read value
6307 *
936fd732 6308 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6309 * guaranteed to succeed if @link is ap->link, the cable type of
6310 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6311 *
6312 * LOCKING:
633273a3 6313 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6314 *
6315 * RETURNS:
6316 * 0 on success, negative errno on failure.
6317 */
936fd732 6318int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6319{
633273a3
TH
6320 if (ata_is_host_link(link)) {
6321 struct ata_port *ap = link->ap;
936fd732 6322
633273a3
TH
6323 if (sata_scr_valid(link))
6324 return ap->ops->scr_read(ap, reg, val);
6325 return -EOPNOTSUPP;
6326 }
6327
6328 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6329}
6330
6331/**
6332 * sata_scr_write - write SCR register of the specified port
936fd732 6333 * @link: ATA link to write SCR for
34bf2170
TH
6334 * @reg: SCR to write
6335 * @val: value to write
6336 *
936fd732 6337 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6338 * guaranteed to succeed if @link is ap->link, the cable type of
6339 * the port is SATA and the port implements ->scr_write.
34bf2170
TH
6340 *
6341 * LOCKING:
633273a3 6342 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6343 *
6344 * RETURNS:
6345 * 0 on success, negative errno on failure.
6346 */
936fd732 6347int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6348{
633273a3
TH
6349 if (ata_is_host_link(link)) {
6350 struct ata_port *ap = link->ap;
6351
6352 if (sata_scr_valid(link))
6353 return ap->ops->scr_write(ap, reg, val);
6354 return -EOPNOTSUPP;
6355 }
936fd732 6356
633273a3 6357 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6358}
6359
6360/**
6361 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6362 * @link: ATA link to write SCR for
34bf2170
TH
6363 * @reg: SCR to write
6364 * @val: value to write
6365 *
6366 * This function is identical to sata_scr_write() except that this
6367 * function performs flush after writing to the register.
6368 *
6369 * LOCKING:
633273a3 6370 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6371 *
6372 * RETURNS:
6373 * 0 on success, negative errno on failure.
6374 */
936fd732 6375int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6376{
633273a3
TH
6377 if (ata_is_host_link(link)) {
6378 struct ata_port *ap = link->ap;
6379 int rc;
da3dbb17 6380
633273a3
TH
6381 if (sata_scr_valid(link)) {
6382 rc = ap->ops->scr_write(ap, reg, val);
6383 if (rc == 0)
6384 rc = ap->ops->scr_read(ap, reg, &val);
6385 return rc;
6386 }
6387 return -EOPNOTSUPP;
34bf2170 6388 }
633273a3
TH
6389
6390 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6391}
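
/*
 * Usage sketch (hypothetical, names invented): a read-modify-write of
 * SControl through the accessors above, capping the link at 1.5Gbps by
 * setting the SPD field (bits 7:4) to 1.  Real speed-limit handling
 * lives in sata_set_spd(); this only illustrates the SCR helpers.
 */
static int example_cap_link_gen1(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	scontrol = (scontrol & ~0xf0) | (1 << 4);	/* SPD = gen 1 */

	return sata_scr_write_flush(link, SCR_CONTROL, scontrol);
}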
6392
6393/**
936fd732
TH
6394 * ata_link_online - test whether the given link is online
6395 * @link: ATA link to test
34bf2170 6396 *
936fd732
TH
6397 * Test whether @link is online. Note that this function returns
6398 * 0 if online status of @link cannot be obtained, so
6399 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6400 *
6401 * LOCKING:
6402 * None.
6403 *
6404 * RETURNS:
6405 * 1 if the port online status is available and online.
6406 */
936fd732 6407int ata_link_online(struct ata_link *link)
34bf2170
TH
6408{
6409 u32 sstatus;
6410
936fd732
TH
6411 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6412 (sstatus & 0xf) == 0x3)
34bf2170
TH
6413 return 1;
6414 return 0;
6415}
6416
6417/**
936fd732
TH
6418 * ata_link_offline - test whether the given link is offline
6419 * @link: ATA link to test
34bf2170 6420 *
936fd732
TH
6421 * Test whether @link is offline. Note that this function
6422 * returns 0 if offline status of @link cannot be obtained, so
6423 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6424 *
6425 * LOCKING:
6426 * None.
6427 *
6428 * RETURNS:
6429 * 1 if the port offline status is available and offline.
6430 */
936fd732 6431int ata_link_offline(struct ata_link *link)
34bf2170
TH
6432{
6433 u32 sstatus;
6434
936fd732
TH
6435 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6436 (sstatus & 0xf) != 0x3)
34bf2170
TH
6437 return 1;
6438 return 0;
6439}
6439}
0baab86b 6440
77b08fb5 6441int ata_flush_cache(struct ata_device *dev)
9b847548 6442{
977e6b9f 6443 unsigned int err_mask;
9b847548
JA
6444 u8 cmd;
6445
6446 if (!ata_try_flush_cache(dev))
6447 return 0;
6448
6fc49adb 6449 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6450 cmd = ATA_CMD_FLUSH_EXT;
6451 else
6452 cmd = ATA_CMD_FLUSH;
6453
4f34337b
AC
6454 /* This is wrong. On a failed flush we get back the LBA of the lost
6455 sector and we should (assuming it wasn't aborted as unknown) issue
2dcb407e 6456 a further flush command to continue the writeback until it
4f34337b 6457 does not error */
977e6b9f
TH
6458 err_mask = ata_do_simple_cmd(dev, cmd);
6459 if (err_mask) {
6460 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6461 return -EIO;
6462 }
6463
6464 return 0;
9b847548
JA
6465}
6466
6ffa01d8 6467#ifdef CONFIG_PM
cca3974e
JG
6468static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6469 unsigned int action, unsigned int ehi_flags,
6470 int wait)
500530f6
TH
6471{
6472 unsigned long flags;
6473 int i, rc;
6474
cca3974e
JG
6475 for (i = 0; i < host->n_ports; i++) {
6476 struct ata_port *ap = host->ports[i];
e3667ebf 6477 struct ata_link *link;
500530f6
TH
6478
6479 /* Previous resume operation might still be in
6480 * progress. Wait for PM_PENDING to clear.
6481 */
6482 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6483 ata_port_wait_eh(ap);
6484 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6485 }
6486
6487 /* request PM ops to EH */
6488 spin_lock_irqsave(ap->lock, flags);
6489
6490 ap->pm_mesg = mesg;
6491 if (wait) {
6492 rc = 0;
6493 ap->pm_result = &rc;
6494 }
6495
6496 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
6497 __ata_port_for_each_link(link, ap) {
6498 link->eh_info.action |= action;
6499 link->eh_info.flags |= ehi_flags;
6500 }
500530f6
TH
6501
6502 ata_port_schedule_eh(ap);
6503
6504 spin_unlock_irqrestore(ap->lock, flags);
6505
6506 /* wait and check result */
6507 if (wait) {
6508 ata_port_wait_eh(ap);
6509 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6510 if (rc)
6511 return rc;
6512 }
6513 }
6514
6515 return 0;
6516}
6517
6518/**
cca3974e
JG
6519 * ata_host_suspend - suspend host
6520 * @host: host to suspend
500530f6
TH
6521 * @mesg: PM message
6522 *
cca3974e 6523 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6524 * function requests EH to perform PM operations and waits for EH
6525 * to finish.
6526 *
6527 * LOCKING:
6528 * Kernel thread context (may sleep).
6529 *
6530 * RETURNS:
6531 * 0 on success, -errno on failure.
6532 */
cca3974e 6533int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6534{
9666f400 6535 int rc;
500530f6 6536
ca77329f
KCA
6537 /*
6538 * disable link pm on all ports before requesting
6539 * any pm activity
6540 */
6541 ata_lpm_enable(host);
6542
cca3974e 6543 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
6544 if (rc == 0)
6545 host->dev->power.power_state = mesg;
500530f6
TH
6546 return rc;
6547}
6548
6549/**
cca3974e
JG
6550 * ata_host_resume - resume host
6551 * @host: host to resume
500530f6 6552 *
cca3974e 6553 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6554 * function requests EH to perform PM operations and returns.
6555 * Note that all resume operations are performed in parallel.
6556 *
6557 * LOCKING:
6558 * Kernel thread context (may sleep).
6559 */
cca3974e 6560void ata_host_resume(struct ata_host *host)
500530f6 6561{
cca3974e
JG
6562 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6563 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6564 host->dev->power.power_state = PMSG_ON;
ca77329f
KCA
6565
6566 /* reenable link pm */
6567 ata_lpm_disable(host);
500530f6 6568}
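
/*
 * Usage sketch (hypothetical PCI LLD; compare ata_pci_device_suspend()):
 * a driver's suspend hook usually just resolves its ata_host from
 * drvdata and defers to ata_host_suspend().  Resume is symmetric with
 * ata_host_resume() once the device is powered up again.
 */
static int example_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	return ata_host_suspend(host, mesg);
}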
6ffa01d8 6569#endif
500530f6 6570
c893a3ae
RD
6571/**
6572 * ata_port_start - Set port up for dma.
6573 * @ap: Port to initialize
6574 *
6575 * Called just after data structures for each port are
6576 * initialized. Allocates space for PRD table.
6577 *
6578 * May be used as the port_start() entry in ata_port_operations.
6579 *
6580 * LOCKING:
6581 * Inherited from caller.
6582 */
f0d36efd 6583int ata_port_start(struct ata_port *ap)
1da177e4 6584{
2f1f610b 6585 struct device *dev = ap->dev;
6037d6bb 6586 int rc;
1da177e4 6587
f0d36efd
TH
6588 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6589 GFP_KERNEL);
1da177e4
LT
6590 if (!ap->prd)
6591 return -ENOMEM;
6592
6037d6bb 6593 rc = ata_pad_alloc(ap, dev);
f0d36efd 6594 if (rc)
6037d6bb 6595 return rc;
1da177e4 6596
f0d36efd
TH
6597 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6598 (unsigned long long)ap->prd_dma);
1da177e4
LT
6599 return 0;
6600}
6601
3ef3b43d
TH
6602/**
6603 * ata_dev_init - Initialize an ata_device structure
6604 * @dev: Device structure to initialize
6605 *
6606 * Initialize @dev in preparation for probing.
6607 *
6608 * LOCKING:
6609 * Inherited from caller.
6610 */
6611void ata_dev_init(struct ata_device *dev)
6612{
9af5c9c9
TH
6613 struct ata_link *link = dev->link;
6614 struct ata_port *ap = link->ap;
72fa4b74
TH
6615 unsigned long flags;
6616
5a04bf4b 6617 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
6618 link->sata_spd_limit = link->hw_sata_spd_limit;
6619 link->sata_spd = 0;
5a04bf4b 6620
72fa4b74
TH
6621 /* High bits of dev->flags are used to record warm plug
6622 * requests which occur asynchronously. Synchronize using
cca3974e 6623 * host lock.
72fa4b74 6624 */
ba6a1308 6625 spin_lock_irqsave(ap->lock, flags);
72fa4b74 6626 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 6627 dev->horkage = 0;
ba6a1308 6628 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 6629
72fa4b74
TH
6630 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6631 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
6632 dev->pio_mask = UINT_MAX;
6633 dev->mwdma_mask = UINT_MAX;
6634 dev->udma_mask = UINT_MAX;
6635}
6636
4fb37a25
TH
6637/**
6638 * ata_link_init - Initialize an ata_link structure
6639 * @ap: ATA port link is attached to
6640 * @link: Link structure to initialize
8989805d 6641 * @pmp: Port multiplier port number
4fb37a25
TH
6642 *
6643 * Initialize @link.
6644 *
6645 * LOCKING:
6646 * Kernel thread context (may sleep)
6647 */
fb7fd614 6648void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6649{
6650 int i;
6651
6652 /* clear everything except for devices */
6653 memset(link, 0, offsetof(struct ata_link, device[0]));
6654
6655 link->ap = ap;
8989805d 6656 link->pmp = pmp;
4fb37a25
TH
6657 link->active_tag = ATA_TAG_POISON;
6658 link->hw_sata_spd_limit = UINT_MAX;
6659
6660 /* can't use iterator, ap isn't initialized yet */
6661 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6662 struct ata_device *dev = &link->device[i];
6663
6664 dev->link = link;
6665 dev->devno = dev - link->device;
6666 ata_dev_init(dev);
6667 }
6668}
6669
6670/**
6671 * sata_link_init_spd - Initialize link->sata_spd_limit
6672 * @link: Link to configure sata_spd_limit for
6673 *
6674 * Initialize @link->[hw_]sata_spd_limit to the currently
6675 * configured value.
6676 *
6677 * LOCKING:
6678 * Kernel thread context (may sleep).
6679 *
6680 * RETURNS:
6681 * 0 on success, -errno on failure.
6682 */
fb7fd614 6683int sata_link_init_spd(struct ata_link *link)
4fb37a25
TH
6684{
6685 u32 scontrol, spd;
6686 int rc;
6687
6688 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6689 if (rc)
6690 return rc;
6691
6692 spd = (scontrol >> 4) & 0xf;
6693 if (spd)
6694 link->hw_sata_spd_limit &= (1 << spd) - 1;
6695
6696 link->sata_spd_limit = link->hw_sata_spd_limit;
6697
6698 return 0;
6699}
6700
1da177e4 6701/**
f3187195
TH
6702 * ata_port_alloc - allocate and initialize basic ATA port resources
6703 * @host: ATA host this allocated port belongs to
1da177e4 6704 *
f3187195
TH
6705 * Allocate and initialize basic ATA port resources.
6706 *
6707 * RETURNS:
6708 * Allocate ATA port on success, NULL on failure.
0cba632b 6709 *
1da177e4 6710 * LOCKING:
f3187195 6711 * Inherited from calling layer (may sleep).
1da177e4 6712 */
f3187195 6713struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 6714{
f3187195 6715 struct ata_port *ap;
1da177e4 6716
f3187195
TH
6717 DPRINTK("ENTER\n");
6718
6719 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6720 if (!ap)
6721 return NULL;
6722
f4d6d004 6723 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6724 ap->lock = &host->lock;
198e0fed 6725 ap->flags = ATA_FLAG_DISABLED;
f3187195 6726 ap->print_id = -1;
1da177e4 6727 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6728 ap->host = host;
f3187195 6729 ap->dev = host->dev;
1da177e4 6730 ap->last_ctl = 0xFF;
bd5d825c
BP
6731
6732#if defined(ATA_VERBOSE_DEBUG)
6733 /* turn on all debugging levels */
6734 ap->msg_enable = 0x00FF;
6735#elif defined(ATA_DEBUG)
6736 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6737#else
0dd4b21f 6738 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6739#endif
1da177e4 6740
442eacc3 6741 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
65f27f38
DH
6742 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6743 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6744 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6745 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
6746 init_timer_deferrable(&ap->fastdrain_timer);
6747 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6748 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 6749
838df628 6750 ap->cbl = ATA_CBL_NONE;
838df628 6751
8989805d 6752 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
6753
6754#ifdef ATA_IRQ_TRAP
6755 ap->stats.unhandled_irq = 1;
6756 ap->stats.idle_irq = 1;
6757#endif
1da177e4 6758 return ap;
1da177e4
LT
6759}
6760
f0d36efd
TH
6761static void ata_host_release(struct device *gendev, void *res)
6762{
6763 struct ata_host *host = dev_get_drvdata(gendev);
6764 int i;
6765
1aa506e4
TH
6766 for (i = 0; i < host->n_ports; i++) {
6767 struct ata_port *ap = host->ports[i];
6768
4911487a
TH
6769 if (!ap)
6770 continue;
6771
6772 if (ap->scsi_host)
1aa506e4
TH
6773 scsi_host_put(ap->scsi_host);
6774
633273a3 6775 kfree(ap->pmp_link);
4911487a 6776 kfree(ap);
1aa506e4
TH
6777 host->ports[i] = NULL;
6778 }
6779
1aa56cca 6780 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6781}
6782
f3187195
TH
6783/**
6784 * ata_host_alloc - allocate and init basic ATA host resources
6785 * @dev: generic device this host is associated with
6786 * @max_ports: maximum number of ATA ports associated with this host
6787 *
6788 * Allocate and initialize basic ATA host resources. LLD calls
6789 * this function to allocate a host, initializes it fully and
6790 * attaches it using ata_host_register().
6791 *
6792 * @max_ports ports are allocated and host->n_ports is
6793 * initialized to @max_ports. The caller is allowed to decrease
6794 * host->n_ports before calling ata_host_register(). The unused
6795 * ports will be automatically freed on registration.
6796 *
6797 * RETURNS:
6798 * Allocate ATA host on success, NULL on failure.
6799 *
6800 * LOCKING:
6801 * Inherited from calling layer (may sleep).
6802 */
6803struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6804{
6805 struct ata_host *host;
6806 size_t sz;
6807 int i;
6808
6809 DPRINTK("ENTER\n");
6810
6811 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6812 return NULL;
6813
6814 /* alloc a container for our list of ATA ports (buses) */
6815 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6817 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6818 if (!host)
6819 goto err_out;
6820
6821 devres_add(dev, host);
6822 dev_set_drvdata(dev, host);
6823
6824 spin_lock_init(&host->lock);
6825 host->dev = dev;
6826 host->n_ports = max_ports;
6827
6828 /* allocate ports bound to this host */
6829 for (i = 0; i < max_ports; i++) {
6830 struct ata_port *ap;
6831
6832 ap = ata_port_alloc(host);
6833 if (!ap)
6834 goto err_out;
6835
6836 ap->port_no = i;
6837 host->ports[i] = ap;
6838 }
6839
6840 devres_remove_group(dev, NULL);
6841 return host;
6842
6843 err_out:
6844 devres_release_group(dev, NULL);
6845 return NULL;
6846}
6847
f5cda257
TH
6848/**
6849 * ata_host_alloc_pinfo - alloc host and init with port_info array
6850 * @dev: generic device this host is associated with
6851 * @ppi: array of ATA port_info to initialize host with
6852 * @n_ports: number of ATA ports attached to this host
6853 *
6854 * Allocate ATA host and initialize with info from @ppi. If NULL
6855 * terminated, @ppi may contain fewer entries than @n_ports. The
6856 * last entry will be used for the remaining ports.
6857 *
6858 * RETURNS:
6859 * Allocate ATA host on success, NULL on failure.
6860 *
6861 * LOCKING:
6862 * Inherited from calling layer (may sleep).
6863 */
6864struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6865 const struct ata_port_info * const * ppi,
6866 int n_ports)
6867{
6868 const struct ata_port_info *pi;
6869 struct ata_host *host;
6870 int i, j;
6871
6872 host = ata_host_alloc(dev, n_ports);
6873 if (!host)
6874 return NULL;
6875
6876 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6877 struct ata_port *ap = host->ports[i];
6878
6879 if (ppi[j])
6880 pi = ppi[j++];
6881
6882 ap->pio_mask = pi->pio_mask;
6883 ap->mwdma_mask = pi->mwdma_mask;
6884 ap->udma_mask = pi->udma_mask;
6885 ap->flags |= pi->flags;
0c88758b 6886 ap->link.flags |= pi->link_flags;
f5cda257
TH
6887 ap->ops = pi->port_ops;
6888
6889 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6890 host->ops = pi->port_ops;
6891 if (!host->private_data && pi->private_data)
6892 host->private_data = pi->private_data;
6893 }
6894
6895 return host;
6896}
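
/*
 * Usage sketch (hypothetical LLD probe, names invented): the canonical
 * init sequence built from the helpers in this file.  A real driver
 * would also map its registers and fill ap->ioaddr before activating.
 */
static int example_lld_probe(struct device *dev, int irq,
			     const struct ata_port_info *pi,
			     struct scsi_host_template *sht)
{
	const struct ata_port_info *ppi[] = { pi, NULL };
	struct ata_host *host;

	host = ata_host_alloc_pinfo(dev, ppi, 1);
	if (!host)
		return -ENOMEM;

	return ata_host_activate(host, irq, ata_interrupt,
				 IRQF_SHARED, sht);
}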
6897
32ebbc0c
TH
6898static void ata_host_stop(struct device *gendev, void *res)
6899{
6900 struct ata_host *host = dev_get_drvdata(gendev);
6901 int i;
6902
6903 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6904
6905 for (i = 0; i < host->n_ports; i++) {
6906 struct ata_port *ap = host->ports[i];
6907
6908 if (ap->ops->port_stop)
6909 ap->ops->port_stop(ap);
6910 }
6911
6912 if (host->ops->host_stop)
6913 host->ops->host_stop(host);
6914}
6915
ecef7253
TH
6916/**
6917 * ata_host_start - start and freeze ports of an ATA host
6918 * @host: ATA host to start ports for
6919 *
6920 * Start and then freeze ports of @host. Started status is
6921 * recorded in host->flags, so this function can be called
6922 * multiple times. Ports are guaranteed to get started only
f3187195
TH
6923 * once. If host->ops isn't initialized yet, it's set to the
6924 * first non-dummy port ops.
ecef7253
TH
6925 *
6926 * LOCKING:
6927 * Inherited from calling layer (may sleep).
6928 *
6929 * RETURNS:
6930 * 0 if all ports are started successfully, -errno otherwise.
6931 */
6932int ata_host_start(struct ata_host *host)
6933{
32ebbc0c
TH
6934 int have_stop = 0;
6935 void *start_dr = NULL;
ecef7253
TH
6936 int i, rc;
6937
6938 if (host->flags & ATA_HOST_STARTED)
6939 return 0;
6940
6941 for (i = 0; i < host->n_ports; i++) {
6942 struct ata_port *ap = host->ports[i];
6943
f3187195
TH
6944 if (!host->ops && !ata_port_is_dummy(ap))
6945 host->ops = ap->ops;
6946
32ebbc0c
TH
6947 if (ap->ops->port_stop)
6948 have_stop = 1;
6949 }
6950
6951 if (host->ops->host_stop)
6952 have_stop = 1;
6953
6954 if (have_stop) {
6955 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6956 if (!start_dr)
6957 return -ENOMEM;
6958 }
6959
6960 for (i = 0; i < host->n_ports; i++) {
6961 struct ata_port *ap = host->ports[i];
6962
ecef7253
TH
6963 if (ap->ops->port_start) {
6964 rc = ap->ops->port_start(ap);
6965 if (rc) {
0f9fe9b7 6966 if (rc != -ENODEV)
0f757743
AM
6967 dev_printk(KERN_ERR, host->dev,
6968 "failed to start port %d "
6969 "(errno=%d)\n", i, rc);
ecef7253
TH
6970 goto err_out;
6971 }
6972 }
ecef7253
TH
6973 ata_eh_freeze_port(ap);
6974 }
6975
32ebbc0c
TH
6976 if (start_dr)
6977 devres_add(host->dev, start_dr);
ecef7253
TH
6978 host->flags |= ATA_HOST_STARTED;
6979 return 0;
6980
6981 err_out:
6982 while (--i >= 0) {
6983 struct ata_port *ap = host->ports[i];
6984
6985 if (ap->ops->port_stop)
6986 ap->ops->port_stop(ap);
6987 }
32ebbc0c 6988 devres_free(start_dr);
ecef7253
TH
6989 return rc;
6990}
6991
b03732f0 6992/**
cca3974e
JG
6993 * ata_sas_host_init - Initialize a host struct
6994 * @host: host to initialize
6995 * @dev: device host is attached to
6996 * @flags: host flags
6997 * @ops: port_ops
b03732f0
BK
6998 *
6999 * LOCKING:
7000 * PCI/etc. bus probe sem.
7001 *
7002 */
f3187195 7003/* KILLME - the only user left is ipr */
cca3974e
JG
7004void ata_host_init(struct ata_host *host, struct device *dev,
7005 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 7006{
cca3974e
JG
7007 spin_lock_init(&host->lock);
7008 host->dev = dev;
7009 host->flags = flags;
7010 host->ops = ops;
b03732f0
BK
7011}
7012
f3187195
TH
7013/**
7014 * ata_host_register - register initialized ATA host
7015 * @host: ATA host to register
7016 * @sht: template for SCSI host
7017 *
7018 * Register initialized ATA host. @host is allocated using
7019 * ata_host_alloc() and fully initialized by LLD. This function
7020 * starts ports, registers @host with ATA and SCSI layers and
7021 * probe registered devices.
7022 *
7023 * LOCKING:
7024 * Inherited from calling layer (may sleep).
7025 *
7026 * RETURNS:
7027 * 0 on success, -errno otherwise.
7028 */
7029int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7030{
7031 int i, rc;
7032
7033 /* host must have been started */
7034 if (!(host->flags & ATA_HOST_STARTED)) {
7035 dev_printk(KERN_ERR, host->dev,
7036 "BUG: trying to register unstarted host\n");
7037 WARN_ON(1);
7038 return -EINVAL;
7039 }
7040
7041 /* Blow away unused ports. This happens when LLD can't
7042 * determine the exact number of ports to allocate at
7043 * allocation time.
7044 */
7045 for (i = host->n_ports; host->ports[i]; i++)
7046 kfree(host->ports[i]);
7047
7048 /* give ports names and add SCSI hosts */
7049 for (i = 0; i < host->n_ports; i++)
7050 host->ports[i]->print_id = ata_print_id++;
7051
7052 rc = ata_scsi_add_hosts(host, sht);
7053 if (rc)
7054 return rc;
7055
fafbae87
TH
7056 /* associate with ACPI nodes */
7057 ata_acpi_associate(host);
7058
f3187195
TH
7059 /* set cable, sata_spd_limit and report */
7060 for (i = 0; i < host->n_ports; i++) {
7061 struct ata_port *ap = host->ports[i];
f3187195
TH
7062 unsigned long xfer_mask;
7063
7064 /* set SATA cable type if still unset */
7065 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7066 ap->cbl = ATA_CBL_SATA;
7067
7068 /* init sata_spd_limit to the current value */
4fb37a25 7069 sata_link_init_spd(&ap->link);
f3187195 7070
cbcdd875 7071 /* print per-port info to dmesg */
f3187195
TH
7072 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7073 ap->udma_mask);
7074
abf6e8ed 7075 if (!ata_port_is_dummy(ap)) {
cbcdd875
TH
7076 ata_port_printk(ap, KERN_INFO,
7077 "%cATA max %s %s\n",
a16abc0b 7078 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 7079 ata_mode_string(xfer_mask),
cbcdd875 7080 ap->link.eh_info.desc);
abf6e8ed
TH
7081 ata_ehi_clear_desc(&ap->link.eh_info);
7082 } else
f3187195
TH
7083 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7084 }
7085
7086 /* perform each probe synchronously */
7087 DPRINTK("probe begin\n");
7088 for (i = 0; i < host->n_ports; i++) {
7089 struct ata_port *ap = host->ports[i];
f3187195
TH
7090
7091 /* probe */
7092 if (ap->ops->error_handler) {
9af5c9c9 7093 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
7094 unsigned long flags;
7095
7096 ata_port_probe(ap);
7097
7098 /* kick EH for boot probing */
7099 spin_lock_irqsave(ap->lock, flags);
7100
f58229f8
TH
7101 ehi->probe_mask =
7102 (1 << ata_link_max_devices(&ap->link)) - 1;
f3187195
TH
7103 ehi->action |= ATA_EH_SOFTRESET;
7104 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7105
f4d6d004 7106 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
7107 ap->pflags |= ATA_PFLAG_LOADING;
7108 ata_port_schedule_eh(ap);
7109
7110 spin_unlock_irqrestore(ap->lock, flags);
7111
7112 /* wait for EH to finish */
7113 ata_port_wait_eh(ap);
7114 } else {
7115 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7116 rc = ata_bus_probe(ap);
7117 DPRINTK("ata%u: bus probe end\n", ap->print_id);
7118
7119 if (rc) {
7120 /* FIXME: do something useful here?
7121 * Current libata behavior will
7122 * tear down everything when
7123 * the module is removed
7124 * or the h/w is unplugged.
7125 */
7126 }
7127 }
7128 }
7129
7130 /* probes are done, now scan each port's disk(s) */
7131 DPRINTK("host probe begin\n");
7132 for (i = 0; i < host->n_ports; i++) {
7133 struct ata_port *ap = host->ports[i];
7134
1ae46317 7135 ata_scsi_scan_host(ap, 1);
ca77329f 7136 ata_lpm_schedule(ap, ap->pm_policy);
f3187195
TH
7137 }
7138
7139 return 0;
7140}
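
/*
 * Example: the same three-step sequence done by hand, for a controller
 * with one IRQ line per port where ata_host_activate() does not fit.
 * A sketch under assumed names - my_port_irq[], my_interrupt and
 * my_sht are hypothetical.
 */
#if 0	/* illustrative sketch, not built */
static int my_start_and_register(struct ata_host *host)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* request one IRQ per port instead of a shared one */
	for (i = 0; i < host->n_ports; i++) {
		rc = devm_request_irq(host->dev, my_port_irq[i],
				      my_interrupt, 0, "my_ata", host);
		if (rc)
			return rc;
	}

	return ata_host_register(host, &my_sht);
}
#endif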

/**
 * ata_host_activate - start host, request IRQ and register it
 * @host: target ATA host
 * @irq: IRQ to request
 * @irq_handler: irq_handler used when requesting IRQ
 * @irq_flags: irq_flags used when requesting IRQ
 * @sht: scsi_host_template to use when registering the host
 *
 * After allocating an ATA host and initializing it, most libata
 * LLDs perform three steps to activate the host - start host,
 * request IRQ and register it.  This helper takes necessary
 * arguments and performs the three steps in one go.
 *
 * An invalid IRQ skips the IRQ registration and expects the host to
 * have set polling mode on the port.  In this case, @irq_handler
 * should be NULL.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
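
/*
 * Example: activating a polling-only host - passing IRQ 0 and a NULL
 * handler makes ata_host_activate() skip IRQ registration.  A sketch
 * assuming a my_port_info that sets ATA_FLAG_PIO_POLLING and a
 * hypothetical my_sht.
 */
#if 0	/* illustrative sketch, not built */
static int my_polling_probe_tail(struct device *dev)
{
	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
	struct ata_host *host;

	host = ata_host_alloc_pinfo(dev, ppi, 1);
	if (!host)
		return -ENOMEM;

	return ata_host_activate(host, 0, NULL, 0, &my_sht);
}
#endif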

/**
 * ata_port_detach - Detach ATA port in preparation of device removal
 * @ap: ATA port to be detached
 *
 * Detach all ATA devices and the associated SCSI devices of @ap;
 * then, remove the associated SCSI host.  @ap is guaranteed to
 * be quiescent on return from this function.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING - EH context belongs
	 * to us.  Disable all existing devices.
	 */
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retries will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}

/**
 * ata_host_detach - Detach all ports of an ATA host
 * @host: Host to detach
 *
 * Detach all ports of @host.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}

/**
 * ata_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
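
/*
 * Example: seeding cmd_addr/ctl_addr from a legacy I/O range and
 * letting ata_std_ports() derive the remaining taskfile registers.
 * The 0x1f0/0x3f6 bases are the classic primary-channel ports; real
 * drivers take them from their BAR or platform resources.
 */
#if 0	/* illustrative sketch, not built */
static void my_setup_ioaddr(struct ata_port *ap, struct device *dev)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = devm_ioport_map(dev, 0x1f0, 8);
	ioaddr->ctl_addr = devm_ioport_map(dev, 0x3f6, 1);
	ioaddr->altstatus_addr = ioaddr->ctl_addr;

	/* fills data_addr..command_addr at offsets from cmd_addr */
	ata_std_ports(ioaddr);
}
#endif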


#ifdef CONFIG_PCI

/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that hot-unplug or
 * module unload event has occurred.  Detach all ports.  Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
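
/*
 * Example: testing per-port enable bits in PCI config space before
 * bringing a port up.  The offsets/masks below are made up for
 * illustration; every chipset defines its own enable bits.
 */
#if 0	/* illustrative sketch, not built */
static const struct pci_bits my_enable_bits[] = {
	{ 0x41, 1, 0x80, 0x80 },	/* port 0 */
	{ 0x43, 1, 0x80, 0x80 },	/* port 1 */
};

static int my_port_enabled(struct pci_dev *pdev, int port)
{
	/* 1 if (reg & mask) == val, 0 if not, -EINVAL on bad width */
	return pci_test_config_bits(pdev, &my_enable_bits[port]);
}
#endif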

#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */
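
/*
 * Example: how a PCI LLD typically wires the helpers above into its
 * struct pci_driver (assumes CONFIG_PCI; my_pci_ids and my_init_one
 * are hypothetical).
 */
#if 0	/* illustrative sketch, not built */
static struct pci_driver my_pci_driver = {
	.name			= "my_ata",
	.id_table		= my_pci_ids,
	.probe			= my_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= ata_pci_device_resume,
#endif
};
#endif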


static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
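
/*
 * Example: throttling a noisy diagnostic with ata_ratelimit(), which
 * allows at most one message per HZ/5 jiffies.  MY_SPURIOUS_IRQ and
 * the status value are hypothetical.
 */
#if 0	/* illustrative sketch, not built */
static void my_note_spurious_irq(struct ata_port *ap, u32 status)
{
	if (unlikely(status & MY_SPURIOUS_IRQ) && ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt (status 0x%x)\n", status);
}
#endif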

/**
 * ata_wait_register - wait until register value changes
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
 * @interval_msec: polling interval in milliseconds
 * @timeout_msec: timeout in milliseconds
 *
 * Waiting for some bits of a register to change is a common
 * operation for ATA controllers.  This function reads a 32bit LE
 * IO-mapped register @reg and tests for the following condition:
 *
 * (*@reg & mask) != val
 *
 * If the condition is met, it returns; otherwise, the process is
 * repeated after @interval_msec until timeout.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
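
/*
 * Example: polling an MMIO status register until BSY drops, checking
 * every 10ms and giving up after 500ms.  The register pointer is the
 * caller's; only ata_wait_register() itself is from this file.
 */
#if 0	/* illustrative sketch, not built */
static int my_wait_not_busy(void __iomem *status_mmio)
{
	u32 status;

	/* loops while (status & ATA_BUSY) == ATA_BUSY, i.e. until !BSY */
	status = ata_wait_register(status_mmio, ATA_BUSY, ATA_BUSY, 10, 500);
	if (status & ATA_BUSY)
		return -EBUSY;	/* timed out */

	return 0;
}
#endif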

/*
 * Dummy port_ops
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
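
/*
 * Example: a two-channel controller whose second channel is not wired
 * up can point the unused slot at ata_dummy_port_info, so the port is
 * allocated but never touches hardware.  my_port_info is hypothetical.
 */
#if 0	/* illustrative sketch, not built */
static struct ata_host *my_alloc_host(struct pci_dev *pdev)
{
	const struct ata_port_info *ppi[] =
		{ &my_port_info, &ata_dummy_port_info };

	return ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
}
#endif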

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_do_eh);

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);