Commit: "libata: fix ata_acpi_gtm_xfermask()" — annotated (git blame) view of
drivers/ata/libata-core.c from the linux-2.6-block.git tree.
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/highmem.h>
50#include <linux/spinlock.h>
51#include <linux/blkdev.h>
52#include <linux/delay.h>
53#include <linux/timer.h>
54#include <linux/interrupt.h>
55#include <linux/completion.h>
56#include <linux/suspend.h>
57#include <linux/workqueue.h>
67846b30 58#include <linux/jiffies.h>
378f058c 59#include <linux/scatterlist.h>
2dcb407e 60#include <linux/io.h>
1da177e4 61#include <scsi/scsi.h>
193515d5 62#include <scsi/scsi_cmnd.h>
1da177e4
LT
63#include <scsi/scsi_host.h>
64#include <linux/libata.h>
1da177e4
LT
65#include <asm/semaphore.h>
66#include <asm/byteorder.h>
140b5e59 67#include <linux/cdrom.h>
1da177e4
LT
68
69#include "libata.h"
70
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/* forward declarations for helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* monotonically increasing id handed to each probed port for printing */
unsigned int ata_print_id = 1;
/* main and auxiliary workqueues used by libata */
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

/* module parameters; all 0444 entries are read-only after module load */
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
/* 0644: HPA handling may be toggled at runtime via sysfs */
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
130
0baab86b 131
1da177e4
LT
132/**
133 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
134 * @tf: Taskfile to convert
1da177e4 135 * @pmp: Port multiplier port
9977126c
TH
136 * @is_cmd: This FIS is for command
137 * @fis: Buffer into which data will output
1da177e4
LT
138 *
139 * Converts a standard ATA taskfile to a Serial ATA
140 * FIS structure (Register - Host to Device).
141 *
142 * LOCKING:
143 * Inherited from caller.
144 */
9977126c 145void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 146{
9977126c
TH
147 fis[0] = 0x27; /* Register - Host to Device FIS */
148 fis[1] = pmp & 0xf; /* Port multiplier number*/
149 if (is_cmd)
150 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
151
1da177e4
LT
152 fis[2] = tf->command;
153 fis[3] = tf->feature;
154
155 fis[4] = tf->lbal;
156 fis[5] = tf->lbam;
157 fis[6] = tf->lbah;
158 fis[7] = tf->device;
159
160 fis[8] = tf->hob_lbal;
161 fis[9] = tf->hob_lbam;
162 fis[10] = tf->hob_lbah;
163 fis[11] = tf->hob_feature;
164
165 fis[12] = tf->nsect;
166 fis[13] = tf->hob_nsect;
167 fis[14] = 0;
168 fis[15] = tf->ctl;
169
170 fis[16] = 0;
171 fis[17] = 0;
172 fis[18] = 0;
173 fis[19] = 0;
174}
175
176/**
177 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
178 * @fis: Buffer from which data will be input
179 * @tf: Taskfile to output
180 *
e12a1be6 181 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
182 *
183 * LOCKING:
184 * Inherited from caller.
185 */
186
057ace5e 187void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
188{
189 tf->command = fis[2]; /* status */
190 tf->feature = fis[3]; /* error */
191
192 tf->lbal = fis[4];
193 tf->lbam = fis[5];
194 tf->lbah = fis[6];
195 tf->device = fis[7];
196
197 tf->hob_lbal = fis[8];
198 tf->hob_lbam = fis[9];
199 tf->hob_lbah = fis[10];
200
201 tf->nsect = fis[12];
202 tf->hob_nsect = fis[13];
203}
204
8cbd6df1
AL
/* Command opcode lookup table indexed as:
 *   base (0 = pio multi, 8 = pio, 16 = dma)
 *   + fua (4) + lba48 (2) + write (1)
 * A zero entry means the combination is unsupported (e.g. FUA without
 * LBA48, or FUA reads).  Do NOT reorder: ata_rwcmd_protocol() computes
 * indices arithmetically.
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
234
235/**
8cbd6df1 236 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
237 * @tf: command to examine and configure
238 * @dev: device tf belongs to
1da177e4 239 *
2e9edbf8 240 * Examine the device configuration and tf->flags to calculate
8cbd6df1 241 * the proper read/write commands and protocol to use.
1da177e4
LT
242 *
243 * LOCKING:
244 * caller.
245 */
bd056d7e 246static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 247{
9a3dccc4 248 u8 cmd;
1da177e4 249
9a3dccc4 250 int index, fua, lba48, write;
2e9edbf8 251
9a3dccc4 252 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
253 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
254 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 255
8cbd6df1
AL
256 if (dev->flags & ATA_DFLAG_PIO) {
257 tf->protocol = ATA_PROT_PIO;
9a3dccc4 258 index = dev->multi_count ? 0 : 8;
9af5c9c9 259 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
260 /* Unable to use DMA due to host limitation */
261 tf->protocol = ATA_PROT_PIO;
0565c26d 262 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
263 } else {
264 tf->protocol = ATA_PROT_DMA;
9a3dccc4 265 index = 16;
8cbd6df1 266 }
1da177e4 267
9a3dccc4
TH
268 cmd = ata_rw_cmds[index + fua + lba48 + write];
269 if (cmd) {
270 tf->command = cmd;
271 return 0;
272 }
273 return -1;
1da177e4
LT
274}
275
35b649fe
TH
276/**
277 * ata_tf_read_block - Read block address from ATA taskfile
278 * @tf: ATA taskfile of interest
279 * @dev: ATA device @tf belongs to
280 *
281 * LOCKING:
282 * None.
283 *
284 * Read block address from @tf. This function can handle all
285 * three address formats - LBA, LBA48 and CHS. tf->protocol and
286 * flags select the address format to use.
287 *
288 * RETURNS:
289 * Block address read from @tf.
290 */
291u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
292{
293 u64 block = 0;
294
295 if (tf->flags & ATA_TFLAG_LBA) {
296 if (tf->flags & ATA_TFLAG_LBA48) {
297 block |= (u64)tf->hob_lbah << 40;
298 block |= (u64)tf->hob_lbam << 32;
299 block |= tf->hob_lbal << 24;
300 } else
301 block |= (tf->device & 0xf) << 24;
302
303 block |= tf->lbah << 16;
304 block |= tf->lbam << 8;
305 block |= tf->lbal;
306 } else {
307 u32 cyl, head, sect;
308
309 cyl = tf->lbam | (tf->lbah << 8);
310 head = tf->device & 0xf;
311 sect = tf->lbal;
312
313 block = (cyl * dev->heads + head) * dev->sectors + sect;
314 }
315
316 return block;
317}
318
bd056d7e
TH
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	/* Three addressing strategies, tried in order of preference:
	 * NCQ (queued, always LBA48), plain LBA28/LBA48, and finally CHS.
	 */
	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag lives in nsect bits 3-7, count in feature regs */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
441
cb95d562
TH
442/**
443 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
444 * @pio_mask: pio_mask
445 * @mwdma_mask: mwdma_mask
446 * @udma_mask: udma_mask
447 *
448 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
449 * unsigned int xfer_mask.
450 *
451 * LOCKING:
452 * None.
453 *
454 * RETURNS:
455 * Packed xfer_mask.
456 */
7dc951ae
TH
457unsigned long ata_pack_xfermask(unsigned long pio_mask,
458 unsigned long mwdma_mask,
459 unsigned long udma_mask)
cb95d562
TH
460{
461 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
462 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
463 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
464}
465
c0489e4e
TH
466/**
467 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
468 * @xfer_mask: xfer_mask to unpack
469 * @pio_mask: resulting pio_mask
470 * @mwdma_mask: resulting mwdma_mask
471 * @udma_mask: resulting udma_mask
472 *
473 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
474 * Any NULL distination masks will be ignored.
475 */
7dc951ae
TH
476void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
477 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
478{
479 if (pio_mask)
480 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
481 if (mwdma_mask)
482 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
483 if (udma_mask)
484 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
485}
486
/* Maps each transfer-class bit range in an xfer_mask (shift, width in
 * bits) to the base XFER_* mode value of that class.  Terminated by a
 * negative shift; walked linearly by the ata_xfer_*() helpers below.
 */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and count within xfer_mask */
	u8 base;		/* lowest XFER_* value of this class */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
496
497/**
498 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
499 * @xfer_mask: xfer_mask of interest
500 *
501 * Return matching XFER_* value for @xfer_mask. Only the highest
502 * bit of @xfer_mask is considered.
503 *
504 * LOCKING:
505 * None.
506 *
507 * RETURNS:
70cd071e 508 * Matching XFER_* value, 0xff if no match found.
cb95d562 509 */
7dc951ae 510u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
511{
512 int highbit = fls(xfer_mask) - 1;
513 const struct ata_xfer_ent *ent;
514
515 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
516 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
517 return ent->base + highbit - ent->shift;
70cd071e 518 return 0xff;
cb95d562
TH
519}
520
521/**
522 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
523 * @xfer_mode: XFER_* of interest
524 *
525 * Return matching xfer_mask for @xfer_mode.
526 *
527 * LOCKING:
528 * None.
529 *
530 * RETURNS:
531 * Matching xfer_mask, 0 if no match found.
532 */
7dc951ae 533unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
534{
535 const struct ata_xfer_ent *ent;
536
537 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
538 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
539 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
540 & ~((1 << ent->shift) - 1);
cb95d562
TH
541 return 0;
542}
543
544/**
545 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
546 * @xfer_mode: XFER_* of interest
547 *
548 * Return matching xfer_shift for @xfer_mode.
549 *
550 * LOCKING:
551 * None.
552 *
553 * RETURNS:
554 * Matching xfer_shift, -1 if no match found.
555 */
7dc951ae 556int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
557{
558 const struct ata_xfer_ent *ent;
559
560 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
561 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
562 return ent->shift;
563 return -1;
564}
565
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* one name per xfer_mask bit position, lowest bit first */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	/* empty mask or a bit beyond the table has no name */
	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}
611
4c360c81
TH
/* Map a SATA link speed number (1-based) to a human-readable string;
 * 0 or out-of-range yields "<unknown>".
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd >= 1 && (spd - 1) < ARRAY_SIZE(spd_str))
		return spd_str[spd - 1];

	return "<unknown>";
}
623
/* Disable @dev: warn, notify ACPI, force the transfer mode down to
 * PIO0 and bump dev->class (presumably moving it to the matching
 * *_UNSUP class value — relies on enum layout; confirm in libata.h).
 * No-op if the device is not currently enabled.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_acpi_on_disable(dev);
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		dev->class++;
	}
}
635
ca77329f
KCA
/* Configure Device Initiated Power Management (DIPM) and the link's
 * allowed interface power states according to @policy.  Writes the IPM
 * bits (8-9) of SControl and, for MIN_POWER only, issues SET FEATURES
 * to enable DIPM on the device.  Returns 0 on success, -EINVAL if the
 * host doesn't support IPM, or the SCR read/write error code.
 */
static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because Disks are too stupid to know that
	 * If the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL, and instead it
	 * just would give up.  So, for medium_power to
	 * work at all, we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void) err_mask;

	return 0;
}
719
/**
 *	ata_dev_enable_pm - enable SATA interface power management
 *	@dev: device to enable power management
 *	@policy: the link power management policy
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power
 *	policy, and then call driver specific callbacks for
 *	enabling Host Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: -EINVAL if IPM is not supported, 0 otherwise.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	/* any failure falls back to the safe MAX_PERFORMANCE policy */
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return /* rc */;	/* hopefully we can use 'rc' eventually */
}
752
#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, call driver specific callbacks for disabling Host
 *	Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	/* MAX_PERFORMANCE blocks all IPM transitions, i.e. PM off */
	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */
ca77329f
KCA
775
776void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
777{
778 ap->pm_policy = policy;
779 ap->link.eh_info.action |= ATA_EHI_LPM;
780 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
781 ata_port_schedule_eh(ap);
782}
783
#ifdef CONFIG_PM
/* NOTE(review): despite the name, this walks every device on every
 * port of @host and calls ata_dev_disable_pm() on it — it forces
 * device-side PM off; presumably a preparation step before a
 * host-wide transition.  Confirm intent against the callers.
 */
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

/* Re-schedule each port's current pm_policy through EH. */
static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */
ca77329f
KCA
811
812
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write alternating patterns; only the final 0x55/0xaa pair is
	 * read back, the earlier writes exercise register storage */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
855
1da177e4
LT
856/**
857 * ata_dev_classify - determine device type based on ATA-spec signature
858 * @tf: ATA taskfile register set for device to be identified
859 *
860 * Determine from taskfile register contents whether a device is
861 * ATA or ATAPI, as per "Signature and persistence" section
862 * of ATA/PI spec (volume 1, sect 5.14).
863 *
864 * LOCKING:
865 * None.
866 *
867 * RETURNS:
633273a3
TH
868 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
869 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 870 */
057ace5e 871unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
872{
873 /* Apple's open source Darwin code hints that some devices only
874 * put a proper signature into the LBA mid/high registers,
875 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
876 *
877 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
878 * signatures for ATA and ATAPI devices attached on SerialATA,
879 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
880 * spec has never mentioned about using different signatures
881 * for ATA/ATAPI devices. Then, Serial ATA II: Port
882 * Multiplier specification began to use 0x69/0x96 to identify
883 * port multpliers and 0x3c/0xc3 to identify SEMB device.
884 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
885 * 0x69/0x96 shortly and described them as reserved for
886 * SerialATA.
887 *
888 * We follow the current spec and consider that 0x69/0x96
889 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 890 */
633273a3 891 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
892 DPRINTK("found ATA device by sig\n");
893 return ATA_DEV_ATA;
894 }
895
633273a3 896 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
897 DPRINTK("found ATAPI device by sig\n");
898 return ATA_DEV_ATAPI;
899 }
900
633273a3
TH
901 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
902 DPRINTK("found PMP device by sig\n");
903 return ATA_DEV_PMP;
904 }
905
906 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 907 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
908 return ATA_DEV_SEMB_UNSUP; /* not yet */
909 }
910
1da177e4
LT
911 DPRINTK("unknown device\n");
912 return ATA_DEV_UNKNOWN;
913}
914
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;	/* feature reg doubles as error reg on read */
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;	/* any other error code: no device */

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		/* all-zero status with an ATA signature: treat as absent */
		class = ATA_DEV_NONE;

	return class;
}
983
984/**
6a62a04d 985 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
986 * @id: IDENTIFY DEVICE results we will examine
987 * @s: string into which data is output
988 * @ofs: offset into identify device page
989 * @len: length of string to return. must be an even number.
990 *
991 * The strings in the IDENTIFY DEVICE page are broken up into
992 * 16-bit chunks. Run through the string, and output each
993 * 8-bit chunk linearly, regardless of platform.
994 *
995 * LOCKING:
996 * caller.
997 */
998
6a62a04d
TH
999void ata_id_string(const u16 *id, unsigned char *s,
1000 unsigned int ofs, unsigned int len)
1da177e4
LT
1001{
1002 unsigned int c;
1003
1004 while (len > 0) {
1005 c = id[ofs] >> 8;
1006 *s = c;
1007 s++;
1008
1009 c = id[ofs] & 0xff;
1010 *s = c;
1011 s++;
1012
1013 ofs++;
1014 len -= 2;
1015 }
1016}
1017
0e949ff3 1018/**
6a62a04d 1019 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1020 * @id: IDENTIFY DEVICE results we will examine
1021 * @s: string into which data is output
1022 * @ofs: offset into identify device page
1023 * @len: length of string to return. must be an odd number.
1024 *
6a62a04d 1025 * This function is identical to ata_id_string except that it
0e949ff3
TH
1026 * trims trailing spaces and terminates the resulting string with
1027 * null. @len must be actual maximum length (even number) + 1.
1028 *
1029 * LOCKING:
1030 * caller.
1031 */
6a62a04d
TH
1032void ata_id_c_string(const u16 *id, unsigned char *s,
1033 unsigned int ofs, unsigned int len)
0e949ff3
TH
1034{
1035 unsigned char *p;
1036
1037 WARN_ON(!(len & 1));
1038
6a62a04d 1039 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1040
1041 p = s + strnlen(s, len - 1);
1042 while (p > s && p[-1] == ' ')
1043 p--;
1044 *p = '\0';
1045}
0baab86b 1046
db6f8759
TH
1047static u64 ata_id_n_sectors(const u16 *id)
1048{
1049 if (ata_id_has_lba(id)) {
1050 if (ata_id_has_lba48(id))
1051 return ata_id_u64(id, 100);
1052 else
1053 return ata_id_u32(id, 60);
1054 } else {
1055 if (ata_id_current_chs_valid(id))
1056 return ata_id_u32(id, 57);
1057 else
1058 return id[1] * id[3] * id[6];
1059 }
1060}
1061
1e999736
AC
1062static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1063{
1064 u64 sectors = 0;
1065
1066 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1067 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1068 sectors |= (tf->hob_lbal & 0xff) << 24;
1069 sectors |= (tf->lbah & 0xff) << 16;
1070 sectors |= (tf->lbam & 0xff) << 8;
1071 sectors |= (tf->lbal & 0xff);
1072
1073 return ++sectors;
1074}
1075
1076static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1077{
1078 u64 sectors = 0;
1079
1080 sectors |= (tf->device & 0x0f) << 24;
1081 sectors |= (tf->lbah & 0xff) << 16;
1082 sectors |= (tf->lbam & 0xff) << 8;
1083 sectors |= (tf->lbal & 0xff);
1084
1085 return ++sectors;
1086}
1087
1088/**
c728a914
TH
1089 * ata_read_native_max_address - Read native max address
1090 * @dev: target device
1091 * @max_sectors: out parameter for the result native max address
1e999736 1092 *
c728a914
TH
1093 * Perform an LBA48 or LBA28 native size query upon the device in
1094 * question.
1e999736 1095 *
c728a914
TH
1096 * RETURNS:
1097 * 0 on success, -EACCES if command is aborted by the drive.
1098 * -EIO on other errors.
1e999736 1099 */
c728a914 1100static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1101{
c728a914 1102 unsigned int err_mask;
1e999736 1103 struct ata_taskfile tf;
c728a914 1104 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1105
1106 ata_tf_init(dev, &tf);
1107
c728a914 1108 /* always clear all address registers */
1e999736 1109 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1110
c728a914
TH
1111 if (lba48) {
1112 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1113 tf.flags |= ATA_TFLAG_LBA48;
1114 } else
1115 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1116
1e999736 1117 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
1118 tf.device |= ATA_LBA;
1119
2b789108 1120 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1121 if (err_mask) {
1122 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1123 "max address (err_mask=0x%x)\n", err_mask);
1124 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1125 return -EACCES;
1126 return -EIO;
1127 }
1e999736 1128
c728a914
TH
1129 if (lba48)
1130 *max_sectors = ata_tf_to_lba48(&tf);
1131 else
1132 *max_sectors = ata_tf_to_lba(&tf);
2dcb407e 1133 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1134 (*max_sectors)--;
c728a914 1135 return 0;
1e999736
AC
1136}
1137
1138/**
c728a914
TH
1139 * ata_set_max_sectors - Set max sectors
1140 * @dev: target device
6b38d1d1 1141 * @new_sectors: new max sectors value to set for the device
1e999736 1142 *
c728a914
TH
1143 * Set max sectors of @dev to @new_sectors.
1144 *
1145 * RETURNS:
1146 * 0 on success, -EACCES if command is aborted or denied (due to
1147 * previous non-volatile SET_MAX) by the drive. -EIO on other
1148 * errors.
1e999736 1149 */
05027adc 1150static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 1151{
c728a914 1152 unsigned int err_mask;
1e999736 1153 struct ata_taskfile tf;
c728a914 1154 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1155
1156 new_sectors--;
1157
1158 ata_tf_init(dev, &tf);
1159
1e999736 1160 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
1161
1162 if (lba48) {
1163 tf.command = ATA_CMD_SET_MAX_EXT;
1164 tf.flags |= ATA_TFLAG_LBA48;
1165
1166 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1167 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1168 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 1169 } else {
c728a914
TH
1170 tf.command = ATA_CMD_SET_MAX;
1171
1e582ba4
TH
1172 tf.device |= (new_sectors >> 24) & 0xf;
1173 }
1174
1e999736 1175 tf.protocol |= ATA_PROT_NODATA;
c728a914 1176 tf.device |= ATA_LBA;
1e999736
AC
1177
1178 tf.lbal = (new_sectors >> 0) & 0xff;
1179 tf.lbam = (new_sectors >> 8) & 0xff;
1180 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 1181
2b789108 1182 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1183 if (err_mask) {
1184 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1185 "max address (err_mask=0x%x)\n", err_mask);
1186 if (err_mask == AC_ERR_DEV &&
1187 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1188 return -EACCES;
1189 return -EIO;
1190 }
1191
c728a914 1192 return 0;
1e999736
AC
1193}
1194
1195/**
1196 * ata_hpa_resize - Resize a device with an HPA set
1197 * @dev: Device to resize
1198 *
1199 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1200 * it if required to the full size of the media. The caller must check
1201 * the drive has the HPA feature set enabled.
05027adc
TH
1202 *
1203 * RETURNS:
1204 * 0 on success, -errno on failure.
1e999736 1205 */
05027adc 1206static int ata_hpa_resize(struct ata_device *dev)
1e999736 1207{
05027adc
TH
1208 struct ata_eh_context *ehc = &dev->link->eh_context;
1209 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1210 u64 sectors = ata_id_n_sectors(dev->id);
1211 u64 native_sectors;
c728a914 1212 int rc;
a617c09f 1213
05027adc
TH
1214 /* do we need to do it? */
1215 if (dev->class != ATA_DEV_ATA ||
1216 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1217 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 1218 return 0;
1e999736 1219
05027adc
TH
1220 /* read native max address */
1221 rc = ata_read_native_max_address(dev, &native_sectors);
1222 if (rc) {
1223 /* If HPA isn't going to be unlocked, skip HPA
1224 * resizing from the next try.
1225 */
1226 if (!ata_ignore_hpa) {
1227 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1228 "broken, will skip HPA handling\n");
1229 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1230
1231 /* we can continue if device aborted the command */
1232 if (rc == -EACCES)
1233 rc = 0;
1e999736 1234 }
37301a55 1235
05027adc
TH
1236 return rc;
1237 }
1238
1239 /* nothing to do? */
1240 if (native_sectors <= sectors || !ata_ignore_hpa) {
1241 if (!print_info || native_sectors == sectors)
1242 return 0;
1243
1244 if (native_sectors > sectors)
1245 ata_dev_printk(dev, KERN_INFO,
1246 "HPA detected: current %llu, native %llu\n",
1247 (unsigned long long)sectors,
1248 (unsigned long long)native_sectors);
1249 else if (native_sectors < sectors)
1250 ata_dev_printk(dev, KERN_WARNING,
1251 "native sectors (%llu) is smaller than "
1252 "sectors (%llu)\n",
1253 (unsigned long long)native_sectors,
1254 (unsigned long long)sectors);
1255 return 0;
1256 }
1257
1258 /* let's unlock HPA */
1259 rc = ata_set_max_sectors(dev, native_sectors);
1260 if (rc == -EACCES) {
1261 /* if device aborted the command, skip HPA resizing */
1262 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1263 "(%llu -> %llu), skipping HPA handling\n",
1264 (unsigned long long)sectors,
1265 (unsigned long long)native_sectors);
1266 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1267 return 0;
1268 } else if (rc)
1269 return rc;
1270
1271 /* re-read IDENTIFY data */
1272 rc = ata_dev_reread_id(dev, 0);
1273 if (rc) {
1274 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1275 "data after HPA resizing\n");
1276 return rc;
1277 }
1278
1279 if (print_info) {
1280 u64 new_sectors = ata_id_n_sectors(dev->id);
1281 ata_dev_printk(dev, KERN_INFO,
1282 "HPA unlocked: %llu -> %llu, native %llu\n",
1283 (unsigned long long)sectors,
1284 (unsigned long long)new_sectors,
1285 (unsigned long long)native_sectors);
1286 }
1287
1288 return 0;
1e999736
AC
1289}
1290
0baab86b
EF
/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
	/* intentionally empty: controllers without a device-select
	 * mechanism plug this stub into ->dev_select */
}
1306
0baab86b 1307
1da177e4
LT
1308/**
1309 * ata_std_dev_select - Select device 0/1 on ATA bus
1310 * @ap: ATA channel to manipulate
1311 * @device: ATA device (numbered from zero) to select
1312 *
1313 * Use the method defined in the ATA specification to
1314 * make either device 0, or device 1, active on the
0baab86b
EF
1315 * ATA channel. Works with both PIO and MMIO.
1316 *
1317 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1318 *
1319 * LOCKING:
1320 * caller.
1321 */
1322
2dcb407e 1323void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1324{
1325 u8 tmp;
1326
1327 if (device == 0)
1328 tmp = ATA_DEVICE_OBS;
1329 else
1330 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1331
0d5ff566 1332 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1333 ata_pause(ap); /* needed; also flushes, for mmio */
1334}
1335
1336/**
1337 * ata_dev_select - Select device 0/1 on ATA bus
1338 * @ap: ATA channel to manipulate
1339 * @device: ATA device (numbered from zero) to select
1340 * @wait: non-zero to wait for Status register BSY bit to clear
1341 * @can_sleep: non-zero if context allows sleeping
1342 *
1343 * Use the method defined in the ATA specification to
1344 * make either device 0, or device 1, active on the
1345 * ATA channel.
1346 *
1347 * This is a high-level version of ata_std_dev_select(),
1348 * which additionally provides the services of inserting
1349 * the proper pauses and status polling, where needed.
1350 *
1351 * LOCKING:
1352 * caller.
1353 */
1354
1355void ata_dev_select(struct ata_port *ap, unsigned int device,
1356 unsigned int wait, unsigned int can_sleep)
1357{
88574551 1358 if (ata_msg_probe(ap))
44877b4e
TH
1359 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1360 "device %u, wait %u\n", device, wait);
1da177e4
LT
1361
1362 if (wait)
1363 ata_wait_idle(ap);
1364
1365 ap->ops->dev_select(ap, device);
1366
1367 if (wait) {
9af5c9c9 1368 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1369 msleep(150);
1370 ata_wait_idle(ap);
1371 }
1372}
1373
1374/**
1375 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1376 * @id: IDENTIFY DEVICE page to dump
1da177e4 1377 *
0bd3300a
TH
1378 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1379 * page.
1da177e4
LT
1380 *
1381 * LOCKING:
1382 * caller.
1383 */
1384
0bd3300a 1385static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1386{
1387 DPRINTK("49==0x%04x "
1388 "53==0x%04x "
1389 "63==0x%04x "
1390 "64==0x%04x "
1391 "75==0x%04x \n",
0bd3300a
TH
1392 id[49],
1393 id[53],
1394 id[63],
1395 id[64],
1396 id[75]);
1da177e4
LT
1397 DPRINTK("80==0x%04x "
1398 "81==0x%04x "
1399 "82==0x%04x "
1400 "83==0x%04x "
1401 "84==0x%04x \n",
0bd3300a
TH
1402 id[80],
1403 id[81],
1404 id[82],
1405 id[83],
1406 id[84]);
1da177e4
LT
1407 DPRINTK("88==0x%04x "
1408 "93==0x%04x\n",
0bd3300a
TH
1409 id[88],
1410 id[93]);
1da177e4
LT
1411}
1412
cb95d562
TH
1413/**
1414 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1415 * @id: IDENTIFY data to compute xfer mask from
1416 *
1417 * Compute the xfermask for this device. This is not as trivial
1418 * as it seems if we must consider early devices correctly.
1419 *
1420 * FIXME: pre IDE drive timing (do we care ?).
1421 *
1422 * LOCKING:
1423 * None.
1424 *
1425 * RETURNS:
1426 * Computed xfermask
1427 */
7dc951ae 1428unsigned long ata_id_xfermask(const u16 *id)
cb95d562 1429{
7dc951ae 1430 unsigned long pio_mask, mwdma_mask, udma_mask;
cb95d562
TH
1431
1432 /* Usual case. Word 53 indicates word 64 is valid */
1433 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1434 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1435 pio_mask <<= 3;
1436 pio_mask |= 0x7;
1437 } else {
1438 /* If word 64 isn't valid then Word 51 high byte holds
1439 * the PIO timing number for the maximum. Turn it into
1440 * a mask.
1441 */
7a0f1c8a 1442 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1443 if (mode < 5) /* Valid PIO range */
2dcb407e 1444 pio_mask = (2 << mode) - 1;
46767aeb
AC
1445 else
1446 pio_mask = 1;
cb95d562
TH
1447
1448 /* But wait.. there's more. Design your standards by
1449 * committee and you too can get a free iordy field to
1450 * process. However its the speeds not the modes that
1451 * are supported... Note drivers using the timing API
1452 * will get this right anyway
1453 */
1454 }
1455
1456 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1457
b352e57d
AC
1458 if (ata_id_is_cfa(id)) {
1459 /*
1460 * Process compact flash extended modes
1461 */
1462 int pio = id[163] & 0x7;
1463 int dma = (id[163] >> 3) & 7;
1464
1465 if (pio)
1466 pio_mask |= (1 << 5);
1467 if (pio > 1)
1468 pio_mask |= (1 << 6);
1469 if (dma)
1470 mwdma_mask |= (1 << 3);
1471 if (dma > 1)
1472 mwdma_mask |= (1 << 4);
1473 }
1474
fb21f0d0
TH
1475 udma_mask = 0;
1476 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1477 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1478
1479 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1480}
1481
86e45b6b
TH
1482/**
1483 * ata_port_queue_task - Queue port_task
1484 * @ap: The ata_port to queue port_task for
e2a7f77a 1485 * @fn: workqueue function to be scheduled
65f27f38 1486 * @data: data for @fn to use
e2a7f77a 1487 * @delay: delay time for workqueue function
86e45b6b
TH
1488 *
1489 * Schedule @fn(@data) for execution after @delay jiffies using
1490 * port_task. There is one port_task per port and it's the
1491 * user(low level driver)'s responsibility to make sure that only
1492 * one task is active at any given time.
1493 *
1494 * libata core layer takes care of synchronization between
1495 * port_task and EH. ata_port_queue_task() may be ignored for EH
1496 * synchronization.
1497 *
1498 * LOCKING:
1499 * Inherited from caller.
1500 */
65f27f38 1501void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
86e45b6b
TH
1502 unsigned long delay)
1503{
65f27f38
DH
1504 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1505 ap->port_task_data = data;
86e45b6b 1506
45a66c1c
ON
1507 /* may fail if ata_port_flush_task() in progress */
1508 queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1509}
1510
1511/**
1512 * ata_port_flush_task - Flush port_task
1513 * @ap: The ata_port to flush port_task for
1514 *
1515 * After this function completes, port_task is guranteed not to
1516 * be running or scheduled.
1517 *
1518 * LOCKING:
1519 * Kernel thread context (may sleep)
1520 */
1521void ata_port_flush_task(struct ata_port *ap)
1522{
86e45b6b
TH
1523 DPRINTK("ENTER\n");
1524
45a66c1c 1525 cancel_rearming_delayed_work(&ap->port_task);
86e45b6b 1526
0dd4b21f
BP
1527 if (ata_msg_ctl(ap))
1528 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
1529}
1530
7102d230 1531static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1532{
77853bf2 1533 struct completion *waiting = qc->private_data;
a2a7a662 1534
a2a7a662 1535 complete(waiting);
a2a7a662
TH
1536}
1537
1538/**
2432697b 1539 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1540 * @dev: Device to which the command is sent
1541 * @tf: Taskfile registers for the command and the result
d69cf37d 1542 * @cdb: CDB for packet command
a2a7a662 1543 * @dma_dir: Data tranfer direction of the command
5c1ad8b3 1544 * @sgl: sg list for the data buffer of the command
2432697b 1545 * @n_elem: Number of sg entries
2b789108 1546 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1547 *
1548 * Executes libata internal command with timeout. @tf contains
1549 * command on entry and result on return. Timeout and error
1550 * conditions are reported via return value. No recovery action
1551 * is taken after a command times out. It's caller's duty to
1552 * clean up after timeout.
1553 *
1554 * LOCKING:
1555 * None. Should be called with kernel context, might sleep.
551e8889
TH
1556 *
1557 * RETURNS:
1558 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1559 */
2432697b
TH
1560unsigned ata_exec_internal_sg(struct ata_device *dev,
1561 struct ata_taskfile *tf, const u8 *cdb,
87260216 1562 int dma_dir, struct scatterlist *sgl,
2b789108 1563 unsigned int n_elem, unsigned long timeout)
a2a7a662 1564{
9af5c9c9
TH
1565 struct ata_link *link = dev->link;
1566 struct ata_port *ap = link->ap;
a2a7a662
TH
1567 u8 command = tf->command;
1568 struct ata_queued_cmd *qc;
2ab7db1f 1569 unsigned int tag, preempted_tag;
dedaf2b0 1570 u32 preempted_sactive, preempted_qc_active;
da917d69 1571 int preempted_nr_active_links;
60be6b9a 1572 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1573 unsigned long flags;
77853bf2 1574 unsigned int err_mask;
d95a717f 1575 int rc;
a2a7a662 1576
ba6a1308 1577 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1578
e3180499 1579 /* no internal command while frozen */
b51e9e5d 1580 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1581 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1582 return AC_ERR_SYSTEM;
1583 }
1584
2ab7db1f 1585 /* initialize internal qc */
a2a7a662 1586
2ab7db1f
TH
1587 /* XXX: Tag 0 is used for drivers with legacy EH as some
1588 * drivers choke if any other tag is given. This breaks
1589 * ata_tag_internal() test for those drivers. Don't use new
1590 * EH stuff without converting to it.
1591 */
1592 if (ap->ops->error_handler)
1593 tag = ATA_TAG_INTERNAL;
1594 else
1595 tag = 0;
1596
6cec4a39 1597 if (test_and_set_bit(tag, &ap->qc_allocated))
2ab7db1f 1598 BUG();
f69499f4 1599 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1600
1601 qc->tag = tag;
1602 qc->scsicmd = NULL;
1603 qc->ap = ap;
1604 qc->dev = dev;
1605 ata_qc_reinit(qc);
1606
9af5c9c9
TH
1607 preempted_tag = link->active_tag;
1608 preempted_sactive = link->sactive;
dedaf2b0 1609 preempted_qc_active = ap->qc_active;
da917d69 1610 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1611 link->active_tag = ATA_TAG_POISON;
1612 link->sactive = 0;
dedaf2b0 1613 ap->qc_active = 0;
da917d69 1614 ap->nr_active_links = 0;
2ab7db1f
TH
1615
1616 /* prepare & issue qc */
a2a7a662 1617 qc->tf = *tf;
d69cf37d
TH
1618 if (cdb)
1619 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1620 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1621 qc->dma_dir = dma_dir;
1622 if (dma_dir != DMA_NONE) {
2432697b 1623 unsigned int i, buflen = 0;
87260216 1624 struct scatterlist *sg;
2432697b 1625
87260216
JA
1626 for_each_sg(sgl, sg, n_elem, i)
1627 buflen += sg->length;
2432697b 1628
87260216 1629 ata_sg_init(qc, sgl, n_elem);
49c80429 1630 qc->nbytes = buflen;
a2a7a662
TH
1631 }
1632
77853bf2 1633 qc->private_data = &wait;
a2a7a662
TH
1634 qc->complete_fn = ata_qc_complete_internal;
1635
8e0e694a 1636 ata_qc_issue(qc);
a2a7a662 1637
ba6a1308 1638 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1639
2b789108
TH
1640 if (!timeout)
1641 timeout = ata_probe_timeout * 1000 / HZ;
1642
1643 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
d95a717f
TH
1644
1645 ata_port_flush_task(ap);
41ade50c 1646
d95a717f 1647 if (!rc) {
ba6a1308 1648 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1649
1650 /* We're racing with irq here. If we lose, the
1651 * following test prevents us from completing the qc
d95a717f
TH
1652 * twice. If we win, the port is frozen and will be
1653 * cleaned up by ->post_internal_cmd().
a2a7a662 1654 */
77853bf2 1655 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1656 qc->err_mask |= AC_ERR_TIMEOUT;
1657
1658 if (ap->ops->error_handler)
1659 ata_port_freeze(ap);
1660 else
1661 ata_qc_complete(qc);
f15a1daf 1662
0dd4b21f
BP
1663 if (ata_msg_warn(ap))
1664 ata_dev_printk(dev, KERN_WARNING,
88574551 1665 "qc timeout (cmd 0x%x)\n", command);
a2a7a662
TH
1666 }
1667
ba6a1308 1668 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1669 }
1670
d95a717f
TH
1671 /* do post_internal_cmd */
1672 if (ap->ops->post_internal_cmd)
1673 ap->ops->post_internal_cmd(qc);
1674
a51d644a
TH
1675 /* perform minimal error analysis */
1676 if (qc->flags & ATA_QCFLAG_FAILED) {
1677 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1678 qc->err_mask |= AC_ERR_DEV;
1679
1680 if (!qc->err_mask)
1681 qc->err_mask |= AC_ERR_OTHER;
1682
1683 if (qc->err_mask & ~AC_ERR_OTHER)
1684 qc->err_mask &= ~AC_ERR_OTHER;
d95a717f
TH
1685 }
1686
15869303 1687 /* finish up */
ba6a1308 1688 spin_lock_irqsave(ap->lock, flags);
15869303 1689
e61e0672 1690 *tf = qc->result_tf;
77853bf2
TH
1691 err_mask = qc->err_mask;
1692
1693 ata_qc_free(qc);
9af5c9c9
TH
1694 link->active_tag = preempted_tag;
1695 link->sactive = preempted_sactive;
dedaf2b0 1696 ap->qc_active = preempted_qc_active;
da917d69 1697 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1698
1f7dd3e9
TH
1699 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1700 * Until those drivers are fixed, we detect the condition
1701 * here, fail the command with AC_ERR_SYSTEM and reenable the
1702 * port.
1703 *
1704 * Note that this doesn't change any behavior as internal
1705 * command failure results in disabling the device in the
1706 * higher layer for LLDDs without new reset/EH callbacks.
1707 *
1708 * Kill the following code as soon as those drivers are fixed.
1709 */
198e0fed 1710 if (ap->flags & ATA_FLAG_DISABLED) {
1f7dd3e9
TH
1711 err_mask |= AC_ERR_SYSTEM;
1712 ata_port_probe(ap);
1713 }
1714
ba6a1308 1715 spin_unlock_irqrestore(ap->lock, flags);
15869303 1716
77853bf2 1717 return err_mask;
a2a7a662
TH
1718}
1719
2432697b 1720/**
33480a0e 1721 * ata_exec_internal - execute libata internal command
2432697b
TH
1722 * @dev: Device to which the command is sent
1723 * @tf: Taskfile registers for the command and the result
1724 * @cdb: CDB for packet command
1725 * @dma_dir: Data tranfer direction of the command
1726 * @buf: Data buffer of the command
1727 * @buflen: Length of data buffer
2b789108 1728 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1729 *
1730 * Wrapper around ata_exec_internal_sg() which takes simple
1731 * buffer instead of sg list.
1732 *
1733 * LOCKING:
1734 * None. Should be called with kernel context, might sleep.
1735 *
1736 * RETURNS:
1737 * Zero on success, AC_ERR_* mask on failure
1738 */
1739unsigned ata_exec_internal(struct ata_device *dev,
1740 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1741 int dma_dir, void *buf, unsigned int buflen,
1742 unsigned long timeout)
2432697b 1743{
33480a0e
TH
1744 struct scatterlist *psg = NULL, sg;
1745 unsigned int n_elem = 0;
2432697b 1746
33480a0e
TH
1747 if (dma_dir != DMA_NONE) {
1748 WARN_ON(!buf);
1749 sg_init_one(&sg, buf, buflen);
1750 psg = &sg;
1751 n_elem++;
1752 }
2432697b 1753
2b789108
TH
1754 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1755 timeout);
2432697b
TH
1756}
1757
977e6b9f
TH
1758/**
1759 * ata_do_simple_cmd - execute simple internal command
1760 * @dev: Device to which the command is sent
1761 * @cmd: Opcode to execute
1762 *
1763 * Execute a 'simple' command, that only consists of the opcode
1764 * 'cmd' itself, without filling any other registers
1765 *
1766 * LOCKING:
1767 * Kernel thread context (may sleep).
1768 *
1769 * RETURNS:
1770 * Zero on success, AC_ERR_* mask on failure
e58eb583 1771 */
77b08fb5 1772unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1773{
1774 struct ata_taskfile tf;
e58eb583
TH
1775
1776 ata_tf_init(dev, &tf);
1777
1778 tf.command = cmd;
1779 tf.flags |= ATA_TFLAG_DEVICE;
1780 tf.protocol = ATA_PROT_NODATA;
1781
2b789108 1782 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1783}
1784
1bc4ccff
AC
1785/**
1786 * ata_pio_need_iordy - check if iordy needed
1787 * @adev: ATA device
1788 *
1789 * Check if the current speed of the device requires IORDY. Used
1790 * by various controllers for chip configuration.
1791 */
a617c09f 1792
1bc4ccff
AC
1793unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1794{
432729f0
AC
1795 /* Controller doesn't support IORDY. Probably a pointless check
1796 as the caller should know this */
9af5c9c9 1797 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1798 return 0;
432729f0
AC
1799 /* PIO3 and higher it is mandatory */
1800 if (adev->pio_mode > XFER_PIO_2)
1801 return 1;
1802 /* We turn it on when possible */
1803 if (ata_id_has_iordy(adev->id))
1bc4ccff 1804 return 1;
432729f0
AC
1805 return 0;
1806}
2e9edbf8 1807
432729f0
AC
1808/**
1809 * ata_pio_mask_no_iordy - Return the non IORDY mask
1810 * @adev: ATA device
1811 *
1812 * Compute the highest mode possible if we are not using iordy. Return
1813 * -1 if no iordy mode is available.
1814 */
a617c09f 1815
432729f0
AC
1816static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1817{
1bc4ccff 1818 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1819 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1820 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1821 /* Is the speed faster than the drive allows non IORDY ? */
1822 if (pio) {
1823 /* This is cycle times not frequency - watch the logic! */
1824 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1825 return 3 << ATA_SHIFT_PIO;
1826 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1827 }
1828 }
432729f0 1829 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1830}
1831
1da177e4 1832/**
49016aca 1833 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1834 * @dev: target device
1835 * @p_class: pointer to class of the target device (may be changed)
bff04647 1836 * @flags: ATA_READID_* flags
fe635c7e 1837 * @id: buffer to read IDENTIFY data into
1da177e4 1838 *
49016aca
TH
1839 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1840 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1841 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1842 * for pre-ATA4 drives.
1da177e4 1843 *
50a99018 1844 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1845 * now we abort if we hit that case.
50a99018 1846 *
1da177e4 1847 * LOCKING:
49016aca
TH
1848 * Kernel thread context (may sleep)
1849 *
1850 * RETURNS:
1851 * 0 on success, -errno otherwise.
1da177e4 1852 */
a9beec95 1853int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1854 unsigned int flags, u16 *id)
1da177e4 1855{
9af5c9c9 1856 struct ata_port *ap = dev->link->ap;
49016aca 1857 unsigned int class = *p_class;
a0123703 1858 struct ata_taskfile tf;
49016aca
TH
1859 unsigned int err_mask = 0;
1860 const char *reason;
54936f8b 1861 int may_fallback = 1, tried_spinup = 0;
49016aca 1862 int rc;
1da177e4 1863
0dd4b21f 1864 if (ata_msg_ctl(ap))
44877b4e 1865 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1866
49016aca 1867 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
49016aca 1868 retry:
3373efd8 1869 ata_tf_init(dev, &tf);
a0123703 1870
49016aca
TH
1871 switch (class) {
1872 case ATA_DEV_ATA:
a0123703 1873 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1874 break;
1875 case ATA_DEV_ATAPI:
a0123703 1876 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1877 break;
1878 default:
1879 rc = -ENODEV;
1880 reason = "unsupported class";
1881 goto err_out;
1da177e4
LT
1882 }
1883
a0123703 1884 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1885
1886 /* Some devices choke if TF registers contain garbage. Make
1887 * sure those are properly initialized.
1888 */
1889 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1890
1891 /* Device presence detection is unreliable on some
1892 * controllers. Always poll IDENTIFY if available.
1893 */
1894 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1895
3373efd8 1896 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2b789108 1897 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
a0123703 1898 if (err_mask) {
800b3996 1899 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8 1900 DPRINTK("ata%u.%d: NODEV after polling detection\n",
44877b4e 1901 ap->print_id, dev->devno);
55a8e2c8
TH
1902 return -ENOENT;
1903 }
1904
54936f8b
TH
1905 /* Device or controller might have reported the wrong
1906 * device class. Give a shot at the other IDENTIFY if
1907 * the current one is aborted by the device.
1908 */
1909 if (may_fallback &&
1910 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1911 may_fallback = 0;
1912
1913 if (class == ATA_DEV_ATA)
1914 class = ATA_DEV_ATAPI;
1915 else
1916 class = ATA_DEV_ATA;
1917 goto retry;
1918 }
1919
49016aca
TH
1920 rc = -EIO;
1921 reason = "I/O error";
1da177e4
LT
1922 goto err_out;
1923 }
1924
54936f8b
TH
1925 /* Falling back doesn't make sense if ID data was read
1926 * successfully at least once.
1927 */
1928 may_fallback = 0;
1929
49016aca 1930 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1931
49016aca 1932 /* sanity check */
a4f5749b 1933 rc = -EINVAL;
6070068b 1934 reason = "device reports invalid type";
a4f5749b
TH
1935
1936 if (class == ATA_DEV_ATA) {
1937 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1938 goto err_out;
1939 } else {
1940 if (ata_id_is_ata(id))
1941 goto err_out;
49016aca
TH
1942 }
1943
169439c2
ML
1944 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1945 tried_spinup = 1;
1946 /*
1947 * Drive powered-up in standby mode, and requires a specific
1948 * SET_FEATURES spin-up subcommand before it will accept
1949 * anything other than the original IDENTIFY command.
1950 */
218f3d30 1951 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 1952 if (err_mask && id[2] != 0x738c) {
169439c2
ML
1953 rc = -EIO;
1954 reason = "SPINUP failed";
1955 goto err_out;
1956 }
1957 /*
1958 * If the drive initially returned incomplete IDENTIFY info,
1959 * we now must reissue the IDENTIFY command.
1960 */
1961 if (id[2] == 0x37c8)
1962 goto retry;
1963 }
1964
bff04647 1965 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
1966 /*
1967 * The exact sequence expected by certain pre-ATA4 drives is:
1968 * SRST RESET
50a99018
AC
1969 * IDENTIFY (optional in early ATA)
1970 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
1971 * anything else..
1972 * Some drives were very specific about that exact sequence.
50a99018
AC
1973 *
1974 * Note that ATA4 says lba is mandatory so the second check
1975 * shoud never trigger.
49016aca
TH
1976 */
1977 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 1978 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
1979 if (err_mask) {
1980 rc = -EIO;
1981 reason = "INIT_DEV_PARAMS failed";
1982 goto err_out;
1983 }
1984
1985 /* current CHS translation info (id[53-58]) might be
1986 * changed. reread the identify device info.
1987 */
bff04647 1988 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
1989 goto retry;
1990 }
1991 }
1992
1993 *p_class = class;
fe635c7e 1994
49016aca
TH
1995 return 0;
1996
1997 err_out:
88574551 1998 if (ata_msg_warn(ap))
0dd4b21f 1999 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 2000 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
2001 return rc;
2002}
2003
3373efd8 2004static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2005{
9af5c9c9
TH
2006 struct ata_port *ap = dev->link->ap;
2007 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2008}
2009
a6e6ce8e
TH
2010static void ata_dev_config_ncq(struct ata_device *dev,
2011 char *desc, size_t desc_sz)
2012{
9af5c9c9 2013 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2014 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2015
2016 if (!ata_id_has_ncq(dev->id)) {
2017 desc[0] = '\0';
2018 return;
2019 }
75683fe7 2020 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2021 snprintf(desc, desc_sz, "NCQ (not used)");
2022 return;
2023 }
a6e6ce8e 2024 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2025 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2026 dev->flags |= ATA_DFLAG_NCQ;
2027 }
2028
2029 if (hdepth >= ddepth)
2030 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2031 else
2032 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2033}
2034
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	/* nothing to do for a disabled device */
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* id[59] bit 8 set => multi-sector count in low byte valid */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads    = id[55];
				dev->sectors  = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* link power management capabilities, unless blacklisted */
	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	/* give the LLD a final say on the configuration */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			__FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
2317
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.  Suitable as a ->cable_detect callback.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2330
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.  Suitable as a ->cable_detect callback.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2343
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2355
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2367
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2379
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* each device gets a fixed retry budget */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device came up enabled */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev points at the device that failed; charge it one try */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough -- -ENODEV shares the slow-down path below */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2528
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing DISABLED is all it takes to re-enable the port */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2544
3be680b7
TH
2545/**
2546 * sata_print_link_status - Print SATA link status
936fd732 2547 * @link: SATA link to printk link status about
3be680b7
TH
2548 *
2549 * This function prints link speed and status of a SATA link.
2550 *
2551 * LOCKING:
2552 * None.
2553 */
936fd732 2554void sata_print_link_status(struct ata_link *link)
3be680b7 2555{
6d5f9732 2556 u32 sstatus, scontrol, tmp;
3be680b7 2557
936fd732 2558 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2559 return;
936fd732 2560 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2561
936fd732 2562 if (ata_link_online(link)) {
3be680b7 2563 tmp = (sstatus >> 4) & 0xf;
936fd732 2564 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2565 "SATA link up %s (SStatus %X SControl %X)\n",
2566 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2567 } else {
936fd732 2568 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2569 "SATA link down (SStatus %X SControl %X)\n",
2570 sstatus, scontrol);
3be680b7
TH
2571 }
2572}
2573
ebdfca6e
AC
2574/**
2575 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2576 * @adev: device
2577 *
2578 * Obtain the other device on the same cable, or if none is
2579 * present NULL is returned
2580 */
2e9edbf8 2581
3373efd8 2582struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2583{
9af5c9c9
TH
2584 struct ata_link *link = adev->link;
2585 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2586 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2587 return NULL;
2588 return pair;
2589}
2590
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* mark both possible devices on the link as absent */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2610
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;		/* already at the lowest speed */

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2669
936fd732 2670static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2671{
5270222f
TH
2672 struct ata_link *host_link = &link->ap->link;
2673 u32 limit, target, spd;
1c3fae4d 2674
5270222f
TH
2675 limit = link->sata_spd_limit;
2676
2677 /* Don't configure downstream link faster than upstream link.
2678 * It doesn't speed up anything and some PMPs choke on such
2679 * configuration.
2680 */
2681 if (!ata_is_host_link(link) && host_link->sata_spd)
2682 limit &= (1 << host_link->sata_spd) - 1;
2683
2684 if (limit == UINT_MAX)
2685 target = 0;
1c3fae4d 2686 else
5270222f 2687 target = fls(limit);
1c3fae4d
TH
2688
2689 spd = (*scontrol >> 4) & 0xf;
5270222f 2690 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2691
5270222f 2692 return spd != target;
1c3fae4d
TH
2693}
2694
2695/**
3c567b7d 2696 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2697 * @link: Link in question
1c3fae4d
TH
2698 *
2699 * Test whether the spd limit in SControl matches
936fd732 2700 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2701 * whether hardreset is necessary to apply SATA spd
2702 * configuration.
2703 *
2704 * LOCKING:
2705 * Inherited from caller.
2706 *
2707 * RETURNS:
2708 * 1 if SATA spd configuration is needed, 0 otherwise.
2709 */
936fd732 2710int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2711{
2712 u32 scontrol;
2713
936fd732 2714 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2715 return 1;
1c3fae4d 2716
936fd732 2717 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2718}
2719
2720/**
3c567b7d 2721 * sata_set_spd - set SATA spd according to spd limit
936fd732 2722 * @link: Link to set SATA spd for
1c3fae4d 2723 *
936fd732 2724 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2725 *
2726 * LOCKING:
2727 * Inherited from caller.
2728 *
2729 * RETURNS:
2730 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2731 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2732 */
936fd732 2733int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2734{
2735 u32 scontrol;
81952c54 2736 int rc;
1c3fae4d 2737
936fd732 2738 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2739 return rc;
1c3fae4d 2740
936fd732 2741 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2742 return 0;
2743
936fd732 2744 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2745 return rc;
2746
1c3fae4d
TH
2747 return 1;
2748}
2749
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {
/*	{ mode,       setup, act8b, rec8b, cyc8b, active, recover, cycle, udma } */
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600, 0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383, 0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240, 0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180, 0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120, 0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100, 0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80, 0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960, 0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480, 0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240, 0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480, 0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150, 0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120, 0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100, 0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80, 0 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },

	/* 0xFF sentinel terminates ata_timing_find_mode()'s scan */
	{ 0xFF }
};
2793
/* ENOUGH(v, unit): smallest count of whole units covering v (ceil division,
 * valid for v >= 1).  EZ additionally maps v == 0 ("field unused") to 0. */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)

/* Convert nanosecond timings in @t to clock counts in @q, using bus clock
 * period @T (in ps, hence the *1000) for PIO/DMA fields and @UT for UDMA. */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup	= EZ(t->setup	* 1000,  T);
	q->act8b	= EZ(t->act8b	* 1000,  T);
	q->rec8b	= EZ(t->rec8b	* 1000,  T);
	q->cyc8b	= EZ(t->cyc8b	* 1000,  T);
	q->active	= EZ(t->active	* 1000,  T);
	q->recover	= EZ(t->recover	* 1000,  T);
	q->cycle	= EZ(t->cycle	* 1000,  T);
	q->udma		= EZ(t->udma	* 1000, UT);
}
2808
/* Merge timings @a and @b into @m, taking the slower (larger) value for
 * each field selected by the ATA_TIMING_* bits in @what.  @m may alias
 * @a or @b. */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2821
6357357c 2822const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 2823{
70cd071e
TH
2824 const struct ata_timing *t = ata_timing;
2825
2826 while (xfer_mode > t->mode)
2827 t++;
452503f9 2828
70cd071e
TH
2829 if (xfer_mode == t->mode)
2830 return t;
2831 return NULL;
452503f9
AC
2832}
2833
/* Compute the quantized timing for @adev at transfer mode @speed into @t,
 * honouring any extended cycle timings the drive reports.  @T and @UT are
 * the bus clock periods (ps) for PIO/DMA and UDMA respectively.
 * Returns 0 on success, -EINVAL if @speed has no timing table entry. */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2904
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is ORed into @sel to suppress the warning */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest DMA mode, UDMA before MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fallthrough -- FORCE_PIO0 also clears all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* must keep at least one PIO mode and must actually change */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
2993
/* Issue SET FEATURES - XFER MODE to @dev for the already-chosen
 * dev->xfer_mode, forgiving the command failing on known-quirky hardware,
 * then revalidate the device.  Returns 0 on success, -errno on failure. */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
			dev->pio_mode <= XFER_PIO_2)
		err_mask &= ~AC_ERR_DEV;

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* re-read IDENTIFY data; flag tells EH we're mid mode-change */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
3042
1da177e4 3043/**
04351821 3044 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3045 * @link: link on which timings will be programmed
e82cbdb9 3046 * @r_failed_dev: out paramter for failed device
1da177e4 3047 *
04351821
A
3048 * Standard implementation of the function used to tune and set
3049 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3050 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3051 * returned in @r_failed_dev.
780a87f7 3052 *
1da177e4 3053 * LOCKING:
0cba632b 3054 * PCI/etc. bus probe sem.
e82cbdb9
TH
3055 *
3056 * RETURNS:
3057 * 0 on success, negative errno otherwise
1da177e4 3058 */
04351821 3059
0260731f 3060int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3061{
0260731f 3062 struct ata_port *ap = link->ap;
e8e0619f 3063 struct ata_device *dev;
f58229f8 3064 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3065
a6d5a51c 3066 /* step 1: calculate xfer_mask */
f58229f8 3067 ata_link_for_each_dev(dev, link) {
7dc951ae 3068 unsigned long pio_mask, dma_mask;
b3a70601 3069 unsigned int mode_mask;
a6d5a51c 3070
e1211e3f 3071 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3072 continue;
3073
b3a70601
AC
3074 mode_mask = ATA_DMA_MASK_ATA;
3075 if (dev->class == ATA_DEV_ATAPI)
3076 mode_mask = ATA_DMA_MASK_ATAPI;
3077 else if (ata_id_is_cfa(dev->id))
3078 mode_mask = ATA_DMA_MASK_CFA;
3079
3373efd8 3080 ata_dev_xfermask(dev);
1da177e4 3081
acf356b1
TH
3082 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3083 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3084
3085 if (libata_dma_mask & mode_mask)
3086 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3087 else
3088 dma_mask = 0;
3089
acf356b1
TH
3090 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3091 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3092
4f65977d 3093 found = 1;
70cd071e 3094 if (dev->dma_mode != 0xff)
5444a6f4 3095 used_dma = 1;
a6d5a51c 3096 }
4f65977d 3097 if (!found)
e82cbdb9 3098 goto out;
a6d5a51c
TH
3099
3100 /* step 2: always set host PIO timings */
f58229f8 3101 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3102 if (!ata_dev_enabled(dev))
3103 continue;
3104
70cd071e 3105 if (dev->pio_mode == 0xff) {
f15a1daf 3106 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3107 rc = -EINVAL;
e82cbdb9 3108 goto out;
e8e0619f
TH
3109 }
3110
3111 dev->xfer_mode = dev->pio_mode;
3112 dev->xfer_shift = ATA_SHIFT_PIO;
3113 if (ap->ops->set_piomode)
3114 ap->ops->set_piomode(ap, dev);
3115 }
1da177e4 3116
a6d5a51c 3117 /* step 3: set host DMA timings */
f58229f8 3118 ata_link_for_each_dev(dev, link) {
70cd071e 3119 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
e8e0619f
TH
3120 continue;
3121
3122 dev->xfer_mode = dev->dma_mode;
3123 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3124 if (ap->ops->set_dmamode)
3125 ap->ops->set_dmamode(ap, dev);
3126 }
1da177e4
LT
3127
3128 /* step 4: update devices' xfer mode */
f58229f8 3129 ata_link_for_each_dev(dev, link) {
18d90deb 3130 /* don't update suspended devices' xfer mode */
9666f400 3131 if (!ata_dev_enabled(dev))
83206a29
TH
3132 continue;
3133
3373efd8 3134 rc = ata_dev_set_mode(dev);
5bbc53f4 3135 if (rc)
e82cbdb9 3136 goto out;
83206a29 3137 }
1da177e4 3138
e8e0619f
TH
3139 /* Record simplex status. If we selected DMA then the other
3140 * host channels are not permitted to do so.
5444a6f4 3141 */
cca3974e 3142 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3143 ap->host->simplex_claimed = ap;
5444a6f4 3144
e82cbdb9
TH
3145 out:
3146 if (rc)
3147 *r_failed_dev = dev;
3148 return rc;
1da177e4
LT
3149}
3150
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load registers first, then write the command register to
	 * start execution — the order is mandated by the ATA protocol.
	 */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
3170
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (in jiffies — the "%lu secs" print
 *		below divides by HZ, confirming the unit)
 *	@tmout: overall timeout (in jiffies)
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* quick busy-wait first; 0xff means the port returned all-ones
	 * (no device / link gone) and is excluded from further polling */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	/* still busy after the impatience window: warn once, keep waiting */
	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3225
/**
 *	ata_wait_after_reset - wait before checking status after reset
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	After reset, we need to pause a while before reading status.
 *	Also, certain combination of controller and device report 0xff
 *	for some duration (e.g. until SATA PHY is up and running)
 *	which is interpreted as empty port in ATA world.  This
 *	function also waits for such devices to get out of 0xff
 *	status.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
{
	/* never wait for 0xff to clear longer than ATA_TMOUT_FF_WAIT */
	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;

	if (time_before(until, deadline))
		deadline = until;

	/* Spec mandates ">= 2ms" before checking status.  We wait
	 * 150ms, because that was the magic delay used for ATAPI
	 * devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready.
	 */
	msleep(150);

	/* Wait for 0xff to clear.  Some SATA devices take a long time
	 * to clear 0xff after reset.  For example, HHD424020F7SV00
	 * iVDR needs >= 800ms while.  Quantum GoVault needs even more
	 * than that.
	 *
	 * Note that some PATA controllers (pata_ali) explode if
	 * status register is read more than once when there's no
	 * device attached — hence the SATA-only guard below.
	 */
	if (ap->flags & ATA_FLAG_SATA) {
		while (1) {
			u8 status = ata_chk_status(ap);

			if (status != 0xff || time_after(jiffies, deadline))
				return;

			msleep(50);
		}
	}
}
3280
d4b2bab4
TH
3281/**
3282 * ata_wait_ready - sleep until BSY clears, or timeout
3283 * @ap: port containing status register to be polled
3284 * @deadline: deadline jiffies for the operation
3285 *
3286 * Sleep until ATA Status register bit BSY clears, or timeout
3287 * occurs.
3288 *
3289 * LOCKING:
3290 * Kernel thread context (may sleep).
3291 *
3292 * RETURNS:
3293 * 0 on success, -errno otherwise.
3294 */
3295int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3296{
3297 unsigned long start = jiffies;
3298 int warned = 0;
3299
3300 while (1) {
3301 u8 status = ata_chk_status(ap);
3302 unsigned long now = jiffies;
3303
3304 if (!(status & ATA_BUSY))
3305 return 0;
936fd732 3306 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3307 return -ENODEV;
3308 if (time_after(now, deadline))
3309 return -EBUSY;
3310
3311 if (!warned && time_after(now, start + 5 * HZ) &&
3312 (deadline - now > 3 * HZ)) {
3313 ata_port_printk(ap, KERN_WARNING,
3314 "port is slow to respond, please be patient "
3315 "(Status 0x%x)\n", status);
3316 warned = 1;
3317 }
3318
3319 msleep(50);
3320 }
3321}
3322
/* Post-reset follow-up for ata_bus_softreset(): wait for the devices
 * found by ata_devchk() to clear BSY.  -ENODEV from a wait is recorded
 * but does not abort — an absent device is tolerated; any other error
 * is returned immediately.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;	/* remember -ENODEV, keep going */
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			/* (1, 1) is the post-reset signature value */
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3382
/* Pulse SRST in the device control register to soft-reset the bus,
 * then hand off to ata_bus_post_reset() for the !BSY waits.
 * Returns 0 on success, -ENODEV if the bus floats at 0xFF, or a
 * negative errno from the post-reset waits.
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_chk_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3409
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 present */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset; -ENODEV (empty bus) is tolerated here */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 * (err == 0x81 indicates device 0 failed diagnostics and device
	 * 1's signature cannot be trusted)
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3497
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* clamp deadline to the params[2] timeout, whichever is sooner */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* keep only the DET field of SStatus */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET stuck at 1: keep waiting until deadline */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3566
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* keep SPD bits, set DET = 0 (no action) and IPM = 3
	 * (disable partial/slumber power management) */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
3602
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: some controllers need a hardreset to
	 * bring the link back after resume */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* Some PMPs don't work with only SRST, force hardreset if PMP
	 * is supported.
	 */
	if (ap->flags & ATA_FLAG_PMP)
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3665
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: report empty port, nothing to reset */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices
	 * (err == 0x81 means device 1's signature is unreliable) */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3725
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: disable phy (offline mode) */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1: perform interface communication initialization */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3785
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* If PMP is supported, we have to do follow-up SRST.  Note
	 * that some PMPs don't send D2H Reg FIS after hardreset at
	 * all if the first port is empty.  Wait for it just for a
	 * second and request follow-up SRST.
	 */
	if (ap->flags & ATA_FLAG_PMP) {
		ata_wait_ready(ap, jiffies + HZ);
		return -EAGAIN;
	}

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3853
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError: write-1-to-clear, so write back what we read */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);
	link->eh_info.serror = 0;

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3899
623a3128
TH
3900/**
3901 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3902 * @dev: device to compare against
3903 * @new_class: class of the new device
3904 * @new_id: IDENTIFY page of the new device
3905 *
3906 * Compare @new_class and @new_id against @dev and determine
3907 * whether @dev is the device indicated by @new_class and
3908 * @new_id.
3909 *
3910 * LOCKING:
3911 * None.
3912 *
3913 * RETURNS:
3914 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3915 */
3373efd8
TH
3916static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3917 const u16 *new_id)
623a3128
TH
3918{
3919 const u16 *old_id = dev->id;
a0cf733b
TH
3920 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3921 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3922
3923 if (dev->class != new_class) {
f15a1daf
TH
3924 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3925 dev->class, new_class);
623a3128
TH
3926 return 0;
3927 }
3928
a0cf733b
TH
3929 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3930 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3931 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3932 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3933
3934 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3935 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3936 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3937 return 0;
3938 }
3939
3940 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3941 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3942 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3943 return 0;
3944 }
3945
623a3128
TH
3946 return 1;
3947}
3948
/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* use the port's sector_buf as a bounce buffer so dev->id is
	 * left untouched if reading or validation fails
	 */
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	/* identity confirmed - commit the freshly read ID page */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
3981
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	/* remember capacity so we can detect a device swap below */
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors so in-flight block layer
		 * users keep seeing the geometry they were probed with
		 */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4046
6919a0a6
AC
/* One quirk table entry: model/firmware patterns plus the ATA_HORKAGE_*
 * flags to apply.  Patterns support a single trailing '*' wildcard
 * (see strn_pattern_cmp()); a NULL model_rev matches any firmware.
 */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

/* Matched against IDENTIFY data by ata_dev_blacklisted(); terminated
 * by an all-NULL sentinel entry.
 */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
						ATA_HORKAGE_SKIP_PM },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7250SASUN500G*", NULL,    ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7225SBSUN250G*", NULL,    ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },

	/* End Marker */
	{ }
};
2e9edbf8 4134
/*
 * Compare @name against pattern @patt.  Only a single trailing
 * @wildchar is honoured; everywhere else @wildchar is literal.
 * Returns 0 on match, non-zero otherwise (strncmp() convention).
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wild = strchr(patt, wildchar);
	int cmp_len;

	if (wild && wild[1] == '\0') {
		/* trailing wildcard: compare only the prefix before it */
		cmp_len = wild - patt;
	} else {
		/* exact-style match: compare over the name's length */
		cmp_len = strlen(name);
		if (!cmp_len)
			return *patt ? -1 : 0;
	}

	return strncmp(patt, name, cmp_len);
}
4157
/* Look up @dev in ata_device_blacklist by its IDENTIFY model number
 * and firmware revision.  Returns the matching entry's ATA_HORKAGE_*
 * flags, or 0 when the device is not listed.
 */
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	/* pull NUL-terminated strings out of the raw IDENTIFY words */
	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	/* table is terminated by an entry with model_num == NULL */
	while (ad->model_num) {
		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
			/* NULL model_rev matches any firmware revision */
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
4178
6919a0a6
AC
4179static int ata_dma_blacklisted(const struct ata_device *dev)
4180{
4181 /* We don't support polling DMA.
4182 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4183 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4184 */
9af5c9c9 4185 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4186 (dev->flags & ATA_DFLAG_CDB_INTR))
4187 return 1;
75683fe7 4188 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4189}
4190
6bbfd53d
AC
4191/**
4192 * ata_is_40wire - check drive side detection
4193 * @dev: device
4194 *
4195 * Perform drive side detection decoding, allowing for device vendors
4196 * who can't follow the documentation.
4197 */
4198
4199static int ata_is_40wire(struct ata_device *dev)
4200{
4201 if (dev->horkage & ATA_HORKAGE_IVB)
4202 return ata_drive_40wire_relaxed(dev->id);
4203 return ata_drive_40wire(dev->id);
4204}
4205
a6d5a51c
TH
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available - intersect with what the device claims */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to veto individual modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_is_40wire(dev) &&
		     (ap->cbl == ATA_CBL_PATA_UNK ||
		      ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	/* publish the narrowed masks back into the device */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4286
1da177e4
LT
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *	@feature: The sector count represents the feature to set
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
					u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	/* the feature register selects enable/disable sub-command */
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	/* per SATA spec, the feature being toggled goes in sector count */
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4367
8bf62ece
AL
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4410
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg - undo the trim done at map time */
		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			/* copy bounced read data back into caller's page */
			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
4468
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each sg entry so no PRD crosses a 64K boundary */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			/* len == 0x10000 encodes as 0x0000 per PCI IDE spec */
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
b9a4197e 4520
d26fc955
AC
/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command. Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[idx].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec says */
				/* split the 64K chunk into two 32K PRD entries */
				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[idx].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
4582
1da177e4
LT
4583/**
4584 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4585 * @qc: Metadata associated with taskfile to check
4586 *
780a87f7
JG
4587 * Allow low-level driver to filter ATA PACKET commands, returning
4588 * a status indicating whether or not it is OK to use DMA for the
4589 * supplied PACKET command.
4590 *
1da177e4 4591 * LOCKING:
cca3974e 4592 * spin_lock_irqsave(host lock)
0cba632b 4593 *
1da177e4
LT
4594 * RETURNS: 0 when ATAPI DMA can be used
4595 * nonzero otherwise
4596 */
4597int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4598{
4599 struct ata_port *ap = qc->ap;
b9a4197e
TH
4600
4601 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4602 * few ATAPI devices choke on such DMA requests.
4603 */
4604 if (unlikely(qc->nbytes & 15))
4605 return 1;
6f23a31d 4606
1da177e4 4607 if (ap->ops->check_atapi_dma)
b9a4197e 4608 return ap->ops->check_atapi_dma(qc);
1da177e4 4609
b9a4197e 4610 return 0;
1da177e4 4611}
b9a4197e 4612
140b5e59
TH
4613/**
4614 * atapi_qc_may_overflow - Check whether data transfer may overflow
4615 * @qc: ATA command in question
4616 *
4617 * ATAPI commands which transfer variable length data to host
4618 * might overflow due to application error or hardare bug. This
4619 * function checks whether overflow should be drained and ignored
4620 * for @qc.
4621 *
4622 * LOCKING:
4623 * None.
4624 *
4625 * RETURNS:
4626 * 1 if @qc may overflow; otherwise, 0.
4627 */
4628static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
4629{
4630 if (qc->tf.protocol != ATA_PROT_ATAPI &&
4631 qc->tf.protocol != ATA_PROT_ATAPI_DMA)
4632 return 0;
4633
4634 if (qc->tf.flags & ATA_TFLAG_WRITE)
4635 return 0;
4636
4637 switch (qc->cdb[0]) {
4638 case READ_10:
4639 case READ_12:
4640 case WRITE_10:
4641 case WRITE_12:
4642 case GPCMD_READ_CD:
4643 case GPCMD_READ_CD_MSF:
4644 return 0;
4645 }
4646
4647 return 1;
4648}
4649
31cc23b3
TH
4650/**
4651 * ata_std_qc_defer - Check whether a qc needs to be deferred
4652 * @qc: ATA command in question
4653 *
4654 * Non-NCQ commands cannot run with any other command, NCQ or
4655 * not. As upper layer only knows the queue depth, we are
4656 * responsible for maintaining exclusion. This function checks
4657 * whether a new command @qc can be issued.
4658 *
4659 * LOCKING:
4660 * spin_lock_irqsave(host lock)
4661 *
4662 * RETURNS:
4663 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4664 */
4665int ata_std_qc_defer(struct ata_queued_cmd *qc)
4666{
4667 struct ata_link *link = qc->dev->link;
4668
4669 if (qc->tf.protocol == ATA_PROT_NCQ) {
4670 if (!ata_tag_valid(link->active_tag))
4671 return 0;
4672 } else {
4673 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4674 return 0;
4675 }
4676
4677 return ATA_DEFER_LINK;
4678}
4679
1da177e4
LT
4680/**
4681 * ata_qc_prep - Prepare taskfile for submission
4682 * @qc: Metadata associated with taskfile to be prepared
4683 *
780a87f7
JG
4684 * Prepare ATA taskfile for submission.
4685 *
1da177e4 4686 * LOCKING:
cca3974e 4687 * spin_lock_irqsave(host lock)
1da177e4
LT
4688 */
4689void ata_qc_prep(struct ata_queued_cmd *qc)
4690{
4691 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4692 return;
4693
4694 ata_fill_sg(qc);
4695}
4696
d26fc955
AC
4697/**
4698 * ata_dumb_qc_prep - Prepare taskfile for submission
4699 * @qc: Metadata associated with taskfile to be prepared
4700 *
4701 * Prepare ATA taskfile for submission.
4702 *
4703 * LOCKING:
4704 * spin_lock_irqsave(host lock)
4705 */
4706void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4707{
4708 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4709 return;
4710
4711 ata_fill_sg_dumb(qc);
4712}
4713
e46834cd
BK
/* no-op ->qc_prep for controllers which need no PRD setup */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4715
0cba632b
JG
/**
 *	ata_sg_init_one - Associate command with memory buffer
 *	@qc: Command to be associated
 *	@buf: Memory buffer
 *	@buflen: Length of memory buffer, in bytes.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a single memory buffer, @buf of byte length @buflen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
	qc->flags |= ATA_QCFLAG_SINGLE;

	/* the embedded sgent serves as a one-entry scatterlist */
	qc->__sg = &qc->sgent;
	qc->n_elem = 1;
	qc->orig_n_elem = 1;
	qc->buf_virt = buf;
	qc->nbytes = buflen;
	qc->cursg = qc->__sg;

	sg_init_one(&qc->sgent, buf, buflen);
}
4742
0cba632b
JG
4743/**
4744 * ata_sg_init - Associate command with scatter-gather table.
4745 * @qc: Command to be associated
4746 * @sg: Scatter-gather table.
4747 * @n_elem: Number of elements in s/g table.
4748 *
4749 * Initialize the data-related elements of queued_cmd @qc
4750 * to point to a scatter-gather table @sg, containing @n_elem
4751 * elements.
4752 *
4753 * LOCKING:
cca3974e 4754 * spin_lock_irqsave(host lock)
0cba632b
JG
4755 */
4756
1da177e4
LT
4757void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4758 unsigned int n_elem)
4759{
4760 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4761 qc->__sg = sg;
1da177e4 4762 qc->n_elem = n_elem;
cedc9a47 4763 qc->orig_n_elem = n_elem;
87260216 4764 qc->cursg = qc->__sg;
1da177e4
LT
4765}
4766
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		/* each tag owns a fixed slot in the port's pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, pre-fill the pad buffer with the tail bytes */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* buffer fully absorbed by the pad entry - nothing left to map */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
4835
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* each tag owns a fixed slot in the port's pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		sg_init_table(psg, 1);
		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
				qc->pad_len, offset_in_page(offset));

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* for writes, pre-fill the pad buffer with the
			 * tail bytes of the last sg entry
			 */
			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the last entry if it was fully absorbed by the pad */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
4921
0baab86b 4922/**
c893a3ae 4923 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4924 * @buf: Buffer to swap
4925 * @buf_words: Number of 16-bit words in buffer.
4926 *
4927 * Swap halves of 16-bit words if needed to convert from
4928 * little-endian byte order to native cpu byte order, or
4929 * vice-versa.
4930 *
4931 * LOCKING:
6f0ef4fa 4932 * Inherited from caller.
0baab86b 4933 */
1da177e4
LT
4934void swap_buf_le16(u16 *buf, unsigned int buf_words)
4935{
4936#ifdef __BIG_ENDIAN
4937 unsigned int i;
4938
4939 for (i = 0; i < buf_words; i++)
4940 buf[i] = le16_to_cpu(buf[i]);
4941#endif /* __BIG_ENDIAN */
4942}
4943
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: non-zero to write to the device, zero to read
 *
 *	Transfer data from/to the device data register by PIO,
 *	16 bits at a time; an odd trailing byte is handled through
 *	a bounce word.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->link->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		/* bounce through an aligned 16-bit word so only the
		 * valid byte of the buffer is read/written */
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
4982
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: non-zero to write to the device, zero to read
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;
	/* disable local interrupts for the duration of the PIO burst */
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
5004
5005
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device,
 *	advancing the command's scatter/gather cursor.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* about to transfer the last sector: move HSM to its final state */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page: directly addressable, no mapping needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* current s/g entry exhausted: advance to the next one */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
1da177e4 5061
/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.  READ/WRITE MULTIPLE
 *	commands move up to dev->multi_count sectors per DRQ.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		/* don't transfer more than remains in the command */
		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_altstatus(qc->ap); /* flush */
}
5090
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB and
 *	advance the HSM according to the ATAPI protocol variant.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	/* ATAPI CDBs are at least 12 bytes */
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		/* no data phase; wait for command completion */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
5126
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, draining or
 *	zero-padding when the device asks for more than the
 *	scatter/gather list holds.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Zero on success, -1 if the device requested an excessive
 *	amount of trailing data.
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct ata_eh_info *ehi = &qc->dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int i;

		/* refuse to drain more than ATAPI_MAX_DRAIN past the buffer */
		if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
			ata_ehi_push_desc(ehi, "too much trailing data "
					  "buf=%u cur=%u bytes=%u",
					  qc->nbytes, qc->curbytes, bytes);
			return -1;
		}

		/* overflow is expected for misc ATAPI commands */
		if (bytes && !atapi_qc_may_overflow(qc))
			ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
				       "trailing data (cdb=%02x nbytes=%u)\n",
				       bytes, qc->cdb[0], qc->nbytes);

		/* drain/pad one 16-bit word at a time */
		for (i = 0; i < (bytes + 1) / 2; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);

		qc->curbytes += bytes;

		return 0;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	/* odd-length chunk: the hardware moved a full 16-bit word, so
	 * one extra device byte was consumed if more data follows */
	if ((count & 1) && bytes)
		bytes--;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;

	return 0;
}
5230
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the byte count and interrupt reason registers, validate
 *	the transfer phase, then transfer data from/to the ATAPI
 *	device.  On protocol violation, flags AC_ERR_HSM and moves
 *	the HSM to HSM_ST_ERR.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (__atapi_pio_bytes(qc, bytes))
		goto err_out;
	ata_altstatus(ap); /* flush */

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
5282
5283/**
c234fb00
AL
5284 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5285 * @ap: the target ata_port
5286 * @qc: qc on going
1da177e4 5287 *
c234fb00
AL
5288 * RETURNS:
5289 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5290 */
c234fb00
AL
5291
5292static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5293{
c234fb00
AL
5294 if (qc->tf.flags & ATA_TFLAG_POLLING)
5295 return 1;
1da177e4 5296
c234fb00
AL
5297 if (ap->hsm_task_state == HSM_ST_FIRST) {
5298 if (qc->tf.protocol == ATA_PROT_PIO &&
5299 (qc->tf.flags & ATA_TFLAG_WRITE))
5300 return 1;
1da177e4 5301
405e66b3 5302 if (ata_is_atapi(qc->tf.protocol) &&
c234fb00
AL
5303 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5304 return 1;
fe79e683
AL
5305 }
5306
c234fb00
AL
5307 return 0;
5308}
1da177e4 5309
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.  An HSM-violation
 *	error freezes the port (new EH only); otherwise the qc is
 *	completed normally.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable interrupts before completing */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old EH: no freeze support, just complete */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5359
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Drive the PIO/ATAPI host state machine one or more steps,
 *	transferring data and/or completing @qc as the device status
 *	dictates.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_port_printk(ap, KERN_WARNING,
						"DRQ=1 with device error, "
						"dev_stat 0x%X\n", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5601
/* Workqueue entry point: poll BSY, then drive the HSM for the
 * command stashed in ap->port_task_data, rescheduling itself while
 * the device stays busy or the HSM requests further polling.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy: retry later without hogging the CPU */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5639
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port associated with device @dev
 *
 *	Atomically allocate a free command tag on @ap and return the
 *	matching qc, or NULL when the port is frozen or all tags are
 *	in use.
 *
 *	LOCKING:
 *	None.
 */

static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
5670
5671/**
5672 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5673 * @dev: Device from whom we request an available command structure
5674 *
5675 * LOCKING:
0cba632b 5676 * None.
1da177e4
LT
5677 */
5678
3373efd8 5679struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5680{
9af5c9c9 5681 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5682 struct ata_queued_cmd *qc;
5683
5684 qc = ata_qc_new(ap);
5685 if (qc) {
1da177e4
LT
5686 qc->scsicmd = NULL;
5687 qc->ap = ap;
5688 qc->dev = dev;
1da177e4 5689
2c13b7ce 5690 ata_qc_reinit(qc);
1da177e4
LT
5691 }
5692
5693 return qc;
5694}
5695
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.  Releases the
 *	command's tag back to the port's allocation bitmap.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison the tag before releasing so stale users trip */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
5720
/* Core completion: unmap DMA, retire the command's tag from the
 * link/port active bookkeeping, then invoke the completion callback.
 * Caller holds the host lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5757
39599a53
TH
5758static void fill_result_tf(struct ata_queued_cmd *qc)
5759{
5760 struct ata_port *ap = qc->ap;
5761
39599a53 5762 qc->result_tf.flags = qc->tf.flags;
4742d54f 5763 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5764}
5765
00115e0f
TH
5766static void ata_verify_xfer(struct ata_queued_cmd *qc)
5767{
5768 struct ata_device *dev = qc->dev;
5769
5770 if (ata_tag_internal(qc->tag))
5771 return;
5772
5773 if (ata_is_nodata(qc->tf.protocol))
5774 return;
5775
5776 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5777 return;
5778
5779 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5780}
5781
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		/* successful data transfer confirms a new transfer mode */
		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5868
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This functions is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active is compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	/* bits that changed are the completed commands */
	done_mask = ap->qc_active ^ qc_active;

	/* a qc may only go from active to inactive, never the reverse */
	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
5917
1da177e4
LT
5918/**
5919 * ata_qc_issue - issue taskfile to device
5920 * @qc: command to issue to device
5921 *
5922 * Prepare an ATA command to submission to device.
5923 * This includes mapping the data into a DMA-able
5924 * area, filling in the S/G table, and finally
5925 * writing the taskfile to hardware, starting the command.
5926 *
5927 * LOCKING:
cca3974e 5928 * spin_lock_irqsave(host lock)
1da177e4 5929 */
8e0e694a 5930void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5931{
5932 struct ata_port *ap = qc->ap;
9af5c9c9 5933 struct ata_link *link = qc->dev->link;
405e66b3 5934 u8 prot = qc->tf.protocol;
1da177e4 5935
dedaf2b0
TH
5936 /* Make sure only one non-NCQ command is outstanding. The
5937 * check is skipped for old EH because it reuses active qc to
5938 * request ATAPI sense.
5939 */
9af5c9c9 5940 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0 5941
405e66b3 5942 if (prot == ATA_PROT_NCQ) {
9af5c9c9 5943 WARN_ON(link->sactive & (1 << qc->tag));
da917d69
TH
5944
5945 if (!link->sactive)
5946 ap->nr_active_links++;
9af5c9c9 5947 link->sactive |= 1 << qc->tag;
dedaf2b0 5948 } else {
9af5c9c9 5949 WARN_ON(link->sactive);
da917d69
TH
5950
5951 ap->nr_active_links++;
9af5c9c9 5952 link->active_tag = qc->tag;
dedaf2b0
TH
5953 }
5954
e4a70e76 5955 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5956 ap->qc_active |= 1 << qc->tag;
e4a70e76 5957
405e66b3
TH
5958 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5959 (ap->flags & ATA_FLAG_PIO_DMA))) {
1da177e4
LT
5960 if (qc->flags & ATA_QCFLAG_SG) {
5961 if (ata_sg_setup(qc))
8e436af9 5962 goto sg_err;
1da177e4
LT
5963 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5964 if (ata_sg_setup_one(qc))
8e436af9 5965 goto sg_err;
1da177e4
LT
5966 }
5967 } else {
5968 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5969 }
5970
054a5fba
TH
5971 /* if device is sleeping, schedule softreset and abort the link */
5972 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5973 link->eh_info.action |= ATA_EH_SOFTRESET;
5974 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5975 ata_link_abort(link);
5976 return;
5977 }
5978
1da177e4
LT
5979 ap->ops->qc_prep(qc);
5980
8e0e694a
TH
5981 qc->err_mask |= ap->ops->qc_issue(qc);
5982 if (unlikely(qc->err_mask))
5983 goto err;
5984 return;
1da177e4 5985
8e436af9
TH
5986sg_err:
5987 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
5988 qc->err_mask |= AC_ERR_SYSTEM;
5989err:
5990 ata_qc_complete(qc);
1da177e4
LT
5991}
5992
5993/**
5994 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5995 * @qc: command to issue to device
5996 *
5997 * Using various libata functions and hooks, this function
5998 * starts an ATA command. ATA commands are grouped into
5999 * classes called "protocols", and issuing each type of protocol
6000 * is slightly different.
6001 *
0baab86b
EF
6002 * May be used as the qc_issue() entry in ata_port_operations.
6003 *
1da177e4 6004 * LOCKING:
cca3974e 6005 * spin_lock_irqsave(host lock)
1da177e4
LT
6006 *
6007 * RETURNS:
9a3d9eb0 6008 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
6009 */
6010
9a3d9eb0 6011unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
6012{
6013 struct ata_port *ap = qc->ap;
6014
e50362ec
AL
6015 /* Use polling pio if the LLD doesn't handle
6016 * interrupt driven pio and atapi CDB interrupt.
6017 */
6018 if (ap->flags & ATA_FLAG_PIO_POLLING) {
6019 switch (qc->tf.protocol) {
6020 case ATA_PROT_PIO:
e3472cbe 6021 case ATA_PROT_NODATA:
e50362ec
AL
6022 case ATA_PROT_ATAPI:
6023 case ATA_PROT_ATAPI_NODATA:
6024 qc->tf.flags |= ATA_TFLAG_POLLING;
6025 break;
6026 case ATA_PROT_ATAPI_DMA:
6027 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 6028 /* see ata_dma_blacklisted() */
e50362ec
AL
6029 BUG();
6030 break;
6031 default:
6032 break;
6033 }
6034 }
6035
312f7da2 6036 /* select the device */
1da177e4
LT
6037 ata_dev_select(ap, qc->dev->devno, 1, 0);
6038
312f7da2 6039 /* start the command */
1da177e4
LT
6040 switch (qc->tf.protocol) {
6041 case ATA_PROT_NODATA:
312f7da2
AL
6042 if (qc->tf.flags & ATA_TFLAG_POLLING)
6043 ata_qc_set_polling(qc);
6044
e5338254 6045 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
6046 ap->hsm_task_state = HSM_ST_LAST;
6047
6048 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 6049 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 6050
1da177e4
LT
6051 break;
6052
6053 case ATA_PROT_DMA:
587005de 6054 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6055
1da177e4
LT
6056 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6057 ap->ops->bmdma_setup(qc); /* set up bmdma */
6058 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 6059 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
6060 break;
6061
312f7da2
AL
6062 case ATA_PROT_PIO:
6063 if (qc->tf.flags & ATA_TFLAG_POLLING)
6064 ata_qc_set_polling(qc);
1da177e4 6065
e5338254 6066 ata_tf_to_host(ap, &qc->tf);
312f7da2 6067
54f00389
AL
6068 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6069 /* PIO data out protocol */
6070 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 6071 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
6072
6073 /* always send first data block using
e27486db 6074 * the ata_pio_task() codepath.
54f00389 6075 */
312f7da2 6076 } else {
54f00389
AL
6077 /* PIO data in protocol */
6078 ap->hsm_task_state = HSM_ST;
6079
6080 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 6081 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
6082
6083 /* if polling, ata_pio_task() handles the rest.
6084 * otherwise, interrupt handler takes over from here.
6085 */
312f7da2
AL
6086 }
6087
1da177e4
LT
6088 break;
6089
1da177e4 6090 case ATA_PROT_ATAPI:
1da177e4 6091 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
6092 if (qc->tf.flags & ATA_TFLAG_POLLING)
6093 ata_qc_set_polling(qc);
6094
e5338254 6095 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 6096
312f7da2
AL
6097 ap->hsm_task_state = HSM_ST_FIRST;
6098
6099 /* send cdb by polling if no cdb interrupt */
6100 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6101 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 6102 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
6103 break;
6104
6105 case ATA_PROT_ATAPI_DMA:
587005de 6106 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6107
1da177e4
LT
6108 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6109 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
6110 ap->hsm_task_state = HSM_ST_FIRST;
6111
6112 /* send cdb by polling if no cdb interrupt */
6113 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 6114 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
6115 break;
6116
6117 default:
6118 WARN_ON(1);
9a3d9eb0 6119 return AC_ERR_SYSTEM;
1da177e4
LT
6120 }
6121
6122 return 0;
6123}
6124
1da177e4
LT
6125/**
6126 * ata_host_intr - Handle host interrupt for given (port, task)
6127 * @ap: Port on which interrupt arrived (possibly...)
6128 * @qc: Taskfile currently active in engine
6129 *
6130 * Handle host interrupt for given queued command. Currently,
6131 * only DMA interrupts are handled. All other commands are
6132 * handled via polling with interrupts disabled (nIEN bit).
6133 *
6134 * LOCKING:
cca3974e 6135 * spin_lock_irqsave(host lock)
1da177e4
LT
6136 *
6137 * RETURNS:
6138 * One if interrupt was handled, zero if not (shared irq).
6139 */
6140
2dcb407e
JG
6141inline unsigned int ata_host_intr(struct ata_port *ap,
6142 struct ata_queued_cmd *qc)
1da177e4 6143{
9af5c9c9 6144 struct ata_eh_info *ehi = &ap->link.eh_info;
312f7da2 6145 u8 status, host_stat = 0;
1da177e4 6146
312f7da2 6147 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 6148 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 6149
312f7da2
AL
6150 /* Check whether we are expecting interrupt in this state */
6151 switch (ap->hsm_task_state) {
6152 case HSM_ST_FIRST:
6912ccd5
AL
6153 /* Some pre-ATAPI-4 devices assert INTRQ
6154 * at this state when ready to receive CDB.
6155 */
1da177e4 6156
312f7da2 6157 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
405e66b3
TH
6158 * The flag was turned on only for atapi devices. No
6159 * need to check ata_is_atapi(qc->tf.protocol) again.
312f7da2
AL
6160 */
6161 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 6162 goto idle_irq;
1da177e4 6163 break;
312f7da2
AL
6164 case HSM_ST_LAST:
6165 if (qc->tf.protocol == ATA_PROT_DMA ||
6166 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
6167 /* check status of DMA engine */
6168 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
6169 VPRINTK("ata%u: host_stat 0x%X\n",
6170 ap->print_id, host_stat);
312f7da2
AL
6171
6172 /* if it's not our irq... */
6173 if (!(host_stat & ATA_DMA_INTR))
6174 goto idle_irq;
6175
6176 /* before we do anything else, clear DMA-Start bit */
6177 ap->ops->bmdma_stop(qc);
a4f16610
AL
6178
6179 if (unlikely(host_stat & ATA_DMA_ERR)) {
6180 /* error when transfering data to/from memory */
6181 qc->err_mask |= AC_ERR_HOST_BUS;
6182 ap->hsm_task_state = HSM_ST_ERR;
6183 }
312f7da2
AL
6184 }
6185 break;
6186 case HSM_ST:
6187 break;
1da177e4
LT
6188 default:
6189 goto idle_irq;
6190 }
6191
312f7da2
AL
6192 /* check altstatus */
6193 status = ata_altstatus(ap);
6194 if (status & ATA_BUSY)
6195 goto idle_irq;
1da177e4 6196
312f7da2
AL
6197 /* check main status, clearing INTRQ */
6198 status = ata_chk_status(ap);
6199 if (unlikely(status & ATA_BUSY))
6200 goto idle_irq;
1da177e4 6201
312f7da2
AL
6202 /* ack bmdma irq events */
6203 ap->ops->irq_clear(ap);
1da177e4 6204
bb5cb290 6205 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
6206
6207 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
6208 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
6209 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6210
1da177e4
LT
6211 return 1; /* irq handled */
6212
6213idle_irq:
6214 ap->stats.idle_irq++;
6215
6216#ifdef ATA_IRQ_TRAP
6217 if ((ap->stats.idle_irq % 1000) == 0) {
6d32d30f
JG
6218 ata_chk_status(ap);
6219 ap->ops->irq_clear(ap);
f15a1daf 6220 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 6221 return 1;
1da177e4
LT
6222 }
6223#endif
6224 return 0; /* irq not handled */
6225}
6226
6227/**
6228 * ata_interrupt - Default ATA host interrupt handler
0cba632b 6229 * @irq: irq line (unused)
cca3974e 6230 * @dev_instance: pointer to our ata_host information structure
1da177e4 6231 *
0cba632b
JG
6232 * Default interrupt handler for PCI IDE devices. Calls
6233 * ata_host_intr() for each port that is not disabled.
6234 *
1da177e4 6235 * LOCKING:
cca3974e 6236 * Obtains host lock during operation.
1da177e4
LT
6237 *
6238 * RETURNS:
0cba632b 6239 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
6240 */
6241
2dcb407e 6242irqreturn_t ata_interrupt(int irq, void *dev_instance)
1da177e4 6243{
cca3974e 6244 struct ata_host *host = dev_instance;
1da177e4
LT
6245 unsigned int i;
6246 unsigned int handled = 0;
6247 unsigned long flags;
6248
6249 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 6250 spin_lock_irqsave(&host->lock, flags);
1da177e4 6251
cca3974e 6252 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
6253 struct ata_port *ap;
6254
cca3974e 6255 ap = host->ports[i];
c1389503 6256 if (ap &&
029f5468 6257 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
6258 struct ata_queued_cmd *qc;
6259
9af5c9c9 6260 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 6261 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 6262 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
6263 handled |= ata_host_intr(ap, qc);
6264 }
6265 }
6266
cca3974e 6267 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
6268
6269 return IRQ_RETVAL(handled);
6270}
6271
34bf2170
TH
6272/**
6273 * sata_scr_valid - test whether SCRs are accessible
936fd732 6274 * @link: ATA link to test SCR accessibility for
34bf2170 6275 *
936fd732 6276 * Test whether SCRs are accessible for @link.
34bf2170
TH
6277 *
6278 * LOCKING:
6279 * None.
6280 *
6281 * RETURNS:
6282 * 1 if SCRs are accessible, 0 otherwise.
6283 */
936fd732 6284int sata_scr_valid(struct ata_link *link)
34bf2170 6285{
936fd732
TH
6286 struct ata_port *ap = link->ap;
6287
a16abc0b 6288 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6289}
6290
6291/**
6292 * sata_scr_read - read SCR register of the specified port
936fd732 6293 * @link: ATA link to read SCR for
34bf2170
TH
6294 * @reg: SCR to read
6295 * @val: Place to store read value
6296 *
936fd732 6297 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6298 * guaranteed to succeed if @link is ap->link, the cable type of
6299 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6300 *
6301 * LOCKING:
633273a3 6302 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6303 *
6304 * RETURNS:
6305 * 0 on success, negative errno on failure.
6306 */
936fd732 6307int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6308{
633273a3
TH
6309 if (ata_is_host_link(link)) {
6310 struct ata_port *ap = link->ap;
936fd732 6311
633273a3
TH
6312 if (sata_scr_valid(link))
6313 return ap->ops->scr_read(ap, reg, val);
6314 return -EOPNOTSUPP;
6315 }
6316
6317 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6318}
6319
6320/**
6321 * sata_scr_write - write SCR register of the specified port
936fd732 6322 * @link: ATA link to write SCR for
34bf2170
TH
6323 * @reg: SCR to write
6324 * @val: value to write
6325 *
936fd732 6326 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6327 * guaranteed to succeed if @link is ap->link, the cable type of
6328 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6329 *
6330 * LOCKING:
633273a3 6331 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6332 *
6333 * RETURNS:
6334 * 0 on success, negative errno on failure.
6335 */
936fd732 6336int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6337{
633273a3
TH
6338 if (ata_is_host_link(link)) {
6339 struct ata_port *ap = link->ap;
6340
6341 if (sata_scr_valid(link))
6342 return ap->ops->scr_write(ap, reg, val);
6343 return -EOPNOTSUPP;
6344 }
936fd732 6345
633273a3 6346 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6347}
6348
6349/**
6350 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6351 * @link: ATA link to write SCR for
34bf2170
TH
6352 * @reg: SCR to write
6353 * @val: value to write
6354 *
6355 * This function is identical to sata_scr_write() except that this
6356 * function performs flush after writing to the register.
6357 *
6358 * LOCKING:
633273a3 6359 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6360 *
6361 * RETURNS:
6362 * 0 on success, negative errno on failure.
6363 */
936fd732 6364int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6365{
633273a3
TH
6366 if (ata_is_host_link(link)) {
6367 struct ata_port *ap = link->ap;
6368 int rc;
da3dbb17 6369
633273a3
TH
6370 if (sata_scr_valid(link)) {
6371 rc = ap->ops->scr_write(ap, reg, val);
6372 if (rc == 0)
6373 rc = ap->ops->scr_read(ap, reg, &val);
6374 return rc;
6375 }
6376 return -EOPNOTSUPP;
34bf2170 6377 }
633273a3
TH
6378
6379 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6380}
6381
6382/**
936fd732
TH
6383 * ata_link_online - test whether the given link is online
6384 * @link: ATA link to test
34bf2170 6385 *
936fd732
TH
6386 * Test whether @link is online. Note that this function returns
6387 * 0 if online status of @link cannot be obtained, so
6388 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6389 *
6390 * LOCKING:
6391 * None.
6392 *
6393 * RETURNS:
6394 * 1 if the port online status is available and online.
6395 */
936fd732 6396int ata_link_online(struct ata_link *link)
34bf2170
TH
6397{
6398 u32 sstatus;
6399
936fd732
TH
6400 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6401 (sstatus & 0xf) == 0x3)
34bf2170
TH
6402 return 1;
6403 return 0;
6404}
6405
6406/**
936fd732
TH
6407 * ata_link_offline - test whether the given link is offline
6408 * @link: ATA link to test
34bf2170 6409 *
936fd732
TH
6410 * Test whether @link is offline. Note that this function
6411 * returns 0 if offline status of @link cannot be obtained, so
6412 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6413 *
6414 * LOCKING:
6415 * None.
6416 *
6417 * RETURNS:
6418 * 1 if the port offline status is available and offline.
6419 */
936fd732 6420int ata_link_offline(struct ata_link *link)
34bf2170
TH
6421{
6422 u32 sstatus;
6423
936fd732
TH
6424 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6425 (sstatus & 0xf) != 0x3)
34bf2170
TH
6426 return 1;
6427 return 0;
6428}
0baab86b 6429
77b08fb5 6430int ata_flush_cache(struct ata_device *dev)
9b847548 6431{
977e6b9f 6432 unsigned int err_mask;
9b847548
JA
6433 u8 cmd;
6434
6435 if (!ata_try_flush_cache(dev))
6436 return 0;
6437
6fc49adb 6438 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6439 cmd = ATA_CMD_FLUSH_EXT;
6440 else
6441 cmd = ATA_CMD_FLUSH;
6442
4f34337b
AC
6443 /* This is wrong. On a failed flush we get back the LBA of the lost
6444 sector and we should (assuming it wasn't aborted as unknown) issue
2dcb407e 6445 a further flush command to continue the writeback until it
4f34337b 6446 does not error */
977e6b9f
TH
6447 err_mask = ata_do_simple_cmd(dev, cmd);
6448 if (err_mask) {
6449 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6450 return -EIO;
6451 }
6452
6453 return 0;
9b847548
JA
6454}
6455
6ffa01d8 6456#ifdef CONFIG_PM
cca3974e
JG
6457static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6458 unsigned int action, unsigned int ehi_flags,
6459 int wait)
500530f6
TH
6460{
6461 unsigned long flags;
6462 int i, rc;
6463
cca3974e
JG
6464 for (i = 0; i < host->n_ports; i++) {
6465 struct ata_port *ap = host->ports[i];
e3667ebf 6466 struct ata_link *link;
500530f6
TH
6467
6468 /* Previous resume operation might still be in
6469 * progress. Wait for PM_PENDING to clear.
6470 */
6471 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6472 ata_port_wait_eh(ap);
6473 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6474 }
6475
6476 /* request PM ops to EH */
6477 spin_lock_irqsave(ap->lock, flags);
6478
6479 ap->pm_mesg = mesg;
6480 if (wait) {
6481 rc = 0;
6482 ap->pm_result = &rc;
6483 }
6484
6485 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
6486 __ata_port_for_each_link(link, ap) {
6487 link->eh_info.action |= action;
6488 link->eh_info.flags |= ehi_flags;
6489 }
500530f6
TH
6490
6491 ata_port_schedule_eh(ap);
6492
6493 spin_unlock_irqrestore(ap->lock, flags);
6494
6495 /* wait and check result */
6496 if (wait) {
6497 ata_port_wait_eh(ap);
6498 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6499 if (rc)
6500 return rc;
6501 }
6502 }
6503
6504 return 0;
6505}
6506
6507/**
cca3974e
JG
6508 * ata_host_suspend - suspend host
6509 * @host: host to suspend
500530f6
TH
6510 * @mesg: PM message
6511 *
cca3974e 6512 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6513 * function requests EH to perform PM operations and waits for EH
6514 * to finish.
6515 *
6516 * LOCKING:
6517 * Kernel thread context (may sleep).
6518 *
6519 * RETURNS:
6520 * 0 on success, -errno on failure.
6521 */
cca3974e 6522int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6523{
9666f400 6524 int rc;
500530f6 6525
ca77329f
KCA
6526 /*
6527 * disable link pm on all ports before requesting
6528 * any pm activity
6529 */
6530 ata_lpm_enable(host);
6531
cca3974e 6532 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
6533 if (rc == 0)
6534 host->dev->power.power_state = mesg;
500530f6
TH
6535 return rc;
6536}
6537
6538/**
cca3974e
JG
6539 * ata_host_resume - resume host
6540 * @host: host to resume
500530f6 6541 *
cca3974e 6542 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6543 * function requests EH to perform PM operations and returns.
6544 * Note that all resume operations are performed parallely.
6545 *
6546 * LOCKING:
6547 * Kernel thread context (may sleep).
6548 */
cca3974e 6549void ata_host_resume(struct ata_host *host)
500530f6 6550{
cca3974e
JG
6551 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6552 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6553 host->dev->power.power_state = PMSG_ON;
ca77329f
KCA
6554
6555 /* reenable link pm */
6556 ata_lpm_disable(host);
500530f6 6557}
6ffa01d8 6558#endif
500530f6 6559
c893a3ae
RD
6560/**
6561 * ata_port_start - Set port up for dma.
6562 * @ap: Port to initialize
6563 *
6564 * Called just after data structures for each port are
6565 * initialized. Allocates space for PRD table.
6566 *
6567 * May be used as the port_start() entry in ata_port_operations.
6568 *
6569 * LOCKING:
6570 * Inherited from caller.
6571 */
f0d36efd 6572int ata_port_start(struct ata_port *ap)
1da177e4 6573{
2f1f610b 6574 struct device *dev = ap->dev;
6037d6bb 6575 int rc;
1da177e4 6576
f0d36efd
TH
6577 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6578 GFP_KERNEL);
1da177e4
LT
6579 if (!ap->prd)
6580 return -ENOMEM;
6581
6037d6bb 6582 rc = ata_pad_alloc(ap, dev);
f0d36efd 6583 if (rc)
6037d6bb 6584 return rc;
1da177e4 6585
f0d36efd
TH
6586 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6587 (unsigned long long)ap->prd_dma);
1da177e4
LT
6588 return 0;
6589}
6590
3ef3b43d
TH
6591/**
6592 * ata_dev_init - Initialize an ata_device structure
6593 * @dev: Device structure to initialize
6594 *
6595 * Initialize @dev in preparation for probing.
6596 *
6597 * LOCKING:
6598 * Inherited from caller.
6599 */
6600void ata_dev_init(struct ata_device *dev)
6601{
9af5c9c9
TH
6602 struct ata_link *link = dev->link;
6603 struct ata_port *ap = link->ap;
72fa4b74
TH
6604 unsigned long flags;
6605
5a04bf4b 6606 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
6607 link->sata_spd_limit = link->hw_sata_spd_limit;
6608 link->sata_spd = 0;
5a04bf4b 6609
72fa4b74
TH
6610 /* High bits of dev->flags are used to record warm plug
6611 * requests which occur asynchronously. Synchronize using
cca3974e 6612 * host lock.
72fa4b74 6613 */
ba6a1308 6614 spin_lock_irqsave(ap->lock, flags);
72fa4b74 6615 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 6616 dev->horkage = 0;
ba6a1308 6617 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 6618
72fa4b74
TH
6619 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6620 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
6621 dev->pio_mask = UINT_MAX;
6622 dev->mwdma_mask = UINT_MAX;
6623 dev->udma_mask = UINT_MAX;
6624}
6625
4fb37a25
TH
6626/**
6627 * ata_link_init - Initialize an ata_link structure
6628 * @ap: ATA port link is attached to
6629 * @link: Link structure to initialize
8989805d 6630 * @pmp: Port multiplier port number
4fb37a25
TH
6631 *
6632 * Initialize @link.
6633 *
6634 * LOCKING:
6635 * Kernel thread context (may sleep)
6636 */
fb7fd614 6637void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6638{
6639 int i;
6640
6641 /* clear everything except for devices */
6642 memset(link, 0, offsetof(struct ata_link, device[0]));
6643
6644 link->ap = ap;
8989805d 6645 link->pmp = pmp;
4fb37a25
TH
6646 link->active_tag = ATA_TAG_POISON;
6647 link->hw_sata_spd_limit = UINT_MAX;
6648
6649 /* can't use iterator, ap isn't initialized yet */
6650 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6651 struct ata_device *dev = &link->device[i];
6652
6653 dev->link = link;
6654 dev->devno = dev - link->device;
6655 ata_dev_init(dev);
6656 }
6657}
6658
6659/**
6660 * sata_link_init_spd - Initialize link->sata_spd_limit
6661 * @link: Link to configure sata_spd_limit for
6662 *
6663 * Initialize @link->[hw_]sata_spd_limit to the currently
6664 * configured value.
6665 *
6666 * LOCKING:
6667 * Kernel thread context (may sleep).
6668 *
6669 * RETURNS:
6670 * 0 on success, -errno on failure.
6671 */
fb7fd614 6672int sata_link_init_spd(struct ata_link *link)
4fb37a25
TH
6673{
6674 u32 scontrol, spd;
6675 int rc;
6676
6677 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6678 if (rc)
6679 return rc;
6680
6681 spd = (scontrol >> 4) & 0xf;
6682 if (spd)
6683 link->hw_sata_spd_limit &= (1 << spd) - 1;
6684
6685 link->sata_spd_limit = link->hw_sata_spd_limit;
6686
6687 return 0;
6688}
6689
1da177e4 6690/**
f3187195
TH
6691 * ata_port_alloc - allocate and initialize basic ATA port resources
6692 * @host: ATA host this allocated port belongs to
1da177e4 6693 *
f3187195
TH
6694 * Allocate and initialize basic ATA port resources.
6695 *
6696 * RETURNS:
6697 * Allocate ATA port on success, NULL on failure.
0cba632b 6698 *
1da177e4 6699 * LOCKING:
f3187195 6700 * Inherited from calling layer (may sleep).
1da177e4 6701 */
f3187195 6702struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 6703{
f3187195 6704 struct ata_port *ap;
1da177e4 6705
f3187195
TH
6706 DPRINTK("ENTER\n");
6707
6708 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6709 if (!ap)
6710 return NULL;
6711
f4d6d004 6712 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6713 ap->lock = &host->lock;
198e0fed 6714 ap->flags = ATA_FLAG_DISABLED;
f3187195 6715 ap->print_id = -1;
1da177e4 6716 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6717 ap->host = host;
f3187195 6718 ap->dev = host->dev;
1da177e4 6719 ap->last_ctl = 0xFF;
bd5d825c
BP
6720
6721#if defined(ATA_VERBOSE_DEBUG)
6722 /* turn on all debugging levels */
6723 ap->msg_enable = 0x00FF;
6724#elif defined(ATA_DEBUG)
6725 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6726#else
0dd4b21f 6727 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6728#endif
1da177e4 6729
65f27f38
DH
6730 INIT_DELAYED_WORK(&ap->port_task, NULL);
6731 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6732 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6733 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6734 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
6735 init_timer_deferrable(&ap->fastdrain_timer);
6736 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6737 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 6738
838df628 6739 ap->cbl = ATA_CBL_NONE;
838df628 6740
8989805d 6741 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
6742
6743#ifdef ATA_IRQ_TRAP
6744 ap->stats.unhandled_irq = 1;
6745 ap->stats.idle_irq = 1;
6746#endif
1da177e4 6747 return ap;
1da177e4
LT
6748}
6749
f0d36efd
TH
6750static void ata_host_release(struct device *gendev, void *res)
6751{
6752 struct ata_host *host = dev_get_drvdata(gendev);
6753 int i;
6754
1aa506e4
TH
6755 for (i = 0; i < host->n_ports; i++) {
6756 struct ata_port *ap = host->ports[i];
6757
4911487a
TH
6758 if (!ap)
6759 continue;
6760
6761 if (ap->scsi_host)
1aa506e4
TH
6762 scsi_host_put(ap->scsi_host);
6763
633273a3 6764 kfree(ap->pmp_link);
4911487a 6765 kfree(ap);
1aa506e4
TH
6766 host->ports[i] = NULL;
6767 }
6768
1aa56cca 6769 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6770}
6771
f3187195
TH
6772/**
6773 * ata_host_alloc - allocate and init basic ATA host resources
6774 * @dev: generic device this host is associated with
6775 * @max_ports: maximum number of ATA ports associated with this host
6776 *
6777 * Allocate and initialize basic ATA host resources. LLD calls
6778 * this function to allocate a host, initializes it fully and
6779 * attaches it using ata_host_register().
6780 *
6781 * @max_ports ports are allocated and host->n_ports is
6782 * initialized to @max_ports. The caller is allowed to decrease
6783 * host->n_ports before calling ata_host_register(). The unused
6784 * ports will be automatically freed on registration.
6785 *
6786 * RETURNS:
6787 * Allocate ATA host on success, NULL on failure.
6788 *
6789 * LOCKING:
6790 * Inherited from calling layer (may sleep).
6791 */
6792struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6793{
6794 struct ata_host *host;
6795 size_t sz;
6796 int i;
6797
6798 DPRINTK("ENTER\n");
6799
6800 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6801 return NULL;
6802
6803 /* alloc a container for our list of ATA ports (buses) */
6804 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6805 /* alloc a container for our list of ATA ports (buses) */
6806 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6807 if (!host)
6808 goto err_out;
6809
6810 devres_add(dev, host);
6811 dev_set_drvdata(dev, host);
6812
6813 spin_lock_init(&host->lock);
6814 host->dev = dev;
6815 host->n_ports = max_ports;
6816
6817 /* allocate ports bound to this host */
6818 for (i = 0; i < max_ports; i++) {
6819 struct ata_port *ap;
6820
6821 ap = ata_port_alloc(host);
6822 if (!ap)
6823 goto err_out;
6824
6825 ap->port_no = i;
6826 host->ports[i] = ap;
6827 }
6828
6829 devres_remove_group(dev, NULL);
6830 return host;
6831
6832 err_out:
6833 devres_release_group(dev, NULL);
6834 return NULL;
6835}
6836
f5cda257
TH
6837/**
6838 * ata_host_alloc_pinfo - alloc host and init with port_info array
6839 * @dev: generic device this host is associated with
6840 * @ppi: array of ATA port_info to initialize host with
6841 * @n_ports: number of ATA ports attached to this host
6842 *
6843 * Allocate ATA host and initialize with info from @ppi. If NULL
6844 * terminated, @ppi may contain fewer entries than @n_ports. The
6845 * last entry will be used for the remaining ports.
6846 *
6847 * RETURNS:
6848 * Allocate ATA host on success, NULL on failure.
6849 *
6850 * LOCKING:
6851 * Inherited from calling layer (may sleep).
6852 */
6853struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6854 const struct ata_port_info * const * ppi,
6855 int n_ports)
6856{
6857 const struct ata_port_info *pi;
6858 struct ata_host *host;
6859 int i, j;
6860
6861 host = ata_host_alloc(dev, n_ports);
6862 if (!host)
6863 return NULL;
6864
6865 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6866 struct ata_port *ap = host->ports[i];
6867
6868 if (ppi[j])
6869 pi = ppi[j++];
6870
6871 ap->pio_mask = pi->pio_mask;
6872 ap->mwdma_mask = pi->mwdma_mask;
6873 ap->udma_mask = pi->udma_mask;
6874 ap->flags |= pi->flags;
0c88758b 6875 ap->link.flags |= pi->link_flags;
f5cda257
TH
6876 ap->ops = pi->port_ops;
6877
6878 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6879 host->ops = pi->port_ops;
6880 if (!host->private_data && pi->private_data)
6881 host->private_data = pi->private_data;
6882 }
6883
6884 return host;
6885}
6886
32ebbc0c
TH
6887static void ata_host_stop(struct device *gendev, void *res)
6888{
6889 struct ata_host *host = dev_get_drvdata(gendev);
6890 int i;
6891
6892 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6893
6894 for (i = 0; i < host->n_ports; i++) {
6895 struct ata_port *ap = host->ports[i];
6896
6897 if (ap->ops->port_stop)
6898 ap->ops->port_stop(ap);
6899 }
6900
6901 if (host->ops->host_stop)
6902 host->ops->host_stop(host);
6903}
6904
ecef7253
TH
6905/**
6906 * ata_host_start - start and freeze ports of an ATA host
6907 * @host: ATA host to start ports for
6908 *
6909 * Start and then freeze ports of @host. Started status is
6910 * recorded in host->flags, so this function can be called
6911 * multiple times. Ports are guaranteed to get started only
f3187195
TH
6912 * once. If host->ops isn't initialized yet, its set to the
6913 * first non-dummy port ops.
ecef7253
TH
6914 *
6915 * LOCKING:
6916 * Inherited from calling layer (may sleep).
6917 *
6918 * RETURNS:
6919 * 0 if all ports are started successfully, -errno otherwise.
6920 */
6921int ata_host_start(struct ata_host *host)
6922{
32ebbc0c
TH
6923 int have_stop = 0;
6924 void *start_dr = NULL;
ecef7253
TH
6925 int i, rc;
6926
6927 if (host->flags & ATA_HOST_STARTED)
6928 return 0;
6929
6930 for (i = 0; i < host->n_ports; i++) {
6931 struct ata_port *ap = host->ports[i];
6932
f3187195
TH
6933 if (!host->ops && !ata_port_is_dummy(ap))
6934 host->ops = ap->ops;
6935
32ebbc0c
TH
6936 if (ap->ops->port_stop)
6937 have_stop = 1;
6938 }
6939
6940 if (host->ops->host_stop)
6941 have_stop = 1;
6942
6943 if (have_stop) {
6944 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6945 if (!start_dr)
6946 return -ENOMEM;
6947 }
6948
6949 for (i = 0; i < host->n_ports; i++) {
6950 struct ata_port *ap = host->ports[i];
6951
ecef7253
TH
6952 if (ap->ops->port_start) {
6953 rc = ap->ops->port_start(ap);
6954 if (rc) {
0f9fe9b7 6955 if (rc != -ENODEV)
0f757743
AM
6956 dev_printk(KERN_ERR, host->dev,
6957 "failed to start port %d "
6958 "(errno=%d)\n", i, rc);
ecef7253
TH
6959 goto err_out;
6960 }
6961 }
ecef7253
TH
6962 ata_eh_freeze_port(ap);
6963 }
6964
32ebbc0c
TH
6965 if (start_dr)
6966 devres_add(host->dev, start_dr);
ecef7253
TH
6967 host->flags |= ATA_HOST_STARTED;
6968 return 0;
6969
6970 err_out:
6971 while (--i >= 0) {
6972 struct ata_port *ap = host->ports[i];
6973
6974 if (ap->ops->port_stop)
6975 ap->ops->port_stop(ap);
6976 }
32ebbc0c 6977 devres_free(start_dr);
ecef7253
TH
6978 return rc;
6979}
6980
b03732f0 6981/**
cca3974e
JG
6982 * ata_sas_host_init - Initialize a host struct
6983 * @host: host to initialize
6984 * @dev: device host is attached to
6985 * @flags: host flags
6986 * @ops: port_ops
b03732f0
BK
6987 *
6988 * LOCKING:
6989 * PCI/etc. bus probe sem.
6990 *
6991 */
f3187195 6992/* KILLME - the only user left is ipr */
cca3974e
JG
6993void ata_host_init(struct ata_host *host, struct device *dev,
6994 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 6995{
cca3974e
JG
6996 spin_lock_init(&host->lock);
6997 host->dev = dev;
6998 host->flags = flags;
6999 host->ops = ops;
b03732f0
BK
7000}
7001
f3187195
TH
7002/**
7003 * ata_host_register - register initialized ATA host
7004 * @host: ATA host to register
7005 * @sht: template for SCSI host
7006 *
7007 * Register initialized ATA host. @host is allocated using
7008 * ata_host_alloc() and fully initialized by LLD. This function
7009 * starts ports, registers @host with ATA and SCSI layers and
7010 * probe registered devices.
7011 *
7012 * LOCKING:
7013 * Inherited from calling layer (may sleep).
7014 *
7015 * RETURNS:
7016 * 0 on success, -errno otherwise.
7017 */
7018int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7019{
7020 int i, rc;
7021
7022 /* host must have been started */
7023 if (!(host->flags & ATA_HOST_STARTED)) {
7024 dev_printk(KERN_ERR, host->dev,
7025 "BUG: trying to register unstarted host\n");
7026 WARN_ON(1);
7027 return -EINVAL;
7028 }
7029
7030 /* Blow away unused ports. This happens when LLD can't
7031 * determine the exact number of ports to allocate at
7032 * allocation time.
7033 */
7034 for (i = host->n_ports; host->ports[i]; i++)
7035 kfree(host->ports[i]);
7036
7037 /* give ports names and add SCSI hosts */
7038 for (i = 0; i < host->n_ports; i++)
7039 host->ports[i]->print_id = ata_print_id++;
7040
7041 rc = ata_scsi_add_hosts(host, sht);
7042 if (rc)
7043 return rc;
7044
fafbae87
TH
7045 /* associate with ACPI nodes */
7046 ata_acpi_associate(host);
7047
f3187195
TH
7048 /* set cable, sata_spd_limit and report */
7049 for (i = 0; i < host->n_ports; i++) {
7050 struct ata_port *ap = host->ports[i];
f3187195
TH
7051 unsigned long xfer_mask;
7052
7053 /* set SATA cable type if still unset */
7054 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7055 ap->cbl = ATA_CBL_SATA;
7056
7057 /* init sata_spd_limit to the current value */
4fb37a25 7058 sata_link_init_spd(&ap->link);
f3187195 7059
cbcdd875 7060 /* print per-port info to dmesg */
f3187195
TH
7061 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7062 ap->udma_mask);
7063
abf6e8ed 7064 if (!ata_port_is_dummy(ap)) {
cbcdd875
TH
7065 ata_port_printk(ap, KERN_INFO,
7066 "%cATA max %s %s\n",
a16abc0b 7067 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 7068 ata_mode_string(xfer_mask),
cbcdd875 7069 ap->link.eh_info.desc);
abf6e8ed
TH
7070 ata_ehi_clear_desc(&ap->link.eh_info);
7071 } else
f3187195
TH
7072 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7073 }
7074
7075 /* perform each probe synchronously */
7076 DPRINTK("probe begin\n");
7077 for (i = 0; i < host->n_ports; i++) {
7078 struct ata_port *ap = host->ports[i];
7079 int rc;
7080
7081 /* probe */
7082 if (ap->ops->error_handler) {
9af5c9c9 7083 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
7084 unsigned long flags;
7085
7086 ata_port_probe(ap);
7087
7088 /* kick EH for boot probing */
7089 spin_lock_irqsave(ap->lock, flags);
7090
f58229f8
TH
7091 ehi->probe_mask =
7092 (1 << ata_link_max_devices(&ap->link)) - 1;
f3187195
TH
7093 ehi->action |= ATA_EH_SOFTRESET;
7094 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7095
f4d6d004 7096 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
7097 ap->pflags |= ATA_PFLAG_LOADING;
7098 ata_port_schedule_eh(ap);
7099
7100 spin_unlock_irqrestore(ap->lock, flags);
7101
7102 /* wait for EH to finish */
7103 ata_port_wait_eh(ap);
7104 } else {
7105 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7106 rc = ata_bus_probe(ap);
7107 DPRINTK("ata%u: bus probe end\n", ap->print_id);
7108
7109 if (rc) {
7110 /* FIXME: do something useful here?
7111 * Current libata behavior will
7112 * tear down everything when
7113 * the module is removed
7114 * or the h/w is unplugged.
7115 */
7116 }
7117 }
7118 }
7119
7120 /* probes are done, now scan each port's disk(s) */
7121 DPRINTK("host probe begin\n");
7122 for (i = 0; i < host->n_ports; i++) {
7123 struct ata_port *ap = host->ports[i];
7124
1ae46317 7125 ata_scsi_scan_host(ap, 1);
ca77329f 7126 ata_lpm_schedule(ap, ap->pm_policy);
f3187195
TH
7127 }
7128
7129 return 0;
7130}
7131
f5cda257
TH
7132/**
7133 * ata_host_activate - start host, request IRQ and register it
7134 * @host: target ATA host
7135 * @irq: IRQ to request
7136 * @irq_handler: irq_handler used when requesting IRQ
7137 * @irq_flags: irq_flags used when requesting IRQ
7138 * @sht: scsi_host_template to use when registering the host
7139 *
7140 * After allocating an ATA host and initializing it, most libata
7141 * LLDs perform three steps to activate the host - start host,
7142 * request IRQ and register it. This helper takes necessasry
7143 * arguments and performs the three steps in one go.
7144 *
3d46b2e2
PM
7145 * An invalid IRQ skips the IRQ registration and expects the host to
7146 * have set polling mode on the port. In this case, @irq_handler
7147 * should be NULL.
7148 *
f5cda257
TH
7149 * LOCKING:
7150 * Inherited from calling layer (may sleep).
7151 *
7152 * RETURNS:
7153 * 0 on success, -errno otherwise.
7154 */
7155int ata_host_activate(struct ata_host *host, int irq,
7156 irq_handler_t irq_handler, unsigned long irq_flags,
7157 struct scsi_host_template *sht)
7158{
cbcdd875 7159 int i, rc;
f5cda257
TH
7160
7161 rc = ata_host_start(host);
7162 if (rc)
7163 return rc;
7164
3d46b2e2
PM
7165 /* Special case for polling mode */
7166 if (!irq) {
7167 WARN_ON(irq_handler);
7168 return ata_host_register(host, sht);
7169 }
7170
f5cda257
TH
7171 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7172 dev_driver_string(host->dev), host);
7173 if (rc)
7174 return rc;
7175
cbcdd875
TH
7176 for (i = 0; i < host->n_ports; i++)
7177 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 7178
f5cda257
TH
7179 rc = ata_host_register(host, sht);
7180 /* if failed, just free the IRQ and leave ports alone */
7181 if (rc)
7182 devm_free_irq(host->dev, irq, host);
7183
7184 return rc;
7185}
7186
720ba126
TH
7187/**
7188 * ata_port_detach - Detach ATA port in prepration of device removal
7189 * @ap: ATA port to be detached
7190 *
7191 * Detach all ATA devices and the associated SCSI devices of @ap;
7192 * then, remove the associated SCSI host. @ap is guaranteed to
7193 * be quiescent on return from this function.
7194 *
7195 * LOCKING:
7196 * Kernel thread context (may sleep).
7197 */
741b7763 7198static void ata_port_detach(struct ata_port *ap)
720ba126
TH
7199{
7200 unsigned long flags;
41bda9c9 7201 struct ata_link *link;
f58229f8 7202 struct ata_device *dev;
720ba126
TH
7203
7204 if (!ap->ops->error_handler)
c3cf30a9 7205 goto skip_eh;
720ba126
TH
7206
7207 /* tell EH we're leaving & flush EH */
ba6a1308 7208 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 7209 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 7210 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
7211
7212 ata_port_wait_eh(ap);
7213
7f9ad9b8
TH
7214 /* EH is now guaranteed to see UNLOADING - EH context belongs
7215 * to us. Disable all existing devices.
720ba126 7216 */
41bda9c9
TH
7217 ata_port_for_each_link(link, ap) {
7218 ata_link_for_each_dev(dev, link)
7219 ata_dev_disable(dev);
7220 }
720ba126 7221
720ba126
TH
7222 /* Final freeze & EH. All in-flight commands are aborted. EH
7223 * will be skipped and retrials will be terminated with bad
7224 * target.
7225 */
ba6a1308 7226 spin_lock_irqsave(ap->lock, flags);
720ba126 7227 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 7228 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
7229
7230 ata_port_wait_eh(ap);
45a66c1c 7231 cancel_rearming_delayed_work(&ap->hotplug_task);
720ba126 7232
c3cf30a9 7233 skip_eh:
720ba126 7234 /* remove the associated SCSI host */
cca3974e 7235 scsi_remove_host(ap->scsi_host);
720ba126
TH
7236}
7237
0529c159
TH
7238/**
7239 * ata_host_detach - Detach all ports of an ATA host
7240 * @host: Host to detach
7241 *
7242 * Detach all ports of @host.
7243 *
7244 * LOCKING:
7245 * Kernel thread context (may sleep).
7246 */
7247void ata_host_detach(struct ata_host *host)
7248{
7249 int i;
7250
7251 for (i = 0; i < host->n_ports; i++)
7252 ata_port_detach(host->ports[i]);
562f0c2d
TH
7253
7254 /* the host is dead now, dissociate ACPI */
7255 ata_acpi_dissociate(host);
0529c159
TH
7256}
7257
1da177e4
LT
7258/**
7259 * ata_std_ports - initialize ioaddr with standard port offsets.
7260 * @ioaddr: IO address structure to be initialized
0baab86b
EF
7261 *
7262 * Utility function which initializes data_addr, error_addr,
7263 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7264 * device_addr, status_addr, and command_addr to standard offsets
7265 * relative to cmd_addr.
7266 *
7267 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 7268 */
0baab86b 7269
1da177e4
LT
7270void ata_std_ports(struct ata_ioports *ioaddr)
7271{
7272 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7273 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7274 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7275 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7276 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7277 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7278 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7279 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7280 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7281 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7282}
7283
0baab86b 7284
374b1873
JG
7285#ifdef CONFIG_PCI
7286
1da177e4
LT
7287/**
7288 * ata_pci_remove_one - PCI layer callback for device removal
7289 * @pdev: PCI device that was removed
7290 *
b878ca5d
TH
7291 * PCI layer indicates to libata via this hook that hot-unplug or
7292 * module unload event has occurred. Detach all ports. Resource
7293 * release is handled via devres.
1da177e4
LT
7294 *
7295 * LOCKING:
7296 * Inherited from PCI layer (may sleep).
7297 */
f0d36efd 7298void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7299{
2855568b 7300 struct device *dev = &pdev->dev;
cca3974e 7301 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7302
b878ca5d 7303 ata_host_detach(host);
1da177e4
LT
7304}
7305
7306/* move to PCI subsystem */
057ace5e 7307int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7308{
7309 unsigned long tmp = 0;
7310
7311 switch (bits->width) {
7312 case 1: {
7313 u8 tmp8 = 0;
7314 pci_read_config_byte(pdev, bits->reg, &tmp8);
7315 tmp = tmp8;
7316 break;
7317 }
7318 case 2: {
7319 u16 tmp16 = 0;
7320 pci_read_config_word(pdev, bits->reg, &tmp16);
7321 tmp = tmp16;
7322 break;
7323 }
7324 case 4: {
7325 u32 tmp32 = 0;
7326 pci_read_config_dword(pdev, bits->reg, &tmp32);
7327 tmp = tmp32;
7328 break;
7329 }
7330
7331 default:
7332 return -EINVAL;
7333 }
7334
7335 tmp &= bits->mask;
7336
7337 return (tmp == bits->val) ? 1 : 0;
7338}
9b847548 7339
6ffa01d8 7340#ifdef CONFIG_PM
3c5100c1 7341void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
7342{
7343 pci_save_state(pdev);
4c90d971 7344 pci_disable_device(pdev);
500530f6 7345
4c90d971 7346 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 7347 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
7348}
7349
553c4aa6 7350int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 7351{
553c4aa6
TH
7352 int rc;
7353
9b847548
JA
7354 pci_set_power_state(pdev, PCI_D0);
7355 pci_restore_state(pdev);
553c4aa6 7356
b878ca5d 7357 rc = pcim_enable_device(pdev);
553c4aa6
TH
7358 if (rc) {
7359 dev_printk(KERN_ERR, &pdev->dev,
7360 "failed to enable device after resume (%d)\n", rc);
7361 return rc;
7362 }
7363
9b847548 7364 pci_set_master(pdev);
553c4aa6 7365 return 0;
500530f6
TH
7366}
7367
3c5100c1 7368int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7369{
cca3974e 7370 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7371 int rc = 0;
7372
cca3974e 7373 rc = ata_host_suspend(host, mesg);
500530f6
TH
7374 if (rc)
7375 return rc;
7376
3c5100c1 7377 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7378
7379 return 0;
7380}
7381
7382int ata_pci_device_resume(struct pci_dev *pdev)
7383{
cca3974e 7384 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7385 int rc;
500530f6 7386
553c4aa6
TH
7387 rc = ata_pci_device_do_resume(pdev);
7388 if (rc == 0)
7389 ata_host_resume(host);
7390 return rc;
9b847548 7391}
6ffa01d8
TH
7392#endif /* CONFIG_PM */
7393
1da177e4
LT
7394#endif /* CONFIG_PCI */
7395
7396
1da177e4
LT
7397static int __init ata_init(void)
7398{
a8601e5f 7399 ata_probe_timeout *= HZ;
1da177e4
LT
7400 ata_wq = create_workqueue("ata");
7401 if (!ata_wq)
7402 return -ENOMEM;
7403
453b07ac
TH
7404 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7405 if (!ata_aux_wq) {
7406 destroy_workqueue(ata_wq);
7407 return -ENOMEM;
7408 }
7409
1da177e4
LT
7410 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7411 return 0;
7412}
7413
7414static void __exit ata_exit(void)
7415{
7416 destroy_workqueue(ata_wq);
453b07ac 7417 destroy_workqueue(ata_aux_wq);
1da177e4
LT
7418}
7419
a4625085 7420subsys_initcall(ata_init);
1da177e4
LT
7421module_exit(ata_exit);
7422
67846b30 7423static unsigned long ratelimit_time;
34af946a 7424static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7425
7426int ata_ratelimit(void)
7427{
7428 int rc;
7429 unsigned long flags;
7430
7431 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7432
7433 if (time_after(jiffies, ratelimit_time)) {
7434 rc = 1;
7435 ratelimit_time = jiffies + (HZ/5);
7436 } else
7437 rc = 0;
7438
7439 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7440
7441 return rc;
7442}
7443
c22daff4
TH
7444/**
7445 * ata_wait_register - wait until register value changes
7446 * @reg: IO-mapped register
7447 * @mask: Mask to apply to read register value
7448 * @val: Wait condition
7449 * @interval_msec: polling interval in milliseconds
7450 * @timeout_msec: timeout in milliseconds
7451 *
7452 * Waiting for some bits of register to change is a common
7453 * operation for ATA controllers. This function reads 32bit LE
7454 * IO-mapped register @reg and tests for the following condition.
7455 *
7456 * (*@reg & mask) != val
7457 *
7458 * If the condition is met, it returns; otherwise, the process is
7459 * repeated after @interval_msec until timeout.
7460 *
7461 * LOCKING:
7462 * Kernel thread context (may sleep)
7463 *
7464 * RETURNS:
7465 * The final register value.
7466 */
7467u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7468 unsigned long interval_msec,
7469 unsigned long timeout_msec)
7470{
7471 unsigned long timeout;
7472 u32 tmp;
7473
7474 tmp = ioread32(reg);
7475
7476 /* Calculate timeout _after_ the first read to make sure
7477 * preceding writes reach the controller before starting to
7478 * eat away the timeout.
7479 */
7480 timeout = jiffies + (timeout_msec * HZ) / 1000;
7481
7482 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7483 msleep(interval_msec);
7484 tmp = ioread32(reg);
7485 }
7486
7487 return tmp;
7488}
7489
dd5b06c4
TH
7490/*
7491 * Dummy port_ops
7492 */
7493static void ata_dummy_noret(struct ata_port *ap) { }
7494static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7495static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7496
7497static u8 ata_dummy_check_status(struct ata_port *ap)
7498{
7499 return ATA_DRDY;
7500}
7501
7502static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7503{
7504 return AC_ERR_SYSTEM;
7505}
7506
7507const struct ata_port_operations ata_dummy_port_ops = {
dd5b06c4
TH
7508 .check_status = ata_dummy_check_status,
7509 .check_altstatus = ata_dummy_check_status,
7510 .dev_select = ata_noop_dev_select,
7511 .qc_prep = ata_noop_qc_prep,
7512 .qc_issue = ata_dummy_qc_issue,
7513 .freeze = ata_dummy_noret,
7514 .thaw = ata_dummy_noret,
7515 .error_handler = ata_dummy_noret,
7516 .post_internal_cmd = ata_dummy_qc_noret,
7517 .irq_clear = ata_dummy_noret,
7518 .port_start = ata_dummy_ret0,
7519 .port_stop = ata_dummy_noret,
7520};
7521
21b0ad4f
TH
7522const struct ata_port_info ata_dummy_port_info = {
7523 .port_ops = &ata_dummy_port_ops,
7524};
7525
1da177e4
LT
7526/*
7527 * libata is essentially a library of internal helper functions for
7528 * low-level ATA host controller drivers. As such, the API/ABI is
7529 * likely to change as new drivers are added and updated.
7530 * Do not depend on ABI/API stability.
7531 */
e9c83914
TH
7532EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7533EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7534EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 7535EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7536EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
7537EXPORT_SYMBOL_GPL(ata_std_bios_param);
7538EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 7539EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7540EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7541EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 7542EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7543EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7544EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7545EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
7546EXPORT_SYMBOL_GPL(ata_sg_init);
7547EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 7548EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 7549EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7550EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 7551EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
7552EXPORT_SYMBOL_GPL(ata_tf_load);
7553EXPORT_SYMBOL_GPL(ata_tf_read);
7554EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7555EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 7556EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
7557EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7558EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
7559EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7560EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7561EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7562EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7563EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7564EXPORT_SYMBOL_GPL(ata_mode_string);
7565EXPORT_SYMBOL_GPL(ata_id_xfermask);
1da177e4
LT
7566EXPORT_SYMBOL_GPL(ata_check_status);
7567EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
7568EXPORT_SYMBOL_GPL(ata_exec_command);
7569EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 7570EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 7571EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 7572EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
7573EXPORT_SYMBOL_GPL(ata_data_xfer);
7574EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
31cc23b3 7575EXPORT_SYMBOL_GPL(ata_std_qc_defer);
1da177e4 7576EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 7577EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 7578EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
7579EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7580EXPORT_SYMBOL_GPL(ata_bmdma_start);
7581EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7582EXPORT_SYMBOL_GPL(ata_bmdma_status);
7583EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7584EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7585EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7586EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7587EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7588EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7589EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7590EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7591EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7592EXPORT_SYMBOL_GPL(sata_link_debounce);
7593EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4 7594EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7595EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7596EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7597EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7598EXPORT_SYMBOL_GPL(sata_std_hardreset);
7599EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7600EXPORT_SYMBOL_GPL(ata_dev_classify);
7601EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7602EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7603EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7604EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7605EXPORT_SYMBOL_GPL(ata_busy_sleep);
88ff6eaf 7606EXPORT_SYMBOL_GPL(ata_wait_after_reset);
d4b2bab4 7607EXPORT_SYMBOL_GPL(ata_wait_ready);
86e45b6b 7608EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
7609EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7610EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7611EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7612EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7613EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7614EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7615EXPORT_SYMBOL_GPL(sata_scr_valid);
7616EXPORT_SYMBOL_GPL(sata_scr_read);
7617EXPORT_SYMBOL_GPL(sata_scr_write);
7618EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7619EXPORT_SYMBOL_GPL(ata_link_online);
7620EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7621#ifdef CONFIG_PM
cca3974e
JG
7622EXPORT_SYMBOL_GPL(ata_host_suspend);
7623EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7624#endif /* CONFIG_PM */
6a62a04d
TH
7625EXPORT_SYMBOL_GPL(ata_id_string);
7626EXPORT_SYMBOL_GPL(ata_id_c_string);
1da177e4
LT
7627EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7628
1bc4ccff 7629EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 7630EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
7631EXPORT_SYMBOL_GPL(ata_timing_compute);
7632EXPORT_SYMBOL_GPL(ata_timing_merge);
7633
1da177e4
LT
7634#ifdef CONFIG_PCI
7635EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7636EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7637EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7638EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
1da177e4
LT
7639EXPORT_SYMBOL_GPL(ata_pci_init_one);
7640EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7641#ifdef CONFIG_PM
500530f6
TH
7642EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7643EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7644EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7645EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7646#endif /* CONFIG_PM */
67951ade
AC
7647EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7648EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 7649#endif /* CONFIG_PCI */
9b847548 7650
31f88384 7651EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
3af9a77a
TH
7652EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7653EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7654EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7655EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7656
b64bbc39
TH
7657EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7658EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7659EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7660EXPORT_SYMBOL_GPL(ata_port_desc);
7661#ifdef CONFIG_PCI
7662EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7663#endif /* CONFIG_PCI */
7b70fc03 7664EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7665EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7666EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 7667EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 7668EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
7669EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7670EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7671EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7672EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 7673EXPORT_SYMBOL_GPL(ata_do_eh);
83625006 7674EXPORT_SYMBOL_GPL(ata_irq_on);
a619f981 7675EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
7676
7677EXPORT_SYMBOL_GPL(ata_cable_40wire);
7678EXPORT_SYMBOL_GPL(ata_cable_80wire);
7679EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 7680EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 7681EXPORT_SYMBOL_GPL(ata_cable_sata);