libata: handle ata_pci_device_do_resume() failure while resuming
drivers/ata/libata-core.c (from linux-2.6-block.git)
/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
 *                Please ALWAYS copy linux-ide@vger.kernel.org
 *                on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION     "2.20"  /* must be exactly four chars */

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
                                        u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
        fis[0] = 0x27;                  /* Register - Host to Device FIS */
        fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
                                            bit 7 indicates Command FIS */
        fis[2] = tf->command;
        fis[3] = tf->feature;

        fis[4] = tf->lbal;
        fis[5] = tf->lbam;
        fis[6] = tf->lbah;
        fis[7] = tf->device;

        fis[8] = tf->hob_lbal;
        fis[9] = tf->hob_lbam;
        fis[10] = tf->hob_lbah;
        fis[11] = tf->hob_feature;

        fis[12] = tf->nsect;
        fis[13] = tf->hob_nsect;
        fis[14] = 0;
        fis[15] = tf->ctl;

        fis[16] = 0;
        fis[17] = 0;
        fis[18] = 0;
        fis[19] = 0;
}
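
/*
 * Illustrative sketch (not part of the original file): a standalone,
 * user-space rendering of the 20-byte Register - Host to Device FIS
 * layout that ata_tf_to_fis() produces.  The command value used here
 * is just an example; only the byte positions mirror the function above.
 */
#include <stdio.h>

int main(void)
{
        unsigned char fis[20] = { 0 };
        unsigned char pmp = 0;           /* port multiplier port 0 */

        fis[0] = 0x27;                   /* FIS type: Register - H2D */
        fis[1] = (pmp & 0xf) | (1 << 7); /* bit 7: this is a command FIS */
        fis[2] = 0xec;                   /* example opcode: IDENTIFY DEVICE */

        /* bytes 4-7 carry LBA low/mid/high + device, bytes 8-11 the HOB
         * (high-order byte) fields, 12-13 the sector count, 15 control,
         * exactly as laid out by ata_tf_to_fis() above */
        printf("FIS type=%#x flags=%#x cmd=%#x\n", fis[0], fis[1], fis[2]);
        return 0;
}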

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
        tf->command     = fis[2];       /* status */
        tf->feature     = fis[3];       /* error */

        tf->lbal = fis[4];
        tf->lbam = fis[5];
        tf->lbah = fis[6];
        tf->device = fis[7];

        tf->hob_lbal = fis[8];
        tf->hob_lbam = fis[9];
        tf->hob_lbah = fis[10];

        tf->nsect = fis[12];
        tf->hob_nsect = fis[13];
}

static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
        u8 cmd;

        int index, fua, lba48, write;

        fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
        lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
        write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
                /* Unable to use DMA due to host limitation */
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
        }

        cmd = ata_rw_cmds[index + fua + lba48 + write];
        if (cmd) {
                tf->command = cmd;
                return 0;
        }
        return -1;
}
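
/*
 * Illustrative sketch (not part of the original file): how the
 * ata_rw_cmds[] index is composed.  Each flag contributes a fixed
 * offset (fua=4, lba48=2, write=1) on top of a protocol base
 * (0 = pio multi, 8 = pio, 16 = dma), so base+fua+lba48+write
 * selects one opcode slot in the table above.
 */
#include <stdio.h>

int main(void)
{
        int fua = 0, lba48 = 2, write = 1;      /* LBA48 write, no FUA */
        int base = 16;                          /* DMA protocol */

        printf("table index = %d\n", base + fua + lba48 + write);
        /* -> 19, i.e. the ATA_CMD_WRITE_EXT slot in ata_rw_cmds[] */
        return 0;
}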

/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
        u64 block = 0;

        if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
                        block |= tf->hob_lbal << 24;
                } else
                        block |= (tf->device & 0xf) << 24;

                block |= tf->lbah << 16;
                block |= tf->lbam << 8;
                block |= tf->lbal;
        } else {
                u32 cyl, head, sect;

                cyl = tf->lbam | (tf->lbah << 8);
                head = tf->device & 0xf;
                sect = tf->lbal;

                block = (cyl * dev->heads + head) * dev->sectors + sect;
        }

        return block;
}
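
/*
 * Illustrative sketch (not part of the original file): the CHS
 * arithmetic mirrored from the function above, with a made-up
 * geometry.  It computes (cyl * heads + head) * sectors + sect,
 * exactly as ata_tf_read_block() does for the CHS case.
 */
#include <stdio.h>

int main(void)
{
        unsigned int heads = 16, sectors = 63;  /* example geometry */
        unsigned int cyl = 2, head = 5, sect = 7;
        unsigned long long block;

        block = ((unsigned long long)cyl * heads + head) * sectors + sect;
        printf("CHS %u/%u/%u -> block %llu\n", cyl, head, sect, block);
        return 0;
}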

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                    u64 block, u32 n_block, unsigned int tf_flags,
                    unsigned int tag)
{
        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf->flags |= tf_flags;

        if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
                /* yay, NCQ */
                if (!lba_48_ok(block, n_block))
                        return -ERANGE;

                tf->protocol = ATA_PROT_NCQ;
                tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

                if (tf->flags & ATA_TFLAG_WRITE)
                        tf->command = ATA_CMD_FPDMA_WRITE;
                else
                        tf->command = ATA_CMD_FPDMA_READ;

                tf->nsect = tag << 3;
                tf->hob_feature = (n_block >> 8) & 0xff;
                tf->feature = n_block & 0xff;

                tf->hob_lbah = (block >> 40) & 0xff;
                tf->hob_lbam = (block >> 32) & 0xff;
                tf->hob_lbal = (block >> 24) & 0xff;
                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device = 1 << 6;
                if (tf->flags & ATA_TFLAG_FUA)
                        tf->device |= 1 << 7;
        } else if (dev->flags & ATA_DFLAG_LBA) {
                tf->flags |= ATA_TFLAG_LBA;

                if (lba_28_ok(block, n_block)) {
                        /* use LBA28 */
                        tf->device |= (block >> 24) & 0xf;
                } else if (lba_48_ok(block, n_block)) {
                        if (!(dev->flags & ATA_DFLAG_LBA48))
                                return -ERANGE;

                        /* use LBA48 */
                        tf->flags |= ATA_TFLAG_LBA48;

                        tf->hob_nsect = (n_block >> 8) & 0xff;

                        tf->hob_lbah = (block >> 40) & 0xff;
                        tf->hob_lbam = (block >> 32) & 0xff;
                        tf->hob_lbal = (block >> 24) & 0xff;
                } else
                        /* request too large even for LBA48 */
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                tf->nsect = n_block & 0xff;

                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device |= ATA_LBA;
        } else {
                /* CHS */
                u32 sect, head, cyl, track;

                /* The request -may- be too large for CHS addressing. */
                if (!lba_28_ok(block, n_block))
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                /* Convert LBA to CHS */
                track = (u32)block / dev->sectors;
                cyl   = track / dev->heads;
                head  = track % dev->heads;
                sect  = (u32)block % dev->sectors + 1;

                DPRINTK("block %u track %u cyl %u head %u sect %u\n",
                        (u32)block, track, cyl, head, sect);

                /* Check whether the converted CHS can fit.
                   Cylinder: 0-65535
                   Head: 0-15
                   Sector: 1-255*/
                if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
                        return -ERANGE;

                tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
                tf->lbal = sect;
                tf->lbam = cyl;
                tf->lbah = cyl >> 8;
                tf->device |= head;
        }

        return 0;
}
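
/*
 * Illustrative sketch (not part of the original file): splitting a
 * 48-bit block address across the six LBA taskfile bytes the way
 * ata_build_rw_tf() does (hob_lbah..lbal), using an arbitrary value.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long block = 0x0123456789abULL;

        printf("hob_lbah=%02llx hob_lbam=%02llx hob_lbal=%02llx ",
               (block >> 40) & 0xff, (block >> 32) & 0xff,
               (block >> 24) & 0xff);
        printf("lbah=%02llx lbam=%02llx lbal=%02llx\n",
               (block >> 16) & 0xff, (block >> 8) & 0xff, block & 0xff);
        return 0;
}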

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
                                      unsigned int mwdma_mask,
                                      unsigned int udma_mask)
{
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
                ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
                ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
                                unsigned int *pio_mask,
                                unsigned int *mwdma_mask,
                                unsigned int *udma_mask)
{
        if (pio_mask)
                *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
        if (mwdma_mask)
                *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
        if (udma_mask)
                *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
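
/*
 * Illustrative sketch (not part of the original file): the pack/unpack
 * round trip above in user space.  The shift values are assumptions
 * standing in for ATA_SHIFT_PIO/MWDMA/UDMA (PIO at bit 0, MWDMA at
 * bit 8, UDMA at bit 16), not quoted from ata.h.
 */
#include <stdio.h>

#define SHIFT_PIO       0
#define SHIFT_MWDMA     8
#define SHIFT_UDMA      16

int main(void)
{
        unsigned int pio = 0x1f, mwdma = 0x07, udma = 0x3f;
        unsigned int xfer_mask = (pio << SHIFT_PIO) |
                                 (mwdma << SHIFT_MWDMA) |
                                 (udma << SHIFT_UDMA);

        printf("xfer_mask=%#x pio=%#x mwdma=%#x udma=%#x\n", xfer_mask,
               (xfer_mask >> SHIFT_PIO) & 0xff,
               (xfer_mask >> SHIFT_MWDMA) & 0xff,
               (xfer_mask >> SHIFT_UDMA) & 0xff);
        return 0;
}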

static const struct ata_xfer_ent {
        int shift, bits;
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
        { -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
        int highbit = fls(xfer_mask) - 1;
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        return ent->base + highbit - ent->shift;
        return 0;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return 1 << (ent->shift + xfer_mode - ent->base);
        return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ent->shift;
        return -1;
}
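
/*
 * Illustrative sketch (not part of the original file): the
 * highest-bit-wins lookup performed by ata_xfer_mask2mode(), written
 * against a stand-in table.  The base values here are placeholders,
 * not the real XFER_* constants from ata.h.
 */
#include <stdio.h>

struct xfer_ent { int shift, bits, base; };

static const struct xfer_ent tbl[] = {
        { 0, 7, 0x08 },         /* stand-in for XFER_PIO_0 */
        { 8, 5, 0x20 },         /* stand-in for XFER_MW_DMA_0 */
        { 16, 8, 0x40 },        /* stand-in for XFER_UDMA_0 */
        { -1, 0, 0 },
};

int main(void)
{
        unsigned int xfer_mask = 1u << 18;      /* highest bit: "UDMA2" */
        int highbit = 31;
        const struct xfer_ent *ent;

        while (highbit >= 0 && !(xfer_mask & (1u << highbit)))
                highbit--;              /* open-coded fls() - 1 */

        for (ent = tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        printf("mode=%#x\n", ent->base + highbit - ent->shift);
        return 0;
}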

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "PIO5",
                "PIO6",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "MWDMA3",
                "MWDMA4",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
        static const char * const spd_str[] = {
                "1.5 Gbps",
                "3.0 Gbps",
        };

        if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
                return "<unknown>";
        return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
        if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
                ata_dev_printk(dev, KERN_WARNING, "disabled\n");
                ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
                                             ATA_DNXFER_QUIET);
                dev->class++;
        }
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        iowrite8(0xaa, ioaddr->nsect_addr);
        iowrite8(0x55, ioaddr->lbal_addr);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        nsect = ioread8(ioaddr->nsect_addr);
        lbal = ioread8(ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
         * So, we only check those.  It's sufficient for uniqueness.
         */

        if (((tf->lbam == 0) && (tf->lbah == 0)) ||
            ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
                DPRINTK("found ATA device by sig\n");
                return ATA_DEV_ATA;
        }

        if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
            ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
                DPRINTK("found ATAPI device by sig\n");
                return ATA_DEV_ATAPI;
        }

        DPRINTK("unknown device\n");
        return ATA_DEV_UNKNOWN;
}
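
/*
 * Illustrative sketch (not part of the original file): the LBA mid/high
 * signature check from ata_dev_classify() applied to plain bytes, as
 * they might be read back from the shadow registers after reset.
 */
#include <stdio.h>

static const char *classify(unsigned char lbam, unsigned char lbah)
{
        if ((lbam == 0 && lbah == 0) || (lbam == 0x3c && lbah == 0xc3))
                return "ATA";
        if ((lbam == 0x14 && lbah == 0xeb) || (lbam == 0x69 && lbah == 0x96))
                return "ATAPI";
        return "unknown";
}

int main(void)
{
        printf("%s\n", classify(0x14, 0xeb));   /* -> ATAPI */
        return 0;
}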

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
        struct ata_taskfile tf;
        unsigned int class;
        u8 err;

        ap->ops->dev_select(ap, device);

        memset(&tf, 0, sizeof(tf));

        ap->ops->tf_read(ap, &tf);
        err = tf.feature;
        if (r_err)
                *r_err = err;

        /* see if device passed diags: if master then continue and warn later */
        if (err == 0 && device == 0)
                /* diagnostic fail : do nothing _YET_ */
                ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
        else if (err == 1)
                /* do nothing */ ;
        else if ((device == 0) && (err == 0x81))
                /* do nothing */ ;
        else
                return ATA_DEV_NONE;

        /* determine if device is ATA or ATAPI */
        class = ata_dev_classify(&tf);

        if (class == ATA_DEV_UNKNOWN)
                return ATA_DEV_NONE;
        if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
                return ATA_DEV_NONE;
        return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{
        unsigned int c;

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        WARN_ON(!(len & 1));

        ata_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}
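
/*
 * Illustrative sketch (not part of the original file): pulling an
 * ASCII string out of 16-bit IDENTIFY words the way ata_id_string()
 * does -- high byte first, then low byte -- on a fabricated page.
 */
#include <stdio.h>

int main(void)
{
        unsigned short id[2] = { ('H' << 8) | 'D', ('D' << 8) | ' ' };
        char s[5];
        unsigned int ofs = 0, len = 4, i = 0;

        while (len > 0) {
                s[i++] = id[ofs] >> 8;
                s[i++] = id[ofs] & 0xff;
                ofs++;
                len -= 2;
        }
        s[i] = '\0';
        printf("\"%s\"\n", s);  /* -> "HDD " (trailing blank kept) */
        return 0;
}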

static u64 ata_id_n_sectors(const u16 *id)
{
        if (ata_id_has_lba(id)) {
                if (ata_id_has_lba48(id))
                        return ata_id_u64(id, 100);
                else
                        return ata_id_u32(id, 60);
        } else {
                if (ata_id_current_chs_valid(id))
                        return ata_id_u32(id, 57);
                else
                        return id[1] * id[3] * id[6];
        }
}
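
/*
 * Illustrative sketch (not part of the original file): the LBA48
 * capacity read that ata_id_u64(id, 100) performs -- words 100-103,
 * least significant word first -- on a fabricated IDENTIFY page.
 */
#include <stdio.h>

int main(void)
{
        unsigned short id[104] = { 0 };
        unsigned long long n;

        id[100] = 0x3000;       /* example: 0x110003000 sectors total */
        id[101] = 0x1000;
        id[102] = 0x0001;

        n = (unsigned long long)id[103] << 48 |
            (unsigned long long)id[102] << 32 |
            (unsigned long long)id[101] << 16 | id[100];
        printf("n_sectors = %llu\n", n);
        return 0;
}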

/**
 * ata_id_to_dma_mode - Identify DMA mode from id block
 * @dev: device to identify
 * @unknown: mode to assume if we cannot tell
 *
 * Set up the timing values for the device based upon the identify
 * reported values for the DMA mode. This function is used by drivers
 * which rely upon firmware configured modes, but wish to report the
 * mode correctly when possible.
 *
 * In addition we emit similarly formatted messages to the default
 * ata_dev_set_mode handler, in order to provide consistency of
 * presentation.
 */
void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
        unsigned int mask;
        u8 mode;

        /* Pack the DMA modes */
        mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
        if (dev->id[53] & 0x04)
                mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

        /* Select the mode in use */
        mode = ata_xfer_mask2mode(mask);

        if (mode != 0) {
                ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
                               ata_mode_string(mask));
        } else {
                /* SWDMA perhaps ? */
                mode = unknown;
                ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
        }

        /* Configure the device reporting */
        dev->xfer_mode = mode;
        dev->xfer_shift = ata_xfer_mode2shift(mode);
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}

/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
        u8 tmp;

        if (device == 0)
                tmp = ATA_DEVICE_OBS;
        else
                tmp = ATA_DEVICE_OBS | ATA_DEV1;

        iowrite8(tmp, ap->ioaddr.device_addr);
        ata_pause(ap);          /* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
                    unsigned int wait, unsigned int can_sleep)
{
        if (ata_msg_probe(ap))
                ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
                                "device %u, wait %u\n", device, wait);

        if (wait)
                ata_wait_idle(ap);

        ap->ops->dev_select(ap, device);

        if (wait) {
                if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
                        msleep(150);
                ata_wait_idle(ap);
        }
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */
static inline void ata_dump_id(const u16 *id)
{
        DPRINTK("49==0x%04x "
                "53==0x%04x "
                "63==0x%04x "
                "64==0x%04x "
                "75==0x%04x \n",
                id[49],
                id[53],
                id[63],
                id[64],
                id[75]);
        DPRINTK("80==0x%04x "
                "81==0x%04x "
                "82==0x%04x "
                "83==0x%04x "
                "84==0x%04x \n",
                id[80],
                id[81],
                id[82],
                id[83],
                id[84]);
        DPRINTK("88==0x%04x "
                "93==0x%04x\n",
                id[88],
                id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
        unsigned int pio_mask, mwdma_mask, udma_mask;

        /* Usual case. Word 53 indicates word 64 is valid */
        if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
                pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
                pio_mask <<= 3;
                pio_mask |= 0x7;
        } else {
                /* If word 64 isn't valid then Word 51 high byte holds
                 * the PIO timing number for the maximum. Turn it into
                 * a mask.
                 */
                u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
                if (mode < 5)   /* Valid PIO range */
                        pio_mask = (2 << mode) - 1;
                else
                        pio_mask = 1;

                /* But wait.. there's more. Design your standards by
                 * committee and you too can get a free iordy field to
                 * process. However it's the speeds not the modes that
                 * are supported... Note drivers using the timing API
                 * will get this right anyway
                 */
        }

        mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

        if (ata_id_is_cfa(id)) {
                /*
                 * Process compact flash extended modes
                 */
                int pio = id[163] & 0x7;
                int dma = (id[163] >> 3) & 7;

                if (pio)
                        pio_mask |= (1 << 5);
                if (pio > 1)
                        pio_mask |= (1 << 6);
                if (dma)
                        mwdma_mask |= (1 << 3);
                if (dma > 1)
                        mwdma_mask |= (1 << 4);
        }

        udma_mask = 0;
        if (id[ATA_ID_FIELD_VALID] & (1 << 2))
                udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

        return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user(low level driver)'s responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
                         unsigned long delay)
{
        int rc;

        if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
                return;

        PREPARE_DELAYED_WORK(&ap->port_task, fn);
        ap->port_task_data = data;

        rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

        /* rc == 0 means that another user is using port task */
        WARN_ON(rc == 0);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
        unsigned long flags;

        DPRINTK("ENTER\n");

        spin_lock_irqsave(ap->lock, flags);
        ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(ap->lock, flags);

        DPRINTK("flush #1\n");
        flush_workqueue(ata_wq);

        /*
         * At this point, if a task is running, it's guaranteed to see
         * the FLUSH flag; thus, it will never queue pio tasks again.
         * Cancel and flush.
         */
        if (!cancel_delayed_work(&ap->port_task)) {
                if (ata_msg_ctl(ap))
                        ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
                                        __FUNCTION__);
                flush_workqueue(ata_wq);
        }

        spin_lock_irqsave(ap->lock, flags);
        ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(ap->lock, flags);

        if (ata_msg_ctl(ap))
                ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
        struct completion *waiting = qc->private_data;

        complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sg: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
                              struct ata_taskfile *tf, const u8 *cdb,
                              int dma_dir, struct scatterlist *sg,
                              unsigned int n_elem)
{
        struct ata_port *ap = dev->ap;
        u8 command = tf->command;
        struct ata_queued_cmd *qc;
        unsigned int tag, preempted_tag;
        u32 preempted_sactive, preempted_qc_active;
        DECLARE_COMPLETION_ONSTACK(wait);
        unsigned long flags;
        unsigned int err_mask;
        int rc;

        spin_lock_irqsave(ap->lock, flags);

        /* no internal command while frozen */
        if (ap->pflags & ATA_PFLAG_FROZEN) {
                spin_unlock_irqrestore(ap->lock, flags);
                return AC_ERR_SYSTEM;
        }

        /* initialize internal qc */

        /* XXX: Tag 0 is used for drivers with legacy EH as some
         * drivers choke if any other tag is given.  This breaks
         * ata_tag_internal() test for those drivers.  Don't use new
         * EH stuff without converting to it.
         */
        if (ap->ops->error_handler)
                tag = ATA_TAG_INTERNAL;
        else
                tag = 0;

        if (test_and_set_bit(tag, &ap->qc_allocated))
                BUG();
        qc = __ata_qc_from_tag(ap, tag);

        qc->tag = tag;
        qc->scsicmd = NULL;
        qc->ap = ap;
        qc->dev = dev;
        ata_qc_reinit(qc);

        preempted_tag = ap->active_tag;
        preempted_sactive = ap->sactive;
        preempted_qc_active = ap->qc_active;
        ap->active_tag = ATA_TAG_POISON;
        ap->sactive = 0;
        ap->qc_active = 0;

        /* prepare & issue qc */
        qc->tf = *tf;
        if (cdb)
                memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
        qc->flags |= ATA_QCFLAG_RESULT_TF;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
                unsigned int i, buflen = 0;

                for (i = 0; i < n_elem; i++)
                        buflen += sg[i].length;

                ata_sg_init(qc, sg, n_elem);
                qc->nbytes = buflen;
        }

        qc->private_data = &wait;
        qc->complete_fn = ata_qc_complete_internal;

        ata_qc_issue(qc);

        spin_unlock_irqrestore(ap->lock, flags);

        rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

        ata_port_flush_task(ap);

        if (!rc) {
                spin_lock_irqsave(ap->lock, flags);

                /* We're racing with irq here.  If we lose, the
                 * following test prevents us from completing the qc
                 * twice.  If we win, the port is frozen and will be
                 * cleaned up by ->post_internal_cmd().
                 */
                if (qc->flags & ATA_QCFLAG_ACTIVE) {
                        qc->err_mask |= AC_ERR_TIMEOUT;

                        if (ap->ops->error_handler)
                                ata_port_freeze(ap);
                        else
                                ata_qc_complete(qc);

                        if (ata_msg_warn(ap))
                                ata_dev_printk(dev, KERN_WARNING,
                                        "qc timeout (cmd 0x%x)\n", command);
                }

                spin_unlock_irqrestore(ap->lock, flags);
        }

        /* do post_internal_cmd */
        if (ap->ops->post_internal_cmd)
                ap->ops->post_internal_cmd(qc);

        if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
                if (ata_msg_warn(ap))
                        ata_dev_printk(dev, KERN_WARNING,
                                "zero err_mask for failed "
                                "internal command, assuming AC_ERR_OTHER\n");
                qc->err_mask |= AC_ERR_OTHER;
        }

        /* finish up */
        spin_lock_irqsave(ap->lock, flags);

        *tf = qc->result_tf;
        err_mask = qc->err_mask;

        ata_qc_free(qc);
        ap->active_tag = preempted_tag;
        ap->sactive = preempted_sactive;
        ap->qc_active = preempted_qc_active;

        /* XXX - Some LLDDs (sata_mv) disable port on command failure.
         * Until those drivers are fixed, we detect the condition
         * here, fail the command with AC_ERR_SYSTEM and reenable the
         * port.
         *
         * Note that this doesn't change any behavior as internal
         * command failure results in disabling the device in the
         * higher layer for LLDDs without new reset/EH callbacks.
         *
         * Kill the following code as soon as those drivers are fixed.
         */
        if (ap->flags & ATA_FLAG_DISABLED) {
                err_mask |= AC_ERR_SYSTEM;
                ata_port_probe(ap);
        }

        spin_unlock_irqrestore(ap->lock, flags);

        return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
                           struct ata_taskfile *tf, const u8 *cdb,
                           int dma_dir, void *buf, unsigned int buflen)
{
        struct scatterlist *psg = NULL, sg;
        unsigned int n_elem = 0;

        if (dma_dir != DMA_NONE) {
                WARN_ON(!buf);
                sg_init_one(&sg, buf, buflen);
                psg = &sg;
                n_elem++;
        }

        return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
        struct ata_taskfile tf;

        ata_tf_init(dev, &tf);

        tf.command = cmd;
        tf.flags |= ATA_TFLAG_DEVICE;
        tf.protocol = ATA_PROT_NODATA;

        return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
        int pio;
        int speed = adev->pio_mode - XFER_PIO_0;

        if (speed < 2)
                return 0;
        if (speed > 2)
                return 1;

        /* If we have no drive specific rule, then PIO 2 is non IORDY */

        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
                pio = adev->id[ATA_ID_EIDE_PIO];
                /* Is the speed faster than the drive allows non IORDY ? */
                if (pio) {
                        /* This is cycle times not frequency - watch the logic! */
                        if (pio > 240)  /* PIO2 is 240nS per cycle */
                                return 1;
                        return 0;
                }
        }
        return 0;
}
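
/*
 * Illustrative sketch (not part of the original file): the cycle-time
 * comparison above, on a made-up value.  ATA_ID_EIDE_PIO reports the
 * drive's minimum non-IORDY PIO cycle time in nanoseconds; a cycle
 * slower than the 240 ns PIO2 cycle means IORDY is required at PIO2.
 */
#include <stdio.h>

int main(void)
{
        int eide_pio = 383;     /* example drive: 383 ns non-IORDY cycles */

        printf("need IORDY at PIO2: %s\n", eide_pio > 240 ? "yes" : "no");
        return 0;
}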

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
                    unsigned int flags, u16 *id)
{
        struct ata_port *ap = dev->ap;
        unsigned int class = *p_class;
        struct ata_taskfile tf;
        unsigned int err_mask = 0;
        const char *reason;
        int rc;

        if (ata_msg_ctl(ap))
                ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

        ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
        ata_tf_init(dev, &tf);

        switch (class) {
        case ATA_DEV_ATA:
                tf.command = ATA_CMD_ID_ATA;
                break;
        case ATA_DEV_ATAPI:
                tf.command = ATA_CMD_ID_ATAPI;
                break;
        default:
                rc = -ENODEV;
                reason = "unsupported class";
                goto err_out;
        }

        tf.protocol = ATA_PROT_PIO;

        /* Some devices choke if TF registers contain garbage.  Make
         * sure those are properly initialized.
         */
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

        /* Device presence detection is unreliable on some
         * controllers.  Always poll IDENTIFY if available.
         */
        tf.flags |= ATA_TFLAG_POLLING;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     id, sizeof(id[0]) * ATA_ID_WORDS);
        if (err_mask) {
                if (err_mask & AC_ERR_NODEV_HINT) {
                        DPRINTK("ata%u.%d: NODEV after polling detection\n",
                                ap->print_id, dev->devno);
                        return -ENOENT;
                }

                rc = -EIO;
                reason = "I/O error";
                goto err_out;
        }

        swap_buf_le16(id, ATA_ID_WORDS);

        /* sanity check */
        rc = -EINVAL;
        reason = "device reports illegal type";

        if (class == ATA_DEV_ATA) {
                if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
                        goto err_out;
        } else {
                if (ata_id_is_ata(id))
                        goto err_out;
        }

        if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
                /*
                 * The exact sequence expected by certain pre-ATA4 drives is:
                 * SRST RESET
                 * IDENTIFY
                 * INITIALIZE DEVICE PARAMETERS
                 * anything else..
                 * Some drives were very specific about that exact sequence.
                 */
                if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
                        err_mask = ata_dev_init_params(dev, id[3], id[6]);
                        if (err_mask) {
                                rc = -EIO;
                                reason = "INIT_DEV_PARAMS failed";
                                goto err_out;
                        }

                        /* current CHS translation info (id[53-58]) might be
                         * changed. reread the identify device info.
                         */
                        flags &= ~ATA_READID_POSTRESET;
                        goto retry;
                }
        }

        *p_class = class;

        return 0;

 err_out:
        if (ata_msg_warn(ap))
                ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
                               "(%s, err_mask=0x%x)\n", reason, err_mask);
        return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
        return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
                               char *desc, size_t desc_sz)
{
        struct ata_port *ap = dev->ap;
        int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

        if (!ata_id_has_ncq(dev->id)) {
                desc[0] = '\0';
                return;
        }
        if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
                snprintf(desc, desc_sz, "NCQ (not used)");
                return;
        }
        if (ap->flags & ATA_FLAG_NCQ) {
                hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
                dev->flags |= ATA_DFLAG_NCQ;
        }

        if (hdepth >= ddepth)
                snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
        else
                snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @dev: Target device to configure
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
        struct ata_port *ap = dev->ap;
        int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
        const u16 *id = dev->id;
        unsigned int xfer_mask;
        char revbuf[7];         /* XYZ-99\0 */
        char fwrevbuf[ATA_ID_FW_REV_LEN+1];
        char modelbuf[ATA_ID_PROD_LEN+1];
        int rc;

        if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
                ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
                               __FUNCTION__);
                return 0;
        }

        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

        /* set _SDD */
        rc = ata_acpi_push_id(ap, dev->devno);
        if (rc) {
                ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
                        rc);
        }

        /* retrieve and execute the ATA task file of _GTF */
        ata_acpi_exec_tfs(ap);

        /* print device capabilities */
        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG,
                               "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
                               "85:%04x 86:%04x 87:%04x 88:%04x\n",
                               __FUNCTION__,
                               id[49], id[82], id[83], id[84],
                               id[85], id[86], id[87], id[88]);

        /* initialize to-be-configured parameters */
        dev->flags &= ~ATA_DFLAG_CFG_MASK;
        dev->max_sectors = 0;
        dev->cdb_len = 0;
        dev->n_sectors = 0;
        dev->cylinders = 0;
        dev->heads = 0;
        dev->sectors = 0;

        /*
         * common ATA, ATAPI feature tests
         */

        /* find max transfer mode; for printk only */
        xfer_mask = ata_id_xfermask(id);

        if (ata_msg_probe(ap))
                ata_dump_id(id);

        /* ATA-specific feature tests */
        if (dev->class == ATA_DEV_ATA) {
                if (ata_id_is_cfa(id)) {
                        if (id[162] & 1) /* CPRM may make this media unusable */
                                ata_dev_printk(dev, KERN_WARNING,
                                               "supports DRM functions and may "
                                               "not be fully accessible.\n");
                        snprintf(revbuf, 7, "CFA");
                }
                else
                        snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

                dev->n_sectors = ata_id_n_sectors(id);

                /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
                ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
                                sizeof(fwrevbuf));

                ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
                                sizeof(modelbuf));

                if (dev->id[59] & 0x100)
                        dev->multi_count = dev->id[59] & 0xff;

                if (ata_id_has_lba(id)) {
                        const char *lba_desc;
                        char ncq_desc[20];

                        lba_desc = "LBA";
                        dev->flags |= ATA_DFLAG_LBA;
                        if (ata_id_has_lba48(id)) {
                                dev->flags |= ATA_DFLAG_LBA48;
                                lba_desc = "LBA48";

                                if (dev->n_sectors >= (1UL << 28) &&
                                    ata_id_has_flush_ext(id))
                                        dev->flags |= ATA_DFLAG_FLUSH_EXT;
                        }

                        /* config NCQ */
                        ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

                        /* print device info to dmesg */
                        if (ata_msg_drv(ap) && print_info) {
                                ata_dev_printk(dev, KERN_INFO,
                                        "%s: %s, %s, max %s\n",
                                        revbuf, modelbuf, fwrevbuf,
                                        ata_mode_string(xfer_mask));
                                ata_dev_printk(dev, KERN_INFO,
                                        "%Lu sectors, multi %u: %s %s\n",
                                        (unsigned long long)dev->n_sectors,
                                        dev->multi_count, lba_desc, ncq_desc);
                        }
                } else {
                        /* CHS */

                        /* Default translation */
                        dev->cylinders  = id[1];
                        dev->heads      = id[3];
                        dev->sectors    = id[6];

                        if (ata_id_current_chs_valid(id)) {
                                /* Current CHS translation is valid. */
                                dev->cylinders = id[54];
                                dev->heads     = id[55];
                                dev->sectors   = id[56];
                        }

                        /* print device info to dmesg */
                        if (ata_msg_drv(ap) && print_info) {
                                ata_dev_printk(dev, KERN_INFO,
                                        "%s: %s, %s, max %s\n",
                                        revbuf, modelbuf, fwrevbuf,
                                        ata_mode_string(xfer_mask));
                                ata_dev_printk(dev, KERN_INFO,
                                        "%Lu sectors, multi %u, CHS %u/%u/%u\n",
                                        (unsigned long long)dev->n_sectors,
                                        dev->multi_count, dev->cylinders,
                                        dev->heads, dev->sectors);
                        }
                }

                dev->cdb_len = 16;
        }

        /* ATAPI-specific feature tests */
        else if (dev->class == ATA_DEV_ATAPI) {
                char *cdb_intr_string = "";

                rc = atapi_cdb_len(id);
                if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
                        if (ata_msg_warn(ap))
                                ata_dev_printk(dev, KERN_WARNING,
                                               "unsupported CDB len\n");
                        rc = -EINVAL;
                        goto err_out_nosup;
                }
                dev->cdb_len = (unsigned int) rc;

                if (ata_id_cdb_intr(dev->id)) {
                        dev->flags |= ATA_DFLAG_CDB_INTR;
                        cdb_intr_string = ", CDB intr";
                }

                /* print device info to dmesg */
                if (ata_msg_drv(ap) && print_info)
                        ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
                                       ata_mode_string(xfer_mask),
                                       cdb_intr_string);
        }

        /* determine max_sectors */
        dev->max_sectors = ATA_MAX_SECTORS;
        if (dev->flags & ATA_DFLAG_LBA48)
                dev->max_sectors = ATA_MAX_SECTORS_LBA48;

        if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
                /* Let the user know. We don't want to disallow opens for
                   rescue purposes, or in case the vendor is just a blithering
                   idiot */
                if (print_info) {
                        ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
                        ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
                }
        }

        /* limit bridge transfers to udma5, 200 sectors */
        if (ata_dev_knobble(dev)) {
                if (ata_msg_drv(ap) && print_info)
                        ata_dev_printk(dev, KERN_INFO,
                                       "applying bridge limits\n");
                dev->udma_mask &= ATA_UDMA5;
                dev->max_sectors = ATA_MAX_SECTORS;
        }

        if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
                dev->max_sectors = min(ATA_MAX_SECTORS_128, dev->max_sectors);

        /* limit ATAPI DMA to R/W commands only */
        if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
                dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;

        if (ap->ops->dev_config)
                ap->ops->dev_config(dev);

        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
                        __FUNCTION__, ata_chk_status(ap));
        return 0;

err_out_nosup:
        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG,
                               "%s: EXIT, err\n", __FUNCTION__);
        return rc;
}

/**
 * ata_bus_probe - Reset and probe ATA bus
 * @ap: Bus to probe
 *
 * Master ATA bus probing function.  Initiates a hardware-dependent
 * bus reset, then attempts to identify any devices found on
 * the bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
        unsigned int classes[ATA_MAX_DEVICES];
        int tries[ATA_MAX_DEVICES];
        int i, rc;
        struct ata_device *dev;

        ata_port_probe(ap);

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
        /* reset and determine device classes */
        ap->ops->phy_reset(ap);

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                dev = &ap->device[i];

                if (!(ap->flags & ATA_FLAG_DISABLED) &&
                    dev->class != ATA_DEV_UNKNOWN)
                        classes[dev->devno] = dev->class;
                else
                        classes[dev->devno] = ATA_DEV_NONE;

                dev->class = ATA_DEV_UNKNOWN;
        }

        ata_port_probe(ap);

        /* after the reset the device state is PIO 0 and the controller
           state is undefined. Record the mode */

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                ap->device[i].pio_mode = XFER_PIO_0;

        /* read IDENTIFY page and configure devices. We have to do the identify
           specific sequence bass-ackwards so that PDIAG- is released by
           the slave device */

        for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
                dev = &ap->device[i];

                if (tries[i])
                        dev->class = classes[i];

                if (!ata_dev_enabled(dev))
                        continue;

                rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
                                     dev->id);
                if (rc)
                        goto fail;
        }

        /* After the identify sequence we can now set up the devices. We do
           this in the normal order so that the user doesn't get confused */

        for(i = 0; i < ATA_MAX_DEVICES; i++) {
                dev = &ap->device[i];
                if (!ata_dev_enabled(dev))
                        continue;

                ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
                rc = ata_dev_configure(dev);
                ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
                if (rc)
                        goto fail;
        }

        /* configure transfer mode */
        rc = ata_set_mode(ap, &dev);
        if (rc)
                goto fail;

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (ata_dev_enabled(&ap->device[i]))
                        return 0;

        /* no device present, disable port */
        ata_port_disable(ap);
        ap->ops->port_disable(ap);
        return -ENODEV;

 fail:
        tries[dev->devno]--;

        switch (rc) {
        case -EINVAL:
                /* eeek, something went very wrong, give up */
                tries[dev->devno] = 0;
                break;

        case -ENODEV:
                /* give it just one more chance */
                tries[dev->devno] = min(tries[dev->devno], 1);
        case -EIO:
                if (tries[dev->devno] == 1) {
                        /* This is the last chance, better to slow
                         * down than lose it.
                         */
                        sata_down_spd_limit(ap);
                        ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
                }
        }

        if (!tries[dev->devno])
                ata_dev_disable(dev);

        goto retry;
}
1919
1920/**
0cba632b
JG
1921 * ata_port_probe - Mark port as enabled
1922 * @ap: Port for which we indicate enablement
1da177e4 1923 *
0cba632b
JG
1924 * Modify @ap data structure such that the system
1925 * thinks that the entire port is enabled.
1926 *
cca3974e 1927 * LOCKING: host lock, or some other form of
0cba632b 1928 * serialization.
1da177e4
LT
1929 */
1930
1931void ata_port_probe(struct ata_port *ap)
1932{
198e0fed 1933 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
1934}
1935
3be680b7
TH
1936/**
1937 * sata_print_link_status - Print SATA link status
1938 * @ap: SATA port to printk link status about
1939 *
1940 * This function prints link speed and status of a SATA link.
1941 *
1942 * LOCKING:
1943 * None.
1944 */
43727fbc 1945void sata_print_link_status(struct ata_port *ap)
3be680b7 1946{
6d5f9732 1947 u32 sstatus, scontrol, tmp;
3be680b7 1948
81952c54 1949 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1950 return;
81952c54 1951 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1952
81952c54 1953 if (ata_port_online(ap)) {
3be680b7 1954 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1955 ata_port_printk(ap, KERN_INFO,
1956 "SATA link up %s (SStatus %X SControl %X)\n",
1957 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1958 } else {
f15a1daf
TH
1959 ata_port_printk(ap, KERN_INFO,
1960 "SATA link down (SStatus %X SControl %X)\n",
1961 sstatus, scontrol);
3be680b7
TH
1962 }
1963}
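/* Hedged sketch (not part of libata): how the SStatus word read above
 * decomposes.  Per the SATA spec, DET occupies bits 3:0, SPD bits 7:4
 * and IPM bits 11:8; SPD == 1 corresponds to 1.5 Gbps.
 */
static inline void sstatus_sketch_decode(u32 sstatus)
{
	u32 det = sstatus & 0xf;	/* phy/device detection state */
	u32 spd = (sstatus >> 4) & 0xf;	/* negotiated interface speed */
	u32 ipm = (sstatus >> 8) & 0xf;	/* interface power management */

	/* DET == 3: device present and phy communication established -
	 * the condition ata_port_online() looks for.
	 */
	(void)det; (void)spd; (void)ipm;
}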
1964
1da177e4 1965/**
780a87f7
JG
1966 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1967 * @ap: SATA port associated with target SATA PHY.
1da177e4 1968 *
780a87f7
JG
1969 * This function issues commands to standard SATA Sxxx
1970 * PHY registers, to wake up the phy (and device), and
1971 * clear any reset condition.
1da177e4
LT
1972 *
1973 * LOCKING:
0cba632b 1974 * PCI/etc. bus probe sem.
1da177e4
LT
1975 *
1976 */
1977void __sata_phy_reset(struct ata_port *ap)
1978{
1979 u32 sstatus;
1980 unsigned long timeout = jiffies + (HZ * 5);
1981
1982 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 1983 /* issue phy wake/reset */
81952c54 1984 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
1985 /* Couldn't find anything in SATA I/II specs, but
1986 * AHCI-1.1 10.4.2 says at least 1 ms. */
1987 mdelay(1);
1da177e4 1988 }
81952c54
TH
1989 /* phy wake/clear reset */
1990 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
1991
1992 /* wait for phy to become ready, if necessary */
1993 do {
1994 msleep(200);
81952c54 1995 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
1996 if ((sstatus & 0xf) != 1)
1997 break;
1998 } while (time_before(jiffies, timeout));
1999
3be680b7
TH
2000 /* print link status */
2001 sata_print_link_status(ap);
656563e3 2002
3be680b7 2003 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 2004 if (!ata_port_offline(ap))
1da177e4 2005 ata_port_probe(ap);
3be680b7 2006 else
1da177e4 2007 ata_port_disable(ap);
1da177e4 2008
198e0fed 2009 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2010 return;
2011
2012 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2013 ata_port_disable(ap);
2014 return;
2015 }
2016
2017 ap->cbl = ATA_CBL_SATA;
2018}
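/* Hedged note (not part of libata): the SCR_CONTROL values used above
 * follow the SControl layout - DET in bits 3:0, SPD in bits 7:4, IPM
 * in bits 11:8.  So 0x301 requests phy reset (DET = 1) with partial/
 * slumber power states disallowed (IPM = 3), and 0x300 releases the
 * reset (DET = 0) while keeping power management disabled.
 */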
2019
2020/**
780a87f7
JG
2021 * sata_phy_reset - Reset SATA bus.
2022 * @ap: SATA port associated with target SATA PHY.
1da177e4 2023 *
780a87f7
JG
2024 * This function resets the SATA bus, and then probes
2025 * the bus for devices.
1da177e4
LT
2026 *
2027 * LOCKING:
0cba632b 2028 * PCI/etc. bus probe sem.
1da177e4
LT
2029 *
2030 */
2031void sata_phy_reset(struct ata_port *ap)
2032{
2033 __sata_phy_reset(ap);
198e0fed 2034 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2035 return;
2036 ata_bus_reset(ap);
2037}
2038
ebdfca6e
AC
2039/**
2040 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2041 * @adev: device
2042 *
 2043	 * Obtain the other device on the same cable, or NULL if no
 2044	 * other device is present.
2045 */
2e9edbf8 2046
3373efd8 2047struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2048{
3373efd8 2049 struct ata_port *ap = adev->ap;
ebdfca6e 2050 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2051 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2052 return NULL;
2053 return pair;
2054}
2055
1da177e4 2056/**
780a87f7
JG
2057 * ata_port_disable - Disable port.
2058 * @ap: Port to be disabled.
1da177e4 2059 *
780a87f7
JG
2060 * Modify @ap data structure such that the system
2061 * thinks that the entire port is disabled, and should
2062 * never attempt to probe or communicate with devices
2063 * on this port.
2064 *
cca3974e 2065 * LOCKING: host lock, or some other form of
780a87f7 2066 * serialization.
1da177e4
LT
2067 */
2068
2069void ata_port_disable(struct ata_port *ap)
2070{
2071 ap->device[0].class = ATA_DEV_NONE;
2072 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2073 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2074}
2075
1c3fae4d 2076/**
3c567b7d 2077 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2078 * @ap: Port to adjust SATA spd limit for
2079 *
2080 * Adjust SATA spd limit of @ap downward. Note that this
2081 * function only adjusts the limit. The change must be applied
3c567b7d 2082 * using sata_set_spd().
1c3fae4d
TH
2083 *
2084 * LOCKING:
2085 * Inherited from caller.
2086 *
2087 * RETURNS:
2088 * 0 on success, negative errno on failure
2089 */
3c567b7d 2090int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2091{
81952c54
TH
2092 u32 sstatus, spd, mask;
2093 int rc, highbit;
1c3fae4d 2094
81952c54
TH
2095 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2096 if (rc)
2097 return rc;
1c3fae4d
TH
2098
2099 mask = ap->sata_spd_limit;
2100 if (mask <= 1)
2101 return -EINVAL;
2102 highbit = fls(mask) - 1;
2103 mask &= ~(1 << highbit);
2104
81952c54 2105 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2106 if (spd <= 1)
2107 return -EINVAL;
2108 spd--;
2109 mask &= (1 << spd) - 1;
2110 if (!mask)
2111 return -EINVAL;
2112
2113 ap->sata_spd_limit = mask;
2114
f15a1daf
TH
2115 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2116 sata_spd_string(fls(mask)));
1c3fae4d
TH
2117
2118 return 0;
2119}
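/* Standalone userland demo (illustrative, not part of libata): bit n
 * of sata_spd_limit allows speed n+1, so with Gen1|Gen2 allowed (0x3)
 * and the link negotiated at 3.0 Gbps (spd == 2), the function drops
 * the top allowed speed and caps below the current one, leaving Gen1.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x3;	/* Gen1 | Gen2 allowed */
	unsigned int spd = 2;		/* current speed from SStatus */
	int highbit = 1;		/* fls(0x3) - 1 */

	mask &= ~(1u << highbit);	/* drop fastest: 0x3 -> 0x1 */
	spd--;
	mask &= (1u << spd) - 1;	/* cap below current: stays 0x1 */
	printf("new sata_spd_limit: 0x%x (Gen1 only)\n", mask);
	return 0;
}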
2120
3c567b7d 2121static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2122{
2123 u32 spd, limit;
2124
2125 if (ap->sata_spd_limit == UINT_MAX)
2126 limit = 0;
2127 else
2128 limit = fls(ap->sata_spd_limit);
2129
2130 spd = (*scontrol >> 4) & 0xf;
2131 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2132
2133 return spd != limit;
2134}
2135
2136/**
3c567b7d 2137 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2138 * @ap: Port in question
2139 *
2140 * Test whether the spd limit in SControl matches
2141 * @ap->sata_spd_limit. This function is used to determine
2142 * whether hardreset is necessary to apply SATA spd
2143 * configuration.
2144 *
2145 * LOCKING:
2146 * Inherited from caller.
2147 *
2148 * RETURNS:
2149 * 1 if SATA spd configuration is needed, 0 otherwise.
2150 */
3c567b7d 2151int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2152{
2153 u32 scontrol;
2154
81952c54 2155 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2156 return 0;
2157
3c567b7d 2158 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2159}
2160
2161/**
3c567b7d 2162 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2163 * @ap: Port to set SATA spd for
2164 *
2165 * Set SATA spd of @ap according to sata_spd_limit.
2166 *
2167 * LOCKING:
2168 * Inherited from caller.
2169 *
2170 * RETURNS:
2171 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2172 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2173 */
3c567b7d 2174int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2175{
2176 u32 scontrol;
81952c54 2177 int rc;
1c3fae4d 2178
81952c54
TH
2179 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2180 return rc;
1c3fae4d 2181
3c567b7d 2182 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2183 return 0;
2184
81952c54
TH
2185 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2186 return rc;
2187
1c3fae4d
TH
2188 return 1;
2189}
2190
452503f9
AC
2191/*
2192 * This mode timing computation functionality is ported over from
2193 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2194 */
2195/*
b352e57d 2196 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2197 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2198 * for UDMA6, which is currently supported only by Maxtor drives.
2199 *
2200 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2201 */
2202
2203static const struct ata_timing ata_timing[] = {
2204
2205 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2206 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2207 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2208 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2209
b352e57d
AC
2210 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2211 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2212 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2213 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2214 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2215
2216/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2217
452503f9
AC
2218 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2219 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2220 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2221
452503f9
AC
2222 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2223 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2224 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2225
b352e57d
AC
2226 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2227 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2228 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2229 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2230
2231 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2232 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2233 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2234
2235/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2236
2237 { 0xFF }
2238};
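/* Column legend for the table above (matching the field order used by
 * ata_timing_quantize() below): { mode, setup, act8b, rec8b, cyc8b,
 * active, recover, cycle, udma }, all in nanoseconds.  UDMA entries
 * specify only the udma cycle time; PIO/MWDMA entries leave it 0.
 */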
2239
2240#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2241#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2242
2243static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2244{
2245 q->setup = EZ(t->setup * 1000, T);
2246 q->act8b = EZ(t->act8b * 1000, T);
2247 q->rec8b = EZ(t->rec8b * 1000, T);
2248 q->cyc8b = EZ(t->cyc8b * 1000, T);
2249 q->active = EZ(t->active * 1000, T);
2250 q->recover = EZ(t->recover * 1000, T);
2251 q->cycle = EZ(t->cycle * 1000, T);
2252 q->udma = EZ(t->udma * 1000, UT);
2253}
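/* Standalone userland demo (illustrative, not part of libata):
 * ENOUGH() is a ceiling division, so quantizing a 70 ns requirement
 * against a 30 ns clock yields 3 periods (2 would give only 60 ns);
 * EZ() keeps 0 meaning "not specified".  The "* 1000" above scales
 * the ns table entries to the finer units of T/UT that callers pass.
 */
#include <stdio.h>

#define ENOUGH(v, unit)	(((v) - 1) / (unit) + 1)
#define EZ(v, unit)	((v) ? ENOUGH(v, unit) : 0)

int main(void)
{
	printf("%d\n", ENOUGH(70, 30));	/* 3: ceil(70 / 30) */
	printf("%d\n", EZ(0, 30));	/* 0: unspecified stays 0 */
	return 0;
}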
2254
2255void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2256 struct ata_timing *m, unsigned int what)
2257{
2258 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2259 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2260 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2261 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2262 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2263 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2264 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2265 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2266}
2267
2268static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2269{
2270 const struct ata_timing *t;
2271
2272 for (t = ata_timing; t->mode != speed; t++)
91190758 2273 if (t->mode == 0xFF)
452503f9 2274 return NULL;
2e9edbf8 2275 return t;
452503f9
AC
2276}
2277
2278int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2279 struct ata_timing *t, int T, int UT)
2280{
2281 const struct ata_timing *s;
2282 struct ata_timing p;
2283
2284 /*
2e9edbf8 2285 * Find the mode.
75b1f2f8 2286 */
452503f9
AC
2287
2288 if (!(s = ata_timing_find_mode(speed)))
2289 return -EINVAL;
2290
75b1f2f8
AL
2291 memcpy(t, s, sizeof(*s));
2292
452503f9
AC
2293 /*
2294 * If the drive is an EIDE drive, it can tell us it needs extended
2295 * PIO/MW_DMA cycle timing.
2296 */
2297
2298 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2299 memset(&p, 0, sizeof(p));
 2300		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2301 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2302 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
 2303		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2304 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2305 }
2306 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2307 }
2308
2309 /*
2310 * Convert the timing to bus clock counts.
2311 */
2312
75b1f2f8 2313 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2314
2315 /*
c893a3ae
RD
2316 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
 2317	 * S.M.A.R.T. and some other commands. We have to ensure that the
 2318	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
452503f9
AC
2319 */
2320
fd3367af 2321 if (speed > XFER_PIO_6) {
452503f9
AC
2322 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2323 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2324 }
2325
2326 /*
c893a3ae 2327 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2328 */
2329
2330 if (t->act8b + t->rec8b < t->cyc8b) {
2331 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2332 t->rec8b = t->cyc8b - t->act8b;
2333 }
2334
2335 if (t->active + t->recover < t->cycle) {
2336 t->active += (t->cycle - (t->active + t->recover)) / 2;
2337 t->recover = t->cycle - t->active;
2338 }
2339
2340 return 0;
2341}
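/* Standalone userland demo (illustrative): the lengthening step above
 * splits a cycle-time shortfall between active and recovery.  With
 * act8b = 65, rec8b = 25, cyc8b = 100 clocks the shortfall is 10, so
 * act8b grows to 70 and rec8b is recomputed as 30.
 */
#include <stdio.h>

int main(void)
{
	int act8b = 65, rec8b = 25, cyc8b = 100;

	if (act8b + rec8b < cyc8b) {
		act8b += (cyc8b - (act8b + rec8b)) / 2;	/* 65 -> 70 */
		rec8b = cyc8b - act8b;			/* 25 -> 30 */
	}
	printf("act8b=%d rec8b=%d (sum == cyc8b)\n", act8b, rec8b);
	return 0;
}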
2342
cf176e1a
TH
2343/**
2344 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2345 * @dev: Device to adjust xfer masks
458337db 2346 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2347 *
2348 * Adjust xfer masks of @dev downward. Note that this function
2349 * does not apply the change. Invoking ata_set_mode() afterwards
2350 * will apply the limit.
2351 *
2352 * LOCKING:
2353 * Inherited from caller.
2354 *
2355 * RETURNS:
2356 * 0 on success, negative errno on failure
2357 */
458337db 2358int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2359{
458337db
TH
2360 char buf[32];
2361 unsigned int orig_mask, xfer_mask;
2362 unsigned int pio_mask, mwdma_mask, udma_mask;
2363 int quiet, highbit;
cf176e1a 2364
458337db
TH
2365 quiet = !!(sel & ATA_DNXFER_QUIET);
2366 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2367
458337db
TH
2368 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2369 dev->mwdma_mask,
2370 dev->udma_mask);
2371 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2372
458337db
TH
2373 switch (sel) {
2374 case ATA_DNXFER_PIO:
2375 highbit = fls(pio_mask) - 1;
2376 pio_mask &= ~(1 << highbit);
2377 break;
2378
2379 case ATA_DNXFER_DMA:
2380 if (udma_mask) {
2381 highbit = fls(udma_mask) - 1;
2382 udma_mask &= ~(1 << highbit);
2383 if (!udma_mask)
2384 return -ENOENT;
2385 } else if (mwdma_mask) {
2386 highbit = fls(mwdma_mask) - 1;
2387 mwdma_mask &= ~(1 << highbit);
2388 if (!mwdma_mask)
2389 return -ENOENT;
2390 }
2391 break;
2392
2393 case ATA_DNXFER_40C:
2394 udma_mask &= ATA_UDMA_MASK_40C;
2395 break;
2396
2397 case ATA_DNXFER_FORCE_PIO0:
2398 pio_mask &= 1;
2399 case ATA_DNXFER_FORCE_PIO:
2400 mwdma_mask = 0;
2401 udma_mask = 0;
2402 break;
2403
458337db
TH
2404 default:
2405 BUG();
2406 }
2407
2408 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2409
2410 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2411 return -ENOENT;
2412
2413 if (!quiet) {
2414 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2415 snprintf(buf, sizeof(buf), "%s:%s",
2416 ata_mode_string(xfer_mask),
2417 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2418 else
2419 snprintf(buf, sizeof(buf), "%s",
2420 ata_mode_string(xfer_mask));
2421
2422 ata_dev_printk(dev, KERN_WARNING,
2423 "limiting speed to %s\n", buf);
2424 }
cf176e1a
TH
2425
2426 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2427 &dev->udma_mask);
2428
cf176e1a 2429 return 0;
cf176e1a
TH
2430}
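/* Standalone userland demo (illustrative): the ATA_DNXFER_PIO case
 * above just clears the highest set bit of the PIO field, stepping a
 * PIO0-4 capable mask (0x1f) down to PIO0-3 (0x0f).
 */
#include <stdio.h>

int main(void)
{
	unsigned int pio_mask = 0x1f;	/* PIO modes 0..4 */
	int highbit = 4;		/* fls(pio_mask) - 1 */

	pio_mask &= ~(1u << highbit);
	printf("pio_mask: 0x%x (PIO 0..3)\n", pio_mask);
	return 0;
}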
2431
3373efd8 2432static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2433{
baa1e78a 2434 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2435 unsigned int err_mask;
2436 int rc;
1da177e4 2437
e8384607 2438 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2439 if (dev->xfer_shift == ATA_SHIFT_PIO)
2440 dev->flags |= ATA_DFLAG_PIO;
2441
3373efd8 2442 err_mask = ata_dev_set_xfermode(dev);
11750a40
A
2443 /* Old CFA may refuse this command, which is just fine */
2444 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2445 err_mask &= ~AC_ERR_DEV;
2446
83206a29 2447 if (err_mask) {
f15a1daf
TH
2448 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2449 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2450 return -EIO;
2451 }
1da177e4 2452
baa1e78a 2453 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2454 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2455 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2456 if (rc)
83206a29 2457 return rc;
48a8a14f 2458
23e71c3d
TH
2459 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2460 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2461
f15a1daf
TH
2462 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2463 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2464 return 0;
1da177e4
LT
2465}
2466
1da177e4
LT
2467/**
2468 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2469 * @ap: port on which timings will be programmed
e82cbdb9 2470	 *	@r_failed_dev: out parameter for failed device
1da177e4 2471 *
e82cbdb9
TH
2472 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2473 * ata_set_mode() fails, pointer to the failing device is
2474 * returned in @r_failed_dev.
780a87f7 2475 *
1da177e4 2476 * LOCKING:
0cba632b 2477 * PCI/etc. bus probe sem.
e82cbdb9
TH
2478 *
2479 * RETURNS:
2480 * 0 on success, negative errno otherwise
1da177e4 2481 */
1ad8e7f9 2482int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2483{
e8e0619f 2484 struct ata_device *dev;
e82cbdb9 2485 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2486
3adcebb2 2487 /* has private set_mode? */
b229a7b0
A
2488 if (ap->ops->set_mode)
2489 return ap->ops->set_mode(ap, r_failed_dev);
3adcebb2 2490
a6d5a51c
TH
2491 /* step 1: calculate xfer_mask */
2492 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2493 unsigned int pio_mask, dma_mask;
a6d5a51c 2494
e8e0619f
TH
2495 dev = &ap->device[i];
2496
e1211e3f 2497 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2498 continue;
2499
3373efd8 2500 ata_dev_xfermask(dev);
1da177e4 2501
acf356b1
TH
2502 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2503 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2504 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2505 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2506
4f65977d 2507 found = 1;
5444a6f4
AC
2508 if (dev->dma_mode)
2509 used_dma = 1;
a6d5a51c 2510 }
4f65977d 2511 if (!found)
e82cbdb9 2512 goto out;
a6d5a51c
TH
2513
2514 /* step 2: always set host PIO timings */
e8e0619f
TH
2515 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2516 dev = &ap->device[i];
2517 if (!ata_dev_enabled(dev))
2518 continue;
2519
2520 if (!dev->pio_mode) {
f15a1daf 2521 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2522 rc = -EINVAL;
e82cbdb9 2523 goto out;
e8e0619f
TH
2524 }
2525
2526 dev->xfer_mode = dev->pio_mode;
2527 dev->xfer_shift = ATA_SHIFT_PIO;
2528 if (ap->ops->set_piomode)
2529 ap->ops->set_piomode(ap, dev);
2530 }
1da177e4 2531
a6d5a51c 2532 /* step 3: set host DMA timings */
e8e0619f
TH
2533 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2534 dev = &ap->device[i];
2535
2536 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2537 continue;
2538
2539 dev->xfer_mode = dev->dma_mode;
2540 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2541 if (ap->ops->set_dmamode)
2542 ap->ops->set_dmamode(ap, dev);
2543 }
1da177e4
LT
2544
2545 /* step 4: update devices' xfer mode */
83206a29 2546 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2547 dev = &ap->device[i];
1da177e4 2548
18d90deb 2549 /* don't update suspended devices' xfer mode */
02670bf3 2550 if (!ata_dev_ready(dev))
83206a29
TH
2551 continue;
2552
3373efd8 2553 rc = ata_dev_set_mode(dev);
5bbc53f4 2554 if (rc)
e82cbdb9 2555 goto out;
83206a29 2556 }
1da177e4 2557
e8e0619f
TH
2558 /* Record simplex status. If we selected DMA then the other
2559 * host channels are not permitted to do so.
5444a6f4 2560 */
cca3974e 2561 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2562 ap->host->simplex_claimed = ap;
5444a6f4 2563
e8e0619f 2564	/* step 5: chip-specific finalisation */
1da177e4
LT
2565 if (ap->ops->post_set_mode)
2566 ap->ops->post_set_mode(ap);
e82cbdb9
TH
2567 out:
2568 if (rc)
2569 *r_failed_dev = dev;
2570 return rc;
1da177e4
LT
2571}
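/* Hedged sketch (not a real driver; names and the 30000 ps clock
 * period are hypothetical): the per-chip hooks invoked in steps 2 and
 * 3 above typically convert dev->pio_mode / dev->dma_mode into
 * register values, often via ata_timing_compute(), and program them
 * for dev->devno.
 */
static void sketch_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
	struct ata_timing t;

	/* 30000 ps is an assumed example period for a 33 MHz bus clock */
	if (ata_timing_compute(dev, dev->pio_mode, &t, 30000, 30000) == 0) {
		/* write t.setup / t.active / t.recover etc. into the
		 * controller's timing registers here
		 */
	}
}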
2572
1fdffbce
JG
2573/**
2574 * ata_tf_to_host - issue ATA taskfile to host controller
2575 * @ap: port to which command is being issued
2576 * @tf: ATA taskfile register set
2577 *
2578 * Issues ATA taskfile register set to ATA host controller,
2579 * with proper synchronization with interrupt handler and
2580 * other threads.
2581 *
2582 * LOCKING:
cca3974e 2583 * spin_lock_irqsave(host lock)
1fdffbce
JG
2584 */
2585
2586static inline void ata_tf_to_host(struct ata_port *ap,
2587 const struct ata_taskfile *tf)
2588{
2589 ap->ops->tf_load(ap, tf);
2590 ap->ops->exec_command(ap, tf);
2591}
2592
1da177e4
LT
2593/**
2594 * ata_busy_sleep - sleep until BSY clears, or timeout
2595 * @ap: port containing status register to be polled
2596 * @tmout_pat: impatience timeout
2597 * @tmout: overall timeout
2598 *
780a87f7
JG
2599 * Sleep until ATA Status register bit BSY clears,
2600 * or a timeout occurs.
2601 *
d1adc1bb
TH
2602 * LOCKING:
2603 * Kernel thread context (may sleep).
2604 *
2605 * RETURNS:
2606 * 0 on success, -errno otherwise.
1da177e4 2607 */
d1adc1bb
TH
2608int ata_busy_sleep(struct ata_port *ap,
2609 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2610{
2611 unsigned long timer_start, timeout;
2612 u8 status;
2613
2614 status = ata_busy_wait(ap, ATA_BUSY, 300);
2615 timer_start = jiffies;
2616 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2617 while (status != 0xff && (status & ATA_BUSY) &&
2618 time_before(jiffies, timeout)) {
1da177e4
LT
2619 msleep(50);
2620 status = ata_busy_wait(ap, ATA_BUSY, 3);
2621 }
2622
d1adc1bb 2623 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2624 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2625 "port is slow to respond, please be patient "
2626 "(Status 0x%x)\n", status);
1da177e4
LT
2627
2628 timeout = timer_start + tmout;
d1adc1bb
TH
2629 while (status != 0xff && (status & ATA_BUSY) &&
2630 time_before(jiffies, timeout)) {
1da177e4
LT
2631 msleep(50);
2632 status = ata_chk_status(ap);
2633 }
2634
d1adc1bb
TH
2635 if (status == 0xff)
2636 return -ENODEV;
2637
1da177e4 2638 if (status & ATA_BUSY) {
f15a1daf 2639 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2640 "(%lu secs, Status 0x%x)\n",
2641 tmout / HZ, status);
d1adc1bb 2642 return -EBUSY;
1da177e4
LT
2643 }
2644
2645 return 0;
2646}
2647
2648static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2649{
2650 struct ata_ioports *ioaddr = &ap->ioaddr;
2651 unsigned int dev0 = devmask & (1 << 0);
2652 unsigned int dev1 = devmask & (1 << 1);
2653 unsigned long timeout;
2654
2655 /* if device 0 was found in ata_devchk, wait for its
2656 * BSY bit to clear
2657 */
2658 if (dev0)
2659 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2660
2661 /* if device 1 was found in ata_devchk, wait for
2662 * register access, then wait for BSY to clear
2663 */
2664 timeout = jiffies + ATA_TMOUT_BOOT;
2665 while (dev1) {
2666 u8 nsect, lbal;
2667
2668 ap->ops->dev_select(ap, 1);
0d5ff566
TH
2669 nsect = ioread8(ioaddr->nsect_addr);
2670 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
2671 if ((nsect == 1) && (lbal == 1))
2672 break;
2673 if (time_after(jiffies, timeout)) {
2674 dev1 = 0;
2675 break;
2676 }
2677 msleep(50); /* give drive a breather */
2678 }
2679 if (dev1)
2680 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2681
2682 /* is all this really necessary? */
2683 ap->ops->dev_select(ap, 0);
2684 if (dev1)
2685 ap->ops->dev_select(ap, 1);
2686 if (dev0)
2687 ap->ops->dev_select(ap, 0);
2688}
2689
1da177e4
LT
2690static unsigned int ata_bus_softreset(struct ata_port *ap,
2691 unsigned int devmask)
2692{
2693 struct ata_ioports *ioaddr = &ap->ioaddr;
2694
44877b4e 2695 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
2696
2697 /* software reset. causes dev0 to be selected */
0d5ff566
TH
2698 iowrite8(ap->ctl, ioaddr->ctl_addr);
2699 udelay(20); /* FIXME: flush */
2700 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2701 udelay(20); /* FIXME: flush */
2702 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2703
2704 /* spec mandates ">= 2ms" before checking status.
2705 * We wait 150ms, because that was the magic delay used for
2706 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2707 * between when the ATA command register is written, and then
2708 * status is checked. Because waiting for "a while" before
2709 * checking status is fine, post SRST, we perform this magic
2710 * delay here as well.
09c7ad79
AC
2711 *
 2712	 * Old drivers/ide uses the 2 ms rule and then waits for ready
1da177e4
LT
2713 */
2714 msleep(150);
2715
2e9edbf8 2716 /* Before we perform post reset processing we want to see if
298a41ca
TH
2717 * the bus shows 0xFF because the odd clown forgets the D7
2718 * pulldown resistor.
2719 */
d1adc1bb
TH
2720 if (ata_check_status(ap) == 0xFF)
2721 return 0;
09c7ad79 2722
1da177e4
LT
2723 ata_bus_post_reset(ap, devmask);
2724
2725 return 0;
2726}
2727
2728/**
2729 * ata_bus_reset - reset host port and associated ATA channel
2730 * @ap: port to reset
2731 *
2732 * This is typically the first time we actually start issuing
2733 * commands to the ATA channel. We wait for BSY to clear, then
2734 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2735 * result. Determine what devices, if any, are on the channel
2736 * by looking at the device 0/1 error register. Look at the signature
2737 * stored in each device's taskfile registers, to determine if
2738 * the device is ATA or ATAPI.
2739 *
2740 * LOCKING:
0cba632b 2741 * PCI/etc. bus probe sem.
cca3974e 2742 * Obtains host lock.
1da177e4
LT
2743 *
2744 * SIDE EFFECTS:
198e0fed 2745 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
2746 */
2747
2748void ata_bus_reset(struct ata_port *ap)
2749{
2750 struct ata_ioports *ioaddr = &ap->ioaddr;
2751 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2752 u8 err;
aec5c3c1 2753 unsigned int dev0, dev1 = 0, devmask = 0;
1da177e4 2754
44877b4e 2755 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
2756
2757 /* determine if device 0/1 are present */
2758 if (ap->flags & ATA_FLAG_SATA_RESET)
2759 dev0 = 1;
2760 else {
2761 dev0 = ata_devchk(ap, 0);
2762 if (slave_possible)
2763 dev1 = ata_devchk(ap, 1);
2764 }
2765
2766 if (dev0)
2767 devmask |= (1 << 0);
2768 if (dev1)
2769 devmask |= (1 << 1);
2770
2771 /* select device 0 again */
2772 ap->ops->dev_select(ap, 0);
2773
2774 /* issue bus reset */
2775 if (ap->flags & ATA_FLAG_SRST)
aec5c3c1
TH
2776 if (ata_bus_softreset(ap, devmask))
2777 goto err_out;
1da177e4
LT
2778
2779 /*
2780 * determine by signature whether we have ATA or ATAPI devices
2781 */
b4dc7623 2782 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 2783 if ((slave_possible) && (err != 0x81))
b4dc7623 2784 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
2785
2786 /* re-enable interrupts */
83625006 2787 ap->ops->irq_on(ap);
1da177e4
LT
2788
2789 /* is double-select really necessary? */
2790 if (ap->device[1].class != ATA_DEV_NONE)
2791 ap->ops->dev_select(ap, 1);
2792 if (ap->device[0].class != ATA_DEV_NONE)
2793 ap->ops->dev_select(ap, 0);
2794
2795 /* if no devices were detected, disable this port */
2796 if ((ap->device[0].class == ATA_DEV_NONE) &&
2797 (ap->device[1].class == ATA_DEV_NONE))
2798 goto err_out;
2799
2800 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2801 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 2802 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2803 }
2804
2805 DPRINTK("EXIT\n");
2806 return;
2807
2808err_out:
f15a1daf 2809 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
2810 ap->ops->port_disable(ap);
2811
2812 DPRINTK("EXIT\n");
2813}
2814
d7bb4cc7
TH
2815/**
2816 * sata_phy_debounce - debounce SATA phy status
2817 * @ap: ATA port to debounce SATA phy status for
 2818	 * @params: timing parameters { interval, duration, timeout } in msec
2819 *
2820 * Make sure SStatus of @ap reaches stable state, determined by
2821 * holding the same value where DET is not 1 for @duration polled
 2822	 * every @interval, before @timeout.  Timeout constrains the
 2823	 * beginning of the stable state.  Because, after hot unplugging,
 2824	 * DET gets stuck at 1 on some controllers, this function waits
 2825	 * until timeout and then returns 0 if DET is stable at 1.
2826 *
2827 * LOCKING:
2828 * Kernel thread context (may sleep)
2829 *
2830 * RETURNS:
2831 * 0 on success, -errno on failure.
2832 */
2833int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
7a7921e8 2834{
d7bb4cc7
TH
2835 unsigned long interval_msec = params[0];
2836 unsigned long duration = params[1] * HZ / 1000;
2837 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2838 unsigned long last_jiffies;
2839 u32 last, cur;
2840 int rc;
2841
2842 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2843 return rc;
2844 cur &= 0xf;
2845
2846 last = cur;
2847 last_jiffies = jiffies;
2848
2849 while (1) {
2850 msleep(interval_msec);
2851 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2852 return rc;
2853 cur &= 0xf;
2854
2855 /* DET stable? */
2856 if (cur == last) {
2857 if (cur == 1 && time_before(jiffies, timeout))
2858 continue;
2859 if (time_after(jiffies, last_jiffies + duration))
2860 return 0;
2861 continue;
2862 }
2863
2864 /* unstable, start over */
2865 last = cur;
2866 last_jiffies = jiffies;
2867
2868 /* check timeout */
2869 if (time_after(jiffies, timeout))
2870 return -EBUSY;
2871 }
2872}
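/* Illustrative usage note: callers pass one of the shared timing
 * tables, e.g.
 *
 *	rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
 *
 * (see ata_wait_spinup() below), where the table supplies
 * { interval, duration, timeout } in msec.
 */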
2873
2874/**
2875 * sata_phy_resume - resume SATA phy
2876 * @ap: ATA port to resume SATA phy for
 2877	 * @params: timing parameters { interval, duration, timeout } in msec
2878 *
2879 * Resume SATA phy of @ap and debounce it.
2880 *
2881 * LOCKING:
2882 * Kernel thread context (may sleep)
2883 *
2884 * RETURNS:
2885 * 0 on success, -errno on failure.
2886 */
2887int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2888{
2889 u32 scontrol;
81952c54
TH
2890 int rc;
2891
2892 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2893 return rc;
7a7921e8 2894
852ee16a 2895 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
2896
2897 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2898 return rc;
7a7921e8 2899
d7bb4cc7
TH
2900 /* Some PHYs react badly if SStatus is pounded immediately
2901 * after resuming. Delay 200ms before debouncing.
2902 */
2903 msleep(200);
7a7921e8 2904
d7bb4cc7 2905 return sata_phy_debounce(ap, params);
7a7921e8
TH
2906}
2907
f5914a46
TH
2908static void ata_wait_spinup(struct ata_port *ap)
2909{
2910 struct ata_eh_context *ehc = &ap->eh_context;
2911 unsigned long end, secs;
2912 int rc;
2913
2914 /* first, debounce phy if SATA */
2915 if (ap->cbl == ATA_CBL_SATA) {
e9c83914 2916 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
f5914a46
TH
2917
2918 /* if debounced successfully and offline, no need to wait */
2919 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2920 return;
2921 }
2922
2923 /* okay, let's give the drive time to spin up */
2924 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2925 secs = ((end - jiffies) + HZ - 1) / HZ;
2926
2927 if (time_after(jiffies, end))
2928 return;
2929
2930 if (secs > 5)
2931 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2932 "(%lu secs)\n", secs);
2933
2934 schedule_timeout_uninterruptible(end - jiffies);
2935}
2936
2937/**
2938 * ata_std_prereset - prepare for reset
2939 * @ap: ATA port to be reset
2940 *
2941 * @ap is about to be reset. Initialize it.
2942 *
2943 * LOCKING:
2944 * Kernel thread context (may sleep)
2945 *
2946 * RETURNS:
2947 * 0 on success, -errno otherwise.
2948 */
2949int ata_std_prereset(struct ata_port *ap)
2950{
2951 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 2952 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
2953 int rc;
2954
28324304
TH
2955 /* handle link resume & hotplug spinup */
2956 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2957 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2958 ehc->i.action |= ATA_EH_HARDRESET;
2959
2960 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2961 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2962 ata_wait_spinup(ap);
f5914a46
TH
2963
2964 /* if we're about to do hardreset, nothing more to do */
2965 if (ehc->i.action & ATA_EH_HARDRESET)
2966 return 0;
2967
2968 /* if SATA, resume phy */
2969 if (ap->cbl == ATA_CBL_SATA) {
f5914a46
TH
2970 rc = sata_phy_resume(ap, timing);
2971 if (rc && rc != -EOPNOTSUPP) {
2972 /* phy resume failed */
2973 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2974 "link for reset (errno=%d)\n", rc);
2975 return rc;
2976 }
2977 }
2978
2979 /* Wait for !BSY if the controller can wait for the first D2H
2980 * Reg FIS and we don't know that no device is attached.
2981 */
2982 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2983 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2984
2985 return 0;
2986}
2987
c2bd5804
TH
2988/**
2989 * ata_std_softreset - reset host port via ATA SRST
2990 * @ap: port to reset
c2bd5804
TH
2991 * @classes: resulting classes of attached devices
2992 *
52783c5d 2993 * Reset host port using ATA SRST.
c2bd5804
TH
2994 *
2995 * LOCKING:
2996 * Kernel thread context (may sleep)
2997 *
2998 * RETURNS:
2999 * 0 on success, -errno otherwise.
3000 */
2bf2cb26 3001int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
c2bd5804
TH
3002{
3003 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3004 unsigned int devmask = 0, err_mask;
3005 u8 err;
3006
3007 DPRINTK("ENTER\n");
3008
81952c54 3009 if (ata_port_offline(ap)) {
3a39746a
TH
3010 classes[0] = ATA_DEV_NONE;
3011 goto out;
3012 }
3013
c2bd5804
TH
3014 /* determine if device 0/1 are present */
3015 if (ata_devchk(ap, 0))
3016 devmask |= (1 << 0);
3017 if (slave_possible && ata_devchk(ap, 1))
3018 devmask |= (1 << 1);
3019
c2bd5804
TH
3020 /* select device 0 again */
3021 ap->ops->dev_select(ap, 0);
3022
3023 /* issue bus reset */
3024 DPRINTK("about to softreset, devmask=%x\n", devmask);
3025 err_mask = ata_bus_softreset(ap, devmask);
3026 if (err_mask) {
f15a1daf
TH
3027 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
3028 err_mask);
c2bd5804
TH
3029 return -EIO;
3030 }
3031
3032 /* determine by signature whether we have ATA or ATAPI devices */
3033 classes[0] = ata_dev_try_classify(ap, 0, &err);
3034 if (slave_possible && err != 0x81)
3035 classes[1] = ata_dev_try_classify(ap, 1, &err);
3036
3a39746a 3037 out:
c2bd5804
TH
3038 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3039 return 0;
3040}
3041
3042/**
b6103f6d 3043 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 3044 * @ap: port to reset
b6103f6d 3045	 *	@timing: timing parameters { interval, duration, timeout } in msec
c2bd5804
TH
3046 *
3047 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
3048 *
3049 * LOCKING:
3050 * Kernel thread context (may sleep)
3051 *
3052 * RETURNS:
3053 * 0 on success, -errno otherwise.
3054 */
b6103f6d 3055int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
c2bd5804 3056{
852ee16a 3057 u32 scontrol;
81952c54 3058 int rc;
852ee16a 3059
c2bd5804
TH
3060 DPRINTK("ENTER\n");
3061
3c567b7d 3062 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
3063 /* SATA spec says nothing about how to reconfigure
3064 * spd. To be on the safe side, turn off phy during
3065 * reconfiguration. This works for at least ICH7 AHCI
3066 * and Sil3124.
3067 */
81952c54 3068 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3069 goto out;
81952c54 3070
a34b6fc0 3071 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
3072
3073 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 3074 goto out;
1c3fae4d 3075
3c567b7d 3076 sata_set_spd(ap);
1c3fae4d
TH
3077 }
3078
3079 /* issue phy wake/reset */
81952c54 3080 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3081 goto out;
81952c54 3082
852ee16a 3083 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
3084
3085 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 3086 goto out;
c2bd5804 3087
1c3fae4d 3088 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3089 * 10.4.2 says at least 1 ms.
3090 */
3091 msleep(1);
3092
1c3fae4d 3093 /* bring phy back */
b6103f6d
TH
3094 rc = sata_phy_resume(ap, timing);
3095 out:
3096 DPRINTK("EXIT, rc=%d\n", rc);
3097 return rc;
3098}
3099
3100/**
3101 * sata_std_hardreset - reset host port via SATA phy reset
3102 * @ap: port to reset
3103 * @class: resulting class of attached device
3104 *
3105 * SATA phy-reset host port using DET bits of SControl register,
3106 * wait for !BSY and classify the attached device.
3107 *
3108 * LOCKING:
3109 * Kernel thread context (may sleep)
3110 *
3111 * RETURNS:
3112 * 0 on success, -errno otherwise.
3113 */
3114int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3115{
3116 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3117 int rc;
3118
3119 DPRINTK("ENTER\n");
3120
3121 /* do hardreset */
3122 rc = sata_port_hardreset(ap, timing);
3123 if (rc) {
3124 ata_port_printk(ap, KERN_ERR,
3125 "COMRESET failed (errno=%d)\n", rc);
3126 return rc;
3127 }
c2bd5804 3128
c2bd5804 3129 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3130 if (ata_port_offline(ap)) {
c2bd5804
TH
3131 *class = ATA_DEV_NONE;
3132 DPRINTK("EXIT, link offline\n");
3133 return 0;
3134 }
3135
34fee227
TH
3136 /* wait a while before checking status, see SRST for more info */
3137 msleep(150);
3138
c2bd5804 3139 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
f15a1daf
TH
3140 ata_port_printk(ap, KERN_ERR,
3141 "COMRESET failed (device not ready)\n");
c2bd5804
TH
3142 return -EIO;
3143 }
3144
3a39746a
TH
3145 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3146
c2bd5804
TH
3147 *class = ata_dev_try_classify(ap, 0, NULL);
3148
3149 DPRINTK("EXIT, class=%u\n", *class);
3150 return 0;
3151}
3152
3153/**
3154 * ata_std_postreset - standard postreset callback
3155 * @ap: the target ata_port
3156 * @classes: classes of attached devices
3157 *
3158 * This function is invoked after a successful reset. Note that
3159 * the device might have been reset more than once using
3160 * different reset methods before postreset is invoked.
c2bd5804 3161 *
c2bd5804
TH
3162 * LOCKING:
3163 * Kernel thread context (may sleep)
3164 */
3165void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3166{
dc2b3515
TH
3167 u32 serror;
3168
c2bd5804
TH
3169 DPRINTK("ENTER\n");
3170
c2bd5804 3171 /* print link status */
81952c54 3172 sata_print_link_status(ap);
c2bd5804 3173
dc2b3515
TH
3174 /* clear SError */
3175 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3176 sata_scr_write(ap, SCR_ERROR, serror);
3177
3a39746a 3178 /* re-enable interrupts */
83625006
AI
3179 if (!ap->ops->error_handler)
3180 ap->ops->irq_on(ap);
c2bd5804
TH
3181
3182 /* is double-select really necessary? */
3183 if (classes[0] != ATA_DEV_NONE)
3184 ap->ops->dev_select(ap, 1);
3185 if (classes[1] != ATA_DEV_NONE)
3186 ap->ops->dev_select(ap, 0);
3187
3a39746a
TH
3188 /* bail out if no device is present */
3189 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3190 DPRINTK("EXIT, no device\n");
3191 return;
3192 }
3193
3194 /* set up device control */
0d5ff566
TH
3195 if (ap->ioaddr.ctl_addr)
3196 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3197
3198 DPRINTK("EXIT\n");
3199}
3200
623a3128
TH
3201/**
3202 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3203 * @dev: device to compare against
3204 * @new_class: class of the new device
3205 * @new_id: IDENTIFY page of the new device
3206 *
3207 * Compare @new_class and @new_id against @dev and determine
3208 * whether @dev is the device indicated by @new_class and
3209 * @new_id.
3210 *
3211 * LOCKING:
3212 * None.
3213 *
3214 * RETURNS:
3215 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3216 */
3373efd8
TH
3217static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3218 const u16 *new_id)
623a3128
TH
3219{
3220 const u16 *old_id = dev->id;
a0cf733b
TH
3221 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3222 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3223 u64 new_n_sectors;
3224
3225 if (dev->class != new_class) {
f15a1daf
TH
3226 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3227 dev->class, new_class);
623a3128
TH
3228 return 0;
3229 }
3230
a0cf733b
TH
3231 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3232 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3233 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3234 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3235 new_n_sectors = ata_id_n_sectors(new_id);
3236
3237 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3238 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3239 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3240 return 0;
3241 }
3242
3243 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3244 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3245 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3246 return 0;
3247 }
3248
3249 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3250 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3251 "%llu != %llu\n",
3252 (unsigned long long)dev->n_sectors,
3253 (unsigned long long)new_n_sectors);
623a3128
TH
3254 return 0;
3255 }
3256
3257 return 1;
3258}
3259
3260/**
3261 * ata_dev_revalidate - Revalidate ATA device
623a3128 3262 * @dev: device to revalidate
bff04647 3263 * @readid_flags: read ID flags
623a3128
TH
3264 *
3265 * Re-read IDENTIFY page and make sure @dev is still attached to
3266 * the port.
3267 *
3268 * LOCKING:
3269 * Kernel thread context (may sleep)
3270 *
3271 * RETURNS:
3272 * 0 on success, negative errno otherwise
3273 */
bff04647 3274int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3275{
5eb45c02 3276 unsigned int class = dev->class;
f15a1daf 3277 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3278 int rc;
3279
5eb45c02
TH
3280 if (!ata_dev_enabled(dev)) {
3281 rc = -ENODEV;
3282 goto fail;
3283 }
623a3128 3284
fe635c7e 3285 /* read ID data */
bff04647 3286 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3287 if (rc)
3288 goto fail;
3289
3290 /* is the device still there? */
3373efd8 3291 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3292 rc = -ENODEV;
3293 goto fail;
3294 }
3295
fe635c7e 3296 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3297
3298 /* configure device according to the new ID */
efdaedc4 3299 rc = ata_dev_configure(dev);
5eb45c02
TH
3300 if (rc == 0)
3301 return 0;
623a3128
TH
3302
3303 fail:
f15a1daf 3304 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3305 return rc;
3306}
3307
6919a0a6
AC
3308struct ata_blacklist_entry {
3309 const char *model_num;
3310 const char *model_rev;
3311 unsigned long horkage;
3312};
3313
3314static const struct ata_blacklist_entry ata_device_blacklist [] = {
3315 /* Devices with DMA related problems under Linux */
3316 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3317 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3318 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3319 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3320 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3321 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3322 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3323 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3324 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3325 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3326 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3327 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3328 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3329 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3330 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3331 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3332 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3333 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3334 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3335 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3336 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3337 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3338 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3339 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3340 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3341 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3342 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3343 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3344 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3345
18d6e9d5 3346 /* Weird ATAPI devices */
6f23a31d
AL
3347 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 |
3348 ATA_HORKAGE_DMA_RW_ONLY },
18d6e9d5 3349
6919a0a6
AC
3350 /* Devices we expect to fail diagnostics */
3351
3352 /* Devices where NCQ should be avoided */
3353 /* NCQ is slow */
3354 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3355 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3356 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30
PR
3357 /* NCQ is broken */
3358 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
96442925
JA
3359 /* NCQ hard hangs device under heavier load, needs hard power cycle */
3360 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
36e337d0
RH
3361 /* Blacklist entries taken from Silicon Image 3124/3132
3362 Windows driver .inf file - also several Linux problem reports */
3363 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3364 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3365 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6
AC
3366
3367 /* Devices with NCQ limits */
3368
3369 /* End Marker */
3370 { }
1da177e4 3371};
2e9edbf8 3372
6919a0a6 3373unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3374{
8bfa79fc
TH
3375 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3376 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3377 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3378
8bfa79fc
TH
3379 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3380 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3381
6919a0a6 3382 while (ad->model_num) {
8bfa79fc 3383 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3384 if (ad->model_rev == NULL)
3385 return ad->horkage;
8bfa79fc 3386 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3387 return ad->horkage;
f4b15fef 3388 }
6919a0a6 3389 ad++;
f4b15fef 3390 }
1da177e4
LT
3391 return 0;
3392}
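/* Illustrative usage (mirrors the ATA_HORKAGE_* tests elsewhere in
 * this file): callers AND the returned mask with the horkage bit they
 * care about, e.g. to honour the NCQ blacklist entries above.
 */
static inline int sketch_ncq_blacklisted(const struct ata_device *dev)
{
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) ? 1 : 0;
}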
3393
6919a0a6
AC
3394static int ata_dma_blacklisted(const struct ata_device *dev)
3395{
3396 /* We don't support polling DMA.
 3397	 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
 3398	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3399 */
3400 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3401 (dev->flags & ATA_DFLAG_CDB_INTR))
3402 return 1;
3403 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3404}
3405
a6d5a51c
TH
3406/**
3407 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3408 * @dev: Device to compute xfermask for
3409 *
acf356b1
TH
3410 * Compute supported xfermask of @dev and store it in
3411 * dev->*_mask. This function is responsible for applying all
3412 * known limits including host controller limits, device
3413 * blacklist, etc...
a6d5a51c
TH
3414 *
3415 * LOCKING:
3416 * None.
a6d5a51c 3417 */
3373efd8 3418static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3419{
3373efd8 3420 struct ata_port *ap = dev->ap;
cca3974e 3421 struct ata_host *host = ap->host;
a6d5a51c 3422 unsigned long xfer_mask;
1da177e4 3423
37deecb5 3424 /* controller modes available */
565083e1
TH
3425 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3426 ap->mwdma_mask, ap->udma_mask);
3427
8343f889 3428 /* drive modes available */
37deecb5
TH
3429 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3430 dev->mwdma_mask, dev->udma_mask);
3431 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3432
b352e57d
AC
3433 /*
3434 * CFA Advanced TrueIDE timings are not allowed on a shared
3435 * cable
3436 */
3437 if (ata_dev_pair(dev)) {
3438 /* No PIO5 or PIO6 */
3439 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3440 /* No MWDMA3 or MWDMA 4 */
3441 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3442 }
3443
37deecb5
TH
3444 if (ata_dma_blacklisted(dev)) {
3445 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3446 ata_dev_printk(dev, KERN_WARNING,
3447 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3448 }
a6d5a51c 3449
14d66ab7
PV
3450 if ((host->flags & ATA_HOST_SIMPLEX) &&
3451 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
3452 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3453 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3454 "other device, disabling DMA\n");
5444a6f4 3455 }
565083e1 3456
5444a6f4
AC
3457 if (ap->ops->mode_filter)
3458 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3459
8343f889
RH
3460 /* Apply cable rule here. Don't apply it early because when
3461 * we handle hot plug the cable type can itself change.
3462 * Check this last so that we know if the transfer rate was
3463 * solely limited by the cable.
 3464	 * Unknown or 80-wire cables reported host side are checked
 3465	 * drive side as well. Cases where we know a 40-wire cable
 3466	 * is used safely for 80-wire speeds are not checked here.
3467 */
3468 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3469 /* UDMA/44 or higher would be available */
 3470		if ((ap->cbl == ATA_CBL_PATA40) ||
3471 (ata_drive_40wire(dev->id) &&
3472 (ap->cbl == ATA_CBL_PATA_UNK ||
3473 ap->cbl == ATA_CBL_PATA80))) {
3474 ata_dev_printk(dev, KERN_WARNING,
3475 "limited to UDMA/33 due to 40-wire cable\n");
3476 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3477 }
3478
565083e1
TH
3479 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3480 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3481}
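/* Standalone userland demo (illustrative): with UDMA modes occupying
 * one bit each (bit n = UDMA/n), masking with ~0xF8 clears UDMA/3 and
 * above, leaving UDMA/0-2 - i.e. the UDMA/33 cap applied above for
 * 40-wire cables.
 */
#include <stdio.h>

int main(void)
{
	unsigned int udma_mask = 0x7f;		/* UDMA 0..6 supported */

	udma_mask &= ~0xF8u;			/* keep UDMA 0..2 only */
	printf("udma_mask: 0x%x (max UDMA/33)\n", udma_mask);
	return 0;
}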
3482
1da177e4
LT
3483/**
3484 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3485 * @dev: Device to which command will be sent
3486 *
780a87f7
JG
3487 * Issue SET FEATURES - XFER MODE command to device @dev
3488 * on port @ap.
3489 *
1da177e4 3490 * LOCKING:
0cba632b 3491 * PCI/etc. bus probe sem.
83206a29
TH
3492 *
3493 * RETURNS:
3494 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3495 */
3496
3373efd8 3497static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3498{
a0123703 3499 struct ata_taskfile tf;
83206a29 3500 unsigned int err_mask;
1da177e4
LT
3501
3502 /* set up set-features taskfile */
3503 DPRINTK("set features - xfer mode\n");
3504
3373efd8 3505 ata_tf_init(dev, &tf);
a0123703
TH
3506 tf.command = ATA_CMD_SET_FEATURES;
3507 tf.feature = SETFEATURES_XFER;
3508 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3509 tf.protocol = ATA_PROT_NODATA;
3510 tf.nsect = dev->xfer_mode;
1da177e4 3511
3373efd8 3512 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3513
83206a29
TH
3514 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3515 return err_mask;
1da177e4
LT
3516}
3517
8bf62ece
AL
3518/**
3519 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3520 * @dev: Device to which command will be sent
e2a7f77a
RD
3521 * @heads: Number of heads (taskfile parameter)
3522 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3523 *
3524 * LOCKING:
6aff8f1f
TH
3525 * Kernel thread context (may sleep)
3526 *
3527 * RETURNS:
3528 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3529 */
3373efd8
TH
3530static unsigned int ata_dev_init_params(struct ata_device *dev,
3531 u16 heads, u16 sectors)
8bf62ece 3532{
a0123703 3533 struct ata_taskfile tf;
6aff8f1f 3534 unsigned int err_mask;
8bf62ece
AL
3535
3536 /* Number of sectors per track 1-255. Number of heads 1-16 */
3537 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3538 return AC_ERR_INVALID;
8bf62ece
AL
3539
3540 /* set up init dev params taskfile */
3541 DPRINTK("init dev params \n");
3542
3373efd8 3543 ata_tf_init(dev, &tf);
a0123703
TH
3544 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3545 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3546 tf.protocol = ATA_PROT_NODATA;
3547 tf.nsect = sectors;
3548 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3549
3373efd8 3550 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3551
6aff8f1f
TH
3552 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3553 return err_mask;
8bf62ece
AL
3554}
3555
1da177e4 3556/**
0cba632b
JG
3557 * ata_sg_clean - Unmap DMA memory associated with command
3558 * @qc: Command containing DMA memory to be released
3559 *
3560 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3561 *
3562 * LOCKING:
cca3974e 3563 * spin_lock_irqsave(host lock)
1da177e4 3564 */
70e6ad0c 3565void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
3566{
3567 struct ata_port *ap = qc->ap;
cedc9a47 3568 struct scatterlist *sg = qc->__sg;
1da177e4 3569 int dir = qc->dma_dir;
cedc9a47 3570 void *pad_buf = NULL;
1da177e4 3571
a4631474
TH
3572 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3573 WARN_ON(sg == NULL);
1da177e4
LT
3574
3575 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 3576 WARN_ON(qc->n_elem > 1);
1da177e4 3577
2c13b7ce 3578 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 3579
cedc9a47
JG
3580 /* if we padded the buffer out to 32-bit bound, and data
3581 * xfer direction is from-device, we must copy from the
3582 * pad buffer back into the supplied buffer
3583 */
3584 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3585 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3586
3587 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 3588 if (qc->n_elem)
2f1f610b 3589 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
3590 /* restore last sg */
3591 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3592 if (pad_buf) {
3593 struct scatterlist *psg = &qc->pad_sgent;
3594 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3595 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 3596 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3597 }
3598 } else {
2e242fa9 3599 if (qc->n_elem)
2f1f610b 3600 dma_unmap_single(ap->dev,
e1410f2d
JG
3601 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3602 dir);
cedc9a47
JG
3603 /* restore sg */
3604 sg->length += qc->pad_len;
3605 if (pad_buf)
3606 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3607 pad_buf, qc->pad_len);
3608 }
1da177e4
LT
3609
3610 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 3611 qc->__sg = NULL;
1da177e4
LT
3612}
3613
3614/**
3615 * ata_fill_sg - Fill PCI IDE PRD table
3616 * @qc: Metadata associated with taskfile to be transferred
3617 *
780a87f7
JG
3618 * Fill PCI IDE PRD (scatter-gather) table with segments
3619 * associated with the current disk command.
3620 *
1da177e4 3621 * LOCKING:
cca3974e 3622 * spin_lock_irqsave(host lock)
1da177e4
LT
3623 *
3624 */
3625static void ata_fill_sg(struct ata_queued_cmd *qc)
3626{
1da177e4 3627 struct ata_port *ap = qc->ap;
cedc9a47
JG
3628 struct scatterlist *sg;
3629 unsigned int idx;
1da177e4 3630
a4631474 3631 WARN_ON(qc->__sg == NULL);
f131883e 3632 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
3633
3634 idx = 0;
cedc9a47 3635 ata_for_each_sg(sg, qc) {
1da177e4
LT
3636 u32 addr, offset;
3637 u32 sg_len, len;
3638
3639 /* determine if physical DMA addr spans 64K boundary.
3640 * Note h/w doesn't support 64-bit, so we unconditionally
3641 * truncate dma_addr_t to u32.
3642 */
3643 addr = (u32) sg_dma_address(sg);
3644 sg_len = sg_dma_len(sg);
3645
3646 while (sg_len) {
3647 offset = addr & 0xffff;
3648 len = sg_len;
3649 if ((offset + sg_len) > 0x10000)
3650 len = 0x10000 - offset;
3651
3652 ap->prd[idx].addr = cpu_to_le32(addr);
3653 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3654 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3655
3656 idx++;
3657 sg_len -= len;
3658 addr += len;
3659 }
3660 }
3661
3662 if (idx)
3663 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3664}
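
The while (sg_len) loop above guarantees that no PRD entry crosses a 64 KiB boundary, splitting an entry at the boundary when needed. A standalone model of the same arithmetic (function and variable names here are invented for illustration):

#include <stdio.h>
#include <stdint.h>

/* Split one DMA region into chunks that never cross a 64 KiB
 * boundary, mirroring the while (sg_len) loop in ata_fill_sg().
 */
static void fill_prd(uint32_t addr, uint32_t sg_len)
{
	unsigned int idx = 0;

	while (sg_len) {
		uint32_t offset = addr & 0xffff;
		uint32_t len = sg_len;

		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;

		printf("PRD[%u] = (0x%08x, 0x%x)\n", idx++, addr, len);
		sg_len -= len;
		addr += len;
	}
}

int main(void)
{
	/* 40 KiB starting 8 KiB below a 64 KiB boundary: two entries */
	fill_prd(0x0001e000, 40 * 1024);
	return 0;
}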
3665/**
3666 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3667 * @qc: Metadata associated with taskfile to check
3668 *
780a87f7
JG
3669 * Allow low-level driver to filter ATA PACKET commands, returning
3670 * a status indicating whether or not it is OK to use DMA for the
3671 * supplied PACKET command.
3672 *
1da177e4 3673 * LOCKING:
cca3974e 3674 * spin_lock_irqsave(host lock)
0cba632b 3675 *
1da177e4
LT
3676 * RETURNS: 0 when ATAPI DMA can be used
3677 * nonzero otherwise
3678 */
3679int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3680{
3681 struct ata_port *ap = qc->ap;
3682 int rc = 0; /* Assume ATAPI DMA is OK by default */
3683
6f23a31d
AL
3684 /* some drives can only do ATAPI DMA on read/write */
3685 if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
3686 struct scsi_cmnd *cmd = qc->scsicmd;
3687 u8 *scsicmd = cmd->cmnd;
3688
3689 switch (scsicmd[0]) {
3690 case READ_10:
3691 case WRITE_10:
3692 case READ_12:
3693 case WRITE_12:
3694 case READ_6:
3695 case WRITE_6:
3696 			/* ATAPI DMA may be OK */
3697 break;
3698 default:
3699 /* turn off atapi dma */
3700 return 1;
3701 }
3702 }
3703
1da177e4
LT
3704 if (ap->ops->check_atapi_dma)
3705 rc = ap->ops->check_atapi_dma(qc);
3706
3707 return rc;
3708}
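
An LLD's ->check_atapi_dma() hook only needs to return nonzero to veto DMA for a command. A hypothetical policy -- not taken from any real driver, with invented field names -- might fall back to PIO for transfer lengths that are not 32-bit aligned:

#include <stdio.h>

/* Hypothetical ->check_atapi_dma() policy: veto DMA (return
 * nonzero) when the transfer length is not a multiple of 4 bytes.
 */
struct fake_qc {
	unsigned int nbytes;
};

static int check_atapi_dma(const struct fake_qc *qc)
{
	return (qc->nbytes & 3) ? 1 : 0;	/* nonzero: use PIO */
}

int main(void)
{
	struct fake_qc ok = { .nbytes = 2048 };
	struct fake_qc odd = { .nbytes = 510 };

	printf("2048 bytes: %s\n", check_atapi_dma(&ok) ? "PIO" : "DMA ok");
	printf(" 510 bytes: %s\n", check_atapi_dma(&odd) ? "PIO" : "DMA ok");
	return 0;
}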
3709/**
3710 * ata_qc_prep - Prepare taskfile for submission
3711 * @qc: Metadata associated with taskfile to be prepared
3712 *
780a87f7
JG
3713 * Prepare ATA taskfile for submission.
3714 *
1da177e4 3715 * LOCKING:
cca3974e 3716 * spin_lock_irqsave(host lock)
1da177e4
LT
3717 */
3718void ata_qc_prep(struct ata_queued_cmd *qc)
3719{
3720 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3721 return;
3722
3723 ata_fill_sg(qc);
3724}
3725
e46834cd
BK
3726void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3727
0cba632b
JG
3728/**
3729 * ata_sg_init_one - Associate command with memory buffer
3730 * @qc: Command to be associated
3731 * @buf: Memory buffer
3732 * @buflen: Length of memory buffer, in bytes.
3733 *
3734 * Initialize the data-related elements of queued_cmd @qc
3735 * to point to a single memory buffer, @buf of byte length @buflen.
3736 *
3737 * LOCKING:
cca3974e 3738 * spin_lock_irqsave(host lock)
0cba632b
JG
3739 */
3740
1da177e4
LT
3741void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3742{
1da177e4
LT
3743 qc->flags |= ATA_QCFLAG_SINGLE;
3744
cedc9a47 3745 qc->__sg = &qc->sgent;
1da177e4 3746 qc->n_elem = 1;
cedc9a47 3747 qc->orig_n_elem = 1;
1da177e4 3748 qc->buf_virt = buf;
233277ca 3749 qc->nbytes = buflen;
1da177e4 3750
61c0596c 3751 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3752}
3753
0cba632b
JG
3754/**
3755 * ata_sg_init - Associate command with scatter-gather table.
3756 * @qc: Command to be associated
3757 * @sg: Scatter-gather table.
3758 * @n_elem: Number of elements in s/g table.
3759 *
3760 * Initialize the data-related elements of queued_cmd @qc
3761 * to point to a scatter-gather table @sg, containing @n_elem
3762 * elements.
3763 *
3764 * LOCKING:
cca3974e 3765 * spin_lock_irqsave(host lock)
0cba632b
JG
3766 */
3767
1da177e4
LT
3768void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3769 unsigned int n_elem)
3770{
3771 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3772 qc->__sg = sg;
1da177e4 3773 qc->n_elem = n_elem;
cedc9a47 3774 qc->orig_n_elem = n_elem;
1da177e4
LT
3775}
3776
3777/**
0cba632b
JG
3778 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3779 * @qc: Command with memory buffer to be mapped.
3780 *
3781 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
3782 *
3783 * LOCKING:
cca3974e 3784 * spin_lock_irqsave(host lock)
1da177e4
LT
3785 *
3786 * RETURNS:
0cba632b 3787 * Zero on success, negative on error.
1da177e4
LT
3788 */
3789
3790static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3791{
3792 struct ata_port *ap = qc->ap;
3793 int dir = qc->dma_dir;
cedc9a47 3794 struct scatterlist *sg = qc->__sg;
1da177e4 3795 dma_addr_t dma_address;
2e242fa9 3796 int trim_sg = 0;
1da177e4 3797
cedc9a47
JG
3798 /* we must lengthen transfers to end on a 32-bit boundary */
3799 qc->pad_len = sg->length & 3;
3800 if (qc->pad_len) {
3801 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3802 struct scatterlist *psg = &qc->pad_sgent;
3803
a4631474 3804 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3805
3806 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3807
3808 if (qc->tf.flags & ATA_TFLAG_WRITE)
3809 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3810 qc->pad_len);
3811
3812 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3813 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3814 /* trim sg */
3815 sg->length -= qc->pad_len;
2e242fa9
TH
3816 if (sg->length == 0)
3817 trim_sg = 1;
cedc9a47
JG
3818
3819 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3820 sg->length, qc->pad_len);
3821 }
3822
2e242fa9
TH
3823 if (trim_sg) {
3824 qc->n_elem--;
e1410f2d
JG
3825 goto skip_map;
3826 }
3827
2f1f610b 3828 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 3829 sg->length, dir);
537a95d9
TH
3830 if (dma_mapping_error(dma_address)) {
3831 /* restore sg */
3832 sg->length += qc->pad_len;
1da177e4 3833 return -1;
537a95d9 3834 }
1da177e4
LT
3835
3836 sg_dma_address(sg) = dma_address;
32529e01 3837 sg_dma_len(sg) = sg->length;
1da177e4 3838
2e242fa9 3839skip_map:
1da177e4
LT
3840 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3841 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3842
3843 return 0;
3844}
3845
3846/**
0cba632b
JG
3847 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3848 * @qc: Command with scatter-gather table to be mapped.
3849 *
3850 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
3851 *
3852 * LOCKING:
cca3974e 3853 * spin_lock_irqsave(host lock)
1da177e4
LT
3854 *
3855 * RETURNS:
0cba632b 3856 * Zero on success, negative on error.
1da177e4
LT
3857 *
3858 */
3859
3860static int ata_sg_setup(struct ata_queued_cmd *qc)
3861{
3862 struct ata_port *ap = qc->ap;
cedc9a47
JG
3863 struct scatterlist *sg = qc->__sg;
3864 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 3865 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 3866
44877b4e 3867 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 3868 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 3869
cedc9a47
JG
3870 /* we must lengthen transfers to end on a 32-bit boundary */
3871 qc->pad_len = lsg->length & 3;
3872 if (qc->pad_len) {
3873 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3874 struct scatterlist *psg = &qc->pad_sgent;
3875 unsigned int offset;
3876
a4631474 3877 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3878
3879 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3880
3881 /*
3882 * psg->page/offset are used to copy to-be-written
3883 		 * data in this function, or to read data in ata_sg_clean().
3884 */
3885 offset = lsg->offset + lsg->length - qc->pad_len;
3886 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3887 psg->offset = offset_in_page(offset);
3888
3889 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3890 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3891 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 3892 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3893 }
3894
3895 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3896 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3897 /* trim last sg */
3898 lsg->length -= qc->pad_len;
e1410f2d
JG
3899 if (lsg->length == 0)
3900 trim_sg = 1;
cedc9a47
JG
3901
3902 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3903 qc->n_elem - 1, lsg->length, qc->pad_len);
3904 }
3905
e1410f2d
JG
3906 pre_n_elem = qc->n_elem;
3907 if (trim_sg && pre_n_elem)
3908 pre_n_elem--;
3909
3910 if (!pre_n_elem) {
3911 n_elem = 0;
3912 goto skip_map;
3913 }
3914
1da177e4 3915 dir = qc->dma_dir;
2f1f610b 3916 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
3917 if (n_elem < 1) {
3918 /* restore last sg */
3919 lsg->length += qc->pad_len;
1da177e4 3920 return -1;
537a95d9 3921 }
1da177e4
LT
3922
3923 DPRINTK("%d sg elements mapped\n", n_elem);
3924
e1410f2d 3925skip_map:
1da177e4
LT
3926 qc->n_elem = n_elem;
3927
3928 return 0;
3929}
3930
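
Both ata_sg_setup_one() and ata_sg_setup() derive the pad from the low two bits of the transfer length. A minimal sketch of that arithmetic, assuming only what the code above shows:

#include <stdio.h>

/* The 32-bit padding rule shared by ata_sg_setup_one() and
 * ata_sg_setup(): pad_len = length & 3; the (last) sg entry is
 * trimmed by pad_len and the remainder goes through the 4-byte
 * pad buffer.
 */
int main(void)
{
	const unsigned int lengths[] = { 512, 510, 18, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
		unsigned int pad_len = lengths[i] & 3;

		printf("len %4u -> trimmed sg %4u, pad %u byte(s)\n",
		       lengths[i], lengths[i] - pad_len, pad_len);
	}
	return 0;
}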
0baab86b 3931/**
c893a3ae 3932 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3933 * @buf: Buffer to swap
3934 * @buf_words: Number of 16-bit words in buffer.
3935 *
3936 * Swap halves of 16-bit words if needed to convert from
3937 * little-endian byte order to native cpu byte order, or
3938 * vice-versa.
3939 *
3940 * LOCKING:
6f0ef4fa 3941 * Inherited from caller.
0baab86b 3942 */
1da177e4
LT
3943void swap_buf_le16(u16 *buf, unsigned int buf_words)
3944{
3945#ifdef __BIG_ENDIAN
3946 unsigned int i;
3947
3948 for (i = 0; i < buf_words; i++)
3949 buf[i] = le16_to_cpu(buf[i]);
3950#endif /* __BIG_ENDIAN */
3951}
3952
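
IDENTIFY DEVICE data, for instance, is defined as 256 little-endian 16-bit words, so big-endian CPUs must swap each word before use. A userspace equivalent that is endian-neutral, reassembling each word from its little-endian bytes:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Endian-neutral counterpart of swap_buf_le16() for a userspace
 * test: rebuild each 16-bit word from its little-endian bytes,
 * a no-op on LE hosts and a swap on BE hosts.
 */
static void le16_buf_to_cpu(uint16_t *buf, unsigned int buf_words)
{
	unsigned int i;

	for (i = 0; i < buf_words; i++) {
		const uint8_t *p = (const uint8_t *)&buf[i];

		buf[i] = (uint16_t)(p[0] | (p[1] << 8));
	}
}

int main(void)
{
	uint16_t id[2];
	const uint8_t raw[4] = { 0x3f, 0x00, 0x10, 0x27 }; /* LE 0x003f, 0x2710 */

	memcpy(id, raw, sizeof(raw));
	le16_buf_to_cpu(id, 2);
	printf("words: 0x%04x 0x%04x\n", id[0], id[1]);
	return 0;
}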
6ae4cfb5 3953/**
0d5ff566 3954 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 3955 * @adev: device to target
6ae4cfb5
AL
3956 * @buf: data buffer
3957 * @buflen: buffer length
344babaa 3958 * @write_data: 1 to write to the device, 0 to read
6ae4cfb5
AL
3959 *
3960 * Transfer data from/to the device data register by PIO.
3961 *
3962 * LOCKING:
3963 * Inherited from caller.
6ae4cfb5 3964 */
0d5ff566
TH
3965void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
3966 unsigned int buflen, int write_data)
1da177e4 3967{
a6b2c5d4 3968 struct ata_port *ap = adev->ap;
6ae4cfb5 3969 unsigned int words = buflen >> 1;
1da177e4 3970
6ae4cfb5 3971 /* Transfer multiple of 2 bytes */
1da177e4 3972 if (write_data)
0d5ff566 3973 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 3974 else
0d5ff566 3975 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
3976
3977 /* Transfer trailing 1 byte, if any. */
3978 if (unlikely(buflen & 0x01)) {
3979 u16 align_buf[1] = { 0 };
3980 unsigned char *trailing_buf = buf + buflen - 1;
3981
3982 if (write_data) {
3983 memcpy(align_buf, trailing_buf, 1);
0d5ff566 3984 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 3985 } else {
0d5ff566 3986 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
3987 memcpy(trailing_buf, align_buf, 1);
3988 }
3989 }
1da177e4
LT
3990}
3991
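
PIO moves 16-bit words, so an odd trailing byte has to be bounced through a zero-padded word as above. A standalone model of that accounting (names invented for illustration):

#include <stdio.h>
#include <stdint.h>

/* Model of the odd-byte handling in ata_data_xfer(): move buflen/2
 * full 16-bit words, then bounce the odd trailing byte through a
 * zero-padded word, as the code above does with align_buf.
 */
static void pio_write(const uint8_t *buf, unsigned int buflen)
{
	printf("write %u full word(s)\n", buflen >> 1);

	if (buflen & 1)
		printf("bounce trailing byte 0x%02x in a zero-padded word\n",
		       buf[buflen - 1]);
}

int main(void)
{
	const uint8_t cdb[7] = { 0x12, 0, 0, 0, 96, 0, 0xab };

	pio_write(cdb, sizeof(cdb));	/* 3 words + trailing 0xab */
	return 0;
}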
75e99585 3992/**
0d5ff566 3993 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
3994 * @adev: device to target
3995 * @buf: data buffer
3996 * @buflen: buffer length
3997 * @write_data: 1 to write to the device, 0 to read
3998 *
88574551 3999 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
4000 * transfer with interrupts disabled.
4001 *
4002 * LOCKING:
4003 * Inherited from caller.
4004 */
0d5ff566
TH
4005void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4006 unsigned int buflen, int write_data)
75e99585
AC
4007{
4008 unsigned long flags;
4009 local_irq_save(flags);
0d5ff566 4010 ata_data_xfer(adev, buf, buflen, write_data);
75e99585
AC
4011 local_irq_restore(flags);
4012}
4013
4014
6ae4cfb5
AL
4015/**
4016 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
4017 * @qc: Command on going
4018 *
4019 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
4020 *
4021 * LOCKING:
4022 * Inherited from caller.
4023 */
4024
1da177e4
LT
4025static void ata_pio_sector(struct ata_queued_cmd *qc)
4026{
4027 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4028 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4029 struct ata_port *ap = qc->ap;
4030 struct page *page;
4031 unsigned int offset;
4032 unsigned char *buf;
4033
726f0785 4034 if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
14be71f4 4035 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4036
4037 page = sg[qc->cursg].page;
726f0785 4038 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4039
4040 /* get the current page and offset */
4041 page = nth_page(page, (offset >> PAGE_SHIFT));
4042 offset %= PAGE_SIZE;
4043
1da177e4
LT
4044 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4045
91b8b313
AL
4046 if (PageHighMem(page)) {
4047 unsigned long flags;
4048
a6b2c5d4 4049 /* FIXME: use a bounce buffer */
91b8b313
AL
4050 local_irq_save(flags);
4051 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4052
91b8b313 4053 /* do the actual data transfer */
a6b2c5d4 4054 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
1da177e4 4055
91b8b313
AL
4056 kunmap_atomic(buf, KM_IRQ0);
4057 local_irq_restore(flags);
4058 } else {
4059 buf = page_address(page);
a6b2c5d4 4060 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
91b8b313 4061 }
1da177e4 4062
726f0785
TH
4063 qc->curbytes += ATA_SECT_SIZE;
4064 qc->cursg_ofs += ATA_SECT_SIZE;
1da177e4 4065
726f0785 4066 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4067 qc->cursg++;
4068 qc->cursg_ofs = 0;
4069 }
1da177e4 4070}
1da177e4 4071
07f6f7d0
AL
4072/**
4073 * ata_pio_sectors - Transfer one or many 512-byte sectors.
4074 * @qc: Command on going
4075 *
c81e29b4 4076 * Transfer one or many ATA_SECT_SIZE of data from/to the
07f6f7d0
AL
4077 * ATA device for the DRQ request.
4078 *
4079 * LOCKING:
4080 * Inherited from caller.
4081 */
1da177e4 4082
07f6f7d0
AL
4083static void ata_pio_sectors(struct ata_queued_cmd *qc)
4084{
4085 if (is_multi_taskfile(&qc->tf)) {
4086 /* READ/WRITE MULTIPLE */
4087 unsigned int nsect;
4088
587005de 4089 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4090
726f0785
TH
4091 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
4092 qc->dev->multi_count);
07f6f7d0
AL
4093 while (nsect--)
4094 ata_pio_sector(qc);
4095 } else
4096 ata_pio_sector(qc);
4097}
4098
c71c1857
AL
4099/**
4100 * atapi_send_cdb - Write CDB bytes to hardware
4101 * @ap: Port to which ATAPI device is attached.
4102 * @qc: Taskfile currently active
4103 *
4104 * When device has indicated its readiness to accept
4105 * a CDB, this function is called. Send the CDB.
4106 *
4107 * LOCKING:
4108 * caller.
4109 */
4110
4111static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4112{
4113 /* send SCSI cdb */
4114 DPRINTK("send cdb\n");
db024d53 4115 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4116
a6b2c5d4 4117 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4118 ata_altstatus(ap); /* flush */
4119
4120 switch (qc->tf.protocol) {
4121 case ATA_PROT_ATAPI:
4122 ap->hsm_task_state = HSM_ST;
4123 break;
4124 case ATA_PROT_ATAPI_NODATA:
4125 ap->hsm_task_state = HSM_ST_LAST;
4126 break;
4127 case ATA_PROT_ATAPI_DMA:
4128 ap->hsm_task_state = HSM_ST_LAST;
4129 /* initiate bmdma */
4130 ap->ops->bmdma_start(qc);
4131 break;
4132 }
1da177e4
LT
4133}
4134
6ae4cfb5
AL
4135/**
4136 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4137 * @qc: Command on going
4138 * @bytes: number of bytes
4139 *
4140 * Transfer data from/to the ATAPI device.
4141 *
4142 * LOCKING:
4143 * Inherited from caller.
4144 *
4145 */
4146
1da177e4
LT
4147static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4148{
4149 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4150 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4151 struct ata_port *ap = qc->ap;
4152 struct page *page;
4153 unsigned char *buf;
4154 unsigned int offset, count;
4155
563a6e1f 4156 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4157 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4158
4159next_sg:
563a6e1f 4160 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4161 /*
563a6e1f
AL
4162 * The end of qc->sg is reached and the device expects
4163 * more data to transfer. In order not to overrun qc->sg
4164 		 * and to fulfill the length specified in the byte count register,
4165 		 * - for read case, discard trailing data from the device
4166 		 * - for write case, pad zero data to the device
4167 */
4168 u16 pad_buf[1] = { 0 };
4169 unsigned int words = bytes >> 1;
4170 unsigned int i;
4171
4172 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4173 ata_dev_printk(qc->dev, KERN_WARNING,
4174 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4175
4176 for (i = 0; i < words; i++)
a6b2c5d4 4177 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4178
14be71f4 4179 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4180 return;
4181 }
4182
cedc9a47 4183 sg = &qc->__sg[qc->cursg];
1da177e4 4184
1da177e4
LT
4185 page = sg->page;
4186 offset = sg->offset + qc->cursg_ofs;
4187
4188 /* get the current page and offset */
4189 page = nth_page(page, (offset >> PAGE_SHIFT));
4190 offset %= PAGE_SIZE;
4191
6952df03 4192 /* don't overrun current sg */
32529e01 4193 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4194
4195 /* don't cross page boundaries */
4196 count = min(count, (unsigned int)PAGE_SIZE - offset);
4197
7282aa4b
AL
4198 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4199
91b8b313
AL
4200 if (PageHighMem(page)) {
4201 unsigned long flags;
4202
a6b2c5d4 4203 /* FIXME: use bounce buffer */
91b8b313
AL
4204 local_irq_save(flags);
4205 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4206
91b8b313 4207 /* do the actual data transfer */
a6b2c5d4 4208 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4209
91b8b313
AL
4210 kunmap_atomic(buf, KM_IRQ0);
4211 local_irq_restore(flags);
4212 } else {
4213 buf = page_address(page);
a6b2c5d4 4214 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4215 }
1da177e4
LT
4216
4217 bytes -= count;
4218 qc->curbytes += count;
4219 qc->cursg_ofs += count;
4220
32529e01 4221 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4222 qc->cursg++;
4223 qc->cursg_ofs = 0;
4224 }
4225
563a6e1f 4226 if (bytes)
1da177e4 4227 goto next_sg;
1da177e4
LT
4228}
4229
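
When the sg list runs out before the byte count the device reported, the code above drains (reads) or zero-pads (writes) the excess word by word. A sketch of the same accounting, with invented names:

#include <stdio.h>

/* Model of the __atapi_pio_bytes() overrun case: the device asks
 * for `bytes` via its byte-count registers but only `avail` bytes
 * of sg space remain, so the excess is moved through a throwaway
 * pad word.
 */
static void atapi_xfer(unsigned int bytes, unsigned int avail)
{
	if (bytes <= avail) {
		printf("transfer %u bytes, no overrun\n", bytes);
		return;
	}
	printf("transfer %u bytes, then drain/pad %u bytes (%u words)\n",
	       avail, bytes - avail, (bytes - avail) >> 1);
}

int main(void)
{
	atapi_xfer(2048, 2048);
	atapi_xfer(2048, 512);	/* device expects more than we have */
	return 0;
}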
6ae4cfb5
AL
4230/**
4231 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4232 * @qc: Command on going
4233 *
4234 * Transfer data from/to the ATAPI device.
4235 *
4236 * LOCKING:
4237 * Inherited from caller.
6ae4cfb5
AL
4238 */
4239
1da177e4
LT
4240static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4241{
4242 struct ata_port *ap = qc->ap;
4243 struct ata_device *dev = qc->dev;
4244 unsigned int ireason, bc_lo, bc_hi, bytes;
4245 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4246
eec4c3f3
AL
4247 /* Abuse qc->result_tf for temp storage of intermediate TF
4248 * here to save some kernel stack usage.
4249 * For normal completion, qc->result_tf is not relevant. For
4250 * error, qc->result_tf is later overwritten by ata_qc_complete().
4251 * So, the correctness of qc->result_tf is not affected.
4252 */
4253 ap->ops->tf_read(ap, &qc->result_tf);
4254 ireason = qc->result_tf.nsect;
4255 bc_lo = qc->result_tf.lbam;
4256 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4257 bytes = (bc_hi << 8) | bc_lo;
4258
4259 /* shall be cleared to zero, indicating xfer of data */
4260 if (ireason & (1 << 0))
4261 goto err_out;
4262
4263 /* make sure transfer direction matches expected */
4264 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4265 if (do_write != i_write)
4266 goto err_out;
4267
44877b4e 4268 	VPRINTK("ata%u: transferring %d bytes\n", ap->print_id, bytes);
312f7da2 4269
1da177e4
LT
4270 __atapi_pio_bytes(qc, bytes);
4271
4272 return;
4273
4274err_out:
f15a1daf 4275 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4276 qc->err_mask |= AC_ERR_HSM;
14be71f4 4277 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4278}
4279
4280/**
c234fb00
AL
4281 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4282 * @ap: the target ata_port
4283 * @qc: qc on going
1da177e4 4284 *
c234fb00
AL
4285 * RETURNS:
4286 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4287 */
c234fb00
AL
4288
4289static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4290{
c234fb00
AL
4291 if (qc->tf.flags & ATA_TFLAG_POLLING)
4292 return 1;
1da177e4 4293
c234fb00
AL
4294 if (ap->hsm_task_state == HSM_ST_FIRST) {
4295 if (qc->tf.protocol == ATA_PROT_PIO &&
4296 (qc->tf.flags & ATA_TFLAG_WRITE))
4297 return 1;
1da177e4 4298
c234fb00
AL
4299 if (is_atapi_taskfile(&qc->tf) &&
4300 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4301 return 1;
fe79e683
AL
4302 }
4303
c234fb00
AL
4304 return 0;
4305}
1da177e4 4306
c17ea20d
TH
4307/**
4308 * ata_hsm_qc_complete - finish a qc running on standard HSM
4309 * @qc: Command to complete
4310 * @in_wq: 1 if called from workqueue, 0 otherwise
4311 *
4312 * Finish @qc which is running on standard HSM.
4313 *
4314 * LOCKING:
cca3974e 4315 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4316 * Otherwise, none on entry and grabs host lock.
4317 */
4318static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4319{
4320 struct ata_port *ap = qc->ap;
4321 unsigned long flags;
4322
4323 if (ap->ops->error_handler) {
4324 if (in_wq) {
ba6a1308 4325 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4326
cca3974e
JG
4327 /* EH might have kicked in while host lock is
4328 * released.
c17ea20d
TH
4329 */
4330 qc = ata_qc_from_tag(ap, qc->tag);
4331 if (qc) {
4332 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4333 ap->ops->irq_on(ap);
c17ea20d
TH
4334 ata_qc_complete(qc);
4335 } else
4336 ata_port_freeze(ap);
4337 }
4338
ba6a1308 4339 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4340 } else {
4341 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4342 ata_qc_complete(qc);
4343 else
4344 ata_port_freeze(ap);
4345 }
4346 } else {
4347 if (in_wq) {
ba6a1308 4348 spin_lock_irqsave(ap->lock, flags);
83625006 4349 ap->ops->irq_on(ap);
c17ea20d 4350 ata_qc_complete(qc);
ba6a1308 4351 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4352 } else
4353 ata_qc_complete(qc);
4354 }
1da177e4 4355
c81e29b4 4356 ata_altstatus(ap); /* flush */
c17ea20d
TH
4357}
4358
bb5cb290
AL
4359/**
4360 * ata_hsm_move - move the HSM to the next state.
4361 * @ap: the target ata_port
4362 * @qc: qc on going
4363 * @status: current device status
4364 * @in_wq: 1 if called from workqueue, 0 otherwise
4365 *
4366 * RETURNS:
4367 * 1 when poll next status needed, 0 otherwise.
4368 */
9a1004d0
TH
4369int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4370 u8 status, int in_wq)
e2cec771 4371{
bb5cb290
AL
4372 unsigned long flags = 0;
4373 int poll_next;
4374
6912ccd5
AL
4375 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4376
bb5cb290
AL
4377 /* Make sure ata_qc_issue_prot() does not throw things
4378 * like DMA polling into the workqueue. Notice that
4379 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4380 */
c234fb00 4381 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4382
e2cec771 4383fsm_start:
999bb6f4 4384 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 4385 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 4386
e2cec771
AL
4387 switch (ap->hsm_task_state) {
4388 case HSM_ST_FIRST:
bb5cb290
AL
4389 /* Send first data block or PACKET CDB */
4390
4391 /* If polling, we will stay in the work queue after
4392 * sending the data. Otherwise, interrupt handler
4393 * takes over after sending the data.
4394 */
4395 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4396
e2cec771 4397 /* check device status */
3655d1d3
AL
4398 if (unlikely((status & ATA_DRQ) == 0)) {
4399 /* handle BSY=0, DRQ=0 as error */
4400 if (likely(status & (ATA_ERR | ATA_DF)))
4401 /* device stops HSM for abort/error */
4402 qc->err_mask |= AC_ERR_DEV;
4403 else
4404 /* HSM violation. Let EH handle this */
4405 qc->err_mask |= AC_ERR_HSM;
4406
14be71f4 4407 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4408 goto fsm_start;
1da177e4
LT
4409 }
4410
71601958
AL
4411 /* Device should not ask for data transfer (DRQ=1)
4412 * when it finds something wrong.
eee6c32f
AL
4413 * We ignore DRQ here and stop the HSM by
4414 * changing hsm_task_state to HSM_ST_ERR and
4415 * let the EH abort the command or reset the device.
71601958
AL
4416 */
4417 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4418 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4419 "error, dev_stat 0x%X\n", status);
3655d1d3 4420 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4421 ap->hsm_task_state = HSM_ST_ERR;
4422 goto fsm_start;
71601958 4423 }
1da177e4 4424
bb5cb290
AL
4425 /* Send the CDB (atapi) or the first data block (ata pio out).
4426 * During the state transition, interrupt handler shouldn't
4427 * be invoked before the data transfer is complete and
4428 * hsm_task_state is changed. Hence, the following locking.
4429 */
4430 if (in_wq)
ba6a1308 4431 spin_lock_irqsave(ap->lock, flags);
1da177e4 4432
bb5cb290
AL
4433 if (qc->tf.protocol == ATA_PROT_PIO) {
4434 /* PIO data out protocol.
4435 * send first data block.
4436 */
0565c26d 4437
bb5cb290
AL
4438 /* ata_pio_sectors() might change the state
4439 * to HSM_ST_LAST. so, the state is changed here
4440 * before ata_pio_sectors().
4441 */
4442 ap->hsm_task_state = HSM_ST;
4443 ata_pio_sectors(qc);
4444 ata_altstatus(ap); /* flush */
4445 } else
4446 /* send CDB */
4447 atapi_send_cdb(ap, qc);
4448
4449 if (in_wq)
ba6a1308 4450 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4451
4452 /* if polling, ata_pio_task() handles the rest.
4453 * otherwise, interrupt handler takes over from here.
4454 */
e2cec771 4455 break;
1c848984 4456
e2cec771
AL
4457 case HSM_ST:
4458 /* complete command or read/write the data register */
4459 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4460 /* ATAPI PIO protocol */
4461 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4462 /* No more data to transfer or device error.
4463 * Device error will be tagged in HSM_ST_LAST.
4464 */
e2cec771
AL
4465 ap->hsm_task_state = HSM_ST_LAST;
4466 goto fsm_start;
4467 }
1da177e4 4468
71601958
AL
4469 /* Device should not ask for data transfer (DRQ=1)
4470 * when it finds something wrong.
eee6c32f
AL
4471 * We ignore DRQ here and stop the HSM by
4472 * changing hsm_task_state to HSM_ST_ERR and
4473 * let the EH abort the command or reset the device.
71601958
AL
4474 */
4475 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4476 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4477 "device error, dev_stat 0x%X\n",
4478 status);
3655d1d3 4479 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4480 ap->hsm_task_state = HSM_ST_ERR;
4481 goto fsm_start;
71601958 4482 }
1da177e4 4483
e2cec771 4484 atapi_pio_bytes(qc);
7fb6ec28 4485
e2cec771
AL
4486 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4487 /* bad ireason reported by device */
4488 goto fsm_start;
1da177e4 4489
e2cec771
AL
4490 } else {
4491 /* ATA PIO protocol */
4492 if (unlikely((status & ATA_DRQ) == 0)) {
4493 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4494 if (likely(status & (ATA_ERR | ATA_DF)))
4495 /* device stops HSM for abort/error */
4496 qc->err_mask |= AC_ERR_DEV;
4497 else
55a8e2c8
TH
4498 /* HSM violation. Let EH handle this.
4499 * Phantom devices also trigger this
4500 * condition. Mark hint.
4501 */
4502 qc->err_mask |= AC_ERR_HSM |
4503 AC_ERR_NODEV_HINT;
3655d1d3 4504
e2cec771
AL
4505 ap->hsm_task_state = HSM_ST_ERR;
4506 goto fsm_start;
4507 }
1da177e4 4508
eee6c32f
AL
4509 /* For PIO reads, some devices may ask for
4510 			 * data transfer (DRQ=1) along with ERR=1.
4511 * We respect DRQ here and transfer one
4512 * block of junk data before changing the
4513 * hsm_task_state to HSM_ST_ERR.
4514 *
4515 * For PIO writes, ERR=1 DRQ=1 doesn't make
4516 * sense since the data block has been
4517 * transferred to the device.
71601958
AL
4518 */
4519 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4520 				/* data might be corrupted */
4521 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4522
4523 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4524 ata_pio_sectors(qc);
4525 ata_altstatus(ap);
4526 status = ata_wait_idle(ap);
4527 }
4528
3655d1d3
AL
4529 if (status & (ATA_BUSY | ATA_DRQ))
4530 qc->err_mask |= AC_ERR_HSM;
4531
eee6c32f
AL
4532 /* ata_pio_sectors() might change the
4533 * state to HSM_ST_LAST. so, the state
4534 * is changed after ata_pio_sectors().
4535 */
4536 ap->hsm_task_state = HSM_ST_ERR;
4537 goto fsm_start;
71601958
AL
4538 }
4539
e2cec771
AL
4540 ata_pio_sectors(qc);
4541
4542 if (ap->hsm_task_state == HSM_ST_LAST &&
4543 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4544 /* all data read */
4545 ata_altstatus(ap);
52a32205 4546 status = ata_wait_idle(ap);
e2cec771
AL
4547 goto fsm_start;
4548 }
4549 }
4550
4551 ata_altstatus(ap); /* flush */
bb5cb290 4552 poll_next = 1;
1da177e4
LT
4553 break;
4554
14be71f4 4555 case HSM_ST_LAST:
6912ccd5
AL
4556 if (unlikely(!ata_ok(status))) {
4557 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4558 ap->hsm_task_state = HSM_ST_ERR;
4559 goto fsm_start;
4560 }
4561
4562 /* no more data to transfer */
4332a771 4563 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 4564 ap->print_id, qc->dev->devno, status);
e2cec771 4565
6912ccd5
AL
4566 WARN_ON(qc->err_mask);
4567
e2cec771 4568 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4569
e2cec771 4570 /* complete taskfile transaction */
c17ea20d 4571 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4572
4573 poll_next = 0;
1da177e4
LT
4574 break;
4575
14be71f4 4576 case HSM_ST_ERR:
e2cec771
AL
4577 /* make sure qc->err_mask is available to
4578 * know what's wrong and recover
4579 */
4580 WARN_ON(qc->err_mask == 0);
4581
4582 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4583
999bb6f4 4584 /* complete taskfile transaction */
c17ea20d 4585 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4586
4587 poll_next = 0;
e2cec771
AL
4588 break;
4589 default:
bb5cb290 4590 poll_next = 0;
6912ccd5 4591 BUG();
1da177e4
LT
4592 }
4593
bb5cb290 4594 return poll_next;
1da177e4
LT
4595}
4596
65f27f38 4597static void ata_pio_task(struct work_struct *work)
8061f5f0 4598{
65f27f38
DH
4599 struct ata_port *ap =
4600 container_of(work, struct ata_port, port_task.work);
4601 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 4602 u8 status;
a1af3734 4603 int poll_next;
8061f5f0 4604
7fb6ec28 4605fsm_start:
a1af3734 4606 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4607
a1af3734
AL
4608 /*
4609 * This is purely heuristic. This is a fast path.
4610 * Sometimes when we enter, BSY will be cleared in
4611 * a chk-status or two. If not, the drive is probably seeking
4612 * or something. Snooze for a couple msecs, then
4613 * chk-status again. If still busy, queue delayed work.
4614 */
4615 status = ata_busy_wait(ap, ATA_BUSY, 5);
4616 if (status & ATA_BUSY) {
4617 msleep(2);
4618 status = ata_busy_wait(ap, ATA_BUSY, 10);
4619 if (status & ATA_BUSY) {
31ce6dae 4620 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4621 return;
4622 }
8061f5f0
TH
4623 }
4624
a1af3734
AL
4625 /* move the HSM */
4626 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4627
a1af3734
AL
4628 /* another command or interrupt handler
4629 * may be running at this point.
4630 */
4631 if (poll_next)
7fb6ec28 4632 goto fsm_start;
8061f5f0
TH
4633}
4634
1da177e4
LT
4635/**
4636 * ata_qc_new - Request an available ATA command, for queueing
4637 * @ap: Port associated with device @dev
4638 * @dev: Device from whom we request an available command structure
4639 *
4640 * LOCKING:
0cba632b 4641 * None.
1da177e4
LT
4642 */
4643
4644static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4645{
4646 struct ata_queued_cmd *qc = NULL;
4647 unsigned int i;
4648
e3180499 4649 /* no command while frozen */
b51e9e5d 4650 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4651 return NULL;
4652
2ab7db1f
TH
4653 /* the last tag is reserved for internal command. */
4654 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4655 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4656 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4657 break;
4658 }
4659
4660 if (qc)
4661 qc->tag = i;
4662
4663 return qc;
4664}
4665
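
Tag allocation above is a find-first-zero scan over a bitmask, with the last tag (ATA_MAX_QUEUE - 1) reserved for the internal command. A userspace model of the same idea, using GCC's __atomic builtins in place of test_and_set_bit():

#include <stdio.h>

#define MAX_QUEUE 32	/* mirrors ATA_MAX_QUEUE here */

static unsigned long qc_allocated;

/* Model of ata_qc_new(): atomically claim the first free tag in
 * 0..MAX_QUEUE-2; tag MAX_QUEUE-1 stays reserved for the internal
 * command.
 */
static int qc_alloc_tag(void)
{
	unsigned int i;

	for (i = 0; i < MAX_QUEUE - 1; i++) {
		unsigned long bit = 1UL << i;

		if (!(__atomic_fetch_or(&qc_allocated, bit,
					__ATOMIC_SEQ_CST) & bit))
			return (int)i;	/* bit was clear: tag is ours */
	}
	return -1;			/* all tags busy */
}

int main(void)
{
	printf("tag %d\n", qc_alloc_tag());	/* 0 */
	printf("tag %d\n", qc_alloc_tag());	/* 1 */
	return 0;
}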
4666/**
4667 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4668 * @dev: Device from whom we request an available command structure
4669 *
4670 * LOCKING:
0cba632b 4671 * None.
1da177e4
LT
4672 */
4673
3373efd8 4674struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4675{
3373efd8 4676 struct ata_port *ap = dev->ap;
1da177e4
LT
4677 struct ata_queued_cmd *qc;
4678
4679 qc = ata_qc_new(ap);
4680 if (qc) {
1da177e4
LT
4681 qc->scsicmd = NULL;
4682 qc->ap = ap;
4683 qc->dev = dev;
1da177e4 4684
2c13b7ce 4685 ata_qc_reinit(qc);
1da177e4
LT
4686 }
4687
4688 return qc;
4689}
4690
1da177e4
LT
4691/**
4692 * ata_qc_free - free unused ata_queued_cmd
4693 * @qc: Command to complete
4694 *
4695 * Designed to free unused ata_queued_cmd object
4696 * in case something prevents using it.
4697 *
4698 * LOCKING:
cca3974e 4699 * spin_lock_irqsave(host lock)
1da177e4
LT
4700 */
4701void ata_qc_free(struct ata_queued_cmd *qc)
4702{
4ba946e9
TH
4703 struct ata_port *ap = qc->ap;
4704 unsigned int tag;
4705
a4631474 4706 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4707
4ba946e9
TH
4708 qc->flags = 0;
4709 tag = qc->tag;
4710 if (likely(ata_tag_valid(tag))) {
4ba946e9 4711 qc->tag = ATA_TAG_POISON;
6cec4a39 4712 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4713 }
1da177e4
LT
4714}
4715
76014427 4716void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4717{
dedaf2b0
TH
4718 struct ata_port *ap = qc->ap;
4719
a4631474
TH
4720 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4721 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4722
4723 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4724 ata_sg_clean(qc);
4725
7401abf2 4726 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
4727 if (qc->tf.protocol == ATA_PROT_NCQ)
4728 ap->sactive &= ~(1 << qc->tag);
4729 else
4730 ap->active_tag = ATA_TAG_POISON;
7401abf2 4731
3f3791d3
AL
4732 /* atapi: mark qc as inactive to prevent the interrupt handler
4733 * from completing the command twice later, before the error handler
4734 * is called. (when rc != 0 and atapi request sense is needed)
4735 */
4736 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4737 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4738
1da177e4 4739 /* call completion callback */
77853bf2 4740 qc->complete_fn(qc);
1da177e4
LT
4741}
4742
39599a53
TH
4743static void fill_result_tf(struct ata_queued_cmd *qc)
4744{
4745 struct ata_port *ap = qc->ap;
4746
39599a53 4747 qc->result_tf.flags = qc->tf.flags;
4742d54f 4748 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
4749}
4750
f686bcb8
TH
4751/**
4752 * ata_qc_complete - Complete an active ATA command
4753 * @qc: Command to complete
4755 *
4756 * Indicate to the mid and upper layers that an ATA
4757 * command has completed, with either an ok or not-ok status.
4758 *
4759 * LOCKING:
cca3974e 4760 * spin_lock_irqsave(host lock)
f686bcb8
TH
4761 */
4762void ata_qc_complete(struct ata_queued_cmd *qc)
4763{
4764 struct ata_port *ap = qc->ap;
4765
4766 /* XXX: New EH and old EH use different mechanisms to
4767 * synchronize EH with regular execution path.
4768 *
4769 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4770 * Normal execution path is responsible for not accessing a
4771 * failed qc. libata core enforces the rule by returning NULL
4772 * from ata_qc_from_tag() for failed qcs.
4773 *
4774 * Old EH depends on ata_qc_complete() nullifying completion
4775 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4776 * not synchronize with interrupt handler. Only PIO task is
4777 * taken care of.
4778 */
4779 if (ap->ops->error_handler) {
b51e9e5d 4780 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4781
4782 if (unlikely(qc->err_mask))
4783 qc->flags |= ATA_QCFLAG_FAILED;
4784
4785 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4786 if (!ata_tag_internal(qc->tag)) {
4787 /* always fill result TF for failed qc */
39599a53 4788 fill_result_tf(qc);
f686bcb8
TH
4789 ata_qc_schedule_eh(qc);
4790 return;
4791 }
4792 }
4793
4794 /* read result TF if requested */
4795 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4796 fill_result_tf(qc);
f686bcb8
TH
4797
4798 __ata_qc_complete(qc);
4799 } else {
4800 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4801 return;
4802
4803 /* read result TF if failed or requested */
4804 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4805 fill_result_tf(qc);
f686bcb8
TH
4806
4807 __ata_qc_complete(qc);
4808 }
4809}
4810
dedaf2b0
TH
4811/**
4812 * ata_qc_complete_multiple - Complete multiple qcs successfully
4813 * @ap: port in question
4814 * @qc_active: new qc_active mask
4815 * @finish_qc: LLDD callback invoked before completing a qc
4816 *
4817 * Complete in-flight commands. This function is meant to be
4818 * called from the low-level driver's interrupt routine to complete
4819 * requests normally. ap->qc_active and @qc_active are compared
4820 * and commands are completed accordingly.
4821 *
4822 * LOCKING:
cca3974e 4823 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4824 *
4825 * RETURNS:
4826 * Number of completed commands on success, -errno otherwise.
4827 */
4828int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4829 void (*finish_qc)(struct ata_queued_cmd *))
4830{
4831 int nr_done = 0;
4832 u32 done_mask;
4833 int i;
4834
4835 done_mask = ap->qc_active ^ qc_active;
4836
4837 if (unlikely(done_mask & qc_active)) {
4838 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4839 "(%08x->%08x)\n", ap->qc_active, qc_active);
4840 return -EINVAL;
4841 }
4842
4843 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4844 struct ata_queued_cmd *qc;
4845
4846 if (!(done_mask & (1 << i)))
4847 continue;
4848
4849 if ((qc = ata_qc_from_tag(ap, i))) {
4850 if (finish_qc)
4851 finish_qc(qc);
4852 ata_qc_complete(qc);
4853 nr_done++;
4854 }
4855 }
4856
4857 return nr_done;
4858}
4859
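
The done_mask logic rewards a worked example: a completed command's bit flips from 1 in ap->qc_active to 0 in the new @qc_active, so XOR isolates exactly the finished tags, and any bit set in both done_mask and @qc_active would mean a tag appeared that was never active. A standalone demonstration:

#include <stdio.h>

/* Worked example of the mask arithmetic in ata_qc_complete_multiple(). */
int main(void)
{
	unsigned int old_active = 0x0b;	/* tags 0, 1 and 3 in flight */
	unsigned int new_active = 0x01;	/* only tag 0 still in flight */
	unsigned int done_mask = old_active ^ new_active;	/* 0x0a */
	unsigned int i;

	if (done_mask & new_active) {
		printf("illegal qc_active transition\n");
		return 1;
	}

	for (i = 0; i < 32; i++)
		if (done_mask & (1u << i))
			printf("complete tag %u\n", i);	/* tags 1 and 3 */
	return 0;
}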
1da177e4
LT
4860static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4861{
4862 struct ata_port *ap = qc->ap;
4863
4864 switch (qc->tf.protocol) {
3dc1d881 4865 case ATA_PROT_NCQ:
1da177e4
LT
4866 case ATA_PROT_DMA:
4867 case ATA_PROT_ATAPI_DMA:
4868 return 1;
4869
4870 case ATA_PROT_ATAPI:
4871 case ATA_PROT_PIO:
1da177e4
LT
4872 if (ap->flags & ATA_FLAG_PIO_DMA)
4873 return 1;
4874
4875 /* fall through */
4876
4877 default:
4878 return 0;
4879 }
4880
4881 /* never reached */
4882}
4883
4884/**
4885 * ata_qc_issue - issue taskfile to device
4886 * @qc: command to issue to device
4887 *
4888 * Prepare an ATA command for submission to the device.
4889 * This includes mapping the data into a DMA-able
4890 * area, filling in the S/G table, and finally
4891 * writing the taskfile to hardware, starting the command.
4892 *
4893 * LOCKING:
cca3974e 4894 * spin_lock_irqsave(host lock)
1da177e4 4895 */
8e0e694a 4896void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4897{
4898 struct ata_port *ap = qc->ap;
4899
dedaf2b0
TH
4900 /* Make sure only one non-NCQ command is outstanding. The
4901 * check is skipped for old EH because it reuses active qc to
4902 * request ATAPI sense.
4903 */
4904 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4905
4906 if (qc->tf.protocol == ATA_PROT_NCQ) {
4907 WARN_ON(ap->sactive & (1 << qc->tag));
4908 ap->sactive |= 1 << qc->tag;
4909 } else {
4910 WARN_ON(ap->sactive);
4911 ap->active_tag = qc->tag;
4912 }
4913
e4a70e76 4914 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4915 ap->qc_active |= 1 << qc->tag;
e4a70e76 4916
1da177e4
LT
4917 if (ata_should_dma_map(qc)) {
4918 if (qc->flags & ATA_QCFLAG_SG) {
4919 if (ata_sg_setup(qc))
8e436af9 4920 goto sg_err;
1da177e4
LT
4921 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4922 if (ata_sg_setup_one(qc))
8e436af9 4923 goto sg_err;
1da177e4
LT
4924 }
4925 } else {
4926 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4927 }
4928
4929 ap->ops->qc_prep(qc);
4930
8e0e694a
TH
4931 qc->err_mask |= ap->ops->qc_issue(qc);
4932 if (unlikely(qc->err_mask))
4933 goto err;
4934 return;
1da177e4 4935
8e436af9
TH
4936sg_err:
4937 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
4938 qc->err_mask |= AC_ERR_SYSTEM;
4939err:
4940 ata_qc_complete(qc);
1da177e4
LT
4941}
4942
4943/**
4944 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4945 * @qc: command to issue to device
4946 *
4947 * Using various libata functions and hooks, this function
4948 * starts an ATA command. ATA commands are grouped into
4949 * classes called "protocols", and issuing each type of protocol
4950 * is slightly different.
4951 *
0baab86b
EF
4952 * May be used as the qc_issue() entry in ata_port_operations.
4953 *
1da177e4 4954 * LOCKING:
cca3974e 4955 * spin_lock_irqsave(host lock)
1da177e4
LT
4956 *
4957 * RETURNS:
9a3d9eb0 4958 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
4959 */
4960
9a3d9eb0 4961unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
4962{
4963 struct ata_port *ap = qc->ap;
4964
e50362ec
AL
4965 /* Use polling pio if the LLD doesn't handle
4966 * interrupt driven pio and atapi CDB interrupt.
4967 */
4968 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4969 switch (qc->tf.protocol) {
4970 case ATA_PROT_PIO:
e3472cbe 4971 case ATA_PROT_NODATA:
e50362ec
AL
4972 case ATA_PROT_ATAPI:
4973 case ATA_PROT_ATAPI_NODATA:
4974 qc->tf.flags |= ATA_TFLAG_POLLING;
4975 break;
4976 case ATA_PROT_ATAPI_DMA:
4977 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 4978 /* see ata_dma_blacklisted() */
e50362ec
AL
4979 BUG();
4980 break;
4981 default:
4982 break;
4983 }
4984 }
4985
3d3cca37
TH
4986 /* Some controllers show flaky interrupt behavior after
4987 * setting xfer mode. Use polling instead.
4988 */
4989 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4990 qc->tf.feature == SETFEATURES_XFER) &&
4991 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4992 qc->tf.flags |= ATA_TFLAG_POLLING;
4993
312f7da2 4994 /* select the device */
1da177e4
LT
4995 ata_dev_select(ap, qc->dev->devno, 1, 0);
4996
312f7da2 4997 /* start the command */
1da177e4
LT
4998 switch (qc->tf.protocol) {
4999 case ATA_PROT_NODATA:
312f7da2
AL
5000 if (qc->tf.flags & ATA_TFLAG_POLLING)
5001 ata_qc_set_polling(qc);
5002
e5338254 5003 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
5004 ap->hsm_task_state = HSM_ST_LAST;
5005
5006 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5007 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5008
1da177e4
LT
5009 break;
5010
5011 case ATA_PROT_DMA:
587005de 5012 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5013
1da177e4
LT
5014 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5015 ap->ops->bmdma_setup(qc); /* set up bmdma */
5016 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5017 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5018 break;
5019
312f7da2
AL
5020 case ATA_PROT_PIO:
5021 if (qc->tf.flags & ATA_TFLAG_POLLING)
5022 ata_qc_set_polling(qc);
1da177e4 5023
e5338254 5024 ata_tf_to_host(ap, &qc->tf);
312f7da2 5025
54f00389
AL
5026 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5027 /* PIO data out protocol */
5028 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5029 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5030
5031 /* always send first data block using
e27486db 5032 * the ata_pio_task() codepath.
54f00389 5033 */
312f7da2 5034 } else {
54f00389
AL
5035 /* PIO data in protocol */
5036 ap->hsm_task_state = HSM_ST;
5037
5038 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5039 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5040
5041 /* if polling, ata_pio_task() handles the rest.
5042 * otherwise, interrupt handler takes over from here.
5043 */
312f7da2
AL
5044 }
5045
1da177e4
LT
5046 break;
5047
1da177e4 5048 case ATA_PROT_ATAPI:
1da177e4 5049 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5050 if (qc->tf.flags & ATA_TFLAG_POLLING)
5051 ata_qc_set_polling(qc);
5052
e5338254 5053 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5054
312f7da2
AL
5055 ap->hsm_task_state = HSM_ST_FIRST;
5056
5057 /* send cdb by polling if no cdb interrupt */
5058 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5059 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5060 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5061 break;
5062
5063 case ATA_PROT_ATAPI_DMA:
587005de 5064 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5065
1da177e4
LT
5066 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5067 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5068 ap->hsm_task_state = HSM_ST_FIRST;
5069
5070 /* send cdb by polling if no cdb interrupt */
5071 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5072 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5073 break;
5074
5075 default:
5076 WARN_ON(1);
9a3d9eb0 5077 return AC_ERR_SYSTEM;
1da177e4
LT
5078 }
5079
5080 return 0;
5081}
5082
1da177e4
LT
5083/**
5084 * ata_host_intr - Handle host interrupt for given (port, task)
5085 * @ap: Port on which interrupt arrived (possibly...)
5086 * @qc: Taskfile currently active in engine
5087 *
5088 * Handle host interrupt for given queued command. Currently,
5089 * only DMA interrupts are handled. All other commands are
5090 * handled via polling with interrupts disabled (nIEN bit).
5091 *
5092 * LOCKING:
cca3974e 5093 * spin_lock_irqsave(host lock)
1da177e4
LT
5094 *
5095 * RETURNS:
5096 * One if interrupt was handled, zero if not (shared irq).
5097 */
5098
5099inline unsigned int ata_host_intr (struct ata_port *ap,
5100 struct ata_queued_cmd *qc)
5101{
ea54763f 5102 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 5103 u8 status, host_stat = 0;
1da177e4 5104
312f7da2 5105 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5106 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5107
312f7da2
AL
5108 /* Check whether we are expecting interrupt in this state */
5109 switch (ap->hsm_task_state) {
5110 case HSM_ST_FIRST:
6912ccd5
AL
5111 /* Some pre-ATAPI-4 devices assert INTRQ
5112 		 * in this state when ready to receive the CDB.
5113 */
1da177e4 5114
312f7da2
AL
5115 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5116 		 * The flag is set only for ATAPI devices.
5117 * No need to check is_atapi_taskfile(&qc->tf) again.
5118 */
5119 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5120 goto idle_irq;
1da177e4 5121 break;
312f7da2
AL
5122 case HSM_ST_LAST:
5123 if (qc->tf.protocol == ATA_PROT_DMA ||
5124 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5125 /* check status of DMA engine */
5126 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
5127 VPRINTK("ata%u: host_stat 0x%X\n",
5128 ap->print_id, host_stat);
312f7da2
AL
5129
5130 /* if it's not our irq... */
5131 if (!(host_stat & ATA_DMA_INTR))
5132 goto idle_irq;
5133
5134 /* before we do anything else, clear DMA-Start bit */
5135 ap->ops->bmdma_stop(qc);
a4f16610
AL
5136
5137 if (unlikely(host_stat & ATA_DMA_ERR)) {
5138 /* error when transfering data to/from memory */
5139 qc->err_mask |= AC_ERR_HOST_BUS;
5140 ap->hsm_task_state = HSM_ST_ERR;
5141 }
312f7da2
AL
5142 }
5143 break;
5144 case HSM_ST:
5145 break;
1da177e4
LT
5146 default:
5147 goto idle_irq;
5148 }
5149
312f7da2
AL
5150 /* check altstatus */
5151 status = ata_altstatus(ap);
5152 if (status & ATA_BUSY)
5153 goto idle_irq;
1da177e4 5154
312f7da2
AL
5155 /* check main status, clearing INTRQ */
5156 status = ata_chk_status(ap);
5157 if (unlikely(status & ATA_BUSY))
5158 goto idle_irq;
1da177e4 5159
312f7da2
AL
5160 /* ack bmdma irq events */
5161 ap->ops->irq_clear(ap);
1da177e4 5162
bb5cb290 5163 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5164
5165 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5166 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5167 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5168
1da177e4
LT
5169 return 1; /* irq handled */
5170
5171idle_irq:
5172 ap->stats.idle_irq++;
5173
5174#ifdef ATA_IRQ_TRAP
5175 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5176 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5177 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5178 return 1;
1da177e4
LT
5179 }
5180#endif
5181 return 0; /* irq not handled */
5182}
5183
5184/**
5185 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5186 * @irq: irq line (unused)
cca3974e 5187 * @dev_instance: pointer to our ata_host information structure
1da177e4 5188 *
0cba632b
JG
5189 * Default interrupt handler for PCI IDE devices. Calls
5190 * ata_host_intr() for each port that is not disabled.
5191 *
1da177e4 5192 * LOCKING:
cca3974e 5193 * Obtains host lock during operation.
1da177e4
LT
5194 *
5195 * RETURNS:
0cba632b 5196 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5197 */
5198
7d12e780 5199irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5200{
cca3974e 5201 struct ata_host *host = dev_instance;
1da177e4
LT
5202 unsigned int i;
5203 unsigned int handled = 0;
5204 unsigned long flags;
5205
5206 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5207 spin_lock_irqsave(&host->lock, flags);
1da177e4 5208
cca3974e 5209 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5210 struct ata_port *ap;
5211
cca3974e 5212 ap = host->ports[i];
c1389503 5213 if (ap &&
029f5468 5214 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5215 struct ata_queued_cmd *qc;
5216
5217 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5218 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5219 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5220 handled |= ata_host_intr(ap, qc);
5221 }
5222 }
5223
cca3974e 5224 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5225
5226 return IRQ_RETVAL(handled);
5227}
5228
34bf2170
TH
5229/**
5230 * sata_scr_valid - test whether SCRs are accessible
5231 * @ap: ATA port to test SCR accessibility for
5232 *
5233 * Test whether SCRs are accessible for @ap.
5234 *
5235 * LOCKING:
5236 * None.
5237 *
5238 * RETURNS:
5239 * 1 if SCRs are accessible, 0 otherwise.
5240 */
5241int sata_scr_valid(struct ata_port *ap)
5242{
5243 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5244}
5245
5246/**
5247 * sata_scr_read - read SCR register of the specified port
5248 * @ap: ATA port to read SCR for
5249 * @reg: SCR to read
5250 * @val: Place to store read value
5251 *
5252 * Read SCR register @reg of @ap into *@val. This function is
5253 * guaranteed to succeed if the cable type of the port is SATA
5254 * and the port implements ->scr_read.
5255 *
5256 * LOCKING:
5257 * None.
5258 *
5259 * RETURNS:
5260 * 0 on success, negative errno on failure.
5261 */
5262int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5263{
5264 if (sata_scr_valid(ap)) {
5265 *val = ap->ops->scr_read(ap, reg);
5266 return 0;
5267 }
5268 return -EOPNOTSUPP;
5269}
5270
5271/**
5272 * sata_scr_write - write SCR register of the specified port
5273 * @ap: ATA port to write SCR for
5274 * @reg: SCR to write
5275 * @val: value to write
5276 *
5277 * Write @val to SCR register @reg of @ap. This function is
5278 * guaranteed to succeed if the cable type of the port is SATA
5279 * and the port implements ->scr_write.
5280 *
5281 * LOCKING:
5282 * None.
5283 *
5284 * RETURNS:
5285 * 0 on success, negative errno on failure.
5286 */
5287int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5288{
5289 if (sata_scr_valid(ap)) {
5290 ap->ops->scr_write(ap, reg, val);
5291 return 0;
5292 }
5293 return -EOPNOTSUPP;
5294}
5295
5296/**
5297 * sata_scr_write_flush - write SCR register of the specified port and flush
5298 * @ap: ATA port to write SCR for
5299 * @reg: SCR to write
5300 * @val: value to write
5301 *
5302 * This function is identical to sata_scr_write() except that this
5303 * function performs flush after writing to the register.
5304 *
5305 * LOCKING:
5306 * None.
5307 *
5308 * RETURNS:
5309 * 0 on success, negative errno on failure.
5310 */
5311int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5312{
5313 if (sata_scr_valid(ap)) {
5314 ap->ops->scr_write(ap, reg, val);
5315 ap->ops->scr_read(ap, reg);
5316 return 0;
5317 }
5318 return -EOPNOTSUPP;
5319}
5320
5321/**
5322 * ata_port_online - test whether the given port is online
5323 * @ap: ATA port to test
5324 *
5325 * Test whether @ap is online. Note that this function returns 0
5326 * if online status of @ap cannot be obtained, so
5327 * ata_port_online(ap) != !ata_port_offline(ap).
5328 *
5329 * LOCKING:
5330 * None.
5331 *
5332 * RETURNS:
5333 * 1 if the port online status is available and online.
5334 */
5335int ata_port_online(struct ata_port *ap)
5336{
5337 u32 sstatus;
5338
5339 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5340 return 1;
5341 return 0;
5342}
5343
5344/**
5345 * ata_port_offline - test whether the given port is offline
5346 * @ap: ATA port to test
5347 *
5348 * Test whether @ap is offline. Note that this function returns
5349 * 0 if offline status of @ap cannot be obtained, so
5350 * ata_port_online(ap) != !ata_port_offline(ap).
5351 *
5352 * LOCKING:
5353 * None.
5354 *
5355 * RETURNS:
5356 * 1 if the port offline status is available and offline.
5357 */
5358int ata_port_offline(struct ata_port *ap)
5359{
5360 u32 sstatus;
5361
5362 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5363 return 1;
5364 return 0;
5365}
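
Both helpers test the DET field (bits 3:0) of SStatus for the value 3, "device present and PHY communication established", per the SATA specification. A small decoder for the low SStatus fields:

#include <stdio.h>

/* Decode the low fields of a SATA SStatus value: DET in bits 3:0,
 * SPD in bits 7:4, IPM in bits 11:8 (SATA specification).  DET == 3
 * is the condition ata_port_online() checks for.
 */
static void decode_sstatus(unsigned int sstatus)
{
	unsigned int det = sstatus & 0xf;
	unsigned int spd = (sstatus >> 4) & 0xf;
	unsigned int ipm = (sstatus >> 8) & 0xf;

	printf("SStatus %08x: DET=%x SPD=%x IPM=%x -> %s\n",
	       sstatus, det, spd, ipm,
	       det == 0x3 ? "online" : "not online");
}

int main(void)
{
	decode_sstatus(0x00000113);	/* Gen1 link up, interface active */
	decode_sstatus(0x00000000);	/* nothing attached */
	return 0;
}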
0baab86b 5366
77b08fb5 5367int ata_flush_cache(struct ata_device *dev)
9b847548 5368{
977e6b9f 5369 unsigned int err_mask;
9b847548
JA
5370 u8 cmd;
5371
5372 if (!ata_try_flush_cache(dev))
5373 return 0;
5374
6fc49adb 5375 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5376 cmd = ATA_CMD_FLUSH_EXT;
5377 else
5378 cmd = ATA_CMD_FLUSH;
5379
977e6b9f
TH
5380 err_mask = ata_do_simple_cmd(dev, cmd);
5381 if (err_mask) {
5382 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5383 return -EIO;
5384 }
5385
5386 return 0;
9b847548
JA
5387}
5388
6ffa01d8 5389#ifdef CONFIG_PM
cca3974e
JG
5390static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5391 unsigned int action, unsigned int ehi_flags,
5392 int wait)
500530f6
TH
5393{
5394 unsigned long flags;
5395 int i, rc;
5396
cca3974e
JG
5397 for (i = 0; i < host->n_ports; i++) {
5398 struct ata_port *ap = host->ports[i];
500530f6
TH
5399
5400 /* Previous resume operation might still be in
5401 * progress. Wait for PM_PENDING to clear.
5402 */
5403 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5404 ata_port_wait_eh(ap);
5405 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5406 }
5407
5408 /* request PM ops to EH */
5409 spin_lock_irqsave(ap->lock, flags);
5410
5411 ap->pm_mesg = mesg;
5412 if (wait) {
5413 rc = 0;
5414 ap->pm_result = &rc;
5415 }
5416
5417 ap->pflags |= ATA_PFLAG_PM_PENDING;
5418 ap->eh_info.action |= action;
5419 ap->eh_info.flags |= ehi_flags;
5420
5421 ata_port_schedule_eh(ap);
5422
5423 spin_unlock_irqrestore(ap->lock, flags);
5424
5425 /* wait and check result */
5426 if (wait) {
5427 ata_port_wait_eh(ap);
5428 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5429 if (rc)
5430 return rc;
5431 }
5432 }
5433
5434 return 0;
5435}

/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

 fail:
	ata_host_resume(host);
	return rc;
}
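
/*
 * Example (illustrative only): an LLDD suspend hook first quiesces the
 * host via EH and only then powers down the controller; this mirrors
 * ata_pci_device_suspend() further down in this file.  "my_suspend" is
 * a hypothetical driver hook, not a libata symbol:
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *		int rc;
 *
 *		rc = ata_host_suspend(host, mesg);
 *		if (rc)
 *			return rc;
 *		ata_pci_device_do_suspend(pdev, mesg);
 *		return 0;
 *	}
 */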

/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
#endif /* CONFIG_PM */

/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
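
/*
 * Example (illustrative only): a driver that needs private per-port
 * state can wrap ata_port_start() in its own port_start() hook;
 * "my_port_start" and MY_PRIV_SZ are hypothetical:
 *
 *	static int my_port_start(struct ata_port *ap)
 *	{
 *		void *pp;
 *		int rc;
 *
 *		rc = ata_port_start(ap);	(sets up ap->prd)
 *		if (rc)
 *			return rc;
 *
 *		pp = devm_kzalloc(ap->dev, MY_PRIV_SZ, GFP_KERNEL);
 *		if (!pp)
 *			return -ENOMEM;
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */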

/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}

/**
 *	ata_port_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: Host to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host *host,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = ata_print_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	if (port_no == 1 && ent->pinfo2) {
		ap->pio_mask = ent->pinfo2->pio_mask;
		ap->mwdma_mask = ent->pinfo2->mwdma_mask;
		ap->udma_mask = ent->pinfo2->udma_mask;
		ap->flags |= ent->pinfo2->flags;
		ap->ops = ent->pinfo2->port_ops;
	} else {
		ap->pio_mask = ent->pio_mask;
		ap->mwdma_mask = ent->mwdma_mask;
		ap->udma_mask = ent->udma_mask;
		ap->flags |= ent->port_flags;
		ap->ops = ent->port_ops;
	}
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}

/**
 *	ata_port_init_shost - Initialize SCSI host associated with ATA port
 *	@ap: ATA port to initialize SCSI host for
 *	@shost: SCSI host associated with @ap
 *
 *	Initialize SCSI host @shost associated with ATA port @ap.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
{
	ap->scsi_host = shost;

	shost->unique_id = ap->print_id;
	shost->max_id = 16;
	shost->max_lun = 1;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
}

/**
 *	ata_port_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host: Collection of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, NULL on error.
 */
static struct ata_port *ata_port_add(const struct ata_probe_ent *ent,
				     struct ata_host *host,
				     unsigned int port_no)
{
	struct Scsi_Host *shost;
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	if (!ent->port_ops->error_handler &&
	    !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!shost)
		return NULL;

	shost->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(shost);

	ata_port_init(ap, host, ent, port_no);
	ata_port_init_shost(ap, shost);

	return ap;
}

static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap)
			scsi_host_put(ap->scsi_host);

		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}

/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */

void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}

/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int i;
	struct device *dev = ent->dev;
	struct ata_host *host;
	int rc = 0;	/* initialized: early failures reach err_out before rc is set */

	DPRINTK("ENTER\n");

	if (ent->irq == 0) {
		dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
		return 0;
	}

	if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
		return 0;

	/* alloc a container for our list of ATA ports (buses) */
	host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
			    (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host)
		goto err_out;
	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
	host->n_ports = ent->n_ports;
	host->irq = ent->irq;
	host->irq2 = ent->irq2;
	host->iomap = ent->iomap;
	host->private_data = ent->private_data;

	/* register each port bound to this device */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;
		int irq_line = ent->irq;

		ap = ata_port_add(ent, host, i);
		host->ports[i] = ap;
		if (!ap)
			goto err_out;

		/* dummy? */
		if (ent->dummy_port_mask & (1 << i)) {
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		/* start port */
		rc = ap->ops->port_start(ap);
		if (rc) {
			host->ports[i] = NULL;
			scsi_host_put(ap->scsi_host);
			goto err_out;
		}

		/* Report the secondary IRQ for second channel legacy */
		if (i == 1 && ent->irq2)
			irq_line = ent->irq2;

		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
				 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				 (ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
				"ctl 0x%p bmdma 0x%p irq %d\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				irq_line);

		/* freeze port before requesting IRQ */
		ata_eh_freeze_port(ap);
	}

	/* obtain irq, that may be shared between channels */
	rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
			      ent->irq_flags, DRV_NAME, host);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* do we have a second IRQ for the other channel, eg legacy mode */
	if (ent->irq2) {
		/* We will get weird core code crashes later if this is true
		   so trap it now */
		BUG_ON(ent->irq == ent->irq2);

		rc = devm_request_irq(dev, ent->irq2,
				      ent->port_ops->irq_handler, ent->irq_flags,
				      DRV_NAME, host);
		if (rc) {
			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
				   ent->irq2, rc);
			goto err_out;
		}
	}

	/* resource acquisition complete */
	devres_remove_group(dev, ata_device_add);

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 scontrol;
		int rc;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->scsi_host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports;	/* success */

 err_out:
	devres_release_group(dev, ata_device_add);
	VPRINTK("EXIT, returning %d\n", rc);
	return 0;
}

/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retries will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}

/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);
}

struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->port_flags = port->flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;
	probe_ent->private_data = port->private_data;

	return probe_ent;
}
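
/*
 * Example (illustrative only): an LLDD probe routine builds a
 * probe_ent and hands it to ata_device_add(), treating a zero return
 * as failure; the "..." lines stand for chip-specific setup such as
 * n_ports, irq, and the port[] I/O addresses:
 *
 *	probe_ent = ata_probe_ent_alloc(dev, port_info);
 *	if (!probe_ent)
 *		return -ENOMEM;
 *	probe_ent->n_ports = 1;
 *	...
 *	if (!ata_device_add(probe_ent))
 *		return -ENODEV;
 */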

/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
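
/*
 * Example (illustrative only): after mapping the command block, a
 * driver fills in cmd_addr and the control register address, then lets
 * ata_std_ports() derive the rest; the 0x206 ctl offset below is the
 * classic legacy layout and is an assumption, not universal:
 *
 *	ap->ioaddr.cmd_addr = base;
 *	ap->ioaddr.altstatus_addr =
 *	ap->ioaddr.ctl_addr = base + 0x206;
 *	ata_std_ports(&ap->ioaddr);
 */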

#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
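
/*
 * Example (illustrative only, modelled on PIIX-style controllers):
 * struct pci_bits describes a config-space test as
 * { reg, width (bytes), mask, val }; the register and bit chosen below
 * are an assumption for demonstration:
 *
 *	static const struct pci_bits my_enable_bits = {
 *		0x41, 1, 0x80, 0x80
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		return -ENOENT;		(port is disabled)
 */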

#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
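
/*
 * Example (illustrative only): a driver resume hook that reprograms
 * controller registers must still propagate an
 * ata_pci_device_do_resume() failure, as ata_pci_device_resume() does
 * above, rather than touch hardware that failed to re-enable;
 * "my_resume" is hypothetical:
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *		int rc;
 *
 *		rc = ata_pci_device_do_resume(pdev);
 *		if (rc)
 *			return rc;
 *		...	(reinit controller registers here)
 *		ata_host_resume(host);
 *		return 0;
 *	}
 */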
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */


static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
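
/*
 * Example (illustrative only): interrupt paths use ata_ratelimit() to
 * keep noisy messages down to roughly five per second; ERR_IRQ stands
 * in for a controller-specific status bit:
 *
 *	if (unlikely(status & ERR_IRQ) && ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious error interrupt\n");
 */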

/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
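
/*
 * Example (illustrative only): waiting up to a second for BSY to clear
 * in an MMIO status register, polling every 10ms; PORT_TF_STATUS is a
 * hypothetical register offset:
 *
 *	u32 status;
 *
 *	status = ata_wait_register(mmio + PORT_TF_STATUS,
 *				   ATA_BUSY, ATA_BUSY, 10, 1000);
 *	if (status & ATA_BUSY)
 *		return -EBUSY;		(timed out)
 */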

/*
 * Dummy port_ops
 */
static void ata_dummy_noret(struct ata_port *ap)	{ }
static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
#endif /* CONFIG_PM */

EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
EXPORT_SYMBOL_GPL(ata_irq_ack);
EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);