SATA: use NULL for ptrs
[linux-block.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
#define DRV_VERSION	"2.10"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/* forward declarations for helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

/* monotonically increasing id handed out to ports */
static unsigned int ata_unique_id = 1;
/* workqueue used for port_task (PIO task etc.) */
static struct workqueue_struct *ata_wq;

/* auxiliary workqueue, shared with EH and hotplug paths */
struct workqueue_struct *ata_aux_wq;

/* module parameters; all are read-only (0444) after load */
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int noacpi;
module_param(noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
104
0baab86b 105
1da177e4
LT
106/**
107 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
108 * @tf: Taskfile to convert
109 * @fis: Buffer into which data will output
110 * @pmp: Port multiplier port
111 *
112 * Converts a standard ATA taskfile to a Serial ATA
113 * FIS structure (Register - Host to Device).
114 *
115 * LOCKING:
116 * Inherited from caller.
117 */
118
057ace5e 119void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
1da177e4
LT
120{
121 fis[0] = 0x27; /* Register - Host to Device FIS */
122 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
123 bit 7 indicates Command FIS */
124 fis[2] = tf->command;
125 fis[3] = tf->feature;
126
127 fis[4] = tf->lbal;
128 fis[5] = tf->lbam;
129 fis[6] = tf->lbah;
130 fis[7] = tf->device;
131
132 fis[8] = tf->hob_lbal;
133 fis[9] = tf->hob_lbam;
134 fis[10] = tf->hob_lbah;
135 fis[11] = tf->hob_feature;
136
137 fis[12] = tf->nsect;
138 fis[13] = tf->hob_nsect;
139 fis[14] = 0;
140 fis[15] = tf->ctl;
141
142 fis[16] = 0;
143 fis[17] = 0;
144 fis[18] = 0;
145 fis[19] = 0;
146}
147
148/**
149 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
150 * @fis: Buffer from which data will be input
151 * @tf: Taskfile to output
152 *
e12a1be6 153 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
154 *
155 * LOCKING:
156 * Inherited from caller.
157 */
158
057ace5e 159void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
160{
161 tf->command = fis[2]; /* status */
162 tf->feature = fis[3]; /* error */
163
164 tf->lbal = fis[4];
165 tf->lbam = fis[5];
166 tf->lbah = fis[6];
167 tf->device = fis[7];
168
169 tf->hob_lbal = fis[8];
170 tf->hob_lbam = fis[9];
171 tf->hob_lbah = fis[10];
172
173 tf->nsect = fis[12];
174 tf->hob_nsect = fis[13];
175}
176
8cbd6df1
AL
/*
 * Read/write command lookup table, indexed as
 *	index (0 = pio multi, 8 = pio, 16 = dma) + fua(4) + lba48(2) + write(1).
 * A zero entry means the combination is invalid (e.g. FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
206
/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	0 on success, -1 if no valid command exists for the
 *	requested flag combination (see ata_rw_cmds[]).
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	/* encode flags into the ata_rw_cmds[] index offsets */
	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		/* multi_count selects READ/WRITE MULTI variants */
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
247
35b649fe
TH
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			/* high-order bytes come from the HOB registers */
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			/* LBA28: bits 24-27 live in the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		/* CHS: convert cylinder/head/sector to a linear block */
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
290
bd056d7e
TH
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	/* NCQ path: device must have NCQ enabled (and not PIO/NCQ_OFF),
	 * and internal commands never use NCQ */
	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
			   ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
	    likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* FPDMA: tag goes in nsect bits 3-7, count in feature regs */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
415
cb95d562
TH
416/**
417 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
418 * @pio_mask: pio_mask
419 * @mwdma_mask: mwdma_mask
420 * @udma_mask: udma_mask
421 *
422 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
423 * unsigned int xfer_mask.
424 *
425 * LOCKING:
426 * None.
427 *
428 * RETURNS:
429 * Packed xfer_mask.
430 */
431static unsigned int ata_pack_xfermask(unsigned int pio_mask,
432 unsigned int mwdma_mask,
433 unsigned int udma_mask)
434{
435 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
436 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
437 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
438}
439
c0489e4e
TH
440/**
441 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
442 * @xfer_mask: xfer_mask to unpack
443 * @pio_mask: resulting pio_mask
444 * @mwdma_mask: resulting mwdma_mask
445 * @udma_mask: resulting udma_mask
446 *
447 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
448 * Any NULL distination masks will be ignored.
449 */
450static void ata_unpack_xfermask(unsigned int xfer_mask,
451 unsigned int *pio_mask,
452 unsigned int *mwdma_mask,
453 unsigned int *udma_mask)
454{
455 if (pio_mask)
456 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
457 if (mwdma_mask)
458 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
459 if (udma_mask)
460 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
461}
462
/* Translation table mapping each xfer_mask bit range to its XFER_* base. */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and width within xfer_mask */
	u8 base;		/* first XFER_* value of this range */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },		/* sentinel: negative shift terminates scans */
};
472
473/**
474 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
475 * @xfer_mask: xfer_mask of interest
476 *
477 * Return matching XFER_* value for @xfer_mask. Only the highest
478 * bit of @xfer_mask is considered.
479 *
480 * LOCKING:
481 * None.
482 *
483 * RETURNS:
484 * Matching XFER_* value, 0 if no match found.
485 */
486static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
487{
488 int highbit = fls(xfer_mask) - 1;
489 const struct ata_xfer_ent *ent;
490
491 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
492 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
493 return ent->base + highbit - ent->shift;
494 return 0;
495}
496
497/**
498 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
499 * @xfer_mode: XFER_* of interest
500 *
501 * Return matching xfer_mask for @xfer_mode.
502 *
503 * LOCKING:
504 * None.
505 *
506 * RETURNS:
507 * Matching xfer_mask, 0 if no match found.
508 */
509static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
510{
511 const struct ata_xfer_ent *ent;
512
513 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
514 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
515 return 1 << (ent->shift + xfer_mode - ent->base);
516 return 0;
517}
518
519/**
520 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
521 * @xfer_mode: XFER_* of interest
522 *
523 * Return matching xfer_shift for @xfer_mode.
524 *
525 * LOCKING:
526 * None.
527 *
528 * RETURNS:
529 * Matching xfer_shift, -1 if no match found.
530 */
531static int ata_xfer_mode2shift(unsigned int xfer_mode)
532{
533 const struct ata_xfer_ent *ent;
534
535 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
536 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
537 return ent->shift;
538 return -1;
539}
540
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* indexed by bit position within xfer_mask; order matches
	 * ATA_SHIFT_PIO / ATA_SHIFT_MWDMA / ATA_SHIFT_UDMA layout */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
586
4c360c81
TH
/* Map a SATA link speed number (1-based) to a human-readable string. */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int idx = spd - 1;

	/* spd is 1-based; 0 and out-of-range values are unknown */
	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[idx];
}
598
/* Disable @dev: warn, force the slowest transfer mode and mark the
 * device's class as unsupported.  NOTE(review): the dev->class++ relies
 * on the ATA_DEV_* enum pairing each class with its *_UNSUP value —
 * confirm against the enum in <linux/libata.h>. */
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
		dev->class++;
	}
}
608
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	1 if a device responded, 0 otherwise.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write the test pattern three times; only the last pair
	 * needs to be echoed back */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
651
1da177e4
LT
652/**
653 * ata_dev_classify - determine device type based on ATA-spec signature
654 * @tf: ATA taskfile register set for device to be identified
655 *
656 * Determine from taskfile register contents whether a device is
657 * ATA or ATAPI, as per "Signature and persistence" section
658 * of ATA/PI spec (volume 1, sect 5.14).
659 *
660 * LOCKING:
661 * None.
662 *
663 * RETURNS:
664 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
665 * the event of failure.
666 */
667
057ace5e 668unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
669{
670 /* Apple's open source Darwin code hints that some devices only
671 * put a proper signature into the LBA mid/high registers,
672 * So, we only check those. It's sufficient for uniqueness.
673 */
674
675 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
676 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
677 DPRINTK("found ATA device by sig\n");
678 return ATA_DEV_ATA;
679 }
680
681 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
682 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
683 DPRINTK("found ATAPI device by sig\n");
684 return ATA_DEV_ATAPI;
685 }
686
687 DPRINTK("unknown device\n");
688 return ATA_DEV_UNKNOWN;
689}
690
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;	/* feature reg aliases the error reg on read */
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* all devices passed diagnostics -- do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* master ok, slave failed -- do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	/* status of 0 with an ATA signature means no device present */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
749
750/**
6a62a04d 751 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
752 * @id: IDENTIFY DEVICE results we will examine
753 * @s: string into which data is output
754 * @ofs: offset into identify device page
755 * @len: length of string to return. must be an even number.
756 *
757 * The strings in the IDENTIFY DEVICE page are broken up into
758 * 16-bit chunks. Run through the string, and output each
759 * 8-bit chunk linearly, regardless of platform.
760 *
761 * LOCKING:
762 * caller.
763 */
764
6a62a04d
TH
765void ata_id_string(const u16 *id, unsigned char *s,
766 unsigned int ofs, unsigned int len)
1da177e4
LT
767{
768 unsigned int c;
769
770 while (len > 0) {
771 c = id[ofs] >> 8;
772 *s = c;
773 s++;
774
775 c = id[ofs] & 0xff;
776 *s = c;
777 s++;
778
779 ofs++;
780 len -= 2;
781 }
782}
783
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	/* enforce the documented "odd length" contract */
	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	/* trim trailing spaces and NUL-terminate */
	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
0baab86b 812
2940740b
TH
813static u64 ata_id_n_sectors(const u16 *id)
814{
815 if (ata_id_has_lba(id)) {
816 if (ata_id_has_lba48(id))
817 return ata_id_u64(id, 100);
818 else
819 return ata_id_u32(id, 60);
820 } else {
821 if (ata_id_current_chs_valid(id))
822 return ata_id_u32(id, 57);
823 else
824 return id[1] * id[3] * id[6];
825 }
826}
827
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations
 *	for controllers that do not need device selection.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
843
0baab86b 844
1da177e4
LT
845/**
846 * ata_std_dev_select - Select device 0/1 on ATA bus
847 * @ap: ATA channel to manipulate
848 * @device: ATA device (numbered from zero) to select
849 *
850 * Use the method defined in the ATA specification to
851 * make either device 0, or device 1, active on the
0baab86b
EF
852 * ATA channel. Works with both PIO and MMIO.
853 *
854 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
855 *
856 * LOCKING:
857 * caller.
858 */
859
860void ata_std_dev_select (struct ata_port *ap, unsigned int device)
861{
862 u8 tmp;
863
864 if (device == 0)
865 tmp = ATA_DEVICE_OBS;
866 else
867 tmp = ATA_DEVICE_OBS | ATA_DEV1;
868
0d5ff566 869 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
870 ata_pause(ap); /* needed; also flushes, for mmio */
871}
872
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
				"device %u, wait %u\n", ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		/* ATAPI devices may need extra settle time after select */
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
910
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
949
cb95d562
TH
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 are always supported */
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1018
86e45b6b
TH
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	/* a flush is in progress; don't queue new work */
	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
1054
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* raise the flag under lock so a concurrently running task sees it */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		/* cancel failed => work may have started; flush again */
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1097
7102d230 1098static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1099{
77853bf2 1100 struct completion *waiting = qc->private_data;
a2a7a662 1101
a2a7a662 1102 complete(waiting);
a2a7a662
TH
1103}
1104
1105/**
2432697b 1106 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1107 * @dev: Device to which the command is sent
1108 * @tf: Taskfile registers for the command and the result
d69cf37d 1109 * @cdb: CDB for packet command
a2a7a662 1110 * @dma_dir: Data tranfer direction of the command
2432697b
TH
1111 * @sg: sg list for the data buffer of the command
1112 * @n_elem: Number of sg entries
a2a7a662
TH
1113 *
1114 * Executes libata internal command with timeout. @tf contains
1115 * command on entry and result on return. Timeout and error
1116 * conditions are reported via return value. No recovery action
1117 * is taken after a command times out. It's caller's duty to
1118 * clean up after timeout.
1119 *
1120 * LOCKING:
1121 * None. Should be called with kernel context, might sleep.
551e8889
TH
1122 *
1123 * RETURNS:
1124 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1125 */
2432697b
TH
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;	/* saved for the timeout message */
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* The internal tag must be free here; a set bit means a second
	 * internal command is in flight, which is a bug in the caller.
	 */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* Save and clear the port's active-command bookkeeping so the
	 * internal command runs alone; restored before returning.
	 */
	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		/* total transfer length is the sum of all sg entries */
		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* rc == 0 means the wait timed out */
	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* A failed qc should always carry an error mask; fabricate one
	 * if the LLDD forgot so callers see a failure.
	 */
	if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				       "zero err_mask for failed "
				       "internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	/* result taskfile and error mask are returned to the caller */
	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1272
2432697b 1273/**
33480a0e 1274 * ata_exec_internal - execute libata internal command
2432697b
TH
1275 * @dev: Device to which the command is sent
1276 * @tf: Taskfile registers for the command and the result
1277 * @cdb: CDB for packet command
1278 * @dma_dir: Data tranfer direction of the command
1279 * @buf: Data buffer of the command
1280 * @buflen: Length of data buffer
1281 *
1282 * Wrapper around ata_exec_internal_sg() which takes simple
1283 * buffer instead of sg list.
1284 *
1285 * LOCKING:
1286 * None. Should be called with kernel context, might sleep.
1287 *
1288 * RETURNS:
1289 * Zero on success, AC_ERR_* mask on failure
1290 */
1291unsigned ata_exec_internal(struct ata_device *dev,
1292 struct ata_taskfile *tf, const u8 *cdb,
1293 int dma_dir, void *buf, unsigned int buflen)
1294{
33480a0e
TH
1295 struct scatterlist *psg = NULL, sg;
1296 unsigned int n_elem = 0;
2432697b 1297
33480a0e
TH
1298 if (dma_dir != DMA_NONE) {
1299 WARN_ON(!buf);
1300 sg_init_one(&sg, buf, buflen);
1301 psg = &sg;
1302 n_elem++;
1303 }
2432697b 1304
33480a0e 1305 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1306}
1307
977e6b9f
TH
1308/**
1309 * ata_do_simple_cmd - execute simple internal command
1310 * @dev: Device to which the command is sent
1311 * @cmd: Opcode to execute
1312 *
1313 * Execute a 'simple' command, that only consists of the opcode
1314 * 'cmd' itself, without filling any other registers
1315 *
1316 * LOCKING:
1317 * Kernel thread context (may sleep).
1318 *
1319 * RETURNS:
1320 * Zero on success, AC_ERR_* mask on failure
e58eb583 1321 */
77b08fb5 1322unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1323{
1324 struct ata_taskfile tf;
e58eb583
TH
1325
1326 ata_tf_init(dev, &tf);
1327
1328 tf.command = cmd;
1329 tf.flags |= ATA_TFLAG_DEVICE;
1330 tf.protocol = ATA_PROT_NODATA;
1331
977e6b9f 1332 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1333}
1334
1bc4ccff
AC
1335/**
1336 * ata_pio_need_iordy - check if iordy needed
1337 * @adev: ATA device
1338 *
1339 * Check if the current speed of the device requires IORDY. Used
1340 * by various controllers for chip configuration.
1341 */
1342
1343unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1344{
1345 int pio;
1346 int speed = adev->pio_mode - XFER_PIO_0;
1347
1348 if (speed < 2)
1349 return 0;
1350 if (speed > 2)
1351 return 1;
2e9edbf8 1352
1bc4ccff
AC
1353 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1354
1355 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1356 pio = adev->id[ATA_ID_EIDE_PIO];
1357 /* Is the speed faster than the drive allows non IORDY ? */
1358 if (pio) {
1359 /* This is cycle times not frequency - watch the logic! */
1360 if (pio > 240) /* PIO2 is 240nS per cycle */
1361 return 1;
1362 return 0;
1363 }
1364 }
1365 return 0;
1366}
1367
1da177e4 1368/**
49016aca 1369 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1370 * @dev: target device
1371 * @p_class: pointer to class of the target device (may be changed)
bff04647 1372 * @flags: ATA_READID_* flags
fe635c7e 1373 * @id: buffer to read IDENTIFY data into
1da177e4 1374 *
49016aca
TH
1375 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1376 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1377 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1378 * for pre-ATA4 drives.
1da177e4
LT
1379 *
1380 * LOCKING:
49016aca
TH
1381 * Kernel thread context (may sleep)
1382 *
1383 * RETURNS:
1384 * 0 on success, -errno otherwise.
1da177e4 1385 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;	/* human-readable failure cause for the log */
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	/* ATA and ATAPI devices use different IDENTIFY opcodes */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		/* NODEV hint from polling means nothing is attached */
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	/* the ID data must match the class we asked for */
	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			/* id[3]/id[6] are the default heads/sectors */
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1493
3373efd8 1494static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1495{
3373efd8 1496 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1497}
1498
a6e6ce8e
TH
1499static void ata_dev_config_ncq(struct ata_device *dev,
1500 char *desc, size_t desc_sz)
1501{
1502 struct ata_port *ap = dev->ap;
1503 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1504
1505 if (!ata_id_has_ncq(dev->id)) {
1506 desc[0] = '\0';
1507 return;
1508 }
6919a0a6
AC
1509 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1510 snprintf(desc, desc_sz, "NCQ (not used)");
1511 return;
1512 }
a6e6ce8e 1513 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1514 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1515 dev->flags |= ATA_DFLAG_NCQ;
1516 }
1517
1518 if (hdepth >= ddepth)
1519 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1520 else
1521 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1522}
1523
e6d902a3
BK
1524static void ata_set_port_max_cmd_len(struct ata_port *ap)
1525{
1526 int i;
1527
cca3974e
JG
1528 if (ap->scsi_host) {
1529 unsigned int len = 0;
1530
e6d902a3 1531 for (i = 0; i < ATA_MAX_DEVICES; i++)
cca3974e
JG
1532 len = max(len, ap->device[i].cdb_len);
1533
1534 ap->scsi_host->max_cmd_len = len;
e6d902a3
BK
1535 }
1536}
1537
49016aca 1538/**
ffeae418 1539 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1540 * @dev: Target device to configure
1541 *
1542 * Configure @dev according to @dev->id. Generic and low-level
1543 * driver specific fixups are also applied.
49016aca
TH
1544 *
1545 * LOCKING:
ffeae418
TH
1546 * Kernel thread context (may sleep)
1547 *
1548 * RETURNS:
1549 * 0 on success, -errno otherwise
49016aca 1550 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;	/* IDENTIFY data read earlier */
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	/* nothing to do for a disabled device */
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO,
			       "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
			       __FUNCTION__, ap->id, dev->devno);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	/* set _SDD */
	rc = ata_acpi_push_id(ap, dev->devno);
	if (rc) {
		/* _SDD failure is logged but not fatal */
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			rc);
	}

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessable.\n",
					ap->id, dev->devno);
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
				sizeof(modelbuf));

		/* word 59 bit 8 validates the multi-sector count in its
		 * low byte
		 */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH EXT only matters past the 28-bit
				 * LBA boundary
				 */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads    = id[55];
				dev->sectors  = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		/* ATA devices get the full 16-byte CDB buffer */
		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* device raises an interrupt when ready for the CDB */
		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	/* let the LLDD apply controller-specific fixups last */
	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			__FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
1763
1764/**
1765 * ata_bus_probe - Reset and probe ATA bus
1766 * @ap: Bus to probe
1767 *
0cba632b
JG
1768 * Master ATA bus probing function. Initiates a hardware-dependent
1769 * bus reset, then attempts to identify any devices found on
1770 * the bus.
1771 *
1da177e4 1772 * LOCKING:
0cba632b 1773 * PCI/etc. bus probe sem.
1da177e4
LT
1774 *
1775 * RETURNS:
96072e69 1776 * Zero on success, negative errno otherwise.
1da177e4
LT
1777 */
1778
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];	/* per-device probe attempts left */
	int i, rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* snapshot the classes phy_reset detected, then reset each
	 * device's class for the identification pass below
	 */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* phy_reset may have disabled the port; re-enable it */
	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* devices out of tries stay ATA_DEV_UNKNOWN (disabled) */
		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc)
		goto fail;

	/* success if at least one device came up */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	/* NOTE: dev points at the device that failed above */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through - shares the last-chance handling below */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ap);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
1878
1879/**
0cba632b
JG
1880 * ata_port_probe - Mark port as enabled
1881 * @ap: Port for which we indicate enablement
1da177e4 1882 *
0cba632b
JG
1883 * Modify @ap data structure such that the system
1884 * thinks that the entire port is enabled.
1885 *
cca3974e 1886 * LOCKING: host lock, or some other form of
0cba632b 1887 * serialization.
1da177e4
LT
1888 */
1889
void ata_port_probe(struct ata_port *ap)
{
	/* Clearing DISABLED marks the whole port as usable again. */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
1894
3be680b7
TH
1895/**
1896 * sata_print_link_status - Print SATA link status
1897 * @ap: SATA port to printk link status about
1898 *
1899 * This function prints link speed and status of a SATA link.
1900 *
1901 * LOCKING:
1902 * None.
1903 */
1904static void sata_print_link_status(struct ata_port *ap)
1905{
6d5f9732 1906 u32 sstatus, scontrol, tmp;
3be680b7 1907
81952c54 1908 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1909 return;
81952c54 1910 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1911
81952c54 1912 if (ata_port_online(ap)) {
3be680b7 1913 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1914 ata_port_printk(ap, KERN_INFO,
1915 "SATA link up %s (SStatus %X SControl %X)\n",
1916 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1917 } else {
f15a1daf
TH
1918 ata_port_printk(ap, KERN_INFO,
1919 "SATA link down (SStatus %X SControl %X)\n",
1920 sstatus, scontrol);
3be680b7
TH
1921 }
1922}
1923
1da177e4 1924/**
780a87f7
JG
1925 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1926 * @ap: SATA port associated with target SATA PHY.
1da177e4 1927 *
780a87f7
JG
1928 * This function issues commands to standard SATA Sxxx
1929 * PHY registers, to wake up the phy (and device), and
1930 * clear any reset condition.
1da177e4
LT
1931 *
1932 * LOCKING:
0cba632b 1933 * PCI/etc. bus probe sem.
1da177e4
LT
1934 *
1935 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);	/* 5s PHY-ready limit */

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary; DET (low nibble
	 * of SStatus) == 1 means device presence detected but no link
	 * established yet
	 */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device behind the link to come out of BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
1978
1979/**
780a87f7
JG
1980 * sata_phy_reset - Reset SATA bus.
1981 * @ap: SATA port associated with target SATA PHY.
1da177e4 1982 *
780a87f7
JG
1983 * This function resets the SATA bus, and then probes
1984 * the bus for devices.
1da177e4
LT
1985 *
1986 * LOCKING:
0cba632b 1987 * PCI/etc. bus probe sem.
1da177e4
LT
1988 *
1989 */
1990void sata_phy_reset(struct ata_port *ap)
1991{
1992 __sata_phy_reset(ap);
198e0fed 1993 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1994 return;
1995 ata_bus_reset(ap);
1996}
1997
ebdfca6e
AC
1998/**
1999 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2000 * @adev: device
2001 *
2002 * Obtain the other device on the same cable, or if none is
2003 * present NULL is returned
2004 */
2e9edbf8 2005
3373efd8 2006struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2007{
3373efd8 2008 struct ata_port *ap = adev->ap;
ebdfca6e 2009 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2010 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2011 return NULL;
2012 return pair;
2013}
2014
1da177e4 2015/**
780a87f7
JG
2016 * ata_port_disable - Disable port.
2017 * @ap: Port to be disabled.
1da177e4 2018 *
780a87f7
JG
2019 * Modify @ap data structure such that the system
2020 * thinks that the entire port is disabled, and should
2021 * never attempt to probe or communicate with devices
2022 * on this port.
2023 *
cca3974e 2024 * LOCKING: host lock, or some other form of
780a87f7 2025 * serialization.
1da177e4
LT
2026 */
2027
2028void ata_port_disable(struct ata_port *ap)
2029{
2030 ap->device[0].class = ATA_DEV_NONE;
2031 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2032 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2033}
2034
1c3fae4d 2035/**
3c567b7d 2036 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2037 * @ap: Port to adjust SATA spd limit for
2038 *
2039 * Adjust SATA spd limit of @ap downward. Note that this
2040 * function only adjusts the limit. The change must be applied
3c567b7d 2041 * using sata_set_spd().
1c3fae4d
TH
2042 *
2043 * LOCKING:
2044 * Inherited from caller.
2045 *
2046 * RETURNS:
2047 * 0 on success, negative errno on failure
2048 */
3c567b7d 2049int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2050{
81952c54
TH
2051 u32 sstatus, spd, mask;
2052 int rc, highbit;
1c3fae4d 2053
81952c54
TH
2054 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2055 if (rc)
2056 return rc;
1c3fae4d
TH
2057
2058 mask = ap->sata_spd_limit;
2059 if (mask <= 1)
2060 return -EINVAL;
2061 highbit = fls(mask) - 1;
2062 mask &= ~(1 << highbit);
2063
81952c54 2064 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2065 if (spd <= 1)
2066 return -EINVAL;
2067 spd--;
2068 mask &= (1 << spd) - 1;
2069 if (!mask)
2070 return -EINVAL;
2071
2072 ap->sata_spd_limit = mask;
2073
f15a1daf
TH
2074 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2075 sata_spd_string(fls(mask)));
1c3fae4d
TH
2076
2077 return 0;
2078}
2079
3c567b7d 2080static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2081{
2082 u32 spd, limit;
2083
2084 if (ap->sata_spd_limit == UINT_MAX)
2085 limit = 0;
2086 else
2087 limit = fls(ap->sata_spd_limit);
2088
2089 spd = (*scontrol >> 4) & 0xf;
2090 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2091
2092 return spd != limit;
2093}
2094
2095/**
3c567b7d 2096 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2097 * @ap: Port in question
2098 *
2099 * Test whether the spd limit in SControl matches
2100 * @ap->sata_spd_limit. This function is used to determine
2101 * whether hardreset is necessary to apply SATA spd
2102 * configuration.
2103 *
2104 * LOCKING:
2105 * Inherited from caller.
2106 *
2107 * RETURNS:
2108 * 1 if SATA spd configuration is needed, 0 otherwise.
2109 */
int sata_set_spd_needed(struct ata_port *ap)
{
	u32 scontrol;

	/* If SCR registers are inaccessible, report "not needed". */
	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(ap, &scontrol);
}
2119
2120/**
3c567b7d 2121 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2122 * @ap: Port to set SATA spd for
2123 *
2124 * Set SATA spd of @ap according to sata_spd_limit.
2125 *
2126 * LOCKING:
2127 * Inherited from caller.
2128 *
2129 * RETURNS:
2130 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2131 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2132 */
int sata_set_spd(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	/* __sata_set_spd_needed() updates scontrol in place and tells
	 * us whether it differs from what is currently programmed.
	 */
	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* 1 == spd configuration was changed */
	return 1;
}
2149
452503f9
AC
2150/*
2151 * This mode timing computation functionality is ported over from
2152 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2153 */
2154/*
b352e57d 2155 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2156 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2157 * for UDMA6, which is currently supported only by Maxtor drives.
2158 *
2159 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2160 */
2161
static const struct ata_timing ata_timing[] = {
	/* Columns: mode, setup, act8b, rec8b, cyc8b,
	 *          active, recover, cycle, udma (all in nanoseconds).
	 * Table is searched linearly and terminated by the 0xFF sentinel.
	 */

	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },

	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },

	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },

/*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */

	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },

	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },

	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },

	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */

	{ 0xFF }	/* sentinel */
};
2198
/* ENOUGH(): round v up to a whole number of 'unit' periods (ceil division). */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
/* EZ(): like ENOUGH but maps 0 (field unused) to 0 instead of 1. */
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

/* Convert nanosecond timings in *t to bus clock counts in *q.
 * T is the cycle time of the PIO/MWDMA clock, UT that of the UDMA
 * clock (both derived from the controller).  q may alias t.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2213
2214void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2215 struct ata_timing *m, unsigned int what)
2216{
2217 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2218 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2219 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2220 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2221 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2222 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2223 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2224 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2225}
2226
2227static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2228{
2229 const struct ata_timing *t;
2230
2231 for (t = ata_timing; t->mode != speed; t++)
91190758 2232 if (t->mode == 0xFF)
452503f9 2233 return NULL;
2e9edbf8 2234 return t;
452503f9
AC
2235}
2236
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* drive-reported minimum cycle times from IDENTIFY */
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 * (Recurses once with the device's PIO mode; PIO modes don't
	 * recurse further since speed <= XFER_PIO_6 then.)
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
2301
cf176e1a
TH
2302/**
2303 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2304 * @dev: Device to adjust xfer masks
458337db 2305 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2306 *
2307 * Adjust xfer masks of @dev downward. Note that this function
2308 * does not apply the change. Invoking ata_set_mode() afterwards
2309 * will apply the limit.
2310 *
2311 * LOCKING:
2312 * Inherited from caller.
2313 *
2314 * RETURNS:
2315 * 0 on success, negative errno on failure
2316 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is a flag or'd into the selector */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest remaining DMA mode, UDMA first */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to 40-wire-cable speeds */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fallthrough - FORCE_PIO0 also clears all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* must keep at least one PIO mode, and must actually change something */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
2390
/* Program @dev's transfer mode (SET FEATURES - XFER) and revalidate.
 * Returns 0 on success, -errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	/* track PIO vs DMA in the device flags */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* revalidate with POST_SETMODE set so EH knows why */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2425
1da177e4
LT
2426/**
2427 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2428 * @ap: port on which timings will be programmed
e82cbdb9 2429 * @r_failed_dev: out paramter for failed device
1da177e4 2430 *
e82cbdb9
TH
2431 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2432 * ata_set_mode() fails, pointer to the failing device is
2433 * returned in @r_failed_dev.
780a87f7 2434 *
1da177e4 2435 * LOCKING:
0cba632b 2436 * PCI/etc. bus probe sem.
e82cbdb9
TH
2437 *
2438 * RETURNS:
2439 * 0 on success, negative errno otherwise
1da177e4 2440 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(ap, r_failed_dev);

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		/* pick the best PIO and DMA modes from the allowed masks */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = 1;

	/* step5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

 out:
	/* on failure, report which device we were working on */
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
2532
1fdffbce
JG
2533/**
2534 * ata_tf_to_host - issue ATA taskfile to host controller
2535 * @ap: port to which command is being issued
2536 * @tf: ATA taskfile register set
2537 *
2538 * Issues ATA taskfile register set to ATA host controller,
2539 * with proper synchronization with interrupt handler and
2540 * other threads.
2541 *
2542 * LOCKING:
cca3974e 2543 * spin_lock_irqsave(host lock)
1fdffbce
JG
2544 */
2545
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load taskfile registers, then write the command register */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
2552
1da177e4
LT
2553/**
2554 * ata_busy_sleep - sleep until BSY clears, or timeout
2555 * @ap: port containing status register to be polled
2556 * @tmout_pat: impatience timeout
2557 * @tmout: overall timeout
2558 *
780a87f7
JG
2559 * Sleep until ATA Status register bit BSY clears,
2560 * or a timeout occurs.
2561 *
d1adc1bb
TH
2562 * LOCKING:
2563 * Kernel thread context (may sleep).
2564 *
2565 * RETURNS:
2566 * 0 on success, -errno otherwise.
1da177e4 2567 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* 0xff status means the port is unresponsive/empty - bail out of
	 * the polling loops immediately in that case.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	/* still busy after the "impatience" timeout: warn but keep waiting */
	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
2607
/* Post-softreset settle: wait for the detected device(s) to come out
 * of BSY and re-select device 0.
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		/* nsect/lbal read back as 1/1 once the device responds
		 * to register access after reset
		 */
		nsect = ioread8(ioaddr->nsect_addr);
		lbal = ioread8(ioaddr->lbal_addr);
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;	/* give up on device 1 */
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
2649
1da177e4
LT
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return 0;

	ata_bus_post_reset(ap, devmask);

	return 0;
}
2687
2688/**
2689 * ata_bus_reset - reset host port and associated ATA channel
2690 * @ap: port to reset
2691 *
2692 * This is typically the first time we actually start issuing
2693 * commands to the ATA channel. We wait for BSY to clear, then
2694 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2695 * result. Determine what devices, if any, are on the channel
2696 * by looking at the device 0/1 error register. Look at the signature
2697 * stored in each device's taskfile registers, to determine if
2698 * the device is ATA or ATAPI.
2699 *
2700 * LOCKING:
0cba632b 2701 * PCI/etc. bus probe sem.
cca3974e 2702 * Obtains host lock.
1da177e4
LT
2703 *
2704 * SIDE EFFECTS:
198e0fed 2705 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
2706 */
2707
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 present */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	/* err == 0x81 indicates device 1 failed diagnostics */
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2774
d7bb4cc7
TH
2775/**
2776 * sata_phy_debounce - debounce SATA phy status
2777 * @ap: ATA port to debounce SATA phy status for
2778 * @params: timing parameters { interval, duratinon, timeout } in msec
2779 *
2780 * Make sure SStatus of @ap reaches stable state, determined by
2781 * holding the same value where DET is not 1 for @duration polled
2782 * every @interval, before @timeout. Timeout constraints the
2783 * beginning of the stable state. Because, after hot unplugging,
2784 * DET gets stuck at 1 on some controllers, this functions waits
2785 * until timeout then returns 0 if DET is stable at 1.
2786 *
2787 * LOCKING:
2788 * Kernel thread context (may sleep)
2789 *
2790 * RETURNS:
2791 * 0 on success, -errno on failure.
2792 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	/* only the DET field (bits 3:0) of SStatus is debounced */
	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 can be a stuck value after hot unplug;
			 * keep polling it until overall timeout
			 */
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check timeout */
		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
2833
2834/**
2835 * sata_phy_resume - resume SATA phy
2836 * @ap: ATA port to resume SATA phy for
2837 * @params: timing parameters { interval, duratinon, timeout } in msec
2838 *
2839 * Resume SATA phy of @ap and debounce it.
2840 *
2841 * LOCKING:
2842 * Kernel thread context (may sleep)
2843 *
2844 * RETURNS:
2845 * 0 on success, -errno on failure.
2846 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	/* DET = 0 (no action), IPM = 3 (no power management) */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params);
}
2867
f5914a46
TH
/* Wait out the remainder of the spinup window after hotplug before
 * talking to the device.
 */
static void ata_wait_spinup(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned long end, secs;
	int rc;

	/* first, debounce phy if SATA */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);

		/* if debounced successfully and offline, no need to wait */
		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
			return;
	}

	/* okay, let's give the drive time to spin up */
	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
	secs = ((end - jiffies) + HZ - 1) / HZ;	/* round up to whole seconds */

	if (time_after(jiffies, end))
		return;

	if (secs > 5)
		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
				"(%lu secs)\n", secs);

	schedule_timeout_uninterruptible(end - jiffies);
}
2896
2897/**
2898 * ata_std_prereset - prepare for reset
2899 * @ap: ATA port to be reset
2900 *
2901 * @ap is about to be reset. Initialize it.
2902 *
2903 * LOCKING:
2904 * Kernel thread context (may sleep)
2905 *
2906 * RETURNS:
2907 * 0 on success, -errno otherwise.
2908 */
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume & hotplug spinup */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
		ata_wait_spinup(ap);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_resume(ap, timing);
		/* -EOPNOTSUPP just means no SCR access; not fatal here */
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
			return rc;
		}
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	return 0;
}
2947
c2bd5804
TH
2948/**
2949 * ata_std_softreset - reset host port via ATA SRST
2950 * @ap: port to reset
c2bd5804
TH
2951 * @classes: resulting classes of attached devices
2952 *
52783c5d 2953 * Reset host port using ATA SRST.
c2bd5804
TH
2954 *
2955 * LOCKING:
2956 * Kernel thread context (may sleep)
2957 *
2958 * RETURNS:
2959 * 0 on success, -errno otherwise.
2960 */
int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing attached, skip the reset entirely */
	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices;
	 * err == 0x81 means device 1 failed diagnostics
	 */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3001
3002/**
b6103f6d 3003 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 3004 * @ap: port to reset
b6103f6d 3005 * @timing: timing parameters { interval, duratinon, timeout } in msec
c2bd5804
TH
3006 *
3007 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
3008 *
3009 * LOCKING:
3010 * Kernel thread context (may sleep)
3011 *
3012 * RETURNS:
3013 * 0 on success, -errno otherwise.
3014 */
int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: disable the SATA interface */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1: perform interface communication initialization */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	rc = sata_phy_resume(ap, timing);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3059
3060/**
3061 * sata_std_hardreset - reset host port via SATA phy reset
3062 * @ap: port to reset
3063 * @class: resulting class of attached device
3064 *
3065 * SATA phy-reset host port using DET bits of SControl register,
3066 * wait for !BSY and classify the attached device.
3067 *
3068 * LOCKING:
3069 * Kernel thread context (may sleep)
3070 *
3071 * RETURNS:
3072 * 0 on success, -errno otherwise.
3073 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_port_hardreset(ap, timing);
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (device not ready)\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3112
/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError left over from the reset itself; SError is
	 * write-1-to-clear, so writing back the read value clears it */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts (only for old-EH drivers; new-EH drivers
	 * manage interrupt state themselves) */
	if (!ap->ops->error_handler)
		ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control; skipped for controllers with no ctl port */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3160
623a3128
TH
3161/**
3162 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3163 * @dev: device to compare against
3164 * @new_class: class of the new device
3165 * @new_id: IDENTIFY page of the new device
3166 *
3167 * Compare @new_class and @new_id against @dev and determine
3168 * whether @dev is the device indicated by @new_class and
3169 * @new_id.
3170 *
3171 * LOCKING:
3172 * None.
3173 *
3174 * RETURNS:
3175 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3176 */
3373efd8
TH
3177static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3178 const u16 *new_id)
623a3128
TH
3179{
3180 const u16 *old_id = dev->id;
a0cf733b
TH
3181 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3182 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3183 u64 new_n_sectors;
3184
3185 if (dev->class != new_class) {
f15a1daf
TH
3186 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3187 dev->class, new_class);
623a3128
TH
3188 return 0;
3189 }
3190
a0cf733b
TH
3191 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3192 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3193 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3194 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3195 new_n_sectors = ata_id_n_sectors(new_id);
3196
3197 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3198 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3199 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3200 return 0;
3201 }
3202
3203 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3204 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3205 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3206 return 0;
3207 }
3208
3209 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3210 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3211 "%llu != %llu\n",
3212 (unsigned long long)dev->n_sectors,
3213 (unsigned long long)new_n_sectors);
623a3128
TH
3214 return 0;
3215 }
3216
3217 return 1;
3218}
3219
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* per-port scratch buffer; safe because EH runs single-threaded */
	u16 *id = (void *)dev->ap->sector_buf;
	int rc;

	if (!ata_dev_enabled(dev)) {
		rc = -ENODEV;
		goto fail;
	}

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		goto fail;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id)) {
		rc = -ENODEV;
		goto fail;
	}

	/* commit the fresh IDENTIFY data only after the match check */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc == 0)
		return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3267
/* one quirk-table row: match on model (and optionally firmware revision) */
struct ata_blacklist_entry {
	const char *model_num;		/* IDENTIFY model string to match */
	const char *model_rev;		/* firmware revision, or NULL for any */
	unsigned long horkage;		/* ATA_HORKAGE_* flags to apply */
};
3273
3274static const struct ata_blacklist_entry ata_device_blacklist [] = {
3275 /* Devices with DMA related problems under Linux */
3276 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3277 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3278 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3279 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3280 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3281 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3282 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3283 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3284 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3285 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3286 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3287 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3288 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3289 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3290 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3291 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3292 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3293 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3294 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3295 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3296 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3297 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3298 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3299 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3300 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3301 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3302 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3303 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3304 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3305 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3306
3307 /* Devices we expect to fail diagnostics */
3308
3309 /* Devices where NCQ should be avoided */
3310 /* NCQ is slow */
3311 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3312
3313 /* Devices with NCQ limits */
3314
3315 /* End Marker */
3316 { }
1da177e4 3317};
2e9edbf8 3318
6919a0a6 3319unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3320{
8bfa79fc
TH
3321 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3322 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3323 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3324
8bfa79fc
TH
3325 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3326 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3327
6919a0a6 3328 while (ad->model_num) {
8bfa79fc 3329 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3330 if (ad->model_rev == NULL)
3331 return ad->horkage;
8bfa79fc 3332 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3333 return ad->horkage;
f4b15fef 3334 }
6919a0a6 3335 ad++;
f4b15fef 3336 }
1da177e4
LT
3337 return 0;
3338}
3339
6919a0a6
AC
3340static int ata_dma_blacklisted(const struct ata_device *dev)
3341{
3342 /* We don't support polling DMA.
3343 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3344 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3345 */
3346 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3347 (dev->flags & ATA_DFLAG_CDB_INTR))
3348 return 1;
3349 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3350}
3351
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available -- the starting superset that each
	 * rule below can only narrow */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 */
	if (ap->cbl == ATA_CBL_PATA40)
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);	/* cap at UDMA/33 */
	/* Apply drive side cable rule. Unknown or 80 pin cables reported
	 * host side are checked drive side as well. Cases where we know a
	 * 40wire cable is used safely for 80 are not checked here.
	 */
	if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);


	/* intersect with the modes the device itself claims */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* simplex controllers allow DMA on only one port at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	/* give the LLDD a final chance to veto modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
3420
1da177e4
LT
3421/**
3422 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3423 * @dev: Device to which command will be sent
3424 *
780a87f7
JG
3425 * Issue SET FEATURES - XFER MODE command to device @dev
3426 * on port @ap.
3427 *
1da177e4 3428 * LOCKING:
0cba632b 3429 * PCI/etc. bus probe sem.
83206a29
TH
3430 *
3431 * RETURNS:
3432 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3433 */
3434
3373efd8 3435static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3436{
a0123703 3437 struct ata_taskfile tf;
83206a29 3438 unsigned int err_mask;
1da177e4
LT
3439
3440 /* set up set-features taskfile */
3441 DPRINTK("set features - xfer mode\n");
3442
3373efd8 3443 ata_tf_init(dev, &tf);
a0123703
TH
3444 tf.command = ATA_CMD_SET_FEATURES;
3445 tf.feature = SETFEATURES_XFER;
3446 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3447 tf.protocol = ATA_PROT_NODATA;
3448 tf.nsect = dev->xfer_mode;
1da177e4 3449
3373efd8 3450 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3451
83206a29
TH
3452 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3453 return err_mask;
1da177e4
LT
3454}
3455
8bf62ece
AL
3456/**
3457 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3458 * @dev: Device to which command will be sent
e2a7f77a
RD
3459 * @heads: Number of heads (taskfile parameter)
3460 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3461 *
3462 * LOCKING:
6aff8f1f
TH
3463 * Kernel thread context (may sleep)
3464 *
3465 * RETURNS:
3466 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3467 */
3373efd8
TH
3468static unsigned int ata_dev_init_params(struct ata_device *dev,
3469 u16 heads, u16 sectors)
8bf62ece 3470{
a0123703 3471 struct ata_taskfile tf;
6aff8f1f 3472 unsigned int err_mask;
8bf62ece
AL
3473
3474 /* Number of sectors per track 1-255. Number of heads 1-16 */
3475 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3476 return AC_ERR_INVALID;
8bf62ece
AL
3477
3478 /* set up init dev params taskfile */
3479 DPRINTK("init dev params \n");
3480
3373efd8 3481 ata_tf_init(dev, &tf);
a0123703
TH
3482 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3483 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3484 tf.protocol = ATA_PROT_NODATA;
3485 tf.nsect = sectors;
3486 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3487
3373efd8 3488 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3489
6aff8f1f
TH
3490 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3491 return err_mask;
8bf62ece
AL
3492}
3493
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* n_elem may have been reduced to 0 by pad trimming */
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
3551
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each sg entry into PRDs that never cross a 64K
		 * physical boundary (PCI IDE hardware limitation) */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
3603/**
3604 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3605 * @qc: Metadata associated with taskfile to check
3606 *
780a87f7
JG
3607 * Allow low-level driver to filter ATA PACKET commands, returning
3608 * a status indicating whether or not it is OK to use DMA for the
3609 * supplied PACKET command.
3610 *
1da177e4 3611 * LOCKING:
cca3974e 3612 * spin_lock_irqsave(host lock)
0cba632b 3613 *
1da177e4
LT
3614 * RETURNS: 0 when ATAPI DMA can be used
3615 * nonzero otherwise
3616 */
3617int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3618{
3619 struct ata_port *ap = qc->ap;
3620 int rc = 0; /* Assume ATAPI DMA is OK by default */
3621
3622 if (ap->ops->check_atapi_dma)
3623 rc = ap->ops->check_atapi_dma(qc);
3624
3625 return rc;
3626}
3627/**
3628 * ata_qc_prep - Prepare taskfile for submission
3629 * @qc: Metadata associated with taskfile to be prepared
3630 *
780a87f7
JG
3631 * Prepare ATA taskfile for submission.
3632 *
1da177e4 3633 * LOCKING:
cca3974e 3634 * spin_lock_irqsave(host lock)
1da177e4
LT
3635 */
3636void ata_qc_prep(struct ata_queued_cmd *qc)
3637{
3638 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3639 return;
3640
3641 ata_fill_sg(qc);
3642}
3643
/* no-op ->qc_prep hook for controllers that need no PRD/SG preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3645
0cba632b
JG
3646/**
3647 * ata_sg_init_one - Associate command with memory buffer
3648 * @qc: Command to be associated
3649 * @buf: Memory buffer
3650 * @buflen: Length of memory buffer, in bytes.
3651 *
3652 * Initialize the data-related elements of queued_cmd @qc
3653 * to point to a single memory buffer, @buf of byte length @buflen.
3654 *
3655 * LOCKING:
cca3974e 3656 * spin_lock_irqsave(host lock)
0cba632b
JG
3657 */
3658
1da177e4
LT
3659void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3660{
1da177e4
LT
3661 qc->flags |= ATA_QCFLAG_SINGLE;
3662
cedc9a47 3663 qc->__sg = &qc->sgent;
1da177e4 3664 qc->n_elem = 1;
cedc9a47 3665 qc->orig_n_elem = 1;
1da177e4 3666 qc->buf_virt = buf;
233277ca 3667 qc->nbytes = buflen;
1da177e4 3668
61c0596c 3669 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3670}
3671
0cba632b
JG
3672/**
3673 * ata_sg_init - Associate command with scatter-gather table.
3674 * @qc: Command to be associated
3675 * @sg: Scatter-gather table.
3676 * @n_elem: Number of elements in s/g table.
3677 *
3678 * Initialize the data-related elements of queued_cmd @qc
3679 * to point to a scatter-gather table @sg, containing @n_elem
3680 * elements.
3681 *
3682 * LOCKING:
cca3974e 3683 * spin_lock_irqsave(host lock)
0cba632b
JG
3684 */
3685
1da177e4
LT
3686void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3687 unsigned int n_elem)
3688{
3689 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3690 qc->__sg = sg;
1da177e4 3691 qc->n_elem = n_elem;
cedc9a47 3692 qc->orig_n_elem = n_elem;
1da177e4
LT
3693}
3694
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the pre-allocated coherent pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI commands may have oddly-sized transfers */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, the tail bytes must be staged into the pad
		 * buffer before the transfer starts */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		/* the whole buffer moved into the pad; nothing left to map */
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
3763
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];	/* last sg entry */
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the pre-allocated coherent pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI commands may have oddly-sized transfers */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		/* for writes, stage the tail bytes into the pad buffer now */
		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		/* last entry emptied entirely into the pad buffer */
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	/* n_elem may shrink here (IOMMU merging or trimming above) */
	qc->n_elem = n_elem;

	return 0;
}
3848
0baab86b 3849/**
c893a3ae 3850 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3851 * @buf: Buffer to swap
3852 * @buf_words: Number of 16-bit words in buffer.
3853 *
3854 * Swap halves of 16-bit words if needed to convert from
3855 * little-endian byte order to native cpu byte order, or
3856 * vice-versa.
3857 *
3858 * LOCKING:
6f0ef4fa 3859 * Inherited from caller.
0baab86b 3860 */
1da177e4
LT
3861void swap_buf_le16(u16 *buf, unsigned int buf_words)
3862{
3863#ifdef __BIG_ENDIAN
3864 unsigned int i;
3865
3866 for (i = 0; i < buf_words; i++)
3867 buf[i] = le16_to_cpu(buf[i]);
3868#endif /* __BIG_ENDIAN */
3869}
3870
6ae4cfb5 3871/**
0d5ff566 3872 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 3873 * @adev: device to target
6ae4cfb5
AL
3874 * @buf: data buffer
3875 * @buflen: buffer length
344babaa 3876 * @write_data: read/write
6ae4cfb5
AL
3877 *
3878 * Transfer data from/to the device data register by PIO.
3879 *
3880 * LOCKING:
3881 * Inherited from caller.
6ae4cfb5 3882 */
0d5ff566
TH
3883void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
3884 unsigned int buflen, int write_data)
1da177e4 3885{
a6b2c5d4 3886 struct ata_port *ap = adev->ap;
6ae4cfb5 3887 unsigned int words = buflen >> 1;
1da177e4 3888
6ae4cfb5 3889 /* Transfer multiple of 2 bytes */
1da177e4 3890 if (write_data)
0d5ff566 3891 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 3892 else
0d5ff566 3893 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
3894
3895 /* Transfer trailing 1 byte, if any. */
3896 if (unlikely(buflen & 0x01)) {
3897 u16 align_buf[1] = { 0 };
3898 unsigned char *trailing_buf = buf + buflen - 1;
3899
3900 if (write_data) {
3901 memcpy(align_buf, trailing_buf, 1);
0d5ff566 3902 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 3903 } else {
0d5ff566 3904 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
3905 memcpy(trailing_buf, align_buf, 1);
3906 }
3907 }
1da177e4
LT
3908}
3909
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_state;

	local_irq_save(irq_state);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_state);
}
3931
3932
/**
 *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 *	@qc: Command on going
 *
 *	Transfer ATA_SECT_SIZE of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the command -> advance the HSM */
	if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		/* irqs off so the KM_IRQ0 atomic kmap slot isn't reentered */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page: directly addressable, no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
	}

	qc->curbytes += ATA_SECT_SIZE;
	qc->cursg_ofs += ATA_SECT_SIZE;

	/* current sg entry exhausted -> move to the next one */
	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
1da177e4 3989
07f6f7d0
AL
3990/**
3991 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3992 * @qc: Command on going
3993 *
c81e29b4 3994 * Transfer one or many ATA_SECT_SIZE of data from/to the
07f6f7d0
AL
3995 * ATA device for the DRQ request.
3996 *
3997 * LOCKING:
3998 * Inherited from caller.
3999 */
1da177e4 4000
07f6f7d0
AL
4001static void ata_pio_sectors(struct ata_queued_cmd *qc)
4002{
4003 if (is_multi_taskfile(&qc->tf)) {
4004 /* READ/WRITE MULTIPLE */
4005 unsigned int nsect;
4006
587005de 4007 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4008
726f0785
TH
4009 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
4010 qc->dev->multi_count);
07f6f7d0
AL
4011 while (nsect--)
4012 ata_pio_sector(qc);
4013 } else
4014 ata_pio_sector(qc);
4015}
4016
c71c1857
AL
4017/**
4018 * atapi_send_cdb - Write CDB bytes to hardware
4019 * @ap: Port to which ATAPI device is attached.
4020 * @qc: Taskfile currently active
4021 *
4022 * When device has indicated its readiness to accept
4023 * a CDB, this function is called. Send the CDB.
4024 *
4025 * LOCKING:
4026 * caller.
4027 */
4028
4029static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4030{
4031 /* send SCSI cdb */
4032 DPRINTK("send cdb\n");
db024d53 4033 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4034
a6b2c5d4 4035 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4036 ata_altstatus(ap); /* flush */
4037
4038 switch (qc->tf.protocol) {
4039 case ATA_PROT_ATAPI:
4040 ap->hsm_task_state = HSM_ST;
4041 break;
4042 case ATA_PROT_ATAPI_NODATA:
4043 ap->hsm_task_state = HSM_ST_LAST;
4044 break;
4045 case ATA_PROT_ATAPI_DMA:
4046 ap->hsm_task_state = HSM_ST_LAST;
4047 /* initiate bmdma */
4048 ap->ops->bmdma_start(qc);
4049 break;
4050 }
1da177e4
LT
4051}
4052
6ae4cfb5
AL
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the command's
 *	scatterlist one page-bounded chunk at a time.  If the device
 *	requests more bytes than the scatterlist holds, trailing data
 *	is discarded (reads) or zero-padded (writes).
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this chunk will satisfy the request -> last HSM state next */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* shuttle the excess through a 2-byte scratch word */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		/* irqs off so the atomic kmap slot is not re-entered */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is permanently mapped; no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	/* advance bookkeeping past the chunk just transferred */
	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		/* current sg exhausted; move to the next entry */
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
4147
6ae4cfb5
AL
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the interrupt reason and byte count the device reported,
 *	sanity-check them, then transfer that many bytes from/to the
 *	ATAPI device via __atapi_pio_bytes().
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count is split across the two LBA mid/high registers */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	/* bogus ireason -> HSM violation; hand the qc to EH */
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
4197
4198/**
c234fb00
AL
4199 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4200 * @ap: the target ata_port
4201 * @qc: qc on going
1da177e4 4202 *
c234fb00
AL
4203 * RETURNS:
4204 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4205 */
c234fb00
AL
4206
4207static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4208{
c234fb00
AL
4209 if (qc->tf.flags & ATA_TFLAG_POLLING)
4210 return 1;
1da177e4 4211
c234fb00
AL
4212 if (ap->hsm_task_state == HSM_ST_FIRST) {
4213 if (qc->tf.protocol == ATA_PROT_PIO &&
4214 (qc->tf.flags & ATA_TFLAG_WRITE))
4215 return 1;
1da177e4 4216
c234fb00
AL
4217 if (is_atapi_taskfile(&qc->tf) &&
4218 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4219 return 1;
fe79e683
AL
4220 }
4221
c234fb00
AL
4222 return 0;
4223}
1da177e4 4224
c17ea20d
TH
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.  On the new-EH
 *	path an HSM-error qc freezes the port instead of completing;
 *	otherwise the qc is completed and, from the workqueue, the
 *	port IRQ is re-enabled first.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			/* re-look up by tag; EH may have taken the qc away */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					/* HSM violation: stop the port for EH */
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			/* caller already holds the host lock */
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-EH path: no freeze support, just complete */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}

	ata_altstatus(ap); /* flush */
}
4276
bb5cb290
AL
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Advance the PIO/ATAPI host state machine one or more steps
 *	based on @status.  Errors transition through HSM_ST_ERR; both
 *	HSM_ST_LAST and HSM_ST_ERR complete the taskfile transaction.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
			       ap->id, status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
			ata_altstatus(ap); /* flush */
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
				       ap->id, status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					/* drain the junk block the device insists on */
					ata_pio_sectors(qc);
					ata_altstatus(ap);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				ata_altstatus(ap);
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		ata_altstatus(ap); /* flush */
		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
4513
65f27f38 4514static void ata_pio_task(struct work_struct *work)
8061f5f0 4515{
65f27f38
DH
4516 struct ata_port *ap =
4517 container_of(work, struct ata_port, port_task.work);
4518 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 4519 u8 status;
a1af3734 4520 int poll_next;
8061f5f0 4521
7fb6ec28 4522fsm_start:
a1af3734 4523 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4524
a1af3734
AL
4525 /*
4526 * This is purely heuristic. This is a fast path.
4527 * Sometimes when we enter, BSY will be cleared in
4528 * a chk-status or two. If not, the drive is probably seeking
4529 * or something. Snooze for a couple msecs, then
4530 * chk-status again. If still busy, queue delayed work.
4531 */
4532 status = ata_busy_wait(ap, ATA_BUSY, 5);
4533 if (status & ATA_BUSY) {
4534 msleep(2);
4535 status = ata_busy_wait(ap, ATA_BUSY, 10);
4536 if (status & ATA_BUSY) {
31ce6dae 4537 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4538 return;
4539 }
8061f5f0
TH
4540 }
4541
a1af3734
AL
4542 /* move the HSM */
4543 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4544
a1af3734
AL
4545 /* another command or interrupt handler
4546 * may be running at this point.
4547 */
4548 if (poll_next)
7fb6ec28 4549 goto fsm_start;
8061f5f0
TH
4550}
4551
1da177e4
LT
4552/**
4553 * ata_qc_new - Request an available ATA command, for queueing
4554 * @ap: Port associated with device @dev
4555 * @dev: Device from whom we request an available command structure
4556 *
4557 * LOCKING:
0cba632b 4558 * None.
1da177e4
LT
4559 */
4560
4561static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4562{
4563 struct ata_queued_cmd *qc = NULL;
4564 unsigned int i;
4565
e3180499 4566 /* no command while frozen */
b51e9e5d 4567 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4568 return NULL;
4569
2ab7db1f
TH
4570 /* the last tag is reserved for internal command. */
4571 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4572 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4573 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4574 break;
4575 }
4576
4577 if (qc)
4578 qc->tag = i;
4579
4580 return qc;
4581}
4582
4583/**
4584 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4585 * @dev: Device from whom we request an available command structure
4586 *
4587 * LOCKING:
0cba632b 4588 * None.
1da177e4
LT
4589 */
4590
3373efd8 4591struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4592{
3373efd8 4593 struct ata_port *ap = dev->ap;
1da177e4
LT
4594 struct ata_queued_cmd *qc;
4595
4596 qc = ata_qc_new(ap);
4597 if (qc) {
1da177e4
LT
4598 qc->scsicmd = NULL;
4599 qc->ap = ap;
4600 qc->dev = dev;
1da177e4 4601
2c13b7ce 4602 ata_qc_reinit(qc);
1da177e4
LT
4603 }
4604
4605 return qc;
4606}
4607
1da177e4
LT
4608/**
4609 * ata_qc_free - free unused ata_queued_cmd
4610 * @qc: Command to complete
4611 *
4612 * Designed to free unused ata_queued_cmd object
4613 * in case something prevents using it.
4614 *
4615 * LOCKING:
cca3974e 4616 * spin_lock_irqsave(host lock)
1da177e4
LT
4617 */
4618void ata_qc_free(struct ata_queued_cmd *qc)
4619{
4ba946e9
TH
4620 struct ata_port *ap = qc->ap;
4621 unsigned int tag;
4622
a4631474 4623 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4624
4ba946e9
TH
4625 qc->flags = 0;
4626 tag = qc->tag;
4627 if (likely(ata_tag_valid(tag))) {
4ba946e9 4628 qc->tag = ATA_TAG_POISON;
6cec4a39 4629 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4630 }
1da177e4
LT
4631}
4632
/* Low-level qc completion: unmap DMA, retire the qc from all active
 * tracking (sactive/active_tag/qc_active) and invoke its completion
 * callback.  Callers hold the host lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ap->sactive &= ~(1 << qc->tag);
	else
		ap->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4659
39599a53
TH
4660static void fill_result_tf(struct ata_queued_cmd *qc)
4661{
4662 struct ata_port *ap = qc->ap;
4663
4664 ap->ops->tf_read(ap, &qc->result_tf);
4665 qc->result_tf.flags = qc->tf.flags;
4666}
4667
f686bcb8
TH
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *	Failed non-internal commands are handed to the error handler
 *	instead of completing (new-EH ports only).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal qcs report failure to their issuer
			 * directly; everything else goes through EH
			 */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		/* old-EH: drop completions EH has already claimed */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
4727
dedaf2b0
TH
4728/**
4729 * ata_qc_complete_multiple - Complete multiple qcs successfully
4730 * @ap: port in question
4731 * @qc_active: new qc_active mask
4732 * @finish_qc: LLDD callback invoked before completing a qc
4733 *
4734 * Complete in-flight commands. This functions is meant to be
4735 * called from low-level driver's interrupt routine to complete
4736 * requests normally. ap->qc_active and @qc_active is compared
4737 * and commands are completed accordingly.
4738 *
4739 * LOCKING:
cca3974e 4740 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4741 *
4742 * RETURNS:
4743 * Number of completed commands on success, -errno otherwise.
4744 */
4745int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4746 void (*finish_qc)(struct ata_queued_cmd *))
4747{
4748 int nr_done = 0;
4749 u32 done_mask;
4750 int i;
4751
4752 done_mask = ap->qc_active ^ qc_active;
4753
4754 if (unlikely(done_mask & qc_active)) {
4755 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4756 "(%08x->%08x)\n", ap->qc_active, qc_active);
4757 return -EINVAL;
4758 }
4759
4760 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4761 struct ata_queued_cmd *qc;
4762
4763 if (!(done_mask & (1 << i)))
4764 continue;
4765
4766 if ((qc = ata_qc_from_tag(ap, i))) {
4767 if (finish_qc)
4768 finish_qc(qc);
4769 ata_qc_complete(qc);
4770 nr_done++;
4771 }
4772 }
4773
4774 return nr_done;
4775}
4776
1da177e4
LT
4777static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4778{
4779 struct ata_port *ap = qc->ap;
4780
4781 switch (qc->tf.protocol) {
3dc1d881 4782 case ATA_PROT_NCQ:
1da177e4
LT
4783 case ATA_PROT_DMA:
4784 case ATA_PROT_ATAPI_DMA:
4785 return 1;
4786
4787 case ATA_PROT_ATAPI:
4788 case ATA_PROT_PIO:
1da177e4
LT
4789 if (ap->flags & ATA_FLAG_PIO_DMA)
4790 return 1;
4791
4792 /* fall through */
4793
4794 default:
4795 return 0;
4796 }
4797
4798 /* never reached */
4799}
4800
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *	On failure the qc is completed with AC_ERR_SYSTEM or the
 *	LLDD's issue error mask set.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Make sure only one non-NCQ command is outstanding. The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(ap->sactive & (1 << qc->tag));
		ap->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(ap->sactive);
		ap->active_tag = qc->tag;
	}

	/* record the qc as in flight */
	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* S/G mapping failed; nothing was sent to the device */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
4859
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* Some controllers show flaky interrupt behavior after
	 * setting xfer mode. Use polling instead.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
		     qc->tf.feature == SETFEATURES_XFER) &&
	    (ap->flags & ATA_FLAG_SETXFER_POLLING))
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		/* polled commands are driven from the workqueue */
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
4999
1da177e4
LT
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	/* altstatus read does not clear INTRQ; probe it first */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->irq_ack(ap, 0); /* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
5099
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;	/* OR of per-port "was it ours" results */
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		/* Skip ports that were never allocated or are disabled. */
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			/* Only dispatch to ata_host_intr() when a command is
			 * actually in flight and it is interrupt-driven
			 * (polled commands must not be completed from here).
			 */
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
5144
34bf2170
TH
5145/**
5146 * sata_scr_valid - test whether SCRs are accessible
5147 * @ap: ATA port to test SCR accessibility for
5148 *
5149 * Test whether SCRs are accessible for @ap.
5150 *
5151 * LOCKING:
5152 * None.
5153 *
5154 * RETURNS:
5155 * 1 if SCRs are accessible, 0 otherwise.
5156 */
5157int sata_scr_valid(struct ata_port *ap)
5158{
5159 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5160}
5161
5162/**
5163 * sata_scr_read - read SCR register of the specified port
5164 * @ap: ATA port to read SCR for
5165 * @reg: SCR to read
5166 * @val: Place to store read value
5167 *
5168 * Read SCR register @reg of @ap into *@val. This function is
5169 * guaranteed to succeed if the cable type of the port is SATA
5170 * and the port implements ->scr_read.
5171 *
5172 * LOCKING:
5173 * None.
5174 *
5175 * RETURNS:
5176 * 0 on success, negative errno on failure.
5177 */
5178int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5179{
5180 if (sata_scr_valid(ap)) {
5181 *val = ap->ops->scr_read(ap, reg);
5182 return 0;
5183 }
5184 return -EOPNOTSUPP;
5185}
5186
5187/**
5188 * sata_scr_write - write SCR register of the specified port
5189 * @ap: ATA port to write SCR for
5190 * @reg: SCR to write
5191 * @val: value to write
5192 *
5193 * Write @val to SCR register @reg of @ap. This function is
5194 * guaranteed to succeed if the cable type of the port is SATA
5195 * and the port implements ->scr_read.
5196 *
5197 * LOCKING:
5198 * None.
5199 *
5200 * RETURNS:
5201 * 0 on success, negative errno on failure.
5202 */
5203int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5204{
5205 if (sata_scr_valid(ap)) {
5206 ap->ops->scr_write(ap, reg, val);
5207 return 0;
5208 }
5209 return -EOPNOTSUPP;
5210}
5211
5212/**
5213 * sata_scr_write_flush - write SCR register of the specified port and flush
5214 * @ap: ATA port to write SCR for
5215 * @reg: SCR to write
5216 * @val: value to write
5217 *
5218 * This function is identical to sata_scr_write() except that this
5219 * function performs flush after writing to the register.
5220 *
5221 * LOCKING:
5222 * None.
5223 *
5224 * RETURNS:
5225 * 0 on success, negative errno on failure.
5226 */
5227int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5228{
5229 if (sata_scr_valid(ap)) {
5230 ap->ops->scr_write(ap, reg, val);
5231 ap->ops->scr_read(ap, reg);
5232 return 0;
5233 }
5234 return -EOPNOTSUPP;
5235}
5236
5237/**
5238 * ata_port_online - test whether the given port is online
5239 * @ap: ATA port to test
5240 *
5241 * Test whether @ap is online. Note that this function returns 0
5242 * if online status of @ap cannot be obtained, so
5243 * ata_port_online(ap) != !ata_port_offline(ap).
5244 *
5245 * LOCKING:
5246 * None.
5247 *
5248 * RETURNS:
5249 * 1 if the port online status is available and online.
5250 */
5251int ata_port_online(struct ata_port *ap)
5252{
5253 u32 sstatus;
5254
5255 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5256 return 1;
5257 return 0;
5258}
5259
5260/**
5261 * ata_port_offline - test whether the given port is offline
5262 * @ap: ATA port to test
5263 *
5264 * Test whether @ap is offline. Note that this function returns
5265 * 0 if offline status of @ap cannot be obtained, so
5266 * ata_port_online(ap) != !ata_port_offline(ap).
5267 *
5268 * LOCKING:
5269 * None.
5270 *
5271 * RETURNS:
5272 * 1 if the port offline status is available and offline.
5273 */
5274int ata_port_offline(struct ata_port *ap)
5275{
5276 u32 sstatus;
5277
5278 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5279 return 1;
5280 return 0;
5281}
0baab86b 5282
77b08fb5 5283int ata_flush_cache(struct ata_device *dev)
9b847548 5284{
977e6b9f 5285 unsigned int err_mask;
9b847548
JA
5286 u8 cmd;
5287
5288 if (!ata_try_flush_cache(dev))
5289 return 0;
5290
6fc49adb 5291 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5292 cmd = ATA_CMD_FLUSH_EXT;
5293 else
5294 cmd = ATA_CMD_FLUSH;
5295
977e6b9f
TH
5296 err_mask = ata_do_simple_cmd(dev, cmd);
5297 if (err_mask) {
5298 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5299 return -EIO;
5300 }
5301
5302 return 0;
9b847548
JA
5303}
5304
cca3974e
JG
/* Request a PM operation (@mesg) from EH on every port of @host.
 * @action and @ehi_flags are merged into each port's eh_info.  When
 * @wait is set, the call blocks until EH finishes each port and the
 * per-port result (written through ap->pm_result) is checked; the
 * first non-zero result aborts the loop and is returned.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* EH reports its result through this pointer */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
5351
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.  On failure the host is resumed again before
 *	returning.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	/* ask EH to suspend every port; wait (last arg) for completion */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

 fail:
	/* undo the partial suspend so the host is usable again */
	ata_host_resume(host);
	return rc;
}
5402
5403/**
cca3974e
JG
5404 * ata_host_resume - resume host
5405 * @host: host to resume
500530f6 5406 *
cca3974e 5407 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5408 * function requests EH to perform PM operations and returns.
5409 * Note that all resume operations are performed parallely.
5410 *
5411 * LOCKING:
5412 * Kernel thread context (may sleep).
5413 */
cca3974e 5414void ata_host_resume(struct ata_host *host)
500530f6 5415{
cca3974e
JG
5416 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5417 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5418 host->dev->power.power_state = PMSG_ON;
500530f6
TH
5419}
5420
c893a3ae
RD
5421/**
5422 * ata_port_start - Set port up for dma.
5423 * @ap: Port to initialize
5424 *
5425 * Called just after data structures for each port are
5426 * initialized. Allocates space for PRD table.
5427 *
5428 * May be used as the port_start() entry in ata_port_operations.
5429 *
5430 * LOCKING:
5431 * Inherited from caller.
5432 */
f0d36efd 5433int ata_port_start(struct ata_port *ap)
1da177e4 5434{
2f1f610b 5435 struct device *dev = ap->dev;
6037d6bb 5436 int rc;
1da177e4 5437
f0d36efd
TH
5438 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5439 GFP_KERNEL);
1da177e4
LT
5440 if (!ap->prd)
5441 return -ENOMEM;
5442
6037d6bb 5443 rc = ata_pad_alloc(ap, dev);
f0d36efd 5444 if (rc)
6037d6bb 5445 return rc;
1da177e4 5446
f0d36efd
TH
5447 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5448 (unsigned long long)ap->prd_dma);
1da177e4
LT
5449 return 0;
5450}
5451
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.  Clears the
 *	probe-relevant portion of the structure while preserving the
 *	fields before ATA_DEVICE_CLEAR_OFFSET (e.g. back-pointer and
 *	devno set up by the caller).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Zero everything from ATA_DEVICE_CLEAR_OFFSET onwards; the
	 * leading part of the structure is deliberately preserved.
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);

	/* no transfer-mode restrictions until ID data says otherwise */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5483
/**
 *	ata_port_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host *host,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;	/* stays disabled until probed */
	ap->id = ata_unique_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	/* legacy dual-channel adapters may describe the second port
	 * with a separate ata_port_info (pinfo2)
	 */
	if (port_no == 1 && ent->pinfo2) {
		ap->pio_mask = ent->pinfo2->pio_mask;
		ap->mwdma_mask = ent->pinfo2->mwdma_mask;
		ap->udma_mask = ent->pinfo2->udma_mask;
		ap->flags |= ent->pinfo2->flags;
		ap->ops = ent->pinfo2->port_ops;
	} else {
		ap->pio_mask = ent->pio_mask;
		ap->mwdma_mask = ent->mwdma_mask;
		ap->udma_mask = ent->udma_mask;
		ap->flags |= ent->port_flags;
		ap->ops = ent->port_ops;
	}
	ap->hw_sata_spd_limit = UINT_MAX;	/* no limit until SCR is read */
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	/* port_task handler is filled in later by the task scheduler */
	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	/* wire up and initialize every device slot on this port */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
5559
155a8a9c 5560/**
4608c160
TH
5561 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5562 * @ap: ATA port to initialize SCSI host for
5563 * @shost: SCSI host associated with @ap
155a8a9c 5564 *
4608c160 5565 * Initialize SCSI host @shost associated with ATA port @ap.
155a8a9c
BK
5566 *
5567 * LOCKING:
5568 * Inherited from caller.
5569 */
4608c160 5570static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
155a8a9c 5571{
cca3974e 5572 ap->scsi_host = shost;
155a8a9c 5573
4608c160
TH
5574 shost->unique_id = ap->id;
5575 shost->max_id = 16;
5576 shost->max_lun = 1;
5577 shost->max_channel = 1;
5578 shost->max_cmd_len = 12;
155a8a9c
BK
5579}
5580
/**
 *	ata_port_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.  Allocates the SCSI
 *	host (with the ata_port embedded in its hostdata) and
 *	initializes both structures.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, for NULL on error.
 */
static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
				      struct ata_host *host,
				      unsigned int port_no)
{
	struct Scsi_Host *shost;
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	/* without new-style EH, the driver must provide some reset */
	if (!ent->port_ops->error_handler &&
	    !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	/* ata_port lives in the Scsi_Host's hostdata */
	shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!shost)
		return NULL;

	shost->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(shost);

	ata_port_init(ap, host, ent, port_no);
	ata_port_init_shost(ap, shost);

	return ap;
}
5624
f0d36efd
TH
5625static void ata_host_release(struct device *gendev, void *res)
5626{
5627 struct ata_host *host = dev_get_drvdata(gendev);
5628 int i;
5629
5630 for (i = 0; i < host->n_ports; i++) {
5631 struct ata_port *ap = host->ports[i];
5632
5633 if (!ap)
5634 continue;
5635
5636 if (ap->ops->port_stop)
5637 ap->ops->port_stop(ap);
5638
5639 scsi_host_put(ap->scsi_host);
5640 }
5641
5642 if (host->ops->host_stop)
5643 host->ops->host_stop(host);
5644}
5645
/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	Sets up the host lock and records the owning device, flags and
 *	operations.  (Note: kernel-doc previously named this
 *	"ata_sas_host_init"; fixed to match the actual function name.)
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */

void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
5666
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.  All resources are devres-managed; on failure the
 *	whole devres group opened here is released.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int i;
	struct device *dev = ent->dev;
	struct ata_host *host;
	int rc;
	/* NOTE(review): rc is read by VPRINTK at err_out but is only
	 * assigned on some paths (e.g. the devres_alloc failure jumps
	 * to err_out with rc uninitialized) — debug print may show a
	 * garbage value; TODO confirm and initialize rc if desired. */

	DPRINTK("ENTER\n");

	if (ent->irq == 0) {
		dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
		return 0;
	}

	/* group all resource acquisitions for one-shot rollback */
	if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
		return 0;

	/* alloc a container for our list of ATA ports (buses) */
	host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
			    (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host)
		goto err_out;
	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
	host->n_ports = ent->n_ports;
	host->irq = ent->irq;
	host->irq2 = ent->irq2;
	host->iomap = ent->iomap;
	host->private_data = ent->private_data;

	/* register each port bound to this device */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;
		int irq_line = ent->irq;

		ap = ata_port_add(ent, host, i);
		host->ports[i] = ap;
		if (!ap)
			goto err_out;

		/* dummy? */
		if (ent->dummy_port_mask & (1 << i)) {
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		/* start port */
		rc = ap->ops->port_start(ap);
		if (rc) {
			/* clear the slot so ata_host_release skips it */
			host->ports[i] = NULL;
			scsi_host_put(ap->scsi_host);
			goto err_out;
		}

		/* Report the secondary IRQ for second channel legacy */
		if (i == 1 && ent->irq2)
			irq_line = ent->irq2;

		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
				"ctl 0x%p bmdma 0x%p irq %d\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				irq_line);

		/* freeze port before requesting IRQ */
		ata_eh_freeze_port(ap);
	}

	/* obtain irq, that may be shared between channels */
	rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
			      ent->irq_flags, DRV_NAME, host);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* do we have a second IRQ for the other channel, eg legacy mode */
	if (ent->irq2) {
		/* We will get weird core code crashes later if this is true
		   so trap it now */
		BUG_ON(ent->irq == ent->irq2);

		rc = devm_request_irq(dev, ent->irq2,
				      ent->port_ops->irq_handler, ent->irq_flags,
				      DRV_NAME, host);
		if (rc) {
			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
				   ent->irq2, rc);
			goto err_out;
		}
	}

	/* resource acquisition complete */
	devres_remove_group(dev, ata_device_add);

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 scontrol;
		int rc;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->scsi_host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			/* new-style EH: probing is delegated to EH */
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* old-style drivers: probe synchronously here */
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

 err_out:
	/* releases everything acquired inside the group, incl. IRQs */
	devres_release_group(dev, ata_device_add);
	dev_set_drvdata(dev, NULL);
	VPRINTK("EXIT, returning %d\n", rc);
	return 0;
}
5870
720ba126
TH
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	/* old-style drivers have no EH to quiesce — go straight to
	 * SCSI host removal
	 */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);	/* let pending work run */
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);	/* flush anything it queued */

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
5928
0529c159
TH
5929/**
5930 * ata_host_detach - Detach all ports of an ATA host
5931 * @host: Host to detach
5932 *
5933 * Detach all ports of @host.
5934 *
5935 * LOCKING:
5936 * Kernel thread context (may sleep).
5937 */
5938void ata_host_detach(struct ata_host *host)
5939{
5940 int i;
5941
5942 for (i = 0; i < host->n_ports; i++)
5943 ata_port_detach(host->ports[i]);
5944}
5945
f6d950e2
BK
5946struct ata_probe_ent *
5947ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5948{
5949 struct ata_probe_ent *probe_ent;
5950
f0d36efd
TH
5951 /* XXX - the following if can go away once all LLDs are managed */
5952 if (!list_empty(&dev->devres_head))
5953 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
5954 else
5955 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
f6d950e2
BK
5956 if (!probe_ent) {
5957 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5958 kobject_name(&(dev->kobj)));
5959 return NULL;
5960 }
5961
5962 INIT_LIST_HEAD(&probe_ent->node);
5963 probe_ent->dev = dev;
5964
5965 probe_ent->sht = port->sht;
cca3974e 5966 probe_ent->port_flags = port->flags;
f6d950e2
BK
5967 probe_ent->pio_mask = port->pio_mask;
5968 probe_ent->mwdma_mask = port->mwdma_mask;
5969 probe_ent->udma_mask = port->udma_mask;
5970 probe_ent->port_ops = port->port_ops;
d639ca94 5971 probe_ent->private_data = port->private_data;
f6d950e2
BK
5972
5973 return probe_ent;
5974}
5975
1da177e4
LT
5976/**
5977 * ata_std_ports - initialize ioaddr with standard port offsets.
5978 * @ioaddr: IO address structure to be initialized
0baab86b
EF
5979 *
5980 * Utility function which initializes data_addr, error_addr,
5981 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5982 * device_addr, status_addr, and command_addr to standard offsets
5983 * relative to cmd_addr.
5984 *
5985 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 5986 */
0baab86b 5987
1da177e4
LT
5988void ata_std_ports(struct ata_ioports *ioaddr)
5989{
5990 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5991 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5992 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5993 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5994 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5995 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5996 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5997 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5998 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5999 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6000}
6001
0baab86b 6002
374b1873
JG
6003#ifdef CONFIG_PCI
6004
1da177e4
LT
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(pci_dev_to_dev(pdev));

	ata_host_detach(host);
}
6023
6024/* move to PCI subsystem */
057ace5e 6025int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6026{
6027 unsigned long tmp = 0;
6028
6029 switch (bits->width) {
6030 case 1: {
6031 u8 tmp8 = 0;
6032 pci_read_config_byte(pdev, bits->reg, &tmp8);
6033 tmp = tmp8;
6034 break;
6035 }
6036 case 2: {
6037 u16 tmp16 = 0;
6038 pci_read_config_word(pdev, bits->reg, &tmp16);
6039 tmp = tmp16;
6040 break;
6041 }
6042 case 4: {
6043 u32 tmp32 = 0;
6044 pci_read_config_dword(pdev, bits->reg, &tmp32);
6045 tmp = tmp32;
6046 break;
6047 }
6048
6049 default:
6050 return -EINVAL;
6051 }
6052
6053 tmp &= bits->mask;
6054
6055 return (tmp == bits->val) ? 1 : 0;
6056}
9b847548 6057
3c5100c1 6058void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6059{
6060 pci_save_state(pdev);
500530f6 6061
3c5100c1 6062 if (mesg.event == PM_EVENT_SUSPEND) {
500530f6
TH
6063 pci_disable_device(pdev);
6064 pci_set_power_state(pdev, PCI_D3hot);
6065 }
9b847548
JA
6066}
6067
553c4aa6 6068int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6069{
553c4aa6
TH
6070 int rc;
6071
9b847548
JA
6072 pci_set_power_state(pdev, PCI_D0);
6073 pci_restore_state(pdev);
553c4aa6 6074
b878ca5d 6075 rc = pcim_enable_device(pdev);
553c4aa6
TH
6076 if (rc) {
6077 dev_printk(KERN_ERR, &pdev->dev,
6078 "failed to enable device after resume (%d)\n", rc);
6079 return rc;
6080 }
6081
9b847548 6082 pci_set_master(pdev);
553c4aa6 6083 return 0;
500530f6
TH
6084}
6085
3c5100c1 6086int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6087{
cca3974e 6088 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6089 int rc = 0;
6090
cca3974e 6091 rc = ata_host_suspend(host, mesg);
500530f6
TH
6092 if (rc)
6093 return rc;
6094
3c5100c1 6095 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6096
6097 return 0;
6098}
6099
6100int ata_pci_device_resume(struct pci_dev *pdev)
6101{
cca3974e 6102 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6103 int rc;
500530f6 6104
553c4aa6
TH
6105 rc = ata_pci_device_do_resume(pdev);
6106 if (rc == 0)
6107 ata_host_resume(host);
6108 return rc;
9b847548 6109}
1da177e4
LT
6110#endif /* CONFIG_PCI */
6111
6112
1da177e4
LT
6113static int __init ata_init(void)
6114{
a8601e5f 6115 ata_probe_timeout *= HZ;
1da177e4
LT
6116 ata_wq = create_workqueue("ata");
6117 if (!ata_wq)
6118 return -ENOMEM;
6119
453b07ac
TH
6120 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6121 if (!ata_aux_wq) {
6122 destroy_workqueue(ata_wq);
6123 return -ENOMEM;
6124 }
6125
1da177e4
LT
6126 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6127 return 0;
6128}
6129
/* Module exit: tear down both libata workqueues created in ata_init(). */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
6135
a4625085 6136subsys_initcall(ata_init);
1da177e4
LT
6137module_exit(ata_exit);
6138
67846b30 6139static unsigned long ratelimit_time;
34af946a 6140static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6141
6142int ata_ratelimit(void)
6143{
6144 int rc;
6145 unsigned long flags;
6146
6147 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6148
6149 if (time_after(jiffies, ratelimit_time)) {
6150 rc = 1;
6151 ratelimit_time = jiffies + (HZ/5);
6152 } else
6153 rc = 0;
6154
6155 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6156
6157 return rc;
6158}
6159
c22daff4
TH
6160/**
6161 * ata_wait_register - wait until register value changes
6162 * @reg: IO-mapped register
6163 * @mask: Mask to apply to read register value
6164 * @val: Wait condition
6165 * @interval_msec: polling interval in milliseconds
6166 * @timeout_msec: timeout in milliseconds
6167 *
6168 * Waiting for some bits of register to change is a common
6169 * operation for ATA controllers. This function reads 32bit LE
6170 * IO-mapped register @reg and tests for the following condition.
6171 *
6172 * (*@reg & mask) != val
6173 *
6174 * If the condition is met, it returns; otherwise, the process is
6175 * repeated after @interval_msec until timeout.
6176 *
6177 * LOCKING:
6178 * Kernel thread context (may sleep)
6179 *
6180 * RETURNS:
6181 * The final register value.
6182 */
6183u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6184 unsigned long interval_msec,
6185 unsigned long timeout_msec)
6186{
6187 unsigned long timeout;
6188 u32 tmp;
6189
6190 tmp = ioread32(reg);
6191
6192 /* Calculate timeout _after_ the first read to make sure
6193 * preceding writes reach the controller before starting to
6194 * eat away the timeout.
6195 */
6196 timeout = jiffies + (timeout_msec * HZ) / 1000;
6197
6198 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6199 msleep(interval_msec);
6200 tmp = ioread32(reg);
6201 }
6202
6203 return tmp;
6204}
6205
dd5b06c4
TH
6206/*
6207 * Dummy port_ops
6208 */
6209static void ata_dummy_noret(struct ata_port *ap) { }
6210static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6211static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6212
6213static u8 ata_dummy_check_status(struct ata_port *ap)
6214{
6215 return ATA_DRDY;
6216}
6217
6218static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6219{
6220 return AC_ERR_SYSTEM;
6221}
6222
6223const struct ata_port_operations ata_dummy_port_ops = {
6224 .port_disable = ata_port_disable,
6225 .check_status = ata_dummy_check_status,
6226 .check_altstatus = ata_dummy_check_status,
6227 .dev_select = ata_noop_dev_select,
6228 .qc_prep = ata_noop_qc_prep,
6229 .qc_issue = ata_dummy_qc_issue,
6230 .freeze = ata_dummy_noret,
6231 .thaw = ata_dummy_noret,
6232 .error_handler = ata_dummy_noret,
6233 .post_internal_cmd = ata_dummy_qc_noret,
6234 .irq_clear = ata_dummy_noret,
6235 .port_start = ata_dummy_ret0,
6236 .port_stop = ata_dummy_noret,
6237};
6238
1da177e4
LT
6239/*
6240 * libata is essentially a library of internal helper functions for
6241 * low-level ATA host controller drivers. As such, the API/ABI is
6242 * likely to change as new drivers are added and updated.
6243 * Do not depend on ABI/API stability.
6244 */
6245
e9c83914
TH
6246EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6247EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6248EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6249EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
1da177e4
LT
6250EXPORT_SYMBOL_GPL(ata_std_bios_param);
6251EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6252EXPORT_SYMBOL_GPL(ata_host_init);
1da177e4 6253EXPORT_SYMBOL_GPL(ata_device_add);
0529c159 6254EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
6255EXPORT_SYMBOL_GPL(ata_sg_init);
6256EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6257EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6258EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6259EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6260EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
6261EXPORT_SYMBOL_GPL(ata_tf_load);
6262EXPORT_SYMBOL_GPL(ata_tf_read);
6263EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6264EXPORT_SYMBOL_GPL(ata_std_dev_select);
6265EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6266EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6267EXPORT_SYMBOL_GPL(ata_check_status);
6268EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
6269EXPORT_SYMBOL_GPL(ata_exec_command);
6270EXPORT_SYMBOL_GPL(ata_port_start);
1da177e4 6271EXPORT_SYMBOL_GPL(ata_interrupt);
0d5ff566
TH
6272EXPORT_SYMBOL_GPL(ata_data_xfer);
6273EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6274EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 6275EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
6276EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6277EXPORT_SYMBOL_GPL(ata_bmdma_start);
6278EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6279EXPORT_SYMBOL_GPL(ata_bmdma_status);
6280EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
6281EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6282EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6283EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6284EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6285EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 6286EXPORT_SYMBOL_GPL(ata_port_probe);
3c567b7d 6287EXPORT_SYMBOL_GPL(sata_set_spd);
d7bb4cc7
TH
6288EXPORT_SYMBOL_GPL(sata_phy_debounce);
6289EXPORT_SYMBOL_GPL(sata_phy_resume);
1da177e4
LT
6290EXPORT_SYMBOL_GPL(sata_phy_reset);
6291EXPORT_SYMBOL_GPL(__sata_phy_reset);
6292EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 6293EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 6294EXPORT_SYMBOL_GPL(ata_std_softreset);
b6103f6d 6295EXPORT_SYMBOL_GPL(sata_port_hardreset);
c2bd5804
TH
6296EXPORT_SYMBOL_GPL(sata_std_hardreset);
6297EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6298EXPORT_SYMBOL_GPL(ata_dev_classify);
6299EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6300EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6301EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6302EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 6303EXPORT_SYMBOL_GPL(ata_busy_sleep);
86e45b6b 6304EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
6305EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6306EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6307EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6308EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6309EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 6310EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
6311EXPORT_SYMBOL_GPL(sata_scr_valid);
6312EXPORT_SYMBOL_GPL(sata_scr_read);
6313EXPORT_SYMBOL_GPL(sata_scr_write);
6314EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6315EXPORT_SYMBOL_GPL(ata_port_online);
6316EXPORT_SYMBOL_GPL(ata_port_offline);
cca3974e
JG
6317EXPORT_SYMBOL_GPL(ata_host_suspend);
6318EXPORT_SYMBOL_GPL(ata_host_resume);
6a62a04d
TH
6319EXPORT_SYMBOL_GPL(ata_id_string);
6320EXPORT_SYMBOL_GPL(ata_id_c_string);
6919a0a6 6321EXPORT_SYMBOL_GPL(ata_device_blacklisted);
1da177e4
LT
6322EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6323
1bc4ccff 6324EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
6325EXPORT_SYMBOL_GPL(ata_timing_compute);
6326EXPORT_SYMBOL_GPL(ata_timing_merge);
6327
1da177e4
LT
6328#ifdef CONFIG_PCI
6329EXPORT_SYMBOL_GPL(pci_test_config_bits);
6330EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6331EXPORT_SYMBOL_GPL(ata_pci_init_one);
6332EXPORT_SYMBOL_GPL(ata_pci_remove_one);
500530f6
TH
6333EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6334EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6335EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6336EXPORT_SYMBOL_GPL(ata_pci_device_resume);
67951ade
AC
6337EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6338EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 6339#endif /* CONFIG_PCI */
9b847548 6340
9b847548
JA
6341EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6342EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
ece1d636 6343
ece1d636 6344EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03
TH
6345EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6346EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499
TH
6347EXPORT_SYMBOL_GPL(ata_port_freeze);
6348EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6349EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6350EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6351EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6352EXPORT_SYMBOL_GPL(ata_do_eh);
83625006
AI
6353EXPORT_SYMBOL_GPL(ata_irq_on);
6354EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6355EXPORT_SYMBOL_GPL(ata_irq_ack);
6356EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
a619f981 6357EXPORT_SYMBOL_GPL(ata_dev_try_classify);