pata_it821x: fix section mismatch warning
[linux-2.6-block.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
8bc3fc47 62#define DRV_VERSION "2.21" /* must be exactly four chars */
fda0efc5
JG
63
64
d7bb4cc7 65/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
66const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 69
3373efd8
TH
70static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73static void ata_dev_xfermask(struct ata_device *dev);
1da177e4 74
f3187195 75unsigned int ata_print_id = 1;
1da177e4
LT
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
c3c013a2
JG
88int libata_fua = 0;
89module_param_named(fua, libata_fua, int, 0444);
90MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
91
1e999736
AC
92static int ata_ignore_hpa = 0;
93module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
94MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
95
a8601e5f
AM
96static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
97module_param(ata_probe_timeout, int, 0444);
98MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
99
d7d0dad6
JG
100int libata_noacpi = 1;
101module_param_named(noacpi, libata_noacpi, int, 0444);
11ef697b
KCA
102MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
103
1da177e4
LT
104MODULE_AUTHOR("Jeff Garzik");
105MODULE_DESCRIPTION("Library module for ATA devices");
106MODULE_LICENSE("GPL");
107MODULE_VERSION(DRV_VERSION);
108
0baab86b 109
1da177e4
LT
110/**
111 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
112 * @tf: Taskfile to convert
113 * @fis: Buffer into which data will output
114 * @pmp: Port multiplier port
115 *
116 * Converts a standard ATA taskfile to a Serial ATA
117 * FIS structure (Register - Host to Device).
118 *
119 * LOCKING:
120 * Inherited from caller.
121 */
122
057ace5e 123void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
1da177e4
LT
124{
125 fis[0] = 0x27; /* Register - Host to Device FIS */
126 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
127 bit 7 indicates Command FIS */
128 fis[2] = tf->command;
129 fis[3] = tf->feature;
130
131 fis[4] = tf->lbal;
132 fis[5] = tf->lbam;
133 fis[6] = tf->lbah;
134 fis[7] = tf->device;
135
136 fis[8] = tf->hob_lbal;
137 fis[9] = tf->hob_lbam;
138 fis[10] = tf->hob_lbah;
139 fis[11] = tf->hob_feature;
140
141 fis[12] = tf->nsect;
142 fis[13] = tf->hob_nsect;
143 fis[14] = 0;
144 fis[15] = tf->ctl;
145
146 fis[16] = 0;
147 fis[17] = 0;
148 fis[18] = 0;
149 fis[19] = 0;
150}
151
152/**
153 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
154 * @fis: Buffer from which data will be input
155 * @tf: Taskfile to output
156 *
e12a1be6 157 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
158 *
159 * LOCKING:
160 * Inherited from caller.
161 */
162
057ace5e 163void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
164{
165 tf->command = fis[2]; /* status */
166 tf->feature = fis[3]; /* error */
167
168 tf->lbal = fis[4];
169 tf->lbam = fis[5];
170 tf->lbah = fis[6];
171 tf->device = fis[7];
172
173 tf->hob_lbal = fis[8];
174 tf->hob_lbam = fis[9];
175 tf->hob_lbah = fis[10];
176
177 tf->nsect = fis[12];
178 tf->hob_nsect = fis[13];
179}
180
8cbd6df1
AL
/* Opcode lookup table for ata_rwcmd_protocol().  Indexed by
 * base + 4*FUA + 2*LBA48 + write, where base is 0 (pio multi),
 * 8 (pio) or 16 (dma).  A zero entry means the combination is
 * invalid (e.g. FUA without LBA48) and the caller must fail it.
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
210
211/**
8cbd6df1 212 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
213 * @tf: command to examine and configure
214 * @dev: device tf belongs to
1da177e4 215 *
2e9edbf8 216 * Examine the device configuration and tf->flags to calculate
8cbd6df1 217 * the proper read/write commands and protocol to use.
1da177e4
LT
218 *
219 * LOCKING:
220 * caller.
221 */
bd056d7e 222static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 223{
9a3dccc4 224 u8 cmd;
1da177e4 225
9a3dccc4 226 int index, fua, lba48, write;
2e9edbf8 227
9a3dccc4 228 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
229 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
230 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 231
8cbd6df1
AL
232 if (dev->flags & ATA_DFLAG_PIO) {
233 tf->protocol = ATA_PROT_PIO;
9a3dccc4 234 index = dev->multi_count ? 0 : 8;
bd056d7e 235 } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
236 /* Unable to use DMA due to host limitation */
237 tf->protocol = ATA_PROT_PIO;
0565c26d 238 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
239 } else {
240 tf->protocol = ATA_PROT_DMA;
9a3dccc4 241 index = 16;
8cbd6df1 242 }
1da177e4 243
9a3dccc4
TH
244 cmd = ata_rw_cmds[index + fua + lba48 + write];
245 if (cmd) {
246 tf->command = cmd;
247 return 0;
248 }
249 return -1;
1da177e4
LT
250}
251
35b649fe
TH
252/**
253 * ata_tf_read_block - Read block address from ATA taskfile
254 * @tf: ATA taskfile of interest
255 * @dev: ATA device @tf belongs to
256 *
257 * LOCKING:
258 * None.
259 *
260 * Read block address from @tf. This function can handle all
261 * three address formats - LBA, LBA48 and CHS. tf->protocol and
262 * flags select the address format to use.
263 *
264 * RETURNS:
265 * Block address read from @tf.
266 */
267u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
268{
269 u64 block = 0;
270
271 if (tf->flags & ATA_TFLAG_LBA) {
272 if (tf->flags & ATA_TFLAG_LBA48) {
273 block |= (u64)tf->hob_lbah << 40;
274 block |= (u64)tf->hob_lbam << 32;
275 block |= tf->hob_lbal << 24;
276 } else
277 block |= (tf->device & 0xf) << 24;
278
279 block |= tf->lbah << 16;
280 block |= tf->lbam << 8;
281 block |= tf->lbal;
282 } else {
283 u32 cyl, head, sect;
284
285 cyl = tf->lbam | (tf->lbah << 8);
286 head = tf->device & 0xf;
287 sect = tf->lbal;
288
289 block = (cyl * dev->heads + head) * dev->sectors + sect;
290 }
291
292 return block;
293}
294
bd056d7e
TH
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Chooses NCQ,
 *	LBA48, LBA28 or CHS addressing, most capable first.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ register layout: tag goes in nsect bits 7:3, the
		 * sector count is carried in the feature registers.
		 */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		/* pick opcode/protocol; fails for invalid combinations
		 * such as FUA without LBA48.
		 */
		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
417
cb95d562
TH
418/**
419 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
420 * @pio_mask: pio_mask
421 * @mwdma_mask: mwdma_mask
422 * @udma_mask: udma_mask
423 *
424 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
425 * unsigned int xfer_mask.
426 *
427 * LOCKING:
428 * None.
429 *
430 * RETURNS:
431 * Packed xfer_mask.
432 */
433static unsigned int ata_pack_xfermask(unsigned int pio_mask,
434 unsigned int mwdma_mask,
435 unsigned int udma_mask)
436{
437 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
438 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
439 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
440}
441
c0489e4e
TH
442/**
443 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
444 * @xfer_mask: xfer_mask to unpack
445 * @pio_mask: resulting pio_mask
446 * @mwdma_mask: resulting mwdma_mask
447 * @udma_mask: resulting udma_mask
448 *
449 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
450 * Any NULL distination masks will be ignored.
451 */
452static void ata_unpack_xfermask(unsigned int xfer_mask,
453 unsigned int *pio_mask,
454 unsigned int *mwdma_mask,
455 unsigned int *udma_mask)
456{
457 if (pio_mask)
458 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
459 if (mwdma_mask)
460 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
461 if (udma_mask)
462 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
463}
464
/* Translation table between xfer_mask bit ranges and XFER_* mode
 * numbers.  Each entry maps @bits mask bits starting at @shift onto
 * consecutive XFER_* values starting at @base.  The table is
 * terminated by an entry with shift == -1.
 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
474
475/**
476 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
477 * @xfer_mask: xfer_mask of interest
478 *
479 * Return matching XFER_* value for @xfer_mask. Only the highest
480 * bit of @xfer_mask is considered.
481 *
482 * LOCKING:
483 * None.
484 *
485 * RETURNS:
486 * Matching XFER_* value, 0 if no match found.
487 */
488static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
489{
490 int highbit = fls(xfer_mask) - 1;
491 const struct ata_xfer_ent *ent;
492
493 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
494 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
495 return ent->base + highbit - ent->shift;
496 return 0;
497}
498
499/**
500 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
501 * @xfer_mode: XFER_* of interest
502 *
503 * Return matching xfer_mask for @xfer_mode.
504 *
505 * LOCKING:
506 * None.
507 *
508 * RETURNS:
509 * Matching xfer_mask, 0 if no match found.
510 */
511static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
512{
513 const struct ata_xfer_ent *ent;
514
515 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
516 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
517 return 1 << (ent->shift + xfer_mode - ent->base);
518 return 0;
519}
520
521/**
522 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
523 * @xfer_mode: XFER_* of interest
524 *
525 * Return matching xfer_shift for @xfer_mode.
526 *
527 * LOCKING:
528 * None.
529 *
530 * RETURNS:
531 * Matching xfer_shift, -1 if no match found.
532 */
533static int ata_xfer_mode2shift(unsigned int xfer_mode)
534{
535 const struct ata_xfer_ent *ent;
536
537 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
538 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
539 return ent->shift;
540 return -1;
541}
542
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @modemask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mode_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* indexed by bit position within xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	/* empty mask or a bit beyond the table means no printable mode */
	if (highbit < 0 || highbit >= (int)ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}
588
4c360c81
TH
/* Map a SATA link speed number (1-based) to a human-readable string;
 * returns "<unknown>" for 0 or out-of-range values.
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int idx = spd - 1;

	/* spd is 1-based; 0 and anything past the table is unknown */
	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[idx];
}
600
3373efd8 601void ata_dev_disable(struct ata_device *dev)
0b8efb0a 602{
0dd4b21f 603 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
f15a1daf 604 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
605 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
606 ATA_DNXFER_QUIET);
0b8efb0a
TH
607 dev->class++;
608 }
609}
610
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write the 0x55/0xaa pattern twice, then once more; only the
	 * last pair is read back below — the repeats give the device
	 * time to latch the values.
	 */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	/* a present device echoes back what was last written */
	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
653
1da177e4
LT
654/**
655 * ata_dev_classify - determine device type based on ATA-spec signature
656 * @tf: ATA taskfile register set for device to be identified
657 *
658 * Determine from taskfile register contents whether a device is
659 * ATA or ATAPI, as per "Signature and persistence" section
660 * of ATA/PI spec (volume 1, sect 5.14).
661 *
662 * LOCKING:
663 * None.
664 *
665 * RETURNS:
666 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
667 * the event of failure.
668 */
669
057ace5e 670unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
671{
672 /* Apple's open source Darwin code hints that some devices only
673 * put a proper signature into the LBA mid/high registers,
674 * So, we only check those. It's sufficient for uniqueness.
675 */
676
677 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
678 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
679 DPRINTK("found ATA device by sig\n");
680 return ATA_DEV_ATA;
681 }
682
683 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
684 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
685 DPRINTK("found ATAPI device by sig\n");
686 return ATA_DEV_ATAPI;
687 }
688
689 DPRINTK("unknown device\n");
690 return ATA_DEV_UNKNOWN;
691}
692
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	/* after reset, tf.feature holds the diagnostic error code */
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* 0x81 on the master means the slave failed diags */
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	/* an ATA device with an all-zero status register isn't there */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
751
752/**
6a62a04d 753 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
754 * @id: IDENTIFY DEVICE results we will examine
755 * @s: string into which data is output
756 * @ofs: offset into identify device page
757 * @len: length of string to return. must be an even number.
758 *
759 * The strings in the IDENTIFY DEVICE page are broken up into
760 * 16-bit chunks. Run through the string, and output each
761 * 8-bit chunk linearly, regardless of platform.
762 *
763 * LOCKING:
764 * caller.
765 */
766
6a62a04d
TH
767void ata_id_string(const u16 *id, unsigned char *s,
768 unsigned int ofs, unsigned int len)
1da177e4
LT
769{
770 unsigned int c;
771
772 while (len > 0) {
773 c = id[ofs] >> 8;
774 *s = c;
775 s++;
776
777 c = id[ofs] & 0xff;
778 *s = c;
779 s++;
780
781 ofs++;
782 len -= 2;
783 }
784}
785
0e949ff3 786/**
6a62a04d 787 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
788 * @id: IDENTIFY DEVICE results we will examine
789 * @s: string into which data is output
790 * @ofs: offset into identify device page
791 * @len: length of string to return. must be an odd number.
792 *
6a62a04d 793 * This function is identical to ata_id_string except that it
0e949ff3
TH
794 * trims trailing spaces and terminates the resulting string with
795 * null. @len must be actual maximum length (even number) + 1.
796 *
797 * LOCKING:
798 * caller.
799 */
6a62a04d
TH
800void ata_id_c_string(const u16 *id, unsigned char *s,
801 unsigned int ofs, unsigned int len)
0e949ff3
TH
802{
803 unsigned char *p;
804
805 WARN_ON(!(len & 1));
806
6a62a04d 807 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
808
809 p = s + strnlen(s, len - 1);
810 while (p > s && p[-1] == ' ')
811 p--;
812 *p = '\0';
813}
0baab86b 814
1e999736
AC
815static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
816{
817 u64 sectors = 0;
818
819 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
820 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
821 sectors |= (tf->hob_lbal & 0xff) << 24;
822 sectors |= (tf->lbah & 0xff) << 16;
823 sectors |= (tf->lbam & 0xff) << 8;
824 sectors |= (tf->lbal & 0xff);
825
826 return ++sectors;
827}
828
829static u64 ata_tf_to_lba(struct ata_taskfile *tf)
830{
831 u64 sectors = 0;
832
833 sectors |= (tf->device & 0x0f) << 24;
834 sectors |= (tf->lbah & 0xff) << 16;
835 sectors |= (tf->lbam & 0xff) << 8;
836 sectors |= (tf->lbal & 0xff);
837
838 return ++sectors;
839}
840
/**
 *	ata_read_native_max_address_ext - LBA48 native max query
 *	@dev: Device to query
 *
 *	Perform an LBA48 size query upon the device in question. Return the
 *	actual LBA48 size or zero if the command fails.
 */

static u64 ata_read_native_max_address_ext(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	/* NOTE(review): 0x40 presumably sets the LBA bit in the device
	 * register — confirm against ATA_LBA in <linux/ata.h>.
	 */
	tf.device |= 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	/* device returns the max address; helper converts to a count */
	return ata_tf_to_lba48(&tf);
}
867
/**
 *	ata_read_native_max_address - LBA28 native max query
 *	@dev: Device to query
 *
 *	Perform an LBA28 size query upon the device in question. Return the
 *	actual LBA28 size or zero if the command fails.
 */

static u64 ata_read_native_max_address(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	/* NOTE(review): 0x40 presumably sets the LBA bit in the device
	 * register — confirm against ATA_LBA in <linux/ata.h>.
	 */
	tf.device |= 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	/* device returns the max address; helper converts to a count */
	return ata_tf_to_lba(&tf);
}
894
/**
 *	ata_set_native_max_address_ext - LBA48 native max set
 *	@dev: Device to query
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Perform an LBA48 size set max upon the device in question. Return the
 *	actual LBA48 size or zero if the command fails.
 */

static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	/* the device takes the highest addressable sector, which is
	 * one less than the sector count.
	 */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	/* split the 48-bit address across the low and HOB LBA registers */
	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	tf.hob_lbal = (new_sectors >> 24) & 0xff;
	tf.hob_lbam = (new_sectors >> 32) & 0xff;
	tf.hob_lbah = (new_sectors >> 40) & 0xff;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	/* read back the size the device actually accepted */
	return ata_tf_to_lba48(&tf);
}
932
/**
 *	ata_set_native_max_address - LBA28 native max set
 *	@dev: Device to query
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Perform an LBA28 size set max upon the device in question. Return the
 *	actual LBA28 size or zero if the command fails.
 */

static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	/* the device takes the highest addressable sector, which is
	 * one less than the sector count.
	 */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;

	/* LBA28: bits 27:24 go in the low nibble of the device register,
	 * alongside the 0x40 (LBA) bit.
	 */
	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;
	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	/* read back the size the device actually accepted */
	return ata_tf_to_lba(&tf);
}
966
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	Returns the (possibly updated) usable sector count.  The native
 *	size is only adopted when the ignore_hpa module parameter is set
 *	and the SET MAX command succeeds.
 */

static u64 ata_hpa_resize(struct ata_device *dev)
{
	u64 sectors = dev->n_sectors;
	u64 hpa_sectors;

	/* query the native (un-HPA'd) capacity */
	if (ata_id_has_lba48(dev->id))
		hpa_sectors = ata_read_native_max_address_ext(dev);
	else
		hpa_sectors = ata_read_native_max_address(dev);

	if (hpa_sectors > sectors) {
		/* an HPA is present: native size exceeds reported size */
		ata_dev_printk(dev, KERN_INFO,
			"Host Protected Area detected:\n"
			"\tcurrent size: %lld sectors\n"
			"\tnative size: %lld sectors\n",
			(long long)sectors, (long long)hpa_sectors);

		if (ata_ignore_hpa) {
			/* unlock the full capacity via SET MAX */
			if (ata_id_has_lba48(dev->id))
				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
			else
				hpa_sectors = ata_set_native_max_address(dev,
						hpa_sectors);

			if (hpa_sectors) {
				ata_dev_printk(dev, KERN_INFO, "native size "
					"increased to %lld sectors\n",
					(long long)hpa_sectors);
				return hpa_sectors;
			}
			/* SET MAX failed (hpa_sectors == 0): fall through
			 * and keep the BIOS-limited size.
			 */
		}
	} else if (hpa_sectors < sectors)
		/* native max below reported size is unexpected; warn only */
		ata_dev_printk(dev, KERN_WARNING, "%s 1: hpa sectors (%lld) "
			       "is smaller than sectors (%lld)\n", __FUNCTION__,
			       (long long)hpa_sectors, (long long)sectors);

	return sectors;
}
1014
2940740b
TH
1015static u64 ata_id_n_sectors(const u16 *id)
1016{
1017 if (ata_id_has_lba(id)) {
1018 if (ata_id_has_lba48(id))
1019 return ata_id_u64(id, 100);
1020 else
1021 return ata_id_u32(id, 60);
1022 } else {
1023 if (ata_id_current_chs_valid(id))
1024 return ata_id_u32(id, 57);
1025 else
1026 return id[1] * id[3] * id[6];
1027 }
1028}
1029
10305f0f
A
/**
 *	ata_id_to_dma_mode - Identify DMA mode from id block
 *	@dev: device to identify
 *	@unknown: mode to assume if we cannot tell
 *
 *	Set up the timing values for the device based upon the identify
 *	reported values for the DMA mode. This function is used by drivers
 *	which rely upon firmware configured modes, but wish to report the
 *	mode correctly when possible.
 *
 *	In addition we emit similarly formatted messages to the default
 *	ata_dev_set_mode handler, in order to provide consistency of
 *	presentation.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes: identify word 63 high byte holds the
	 * selected MWDMA mode; word 88 high byte holds the selected
	 * UDMA mode when word 53 bit 2 says word 88 is valid.
	 */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}
1071
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations
 *	by hosts that have no device-select register.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty */
}
1087
0baab86b 1088
1da177e4
LT
1089/**
1090 * ata_std_dev_select - Select device 0/1 on ATA bus
1091 * @ap: ATA channel to manipulate
1092 * @device: ATA device (numbered from zero) to select
1093 *
1094 * Use the method defined in the ATA specification to
1095 * make either device 0, or device 1, active on the
0baab86b
EF
1096 * ATA channel. Works with both PIO and MMIO.
1097 *
1098 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1099 *
1100 * LOCKING:
1101 * caller.
1102 */
1103
1104void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1105{
1106 u8 tmp;
1107
1108 if (device == 0)
1109 tmp = ATA_DEVICE_OBS;
1110 else
1111 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1112
0d5ff566 1113 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1114 ata_pause(ap); /* needed; also flushes, for mmio */
1115}
1116
1117/**
1118 * ata_dev_select - Select device 0/1 on ATA bus
1119 * @ap: ATA channel to manipulate
1120 * @device: ATA device (numbered from zero) to select
1121 * @wait: non-zero to wait for Status register BSY bit to clear
1122 * @can_sleep: non-zero if context allows sleeping
1123 *
1124 * Use the method defined in the ATA specification to
1125 * make either device 0, or device 1, active on the
1126 * ATA channel.
1127 *
1128 * This is a high-level version of ata_std_dev_select(),
1129 * which additionally provides the services of inserting
1130 * the proper pauses and status polling, where needed.
1131 *
1132 * LOCKING:
1133 * caller.
1134 */
1135
1136void ata_dev_select(struct ata_port *ap, unsigned int device,
1137 unsigned int wait, unsigned int can_sleep)
1138{
88574551 1139 if (ata_msg_probe(ap))
44877b4e
TH
1140 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1141 "device %u, wait %u\n", device, wait);
1da177e4
LT
1142
1143 if (wait)
1144 ata_wait_idle(ap);
1145
1146 ap->ops->dev_select(ap, device);
1147
1148 if (wait) {
1149 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
1150 msleep(150);
1151 ata_wait_idle(ap);
1152 }
1153}
1154
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.  The words chosen cover capabilities (49), field validity
 *	(53), DMA/PIO mode support (63/64/88), cycle times (75), version
 *	and command-set words (80-84), and hardware reset result (93).
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* Debug-only output; DPRINTK compiles away in non-debug builds */
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1193
cb95d562
TH
1194/**
1195 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1196 * @id: IDENTIFY data to compute xfer mask from
1197 *
1198 * Compute the xfermask for this device. This is not as trivial
1199 * as it seems if we must consider early devices correctly.
1200 *
1201 * FIXME: pre IDE drive timing (do we care ?).
1202 *
1203 * LOCKING:
1204 * None.
1205 *
1206 * RETURNS:
1207 * Computed xfermask
1208 */
1209static unsigned int ata_id_xfermask(const u16 *id)
1210{
1211 unsigned int pio_mask, mwdma_mask, udma_mask;
1212
1213 /* Usual case. Word 53 indicates word 64 is valid */
1214 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1215 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1216 pio_mask <<= 3;
1217 pio_mask |= 0x7;
1218 } else {
1219 /* If word 64 isn't valid then Word 51 high byte holds
1220 * the PIO timing number for the maximum. Turn it into
1221 * a mask.
1222 */
7a0f1c8a 1223 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
1224 if (mode < 5) /* Valid PIO range */
1225 pio_mask = (2 << mode) - 1;
1226 else
1227 pio_mask = 1;
cb95d562
TH
1228
1229 /* But wait.. there's more. Design your standards by
1230 * committee and you too can get a free iordy field to
1231 * process. However its the speeds not the modes that
1232 * are supported... Note drivers using the timing API
1233 * will get this right anyway
1234 */
1235 }
1236
1237 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1238
b352e57d
AC
1239 if (ata_id_is_cfa(id)) {
1240 /*
1241 * Process compact flash extended modes
1242 */
1243 int pio = id[163] & 0x7;
1244 int dma = (id[163] >> 3) & 7;
1245
1246 if (pio)
1247 pio_mask |= (1 << 5);
1248 if (pio > 1)
1249 pio_mask |= (1 << 6);
1250 if (dma)
1251 mwdma_mask |= (1 << 3);
1252 if (dma > 1)
1253 mwdma_mask |= (1 << 4);
1254 }
1255
fb21f0d0
TH
1256 udma_mask = 0;
1257 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1258 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1259
1260 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1261}
1262
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	/* ata_port_flush_task() is draining the port task; don't requeue */
	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	/* Rebind the single per-port work item to @fn before queueing */
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
1298
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* Set the flag under the port lock so that a concurrently running
	 * task observes it and ata_port_queue_task() refuses to requeue.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		/* Timer already fired; wait for the work item to finish */
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		cancel_work_sync(&ap->port_task.work);
	}

	/* Allow future queueing again */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1341
7102d230 1342static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1343{
77853bf2 1344 struct completion *waiting = qc->private_data;
a2a7a662 1345
a2a7a662 1346 complete(waiting);
a2a7a662
TH
1347}
1348
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;	/* saved for the timeout message */
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* Save and clear the port's active-command bookkeeping so the
	 * internal command runs alone; restored before returning.
	 */
	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		/* total byte count is the sum of all sg segment lengths */
		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		/* timed out */
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* AC_ERR_OTHER is a catch-all; drop it when a more
		 * specific error bit is present. */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1520
2432697b 1521/**
33480a0e 1522 * ata_exec_internal - execute libata internal command
2432697b
TH
1523 * @dev: Device to which the command is sent
1524 * @tf: Taskfile registers for the command and the result
1525 * @cdb: CDB for packet command
1526 * @dma_dir: Data tranfer direction of the command
1527 * @buf: Data buffer of the command
1528 * @buflen: Length of data buffer
1529 *
1530 * Wrapper around ata_exec_internal_sg() which takes simple
1531 * buffer instead of sg list.
1532 *
1533 * LOCKING:
1534 * None. Should be called with kernel context, might sleep.
1535 *
1536 * RETURNS:
1537 * Zero on success, AC_ERR_* mask on failure
1538 */
1539unsigned ata_exec_internal(struct ata_device *dev,
1540 struct ata_taskfile *tf, const u8 *cdb,
1541 int dma_dir, void *buf, unsigned int buflen)
1542{
33480a0e
TH
1543 struct scatterlist *psg = NULL, sg;
1544 unsigned int n_elem = 0;
2432697b 1545
33480a0e
TH
1546 if (dma_dir != DMA_NONE) {
1547 WARN_ON(!buf);
1548 sg_init_one(&sg, buf, buflen);
1549 psg = &sg;
1550 n_elem++;
1551 }
2432697b 1552
33480a0e 1553 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1554}
1555
977e6b9f
TH
1556/**
1557 * ata_do_simple_cmd - execute simple internal command
1558 * @dev: Device to which the command is sent
1559 * @cmd: Opcode to execute
1560 *
1561 * Execute a 'simple' command, that only consists of the opcode
1562 * 'cmd' itself, without filling any other registers
1563 *
1564 * LOCKING:
1565 * Kernel thread context (may sleep).
1566 *
1567 * RETURNS:
1568 * Zero on success, AC_ERR_* mask on failure
e58eb583 1569 */
77b08fb5 1570unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1571{
1572 struct ata_taskfile tf;
e58eb583
TH
1573
1574 ata_tf_init(dev, &tf);
1575
1576 tf.command = cmd;
1577 tf.flags |= ATA_TFLAG_DEVICE;
1578 tf.protocol = ATA_PROT_NODATA;
1579
977e6b9f 1580 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1581}
1582
1bc4ccff
AC
1583/**
1584 * ata_pio_need_iordy - check if iordy needed
1585 * @adev: ATA device
1586 *
1587 * Check if the current speed of the device requires IORDY. Used
1588 * by various controllers for chip configuration.
1589 */
a617c09f 1590
1bc4ccff
AC
1591unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1592{
432729f0
AC
1593 /* Controller doesn't support IORDY. Probably a pointless check
1594 as the caller should know this */
1595 if (adev->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1596 return 0;
432729f0
AC
1597 /* PIO3 and higher it is mandatory */
1598 if (adev->pio_mode > XFER_PIO_2)
1599 return 1;
1600 /* We turn it on when possible */
1601 if (ata_id_has_iordy(adev->id))
1bc4ccff 1602 return 1;
432729f0
AC
1603 return 0;
1604}
2e9edbf8 1605
432729f0
AC
1606/**
1607 * ata_pio_mask_no_iordy - Return the non IORDY mask
1608 * @adev: ATA device
1609 *
1610 * Compute the highest mode possible if we are not using iordy. Return
1611 * -1 if no iordy mode is available.
1612 */
a617c09f 1613
432729f0
AC
1614static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1615{
1bc4ccff 1616 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1617 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1618 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1619 /* Is the speed faster than the drive allows non IORDY ? */
1620 if (pio) {
1621 /* This is cycle times not frequency - watch the logic! */
1622 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1623 return 3 << ATA_SHIFT_PIO;
1624 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1625 }
1626 }
432729f0 1627 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1628}
1629
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;	/* human-readable cause for the failure path */
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	/* Pick the IDENTIFY variant matching the (assumed) device class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* Word 2 values 0x37c8/0x738c signal "powered up in standby" */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1800
3373efd8 1801static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1802{
3373efd8 1803 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1804}
1805
/* Configure NCQ for @dev and format a short description of the result
 * into @desc (at most @desc_sz bytes) for the probe-time log message.
 * Sets ATA_DFLAG_NCQ only when both the drive and the port support NCQ
 * and the drive is not blacklisted for it.
 */
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	/* hdepth: queue depth the host can do; ddepth: what the drive claims */
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		/* drive has no NCQ at all - nothing to report */
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
1830
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set _SDD */
	rc = ata_acpi_push_id(dev);
	if (rc) {
		/* non-fatal: warn and carry on configuring */
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			rc);
	}

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* word 59 bit 8 set -> multi-sector count in low byte valid */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH EXT only matters past the 28-bit limit */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			if (ata_id_hpa_enabled(dev->id))
				dev->n_sectors = ata_hpa_resize(dev);

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	/* limit ATAPI DMA to R/W commands only */
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
		dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;

	/* give the low-level driver a chance to apply its own fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
2066
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40, unconditionally.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2079
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port (unused; present so this matches the ->cable_detect
 *	method signature)
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80, unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port (unused; present so this matches the ->cable_detect
 *	method signature)
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK, unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port (unused; present so this matches the ->cable_detect
 *	method signature)
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA, unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.  Each device gets ATA_PROBE_MAX_TRIES attempts; on
 *	repeated failures the link speed and transfer mode are stepped
 *	down before retrying, and a device that exhausts its tries is
 *	disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* phy_reset may have marked the port disabled; re-enable it */
	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	for(i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc)
		goto fail;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev points at the device whose step failed */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ap);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* only clears the flag; device classes are left untouched */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *	Silently does nothing if SStatus cannot be read (e.g. no SCR
 *	access on this port).
 *
 *	LOCKING:
 *	None.
 */
void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return;
	/* SStatus was readable, so SControl should be too */
	sata_scr_read(ap, SCR_CONTROL, &scontrol);

	if (ata_port_online(ap)) {
		/* bits 7:4 of SStatus hold the current link speed (SPD) */
		tmp = (sstatus >> 4) & 0xf;
		ata_port_printk(ap, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_port_printk(ap, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.  On success the port is marked
 *	probed and @ap->cbl is set to ATA_CBL_SATA; on failure the
 *	port is disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset (DET = 1 in SControl) */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset (DET back to 0) */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary;
	 * DET == 1 means device presence detected but no communication */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device behind the link to drop BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.  If the PHY reset left the port
 *	disabled, the bus reset is skipped.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
ebdfca6e
AC
2367/**
2368 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2369 * @adev: device
2370 *
2371 * Obtain the other device on the same cable, or if none is
2372 * present NULL is returned
2373 */
2e9edbf8 2374
3373efd8 2375struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2376{
3373efd8 2377 struct ata_port *ap = adev->ap;
ebdfca6e 2378 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2379 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2380 return NULL;
2381 return pair;
2382}
2383
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* clear both device slots and flag the port itself */
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@ap: Port to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @ap downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	/* drop the highest speed currently allowed by the limit mask */
	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* also cap the mask below the speed the link currently runs at
	 * (SStatus bits 7:4) */
	spd = (sstatus >> 4) & 0xf;
	if (spd <= 1)
		return -EINVAL;
	spd--;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
3c567b7d 2449static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2450{
2451 u32 spd, limit;
2452
2453 if (ap->sata_spd_limit == UINT_MAX)
2454 limit = 0;
2455 else
2456 limit = fls(ap->sata_spd_limit);
2457
2458 spd = (*scontrol >> 4) & 0xf;
2459 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2460
2461 return spd != limit;
2462}
2463
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@ap: Port in question
 *
 *	Test whether the spd limit in SControl matches
 *	@ap->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_port *ap)
{
	u32 scontrol;

	/* no SCR access means no spd configuration to do */
	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(ap, &scontrol);
}
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@ap: Port to set SATA spd for
 *
 *	Set SATA spd of @ap according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	/* __sata_set_spd_needed() also updates scontrol in place */
	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 *
 * Column order matches struct ata_timing: mode, setup, act8b, rec8b,
 * cyc8b, active, recover, cycle, udma.  The table is terminated by a
 * sentinel entry with mode == 0xFF.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
/* ENOUGH(): ceiling division - smallest clock count covering v ns.
 * EZ(): like ENOUGH() but maps 0 ("not specified") to 0.
 * NOTE: both evaluate their arguments more than once; callers pass
 * side-effect-free expressions only.
 */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

/* Convert nanosecond timings in *t to bus-clock counts in *q, given a
 * command cycle of T picoseconds and a UDMA cycle of UT picoseconds
 * (hence the * 1000 on each ns value).  t and q may alias.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2583void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2584 struct ata_timing *m, unsigned int what)
2585{
2586 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2587 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2588 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2589 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2590 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2591 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2592 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2593 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2594}
2595
2596static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2597{
2598 const struct ata_timing *t;
2599
2600 for (t = ata_timing; t->mode != speed; t++)
91190758 2601 if (t->mode == 0xFF)
452503f9 2602 return NULL;
2e9edbf8 2603 return t;
452503f9
AC
2604}
2605
/* Compute the quantized bus timing for @adev running at mode @speed.
 * @t receives the result; @T is the command cycle in picoseconds and
 * @UT the UDMA cycle in picoseconds.  Returns 0 on success or -EINVAL
 * if @speed has no table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 * (Recurses once: any DMA mode value is > XFER_PIO_6, the PIO
	 * recursion then skips this branch.)
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector, optionally ORed with
 *	ATA_DNXFER_QUIET to suppress the warning printk
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure (-ENOENT if no further
 *	downgrade is possible)
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest remaining DMA mode, UDMA before MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* refuse to drop the last PIO mode or to "change" to the same mask */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
/* Issue SET FEATURES - XFER MODE to @dev for its already-chosen
 * xfer_mode/xfer_shift, then revalidate the device.  Returns 0 on
 * success, -EIO if the device rejected the command, or the revalidate
 * error code.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* tell EH-side revalidation that a SET XFERMODE just happened */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out paramter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.). If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;


	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		/* pick the best PIO mode and the best DMA mode separately */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
04351821
A
2903/**
2904 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2905 * @ap: port on which timings will be programmed
2906 * @r_failed_dev: out paramter for failed device
2907 *
2908 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2909 * ata_set_mode() fails, pointer to the failing device is
2910 * returned in @r_failed_dev.
2911 *
2912 * LOCKING:
2913 * PCI/etc. bus probe sem.
2914 *
2915 * RETURNS:
2916 * 0 on success, negative errno otherwise
2917 */
2918int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2919{
2920 /* has private set_mode? */
2921 if (ap->ops->set_mode)
2922 return ap->ops->set_mode(ap, r_failed_dev);
2923 return ata_do_set_mode(ap, r_failed_dev);
2924}
2925
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.  Loads the registers, then writes the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (after which a "slow to respond"
 *	warning is printed and polling continues)
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -ENODEV if the status reads as 0xff (no device
 *	present), -EBUSY on timeout.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* phase 1: poll quietly until the impatience timeout */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* phase 2: keep polling until the overall timeout */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.  A "slow to respond" warning is printed once if the
 *	wait exceeds 5 seconds and more than 3 seconds remain.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -ENODEV if the link is offline and status reads
 *	0xff, -EBUSY once @deadline passes.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		if (!ata_port_online(ap) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
/* Post-reset wait: for each device present in @devmask, wait for BSY
 * to clear (with register-access settle time for device 1), then
 * re-run the device-select dance.  Returns 0 on success, -ENODEV if a
 * waited-for device went away, or another negative errno from
 * ata_wait_ready().
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			/* -ENODEV is remembered but not fatal yet */
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
/* Perform a software reset (SRST) on the bus: pulse ATA_SRST in the
 * device control register, wait the magic 150ms, bail with -ENODEV if
 * the bus floats at 0xFF, then hand off to ata_bus_post_reset() for
 * the per-device BSY waits.
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3139/**
3140 * ata_bus_reset - reset host port and associated ATA channel
3141 * @ap: port to reset
3142 *
3143 * This is typically the first time we actually start issuing
3144 * commands to the ATA channel. We wait for BSY to clear, then
3145 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3146 * result. Determine what devices, if any, are on the channel
3147 * by looking at the device 0/1 error register. Look at the signature
3148 * stored in each device's taskfile registers, to determine if
3149 * the device is ATA or ATAPI.
3150 *
3151 * LOCKING:
0cba632b 3152 * PCI/etc. bus probe sem.
cca3974e 3153 * Obtains host lock.
1da177e4
LT
3154 *
3155 * SIDE EFFECTS:
198e0fed 3156 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3157 */
3158
3159void ata_bus_reset(struct ata_port *ap)
3160{
3161 struct ata_ioports *ioaddr = &ap->ioaddr;
3162 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3163 u8 err;
aec5c3c1 3164 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3165 int rc;
1da177e4 3166
44877b4e 3167 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3168
3169 /* determine if device 0/1 are present */
3170 if (ap->flags & ATA_FLAG_SATA_RESET)
3171 dev0 = 1;
3172 else {
3173 dev0 = ata_devchk(ap, 0);
3174 if (slave_possible)
3175 dev1 = ata_devchk(ap, 1);
3176 }
3177
3178 if (dev0)
3179 devmask |= (1 << 0);
3180 if (dev1)
3181 devmask |= (1 << 1);
3182
3183 /* select device 0 again */
3184 ap->ops->dev_select(ap, 0);
3185
3186 /* issue bus reset */
9b89391c
TH
3187 if (ap->flags & ATA_FLAG_SRST) {
3188 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3189 if (rc && rc != -ENODEV)
aec5c3c1 3190 goto err_out;
9b89391c 3191 }
1da177e4
LT
3192
3193 /*
3194 * determine by signature whether we have ATA or ATAPI devices
3195 */
b4dc7623 3196 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 3197 if ((slave_possible) && (err != 0x81))
b4dc7623 3198 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
3199
3200 /* re-enable interrupts */
83625006 3201 ap->ops->irq_on(ap);
1da177e4
LT
3202
3203 /* is double-select really necessary? */
3204 if (ap->device[1].class != ATA_DEV_NONE)
3205 ap->ops->dev_select(ap, 1);
3206 if (ap->device[0].class != ATA_DEV_NONE)
3207 ap->ops->dev_select(ap, 0);
3208
3209 /* if no devices were detected, disable this port */
3210 if ((ap->device[0].class == ATA_DEV_NONE) &&
3211 (ap->device[1].class == ATA_DEV_NONE))
3212 goto err_out;
3213
3214 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3215 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3216 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3217 }
3218
3219 DPRINTK("EXIT\n");
3220 return;
3221
3222err_out:
f15a1daf 3223 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
3224 ap->ops->port_disable(ap);
3225
3226 DPRINTK("EXIT\n");
3227}
3228
d7bb4cc7
TH
3229/**
3230 * sata_phy_debounce - debounce SATA phy status
3231 * @ap: ATA port to debounce SATA phy status for
3232 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3233 * @deadline: deadline jiffies for the operation
d7bb4cc7
TH
3234 *
3235 * Make sure SStatus of @ap reaches stable state, determined by
3236 * holding the same value where DET is not 1 for @duration polled
3237 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3238 * beginning of the stable state. Because DET gets stuck at 1 on
3239 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3240 * until timeout then returns 0 if DET is stable at 1.
3241 *
d4b2bab4
TH
3242 * @timeout is further limited by @deadline. The sooner of the
3243 * two is used.
3244 *
d7bb4cc7
TH
3245 * LOCKING:
3246 * Kernel thread context (may sleep)
3247 *
3248 * RETURNS:
3249 * 0 on success, -errno on failure.
3250 */
d4b2bab4
TH
3251int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3252 unsigned long deadline)
7a7921e8 3253{
d7bb4cc7 3254 unsigned long interval_msec = params[0];
d4b2bab4
TH
3255 unsigned long duration = msecs_to_jiffies(params[1]);
3256 unsigned long last_jiffies, t;
d7bb4cc7
TH
3257 u32 last, cur;
3258 int rc;
3259
d4b2bab4
TH
3260 t = jiffies + msecs_to_jiffies(params[2]);
3261 if (time_before(t, deadline))
3262 deadline = t;
3263
d7bb4cc7
TH
3264 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3265 return rc;
3266 cur &= 0xf;
3267
3268 last = cur;
3269 last_jiffies = jiffies;
3270
3271 while (1) {
3272 msleep(interval_msec);
3273 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3274 return rc;
3275 cur &= 0xf;
3276
3277 /* DET stable? */
3278 if (cur == last) {
d4b2bab4 3279 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3280 continue;
3281 if (time_after(jiffies, last_jiffies + duration))
3282 return 0;
3283 continue;
3284 }
3285
3286 /* unstable, start over */
3287 last = cur;
3288 last_jiffies = jiffies;
3289
d4b2bab4
TH
3290 /* check deadline */
3291 if (time_after(jiffies, deadline))
d7bb4cc7
TH
3292 return -EBUSY;
3293 }
3294}
3295
3296/**
3297 * sata_phy_resume - resume SATA phy
3298 * @ap: ATA port to resume SATA phy for
3299 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3300 * @deadline: deadline jiffies for the operation
d7bb4cc7
TH
3301 *
3302 * Resume SATA phy of @ap and debounce it.
3303 *
3304 * LOCKING:
3305 * Kernel thread context (may sleep)
3306 *
3307 * RETURNS:
3308 * 0 on success, -errno on failure.
3309 */
d4b2bab4
TH
3310int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3311 unsigned long deadline)
d7bb4cc7
TH
3312{
3313 u32 scontrol;
81952c54
TH
3314 int rc;
3315
3316 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3317 return rc;
7a7921e8 3318
852ee16a 3319 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
3320
3321 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3322 return rc;
7a7921e8 3323
d7bb4cc7
TH
3324 /* Some PHYs react badly if SStatus is pounded immediately
3325 * after resuming. Delay 200ms before debouncing.
3326 */
3327 msleep(200);
7a7921e8 3328
d4b2bab4 3329 return sata_phy_debounce(ap, params, deadline);
7a7921e8
TH
3330}
3331
f5914a46
TH
3332/**
3333 * ata_std_prereset - prepare for reset
3334 * @ap: ATA port to be reset
d4b2bab4 3335 * @deadline: deadline jiffies for the operation
f5914a46 3336 *
b8cffc6a
TH
3337 * @ap is about to be reset. Initialize it. Failure from
3338 * prereset makes libata abort whole reset sequence and give up
3339 * that port, so prereset should be best-effort. It does its
3340 * best to prepare for reset sequence but if things go wrong, it
3341 * should just whine, not fail.
f5914a46
TH
3342 *
3343 * LOCKING:
3344 * Kernel thread context (may sleep)
3345 *
3346 * RETURNS:
3347 * 0 on success, -errno otherwise.
3348 */
d4b2bab4 3349int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
f5914a46
TH
3350{
3351 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 3352 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3353 int rc;
3354
31daabda 3355 /* handle link resume */
28324304
TH
3356 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3357 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3358 ehc->i.action |= ATA_EH_HARDRESET;
3359
f5914a46
TH
3360 /* if we're about to do hardreset, nothing more to do */
3361 if (ehc->i.action & ATA_EH_HARDRESET)
3362 return 0;
3363
3364 /* if SATA, resume phy */
3365 if (ap->cbl == ATA_CBL_SATA) {
d4b2bab4 3366 rc = sata_phy_resume(ap, timing, deadline);
b8cffc6a
TH
3367 /* whine about phy resume failure but proceed */
3368 if (rc && rc != -EOPNOTSUPP)
f5914a46
TH
3369 ata_port_printk(ap, KERN_WARNING, "failed to resume "
3370 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3371 }
3372
3373 /* Wait for !BSY if the controller can wait for the first D2H
3374 * Reg FIS and we don't know that no device is attached.
3375 */
b8cffc6a
TH
3376 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
3377 rc = ata_wait_ready(ap, deadline);
6dffaf61 3378 if (rc && rc != -ENODEV) {
b8cffc6a
TH
3379 ata_port_printk(ap, KERN_WARNING, "device not ready "
3380 "(errno=%d), forcing hardreset\n", rc);
3381 ehc->i.action |= ATA_EH_HARDRESET;
3382 }
3383 }
f5914a46
TH
3384
3385 return 0;
3386}
3387
c2bd5804
TH
3388/**
3389 * ata_std_softreset - reset host port via ATA SRST
3390 * @ap: port to reset
c2bd5804 3391 * @classes: resulting classes of attached devices
d4b2bab4 3392 * @deadline: deadline jiffies for the operation
c2bd5804 3393 *
52783c5d 3394 * Reset host port using ATA SRST.
c2bd5804
TH
3395 *
3396 * LOCKING:
3397 * Kernel thread context (may sleep)
3398 *
3399 * RETURNS:
3400 * 0 on success, -errno otherwise.
3401 */
d4b2bab4
TH
3402int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3403 unsigned long deadline)
c2bd5804
TH
3404{
3405 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3406 unsigned int devmask = 0;
3407 int rc;
c2bd5804
TH
3408 u8 err;
3409
3410 DPRINTK("ENTER\n");
3411
81952c54 3412 if (ata_port_offline(ap)) {
3a39746a
TH
3413 classes[0] = ATA_DEV_NONE;
3414 goto out;
3415 }
3416
c2bd5804
TH
3417 /* determine if device 0/1 are present */
3418 if (ata_devchk(ap, 0))
3419 devmask |= (1 << 0);
3420 if (slave_possible && ata_devchk(ap, 1))
3421 devmask |= (1 << 1);
3422
c2bd5804
TH
3423 /* select device 0 again */
3424 ap->ops->dev_select(ap, 0);
3425
3426 /* issue bus reset */
3427 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3428 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c
TH
3429 /* if link is occupied, -ENODEV too is an error */
3430 if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
d4b2bab4
TH
3431 ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3432 return rc;
c2bd5804
TH
3433 }
3434
3435 /* determine by signature whether we have ATA or ATAPI devices */
3436 classes[0] = ata_dev_try_classify(ap, 0, &err);
3437 if (slave_possible && err != 0x81)
3438 classes[1] = ata_dev_try_classify(ap, 1, &err);
3439
3a39746a 3440 out:
c2bd5804
TH
3441 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3442 return 0;
3443}
3444
3445/**
b6103f6d 3446 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 3447 * @ap: port to reset
b6103f6d 3448 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3449 * @deadline: deadline jiffies for the operation
c2bd5804
TH
3450 *
3451 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
3452 *
3453 * LOCKING:
3454 * Kernel thread context (may sleep)
3455 *
3456 * RETURNS:
3457 * 0 on success, -errno otherwise.
3458 */
d4b2bab4
TH
3459int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3460 unsigned long deadline)
c2bd5804 3461{
852ee16a 3462 u32 scontrol;
81952c54 3463 int rc;
852ee16a 3464
c2bd5804
TH
3465 DPRINTK("ENTER\n");
3466
3c567b7d 3467 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
3468 /* SATA spec says nothing about how to reconfigure
3469 * spd. To be on the safe side, turn off phy during
3470 * reconfiguration. This works for at least ICH7 AHCI
3471 * and Sil3124.
3472 */
81952c54 3473 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3474 goto out;
81952c54 3475
a34b6fc0 3476 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
3477
3478 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 3479 goto out;
1c3fae4d 3480
3c567b7d 3481 sata_set_spd(ap);
1c3fae4d
TH
3482 }
3483
3484 /* issue phy wake/reset */
81952c54 3485 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3486 goto out;
81952c54 3487
852ee16a 3488 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
3489
3490 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 3491 goto out;
c2bd5804 3492
1c3fae4d 3493 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3494 * 10.4.2 says at least 1 ms.
3495 */
3496 msleep(1);
3497
1c3fae4d 3498 /* bring phy back */
d4b2bab4 3499 rc = sata_phy_resume(ap, timing, deadline);
b6103f6d
TH
3500 out:
3501 DPRINTK("EXIT, rc=%d\n", rc);
3502 return rc;
3503}
3504
3505/**
3506 * sata_std_hardreset - reset host port via SATA phy reset
3507 * @ap: port to reset
3508 * @class: resulting class of attached device
d4b2bab4 3509 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3510 *
3511 * SATA phy-reset host port using DET bits of SControl register,
3512 * wait for !BSY and classify the attached device.
3513 *
3514 * LOCKING:
3515 * Kernel thread context (may sleep)
3516 *
3517 * RETURNS:
3518 * 0 on success, -errno otherwise.
3519 */
d4b2bab4
TH
3520int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3521 unsigned long deadline)
b6103f6d
TH
3522{
3523 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3524 int rc;
3525
3526 DPRINTK("ENTER\n");
3527
3528 /* do hardreset */
d4b2bab4 3529 rc = sata_port_hardreset(ap, timing, deadline);
b6103f6d
TH
3530 if (rc) {
3531 ata_port_printk(ap, KERN_ERR,
3532 "COMRESET failed (errno=%d)\n", rc);
3533 return rc;
3534 }
c2bd5804 3535
c2bd5804 3536 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3537 if (ata_port_offline(ap)) {
c2bd5804
TH
3538 *class = ATA_DEV_NONE;
3539 DPRINTK("EXIT, link offline\n");
3540 return 0;
3541 }
3542
34fee227
TH
3543 /* wait a while before checking status, see SRST for more info */
3544 msleep(150);
3545
d4b2bab4 3546 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3547 /* link occupied, -ENODEV too is an error */
3548 if (rc) {
f15a1daf 3549 ata_port_printk(ap, KERN_ERR,
d4b2bab4
TH
3550 "COMRESET failed (errno=%d)\n", rc);
3551 return rc;
c2bd5804
TH
3552 }
3553
3a39746a
TH
3554 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3555
c2bd5804
TH
3556 *class = ata_dev_try_classify(ap, 0, NULL);
3557
3558 DPRINTK("EXIT, class=%u\n", *class);
3559 return 0;
3560}
3561
3562/**
3563 * ata_std_postreset - standard postreset callback
3564 * @ap: the target ata_port
3565 * @classes: classes of attached devices
3566 *
3567 * This function is invoked after a successful reset. Note that
3568 * the device might have been reset more than once using
3569 * different reset methods before postreset is invoked.
c2bd5804 3570 *
c2bd5804
TH
3571 * LOCKING:
3572 * Kernel thread context (may sleep)
3573 */
3574void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3575{
dc2b3515
TH
3576 u32 serror;
3577
c2bd5804
TH
3578 DPRINTK("ENTER\n");
3579
c2bd5804 3580 /* print link status */
81952c54 3581 sata_print_link_status(ap);
c2bd5804 3582
dc2b3515
TH
3583 /* clear SError */
3584 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3585 sata_scr_write(ap, SCR_ERROR, serror);
3586
3a39746a 3587 /* re-enable interrupts */
83625006
AI
3588 if (!ap->ops->error_handler)
3589 ap->ops->irq_on(ap);
c2bd5804
TH
3590
3591 /* is double-select really necessary? */
3592 if (classes[0] != ATA_DEV_NONE)
3593 ap->ops->dev_select(ap, 1);
3594 if (classes[1] != ATA_DEV_NONE)
3595 ap->ops->dev_select(ap, 0);
3596
3a39746a
TH
3597 /* bail out if no device is present */
3598 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3599 DPRINTK("EXIT, no device\n");
3600 return;
3601 }
3602
3603 /* set up device control */
0d5ff566
TH
3604 if (ap->ioaddr.ctl_addr)
3605 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3606
3607 DPRINTK("EXIT\n");
3608}
3609
623a3128
TH
3610/**
3611 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3612 * @dev: device to compare against
3613 * @new_class: class of the new device
3614 * @new_id: IDENTIFY page of the new device
3615 *
3616 * Compare @new_class and @new_id against @dev and determine
3617 * whether @dev is the device indicated by @new_class and
3618 * @new_id.
3619 *
3620 * LOCKING:
3621 * None.
3622 *
3623 * RETURNS:
3624 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3625 */
3373efd8
TH
3626static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3627 const u16 *new_id)
623a3128
TH
3628{
3629 const u16 *old_id = dev->id;
a0cf733b
TH
3630 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3631 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3632
3633 if (dev->class != new_class) {
f15a1daf
TH
3634 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3635 dev->class, new_class);
623a3128
TH
3636 return 0;
3637 }
3638
a0cf733b
TH
3639 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3640 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3641 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3642 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3643
3644 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3645 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3646 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3647 return 0;
3648 }
3649
3650 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3651 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3652 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3653 return 0;
3654 }
3655
623a3128
TH
3656 return 1;
3657}
3658
3659/**
fe30911b 3660 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3661 * @dev: target ATA device
bff04647 3662 * @readid_flags: read ID flags
623a3128
TH
3663 *
3664 * Re-read IDENTIFY page and make sure @dev is still attached to
3665 * the port.
3666 *
3667 * LOCKING:
3668 * Kernel thread context (may sleep)
3669 *
3670 * RETURNS:
3671 * 0 on success, negative errno otherwise
3672 */
fe30911b 3673int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3674{
5eb45c02 3675 unsigned int class = dev->class;
f15a1daf 3676 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3677 int rc;
3678
fe635c7e 3679 /* read ID data */
bff04647 3680 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3681 if (rc)
fe30911b 3682 return rc;
623a3128
TH
3683
3684 /* is the device still there? */
fe30911b
TH
3685 if (!ata_dev_same_device(dev, class, id))
3686 return -ENODEV;
623a3128 3687
fe635c7e 3688 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3689 return 0;
3690}
3691
3692/**
3693 * ata_dev_revalidate - Revalidate ATA device
3694 * @dev: device to revalidate
3695 * @readid_flags: read ID flags
3696 *
3697 * Re-read IDENTIFY page, make sure @dev is still attached to the
3698 * port and reconfigure it according to the new IDENTIFY page.
3699 *
3700 * LOCKING:
3701 * Kernel thread context (may sleep)
3702 *
3703 * RETURNS:
3704 * 0 on success, negative errno otherwise
3705 */
3706int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3707{
6ddcd3b0 3708 u64 n_sectors = dev->n_sectors;
fe30911b
TH
3709 int rc;
3710
3711 if (!ata_dev_enabled(dev))
3712 return -ENODEV;
3713
3714 /* re-read ID */
3715 rc = ata_dev_reread_id(dev, readid_flags);
3716 if (rc)
3717 goto fail;
623a3128
TH
3718
3719 /* configure device according to the new ID */
efdaedc4 3720 rc = ata_dev_configure(dev);
6ddcd3b0
TH
3721 if (rc)
3722 goto fail;
3723
3724 /* verify n_sectors hasn't changed */
3725 if (dev->class == ATA_DEV_ATA && dev->n_sectors != n_sectors) {
3726 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3727 "%llu != %llu\n",
3728 (unsigned long long)n_sectors,
3729 (unsigned long long)dev->n_sectors);
3730 rc = -ENODEV;
3731 goto fail;
3732 }
3733
3734 return 0;
623a3128
TH
3735
3736 fail:
f15a1daf 3737 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3738 return rc;
3739}
3740
6919a0a6
AC
/* One quirk entry: a model (and optionally a firmware revision) that
 * needs the workarounds in @horkage (ATA_HORKAGE_* flags).
 */
struct ata_blacklist_entry {
	const char *model_num;		/* IDENTIFY model string to match */
	const char *model_rev;		/* firmware revision, NULL = any */
	unsigned long horkage;		/* ATA_HORKAGE_* flags to apply */
};
3746
3747static const struct ata_blacklist_entry ata_device_blacklist [] = {
3748 /* Devices with DMA related problems under Linux */
3749 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3750 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3751 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3752 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3753 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3754 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3755 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3756 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3757 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3758 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3759 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3760 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3761 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3762 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3763 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3764 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3765 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3766 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3767 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3768 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3769 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3770 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3771 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3772 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3773 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3774 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3775 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3776 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3777 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
39f19886 3778 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
5acd50f6 3779 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
6919a0a6 3780
18d6e9d5 3781 /* Weird ATAPI devices */
6f23a31d
AL
3782 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 |
3783 ATA_HORKAGE_DMA_RW_ONLY },
18d6e9d5 3784
6919a0a6
AC
3785 /* Devices we expect to fail diagnostics */
3786
3787 /* Devices where NCQ should be avoided */
3788 /* NCQ is slow */
3789 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3790 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3791 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30
PR
3792 /* NCQ is broken */
3793 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
471e44b2 3794 { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ },
96442925
JA
3795 /* NCQ hard hangs device under heavier load, needs hard power cycle */
3796 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
36e337d0
RH
3797 /* Blacklist entries taken from Silicon Image 3124/3132
3798 Windows driver .inf file - also several Linux problem reports */
3799 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3800 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3801 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
bd9c5a39
TH
3802 /* Drives which do spurious command completion */
3803 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
2f8fcebb
TH
3804 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
3805 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
6919a0a6
AC
3806
3807 /* Devices with NCQ limits */
3808
3809 /* End Marker */
3810 { }
1da177e4 3811};
2e9edbf8 3812
6919a0a6 3813unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3814{
8bfa79fc
TH
3815 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3816 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3817 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3818
8bfa79fc
TH
3819 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3820 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3821
6919a0a6 3822 while (ad->model_num) {
8bfa79fc 3823 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3824 if (ad->model_rev == NULL)
3825 return ad->horkage;
8bfa79fc 3826 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3827 return ad->horkage;
f4b15fef 3828 }
6919a0a6 3829 ad++;
f4b15fef 3830 }
1da177e4
LT
3831 return 0;
3832}
3833
6919a0a6
AC
3834static int ata_dma_blacklisted(const struct ata_device *dev)
3835{
3836 /* We don't support polling DMA.
3837 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3838 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3839 */
3840 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3841 (dev->flags & ATA_DFLAG_CDB_INTR))
3842 return 1;
3843 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3844}
3845
a6d5a51c
TH
3846/**
3847 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3848 * @dev: Device to compute xfermask for
3849 *
acf356b1
TH
3850 * Compute supported xfermask of @dev and store it in
3851 * dev->*_mask. This function is responsible for applying all
3852 * known limits including host controller limits, device
3853 * blacklist, etc...
a6d5a51c
TH
3854 *
3855 * LOCKING:
3856 * None.
a6d5a51c 3857 */
3373efd8 3858static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3859{
3373efd8 3860 struct ata_port *ap = dev->ap;
cca3974e 3861 struct ata_host *host = ap->host;
a6d5a51c 3862 unsigned long xfer_mask;
1da177e4 3863
37deecb5 3864 /* controller modes available */
565083e1
TH
3865 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3866 ap->mwdma_mask, ap->udma_mask);
3867
8343f889 3868 /* drive modes available */
37deecb5
TH
3869 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3870 dev->mwdma_mask, dev->udma_mask);
3871 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3872
b352e57d
AC
3873 /*
3874 * CFA Advanced TrueIDE timings are not allowed on a shared
3875 * cable
3876 */
3877 if (ata_dev_pair(dev)) {
3878 /* No PIO5 or PIO6 */
3879 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3880 /* No MWDMA3 or MWDMA 4 */
3881 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3882 }
3883
37deecb5
TH
3884 if (ata_dma_blacklisted(dev)) {
3885 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3886 ata_dev_printk(dev, KERN_WARNING,
3887 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3888 }
a6d5a51c 3889
14d66ab7
PV
3890 if ((host->flags & ATA_HOST_SIMPLEX) &&
3891 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
3892 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3893 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3894 "other device, disabling DMA\n");
5444a6f4 3895 }
565083e1 3896
e424675f
JG
3897 if (ap->flags & ATA_FLAG_NO_IORDY)
3898 xfer_mask &= ata_pio_mask_no_iordy(dev);
3899
5444a6f4 3900 if (ap->ops->mode_filter)
a76b62ca 3901 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 3902
8343f889
RH
3903 /* Apply cable rule here. Don't apply it early because when
3904 * we handle hot plug the cable type can itself change.
3905 * Check this last so that we know if the transfer rate was
3906 * solely limited by the cable.
3907 * Unknown or 80 wire cables reported host side are checked
3908 * drive side as well. Cases where we know a 40wire cable
3909 * is used safely for 80 are not checked here.
3910 */
3911 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3912 /* UDMA/44 or higher would be available */
3913 if((ap->cbl == ATA_CBL_PATA40) ||
3914 (ata_drive_40wire(dev->id) &&
3915 (ap->cbl == ATA_CBL_PATA_UNK ||
3916 ap->cbl == ATA_CBL_PATA80))) {
3917 ata_dev_printk(dev, KERN_WARNING,
3918 "limited to UDMA/33 due to 40-wire cable\n");
3919 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3920 }
3921
565083e1
TH
3922 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3923 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3924}
3925
1da177e4
LT
3926/**
3927 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3928 * @dev: Device to which command will be sent
3929 *
780a87f7
JG
3930 * Issue SET FEATURES - XFER MODE command to device @dev
3931 * on port @ap.
3932 *
1da177e4 3933 * LOCKING:
0cba632b 3934 * PCI/etc. bus probe sem.
83206a29
TH
3935 *
3936 * RETURNS:
3937 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3938 */
3939
3373efd8 3940static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3941{
a0123703 3942 struct ata_taskfile tf;
83206a29 3943 unsigned int err_mask;
1da177e4
LT
3944
3945 /* set up set-features taskfile */
3946 DPRINTK("set features - xfer mode\n");
3947
464cf177
TH
3948 /* Some controllers and ATAPI devices show flaky interrupt
3949 * behavior after setting xfer mode. Use polling instead.
3950 */
3373efd8 3951 ata_tf_init(dev, &tf);
a0123703
TH
3952 tf.command = ATA_CMD_SET_FEATURES;
3953 tf.feature = SETFEATURES_XFER;
464cf177 3954 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
3955 tf.protocol = ATA_PROT_NODATA;
3956 tf.nsect = dev->xfer_mode;
1da177e4 3957
3373efd8 3958 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3959
83206a29
TH
3960 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3961 return err_mask;
1da177e4
LT
3962}
3963
8bf62ece
AL
3964/**
3965 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3966 * @dev: Device to which command will be sent
e2a7f77a
RD
3967 * @heads: Number of heads (taskfile parameter)
3968 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3969 *
3970 * LOCKING:
6aff8f1f
TH
3971 * Kernel thread context (may sleep)
3972 *
3973 * RETURNS:
3974 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3975 */
3373efd8
TH
3976static unsigned int ata_dev_init_params(struct ata_device *dev,
3977 u16 heads, u16 sectors)
8bf62ece 3978{
a0123703 3979 struct ata_taskfile tf;
6aff8f1f 3980 unsigned int err_mask;
8bf62ece
AL
3981
3982 /* Number of sectors per track 1-255. Number of heads 1-16 */
3983 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3984 return AC_ERR_INVALID;
8bf62ece
AL
3985
3986 /* set up init dev params taskfile */
3987 DPRINTK("init dev params \n");
3988
3373efd8 3989 ata_tf_init(dev, &tf);
a0123703
TH
3990 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3991 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3992 tf.protocol = ATA_PROT_NODATA;
3993 tf.nsect = sectors;
3994 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3995
3373efd8 3996 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3997
6aff8f1f
TH
3998 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3999 return err_mask;
8bf62ece
AL
4000}
4001
1da177e4 4002/**
0cba632b
JG
4003 * ata_sg_clean - Unmap DMA memory associated with command
4004 * @qc: Command containing DMA memory to be released
4005 *
4006 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4007 *
4008 * LOCKING:
cca3974e 4009 * spin_lock_irqsave(host lock)
1da177e4 4010 */
70e6ad0c 4011void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4012{
4013 struct ata_port *ap = qc->ap;
cedc9a47 4014 struct scatterlist *sg = qc->__sg;
1da177e4 4015 int dir = qc->dma_dir;
cedc9a47 4016 void *pad_buf = NULL;
1da177e4 4017
a4631474
TH
4018 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4019 WARN_ON(sg == NULL);
1da177e4
LT
4020
4021 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 4022 WARN_ON(qc->n_elem > 1);
1da177e4 4023
2c13b7ce 4024 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4025
cedc9a47
JG
4026 /* if we padded the buffer out to 32-bit bound, and data
4027 * xfer direction is from-device, we must copy from the
4028 * pad buffer back into the supplied buffer
4029 */
4030 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4031 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4032
4033 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4034 if (qc->n_elem)
2f1f610b 4035 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
4036 /* restore last sg */
4037 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4038 if (pad_buf) {
4039 struct scatterlist *psg = &qc->pad_sgent;
4040 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4041 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4042 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4043 }
4044 } else {
2e242fa9 4045 if (qc->n_elem)
2f1f610b 4046 dma_unmap_single(ap->dev,
e1410f2d
JG
4047 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4048 dir);
cedc9a47
JG
4049 /* restore sg */
4050 sg->length += qc->pad_len;
4051 if (pad_buf)
4052 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4053 pad_buf, qc->pad_len);
4054 }
1da177e4
LT
4055
4056 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4057 qc->__sg = NULL;
1da177e4
LT
4058}
4059
4060/**
4061 * ata_fill_sg - Fill PCI IDE PRD table
4062 * @qc: Metadata associated with taskfile to be transferred
4063 *
780a87f7
JG
4064 * Fill PCI IDE PRD (scatter-gather) table with segments
4065 * associated with the current disk command.
4066 *
1da177e4 4067 * LOCKING:
cca3974e 4068 * spin_lock_irqsave(host lock)
1da177e4
LT
4069 *
4070 */
4071static void ata_fill_sg(struct ata_queued_cmd *qc)
4072{
1da177e4 4073 struct ata_port *ap = qc->ap;
cedc9a47
JG
4074 struct scatterlist *sg;
4075 unsigned int idx;
1da177e4 4076
a4631474 4077 WARN_ON(qc->__sg == NULL);
f131883e 4078 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4079
4080 idx = 0;
cedc9a47 4081 ata_for_each_sg(sg, qc) {
1da177e4
LT
4082 u32 addr, offset;
4083 u32 sg_len, len;
4084
4085 /* determine if physical DMA addr spans 64K boundary.
4086 * Note h/w doesn't support 64-bit, so we unconditionally
4087 * truncate dma_addr_t to u32.
4088 */
4089 addr = (u32) sg_dma_address(sg);
4090 sg_len = sg_dma_len(sg);
4091
4092 while (sg_len) {
4093 offset = addr & 0xffff;
4094 len = sg_len;
4095 if ((offset + sg_len) > 0x10000)
4096 len = 0x10000 - offset;
4097
4098 ap->prd[idx].addr = cpu_to_le32(addr);
4099 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4100 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4101
4102 idx++;
4103 sg_len -= len;
4104 addr += len;
4105 }
4106 }
4107
4108 if (idx)
4109 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4110}
4111/**
4112 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4113 * @qc: Metadata associated with taskfile to check
4114 *
780a87f7
JG
4115 * Allow low-level driver to filter ATA PACKET commands, returning
4116 * a status indicating whether or not it is OK to use DMA for the
4117 * supplied PACKET command.
4118 *
1da177e4 4119 * LOCKING:
cca3974e 4120 * spin_lock_irqsave(host lock)
0cba632b 4121 *
1da177e4
LT
4122 * RETURNS: 0 when ATAPI DMA can be used
4123 * nonzero otherwise
4124 */
4125int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4126{
4127 struct ata_port *ap = qc->ap;
4128 int rc = 0; /* Assume ATAPI DMA is OK by default */
4129
6f23a31d
AL
4130 /* some drives can only do ATAPI DMA on read/write */
4131 if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
4132 struct scsi_cmnd *cmd = qc->scsicmd;
4133 u8 *scsicmd = cmd->cmnd;
4134
4135 switch (scsicmd[0]) {
4136 case READ_10:
4137 case WRITE_10:
4138 case READ_12:
4139 case WRITE_12:
4140 case READ_6:
4141 case WRITE_6:
4142 /* atapi dma maybe ok */
4143 break;
4144 default:
4145 /* turn off atapi dma */
4146 return 1;
4147 }
4148 }
4149
1da177e4
LT
4150 if (ap->ops->check_atapi_dma)
4151 rc = ap->ops->check_atapi_dma(qc);
4152
4153 return rc;
4154}
4155/**
4156 * ata_qc_prep - Prepare taskfile for submission
4157 * @qc: Metadata associated with taskfile to be prepared
4158 *
780a87f7
JG
4159 * Prepare ATA taskfile for submission.
4160 *
1da177e4 4161 * LOCKING:
cca3974e 4162 * spin_lock_irqsave(host lock)
1da177e4
LT
4163 */
4164void ata_qc_prep(struct ata_queued_cmd *qc)
4165{
4166 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4167 return;
4168
4169 ata_fill_sg(qc);
4170}
4171
/* no-op qc_prep for controllers that need no PRD setup */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4173
0cba632b
JG
4174/**
4175 * ata_sg_init_one - Associate command with memory buffer
4176 * @qc: Command to be associated
4177 * @buf: Memory buffer
4178 * @buflen: Length of memory buffer, in bytes.
4179 *
4180 * Initialize the data-related elements of queued_cmd @qc
4181 * to point to a single memory buffer, @buf of byte length @buflen.
4182 *
4183 * LOCKING:
cca3974e 4184 * spin_lock_irqsave(host lock)
0cba632b
JG
4185 */
4186
1da177e4
LT
4187void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4188{
1da177e4
LT
4189 qc->flags |= ATA_QCFLAG_SINGLE;
4190
cedc9a47 4191 qc->__sg = &qc->sgent;
1da177e4 4192 qc->n_elem = 1;
cedc9a47 4193 qc->orig_n_elem = 1;
1da177e4 4194 qc->buf_virt = buf;
233277ca 4195 qc->nbytes = buflen;
1da177e4 4196
61c0596c 4197 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4198}
4199
0cba632b
JG
4200/**
4201 * ata_sg_init - Associate command with scatter-gather table.
4202 * @qc: Command to be associated
4203 * @sg: Scatter-gather table.
4204 * @n_elem: Number of elements in s/g table.
4205 *
4206 * Initialize the data-related elements of queued_cmd @qc
4207 * to point to a scatter-gather table @sg, containing @n_elem
4208 * elements.
4209 *
4210 * LOCKING:
cca3974e 4211 * spin_lock_irqsave(host lock)
0cba632b
JG
4212 */
4213
1da177e4
LT
4214void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4215 unsigned int n_elem)
4216{
4217 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4218 qc->__sg = sg;
1da177e4 4219 qc->n_elem = n_elem;
cedc9a47 4220 qc->orig_n_elem = n_elem;
1da177e4
LT
4221}
4222
4223/**
0cba632b
JG
4224 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4225 * @qc: Command with memory buffer to be mapped.
4226 *
4227 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4228 *
4229 * LOCKING:
cca3974e 4230 * spin_lock_irqsave(host lock)
1da177e4
LT
4231 *
4232 * RETURNS:
0cba632b 4233 * Zero on success, negative on error.
1da177e4
LT
4234 */
4235
4236static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4237{
4238 struct ata_port *ap = qc->ap;
4239 int dir = qc->dma_dir;
cedc9a47 4240 struct scatterlist *sg = qc->__sg;
1da177e4 4241 dma_addr_t dma_address;
2e242fa9 4242 int trim_sg = 0;
1da177e4 4243
cedc9a47
JG
4244 /* we must lengthen transfers to end on a 32-bit boundary */
4245 qc->pad_len = sg->length & 3;
4246 if (qc->pad_len) {
4247 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4248 struct scatterlist *psg = &qc->pad_sgent;
4249
a4631474 4250 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4251
4252 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4253
4254 if (qc->tf.flags & ATA_TFLAG_WRITE)
4255 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4256 qc->pad_len);
4257
4258 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4259 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4260 /* trim sg */
4261 sg->length -= qc->pad_len;
2e242fa9
TH
4262 if (sg->length == 0)
4263 trim_sg = 1;
cedc9a47
JG
4264
4265 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4266 sg->length, qc->pad_len);
4267 }
4268
2e242fa9
TH
4269 if (trim_sg) {
4270 qc->n_elem--;
e1410f2d
JG
4271 goto skip_map;
4272 }
4273
2f1f610b 4274 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 4275 sg->length, dir);
537a95d9
TH
4276 if (dma_mapping_error(dma_address)) {
4277 /* restore sg */
4278 sg->length += qc->pad_len;
1da177e4 4279 return -1;
537a95d9 4280 }
1da177e4
LT
4281
4282 sg_dma_address(sg) = dma_address;
32529e01 4283 sg_dma_len(sg) = sg->length;
1da177e4 4284
2e242fa9 4285skip_map:
1da177e4
LT
4286 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4287 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4288
4289 return 0;
4290}
4291
4292/**
0cba632b
JG
4293 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4294 * @qc: Command with scatter-gather table to be mapped.
4295 *
4296 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4297 *
4298 * LOCKING:
cca3974e 4299 * spin_lock_irqsave(host lock)
1da177e4
LT
4300 *
4301 * RETURNS:
0cba632b 4302 * Zero on success, negative on error.
1da177e4
LT
4303 *
4304 */
4305
4306static int ata_sg_setup(struct ata_queued_cmd *qc)
4307{
4308 struct ata_port *ap = qc->ap;
cedc9a47
JG
4309 struct scatterlist *sg = qc->__sg;
4310 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 4311 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 4312
44877b4e 4313 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 4314 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 4315
cedc9a47
JG
4316 /* we must lengthen transfers to end on a 32-bit boundary */
4317 qc->pad_len = lsg->length & 3;
4318 if (qc->pad_len) {
4319 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4320 struct scatterlist *psg = &qc->pad_sgent;
4321 unsigned int offset;
4322
a4631474 4323 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4324
4325 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4326
4327 /*
4328 * psg->page/offset are used to copy to-be-written
4329 * data in this function or read data in ata_sg_clean.
4330 */
4331 offset = lsg->offset + lsg->length - qc->pad_len;
4332 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4333 psg->offset = offset_in_page(offset);
4334
4335 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4336 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4337 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 4338 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4339 }
4340
4341 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4342 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4343 /* trim last sg */
4344 lsg->length -= qc->pad_len;
e1410f2d
JG
4345 if (lsg->length == 0)
4346 trim_sg = 1;
cedc9a47
JG
4347
4348 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4349 qc->n_elem - 1, lsg->length, qc->pad_len);
4350 }
4351
e1410f2d
JG
4352 pre_n_elem = qc->n_elem;
4353 if (trim_sg && pre_n_elem)
4354 pre_n_elem--;
4355
4356 if (!pre_n_elem) {
4357 n_elem = 0;
4358 goto skip_map;
4359 }
4360
1da177e4 4361 dir = qc->dma_dir;
2f1f610b 4362 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
4363 if (n_elem < 1) {
4364 /* restore last sg */
4365 lsg->length += qc->pad_len;
1da177e4 4366 return -1;
537a95d9 4367 }
1da177e4
LT
4368
4369 DPRINTK("%d sg elements mapped\n", n_elem);
4370
e1410f2d 4371skip_map:
1da177e4
LT
4372 qc->n_elem = n_elem;
4373
4374 return 0;
4375}
4376
0baab86b 4377/**
c893a3ae 4378 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4379 * @buf: Buffer to swap
4380 * @buf_words: Number of 16-bit words in buffer.
4381 *
4382 * Swap halves of 16-bit words if needed to convert from
4383 * little-endian byte order to native cpu byte order, or
4384 * vice-versa.
4385 *
4386 * LOCKING:
6f0ef4fa 4387 * Inherited from caller.
0baab86b 4388 */
1da177e4
LT
4389void swap_buf_le16(u16 *buf, unsigned int buf_words)
4390{
4391#ifdef __BIG_ENDIAN
4392 unsigned int i;
4393
4394 for (i = 0; i < buf_words; i++)
4395 buf[i] = le16_to_cpu(buf[i]);
4396#endif /* __BIG_ENDIAN */
4397}
4398
6ae4cfb5 4399/**
0d5ff566 4400 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4401 * @adev: device to target
6ae4cfb5
AL
4402 * @buf: data buffer
4403 * @buflen: buffer length
344babaa 4404 * @write_data: read/write
6ae4cfb5
AL
4405 *
4406 * Transfer data from/to the device data register by PIO.
4407 *
4408 * LOCKING:
4409 * Inherited from caller.
6ae4cfb5 4410 */
0d5ff566
TH
4411void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4412 unsigned int buflen, int write_data)
1da177e4 4413{
a6b2c5d4 4414 struct ata_port *ap = adev->ap;
6ae4cfb5 4415 unsigned int words = buflen >> 1;
1da177e4 4416
6ae4cfb5 4417 /* Transfer multiple of 2 bytes */
1da177e4 4418 if (write_data)
0d5ff566 4419 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4420 else
0d5ff566 4421 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4422
4423 /* Transfer trailing 1 byte, if any. */
4424 if (unlikely(buflen & 0x01)) {
4425 u16 align_buf[1] = { 0 };
4426 unsigned char *trailing_buf = buf + buflen - 1;
4427
4428 if (write_data) {
4429 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4430 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4431 } else {
0d5ff566 4432 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4433 memcpy(trailing_buf, align_buf, 1);
4434 }
4435 }
1da177e4
LT
4436}
4437
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;

	/* mask local interrupts around the plain PIO transfer */
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
4459
4460
6ae4cfb5 4461/**
5a5dbd18 4462 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4463 * @qc: Command on going
4464 *
5a5dbd18 4465 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4466 *
4467 * LOCKING:
4468 * Inherited from caller.
4469 */
4470
1da177e4
LT
4471static void ata_pio_sector(struct ata_queued_cmd *qc)
4472{
4473 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4474 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4475 struct ata_port *ap = qc->ap;
4476 struct page *page;
4477 unsigned int offset;
4478 unsigned char *buf;
4479
5a5dbd18 4480 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 4481 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4482
4483 page = sg[qc->cursg].page;
726f0785 4484 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4485
4486 /* get the current page and offset */
4487 page = nth_page(page, (offset >> PAGE_SHIFT));
4488 offset %= PAGE_SIZE;
4489
1da177e4
LT
4490 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4491
91b8b313
AL
4492 if (PageHighMem(page)) {
4493 unsigned long flags;
4494
a6b2c5d4 4495 /* FIXME: use a bounce buffer */
91b8b313
AL
4496 local_irq_save(flags);
4497 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4498
91b8b313 4499 /* do the actual data transfer */
5a5dbd18 4500 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 4501
91b8b313
AL
4502 kunmap_atomic(buf, KM_IRQ0);
4503 local_irq_restore(flags);
4504 } else {
4505 buf = page_address(page);
5a5dbd18 4506 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 4507 }
1da177e4 4508
5a5dbd18
ML
4509 qc->curbytes += qc->sect_size;
4510 qc->cursg_ofs += qc->sect_size;
1da177e4 4511
726f0785 4512 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4513 qc->cursg++;
4514 qc->cursg_ofs = 0;
4515 }
1da177e4 4516}
1da177e4 4517
07f6f7d0 4518/**
5a5dbd18 4519 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4520 * @qc: Command on going
4521 *
5a5dbd18 4522 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4523 * ATA device for the DRQ request.
4524 *
4525 * LOCKING:
4526 * Inherited from caller.
4527 */
1da177e4 4528
07f6f7d0
AL
4529static void ata_pio_sectors(struct ata_queued_cmd *qc)
4530{
4531 if (is_multi_taskfile(&qc->tf)) {
4532 /* READ/WRITE MULTIPLE */
4533 unsigned int nsect;
4534
587005de 4535 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4536
5a5dbd18 4537 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4538 qc->dev->multi_count);
07f6f7d0
AL
4539 while (nsect--)
4540 ata_pio_sector(qc);
4541 } else
4542 ata_pio_sector(qc);
4543}
4544
c71c1857
AL
4545/**
4546 * atapi_send_cdb - Write CDB bytes to hardware
4547 * @ap: Port to which ATAPI device is attached.
4548 * @qc: Taskfile currently active
4549 *
4550 * When device has indicated its readiness to accept
4551 * a CDB, this function is called. Send the CDB.
4552 *
4553 * LOCKING:
4554 * caller.
4555 */
4556
4557static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4558{
4559 /* send SCSI cdb */
4560 DPRINTK("send cdb\n");
db024d53 4561 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4562
a6b2c5d4 4563 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4564 ata_altstatus(ap); /* flush */
4565
4566 switch (qc->tf.protocol) {
4567 case ATA_PROT_ATAPI:
4568 ap->hsm_task_state = HSM_ST;
4569 break;
4570 case ATA_PROT_ATAPI_NODATA:
4571 ap->hsm_task_state = HSM_ST_LAST;
4572 break;
4573 case ATA_PROT_ATAPI_DMA:
4574 ap->hsm_task_state = HSM_ST_LAST;
4575 /* initiate bmdma */
4576 ap->ops->bmdma_start(qc);
4577 break;
4578 }
1da177e4
LT
4579}
4580
6ae4cfb5
AL
4581/**
4582 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4583 * @qc: Command on going
4584 * @bytes: number of bytes
4585 *
4586 * Transfer Transfer data from/to the ATAPI device.
4587 *
4588 * LOCKING:
4589 * Inherited from caller.
4590 *
4591 */
4592
1da177e4
LT
4593static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4594{
4595 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4596 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4597 struct ata_port *ap = qc->ap;
4598 struct page *page;
4599 unsigned char *buf;
4600 unsigned int offset, count;
4601
563a6e1f 4602 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4603 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4604
4605next_sg:
563a6e1f 4606 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4607 /*
563a6e1f
AL
4608 * The end of qc->sg is reached and the device expects
4609 * more data to transfer. In order not to overrun qc->sg
4610 * and fulfill length specified in the byte count register,
4611 * - for read case, discard trailing data from the device
4612 * - for write case, padding zero data to the device
4613 */
4614 u16 pad_buf[1] = { 0 };
4615 unsigned int words = bytes >> 1;
4616 unsigned int i;
4617
4618 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4619 ata_dev_printk(qc->dev, KERN_WARNING,
4620 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4621
4622 for (i = 0; i < words; i++)
a6b2c5d4 4623 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4624
14be71f4 4625 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4626 return;
4627 }
4628
cedc9a47 4629 sg = &qc->__sg[qc->cursg];
1da177e4 4630
1da177e4
LT
4631 page = sg->page;
4632 offset = sg->offset + qc->cursg_ofs;
4633
4634 /* get the current page and offset */
4635 page = nth_page(page, (offset >> PAGE_SHIFT));
4636 offset %= PAGE_SIZE;
4637
6952df03 4638 /* don't overrun current sg */
32529e01 4639 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4640
4641 /* don't cross page boundaries */
4642 count = min(count, (unsigned int)PAGE_SIZE - offset);
4643
7282aa4b
AL
4644 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4645
91b8b313
AL
4646 if (PageHighMem(page)) {
4647 unsigned long flags;
4648
a6b2c5d4 4649 /* FIXME: use bounce buffer */
91b8b313
AL
4650 local_irq_save(flags);
4651 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4652
91b8b313 4653 /* do the actual data transfer */
a6b2c5d4 4654 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4655
91b8b313
AL
4656 kunmap_atomic(buf, KM_IRQ0);
4657 local_irq_restore(flags);
4658 } else {
4659 buf = page_address(page);
a6b2c5d4 4660 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4661 }
1da177e4
LT
4662
4663 bytes -= count;
4664 qc->curbytes += count;
4665 qc->cursg_ofs += count;
4666
32529e01 4667 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4668 qc->cursg++;
4669 qc->cursg_ofs = 0;
4670 }
4671
563a6e1f 4672 if (bytes)
1da177e4 4673 goto next_sg;
1da177e4
LT
4674}
4675
6ae4cfb5
AL
4676/**
4677 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4678 * @qc: Command on going
4679 *
4680 * Transfer Transfer data from/to the ATAPI device.
4681 *
4682 * LOCKING:
4683 * Inherited from caller.
6ae4cfb5
AL
4684 */
4685
1da177e4
LT
4686static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4687{
4688 struct ata_port *ap = qc->ap;
4689 struct ata_device *dev = qc->dev;
4690 unsigned int ireason, bc_lo, bc_hi, bytes;
4691 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4692
eec4c3f3
AL
4693 /* Abuse qc->result_tf for temp storage of intermediate TF
4694 * here to save some kernel stack usage.
4695 * For normal completion, qc->result_tf is not relevant. For
4696 * error, qc->result_tf is later overwritten by ata_qc_complete().
4697 * So, the correctness of qc->result_tf is not affected.
4698 */
4699 ap->ops->tf_read(ap, &qc->result_tf);
4700 ireason = qc->result_tf.nsect;
4701 bc_lo = qc->result_tf.lbam;
4702 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4703 bytes = (bc_hi << 8) | bc_lo;
4704
4705 /* shall be cleared to zero, indicating xfer of data */
4706 if (ireason & (1 << 0))
4707 goto err_out;
4708
4709 /* make sure transfer direction matches expected */
4710 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4711 if (do_write != i_write)
4712 goto err_out;
4713
44877b4e 4714 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 4715
1da177e4
LT
4716 __atapi_pio_bytes(qc, bytes);
4717
4718 return;
4719
4720err_out:
f15a1daf 4721 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4722 qc->err_mask |= AC_ERR_HSM;
14be71f4 4723 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4724}
4725
4726/**
c234fb00
AL
4727 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4728 * @ap: the target ata_port
4729 * @qc: qc on going
1da177e4 4730 *
c234fb00
AL
4731 * RETURNS:
4732 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4733 */
c234fb00
AL
4734
4735static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4736{
c234fb00
AL
4737 if (qc->tf.flags & ATA_TFLAG_POLLING)
4738 return 1;
1da177e4 4739
c234fb00
AL
4740 if (ap->hsm_task_state == HSM_ST_FIRST) {
4741 if (qc->tf.protocol == ATA_PROT_PIO &&
4742 (qc->tf.flags & ATA_TFLAG_WRITE))
4743 return 1;
1da177e4 4744
c234fb00
AL
4745 if (is_atapi_taskfile(&qc->tf) &&
4746 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4747 return 1;
fe79e683
AL
4748 }
4749
c234fb00
AL
4750 return 0;
4751}
1da177e4 4752
c17ea20d
TH
4753/**
4754 * ata_hsm_qc_complete - finish a qc running on standard HSM
4755 * @qc: Command to complete
4756 * @in_wq: 1 if called from workqueue, 0 otherwise
4757 *
4758 * Finish @qc which is running on standard HSM.
4759 *
4760 * LOCKING:
cca3974e 4761 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4762 * Otherwise, none on entry and grabs host lock.
4763 */
4764static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4765{
4766 struct ata_port *ap = qc->ap;
4767 unsigned long flags;
4768
4769 if (ap->ops->error_handler) {
4770 if (in_wq) {
ba6a1308 4771 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4772
cca3974e
JG
4773 /* EH might have kicked in while host lock is
4774 * released.
c17ea20d
TH
4775 */
4776 qc = ata_qc_from_tag(ap, qc->tag);
4777 if (qc) {
4778 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4779 ap->ops->irq_on(ap);
c17ea20d
TH
4780 ata_qc_complete(qc);
4781 } else
4782 ata_port_freeze(ap);
4783 }
4784
ba6a1308 4785 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4786 } else {
4787 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4788 ata_qc_complete(qc);
4789 else
4790 ata_port_freeze(ap);
4791 }
4792 } else {
4793 if (in_wq) {
ba6a1308 4794 spin_lock_irqsave(ap->lock, flags);
83625006 4795 ap->ops->irq_on(ap);
c17ea20d 4796 ata_qc_complete(qc);
ba6a1308 4797 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4798 } else
4799 ata_qc_complete(qc);
4800 }
1da177e4 4801
c81e29b4 4802 ata_altstatus(ap); /* flush */
c17ea20d
TH
4803}
4804
bb5cb290
AL
4805/**
4806 * ata_hsm_move - move the HSM to the next state.
4807 * @ap: the target ata_port
4808 * @qc: qc on going
4809 * @status: current device status
4810 * @in_wq: 1 if called from workqueue, 0 otherwise
4811 *
4812 * RETURNS:
4813 * 1 when poll next status needed, 0 otherwise.
4814 */
9a1004d0
TH
4815int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4816 u8 status, int in_wq)
e2cec771 4817{
bb5cb290
AL
4818 unsigned long flags = 0;
4819 int poll_next;
4820
6912ccd5
AL
4821 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4822
bb5cb290
AL
4823 /* Make sure ata_qc_issue_prot() does not throw things
4824 * like DMA polling into the workqueue. Notice that
4825 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4826 */
c234fb00 4827 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4828
e2cec771 4829fsm_start:
999bb6f4 4830 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 4831 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 4832
e2cec771
AL
4833 switch (ap->hsm_task_state) {
4834 case HSM_ST_FIRST:
bb5cb290
AL
4835 /* Send first data block or PACKET CDB */
4836
4837 /* If polling, we will stay in the work queue after
4838 * sending the data. Otherwise, interrupt handler
4839 * takes over after sending the data.
4840 */
4841 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4842
e2cec771 4843 /* check device status */
3655d1d3
AL
4844 if (unlikely((status & ATA_DRQ) == 0)) {
4845 /* handle BSY=0, DRQ=0 as error */
4846 if (likely(status & (ATA_ERR | ATA_DF)))
4847 /* device stops HSM for abort/error */
4848 qc->err_mask |= AC_ERR_DEV;
4849 else
4850 /* HSM violation. Let EH handle this */
4851 qc->err_mask |= AC_ERR_HSM;
4852
14be71f4 4853 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4854 goto fsm_start;
1da177e4
LT
4855 }
4856
71601958
AL
4857 /* Device should not ask for data transfer (DRQ=1)
4858 * when it finds something wrong.
eee6c32f
AL
4859 * We ignore DRQ here and stop the HSM by
4860 * changing hsm_task_state to HSM_ST_ERR and
4861 * let the EH abort the command or reset the device.
71601958
AL
4862 */
4863 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4864 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4865 "error, dev_stat 0x%X\n", status);
3655d1d3 4866 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4867 ap->hsm_task_state = HSM_ST_ERR;
4868 goto fsm_start;
71601958 4869 }
1da177e4 4870
bb5cb290
AL
4871 /* Send the CDB (atapi) or the first data block (ata pio out).
4872 * During the state transition, interrupt handler shouldn't
4873 * be invoked before the data transfer is complete and
4874 * hsm_task_state is changed. Hence, the following locking.
4875 */
4876 if (in_wq)
ba6a1308 4877 spin_lock_irqsave(ap->lock, flags);
1da177e4 4878
bb5cb290
AL
4879 if (qc->tf.protocol == ATA_PROT_PIO) {
4880 /* PIO data out protocol.
4881 * send first data block.
4882 */
0565c26d 4883
bb5cb290
AL
4884 /* ata_pio_sectors() might change the state
4885 * to HSM_ST_LAST. so, the state is changed here
4886 * before ata_pio_sectors().
4887 */
4888 ap->hsm_task_state = HSM_ST;
4889 ata_pio_sectors(qc);
4890 ata_altstatus(ap); /* flush */
4891 } else
4892 /* send CDB */
4893 atapi_send_cdb(ap, qc);
4894
4895 if (in_wq)
ba6a1308 4896 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4897
4898 /* if polling, ata_pio_task() handles the rest.
4899 * otherwise, interrupt handler takes over from here.
4900 */
e2cec771 4901 break;
1c848984 4902
e2cec771
AL
4903 case HSM_ST:
4904 /* complete command or read/write the data register */
4905 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4906 /* ATAPI PIO protocol */
4907 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4908 /* No more data to transfer or device error.
4909 * Device error will be tagged in HSM_ST_LAST.
4910 */
e2cec771
AL
4911 ap->hsm_task_state = HSM_ST_LAST;
4912 goto fsm_start;
4913 }
1da177e4 4914
71601958
AL
4915 /* Device should not ask for data transfer (DRQ=1)
4916 * when it finds something wrong.
eee6c32f
AL
4917 * We ignore DRQ here and stop the HSM by
4918 * changing hsm_task_state to HSM_ST_ERR and
4919 * let the EH abort the command or reset the device.
71601958
AL
4920 */
4921 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4922 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4923 "device error, dev_stat 0x%X\n",
4924 status);
3655d1d3 4925 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4926 ap->hsm_task_state = HSM_ST_ERR;
4927 goto fsm_start;
71601958 4928 }
1da177e4 4929
e2cec771 4930 atapi_pio_bytes(qc);
7fb6ec28 4931
e2cec771
AL
4932 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4933 /* bad ireason reported by device */
4934 goto fsm_start;
1da177e4 4935
e2cec771
AL
4936 } else {
4937 /* ATA PIO protocol */
4938 if (unlikely((status & ATA_DRQ) == 0)) {
4939 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4940 if (likely(status & (ATA_ERR | ATA_DF)))
4941 /* device stops HSM for abort/error */
4942 qc->err_mask |= AC_ERR_DEV;
4943 else
55a8e2c8
TH
4944 /* HSM violation. Let EH handle this.
4945 * Phantom devices also trigger this
4946 * condition. Mark hint.
4947 */
4948 qc->err_mask |= AC_ERR_HSM |
4949 AC_ERR_NODEV_HINT;
3655d1d3 4950
e2cec771
AL
4951 ap->hsm_task_state = HSM_ST_ERR;
4952 goto fsm_start;
4953 }
1da177e4 4954
eee6c32f
AL
4955 /* For PIO reads, some devices may ask for
4956 * data transfer (DRQ=1) alone with ERR=1.
4957 * We respect DRQ here and transfer one
4958 * block of junk data before changing the
4959 * hsm_task_state to HSM_ST_ERR.
4960 *
4961 * For PIO writes, ERR=1 DRQ=1 doesn't make
4962 * sense since the data block has been
4963 * transferred to the device.
71601958
AL
4964 */
4965 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4966 /* data might be corrputed */
4967 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4968
4969 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4970 ata_pio_sectors(qc);
4971 ata_altstatus(ap);
4972 status = ata_wait_idle(ap);
4973 }
4974
3655d1d3
AL
4975 if (status & (ATA_BUSY | ATA_DRQ))
4976 qc->err_mask |= AC_ERR_HSM;
4977
eee6c32f
AL
4978 /* ata_pio_sectors() might change the
4979 * state to HSM_ST_LAST. so, the state
4980 * is changed after ata_pio_sectors().
4981 */
4982 ap->hsm_task_state = HSM_ST_ERR;
4983 goto fsm_start;
71601958
AL
4984 }
4985
e2cec771
AL
4986 ata_pio_sectors(qc);
4987
4988 if (ap->hsm_task_state == HSM_ST_LAST &&
4989 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4990 /* all data read */
4991 ata_altstatus(ap);
52a32205 4992 status = ata_wait_idle(ap);
e2cec771
AL
4993 goto fsm_start;
4994 }
4995 }
4996
4997 ata_altstatus(ap); /* flush */
bb5cb290 4998 poll_next = 1;
1da177e4
LT
4999 break;
5000
14be71f4 5001 case HSM_ST_LAST:
6912ccd5
AL
5002 if (unlikely(!ata_ok(status))) {
5003 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
5004 ap->hsm_task_state = HSM_ST_ERR;
5005 goto fsm_start;
5006 }
5007
5008 /* no more data to transfer */
4332a771 5009 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 5010 ap->print_id, qc->dev->devno, status);
e2cec771 5011
6912ccd5
AL
5012 WARN_ON(qc->err_mask);
5013
e2cec771 5014 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 5015
e2cec771 5016 /* complete taskfile transaction */
c17ea20d 5017 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5018
5019 poll_next = 0;
1da177e4
LT
5020 break;
5021
14be71f4 5022 case HSM_ST_ERR:
e2cec771
AL
5023 /* make sure qc->err_mask is available to
5024 * know what's wrong and recover
5025 */
5026 WARN_ON(qc->err_mask == 0);
5027
5028 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 5029
999bb6f4 5030 /* complete taskfile transaction */
c17ea20d 5031 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5032
5033 poll_next = 0;
e2cec771
AL
5034 break;
5035 default:
bb5cb290 5036 poll_next = 0;
6912ccd5 5037 BUG();
1da177e4
LT
5038 }
5039
bb5cb290 5040 return poll_next;
1da177e4
LT
5041}
5042
65f27f38 5043static void ata_pio_task(struct work_struct *work)
8061f5f0 5044{
65f27f38
DH
5045 struct ata_port *ap =
5046 container_of(work, struct ata_port, port_task.work);
5047 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5048 u8 status;
a1af3734 5049 int poll_next;
8061f5f0 5050
7fb6ec28 5051fsm_start:
a1af3734 5052 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5053
a1af3734
AL
5054 /*
5055 * This is purely heuristic. This is a fast path.
5056 * Sometimes when we enter, BSY will be cleared in
5057 * a chk-status or two. If not, the drive is probably seeking
5058 * or something. Snooze for a couple msecs, then
5059 * chk-status again. If still busy, queue delayed work.
5060 */
5061 status = ata_busy_wait(ap, ATA_BUSY, 5);
5062 if (status & ATA_BUSY) {
5063 msleep(2);
5064 status = ata_busy_wait(ap, ATA_BUSY, 10);
5065 if (status & ATA_BUSY) {
31ce6dae 5066 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5067 return;
5068 }
8061f5f0
TH
5069 }
5070
a1af3734
AL
5071 /* move the HSM */
5072 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5073
a1af3734
AL
5074 /* another command or interrupt handler
5075 * may be running at this point.
5076 */
5077 if (poll_next)
7fb6ec28 5078 goto fsm_start;
8061f5f0
TH
5079}
5080
1da177e4
LT
5081/**
5082 * ata_qc_new - Request an available ATA command, for queueing
5083 * @ap: Port associated with device @dev
5084 * @dev: Device from whom we request an available command structure
5085 *
5086 * LOCKING:
0cba632b 5087 * None.
1da177e4
LT
5088 */
5089
5090static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5091{
5092 struct ata_queued_cmd *qc = NULL;
5093 unsigned int i;
5094
e3180499 5095 /* no command while frozen */
b51e9e5d 5096 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5097 return NULL;
5098
2ab7db1f
TH
5099 /* the last tag is reserved for internal command. */
5100 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5101 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5102 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5103 break;
5104 }
5105
5106 if (qc)
5107 qc->tag = i;
5108
5109 return qc;
5110}
5111
5112/**
5113 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5114 * @dev: Device from whom we request an available command structure
5115 *
5116 * LOCKING:
0cba632b 5117 * None.
1da177e4
LT
5118 */
5119
3373efd8 5120struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5121{
3373efd8 5122 struct ata_port *ap = dev->ap;
1da177e4
LT
5123 struct ata_queued_cmd *qc;
5124
5125 qc = ata_qc_new(ap);
5126 if (qc) {
1da177e4
LT
5127 qc->scsicmd = NULL;
5128 qc->ap = ap;
5129 qc->dev = dev;
1da177e4 5130
2c13b7ce 5131 ata_qc_reinit(qc);
1da177e4
LT
5132 }
5133
5134 return qc;
5135}
5136
1da177e4
LT
5137/**
5138 * ata_qc_free - free unused ata_queued_cmd
5139 * @qc: Command to complete
5140 *
5141 * Designed to free unused ata_queued_cmd object
5142 * in case something prevents using it.
5143 *
5144 * LOCKING:
cca3974e 5145 * spin_lock_irqsave(host lock)
1da177e4
LT
5146 */
5147void ata_qc_free(struct ata_queued_cmd *qc)
5148{
4ba946e9
TH
5149 struct ata_port *ap = qc->ap;
5150 unsigned int tag;
5151
a4631474 5152 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5153
4ba946e9
TH
5154 qc->flags = 0;
5155 tag = qc->tag;
5156 if (likely(ata_tag_valid(tag))) {
4ba946e9 5157 qc->tag = ATA_TAG_POISON;
6cec4a39 5158 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5159 }
1da177e4
LT
5160}
5161
76014427 5162void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5163{
dedaf2b0
TH
5164 struct ata_port *ap = qc->ap;
5165
a4631474
TH
5166 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5167 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
5168
5169 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5170 ata_sg_clean(qc);
5171
7401abf2 5172 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
5173 if (qc->tf.protocol == ATA_PROT_NCQ)
5174 ap->sactive &= ~(1 << qc->tag);
5175 else
5176 ap->active_tag = ATA_TAG_POISON;
7401abf2 5177
3f3791d3
AL
5178 /* atapi: mark qc as inactive to prevent the interrupt handler
5179 * from completing the command twice later, before the error handler
5180 * is called. (when rc != 0 and atapi request sense is needed)
5181 */
5182 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 5183 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 5184
1da177e4 5185 /* call completion callback */
77853bf2 5186 qc->complete_fn(qc);
1da177e4
LT
5187}
5188
39599a53
TH
5189static void fill_result_tf(struct ata_queued_cmd *qc)
5190{
5191 struct ata_port *ap = qc->ap;
5192
39599a53 5193 qc->result_tf.flags = qc->tf.flags;
4742d54f 5194 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5195}
5196
f686bcb8
TH
5197/**
5198 * ata_qc_complete - Complete an active ATA command
5199 * @qc: Command to complete
5200 * @err_mask: ATA Status register contents
5201 *
5202 * Indicate to the mid and upper layers that an ATA
5203 * command has completed, with either an ok or not-ok status.
5204 *
5205 * LOCKING:
cca3974e 5206 * spin_lock_irqsave(host lock)
f686bcb8
TH
5207 */
5208void ata_qc_complete(struct ata_queued_cmd *qc)
5209{
5210 struct ata_port *ap = qc->ap;
5211
5212 /* XXX: New EH and old EH use different mechanisms to
5213 * synchronize EH with regular execution path.
5214 *
5215 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5216 * Normal execution path is responsible for not accessing a
5217 * failed qc. libata core enforces the rule by returning NULL
5218 * from ata_qc_from_tag() for failed qcs.
5219 *
5220 * Old EH depends on ata_qc_complete() nullifying completion
5221 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5222 * not synchronize with interrupt handler. Only PIO task is
5223 * taken care of.
5224 */
5225 if (ap->ops->error_handler) {
b51e9e5d 5226 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
5227
5228 if (unlikely(qc->err_mask))
5229 qc->flags |= ATA_QCFLAG_FAILED;
5230
5231 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5232 if (!ata_tag_internal(qc->tag)) {
5233 /* always fill result TF for failed qc */
39599a53 5234 fill_result_tf(qc);
f686bcb8
TH
5235 ata_qc_schedule_eh(qc);
5236 return;
5237 }
5238 }
5239
5240 /* read result TF if requested */
5241 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5242 fill_result_tf(qc);
f686bcb8
TH
5243
5244 __ata_qc_complete(qc);
5245 } else {
5246 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5247 return;
5248
5249 /* read result TF if failed or requested */
5250 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5251 fill_result_tf(qc);
f686bcb8
TH
5252
5253 __ata_qc_complete(qc);
5254 }
5255}
5256
dedaf2b0
TH
5257/**
5258 * ata_qc_complete_multiple - Complete multiple qcs successfully
5259 * @ap: port in question
5260 * @qc_active: new qc_active mask
5261 * @finish_qc: LLDD callback invoked before completing a qc
5262 *
5263 * Complete in-flight commands. This functions is meant to be
5264 * called from low-level driver's interrupt routine to complete
5265 * requests normally. ap->qc_active and @qc_active is compared
5266 * and commands are completed accordingly.
5267 *
5268 * LOCKING:
cca3974e 5269 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5270 *
5271 * RETURNS:
5272 * Number of completed commands on success, -errno otherwise.
5273 */
5274int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5275 void (*finish_qc)(struct ata_queued_cmd *))
5276{
5277 int nr_done = 0;
5278 u32 done_mask;
5279 int i;
5280
5281 done_mask = ap->qc_active ^ qc_active;
5282
5283 if (unlikely(done_mask & qc_active)) {
5284 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5285 "(%08x->%08x)\n", ap->qc_active, qc_active);
5286 return -EINVAL;
5287 }
5288
5289 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5290 struct ata_queued_cmd *qc;
5291
5292 if (!(done_mask & (1 << i)))
5293 continue;
5294
5295 if ((qc = ata_qc_from_tag(ap, i))) {
5296 if (finish_qc)
5297 finish_qc(qc);
5298 ata_qc_complete(qc);
5299 nr_done++;
5300 }
5301 }
5302
5303 return nr_done;
5304}
5305
1da177e4
LT
5306static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5307{
5308 struct ata_port *ap = qc->ap;
5309
5310 switch (qc->tf.protocol) {
3dc1d881 5311 case ATA_PROT_NCQ:
1da177e4
LT
5312 case ATA_PROT_DMA:
5313 case ATA_PROT_ATAPI_DMA:
5314 return 1;
5315
5316 case ATA_PROT_ATAPI:
5317 case ATA_PROT_PIO:
1da177e4
LT
5318 if (ap->flags & ATA_FLAG_PIO_DMA)
5319 return 1;
5320
5321 /* fall through */
5322
5323 default:
5324 return 0;
5325 }
5326
5327 /* never reached */
5328}
5329
5330/**
5331 * ata_qc_issue - issue taskfile to device
5332 * @qc: command to issue to device
5333 *
5334 * Prepare an ATA command to submission to device.
5335 * This includes mapping the data into a DMA-able
5336 * area, filling in the S/G table, and finally
5337 * writing the taskfile to hardware, starting the command.
5338 *
5339 * LOCKING:
cca3974e 5340 * spin_lock_irqsave(host lock)
1da177e4 5341 */
8e0e694a 5342void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5343{
5344 struct ata_port *ap = qc->ap;
5345
dedaf2b0
TH
5346 /* Make sure only one non-NCQ command is outstanding. The
5347 * check is skipped for old EH because it reuses active qc to
5348 * request ATAPI sense.
5349 */
5350 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5351
5352 if (qc->tf.protocol == ATA_PROT_NCQ) {
5353 WARN_ON(ap->sactive & (1 << qc->tag));
5354 ap->sactive |= 1 << qc->tag;
5355 } else {
5356 WARN_ON(ap->sactive);
5357 ap->active_tag = qc->tag;
5358 }
5359
e4a70e76 5360 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5361 ap->qc_active |= 1 << qc->tag;
e4a70e76 5362
1da177e4
LT
5363 if (ata_should_dma_map(qc)) {
5364 if (qc->flags & ATA_QCFLAG_SG) {
5365 if (ata_sg_setup(qc))
8e436af9 5366 goto sg_err;
1da177e4
LT
5367 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5368 if (ata_sg_setup_one(qc))
8e436af9 5369 goto sg_err;
1da177e4
LT
5370 }
5371 } else {
5372 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5373 }
5374
5375 ap->ops->qc_prep(qc);
5376
8e0e694a
TH
5377 qc->err_mask |= ap->ops->qc_issue(qc);
5378 if (unlikely(qc->err_mask))
5379 goto err;
5380 return;
1da177e4 5381
8e436af9
TH
5382sg_err:
5383 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
5384 qc->err_mask |= AC_ERR_SYSTEM;
5385err:
5386 ata_qc_complete(qc);
1da177e4
LT
5387}
5388
5389/**
5390 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5391 * @qc: command to issue to device
5392 *
5393 * Using various libata functions and hooks, this function
5394 * starts an ATA command. ATA commands are grouped into
5395 * classes called "protocols", and issuing each type of protocol
5396 * is slightly different.
5397 *
0baab86b
EF
5398 * May be used as the qc_issue() entry in ata_port_operations.
5399 *
1da177e4 5400 * LOCKING:
cca3974e 5401 * spin_lock_irqsave(host lock)
1da177e4
LT
5402 *
5403 * RETURNS:
9a3d9eb0 5404 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5405 */
5406
9a3d9eb0 5407unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
5408{
5409 struct ata_port *ap = qc->ap;
5410
e50362ec
AL
5411 /* Use polling pio if the LLD doesn't handle
5412 * interrupt driven pio and atapi CDB interrupt.
5413 */
5414 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5415 switch (qc->tf.protocol) {
5416 case ATA_PROT_PIO:
e3472cbe 5417 case ATA_PROT_NODATA:
e50362ec
AL
5418 case ATA_PROT_ATAPI:
5419 case ATA_PROT_ATAPI_NODATA:
5420 qc->tf.flags |= ATA_TFLAG_POLLING;
5421 break;
5422 case ATA_PROT_ATAPI_DMA:
5423 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 5424 /* see ata_dma_blacklisted() */
e50362ec
AL
5425 BUG();
5426 break;
5427 default:
5428 break;
5429 }
5430 }
5431
312f7da2 5432 /* select the device */
1da177e4
LT
5433 ata_dev_select(ap, qc->dev->devno, 1, 0);
5434
312f7da2 5435 /* start the command */
1da177e4
LT
5436 switch (qc->tf.protocol) {
5437 case ATA_PROT_NODATA:
312f7da2
AL
5438 if (qc->tf.flags & ATA_TFLAG_POLLING)
5439 ata_qc_set_polling(qc);
5440
e5338254 5441 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
5442 ap->hsm_task_state = HSM_ST_LAST;
5443
5444 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5445 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5446
1da177e4
LT
5447 break;
5448
5449 case ATA_PROT_DMA:
587005de 5450 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5451
1da177e4
LT
5452 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5453 ap->ops->bmdma_setup(qc); /* set up bmdma */
5454 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5455 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5456 break;
5457
312f7da2
AL
5458 case ATA_PROT_PIO:
5459 if (qc->tf.flags & ATA_TFLAG_POLLING)
5460 ata_qc_set_polling(qc);
1da177e4 5461
e5338254 5462 ata_tf_to_host(ap, &qc->tf);
312f7da2 5463
54f00389
AL
5464 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5465 /* PIO data out protocol */
5466 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5467 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5468
5469 /* always send first data block using
e27486db 5470 * the ata_pio_task() codepath.
54f00389 5471 */
312f7da2 5472 } else {
54f00389
AL
5473 /* PIO data in protocol */
5474 ap->hsm_task_state = HSM_ST;
5475
5476 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5477 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5478
5479 /* if polling, ata_pio_task() handles the rest.
5480 * otherwise, interrupt handler takes over from here.
5481 */
312f7da2
AL
5482 }
5483
1da177e4
LT
5484 break;
5485
1da177e4 5486 case ATA_PROT_ATAPI:
1da177e4 5487 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5488 if (qc->tf.flags & ATA_TFLAG_POLLING)
5489 ata_qc_set_polling(qc);
5490
e5338254 5491 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5492
312f7da2
AL
5493 ap->hsm_task_state = HSM_ST_FIRST;
5494
5495 /* send cdb by polling if no cdb interrupt */
5496 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5497 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5498 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5499 break;
5500
5501 case ATA_PROT_ATAPI_DMA:
587005de 5502 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5503
1da177e4
LT
5504 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5505 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5506 ap->hsm_task_state = HSM_ST_FIRST;
5507
5508 /* send cdb by polling if no cdb interrupt */
5509 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5510 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5511 break;
5512
5513 default:
5514 WARN_ON(1);
9a3d9eb0 5515 return AC_ERR_SYSTEM;
1da177e4
LT
5516 }
5517
5518 return 0;
5519}
5520
1da177e4
LT
5521/**
5522 * ata_host_intr - Handle host interrupt for given (port, task)
5523 * @ap: Port on which interrupt arrived (possibly...)
5524 * @qc: Taskfile currently active in engine
5525 *
5526 * Handle host interrupt for given queued command. Currently,
5527 * only DMA interrupts are handled. All other commands are
5528 * handled via polling with interrupts disabled (nIEN bit).
5529 *
5530 * LOCKING:
cca3974e 5531 * spin_lock_irqsave(host lock)
1da177e4
LT
5532 *
5533 * RETURNS:
5534 * One if interrupt was handled, zero if not (shared irq).
5535 */
5536
5537inline unsigned int ata_host_intr (struct ata_port *ap,
5538 struct ata_queued_cmd *qc)
5539{
ea54763f 5540 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 5541 u8 status, host_stat = 0;
1da177e4 5542
312f7da2 5543 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5544 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5545
312f7da2
AL
5546 /* Check whether we are expecting interrupt in this state */
5547 switch (ap->hsm_task_state) {
5548 case HSM_ST_FIRST:
6912ccd5
AL
5549 /* Some pre-ATAPI-4 devices assert INTRQ
5550 * at this state when ready to receive CDB.
5551 */
1da177e4 5552
312f7da2
AL
5553 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
5554 * The flag was turned on only for atapi devices.
5555 * No need to check is_atapi_taskfile(&qc->tf) again.
5556 */
5557 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5558 goto idle_irq;
1da177e4 5559 break;
312f7da2
AL
5560 case HSM_ST_LAST:
5561 if (qc->tf.protocol == ATA_PROT_DMA ||
5562 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5563 /* check status of DMA engine */
5564 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
5565 VPRINTK("ata%u: host_stat 0x%X\n",
5566 ap->print_id, host_stat);
312f7da2
AL
5567
5568 /* if it's not our irq... */
5569 if (!(host_stat & ATA_DMA_INTR))
5570 goto idle_irq;
5571
5572 /* before we do anything else, clear DMA-Start bit */
5573 ap->ops->bmdma_stop(qc);
a4f16610
AL
5574
5575 if (unlikely(host_stat & ATA_DMA_ERR)) {
5576 /* error when transfering data to/from memory */
5577 qc->err_mask |= AC_ERR_HOST_BUS;
5578 ap->hsm_task_state = HSM_ST_ERR;
5579 }
312f7da2
AL
5580 }
5581 break;
5582 case HSM_ST:
5583 break;
1da177e4
LT
5584 default:
5585 goto idle_irq;
5586 }
5587
312f7da2
AL
5588 /* check altstatus */
5589 status = ata_altstatus(ap);
5590 if (status & ATA_BUSY)
5591 goto idle_irq;
1da177e4 5592
312f7da2
AL
5593 /* check main status, clearing INTRQ */
5594 status = ata_chk_status(ap);
5595 if (unlikely(status & ATA_BUSY))
5596 goto idle_irq;
1da177e4 5597
312f7da2
AL
5598 /* ack bmdma irq events */
5599 ap->ops->irq_clear(ap);
1da177e4 5600
bb5cb290 5601 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5602
5603 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5604 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5605 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5606
1da177e4
LT
5607 return 1; /* irq handled */
5608
5609idle_irq:
5610 ap->stats.idle_irq++;
5611
5612#ifdef ATA_IRQ_TRAP
5613 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5614 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5615 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5616 return 1;
1da177e4
LT
5617 }
5618#endif
5619 return 0; /* irq not handled */
5620}
5621
5622/**
5623 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5624 * @irq: irq line (unused)
cca3974e 5625 * @dev_instance: pointer to our ata_host information structure
1da177e4 5626 *
0cba632b
JG
5627 * Default interrupt handler for PCI IDE devices. Calls
5628 * ata_host_intr() for each port that is not disabled.
5629 *
1da177e4 5630 * LOCKING:
cca3974e 5631 * Obtains host lock during operation.
1da177e4
LT
5632 *
5633 * RETURNS:
0cba632b 5634 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5635 */
5636
7d12e780 5637irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5638{
cca3974e 5639 struct ata_host *host = dev_instance;
1da177e4
LT
5640 unsigned int i;
5641 unsigned int handled = 0;
5642 unsigned long flags;
5643
5644 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5645 spin_lock_irqsave(&host->lock, flags);
1da177e4 5646
cca3974e 5647 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5648 struct ata_port *ap;
5649
cca3974e 5650 ap = host->ports[i];
c1389503 5651 if (ap &&
029f5468 5652 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5653 struct ata_queued_cmd *qc;
5654
5655 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5656 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5657 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5658 handled |= ata_host_intr(ap, qc);
5659 }
5660 }
5661
cca3974e 5662 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5663
5664 return IRQ_RETVAL(handled);
5665}
5666
34bf2170
TH
5667/**
5668 * sata_scr_valid - test whether SCRs are accessible
5669 * @ap: ATA port to test SCR accessibility for
5670 *
5671 * Test whether SCRs are accessible for @ap.
5672 *
5673 * LOCKING:
5674 * None.
5675 *
5676 * RETURNS:
5677 * 1 if SCRs are accessible, 0 otherwise.
5678 */
5679int sata_scr_valid(struct ata_port *ap)
5680{
5681 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5682}
5683
5684/**
5685 * sata_scr_read - read SCR register of the specified port
5686 * @ap: ATA port to read SCR for
5687 * @reg: SCR to read
5688 * @val: Place to store read value
5689 *
5690 * Read SCR register @reg of @ap into *@val. This function is
5691 * guaranteed to succeed if the cable type of the port is SATA
5692 * and the port implements ->scr_read.
5693 *
5694 * LOCKING:
5695 * None.
5696 *
5697 * RETURNS:
5698 * 0 on success, negative errno on failure.
5699 */
5700int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5701{
5702 if (sata_scr_valid(ap)) {
5703 *val = ap->ops->scr_read(ap, reg);
5704 return 0;
5705 }
5706 return -EOPNOTSUPP;
5707}
5708
5709/**
5710 * sata_scr_write - write SCR register of the specified port
5711 * @ap: ATA port to write SCR for
5712 * @reg: SCR to write
5713 * @val: value to write
5714 *
5715 * Write @val to SCR register @reg of @ap. This function is
5716 * guaranteed to succeed if the cable type of the port is SATA
5717 * and the port implements ->scr_read.
5718 *
5719 * LOCKING:
5720 * None.
5721 *
5722 * RETURNS:
5723 * 0 on success, negative errno on failure.
5724 */
5725int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5726{
5727 if (sata_scr_valid(ap)) {
5728 ap->ops->scr_write(ap, reg, val);
5729 return 0;
5730 }
5731 return -EOPNOTSUPP;
5732}
5733
5734/**
5735 * sata_scr_write_flush - write SCR register of the specified port and flush
5736 * @ap: ATA port to write SCR for
5737 * @reg: SCR to write
5738 * @val: value to write
5739 *
5740 * This function is identical to sata_scr_write() except that this
5741 * function performs flush after writing to the register.
5742 *
5743 * LOCKING:
5744 * None.
5745 *
5746 * RETURNS:
5747 * 0 on success, negative errno on failure.
5748 */
5749int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5750{
5751 if (sata_scr_valid(ap)) {
5752 ap->ops->scr_write(ap, reg, val);
5753 ap->ops->scr_read(ap, reg);
5754 return 0;
5755 }
5756 return -EOPNOTSUPP;
5757}
5758
5759/**
5760 * ata_port_online - test whether the given port is online
5761 * @ap: ATA port to test
5762 *
5763 * Test whether @ap is online. Note that this function returns 0
5764 * if online status of @ap cannot be obtained, so
5765 * ata_port_online(ap) != !ata_port_offline(ap).
5766 *
5767 * LOCKING:
5768 * None.
5769 *
5770 * RETURNS:
5771 * 1 if the port online status is available and online.
5772 */
5773int ata_port_online(struct ata_port *ap)
5774{
5775 u32 sstatus;
5776
5777 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5778 return 1;
5779 return 0;
5780}
5781
5782/**
5783 * ata_port_offline - test whether the given port is offline
5784 * @ap: ATA port to test
5785 *
5786 * Test whether @ap is offline. Note that this function returns
5787 * 0 if offline status of @ap cannot be obtained, so
5788 * ata_port_online(ap) != !ata_port_offline(ap).
5789 *
5790 * LOCKING:
5791 * None.
5792 *
5793 * RETURNS:
5794 * 1 if the port offline status is available and offline.
5795 */
5796int ata_port_offline(struct ata_port *ap)
5797{
5798 u32 sstatus;
5799
5800 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5801 return 1;
5802 return 0;
5803}
0baab86b 5804
77b08fb5 5805int ata_flush_cache(struct ata_device *dev)
9b847548 5806{
977e6b9f 5807 unsigned int err_mask;
9b847548
JA
5808 u8 cmd;
5809
5810 if (!ata_try_flush_cache(dev))
5811 return 0;
5812
6fc49adb 5813 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5814 cmd = ATA_CMD_FLUSH_EXT;
5815 else
5816 cmd = ATA_CMD_FLUSH;
5817
977e6b9f
TH
5818 err_mask = ata_do_simple_cmd(dev, cmd);
5819 if (err_mask) {
5820 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5821 return -EIO;
5822 }
5823
5824 return 0;
9b847548
JA
5825}
5826
6ffa01d8 5827#ifdef CONFIG_PM
cca3974e
JG
5828static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5829 unsigned int action, unsigned int ehi_flags,
5830 int wait)
500530f6
TH
5831{
5832 unsigned long flags;
5833 int i, rc;
5834
cca3974e
JG
5835 for (i = 0; i < host->n_ports; i++) {
5836 struct ata_port *ap = host->ports[i];
500530f6
TH
5837
5838 /* Previous resume operation might still be in
5839 * progress. Wait for PM_PENDING to clear.
5840 */
5841 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5842 ata_port_wait_eh(ap);
5843 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5844 }
5845
5846 /* request PM ops to EH */
5847 spin_lock_irqsave(ap->lock, flags);
5848
5849 ap->pm_mesg = mesg;
5850 if (wait) {
5851 rc = 0;
5852 ap->pm_result = &rc;
5853 }
5854
5855 ap->pflags |= ATA_PFLAG_PM_PENDING;
5856 ap->eh_info.action |= action;
5857 ap->eh_info.flags |= ehi_flags;
5858
5859 ata_port_schedule_eh(ap);
5860
5861 spin_unlock_irqrestore(ap->lock, flags);
5862
5863 /* wait and check result */
5864 if (wait) {
5865 ata_port_wait_eh(ap);
5866 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5867 if (rc)
5868 return rc;
5869 }
5870 }
5871
5872 return 0;
5873}
5874
5875/**
cca3974e
JG
5876 * ata_host_suspend - suspend host
5877 * @host: host to suspend
500530f6
TH
5878 * @mesg: PM message
5879 *
cca3974e 5880 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
5881 * function requests EH to perform PM operations and waits for EH
5882 * to finish.
5883 *
5884 * LOCKING:
5885 * Kernel thread context (may sleep).
5886 *
5887 * RETURNS:
5888 * 0 on success, -errno on failure.
5889 */
cca3974e 5890int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 5891{
9666f400 5892 int rc;
500530f6 5893
cca3974e 5894 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
5895 if (rc == 0)
5896 host->dev->power.power_state = mesg;
500530f6
TH
5897 return rc;
5898}
5899
5900/**
cca3974e
JG
5901 * ata_host_resume - resume host
5902 * @host: host to resume
500530f6 5903 *
cca3974e 5904 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5905 * function requests EH to perform PM operations and returns.
5906 * Note that all resume operations are performed parallely.
5907 *
5908 * LOCKING:
5909 * Kernel thread context (may sleep).
5910 */
cca3974e 5911void ata_host_resume(struct ata_host *host)
500530f6 5912{
cca3974e
JG
5913 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5914 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5915 host->dev->power.power_state = PMSG_ON;
500530f6 5916}
6ffa01d8 5917#endif
500530f6 5918
c893a3ae
RD
5919/**
5920 * ata_port_start - Set port up for dma.
5921 * @ap: Port to initialize
5922 *
5923 * Called just after data structures for each port are
5924 * initialized. Allocates space for PRD table.
5925 *
5926 * May be used as the port_start() entry in ata_port_operations.
5927 *
5928 * LOCKING:
5929 * Inherited from caller.
5930 */
f0d36efd 5931int ata_port_start(struct ata_port *ap)
1da177e4 5932{
2f1f610b 5933 struct device *dev = ap->dev;
6037d6bb 5934 int rc;
1da177e4 5935
f0d36efd
TH
5936 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5937 GFP_KERNEL);
1da177e4
LT
5938 if (!ap->prd)
5939 return -ENOMEM;
5940
6037d6bb 5941 rc = ata_pad_alloc(ap, dev);
f0d36efd 5942 if (rc)
6037d6bb 5943 return rc;
1da177e4 5944
f0d36efd
TH
5945 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5946 (unsigned long long)ap->prd_dma);
1da177e4
LT
5947 return 0;
5948}
5949
3ef3b43d
TH
5950/**
5951 * ata_dev_init - Initialize an ata_device structure
5952 * @dev: Device structure to initialize
5953 *
5954 * Initialize @dev in preparation for probing.
5955 *
5956 * LOCKING:
5957 * Inherited from caller.
5958 */
5959void ata_dev_init(struct ata_device *dev)
5960{
5961 struct ata_port *ap = dev->ap;
72fa4b74
TH
5962 unsigned long flags;
5963
5a04bf4b
TH
5964 /* SATA spd limit is bound to the first device */
5965 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5966
72fa4b74
TH
5967 /* High bits of dev->flags are used to record warm plug
5968 * requests which occur asynchronously. Synchronize using
cca3974e 5969 * host lock.
72fa4b74 5970 */
ba6a1308 5971 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5972 dev->flags &= ~ATA_DFLAG_INIT_MASK;
ba6a1308 5973 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5974
72fa4b74
TH
5975 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5976 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
5977 dev->pio_mask = UINT_MAX;
5978 dev->mwdma_mask = UINT_MAX;
5979 dev->udma_mask = UINT_MAX;
5980}
5981
1da177e4 5982/**
f3187195
TH
5983 * ata_port_alloc - allocate and initialize basic ATA port resources
5984 * @host: ATA host this allocated port belongs to
1da177e4 5985 *
f3187195
TH
5986 * Allocate and initialize basic ATA port resources.
5987 *
5988 * RETURNS:
5989 * Allocate ATA port on success, NULL on failure.
0cba632b 5990 *
1da177e4 5991 * LOCKING:
f3187195 5992 * Inherited from calling layer (may sleep).
1da177e4 5993 */
f3187195 5994struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 5995{
f3187195 5996 struct ata_port *ap;
1da177e4
LT
5997 unsigned int i;
5998
f3187195
TH
5999 DPRINTK("ENTER\n");
6000
6001 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6002 if (!ap)
6003 return NULL;
6004
f4d6d004 6005 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6006 ap->lock = &host->lock;
198e0fed 6007 ap->flags = ATA_FLAG_DISABLED;
f3187195 6008 ap->print_id = -1;
1da177e4 6009 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6010 ap->host = host;
f3187195
TH
6011 ap->dev = host->dev;
6012
5a04bf4b 6013 ap->hw_sata_spd_limit = UINT_MAX;
1da177e4
LT
6014 ap->active_tag = ATA_TAG_POISON;
6015 ap->last_ctl = 0xFF;
bd5d825c
BP
6016
6017#if defined(ATA_VERBOSE_DEBUG)
6018 /* turn on all debugging levels */
6019 ap->msg_enable = 0x00FF;
6020#elif defined(ATA_DEBUG)
6021 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6022#else
0dd4b21f 6023 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6024#endif
1da177e4 6025
65f27f38
DH
6026 INIT_DELAYED_WORK(&ap->port_task, NULL);
6027 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6028 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6029 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6030 init_waitqueue_head(&ap->eh_wait_q);
1da177e4 6031
838df628 6032 ap->cbl = ATA_CBL_NONE;
838df628 6033
acf356b1
TH
6034 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6035 struct ata_device *dev = &ap->device[i];
38d87234 6036 dev->ap = ap;
72fa4b74 6037 dev->devno = i;
3ef3b43d 6038 ata_dev_init(dev);
acf356b1 6039 }
1da177e4
LT
6040
6041#ifdef ATA_IRQ_TRAP
6042 ap->stats.unhandled_irq = 1;
6043 ap->stats.idle_irq = 1;
6044#endif
1da177e4 6045 return ap;
1da177e4
LT
6046}
6047
f0d36efd
TH
6048static void ata_host_release(struct device *gendev, void *res)
6049{
6050 struct ata_host *host = dev_get_drvdata(gendev);
6051 int i;
6052
6053 for (i = 0; i < host->n_ports; i++) {
6054 struct ata_port *ap = host->ports[i];
6055
ecef7253
TH
6056 if (!ap)
6057 continue;
6058
6059 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
f0d36efd 6060 ap->ops->port_stop(ap);
f0d36efd
TH
6061 }
6062
ecef7253 6063 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
f0d36efd 6064 host->ops->host_stop(host);
1aa56cca 6065
1aa506e4
TH
6066 for (i = 0; i < host->n_ports; i++) {
6067 struct ata_port *ap = host->ports[i];
6068
4911487a
TH
6069 if (!ap)
6070 continue;
6071
6072 if (ap->scsi_host)
1aa506e4
TH
6073 scsi_host_put(ap->scsi_host);
6074
4911487a 6075 kfree(ap);
1aa506e4
TH
6076 host->ports[i] = NULL;
6077 }
6078
1aa56cca 6079 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6080}
6081
f3187195
TH
6082/**
6083 * ata_host_alloc - allocate and init basic ATA host resources
6084 * @dev: generic device this host is associated with
6085 * @max_ports: maximum number of ATA ports associated with this host
6086 *
6087 * Allocate and initialize basic ATA host resources. LLD calls
6088 * this function to allocate a host, initializes it fully and
6089 * attaches it using ata_host_register().
6090 *
6091 * @max_ports ports are allocated and host->n_ports is
6092 * initialized to @max_ports. The caller is allowed to decrease
6093 * host->n_ports before calling ata_host_register(). The unused
6094 * ports will be automatically freed on registration.
6095 *
6096 * RETURNS:
6097 * Allocate ATA host on success, NULL on failure.
6098 *
6099 * LOCKING:
6100 * Inherited from calling layer (may sleep).
6101 */
6102struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6103{
6104 struct ata_host *host;
6105 size_t sz;
6106 int i;
6107
6108 DPRINTK("ENTER\n");
6109
6110 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6111 return NULL;
6112
6113 /* alloc a container for our list of ATA ports (buses) */
6114 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6115 /* alloc a container for our list of ATA ports (buses) */
6116 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6117 if (!host)
6118 goto err_out;
6119
6120 devres_add(dev, host);
6121 dev_set_drvdata(dev, host);
6122
6123 spin_lock_init(&host->lock);
6124 host->dev = dev;
6125 host->n_ports = max_ports;
6126
6127 /* allocate ports bound to this host */
6128 for (i = 0; i < max_ports; i++) {
6129 struct ata_port *ap;
6130
6131 ap = ata_port_alloc(host);
6132 if (!ap)
6133 goto err_out;
6134
6135 ap->port_no = i;
6136 host->ports[i] = ap;
6137 }
6138
6139 devres_remove_group(dev, NULL);
6140 return host;
6141
6142 err_out:
6143 devres_release_group(dev, NULL);
6144 return NULL;
6145}
6146
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* after the first NULL entry, keep reusing the last pi.
		 * NOTE(review): assumes ppi[0] is non-NULL; otherwise pi
		 * stays NULL and is dereferenced below - verify callers.
		 */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port ops double as host-wide ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
6195
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	/* idempotent: a prior successful call set this flag */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* adopt the first non-dummy port ops as host ops */
		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop only the ports that were started successfully */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}
6249
b03732f0 6250/**
cca3974e
JG
6251 * ata_sas_host_init - Initialize a host struct
6252 * @host: host to initialize
6253 * @dev: device host is attached to
6254 * @flags: host flags
6255 * @ops: port_ops
b03732f0
BK
6256 *
6257 * LOCKING:
6258 * PCI/etc. bus probe sem.
6259 *
6260 */
f3187195 6261/* KILLME - the only user left is ipr */
cca3974e
JG
6262void ata_host_init(struct ata_host *host, struct device *dev,
6263 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 6264{
cca3974e
JG
6265 spin_lock_init(&host->lock);
6266 host->dev = dev;
6267 host->flags = flags;
6268 host->ops = ops;
b03732f0
BK
6269}
6270
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.  ata_host_alloc() leaves a NULL sentinel
	 * slot, so this loop terminates.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int irq_line;
		u32 scontrol;
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			/* bits 7:4 of SControl hold the speed limit */
			int spd = (scontrol >> 4) & 0xf;
			if (spd)
				ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		/* report the secondary IRQ for second channel legacy */
		irq_line = host->irq;
		if (i == 1 && host->irq2)
			irq_line = host->irq2;

		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		/* print per-port info to dmesg */
		if (!ata_port_is_dummy(ap))
			ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
					"ctl 0x%p bmdma 0x%p irq %d\n",
					ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->ioaddr.cmd_addr,
					ap->ioaddr.ctl_addr,
					ap->ioaddr.bmdma_addr,
					irq_line);
		else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int rc;	/* NOTE(review): shadows the outer rc; only used
			 * for the legacy ata_bus_probe() result below */

		/* probe */
		if (ap->ops->error_handler) {
			/* new-style EH: hand boot probing to the EH thread */
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* old-style drivers: probe synchronously here */
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	return 0;
}
6409
f5cda257
TH
6410/**
6411 * ata_host_activate - start host, request IRQ and register it
6412 * @host: target ATA host
6413 * @irq: IRQ to request
6414 * @irq_handler: irq_handler used when requesting IRQ
6415 * @irq_flags: irq_flags used when requesting IRQ
6416 * @sht: scsi_host_template to use when registering the host
6417 *
6418 * After allocating an ATA host and initializing it, most libata
6419 * LLDs perform three steps to activate the host - start host,
6420 * request IRQ and register it. This helper takes necessasry
6421 * arguments and performs the three steps in one go.
6422 *
6423 * LOCKING:
6424 * Inherited from calling layer (may sleep).
6425 *
6426 * RETURNS:
6427 * 0 on success, -errno otherwise.
6428 */
6429int ata_host_activate(struct ata_host *host, int irq,
6430 irq_handler_t irq_handler, unsigned long irq_flags,
6431 struct scsi_host_template *sht)
6432{
6433 int rc;
6434
6435 rc = ata_host_start(host);
6436 if (rc)
6437 return rc;
6438
6439 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6440 dev_driver_string(host->dev), host);
6441 if (rc)
6442 return rc;
6443
6444 rc = ata_host_register(host, sht);
6445 /* if failed, just free the IRQ and leave ports alone */
6446 if (rc)
6447 devm_free_irq(host->dev, irq, host);
6448
22888423
OJ
6449 /* Used to print device info at probe */
6450 host->irq = irq;
6451
f5cda257
TH
6452 return rc;
6453}
6454
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	/* old-style drivers have no EH machinery to quiesce */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().  The second cancel_work_sync()
	 * catches work re-queued by cancel_delayed_work().
	 */
	cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
	cancel_delayed_work(&ap->hotplug_task);
	cancel_work_sync(&ap->hotplug_task.work);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6512
0529c159
TH
6513/**
6514 * ata_host_detach - Detach all ports of an ATA host
6515 * @host: Host to detach
6516 *
6517 * Detach all ports of @host.
6518 *
6519 * LOCKING:
6520 * Kernel thread context (may sleep).
6521 */
6522void ata_host_detach(struct ata_host *host)
6523{
6524 int i;
6525
6526 for (i = 0; i < host->n_ports; i++)
6527 ata_port_detach(host->ports[i]);
6528}
6529
1da177e4
LT
6530/**
6531 * ata_std_ports - initialize ioaddr with standard port offsets.
6532 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6533 *
6534 * Utility function which initializes data_addr, error_addr,
6535 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6536 * device_addr, status_addr, and command_addr to standard offsets
6537 * relative to cmd_addr.
6538 *
6539 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6540 */
0baab86b 6541
1da177e4
LT
6542void ata_std_ports(struct ata_ioports *ioaddr)
6543{
6544 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6545 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6546 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6547 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6548 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6549 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6550 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6551 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6552 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6553 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6554}
6555
0baab86b 6556
374b1873
JG
6557#ifdef CONFIG_PCI
6558
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(pci_dev_to_dev(pdev));

	ata_host_detach(host);
}
6577
6578/* move to PCI subsystem */
057ace5e 6579int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6580{
6581 unsigned long tmp = 0;
6582
6583 switch (bits->width) {
6584 case 1: {
6585 u8 tmp8 = 0;
6586 pci_read_config_byte(pdev, bits->reg, &tmp8);
6587 tmp = tmp8;
6588 break;
6589 }
6590 case 2: {
6591 u16 tmp16 = 0;
6592 pci_read_config_word(pdev, bits->reg, &tmp16);
6593 tmp = tmp16;
6594 break;
6595 }
6596 case 4: {
6597 u32 tmp32 = 0;
6598 pci_read_config_dword(pdev, bits->reg, &tmp32);
6599 tmp = tmp32;
6600 break;
6601 }
6602
6603 default:
6604 return -EINVAL;
6605 }
6606
6607 tmp &= bits->mask;
6608
6609 return (tmp == bits->val) ? 1 : 0;
6610}
9b847548 6611
6ffa01d8 6612#ifdef CONFIG_PM
/**
 *	ata_pci_device_do_suspend - PCI-level suspend helper
 *	@pdev: PCI device to suspend
 *	@mesg: PM message describing the target system state
 *
 *	Save PCI config space and disable the device.  The device is
 *	dropped to D3hot only for a real suspend; other events (e.g.
 *	freeze) leave the power state alone.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
6621
/**
 *	ata_pci_device_do_resume - PCI-level resume helper
 *	@pdev: PCI device to resume
 *
 *	Bring @pdev back to D0, restore its saved config space,
 *	re-enable it (managed via pcim, so devres will disable on
 *	detach) and restore bus mastering.
 *
 *	RETURNS:
 *	0 on success, -errno if the device could not be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6639
3c5100c1 6640int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6641{
cca3974e 6642 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6643 int rc = 0;
6644
cca3974e 6645 rc = ata_host_suspend(host, mesg);
500530f6
TH
6646 if (rc)
6647 return rc;
6648
3c5100c1 6649 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6650
6651 return 0;
6652}
6653
6654int ata_pci_device_resume(struct pci_dev *pdev)
6655{
cca3974e 6656 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6657 int rc;
500530f6 6658
553c4aa6
TH
6659 rc = ata_pci_device_do_resume(pdev);
6660 if (rc == 0)
6661 ata_host_resume(host);
6662 return rc;
9b847548 6663}
6ffa01d8
TH
6664#endif /* CONFIG_PM */
6665
1da177e4
LT
6666#endif /* CONFIG_PCI */
6667
6668
1da177e4
LT
6669static int __init ata_init(void)
6670{
a8601e5f 6671 ata_probe_timeout *= HZ;
1da177e4
LT
6672 ata_wq = create_workqueue("ata");
6673 if (!ata_wq)
6674 return -ENOMEM;
6675
453b07ac
TH
6676 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6677 if (!ata_aux_wq) {
6678 destroy_workqueue(ata_wq);
6679 return -ENOMEM;
6680 }
6681
1da177e4
LT
6682 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6683 return 0;
6684}
6685
/* module unload: tear down the workqueues created in ata_init() */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
6691
/* subsystem-level initcall so libata is ready before the device-level
 * initcalls of the LLD modules that register hosts through it
 */
subsys_initcall(ata_init);
module_exit(ata_exit);
6694
67846b30 6695static unsigned long ratelimit_time;
34af946a 6696static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6697
6698int ata_ratelimit(void)
6699{
6700 int rc;
6701 unsigned long flags;
6702
6703 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6704
6705 if (time_after(jiffies, ratelimit_time)) {
6706 rc = 1;
6707 ratelimit_time = jiffies + (HZ/5);
6708 } else
6709 rc = 0;
6710
6711 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6712
6713 return rc;
6714}
6715
c22daff4
TH
6716/**
6717 * ata_wait_register - wait until register value changes
6718 * @reg: IO-mapped register
6719 * @mask: Mask to apply to read register value
6720 * @val: Wait condition
6721 * @interval_msec: polling interval in milliseconds
6722 * @timeout_msec: timeout in milliseconds
6723 *
6724 * Waiting for some bits of register to change is a common
6725 * operation for ATA controllers. This function reads 32bit LE
6726 * IO-mapped register @reg and tests for the following condition.
6727 *
6728 * (*@reg & mask) != val
6729 *
6730 * If the condition is met, it returns; otherwise, the process is
6731 * repeated after @interval_msec until timeout.
6732 *
6733 * LOCKING:
6734 * Kernel thread context (may sleep)
6735 *
6736 * RETURNS:
6737 * The final register value.
6738 */
6739u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6740 unsigned long interval_msec,
6741 unsigned long timeout_msec)
6742{
6743 unsigned long timeout;
6744 u32 tmp;
6745
6746 tmp = ioread32(reg);
6747
6748 /* Calculate timeout _after_ the first read to make sure
6749 * preceding writes reach the controller before starting to
6750 * eat away the timeout.
6751 */
6752 timeout = jiffies + (timeout_msec * HZ) / 1000;
6753
6754 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6755 msleep(interval_msec);
6756 tmp = ioread32(reg);
6757 }
6758
6759 return tmp;
6760}
6761
/*
 * Dummy port_ops - used for ports without an attached controller
 * channel.  Every hook either does nothing or fails the request.
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* always report drive-ready so nothing ever waits on a dummy port */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* refuse every queued command with a host-side error */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable = ata_port_disable,
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};

/* minimal port_info an LLD can use for an absent/disabled port */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
6798
1da177e4
LT
6799/*
6800 * libata is essentially a library of internal helper functions for
6801 * low-level ATA host controller drivers. As such, the API/ABI is
6802 * likely to change as new drivers are added and updated.
6803 * Do not depend on ABI/API stability.
6804 */
6805
e9c83914
TH
6806EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6807EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6808EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6809EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 6810EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
6811EXPORT_SYMBOL_GPL(ata_std_bios_param);
6812EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6813EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 6814EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 6815EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 6816EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 6817EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 6818EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 6819EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
6820EXPORT_SYMBOL_GPL(ata_sg_init);
6821EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6822EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6823EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6824EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6825EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
6826EXPORT_SYMBOL_GPL(ata_tf_load);
6827EXPORT_SYMBOL_GPL(ata_tf_read);
6828EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6829EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 6830EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
6831EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6832EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6833EXPORT_SYMBOL_GPL(ata_check_status);
6834EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
6835EXPORT_SYMBOL_GPL(ata_exec_command);
6836EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 6837EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 6838EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 6839EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
6840EXPORT_SYMBOL_GPL(ata_data_xfer);
6841EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6842EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 6843EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
6844EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6845EXPORT_SYMBOL_GPL(ata_bmdma_start);
6846EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6847EXPORT_SYMBOL_GPL(ata_bmdma_status);
6848EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
6849EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6850EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6851EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6852EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6853EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 6854EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 6855EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6856EXPORT_SYMBOL_GPL(sata_set_spd);
d7bb4cc7
TH
6857EXPORT_SYMBOL_GPL(sata_phy_debounce);
6858EXPORT_SYMBOL_GPL(sata_phy_resume);
1da177e4
LT
6859EXPORT_SYMBOL_GPL(sata_phy_reset);
6860EXPORT_SYMBOL_GPL(__sata_phy_reset);
6861EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 6862EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 6863EXPORT_SYMBOL_GPL(ata_std_softreset);
b6103f6d 6864EXPORT_SYMBOL_GPL(sata_port_hardreset);
c2bd5804
TH
6865EXPORT_SYMBOL_GPL(sata_std_hardreset);
6866EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6867EXPORT_SYMBOL_GPL(ata_dev_classify);
6868EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6869EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6870EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6871EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 6872EXPORT_SYMBOL_GPL(ata_busy_sleep);
d4b2bab4 6873EXPORT_SYMBOL_GPL(ata_wait_ready);
86e45b6b 6874EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
6875EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6876EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6877EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6878EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6879EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 6880EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
6881EXPORT_SYMBOL_GPL(sata_scr_valid);
6882EXPORT_SYMBOL_GPL(sata_scr_read);
6883EXPORT_SYMBOL_GPL(sata_scr_write);
6884EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6885EXPORT_SYMBOL_GPL(ata_port_online);
6886EXPORT_SYMBOL_GPL(ata_port_offline);
6ffa01d8 6887#ifdef CONFIG_PM
cca3974e
JG
6888EXPORT_SYMBOL_GPL(ata_host_suspend);
6889EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6890#endif /* CONFIG_PM */
6a62a04d
TH
6891EXPORT_SYMBOL_GPL(ata_id_string);
6892EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 6893EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6919a0a6 6894EXPORT_SYMBOL_GPL(ata_device_blacklisted);
1da177e4
LT
6895EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6896
1bc4ccff 6897EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
6898EXPORT_SYMBOL_GPL(ata_timing_compute);
6899EXPORT_SYMBOL_GPL(ata_timing_merge);
6900
1da177e4
LT
6901#ifdef CONFIG_PCI
6902EXPORT_SYMBOL_GPL(pci_test_config_bits);
d491b27b 6903EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
1626aeb8 6904EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
21b0ad4f 6905EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
1da177e4
LT
6906EXPORT_SYMBOL_GPL(ata_pci_init_one);
6907EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6908#ifdef CONFIG_PM
500530f6
TH
6909EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6910EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6911EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6912EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6913#endif /* CONFIG_PM */
67951ade
AC
6914EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6915EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 6916#endif /* CONFIG_PCI */
9b847548 6917
ece1d636 6918EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03
TH
6919EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6920EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499
TH
6921EXPORT_SYMBOL_GPL(ata_port_freeze);
6922EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6923EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6924EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6925EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6926EXPORT_SYMBOL_GPL(ata_do_eh);
83625006
AI
6927EXPORT_SYMBOL_GPL(ata_irq_on);
6928EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6929EXPORT_SYMBOL_GPL(ata_irq_ack);
6930EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
a619f981 6931EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
6932
6933EXPORT_SYMBOL_GPL(ata_cable_40wire);
6934EXPORT_SYMBOL_GPL(ata_cable_80wire);
6935EXPORT_SYMBOL_GPL(ata_cable_unknown);
6936EXPORT_SYMBOL_GPL(ata_cable_sata);