libata: kill non-sense warning message
[linux-2.6-block.git] / drivers/ata/libata-core.c
/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"
#define DRV_VERSION	"2.21"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
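/*
 * Illustrative sketch (not part of the driver): how a caller might
 * serialize a taskfile into the 20-byte Register - Host to Device FIS
 * buffer that is sent to the device.  The taskfile values below are
 * made-up examples.
 */
static inline void example_build_h2d_fis(void)
{
	struct ata_taskfile tf;
	u8 fis[20];

	memset(&tf, 0, sizeof(tf));
	tf.command = ATA_CMD_PIO_READ;	/* READ SECTOR(S) */
	tf.nsect = 1;

	ata_tf_to_fis(&tf, fis, 0);
	/* fis[0] == 0x27 identifies a Register - Host to Device FIS and
	 * bit 7 of fis[1] marks it as carrying a command. */
}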

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
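/*
 * Illustrative sketch (not driver code): the ata_rw_cmds[] lookup above
 * packs a protocol base index with three flag offsets, so a DMA (16),
 * FUA (4), LBA48 (2) write (1) selects entry 16 + 4 + 2 + 1 == 23,
 * which is ATA_CMD_WRITE_FUA_EXT.  Zero entries mark combinations the
 * command set does not provide.
 */
static inline u8 example_rw_cmd_lookup(void)
{
	int index = 16;			/* dma section of the table */
	int fua = 4, lba48 = 2, write = 1;

	return ata_rw_cmds[index + fua + lba48 + write];
}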

/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
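/*
 * Illustrative sketch (not driver code): the CHS branch above is the
 * classic linearization formula.  With a made-up geometry of 16 heads
 * and 63 sectors per track, cylinder 2, head 3, sector 4 maps to
 * (2 * 16 + 3) * 63 + 4 == 2209.
 */
static inline u64 example_chs_to_block(u32 cyl, u32 head, u32 sect,
				       u32 heads, u32 sectors)
{
	return (u64)(cyl * heads + head) * sectors + sect;
}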
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
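/*
 * Illustrative sketch (made-up values, not driver code): building an
 * NCQ read taskfile for 8 sectors at LBA 0x12345678 with hardware tag
 * 5.  The tag is encoded into the sector count register (tag << 3) and
 * the transfer length moves into the feature registers, as done above.
 */
static inline int example_build_ncq_read(struct ata_device *dev)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	return ata_build_rw_tf(&tf, dev, 0x12345678ULL, 8, 0, 5);
}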
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
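/*
 * Illustrative sketch (not driver code): pack/unpack round-trip.  A
 * device supporting PIO0-4, MWDMA0-2 and UDMA0-5 packs into a single
 * word and unpacks back to the same three masks.
 */
static inline void example_xfermask_roundtrip(void)
{
	unsigned int xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07, udma == 0x3f again */
}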
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}
/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
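/*
 * Illustrative sketch (not driver code): the three helpers above are
 * inverse mappings over ata_xfer_tbl.  A mask whose highest set bit is
 * (ATA_SHIFT_UDMA + 2) resolves to XFER_UDMA_2, and converts back to
 * the same single-bit mask.
 */
static inline void example_xfer_tbl_mapping(void)
{
	u8 mode;
	unsigned int mask;

	mode = ata_xfer_mask2mode(1 << (ATA_SHIFT_UDMA + 2));
	/* mode == XFER_UDMA_2 */
	mask = ata_xfer_mode2mask(mode);
	/* mask has exactly bit (ATA_SHIFT_UDMA + 2) set */
	(void)mask;
}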
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
		dev->class++;
	}
}
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
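/*
 * Illustrative sketch (not driver code): pulling the model string out
 * of raw IDENTIFY data.  ATA_ID_PROD and ATA_ID_PROD_LEN are the same
 * offsets libata itself uses; the id[] buffer is assumed to hold a
 * swap_buf_le16()-normalized IDENTIFY page.
 */
static inline void example_read_model(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	/* model now holds a NUL-terminated, space-trimmed product id */
}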
static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}
/**
 *	ata_read_native_max_address_ext	-	LBA48 native max query
 *	@dev: Device to query
 *
 *	Perform an LBA48 size query upon the device in question. Return the
 *	actual LBA48 size or zero if the command fails.
 */

static u64 ata_read_native_max_address_ext(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba48(&tf);
}
/**
 *	ata_read_native_max_address	-	LBA28 native max query
 *	@dev: Device to query
 *
 *	Perform an LBA28 size query upon the device in question. Return the
 *	actual LBA28 size or zero if the command fails.
 */

static u64 ata_read_native_max_address(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba(&tf);
}
/**
 *	ata_set_native_max_address_ext	-	LBA48 native max set
 *	@dev: Device to query
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Perform an LBA48 size set max upon the device in question. Return the
 *	actual LBA48 size or zero if the command fails.
 */

static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	tf.hob_lbal = (new_sectors >> 24) & 0xff;
	tf.hob_lbam = (new_sectors >> 32) & 0xff;
	tf.hob_lbah = (new_sectors >> 40) & 0xff;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba48(&tf);
}
/**
 *	ata_set_native_max_address	-	LBA28 native max set
 *	@dev: Device to query
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Perform an LBA28 size set max upon the device in question. Return the
 *	actual LBA28 size or zero if the command fails.
 */

static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;
	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba(&tf);
}
/**
 *	ata_hpa_resize	-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 */

static u64 ata_hpa_resize(struct ata_device *dev)
{
	u64 sectors = dev->n_sectors;
	u64 hpa_sectors;

	if (ata_id_has_lba48(dev->id))
		hpa_sectors = ata_read_native_max_address_ext(dev);
	else
		hpa_sectors = ata_read_native_max_address(dev);

	/* if no hpa, both should be equal */
	ata_dev_printk(dev, KERN_INFO, "%s 1: sectors = %lld, "
				"hpa_sectors = %lld\n",
			__FUNCTION__, (long long)sectors, (long long)hpa_sectors);

	if (hpa_sectors > sectors) {
		ata_dev_printk(dev, KERN_INFO,
			"Host Protected Area detected:\n"
			"\tcurrent size: %lld sectors\n"
			"\tnative size: %lld sectors\n",
			(long long)sectors, (long long)hpa_sectors);

		if (ata_ignore_hpa) {
			if (ata_id_has_lba48(dev->id))
				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
			else
				hpa_sectors = ata_set_native_max_address(dev,
								hpa_sectors);

			if (hpa_sectors) {
				ata_dev_printk(dev, KERN_INFO, "native size "
					"increased to %lld sectors\n",
					(long long)hpa_sectors);
				return hpa_sectors;
			}
		}
	}
	return sectors;
}
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
/**
 *	ata_id_to_dma_mode	-	Identify DMA mode from id block
 *	@dev: device to identify
 *	@unknown: mode to assume if we cannot tell
 *
 *	Set up the timing values for the device based upon the identify
 *	reported values for the DMA mode. This function is used by drivers
 *	which rely upon firmware configured modes, but wish to report the
 *	mode correctly when possible.
 *
 *	In addition we emit similarly formatted messages to the default
 *	ata_dev_set_mode handler, in order to provide consistency of
 *	presentation.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}

/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
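/*
 * Illustrative sketch (made-up IDENTIFY words, not driver code): a
 * drive whose word 53 validity bits are set, reporting PIO3-4 in word
 * 64, MWDMA0-2 in word 63 and UDMA0-5 in word 88, yields a packed
 * xfer_mask covering PIO0-4 | MWDMA0-2 | UDMA0-5.
 */
static inline unsigned int example_id_xfermask(void)
{
	u16 id[ATA_ID_WORDS] = { 0 };

	id[ATA_ID_FIELD_VALID] = (1 << 1) | (1 << 2);
	id[ATA_ID_PIO_MODES] = 0x03;	/* PIO3 and PIO4 supported */
	id[ATA_ID_MWDMA_MODES] = 0x07;	/* MWDMA0-2 */
	id[ATA_ID_UDMA_MODES] = 0x3f;	/* UDMA0-5 */

	return ata_id_xfermask(id);
}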
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		cancel_work_sync(&ap->port_task.work);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}
/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}
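/*
 * Illustrative sketch (not driver code): issuing a register-only
 * command through the helper above.  CHECK POWER MODE is a typical
 * opcode-only command; AC_ERR_* bits come back on failure.
 */
static inline int example_check_power_mode(struct ata_device *dev)
{
	unsigned int err_mask;

	err_mask = ata_do_simple_cmd(dev, ATA_CMD_CHK_POWER);
	return err_mask ? -EIO : 0;
}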
/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}
/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */

static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
49016aca 1832/**
ffeae418 1833 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1834 * @dev: Target device to configure
1835 *
1836 * Configure @dev according to @dev->id. Generic and low-level
1837 * driver specific fixups are also applied.
49016aca
TH
1838 *
1839 * LOCKING:
ffeae418
TH
1840 * Kernel thread context (may sleep)
1841 *
1842 * RETURNS:
1843 * 0 on success, -errno otherwise
49016aca 1844 */
efdaedc4 1845int ata_dev_configure(struct ata_device *dev)
49016aca 1846{
3373efd8 1847 struct ata_port *ap = dev->ap;
efdaedc4 1848 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1849 const u16 *id = dev->id;
ff8854b2 1850 unsigned int xfer_mask;
b352e57d 1851 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1852 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1853 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1854 int rc;
49016aca 1855
0dd4b21f 1856 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
1857 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1858 __FUNCTION__);
ffeae418 1859 return 0;
49016aca
TH
1860 }
1861
0dd4b21f 1862 if (ata_msg_probe(ap))
44877b4e 1863 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1864
08573a86 1865 /* set _SDD */
3a32a8e9 1866 rc = ata_acpi_push_id(dev);
08573a86
KCA
1867 if (rc) {
1868 ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
1869 rc);
1870 }
1871
1872 /* retrieve and execute the ATA task file of _GTF */
1873 ata_acpi_exec_tfs(ap);
1874
c39f5ebe 1875 /* print device capabilities */
0dd4b21f 1876 if (ata_msg_probe(ap))
88574551
TH
1877 ata_dev_printk(dev, KERN_DEBUG,
1878 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1879 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1880 __FUNCTION__,
f15a1daf
TH
1881 id[49], id[82], id[83], id[84],
1882 id[85], id[86], id[87], id[88]);
c39f5ebe 1883
208a9933 1884 /* initialize to-be-configured parameters */
ea1dd4e1 1885 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1886 dev->max_sectors = 0;
1887 dev->cdb_len = 0;
1888 dev->n_sectors = 0;
1889 dev->cylinders = 0;
1890 dev->heads = 0;
1891 dev->sectors = 0;
1892
1da177e4
LT
1893 /*
1894 * common ATA, ATAPI feature tests
1895 */
1896
ff8854b2 1897 /* find max transfer mode; for printk only */
1148c3a7 1898 xfer_mask = ata_id_xfermask(id);
1da177e4 1899
0dd4b21f
BP
1900 if (ata_msg_probe(ap))
1901 ata_dump_id(id);
1da177e4 1902
ef143d57
AL
1903 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1904 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1905 sizeof(fwrevbuf));
1906
1907 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1908 sizeof(modelbuf));
1909
1da177e4
LT
1910 /* ATA-specific feature tests */
1911 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1912 if (ata_id_is_cfa(id)) {
1913 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
1914 ata_dev_printk(dev, KERN_WARNING,
1915 "supports DRM functions and may "
1916 "not be fully accessable.\n");
b352e57d
AC
1917 snprintf(revbuf, 7, "CFA");
1918 }
1919 else
1920 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1921
1148c3a7 1922 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1923
3f64f565
EM
1924 if (dev->id[59] & 0x100)
1925 dev->multi_count = dev->id[59] & 0xff;
1926
1148c3a7 1927 if (ata_id_has_lba(id)) {
4c2d721a 1928 const char *lba_desc;
a6e6ce8e 1929 char ncq_desc[20];
8bf62ece 1930
4c2d721a
TH
1931 lba_desc = "LBA";
1932 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1933 if (ata_id_has_lba48(id)) {
8bf62ece 1934 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1935 lba_desc = "LBA48";
6fc49adb
TH
1936
1937 if (dev->n_sectors >= (1UL << 28) &&
1938 ata_id_has_flush_ext(id))
1939 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1940 }
8bf62ece 1941
1e999736
AC
1942 if (ata_id_hpa_enabled(dev->id))
1943 dev->n_sectors = ata_hpa_resize(dev);
1944
a6e6ce8e
TH
1945 /* config NCQ */
1946 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1947
8bf62ece 1948 /* print device info to dmesg */
3f64f565
EM
1949 if (ata_msg_drv(ap) && print_info) {
1950 ata_dev_printk(dev, KERN_INFO,
1951 "%s: %s, %s, max %s\n",
1952 revbuf, modelbuf, fwrevbuf,
1953 ata_mode_string(xfer_mask));
1954 ata_dev_printk(dev, KERN_INFO,
1955 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 1956 (unsigned long long)dev->n_sectors,
3f64f565
EM
1957 dev->multi_count, lba_desc, ncq_desc);
1958 }
ffeae418 1959 } else {
8bf62ece
AL
1960 /* CHS */
1961
1962 /* Default translation */
1148c3a7
TH
1963 dev->cylinders = id[1];
1964 dev->heads = id[3];
1965 dev->sectors = id[6];
8bf62ece 1966
1148c3a7 1967 if (ata_id_current_chs_valid(id)) {
8bf62ece 1968 /* Current CHS translation is valid. */
1148c3a7
TH
1969 dev->cylinders = id[54];
1970 dev->heads = id[55];
1971 dev->sectors = id[56];
8bf62ece
AL
1972 }
1973
1974 /* print device info to dmesg */
3f64f565 1975 if (ata_msg_drv(ap) && print_info) {
88574551 1976 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
1977 "%s: %s, %s, max %s\n",
1978 revbuf, modelbuf, fwrevbuf,
1979 ata_mode_string(xfer_mask));
a84471fe 1980 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
1981 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
1982 (unsigned long long)dev->n_sectors,
1983 dev->multi_count, dev->cylinders,
1984 dev->heads, dev->sectors);
1985 }
07f6f7d0
AL
1986 }
1987
6e7846e9 1988 dev->cdb_len = 16;
1da177e4
LT
1989 }
1990
1991 /* ATAPI-specific feature tests */
2c13b7ce 1992 else if (dev->class == ATA_DEV_ATAPI) {
08a556db
AL
1993 char *cdb_intr_string = "";
1994
1148c3a7 1995 rc = atapi_cdb_len(id);
1da177e4 1996 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 1997 if (ata_msg_warn(ap))
88574551
TH
1998 ata_dev_printk(dev, KERN_WARNING,
1999 "unsupported CDB len\n");
ffeae418 2000 rc = -EINVAL;
1da177e4
LT
2001 goto err_out_nosup;
2002 }
6e7846e9 2003 dev->cdb_len = (unsigned int) rc;
1da177e4 2004
08a556db 2005 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2006 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2007 cdb_intr_string = ", CDB intr";
2008 }
312f7da2 2009
1da177e4 2010 /* print device info to dmesg */
5afc8142 2011 if (ata_msg_drv(ap) && print_info)
ef143d57
AL
2012 ata_dev_printk(dev, KERN_INFO,
2013 "ATAPI: %s, %s, max %s%s\n",
2014 modelbuf, fwrevbuf,
12436c30
TH
2015 ata_mode_string(xfer_mask),
2016 cdb_intr_string);
1da177e4
LT
2017 }
2018
914ed354
TH
2019 /* determine max_sectors */
2020 dev->max_sectors = ATA_MAX_SECTORS;
2021 if (dev->flags & ATA_DFLAG_LBA48)
2022 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2023
93590859
AC
2024 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2025 /* Let the user know. We don't want to disallow opens for
2026 rescue purposes, or in case the vendor is just a blithering
2027 idiot */
2028 if (print_info) {
2029 ata_dev_printk(dev, KERN_WARNING,
2030"Drive reports diagnostics failure. This may indicate a drive\n");
2031 ata_dev_printk(dev, KERN_WARNING,
2032"fault or invalid emulation. Contact drive vendor for information.\n");
2033 }
2034 }
2035
4b2f3ede 2036 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2037 if (ata_dev_knobble(dev)) {
5afc8142 2038 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2039 ata_dev_printk(dev, KERN_INFO,
2040 "applying bridge limits\n");
5a529139 2041 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2042 dev->max_sectors = ATA_MAX_SECTORS;
2043 }
2044
18d6e9d5 2045 if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2046 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2047 dev->max_sectors);
18d6e9d5 2048
6f23a31d
AL
2049 /* limit ATAPI DMA to R/W commands only */
2050 if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
2051 dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;
2052
4b2f3ede 2053 if (ap->ops->dev_config)
cd0d3bbc 2054 ap->ops->dev_config(dev);
4b2f3ede 2055
0dd4b21f
BP
2056 if (ata_msg_probe(ap))
2057 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2058 __FUNCTION__, ata_chk_status(ap));
ffeae418 2059 return 0;
1da177e4
LT
2060
2061err_out_nosup:
0dd4b21f 2062 if (ata_msg_probe(ap))
88574551
TH
2063 ata_dev_printk(dev, KERN_DEBUG,
2064 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2065 return rc;
1da177e4
LT
2066}
2067
be0d18df 2068/**
2e41e8e6 2069 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2070 * @ap: port
2071 *
2e41e8e6 2072 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2073 * detection.
2074 */
2075
2076int ata_cable_40wire(struct ata_port *ap)
2077{
2078 return ATA_CBL_PATA40;
2079}
2080
2081/**
2e41e8e6 2082 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2083 * @ap: port
2084 *
2e41e8e6 2085 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2086 * detection.
2087 */
2088
2089int ata_cable_80wire(struct ata_port *ap)
2090{
2091 return ATA_CBL_PATA80;
2092}
2093
2094/**
2095 * ata_cable_unknown - return unknown PATA cable.
2096 * @ap: port
2097 *
2098 * Helper method for drivers which have no PATA cable detection.
2099 */
2100
2101int ata_cable_unknown(struct ata_port *ap)
2102{
2103 return ATA_CBL_PATA_UNK;
2104}
2105
2106/**
2107 * ata_cable_sata - return SATA cable type
2108 * @ap: port
2109 *
2110 * Helper method for drivers which have SATA cables
2111 */
2112
2113int ata_cable_sata(struct ata_port *ap)
2114{
2115 return ATA_CBL_SATA;
2116}
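
/* Usage sketch (hypothetical, not from this file): the helpers above
 * are meant to be plugged straight into a LLD's ->cable_detect hook.
 * A driver whose channel is always wired with an 80-wire cable just
 * points the method at the helper; ata_bus_probe() then records the
 * result in ap->cbl once PDIAG- has been released.
 */
static const struct ata_port_operations my_pata_ops = {
	.cable_detect	= ata_cable_80wire,
	/* ... remaining methods elided ... */
};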
2117
1da177e4
LT
2118/**
2119 * ata_bus_probe - Reset and probe ATA bus
2120 * @ap: Bus to probe
2121 *
0cba632b
JG
2122 * Master ATA bus probing function. Initiates a hardware-dependent
2123 * bus reset, then attempts to identify any devices found on
2124 * the bus.
2125 *
1da177e4 2126 * LOCKING:
0cba632b 2127 * PCI/etc. bus probe sem.
1da177e4
LT
2128 *
2129 * RETURNS:
96072e69 2130 * Zero on success, negative errno otherwise.
1da177e4
LT
2131 */
2132
80289167 2133int ata_bus_probe(struct ata_port *ap)
1da177e4 2134{
28ca5c57 2135 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2136 int tries[ATA_MAX_DEVICES];
4ae72a1e 2137 int i, rc;
e82cbdb9 2138 struct ata_device *dev;
1da177e4 2139
28ca5c57 2140 ata_port_probe(ap);
c19ba8af 2141
14d2bac1
TH
2142 for (i = 0; i < ATA_MAX_DEVICES; i++)
2143 tries[i] = ATA_PROBE_MAX_TRIES;
2144
2145 retry:
2044470c 2146 /* reset and determine device classes */
52783c5d 2147 ap->ops->phy_reset(ap);
2061a47a 2148
52783c5d
TH
2149 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2150 dev = &ap->device[i];
c19ba8af 2151
52783c5d
TH
2152 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2153 dev->class != ATA_DEV_UNKNOWN)
2154 classes[dev->devno] = dev->class;
2155 else
2156 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2157
52783c5d 2158 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2159 }
1da177e4 2160
52783c5d 2161 ata_port_probe(ap);
2044470c 2162
b6079ca4
AC
2163 /* after the reset the device state is PIO 0 and the controller
2164 state is undefined. Record the mode */
2165
2166 for (i = 0; i < ATA_MAX_DEVICES; i++)
2167 ap->device[i].pio_mode = XFER_PIO_0;
2168
f31f0cc2
JG
2169 /* read IDENTIFY page and configure devices. We have to do the
2170 identify-specific sequence in reverse order (slave first) so
2171 that PDIAG- is released by the slave device */
2172
2173 for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
e82cbdb9 2174 dev = &ap->device[i];
28ca5c57 2175
ec573755
TH
2176 if (tries[i])
2177 dev->class = classes[i];
ffeae418 2178
14d2bac1 2179 if (!ata_dev_enabled(dev))
ffeae418 2180 continue;
ffeae418 2181
bff04647
TH
2182 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2183 dev->id);
14d2bac1
TH
2184 if (rc)
2185 goto fail;
f31f0cc2
JG
2186 }
2187
be0d18df
AC
2188 /* Now ask for the cable type as PDIAG- should have been released */
2189 if (ap->ops->cable_detect)
2190 ap->cbl = ap->ops->cable_detect(ap);
2191
f31f0cc2
JG
2192 /* After the identify sequence we can now set up the devices. We do
2193 this in the normal order so that the user doesn't get confused */
2194
2195 for(i = 0; i < ATA_MAX_DEVICES; i++) {
2196 dev = &ap->device[i];
2197 if (!ata_dev_enabled(dev))
2198 continue;
14d2bac1 2199
efdaedc4
TH
2200 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
2201 rc = ata_dev_configure(dev);
2202 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2203 if (rc)
2204 goto fail;
1da177e4
LT
2205 }
2206
e82cbdb9 2207 /* configure transfer mode */
3adcebb2 2208 rc = ata_set_mode(ap, &dev);
4ae72a1e 2209 if (rc)
51713d35 2210 goto fail;
1da177e4 2211
e82cbdb9
TH
2212 for (i = 0; i < ATA_MAX_DEVICES; i++)
2213 if (ata_dev_enabled(&ap->device[i]))
2214 return 0;
1da177e4 2215
e82cbdb9
TH
2216 /* no device present, disable port */
2217 ata_port_disable(ap);
1da177e4 2218 ap->ops->port_disable(ap);
96072e69 2219 return -ENODEV;
14d2bac1
TH
2220
2221 fail:
4ae72a1e
TH
2222 tries[dev->devno]--;
2223
14d2bac1
TH
2224 switch (rc) {
2225 case -EINVAL:
4ae72a1e 2226 /* eeek, something went very wrong, give up */
14d2bac1
TH
2227 tries[dev->devno] = 0;
2228 break;
4ae72a1e
TH
2229
2230 case -ENODEV:
2231 /* give it just one more chance */
2232 tries[dev->devno] = min(tries[dev->devno], 1);
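		/* fall through */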
14d2bac1 2233 case -EIO:
4ae72a1e
TH
2234 if (tries[dev->devno] == 1) {
2235 /* This is the last chance, better to slow
2236 * down than lose it.
2237 */
2238 sata_down_spd_limit(ap);
2239 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2240 }
14d2bac1
TH
2241 }
2242
4ae72a1e 2243 if (!tries[dev->devno])
3373efd8 2244 ata_dev_disable(dev);
ec573755 2245
14d2bac1 2246 goto retry;
1da177e4
LT
2247}
2248
2249/**
0cba632b
JG
2250 * ata_port_probe - Mark port as enabled
2251 * @ap: Port for which we indicate enablement
1da177e4 2252 *
0cba632b
JG
2253 * Modify @ap data structure such that the system
2254 * thinks that the entire port is enabled.
2255 *
cca3974e 2256 * LOCKING: host lock, or some other form of
0cba632b 2257 * serialization.
1da177e4
LT
2258 */
2259
2260void ata_port_probe(struct ata_port *ap)
2261{
198e0fed 2262 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2263}
2264
3be680b7
TH
2265/**
2266 * sata_print_link_status - Print SATA link status
2267 * @ap: SATA port to printk link status about
2268 *
2269 * This function prints link speed and status of a SATA link.
2270 *
2271 * LOCKING:
2272 * None.
2273 */
43727fbc 2274void sata_print_link_status(struct ata_port *ap)
3be680b7 2275{
6d5f9732 2276 u32 sstatus, scontrol, tmp;
3be680b7 2277
81952c54 2278 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 2279 return;
81952c54 2280 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 2281
81952c54 2282 if (ata_port_online(ap)) {
3be680b7 2283 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
2284 ata_port_printk(ap, KERN_INFO,
2285 "SATA link up %s (SStatus %X SControl %X)\n",
2286 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2287 } else {
f15a1daf
TH
2288 ata_port_printk(ap, KERN_INFO,
2289 "SATA link down (SStatus %X SControl %X)\n",
2290 sstatus, scontrol);
3be680b7
TH
2291 }
2292}
2293
1da177e4 2294/**
780a87f7
JG
2295 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2296 * @ap: SATA port associated with target SATA PHY.
1da177e4 2297 *
780a87f7
JG
2298 * This function issues commands to standard SATA Sxxx
2299 * PHY registers, to wake up the phy (and device), and
2300 * clear any reset condition.
1da177e4
LT
2301 *
2302 * LOCKING:
0cba632b 2303 * PCI/etc. bus probe sem.
1da177e4
LT
2304 *
2305 */
2306void __sata_phy_reset(struct ata_port *ap)
2307{
2308 u32 sstatus;
2309 unsigned long timeout = jiffies + (HZ * 5);
2310
2311 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 2312 /* issue phy wake/reset */
81952c54 2313 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
2314 /* Couldn't find anything in SATA I/II specs, but
2315 * AHCI-1.1 10.4.2 says at least 1 ms. */
2316 mdelay(1);
1da177e4 2317 }
81952c54
TH
2318 /* phy wake/clear reset */
2319 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
2320
2321 /* wait for phy to become ready, if necessary */
2322 do {
2323 msleep(200);
81952c54 2324 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
2325 if ((sstatus & 0xf) != 1)
2326 break;
2327 } while (time_before(jiffies, timeout));
2328
3be680b7
TH
2329 /* print link status */
2330 sata_print_link_status(ap);
656563e3 2331
3be680b7 2332 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 2333 if (!ata_port_offline(ap))
1da177e4 2334 ata_port_probe(ap);
3be680b7 2335 else
1da177e4 2336 ata_port_disable(ap);
1da177e4 2337
198e0fed 2338 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2339 return;
2340
2341 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2342 ata_port_disable(ap);
2343 return;
2344 }
2345
2346 ap->cbl = ATA_CBL_SATA;
2347}
2348
2349/**
780a87f7
JG
2350 * sata_phy_reset - Reset SATA bus.
2351 * @ap: SATA port associated with target SATA PHY.
1da177e4 2352 *
780a87f7
JG
2353 * This function resets the SATA bus, and then probes
2354 * the bus for devices.
1da177e4
LT
2355 *
2356 * LOCKING:
0cba632b 2357 * PCI/etc. bus probe sem.
1da177e4
LT
2358 *
2359 */
2360void sata_phy_reset(struct ata_port *ap)
2361{
2362 __sata_phy_reset(ap);
198e0fed 2363 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2364 return;
2365 ata_bus_reset(ap);
2366}
2367
ebdfca6e
AC
2368/**
2369 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2370 * @adev: device
2371 *
2372 * Obtain the other device on the same cable; returns NULL if
2373 * no such device is present.
2374 */
2e9edbf8 2375
3373efd8 2376struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2377{
3373efd8 2378 struct ata_port *ap = adev->ap;
ebdfca6e 2379 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2380 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2381 return NULL;
2382 return pair;
2383}
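
/* Usage sketch (hypothetical helper): controllers whose master and
 * slave share one set of timing registers can use ata_dev_pair() to
 * find the cable-mate and program the slower of the two PIO modes,
 * since the XFER_PIO_* constants increase monotonically with speed.
 */
static u8 my_shared_pio_mode(struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);

	if (pair && pair->pio_mode < adev->pio_mode)
		return pair->pio_mode;
	return adev->pio_mode;
}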
2384
1da177e4 2385/**
780a87f7
JG
2386 * ata_port_disable - Disable port.
2387 * @ap: Port to be disabled.
1da177e4 2388 *
780a87f7
JG
2389 * Modify @ap data structure such that the system
2390 * thinks that the entire port is disabled, and should
2391 * never attempt to probe or communicate with devices
2392 * on this port.
2393 *
cca3974e 2394 * LOCKING: host lock, or some other form of
780a87f7 2395 * serialization.
1da177e4
LT
2396 */
2397
2398void ata_port_disable(struct ata_port *ap)
2399{
2400 ap->device[0].class = ATA_DEV_NONE;
2401 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2402 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2403}
2404
1c3fae4d 2405/**
3c567b7d 2406 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2407 * @ap: Port to adjust SATA spd limit for
2408 *
2409 * Adjust SATA spd limit of @ap downward. Note that this
2410 * function only adjusts the limit. The change must be applied
3c567b7d 2411 * using sata_set_spd().
1c3fae4d
TH
2412 *
2413 * LOCKING:
2414 * Inherited from caller.
2415 *
2416 * RETURNS:
2417 * 0 on success, negative errno on failure
2418 */
3c567b7d 2419int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2420{
81952c54
TH
2421 u32 sstatus, spd, mask;
2422 int rc, highbit;
1c3fae4d 2423
81952c54
TH
2424 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2425 if (rc)
2426 return rc;
1c3fae4d
TH
2427
2428 mask = ap->sata_spd_limit;
2429 if (mask <= 1)
2430 return -EINVAL;
2431 highbit = fls(mask) - 1;
2432 mask &= ~(1 << highbit);
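	/* e.g. a limit mask of 0x3 (1.5 and 3.0 Gbps) becomes 0x1 here */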
2433
81952c54 2434 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2435 if (spd <= 1)
2436 return -EINVAL;
2437 spd--;
2438 mask &= (1 << spd) - 1;
2439 if (!mask)
2440 return -EINVAL;
2441
2442 ap->sata_spd_limit = mask;
2443
f15a1daf
TH
2444 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2445 sata_spd_string(fls(mask)));
1c3fae4d
TH
2446
2447 return 0;
2448}
2449
3c567b7d 2450static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2451{
2452 u32 spd, limit;
2453
2454 if (ap->sata_spd_limit == UINT_MAX)
2455 limit = 0;
2456 else
2457 limit = fls(ap->sata_spd_limit);
2458
2459 spd = (*scontrol >> 4) & 0xf;
2460 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2461
2462 return spd != limit;
2463}
2464
2465/**
3c567b7d 2466 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2467 * @ap: Port in question
2468 *
2469 * Test whether the spd limit in SControl matches
2470 * @ap->sata_spd_limit. This function is used to determine
2471 * whether hardreset is necessary to apply SATA spd
2472 * configuration.
2473 *
2474 * LOCKING:
2475 * Inherited from caller.
2476 *
2477 * RETURNS:
2478 * 1 if SATA spd configuration is needed, 0 otherwise.
2479 */
3c567b7d 2480int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2481{
2482 u32 scontrol;
2483
81952c54 2484 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2485 return 0;
2486
3c567b7d 2487 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2488}
2489
2490/**
3c567b7d 2491 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2492 * @ap: Port to set SATA spd for
2493 *
2494 * Set SATA spd of @ap according to sata_spd_limit.
2495 *
2496 * LOCKING:
2497 * Inherited from caller.
2498 *
2499 * RETURNS:
2500 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2501 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2502 */
3c567b7d 2503int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2504{
2505 u32 scontrol;
81952c54 2506 int rc;
1c3fae4d 2507
81952c54
TH
2508 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2509 return rc;
1c3fae4d 2510
3c567b7d 2511 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2512 return 0;
2513
81952c54
TH
2514 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2515 return rc;
2516
1c3fae4d
TH
2517 return 1;
2518}
2519
452503f9
AC
2520/*
2521 * This mode timing computation functionality is ported over from
2522 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2523 */
2524/*
b352e57d 2525 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2526 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2527 * for UDMA6, which is currently supported only by Maxtor drives.
2528 *
2529 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2530 */
2531
2532static const struct ata_timing ata_timing[] = {
2533
2534 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2535 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2536 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2537 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2538
b352e57d
AC
2539 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2540 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2541 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2542 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2543 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2544
2545/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2546
452503f9
AC
2547 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2548 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2549 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2550
452503f9
AC
2551 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2552 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2553 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2554
b352e57d
AC
2555 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2556 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2557 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2558 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2559
2560 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2561 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2562 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2563
2564/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2565
2566 { 0xFF }
2567};
2568
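/* ENOUGH rounds v up to whole units; EZ additionally keeps an
 * unspecified timing (v == 0) as 0 */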
2569#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2570#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2571
2572static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2573{
2574 q->setup = EZ(t->setup * 1000, T);
2575 q->act8b = EZ(t->act8b * 1000, T);
2576 q->rec8b = EZ(t->rec8b * 1000, T);
2577 q->cyc8b = EZ(t->cyc8b * 1000, T);
2578 q->active = EZ(t->active * 1000, T);
2579 q->recover = EZ(t->recover * 1000, T);
2580 q->cycle = EZ(t->cycle * 1000, T);
2581 q->udma = EZ(t->udma * 1000, UT);
2582}
2583
2584void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2585 struct ata_timing *m, unsigned int what)
2586{
2587 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2588 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2589 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2590 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2591 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2592 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2593 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2594 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2595}
2596
2597static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2598{
2599 const struct ata_timing *t;
2600
2601 for (t = ata_timing; t->mode != speed; t++)
91190758 2602 if (t->mode == 0xFF)
452503f9 2603 return NULL;
2e9edbf8 2604 return t;
452503f9
AC
2605}
2606
2607int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2608 struct ata_timing *t, int T, int UT)
2609{
2610 const struct ata_timing *s;
2611 struct ata_timing p;
2612
2613 /*
2e9edbf8 2614 * Find the mode.
75b1f2f8 2615 */
452503f9
AC
2616
2617 if (!(s = ata_timing_find_mode(speed)))
2618 return -EINVAL;
2619
75b1f2f8
AL
2620 memcpy(t, s, sizeof(*s));
2621
452503f9
AC
2622 /*
2623 * If the drive is an EIDE drive, it can tell us it needs extended
2624 * PIO/MW_DMA cycle timing.
2625 */
2626
2627 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2628 memset(&p, 0, sizeof(p));
2629 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2630 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2631 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2632 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2633 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2634 }
2635 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2636 }
2637
2638 /*
2639 * Convert the timing to bus clock counts.
2640 */
2641
75b1f2f8 2642 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2643
2644 /*
c893a3ae
RD
2645 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2646 * S.M.A.R.T. and some other commands. We have to ensure that the
2647 * DMA cycle timing is slower than or equal to the fastest PIO timing.
452503f9
AC
2648 */
2649
fd3367af 2650 if (speed > XFER_PIO_6) {
452503f9
AC
2651 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2652 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2653 }
2654
2655 /*
c893a3ae 2656 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2657 */
2658
2659 if (t->act8b + t->rec8b < t->cyc8b) {
2660 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2661 t->rec8b = t->cyc8b - t->act8b;
2662 }
2663
2664 if (t->active + t->recover < t->cycle) {
2665 t->active += (t->cycle - (t->active + t->recover)) / 2;
2666 t->recover = t->cycle - t->active;
2667 }
a617c09f 2668
4f701d1e
AC
2669 /* In a few cases quantisation may produce enough errors to
2670 leave t->cycle too low for the sum of active and recovery;
2671 if so, we must correct this */
2672 if (t->active + t->recover > t->cycle)
2673 t->cycle = t->active + t->recover;
452503f9
AC
2674
2675 return 0;
2676}
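
/* Usage sketch (hypothetical ->set_piomode; the 33 MHz clock and the
 * register programming are assumptions, not from this file).  With
 * T = 1000000000 / 33333, roughly 30000, i.e. the bus clock period in
 * picoseconds matching the ns * 1000 scaling in ata_timing_quantize(),
 * a 120 ns PIO4 cycle quantizes to ENOUGH(120000, 30000) == 4 clocks.
 */
static void my_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;
	int T = 1000000000 / 33333;	/* clock period in ps, 33 MHz assumed */

	if (ata_timing_compute(adev, adev->pio_mode, &t, T, T) < 0)
		return;		/* unknown mode; a real driver would complain */

	/* program t.setup / t.active / t.recover into the chipset here */
}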
2677
cf176e1a
TH
2678/**
2679 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2680 * @dev: Device to adjust xfer masks
458337db 2681 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2682 *
2683 * Adjust xfer masks of @dev downward. Note that this function
2684 * does not apply the change. Invoking ata_set_mode() afterwards
2685 * will apply the limit.
2686 *
2687 * LOCKING:
2688 * Inherited from caller.
2689 *
2690 * RETURNS:
2691 * 0 on success, negative errno on failure
2692 */
458337db 2693int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2694{
458337db
TH
2695 char buf[32];
2696 unsigned int orig_mask, xfer_mask;
2697 unsigned int pio_mask, mwdma_mask, udma_mask;
2698 int quiet, highbit;
cf176e1a 2699
458337db
TH
2700 quiet = !!(sel & ATA_DNXFER_QUIET);
2701 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2702
458337db
TH
2703 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2704 dev->mwdma_mask,
2705 dev->udma_mask);
2706 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2707
458337db
TH
2708 switch (sel) {
2709 case ATA_DNXFER_PIO:
2710 highbit = fls(pio_mask) - 1;
2711 pio_mask &= ~(1 << highbit);
2712 break;
2713
2714 case ATA_DNXFER_DMA:
2715 if (udma_mask) {
2716 highbit = fls(udma_mask) - 1;
2717 udma_mask &= ~(1 << highbit);
2718 if (!udma_mask)
2719 return -ENOENT;
2720 } else if (mwdma_mask) {
2721 highbit = fls(mwdma_mask) - 1;
2722 mwdma_mask &= ~(1 << highbit);
2723 if (!mwdma_mask)
2724 return -ENOENT;
2725 }
2726 break;
2727
2728 case ATA_DNXFER_40C:
2729 udma_mask &= ATA_UDMA_MASK_40C;
2730 break;
2731
2732 case ATA_DNXFER_FORCE_PIO0:
2733 pio_mask &= 1;
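		/* fall through */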
2734 case ATA_DNXFER_FORCE_PIO:
2735 mwdma_mask = 0;
2736 udma_mask = 0;
2737 break;
2738
458337db
TH
2739 default:
2740 BUG();
2741 }
2742
2743 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2744
2745 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2746 return -ENOENT;
2747
2748 if (!quiet) {
2749 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2750 snprintf(buf, sizeof(buf), "%s:%s",
2751 ata_mode_string(xfer_mask),
2752 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2753 else
2754 snprintf(buf, sizeof(buf), "%s",
2755 ata_mode_string(xfer_mask));
2756
2757 ata_dev_printk(dev, KERN_WARNING,
2758 "limiting speed to %s\n", buf);
2759 }
cf176e1a
TH
2760
2761 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2762 &dev->udma_mask);
2763
cf176e1a 2764 return 0;
cf176e1a
TH
2765}
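
/* Usage sketch (hypothetical helper): clamp UDMA to 40-wire-safe modes
 * after discovering a 40-wire cable.  ATA_DNXFER_QUIET may be ORed in
 * to suppress the "limiting speed" warning; the new mask only takes
 * effect on the next ata_set_mode().
 */
static int my_clamp_to_40wire(struct ata_device *dev)
{
	return ata_down_xfermask_limit(dev, ATA_DNXFER_40C | ATA_DNXFER_QUIET);
}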
2766
3373efd8 2767static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2768{
baa1e78a 2769 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2770 unsigned int err_mask;
2771 int rc;
1da177e4 2772
e8384607 2773 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2774 if (dev->xfer_shift == ATA_SHIFT_PIO)
2775 dev->flags |= ATA_DFLAG_PIO;
2776
3373efd8 2777 err_mask = ata_dev_set_xfermode(dev);
11750a40
A
2778 /* Old CFA may refuse this command, which is just fine */
2779 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2780 err_mask &= ~AC_ERR_DEV;
2781
83206a29 2782 if (err_mask) {
f15a1daf
TH
2783 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2784 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2785 return -EIO;
2786 }
1da177e4 2787
baa1e78a 2788 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2789 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2790 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2791 if (rc)
83206a29 2792 return rc;
48a8a14f 2793
23e71c3d
TH
2794 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2795 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2796
f15a1daf
TH
2797 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2798 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2799 return 0;
1da177e4
LT
2800}
2801
1da177e4 2802/**
04351821 2803 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
1da177e4 2804 * @ap: port on which timings will be programmed
e82cbdb9 2805 * @r_failed_dev: out parameter for failed device
1da177e4 2806 *
04351821
A
2807 * Standard implementation of the function used to tune and set
2808 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2809 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2810 * returned in @r_failed_dev.
780a87f7 2811 *
1da177e4 2812 * LOCKING:
0cba632b 2813 * PCI/etc. bus probe sem.
e82cbdb9
TH
2814 *
2815 * RETURNS:
2816 * 0 on success, negative errno otherwise
1da177e4 2817 */
04351821
A
2818
2819int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2820{
e8e0619f 2821 struct ata_device *dev;
e82cbdb9 2822 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2823
3adcebb2 2824
a6d5a51c
TH
2825 /* step 1: calculate xfer_mask */
2826 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2827 unsigned int pio_mask, dma_mask;
a6d5a51c 2828
e8e0619f
TH
2829 dev = &ap->device[i];
2830
e1211e3f 2831 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2832 continue;
2833
3373efd8 2834 ata_dev_xfermask(dev);
1da177e4 2835
acf356b1
TH
2836 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2837 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2838 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2839 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2840
4f65977d 2841 found = 1;
5444a6f4
AC
2842 if (dev->dma_mode)
2843 used_dma = 1;
a6d5a51c 2844 }
4f65977d 2845 if (!found)
e82cbdb9 2846 goto out;
a6d5a51c
TH
2847
2848 /* step 2: always set host PIO timings */
e8e0619f
TH
2849 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2850 dev = &ap->device[i];
2851 if (!ata_dev_enabled(dev))
2852 continue;
2853
2854 if (!dev->pio_mode) {
f15a1daf 2855 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2856 rc = -EINVAL;
e82cbdb9 2857 goto out;
e8e0619f
TH
2858 }
2859
2860 dev->xfer_mode = dev->pio_mode;
2861 dev->xfer_shift = ATA_SHIFT_PIO;
2862 if (ap->ops->set_piomode)
2863 ap->ops->set_piomode(ap, dev);
2864 }
1da177e4 2865
a6d5a51c 2866 /* step 3: set host DMA timings */
e8e0619f
TH
2867 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2868 dev = &ap->device[i];
2869
2870 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2871 continue;
2872
2873 dev->xfer_mode = dev->dma_mode;
2874 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2875 if (ap->ops->set_dmamode)
2876 ap->ops->set_dmamode(ap, dev);
2877 }
1da177e4
LT
2878
2879 /* step 4: update devices' xfer mode */
83206a29 2880 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2881 dev = &ap->device[i];
1da177e4 2882
18d90deb 2883 /* don't update suspended devices' xfer mode */
9666f400 2884 if (!ata_dev_enabled(dev))
83206a29
TH
2885 continue;
2886
3373efd8 2887 rc = ata_dev_set_mode(dev);
5bbc53f4 2888 if (rc)
e82cbdb9 2889 goto out;
83206a29 2890 }
1da177e4 2891
e8e0619f
TH
2892 /* Record simplex status. If we selected DMA then the other
2893 * host channels are not permitted to do so.
5444a6f4 2894 */
cca3974e 2895 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2896 ap->host->simplex_claimed = ap;
5444a6f4 2897
e82cbdb9
TH
2898 out:
2899 if (rc)
2900 *r_failed_dev = dev;
2901 return rc;
1da177e4
LT
2902}
2903
04351821
A
2904/**
2905 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2906 * @ap: port on which timings will be programmed
2907 * @r_failed_dev: out parameter for failed device
2908 *
2909 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2910 * ata_set_mode() fails, pointer to the failing device is
2911 * returned in @r_failed_dev.
2912 *
2913 * LOCKING:
2914 * PCI/etc. bus probe sem.
2915 *
2916 * RETURNS:
2917 * 0 on success, negative errno otherwise
2918 */
2919int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2920{
2921 /* has private set_mode? */
2922 if (ap->ops->set_mode)
2923 return ap->ops->set_mode(ap, r_failed_dev);
2924 return ata_do_set_mode(ap, r_failed_dev);
2925}
2926
1fdffbce
JG
2927/**
2928 * ata_tf_to_host - issue ATA taskfile to host controller
2929 * @ap: port to which command is being issued
2930 * @tf: ATA taskfile register set
2931 *
2932 * Issues ATA taskfile register set to ATA host controller,
2933 * with proper synchronization with interrupt handler and
2934 * other threads.
2935 *
2936 * LOCKING:
cca3974e 2937 * spin_lock_irqsave(host lock)
1fdffbce
JG
2938 */
2939
2940static inline void ata_tf_to_host(struct ata_port *ap,
2941 const struct ata_taskfile *tf)
2942{
2943 ap->ops->tf_load(ap, tf);
2944 ap->ops->exec_command(ap, tf);
2945}
2946
1da177e4
LT
2947/**
2948 * ata_busy_sleep - sleep until BSY clears, or timeout
2949 * @ap: port containing status register to be polled
2950 * @tmout_pat: impatience timeout
2951 * @tmout: overall timeout
2952 *
780a87f7
JG
2953 * Sleep until ATA Status register bit BSY clears,
2954 * or a timeout occurs.
2955 *
d1adc1bb
TH
2956 * LOCKING:
2957 * Kernel thread context (may sleep).
2958 *
2959 * RETURNS:
2960 * 0 on success, -errno otherwise.
1da177e4 2961 */
d1adc1bb
TH
2962int ata_busy_sleep(struct ata_port *ap,
2963 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2964{
2965 unsigned long timer_start, timeout;
2966 u8 status;
2967
2968 status = ata_busy_wait(ap, ATA_BUSY, 300);
2969 timer_start = jiffies;
2970 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2971 while (status != 0xff && (status & ATA_BUSY) &&
2972 time_before(jiffies, timeout)) {
1da177e4
LT
2973 msleep(50);
2974 status = ata_busy_wait(ap, ATA_BUSY, 3);
2975 }
2976
d1adc1bb 2977 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2978 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2979 "port is slow to respond, please be patient "
2980 "(Status 0x%x)\n", status);
1da177e4
LT
2981
2982 timeout = timer_start + tmout;
d1adc1bb
TH
2983 while (status != 0xff && (status & ATA_BUSY) &&
2984 time_before(jiffies, timeout)) {
1da177e4
LT
2985 msleep(50);
2986 status = ata_chk_status(ap);
2987 }
2988
d1adc1bb
TH
2989 if (status == 0xff)
2990 return -ENODEV;
2991
1da177e4 2992 if (status & ATA_BUSY) {
f15a1daf 2993 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2994 "(%lu secs, Status 0x%x)\n",
2995 tmout / HZ, status);
d1adc1bb 2996 return -EBUSY;
1da177e4
LT
2997 }
2998
2999 return 0;
3000}
3001
d4b2bab4
TH
3002/**
3003 * ata_wait_ready - sleep until BSY clears, or timeout
3004 * @ap: port containing status register to be polled
3005 * @deadline: deadline jiffies for the operation
3006 *
3007 * Sleep until ATA Status register bit BSY clears, or timeout
3008 * occurs.
3009 *
3010 * LOCKING:
3011 * Kernel thread context (may sleep).
3012 *
3013 * RETURNS:
3014 * 0 on success, -errno otherwise.
3015 */
3016int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3017{
3018 unsigned long start = jiffies;
3019 int warned = 0;
3020
3021 while (1) {
3022 u8 status = ata_chk_status(ap);
3023 unsigned long now = jiffies;
3024
3025 if (!(status & ATA_BUSY))
3026 return 0;
fd7fe701 3027 if (!ata_port_online(ap) && status == 0xff)
d4b2bab4
TH
3028 return -ENODEV;
3029 if (time_after(now, deadline))
3030 return -EBUSY;
3031
3032 if (!warned && time_after(now, start + 5 * HZ) &&
3033 (deadline - now > 3 * HZ)) {
3034 ata_port_printk(ap, KERN_WARNING,
3035 "port is slow to respond, please be patient "
3036 "(Status 0x%x)\n", status);
3037 warned = 1;
3038 }
3039
3040 msleep(50);
3041 }
3042}
3043
3044static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3045 unsigned long deadline)
1da177e4
LT
3046{
3047 struct ata_ioports *ioaddr = &ap->ioaddr;
3048 unsigned int dev0 = devmask & (1 << 0);
3049 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3050 int rc, ret = 0;
1da177e4
LT
3051
3052 /* if device 0 was found in ata_devchk, wait for its
3053 * BSY bit to clear
3054 */
d4b2bab4
TH
3055 if (dev0) {
3056 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3057 if (rc) {
3058 if (rc != -ENODEV)
3059 return rc;
3060 ret = rc;
3061 }
d4b2bab4 3062 }
1da177e4 3063
e141d999
TH
3064 /* if device 1 was found in ata_devchk, wait for register
3065 * access briefly, then wait for BSY to clear.
1da177e4 3066 */
e141d999
TH
3067 if (dev1) {
3068 int i;
1da177e4
LT
3069
3070 ap->ops->dev_select(ap, 1);
e141d999
TH
3071
3072 /* Wait for register access. Some ATAPI devices fail
3073 * to set nsect/lbal after reset, so don't waste too
3074 * much time on it. We're going to wait for !BSY anyway.
3075 */
3076 for (i = 0; i < 2; i++) {
3077 u8 nsect, lbal;
3078
3079 nsect = ioread8(ioaddr->nsect_addr);
3080 lbal = ioread8(ioaddr->lbal_addr);
3081 if ((nsect == 1) && (lbal == 1))
3082 break;
3083 msleep(50); /* give drive a breather */
3084 }
3085
d4b2bab4 3086 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3087 if (rc) {
3088 if (rc != -ENODEV)
3089 return rc;
3090 ret = rc;
3091 }
d4b2bab4 3092 }
1da177e4
LT
3093
3094 /* is all this really necessary? */
3095 ap->ops->dev_select(ap, 0);
3096 if (dev1)
3097 ap->ops->dev_select(ap, 1);
3098 if (dev0)
3099 ap->ops->dev_select(ap, 0);
d4b2bab4 3100
9b89391c 3101 return ret;
1da177e4
LT
3102}
3103
d4b2bab4
TH
3104static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3105 unsigned long deadline)
1da177e4
LT
3106{
3107 struct ata_ioports *ioaddr = &ap->ioaddr;
3108
44877b4e 3109 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3110
3111 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3112 iowrite8(ap->ctl, ioaddr->ctl_addr);
3113 udelay(20); /* FIXME: flush */
3114 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3115 udelay(20); /* FIXME: flush */
3116 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3117
3118 /* spec mandates ">= 2ms" before checking status.
3119 * We wait 150ms, because that was the magic delay used for
3120 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3121 * between when the ATA command register is written, and then
3122 * status is checked. Because waiting for "a while" before
3123 * checking status is fine, post SRST, we perform this magic
3124 * delay here as well.
09c7ad79
AC
3125 *
3126 * Old drivers/ide used the 2 ms rule and then waited for ready
1da177e4
LT
3127 */
3128 msleep(150);
3129
2e9edbf8 3130 /* Before we perform post reset processing we want to see if
298a41ca
TH
3131 * the bus shows 0xFF because the odd clown forgets the D7
3132 * pulldown resistor.
3133 */
d1adc1bb 3134 if (ata_check_status(ap) == 0xFF)
9b89391c 3135 return -ENODEV;
09c7ad79 3136
d4b2bab4 3137 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3138}
3139
3140/**
3141 * ata_bus_reset - reset host port and associated ATA channel
3142 * @ap: port to reset
3143 *
3144 * This is typically the first time we actually start issuing
3145 * commands to the ATA channel. We wait for BSY to clear, then
3146 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3147 * result. Determine what devices, if any, are on the channel
3148 * by looking at the device 0/1 error register. Look at the signature
3149 * stored in each device's taskfile registers, to determine if
3150 * the device is ATA or ATAPI.
3151 *
3152 * LOCKING:
0cba632b 3153 * PCI/etc. bus probe sem.
cca3974e 3154 * Obtains host lock.
1da177e4
LT
3155 *
3156 * SIDE EFFECTS:
198e0fed 3157 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3158 */
3159
3160void ata_bus_reset(struct ata_port *ap)
3161{
3162 struct ata_ioports *ioaddr = &ap->ioaddr;
3163 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3164 u8 err;
aec5c3c1 3165 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3166 int rc;
1da177e4 3167
44877b4e 3168 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3169
3170 /* determine if device 0/1 are present */
3171 if (ap->flags & ATA_FLAG_SATA_RESET)
3172 dev0 = 1;
3173 else {
3174 dev0 = ata_devchk(ap, 0);
3175 if (slave_possible)
3176 dev1 = ata_devchk(ap, 1);
3177 }
3178
3179 if (dev0)
3180 devmask |= (1 << 0);
3181 if (dev1)
3182 devmask |= (1 << 1);
3183
3184 /* select device 0 again */
3185 ap->ops->dev_select(ap, 0);
3186
3187 /* issue bus reset */
9b89391c
TH
3188 if (ap->flags & ATA_FLAG_SRST) {
3189 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3190 if (rc && rc != -ENODEV)
aec5c3c1 3191 goto err_out;
9b89391c 3192 }
1da177e4
LT
3193
3194 /*
3195 * determine by signature whether we have ATA or ATAPI devices
3196 */
b4dc7623 3197 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 3198 if ((slave_possible) && (err != 0x81))
b4dc7623 3199 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
3200
3201 /* re-enable interrupts */
83625006 3202 ap->ops->irq_on(ap);
1da177e4
LT
3203
3204 /* is double-select really necessary? */
3205 if (ap->device[1].class != ATA_DEV_NONE)
3206 ap->ops->dev_select(ap, 1);
3207 if (ap->device[0].class != ATA_DEV_NONE)
3208 ap->ops->dev_select(ap, 0);
3209
3210 /* if no devices were detected, disable this port */
3211 if ((ap->device[0].class == ATA_DEV_NONE) &&
3212 (ap->device[1].class == ATA_DEV_NONE))
3213 goto err_out;
3214
3215 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3216 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3217 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3218 }
3219
3220 DPRINTK("EXIT\n");
3221 return;
3222
3223err_out:
f15a1daf 3224 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
3225 ap->ops->port_disable(ap);
3226
3227 DPRINTK("EXIT\n");
3228}
3229
d7bb4cc7
TH
3230/**
3231 * sata_phy_debounce - debounce SATA phy status
3232 * @ap: ATA port to debounce SATA phy status for
3233 * @params: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3234 * @deadline: deadline jiffies for the operation
d7bb4cc7
TH
3235 *
3236 * Make sure SStatus of @ap reaches stable state, determined by
3237 * holding the same value where DET is not 1 for @duration polled
3238 * every @interval, before @timeout. Timeout constrains the
d4b2bab4
TH
3239 * beginning of the stable state. Because DET gets stuck at 1 on
3240 * some controllers after hot unplugging, this function waits
d7bb4cc7
TH
3241 * until timeout and then returns 0 if DET is stable at 1.
3242 *
d4b2bab4
TH
3243 * @timeout is further limited by @deadline. The sooner of the
3244 * two is used.
3245 *
d7bb4cc7
TH
3246 * LOCKING:
3247 * Kernel thread context (may sleep)
3248 *
3249 * RETURNS:
3250 * 0 on success, -errno on failure.
3251 */
d4b2bab4
TH
3252int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3253 unsigned long deadline)
7a7921e8 3254{
d7bb4cc7 3255 unsigned long interval_msec = params[0];
d4b2bab4
TH
3256 unsigned long duration = msecs_to_jiffies(params[1]);
3257 unsigned long last_jiffies, t;
d7bb4cc7
TH
3258 u32 last, cur;
3259 int rc;
3260
d4b2bab4
TH
3261 t = jiffies + msecs_to_jiffies(params[2]);
3262 if (time_before(t, deadline))
3263 deadline = t;
3264
d7bb4cc7
TH
3265 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3266 return rc;
3267 cur &= 0xf;
3268
3269 last = cur;
3270 last_jiffies = jiffies;
3271
3272 while (1) {
3273 msleep(interval_msec);
3274 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3275 return rc;
3276 cur &= 0xf;
3277
3278 /* DET stable? */
3279 if (cur == last) {
d4b2bab4 3280 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3281 continue;
3282 if (time_after(jiffies, last_jiffies + duration))
3283 return 0;
3284 continue;
3285 }
3286
3287 /* unstable, start over */
3288 last = cur;
3289 last_jiffies = jiffies;
3290
d4b2bab4
TH
3291 /* check deadline */
3292 if (time_after(jiffies, deadline))
d7bb4cc7
TH
3293 return -EBUSY;
3294 }
3295}
3296
3297/**
3298 * sata_phy_resume - resume SATA phy
3299 * @ap: ATA port to resume SATA phy for
3300 * @params: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3301 * @deadline: deadline jiffies for the operation
d7bb4cc7
TH
3302 *
3303 * Resume SATA phy of @ap and debounce it.
3304 *
3305 * LOCKING:
3306 * Kernel thread context (may sleep)
3307 *
3308 * RETURNS:
3309 * 0 on success, -errno on failure.
3310 */
d4b2bab4
TH
3311int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3312 unsigned long deadline)
d7bb4cc7
TH
3313{
3314 u32 scontrol;
81952c54
TH
3315 int rc;
3316
3317 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3318 return rc;
7a7921e8 3319
852ee16a 3320 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
3321
3322 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3323 return rc;
7a7921e8 3324
d7bb4cc7
TH
3325 /* Some PHYs react badly if SStatus is pounded immediately
3326 * after resuming. Delay 200ms before debouncing.
3327 */
3328 msleep(200);
7a7921e8 3329
d4b2bab4 3330 return sata_phy_debounce(ap, params, deadline);
7a7921e8
TH
3331}
3332
f5914a46
TH
3333/**
3334 * ata_std_prereset - prepare for reset
3335 * @ap: ATA port to be reset
d4b2bab4 3336 * @deadline: deadline jiffies for the operation
f5914a46 3337 *
b8cffc6a
TH
3338 * @ap is about to be reset. Initialize it. Failure from
3339 * prereset makes libata abort whole reset sequence and give up
3340 * that port, so prereset should be best-effort. It does its
3341 * best to prepare for reset sequence but if things go wrong, it
3342 * should just whine, not fail.
f5914a46
TH
3343 *
3344 * LOCKING:
3345 * Kernel thread context (may sleep)
3346 *
3347 * RETURNS:
3348 * 0 on success, -errno otherwise.
3349 */
d4b2bab4 3350int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
f5914a46
TH
3351{
3352 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 3353 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3354 int rc;
3355
31daabda 3356 /* handle link resume */
28324304
TH
3357 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3358 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3359 ehc->i.action |= ATA_EH_HARDRESET;
3360
f5914a46
TH
3361 /* if we're about to do hardreset, nothing more to do */
3362 if (ehc->i.action & ATA_EH_HARDRESET)
3363 return 0;
3364
3365 /* if SATA, resume phy */
3366 if (ap->cbl == ATA_CBL_SATA) {
d4b2bab4 3367 rc = sata_phy_resume(ap, timing, deadline);
b8cffc6a
TH
3368 /* whine about phy resume failure but proceed */
3369 if (rc && rc != -EOPNOTSUPP)
f5914a46
TH
3370 ata_port_printk(ap, KERN_WARNING, "failed to resume "
3371 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3372 }
3373
3374 /* Wait for !BSY if the controller can wait for the first D2H
3375 * Reg FIS and we don't know that no device is attached.
3376 */
b8cffc6a
TH
3377 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
3378 rc = ata_wait_ready(ap, deadline);
6dffaf61 3379 if (rc && rc != -ENODEV) {
b8cffc6a
TH
3380 ata_port_printk(ap, KERN_WARNING, "device not ready "
3381 "(errno=%d), forcing hardreset\n", rc);
3382 ehc->i.action |= ATA_EH_HARDRESET;
3383 }
3384 }
f5914a46
TH
3385
3386 return 0;
3387}
3388
c2bd5804
TH
3389/**
3390 * ata_std_softreset - reset host port via ATA SRST
3391 * @ap: port to reset
c2bd5804 3392 * @classes: resulting classes of attached devices
d4b2bab4 3393 * @deadline: deadline jiffies for the operation
c2bd5804 3394 *
52783c5d 3395 * Reset host port using ATA SRST.
c2bd5804
TH
3396 *
3397 * LOCKING:
3398 * Kernel thread context (may sleep)
3399 *
3400 * RETURNS:
3401 * 0 on success, -errno otherwise.
3402 */
d4b2bab4
TH
3403int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3404 unsigned long deadline)
c2bd5804
TH
3405{
3406 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3407 unsigned int devmask = 0;
3408 int rc;
c2bd5804
TH
3409 u8 err;
3410
3411 DPRINTK("ENTER\n");
3412
81952c54 3413 if (ata_port_offline(ap)) {
3a39746a
TH
3414 classes[0] = ATA_DEV_NONE;
3415 goto out;
3416 }
3417
c2bd5804
TH
3418 /* determine if device 0/1 are present */
3419 if (ata_devchk(ap, 0))
3420 devmask |= (1 << 0);
3421 if (slave_possible && ata_devchk(ap, 1))
3422 devmask |= (1 << 1);
3423
c2bd5804
TH
3424 /* select device 0 again */
3425 ap->ops->dev_select(ap, 0);
3426
3427 /* issue bus reset */
3428 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3429 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c
TH
3430 /* if link is occupied, -ENODEV too is an error */
3431 if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
d4b2bab4
TH
3432 ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3433 return rc;
c2bd5804
TH
3434 }
3435
3436 /* determine by signature whether we have ATA or ATAPI devices */
3437 classes[0] = ata_dev_try_classify(ap, 0, &err);
3438 if (slave_possible && err != 0x81)
3439 classes[1] = ata_dev_try_classify(ap, 1, &err);
3440
3a39746a 3441 out:
c2bd5804
TH
3442 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3443 return 0;
3444}
3445
3446/**
b6103f6d 3447 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 3448 * @ap: port to reset
b6103f6d 3449 * @timing: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3450 * @deadline: deadline jiffies for the operation
c2bd5804
TH
3451 *
3452 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
3453 *
3454 * LOCKING:
3455 * Kernel thread context (may sleep)
3456 *
3457 * RETURNS:
3458 * 0 on success, -errno otherwise.
3459 */
d4b2bab4
TH
3460int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3461 unsigned long deadline)
c2bd5804 3462{
852ee16a 3463 u32 scontrol;
81952c54 3464 int rc;
852ee16a 3465
c2bd5804
TH
3466 DPRINTK("ENTER\n");
3467
3c567b7d 3468 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
3469 /* SATA spec says nothing about how to reconfigure
3470 * spd. To be on the safe side, turn off phy during
3471 * reconfiguration. This works for at least ICH7 AHCI
3472 * and Sil3124.
3473 */
81952c54 3474 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3475 goto out;
81952c54 3476
a34b6fc0 3477 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
3478
3479 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 3480 goto out;
1c3fae4d 3481
3c567b7d 3482 sata_set_spd(ap);
1c3fae4d
TH
3483 }
3484
3485 /* issue phy wake/reset */
81952c54 3486 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3487 goto out;
81952c54 3488
852ee16a 3489 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
3490
3491 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 3492 goto out;
c2bd5804 3493
1c3fae4d 3494 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3495 * 10.4.2 says at least 1 ms.
3496 */
3497 msleep(1);
3498
1c3fae4d 3499 /* bring phy back */
d4b2bab4 3500 rc = sata_phy_resume(ap, timing, deadline);
b6103f6d
TH
3501 out:
3502 DPRINTK("EXIT, rc=%d\n", rc);
3503 return rc;
3504}
3505
3506/**
3507 * sata_std_hardreset - reset host port via SATA phy reset
3508 * @ap: port to reset
3509 * @class: resulting class of attached device
d4b2bab4 3510 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3511 *
3512 * SATA phy-reset host port using DET bits of SControl register,
3513 * wait for !BSY and classify the attached device.
3514 *
3515 * LOCKING:
3516 * Kernel thread context (may sleep)
3517 *
3518 * RETURNS:
3519 * 0 on success, -errno otherwise.
3520 */
d4b2bab4
TH
3521int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3522 unsigned long deadline)
b6103f6d
TH
3523{
3524 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3525 int rc;
3526
3527 DPRINTK("ENTER\n");
3528
3529 /* do hardreset */
d4b2bab4 3530 rc = sata_port_hardreset(ap, timing, deadline);
b6103f6d
TH
3531 if (rc) {
3532 ata_port_printk(ap, KERN_ERR,
3533 "COMRESET failed (errno=%d)\n", rc);
3534 return rc;
3535 }
c2bd5804 3536
c2bd5804 3537 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3538 if (ata_port_offline(ap)) {
c2bd5804
TH
3539 *class = ATA_DEV_NONE;
3540 DPRINTK("EXIT, link offline\n");
3541 return 0;
3542 }
3543
34fee227
TH
3544 /* wait a while before checking status, see SRST for more info */
3545 msleep(150);
3546
d4b2bab4 3547 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3548 /* link occupied, -ENODEV too is an error */
3549 if (rc) {
f15a1daf 3550 ata_port_printk(ap, KERN_ERR,
d4b2bab4
TH
3551 "COMRESET failed (errno=%d)\n", rc);
3552 return rc;
c2bd5804
TH
3553 }
3554
3a39746a
TH
3555 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3556
c2bd5804
TH
3557 *class = ata_dev_try_classify(ap, 0, NULL);
3558
3559 DPRINTK("EXIT, class=%u\n", *class);
3560 return 0;
3561}
3562
3563/**
3564 * ata_std_postreset - standard postreset callback
3565 * @ap: the target ata_port
3566 * @classes: classes of attached devices
3567 *
3568 * This function is invoked after a successful reset. Note that
3569 * the device might have been reset more than once using
3570 * different reset methods before postreset is invoked.
c2bd5804 3571 *
c2bd5804
TH
3572 * LOCKING:
3573 * Kernel thread context (may sleep)
3574 */
3575void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3576{
dc2b3515
TH
3577 u32 serror;
3578
c2bd5804
TH
3579 DPRINTK("ENTER\n");
3580
c2bd5804 3581 /* print link status */
81952c54 3582 sata_print_link_status(ap);
c2bd5804 3583
dc2b3515
TH
3584 /* clear SError */
3585 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3586 sata_scr_write(ap, SCR_ERROR, serror);
3587
3a39746a 3588 /* re-enable interrupts */
83625006
AI
3589 if (!ap->ops->error_handler)
3590 ap->ops->irq_on(ap);
c2bd5804
TH
3591
3592 /* is double-select really necessary? */
3593 if (classes[0] != ATA_DEV_NONE)
3594 ap->ops->dev_select(ap, 1);
3595 if (classes[1] != ATA_DEV_NONE)
3596 ap->ops->dev_select(ap, 0);
3597
3a39746a
TH
3598 /* bail out if no device is present */
3599 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3600 DPRINTK("EXIT, no device\n");
3601 return;
3602 }
3603
3604 /* set up device control */
0d5ff566
TH
3605 if (ap->ioaddr.ctl_addr)
3606 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3607
3608 DPRINTK("EXIT\n");
3609}

/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}

/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}

/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);
		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
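
/*
 * Usage sketch (illustrative): error handling code typically calls
 * ata_dev_revalidate() after a reset and disables the device when
 * revalidation fails; ATA_READID_POSTRESET is assumed to be the
 * usual readid flag for that context.
 *
 *	rc = ata_dev_revalidate(dev, ATA_READID_POSTRESET);
 *	if (rc)
 *		ata_dev_disable(dev);
 */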

struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B",	NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC",	NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ "IOMEGA ZIP 250 ATAPI", NULL,		ATA_HORKAGE_NODMA }, /* temporary fix */

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 |
						ATA_HORKAGE_DMA_RW_ONLY },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor 6L250S0",	"BANC1G10",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
	/* NCQ hard hangs device under heavier load, needs hard power cycle */
	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",	"MB3OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",	"MB4OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",	"MBZOC60D",	ATA_HORKAGE_NONCQ, },
	/* Drives which do spurious command completion */
	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },

	/* Devices with NCQ limits */

	/* End Marker */
	{ }
};

unsigned long ata_device_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strcmp(ad->model_num, model_num)) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strcmp(ad->model_rev, model_rev))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
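
/*
 * Usage sketch (illustrative): callers test the returned horkage mask
 * for the quirk they care about, e.g. device configuration can skip
 * NCQ for a blacklisted drive:
 *
 *	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ)
 *		skip_ncq = 1;
 *
 * where skip_ncq is a hypothetical local flag.
 */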

static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
}

/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 * CFA Advanced TrueIDE timings are not allowed on a shared
	 * cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well.  Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_drive_40wire(dev->id) &&
		     (ap->cbl == ATA_CBL_PATA_UNK ||
		      ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
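
/*
 * Illustrative sketch of the ->mode_filter hook consulted above (the
 * function below is hypothetical, not from this file): an LLD may
 * knock modes out of the computed mask, e.g. forbidding all UDMA on
 * a broken controller revision.
 *
 *	static unsigned long my_mode_filter(struct ata_device *adev,
 *					    unsigned long xfer_mask)
 *	{
 *		return xfer_mask & ~ATA_MASK_UDMA;
 *	}
 */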

/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}

/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
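
/*
 * Worked example of the 64K-boundary split above (illustrative): an
 * 8KiB segment at bus address 0x0000F000 crosses the 64K mark at
 * 0x00010000, so it is emitted as two PRD entries,
 *
 *	PRD[0] = (0x0000F000, 0x1000)
 *	PRD[1] = (0x00010000, 0x1000)
 *
 * and the second, being last, gets ATA_PRD_EOT set.
 */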

/**
 *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
 *	a status indicating whether or not it is OK to use DMA for the
 *	supplied PACKET command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *		 nonzero otherwise
 */
int ata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int rc = 0; /* Assume ATAPI DMA is OK by default */

	/* some drives can only do ATAPI DMA on read/write */
	if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
		struct scsi_cmnd *cmd = qc->scsicmd;
		u8 *scsicmd = cmd->cmnd;

		switch (scsicmd[0]) {
		case READ_10:
		case WRITE_10:
		case READ_12:
		case WRITE_12:
		case READ_6:
		case WRITE_6:
			/* atapi dma maybe ok */
			break;
		default:
			/* turn off atapi dma */
			return 1;
		}
	}

	if (ap->ops->check_atapi_dma)
		rc = ap->ops->check_atapi_dma(qc);

	return rc;
}

/**
 *	ata_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}

void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }

/**
 *	ata_sg_init_one - Associate command with memory buffer
 *	@qc: Command to be associated
 *	@buf: Memory buffer
 *	@buflen: Length of memory buffer, in bytes.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a single memory buffer, @buf of byte length @buflen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
	qc->flags |= ATA_QCFLAG_SINGLE;

	qc->__sg = &qc->sgent;
	qc->n_elem = 1;
	qc->orig_n_elem = 1;
	qc->buf_virt = buf;
	qc->nbytes = buflen;

	sg_init_one(&qc->sgent, buf, buflen);
}
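
/*
 * Usage sketch (illustrative): internal commands that move data to or
 * from a single kernel buffer associate it with the qc roughly like
 * this before issue; scatter-gather callers use ata_sg_init() below
 * instead.
 *
 *	ata_sg_init_one(qc, buf, buflen);
 *	qc->dma_dir = DMA_FROM_DEVICE;
 */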

/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->flags |= ATA_QCFLAG_SG;
	qc->__sg = sg;
	qc->n_elem = n_elem;
	qc->orig_n_elem = n_elem;
}

/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}

/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}

/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf: Buffer to swap
 *	@buf_words: Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
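
/*
 * Example (illustrative): IDENTIFY DEVICE data arrives as 256
 * little-endian words, so readers convert it to CPU byte order in
 * place; on little-endian machines this compiles to nothing.
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */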

/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}

/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}


/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}

/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);
}

/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}

/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, pad zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}

/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */

static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (is_atapi_taskfile(&qc->tf) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}

	ata_altstatus(ap); /* flush */
}

/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
			ata_altstatus(ap); /* flush */
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					ata_altstatus(ap);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				ata_altstatus(ap);
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		ata_altstatus(ap); /* flush */
		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
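
/*
 * Usage sketch (illustrative): an interrupt handler drives the HSM by
 * reading the device status and feeding it in with in_wq == 0, while
 * ata_pio_task() below does the same from the workqueue with
 * in_wq == 1.
 *
 *	status = ata_chk_status(ap);
 *	ata_hsm_move(ap, qc, status, 0);
 */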

static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}

/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port with which the allocated command will be associated
 *
 *	LOCKING:
 *	None.
 */

static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}

/**
 *	ata_qc_new_init - Request an available ATA command, and initialize it
 *	@dev: Device from whom we request an available command structure
 *
 *	LOCKING:
 *	None.
 */

struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_queued_cmd *qc;

	qc = ata_qc_new(ap);
	if (qc) {
		qc->scsicmd = NULL;
		qc->ap = ap;
		qc->dev = dev;

		ata_qc_reinit(qc);
	}

	return qc;
}

/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}

void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ap->sactive &= ~(1 << qc->tag);
	else
		ap->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}

static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->tf_read(ap, &qc->result_tf);
}

/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}

/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This function is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active are compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	done_mask = ap->qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
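
/*
 * Usage sketch (illustrative): an NCQ-capable LLD's interrupt routine
 * reads its hardware's active-tag register and lets the core complete
 * whatever commands dropped out of it.  The register offset below is
 * hypothetical.
 *
 *	qc_active = readl(port_mmio + PORT_ACTIVE_TAGS);
 *	nr_done = ata_qc_complete_multiple(ap, qc_active, NULL);
 */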

static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_NCQ:
	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		return 1;

	case ATA_PROT_ATAPI:
	case ATA_PROT_PIO:
		if (ap->flags & ATA_FLAG_PIO_DMA)
			return 1;

		/* fall through */

	default:
		return 0;
	}

	/* never reached */
}

/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command for submission to a device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(ap->sactive & (1 << qc->tag));
		ap->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(ap->sactive);
		ap->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
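
/*
 * Flow sketch (illustrative, simplified): callers such as the SCSI
 * translation layer reach ata_qc_issue() above roughly as follows:
 * allocate a qc, fill in the taskfile and data phase, then issue.
 *
 *	qc = ata_qc_new_init(dev);
 *	if (qc) {
 *		(fill qc->tf, qc->flags, qc->dma_dir, sg table ...)
 *		ata_qc_issue(qc);
 *	}
 */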
5389
5390/**
5391 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5392 * @qc: command to issue to device
5393 *
5394 * Using various libata functions and hooks, this function
5395 * starts an ATA command. ATA commands are grouped into
5396 * classes called "protocols", and issuing each type of protocol
5397 * is slightly different.
5398 *
0baab86b
EF
5399 * May be used as the qc_issue() entry in ata_port_operations.
5400 *
1da177e4 5401 * LOCKING:
cca3974e 5402 * spin_lock_irqsave(host lock)
1da177e4
LT
5403 *
5404 * RETURNS:
9a3d9eb0 5405 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5406 */
5407
9a3d9eb0 5408unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
5409{
5410 struct ata_port *ap = qc->ap;
5411
e50362ec
AL
5412 /* Use polling PIO if the LLD doesn't handle
5413 * interrupt-driven PIO and the ATAPI CDB interrupt.
5414 */
5415 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5416 switch (qc->tf.protocol) {
5417 case ATA_PROT_PIO:
e3472cbe 5418 case ATA_PROT_NODATA:
e50362ec
AL
5419 case ATA_PROT_ATAPI:
5420 case ATA_PROT_ATAPI_NODATA:
5421 qc->tf.flags |= ATA_TFLAG_POLLING;
5422 break;
5423 case ATA_PROT_ATAPI_DMA:
5424 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 5425 /* see ata_dma_blacklisted() */
e50362ec
AL
5426 BUG();
5427 break;
5428 default:
5429 break;
5430 }
5431 }
5432
312f7da2 5433 /* select the device */
1da177e4
LT
5434 ata_dev_select(ap, qc->dev->devno, 1, 0);
5435
312f7da2 5436 /* start the command */
1da177e4
LT
5437 switch (qc->tf.protocol) {
5438 case ATA_PROT_NODATA:
312f7da2
AL
5439 if (qc->tf.flags & ATA_TFLAG_POLLING)
5440 ata_qc_set_polling(qc);
5441
e5338254 5442 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
5443 ap->hsm_task_state = HSM_ST_LAST;
5444
5445 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5446 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5447
1da177e4
LT
5448 break;
5449
5450 case ATA_PROT_DMA:
587005de 5451 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5452
1da177e4
LT
5453 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5454 ap->ops->bmdma_setup(qc); /* set up bmdma */
5455 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5456 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5457 break;
5458
312f7da2
AL
5459 case ATA_PROT_PIO:
5460 if (qc->tf.flags & ATA_TFLAG_POLLING)
5461 ata_qc_set_polling(qc);
1da177e4 5462
e5338254 5463 ata_tf_to_host(ap, &qc->tf);
312f7da2 5464
54f00389
AL
5465 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5466 /* PIO data out protocol */
5467 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5468 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5469
5470 /* always send first data block using
e27486db 5471 * the ata_pio_task() codepath.
54f00389 5472 */
312f7da2 5473 } else {
54f00389
AL
5474 /* PIO data in protocol */
5475 ap->hsm_task_state = HSM_ST;
5476
5477 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5478 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5479
5480 /* if polling, ata_pio_task() handles the rest.
5481 * otherwise, interrupt handler takes over from here.
5482 */
312f7da2
AL
5483 }
5484
1da177e4
LT
5485 break;
5486
1da177e4 5487 case ATA_PROT_ATAPI:
1da177e4 5488 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5489 if (qc->tf.flags & ATA_TFLAG_POLLING)
5490 ata_qc_set_polling(qc);
5491
e5338254 5492 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5493
312f7da2
AL
5494 ap->hsm_task_state = HSM_ST_FIRST;
5495
5496 /* send cdb by polling if no cdb interrupt */
5497 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5498 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5499 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5500 break;
5501
5502 case ATA_PROT_ATAPI_DMA:
587005de 5503 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5504
1da177e4
LT
5505 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5506 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5507 ap->hsm_task_state = HSM_ST_FIRST;
5508
5509 /* send cdb by polling if no cdb interrupt */
5510 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5511 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5512 break;
5513
5514 default:
5515 WARN_ON(1);
9a3d9eb0 5516 return AC_ERR_SYSTEM;
1da177e4
LT
5517 }
5518
5519 return 0;
5520}
5521
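/*
 * As noted above, this routine is meant to be plugged straight into an
 * LLD's ata_port_operations. A minimal sketch (my_port_ops is a
 * made-up name; the other mandatory hooks are elided):
 *
 *	static const struct ata_port_operations my_port_ops = {
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *	};
 */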
1da177e4
LT
5522/**
5523 * ata_host_intr - Handle host interrupt for given (port, task)
5524 * @ap: Port on which interrupt arrived (possibly...)
5525 * @qc: Taskfile currently active in engine
5526 *
5527 * Handle host interrupt for given queued command. Currently,
5528 * only DMA interrupts are handled. All other commands are
5529 * handled via polling with interrupts disabled (nIEN bit).
5530 *
5531 * LOCKING:
cca3974e 5532 * spin_lock_irqsave(host lock)
1da177e4
LT
5533 *
5534 * RETURNS:
5535 * One if interrupt was handled, zero if not (shared irq).
5536 */
5537
5538inline unsigned int ata_host_intr (struct ata_port *ap,
5539 struct ata_queued_cmd *qc)
5540{
ea54763f 5541 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 5542 u8 status, host_stat = 0;
1da177e4 5543
312f7da2 5544 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5545 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5546
312f7da2
AL
5547 /* Check whether we are expecting interrupt in this state */
5548 switch (ap->hsm_task_state) {
5549 case HSM_ST_FIRST:
6912ccd5
AL
5550 /* Some pre-ATAPI-4 devices assert INTRQ
5551 * in this state when ready to receive the CDB.
5552 */
1da177e4 5553
312f7da2
AL
5554 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5555 * The flag is set only for ATAPI devices, so there is
5556 * no need to check is_atapi_taskfile(&qc->tf) again.
5557 */
5558 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5559 goto idle_irq;
1da177e4 5560 break;
312f7da2
AL
5561 case HSM_ST_LAST:
5562 if (qc->tf.protocol == ATA_PROT_DMA ||
5563 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5564 /* check status of DMA engine */
5565 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
5566 VPRINTK("ata%u: host_stat 0x%X\n",
5567 ap->print_id, host_stat);
312f7da2
AL
5568
5569 /* if it's not our irq... */
5570 if (!(host_stat & ATA_DMA_INTR))
5571 goto idle_irq;
5572
5573 /* before we do anything else, clear DMA-Start bit */
5574 ap->ops->bmdma_stop(qc);
a4f16610
AL
5575
5576 if (unlikely(host_stat & ATA_DMA_ERR)) {
5577 /* error when transferring data to/from memory */
5578 qc->err_mask |= AC_ERR_HOST_BUS;
5579 ap->hsm_task_state = HSM_ST_ERR;
5580 }
312f7da2
AL
5581 }
5582 break;
5583 case HSM_ST:
5584 break;
1da177e4
LT
5585 default:
5586 goto idle_irq;
5587 }
5588
312f7da2
AL
5589 /* check altstatus */
5590 status = ata_altstatus(ap);
5591 if (status & ATA_BUSY)
5592 goto idle_irq;
1da177e4 5593
312f7da2
AL
5594 /* check main status, clearing INTRQ */
5595 status = ata_chk_status(ap);
5596 if (unlikely(status & ATA_BUSY))
5597 goto idle_irq;
1da177e4 5598
312f7da2
AL
5599 /* ack bmdma irq events */
5600 ap->ops->irq_clear(ap);
1da177e4 5601
bb5cb290 5602 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5603
5604 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5605 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5606 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5607
1da177e4
LT
5608 return 1; /* irq handled */
5609
5610idle_irq:
5611 ap->stats.idle_irq++;
5612
5613#ifdef ATA_IRQ_TRAP
5614 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5615 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5616 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5617 return 1;
1da177e4
LT
5618 }
5619#endif
5620 return 0; /* irq not handled */
5621}
5622
5623/**
5624 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5625 * @irq: irq line (unused)
cca3974e 5626 * @dev_instance: pointer to our ata_host information structure
1da177e4 5627 *
0cba632b
JG
5628 * Default interrupt handler for PCI IDE devices. Calls
5629 * ata_host_intr() for each port that is not disabled.
5630 *
1da177e4 5631 * LOCKING:
cca3974e 5632 * Obtains host lock during operation.
1da177e4
LT
5633 *
5634 * RETURNS:
0cba632b 5635 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5636 */
5637
7d12e780 5638irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5639{
cca3974e 5640 struct ata_host *host = dev_instance;
1da177e4
LT
5641 unsigned int i;
5642 unsigned int handled = 0;
5643 unsigned long flags;
5644
5645 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5646 spin_lock_irqsave(&host->lock, flags);
1da177e4 5647
cca3974e 5648 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5649 struct ata_port *ap;
5650
cca3974e 5651 ap = host->ports[i];
c1389503 5652 if (ap &&
029f5468 5653 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5654 struct ata_queued_cmd *qc;
5655
5656 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5657 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5658 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5659 handled |= ata_host_intr(ap, qc);
5660 }
5661 }
5662
cca3974e 5663 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5664
5665 return IRQ_RETVAL(handled);
5666}
5667
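/*
 * Usage sketch: this is the handler that ata_host_activate() below
 * passes to devm_request_irq() for most SFF drivers. Hand-rolled
 * registration would look roughly like this (DRV_NAME is assumed to be
 * defined by the driver):
 *
 *	rc = devm_request_irq(host->dev, pdev->irq, ata_interrupt,
 *			      IRQF_SHARED, DRV_NAME, host);
 *	if (rc)
 *		return rc;
 */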
34bf2170
TH
5668/**
5669 * sata_scr_valid - test whether SCRs are accessible
5670 * @ap: ATA port to test SCR accessibility for
5671 *
5672 * Test whether SCRs are accessible for @ap.
5673 *
5674 * LOCKING:
5675 * None.
5676 *
5677 * RETURNS:
5678 * 1 if SCRs are accessible, 0 otherwise.
5679 */
5680int sata_scr_valid(struct ata_port *ap)
5681{
5682 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5683}
5684
5685/**
5686 * sata_scr_read - read SCR register of the specified port
5687 * @ap: ATA port to read SCR for
5688 * @reg: SCR to read
5689 * @val: Place to store read value
5690 *
5691 * Read SCR register @reg of @ap into *@val. This function is
5692 * guaranteed to succeed if the cable type of the port is SATA
5693 * and the port implements ->scr_read.
5694 *
5695 * LOCKING:
5696 * None.
5697 *
5698 * RETURNS:
5699 * 0 on success, negative errno on failure.
5700 */
5701int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5702{
5703 if (sata_scr_valid(ap)) {
5704 *val = ap->ops->scr_read(ap, reg);
5705 return 0;
5706 }
5707 return -EOPNOTSUPP;
5708}
5709
5710/**
5711 * sata_scr_write - write SCR register of the specified port
5712 * @ap: ATA port to write SCR for
5713 * @reg: SCR to write
5714 * @val: value to write
5715 *
5716 * Write @val to SCR register @reg of @ap. This function is
5717 * guaranteed to succeed if the cable type of the port is SATA
5718 * and the port implements ->scr_write.
5719 *
5720 * LOCKING:
5721 * None.
5722 *
5723 * RETURNS:
5724 * 0 on success, negative errno on failure.
5725 */
5726int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5727{
5728 if (sata_scr_valid(ap)) {
5729 ap->ops->scr_write(ap, reg, val);
5730 return 0;
5731 }
5732 return -EOPNOTSUPP;
5733}
5734
5735/**
5736 * sata_scr_write_flush - write SCR register of the specified port and flush
5737 * @ap: ATA port to write SCR for
5738 * @reg: SCR to write
5739 * @val: value to write
5740 *
5741 * This function is identical to sata_scr_write() except that this
5742 * function performs a flush after writing to the register.
5743 *
5744 * LOCKING:
5745 * None.
5746 *
5747 * RETURNS:
5748 * 0 on success, negative errno on failure.
5749 */
5750int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5751{
5752 if (sata_scr_valid(ap)) {
5753 ap->ops->scr_write(ap, reg, val);
5754 ap->ops->scr_read(ap, reg);
5755 return 0;
5756 }
5757 return -EOPNOTSUPP;
5758}
5759
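/*
 * A common pattern built on these helpers, sketched after what libata
 * EH does to clear SError: read the register and write the value back,
 * which clears the bits that were set.
 *
 *	u32 serror;
 *
 *	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
 *		sata_scr_write(ap, SCR_ERROR, serror);
 */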
5760/**
5761 * ata_port_online - test whether the given port is online
5762 * @ap: ATA port to test
5763 *
5764 * Test whether @ap is online. Note that this function returns 0
5765 * if online status of @ap cannot be obtained, so
5766 * ata_port_online(ap) != !ata_port_offline(ap).
5767 *
5768 * LOCKING:
5769 * None.
5770 *
5771 * RETURNS:
5772 * 1 if the port online status is available and online.
5773 */
5774int ata_port_online(struct ata_port *ap)
5775{
5776 u32 sstatus;
5777
5778 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5779 return 1;
5780 return 0;
5781}
5782
5783/**
5784 * ata_port_offline - test whether the given port is offline
5785 * @ap: ATA port to test
5786 *
5787 * Test whether @ap is offline. Note that this function returns
5788 * 0 if offline status of @ap cannot be obtained, so
5789 * ata_port_online(ap) != !ata_port_offline(ap).
5790 *
5791 * LOCKING:
5792 * None.
5793 *
5794 * RETURNS:
5795 * 1 if the port offline status is available and offline.
5796 */
5797int ata_port_offline(struct ata_port *ap)
5798{
5799 u32 sstatus;
5800
5801 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5802 return 1;
5803 return 0;
5804}
0baab86b 5805
77b08fb5 5806int ata_flush_cache(struct ata_device *dev)
9b847548 5807{
977e6b9f 5808 unsigned int err_mask;
9b847548
JA
5809 u8 cmd;
5810
5811 if (!ata_try_flush_cache(dev))
5812 return 0;
5813
6fc49adb 5814 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5815 cmd = ATA_CMD_FLUSH_EXT;
5816 else
5817 cmd = ATA_CMD_FLUSH;
5818
977e6b9f
TH
5819 err_mask = ata_do_simple_cmd(dev, cmd);
5820 if (err_mask) {
5821 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5822 return -EIO;
5823 }
5824
5825 return 0;
9b847548
JA
5826}
5827
6ffa01d8 5828#ifdef CONFIG_PM
cca3974e
JG
5829static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5830 unsigned int action, unsigned int ehi_flags,
5831 int wait)
500530f6
TH
5832{
5833 unsigned long flags;
5834 int i, rc;
5835
cca3974e
JG
5836 for (i = 0; i < host->n_ports; i++) {
5837 struct ata_port *ap = host->ports[i];
500530f6
TH
5838
5839 /* Previous resume operation might still be in
5840 * progress. Wait for PM_PENDING to clear.
5841 */
5842 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5843 ata_port_wait_eh(ap);
5844 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5845 }
5846
5847 /* request PM ops to EH */
5848 spin_lock_irqsave(ap->lock, flags);
5849
5850 ap->pm_mesg = mesg;
5851 if (wait) {
5852 rc = 0;
5853 ap->pm_result = &rc;
5854 }
5855
5856 ap->pflags |= ATA_PFLAG_PM_PENDING;
5857 ap->eh_info.action |= action;
5858 ap->eh_info.flags |= ehi_flags;
5859
5860 ata_port_schedule_eh(ap);
5861
5862 spin_unlock_irqrestore(ap->lock, flags);
5863
5864 /* wait and check result */
5865 if (wait) {
5866 ata_port_wait_eh(ap);
5867 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5868 if (rc)
5869 return rc;
5870 }
5871 }
5872
5873 return 0;
5874}
5875
5876/**
cca3974e
JG
5877 * ata_host_suspend - suspend host
5878 * @host: host to suspend
500530f6
TH
5879 * @mesg: PM message
5880 *
cca3974e 5881 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
5882 * function requests EH to perform PM operations and waits for EH
5883 * to finish.
5884 *
5885 * LOCKING:
5886 * Kernel thread context (may sleep).
5887 *
5888 * RETURNS:
5889 * 0 on success, -errno on failure.
5890 */
cca3974e 5891int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 5892{
9666f400 5893 int rc;
500530f6 5894
cca3974e 5895 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
5896 if (rc == 0)
5897 host->dev->power.power_state = mesg;
500530f6
TH
5898 return rc;
5899}
5900
5901/**
cca3974e
JG
5902 * ata_host_resume - resume host
5903 * @host: host to resume
500530f6 5904 *
cca3974e 5905 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5906 * function requests EH to perform PM operations and returns.
5907 * Note that all resume operations are performed in parallel.
5908 *
5909 * LOCKING:
5910 * Kernel thread context (may sleep).
5911 */
cca3974e 5912void ata_host_resume(struct ata_host *host)
500530f6 5913{
cca3974e
JG
5914 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5915 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5916 host->dev->power.power_state = PMSG_ON;
500530f6 5917}
6ffa01d8 5918#endif
500530f6 5919
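/*
 * Sketch of a non-PCI (e.g. platform) driver wiring these up; the
 * my_plat_* names are hypothetical and CONFIG_PM is assumed:
 *
 *	static int my_plat_suspend(struct platform_device *pdev, pm_message_t mesg)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		return ata_host_suspend(host, mesg);
 *	}
 *
 *	static int my_plat_resume(struct platform_device *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		ata_host_resume(host);
 *		return 0;
 *	}
 */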
c893a3ae
RD
5920/**
5921 * ata_port_start - Set port up for dma.
5922 * @ap: Port to initialize
5923 *
5924 * Called just after data structures for each port are
5925 * initialized. Allocates space for PRD table.
5926 *
5927 * May be used as the port_start() entry in ata_port_operations.
5928 *
5929 * LOCKING:
5930 * Inherited from caller.
5931 */
f0d36efd 5932int ata_port_start(struct ata_port *ap)
1da177e4 5933{
2f1f610b 5934 struct device *dev = ap->dev;
6037d6bb 5935 int rc;
1da177e4 5936
f0d36efd
TH
5937 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5938 GFP_KERNEL);
1da177e4
LT
5939 if (!ap->prd)
5940 return -ENOMEM;
5941
6037d6bb 5942 rc = ata_pad_alloc(ap, dev);
f0d36efd 5943 if (rc)
6037d6bb 5944 return rc;
1da177e4 5945
f0d36efd
TH
5946 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5947 (unsigned long long)ap->prd_dma);
1da177e4
LT
5948 return 0;
5949}
5950
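/*
 * Sketch of an LLD chaining to this helper from its own port_start to
 * add per-port state (struct my_port_priv is a made-up type):
 *
 *	static int my_port_start(struct ata_port *ap)
 *	{
 *		struct my_port_priv *pp;
 *		int rc = ata_port_start(ap);
 *
 *		if (rc)
 *			return rc;
 *		pp = devm_kzalloc(ap->dev, sizeof(*pp), GFP_KERNEL);
 *		if (!pp)
 *			return -ENOMEM;
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */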
3ef3b43d
TH
5951/**
5952 * ata_dev_init - Initialize an ata_device structure
5953 * @dev: Device structure to initialize
5954 *
5955 * Initialize @dev in preparation for probing.
5956 *
5957 * LOCKING:
5958 * Inherited from caller.
5959 */
5960void ata_dev_init(struct ata_device *dev)
5961{
5962 struct ata_port *ap = dev->ap;
72fa4b74
TH
5963 unsigned long flags;
5964
5a04bf4b
TH
5965 /* SATA spd limit is bound to the first device */
5966 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5967
72fa4b74
TH
5968 /* High bits of dev->flags are used to record warm plug
5969 * requests which occur asynchronously. Synchronize using
cca3974e 5970 * host lock.
72fa4b74 5971 */
ba6a1308 5972 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5973 dev->flags &= ~ATA_DFLAG_INIT_MASK;
ba6a1308 5974 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5975
72fa4b74
TH
5976 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5977 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
5978 dev->pio_mask = UINT_MAX;
5979 dev->mwdma_mask = UINT_MAX;
5980 dev->udma_mask = UINT_MAX;
5981}
5982
1da177e4 5983/**
f3187195
TH
5984 * ata_port_alloc - allocate and initialize basic ATA port resources
5985 * @host: ATA host this allocated port belongs to
1da177e4 5986 *
f3187195
TH
5987 * Allocate and initialize basic ATA port resources.
5988 *
5989 * RETURNS:
5990 * Allocated ATA port on success, NULL on failure.
0cba632b 5991 *
1da177e4 5992 * LOCKING:
f3187195 5993 * Inherited from calling layer (may sleep).
1da177e4 5994 */
f3187195 5995struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 5996{
f3187195 5997 struct ata_port *ap;
1da177e4
LT
5998 unsigned int i;
5999
f3187195
TH
6000 DPRINTK("ENTER\n");
6001
6002 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6003 if (!ap)
6004 return NULL;
6005
f4d6d004 6006 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6007 ap->lock = &host->lock;
198e0fed 6008 ap->flags = ATA_FLAG_DISABLED;
f3187195 6009 ap->print_id = -1;
1da177e4 6010 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6011 ap->host = host;
f3187195
TH
6012 ap->dev = host->dev;
6013
5a04bf4b 6014 ap->hw_sata_spd_limit = UINT_MAX;
1da177e4
LT
6015 ap->active_tag = ATA_TAG_POISON;
6016 ap->last_ctl = 0xFF;
bd5d825c
BP
6017
6018#if defined(ATA_VERBOSE_DEBUG)
6019 /* turn on all debugging levels */
6020 ap->msg_enable = 0x00FF;
6021#elif defined(ATA_DEBUG)
6022 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6023#else
0dd4b21f 6024 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6025#endif
1da177e4 6026
65f27f38
DH
6027 INIT_DELAYED_WORK(&ap->port_task, NULL);
6028 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6029 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6030 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6031 init_waitqueue_head(&ap->eh_wait_q);
1da177e4 6032
838df628 6033 ap->cbl = ATA_CBL_NONE;
838df628 6034
acf356b1
TH
6035 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6036 struct ata_device *dev = &ap->device[i];
38d87234 6037 dev->ap = ap;
72fa4b74 6038 dev->devno = i;
3ef3b43d 6039 ata_dev_init(dev);
acf356b1 6040 }
1da177e4
LT
6041
6042#ifdef ATA_IRQ_TRAP
6043 ap->stats.unhandled_irq = 1;
6044 ap->stats.idle_irq = 1;
6045#endif
1da177e4 6046 return ap;
1da177e4
LT
6047}
6048
f0d36efd
TH
6049static void ata_host_release(struct device *gendev, void *res)
6050{
6051 struct ata_host *host = dev_get_drvdata(gendev);
6052 int i;
6053
6054 for (i = 0; i < host->n_ports; i++) {
6055 struct ata_port *ap = host->ports[i];
6056
ecef7253
TH
6057 if (!ap)
6058 continue;
6059
6060 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
f0d36efd 6061 ap->ops->port_stop(ap);
f0d36efd
TH
6062 }
6063
ecef7253 6064 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
f0d36efd 6065 host->ops->host_stop(host);
1aa56cca 6066
1aa506e4
TH
6067 for (i = 0; i < host->n_ports; i++) {
6068 struct ata_port *ap = host->ports[i];
6069
4911487a
TH
6070 if (!ap)
6071 continue;
6072
6073 if (ap->scsi_host)
1aa506e4
TH
6074 scsi_host_put(ap->scsi_host);
6075
4911487a 6076 kfree(ap);
1aa506e4
TH
6077 host->ports[i] = NULL;
6078 }
6079
1aa56cca 6080 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6081}
6082
f3187195
TH
6083/**
6084 * ata_host_alloc - allocate and init basic ATA host resources
6085 * @dev: generic device this host is associated with
6086 * @max_ports: maximum number of ATA ports associated with this host
6087 *
6088 * Allocate and initialize basic ATA host resources. LLD calls
6089 * this function to allocate a host, initializes it fully and
6090 * attaches it using ata_host_register().
6091 *
6092 * @max_ports ports are allocated and host->n_ports is
6093 * initialized to @max_ports. The caller is allowed to decrease
6094 * host->n_ports before calling ata_host_register(). The unused
6095 * ports will be automatically freed on registration.
6096 *
6097 * RETURNS:
6098 * Allocated ATA host on success, NULL on failure.
6099 *
6100 * LOCKING:
6101 * Inherited from calling layer (may sleep).
6102 */
6103struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6104{
6105 struct ata_host *host;
6106 size_t sz;
6107 int i;
6108
6109 DPRINTK("ENTER\n");
6110
6111 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6112 return NULL;
6113
6114 /* alloc a container for our list of ATA ports (buses) */
6115 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6117 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6118 if (!host)
6119 goto err_out;
6120
6121 devres_add(dev, host);
6122 dev_set_drvdata(dev, host);
6123
6124 spin_lock_init(&host->lock);
6125 host->dev = dev;
6126 host->n_ports = max_ports;
6127
6128 /* allocate ports bound to this host */
6129 for (i = 0; i < max_ports; i++) {
6130 struct ata_port *ap;
6131
6132 ap = ata_port_alloc(host);
6133 if (!ap)
6134 goto err_out;
6135
6136 ap->port_no = i;
6137 host->ports[i] = ap;
6138 }
6139
6140 devres_remove_group(dev, NULL);
6141 return host;
6142
6143 err_out:
6144 devres_release_group(dev, NULL);
6145 return NULL;
6146}
6147
f5cda257
TH
6148/**
6149 * ata_host_alloc_pinfo - alloc host and init with port_info array
6150 * @dev: generic device this host is associated with
6151 * @ppi: array of ATA port_info to initialize host with
6152 * @n_ports: number of ATA ports attached to this host
6153 *
6154 * Allocate ATA host and initialize with info from @ppi. If NULL
6155 * terminated, @ppi may contain fewer entries than @n_ports. The
6156 * last entry will be used for the remaining ports.
6157 *
6158 * RETURNS:
6159 * Allocated ATA host on success, NULL on failure.
6160 *
6161 * LOCKING:
6162 * Inherited from calling layer (may sleep).
6163 */
6164struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6165 const struct ata_port_info * const * ppi,
6166 int n_ports)
6167{
6168 const struct ata_port_info *pi;
6169 struct ata_host *host;
6170 int i, j;
6171
6172 host = ata_host_alloc(dev, n_ports);
6173 if (!host)
6174 return NULL;
6175
6176 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6177 struct ata_port *ap = host->ports[i];
6178
6179 if (ppi[j])
6180 pi = ppi[j++];
6181
6182 ap->pio_mask = pi->pio_mask;
6183 ap->mwdma_mask = pi->mwdma_mask;
6184 ap->udma_mask = pi->udma_mask;
6185 ap->flags |= pi->flags;
6186 ap->ops = pi->port_ops;
6187
6188 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6189 host->ops = pi->port_ops;
6190 if (!host->private_data && pi->private_data)
6191 host->private_data = pi->private_data;
6192 }
6193
6194 return host;
6195}
6196
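/*
 * Sketch (hypothetical two-port controller): because @ppi may be NULL
 * terminated, a single entry can cover all ports. my_port_ops is a
 * made-up name.
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= 0x1f,
 *		.udma_mask	= 0x7f,
 *		.port_ops	= &my_port_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 */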
ecef7253
TH
6197/**
6198 * ata_host_start - start and freeze ports of an ATA host
6199 * @host: ATA host to start ports for
6200 *
6201 * Start and then freeze ports of @host. Started status is
6202 * recorded in host->flags, so this function can be called
6203 * multiple times. Ports are guaranteed to get started only
f3187195
TH
6204 * once. If host->ops isn't initialized yet, it's set to the
6205 * first non-dummy port ops.
ecef7253
TH
6206 *
6207 * LOCKING:
6208 * Inherited from calling layer (may sleep).
6209 *
6210 * RETURNS:
6211 * 0 if all ports are started successfully, -errno otherwise.
6212 */
6213int ata_host_start(struct ata_host *host)
6214{
6215 int i, rc;
6216
6217 if (host->flags & ATA_HOST_STARTED)
6218 return 0;
6219
6220 for (i = 0; i < host->n_ports; i++) {
6221 struct ata_port *ap = host->ports[i];
6222
f3187195
TH
6223 if (!host->ops && !ata_port_is_dummy(ap))
6224 host->ops = ap->ops;
6225
ecef7253
TH
6226 if (ap->ops->port_start) {
6227 rc = ap->ops->port_start(ap);
6228 if (rc) {
6229 ata_port_printk(ap, KERN_ERR, "failed to "
6230 "start port (errno=%d)\n", rc);
6231 goto err_out;
6232 }
6233 }
6234
6235 ata_eh_freeze_port(ap);
6236 }
6237
6238 host->flags |= ATA_HOST_STARTED;
6239 return 0;
6240
6241 err_out:
6242 while (--i >= 0) {
6243 struct ata_port *ap = host->ports[i];
6244
6245 if (ap->ops->port_stop)
6246 ap->ops->port_stop(ap);
6247 }
6248 return rc;
6249}
6250
b03732f0 6251/**
cca3974e
JG
6252 * ata_sas_host_init - Initialize a host struct
6253 * @host: host to initialize
6254 * @dev: device host is attached to
6255 * @flags: host flags
6256 * @ops: port_ops
b03732f0
BK
6257 *
6258 * LOCKING:
6259 * PCI/etc. bus probe sem.
6260 *
6261 */
f3187195 6262/* KILLME - the only user left is ipr */
cca3974e
JG
6263void ata_host_init(struct ata_host *host, struct device *dev,
6264 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 6265{
cca3974e
JG
6266 spin_lock_init(&host->lock);
6267 host->dev = dev;
6268 host->flags = flags;
6269 host->ops = ops;
b03732f0
BK
6270}
6271
f3187195
TH
6272/**
6273 * ata_host_register - register initialized ATA host
6274 * @host: ATA host to register
6275 * @sht: template for SCSI host
6276 *
6277 * Register initialized ATA host. @host is allocated using
6278 * ata_host_alloc() and fully initialized by LLD. This function
6279 * starts ports, registers @host with ATA and SCSI layers and
6280 * probes registered devices.
6281 *
6282 * LOCKING:
6283 * Inherited from calling layer (may sleep).
6284 *
6285 * RETURNS:
6286 * 0 on success, -errno otherwise.
6287 */
6288int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6289{
6290 int i, rc;
6291
6292 /* host must have been started */
6293 if (!(host->flags & ATA_HOST_STARTED)) {
6294 dev_printk(KERN_ERR, host->dev,
6295 "BUG: trying to register unstarted host\n");
6296 WARN_ON(1);
6297 return -EINVAL;
6298 }
6299
6300 /* Blow away unused ports. This happens when LLD can't
6301 * determine the exact number of ports to allocate at
6302 * allocation time.
6303 */
6304 for (i = host->n_ports; host->ports[i]; i++)
6305 kfree(host->ports[i]);
6306
6307 /* give ports names and add SCSI hosts */
6308 for (i = 0; i < host->n_ports; i++)
6309 host->ports[i]->print_id = ata_print_id++;
6310
6311 rc = ata_scsi_add_hosts(host, sht);
6312 if (rc)
6313 return rc;
6314
6315 /* set cable, sata_spd_limit and report */
6316 for (i = 0; i < host->n_ports; i++) {
6317 struct ata_port *ap = host->ports[i];
6318 int irq_line;
6319 u32 scontrol;
6320 unsigned long xfer_mask;
6321
6322 /* set SATA cable type if still unset */
6323 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6324 ap->cbl = ATA_CBL_SATA;
6325
6326 /* init sata_spd_limit to the current value */
6327 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
6328 int spd = (scontrol >> 4) & 0xf;
afe3cc51
TH
6329 if (spd)
6330 ap->hw_sata_spd_limit &= (1 << spd) - 1;
f3187195
TH
6331 }
6332 ap->sata_spd_limit = ap->hw_sata_spd_limit;
6333
6334 /* report the secondary IRQ for second channel legacy */
6335 irq_line = host->irq;
6336 if (i == 1 && host->irq2)
6337 irq_line = host->irq2;
6338
6339 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6340 ap->udma_mask);
6341
6342 /* print per-port info to dmesg */
6343 if (!ata_port_is_dummy(ap))
6344 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
6345 "ctl 0x%p bmdma 0x%p irq %d\n",
6346 ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
6347 ata_mode_string(xfer_mask),
6348 ap->ioaddr.cmd_addr,
6349 ap->ioaddr.ctl_addr,
6350 ap->ioaddr.bmdma_addr,
6351 irq_line);
6352 else
6353 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6354 }
6355
6356 /* perform each probe synchronously */
6357 DPRINTK("probe begin\n");
6358 for (i = 0; i < host->n_ports; i++) {
6359 struct ata_port *ap = host->ports[i];
6360 int rc;
6361
6362 /* probe */
6363 if (ap->ops->error_handler) {
6364 struct ata_eh_info *ehi = &ap->eh_info;
6365 unsigned long flags;
6366
6367 ata_port_probe(ap);
6368
6369 /* kick EH for boot probing */
6370 spin_lock_irqsave(ap->lock, flags);
6371
6372 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
6373 ehi->action |= ATA_EH_SOFTRESET;
6374 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6375
f4d6d004 6376 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
6377 ap->pflags |= ATA_PFLAG_LOADING;
6378 ata_port_schedule_eh(ap);
6379
6380 spin_unlock_irqrestore(ap->lock, flags);
6381
6382 /* wait for EH to finish */
6383 ata_port_wait_eh(ap);
6384 } else {
6385 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6386 rc = ata_bus_probe(ap);
6387 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6388
6389 if (rc) {
6390 /* FIXME: do something useful here?
6391 * Current libata behavior will
6392 * tear down everything when
6393 * the module is removed
6394 * or the h/w is unplugged.
6395 */
6396 }
6397 }
6398 }
6399
6400 /* probes are done, now scan each port's disk(s) */
6401 DPRINTK("host probe begin\n");
6402 for (i = 0; i < host->n_ports; i++) {
6403 struct ata_port *ap = host->ports[i];
6404
6405 ata_scsi_scan_host(ap);
6406 }
6407
6408 return 0;
6409}
6410
f5cda257
TH
6411/**
6412 * ata_host_activate - start host, request IRQ and register it
6413 * @host: target ATA host
6414 * @irq: IRQ to request
6415 * @irq_handler: irq_handler used when requesting IRQ
6416 * @irq_flags: irq_flags used when requesting IRQ
6417 * @sht: scsi_host_template to use when registering the host
6418 *
6419 * After allocating an ATA host and initializing it, most libata
6420 * LLDs perform three steps to activate the host - start host,
6421 * request IRQ and register it. This helper takes necessary
6422 * arguments and performs the three steps in one go.
6423 *
6424 * LOCKING:
6425 * Inherited from calling layer (may sleep).
6426 *
6427 * RETURNS:
6428 * 0 on success, -errno otherwise.
6429 */
6430int ata_host_activate(struct ata_host *host, int irq,
6431 irq_handler_t irq_handler, unsigned long irq_flags,
6432 struct scsi_host_template *sht)
6433{
6434 int rc;
6435
6436 rc = ata_host_start(host);
6437 if (rc)
6438 return rc;
6439
6440 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6441 dev_driver_string(host->dev), host);
6442 if (rc)
6443 return rc;
6444
6445 rc = ata_host_register(host, sht);
6446 /* if failed, just free the IRQ and leave ports alone */
6447 if (rc)
6448 devm_free_irq(host->dev, irq, host);
6449
22888423
OJ
6450 /* Used to print device info at probe */
6451 host->irq = irq;
6452
f5cda257
TH
6453 return rc;
6454}
6455
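/*
 * Taken together with ata_host_alloc_pinfo(), a typical PCI probe path
 * shrinks to a sketch like this (BAR mapping elided; ppi and my_sht are
 * assumed to be defined by the driver):
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	... iomap BARs and fill host->ports[i]->ioaddr ...
 *	return ata_host_activate(host, pdev->irq, ata_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */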
720ba126
TH
6456/**
6457 * ata_port_detach - Detach ATA port in preparation for device removal
6458 * @ap: ATA port to be detached
6459 *
6460 * Detach all ATA devices and the associated SCSI devices of @ap;
6461 * then, remove the associated SCSI host. @ap is guaranteed to
6462 * be quiescent on return from this function.
6463 *
6464 * LOCKING:
6465 * Kernel thread context (may sleep).
6466 */
6467void ata_port_detach(struct ata_port *ap)
6468{
6469 unsigned long flags;
6470 int i;
6471
6472 if (!ap->ops->error_handler)
c3cf30a9 6473 goto skip_eh;
720ba126
TH
6474
6475 /* tell EH we're leaving & flush EH */
ba6a1308 6476 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 6477 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 6478 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6479
6480 ata_port_wait_eh(ap);
6481
6482 /* EH is now guaranteed to see UNLOADING, so no new device
6483 * will be attached. Disable all existing devices.
6484 */
ba6a1308 6485 spin_lock_irqsave(ap->lock, flags);
720ba126
TH
6486
6487 for (i = 0; i < ATA_MAX_DEVICES; i++)
6488 ata_dev_disable(&ap->device[i]);
6489
ba6a1308 6490 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6491
6492 /* Final freeze & EH. All in-flight commands are aborted. EH
6493 * will be skipped and retries will be terminated with bad
6494 * target.
6495 */
ba6a1308 6496 spin_lock_irqsave(ap->lock, flags);
720ba126 6497 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 6498 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6499
6500 ata_port_wait_eh(ap);
6501
6502 /* Flush hotplug task. The sequence is similar to
6503 * ata_port_flush_task().
6504 */
28e53bdd 6505 cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
720ba126 6506 cancel_delayed_work(&ap->hotplug_task);
28e53bdd 6507 cancel_work_sync(&ap->hotplug_task.work);
720ba126 6508
c3cf30a9 6509 skip_eh:
720ba126 6510 /* remove the associated SCSI host */
cca3974e 6511 scsi_remove_host(ap->scsi_host);
720ba126
TH
6512}
6513
0529c159
TH
6514/**
6515 * ata_host_detach - Detach all ports of an ATA host
6516 * @host: Host to detach
6517 *
6518 * Detach all ports of @host.
6519 *
6520 * LOCKING:
6521 * Kernel thread context (may sleep).
6522 */
6523void ata_host_detach(struct ata_host *host)
6524{
6525 int i;
6526
6527 for (i = 0; i < host->n_ports; i++)
6528 ata_port_detach(host->ports[i]);
6529}
6530
1da177e4
LT
6531/**
6532 * ata_std_ports - initialize ioaddr with standard port offsets.
6533 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6534 *
6535 * Utility function which initializes data_addr, error_addr,
6536 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6537 * device_addr, status_addr, and command_addr to standard offsets
6538 * relative to cmd_addr.
6539 *
6540 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6541 */
0baab86b 6542
1da177e4
LT
6543void ata_std_ports(struct ata_ioports *ioaddr)
6544{
6545 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6546 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6547 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6548 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6549 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6550 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6551 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6552 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6553 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6554 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6555}
6556
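/*
 * Sketch: an LLD fills in the command block base (and the control
 * address, which this helper deliberately leaves alone) and derives
 * the rest. The +0x206 offset mirrors the legacy taskfile layout;
 * base is a made-up iomapped cookie.
 *
 *	ap->ioaddr.cmd_addr = base;
 *	ap->ioaddr.altstatus_addr =
 *	ap->ioaddr.ctl_addr = base + 0x206;
 *	ata_std_ports(&ap->ioaddr);
 */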
0baab86b 6557
374b1873
JG
6558#ifdef CONFIG_PCI
6559
1da177e4
LT
6560/**
6561 * ata_pci_remove_one - PCI layer callback for device removal
6562 * @pdev: PCI device that was removed
6563 *
b878ca5d
TH
6564 * PCI layer indicates to libata via this hook that a hot-unplug or
6565 * module unload event has occurred. Detach all ports. Resource
6566 * release is handled via devres.
1da177e4
LT
6567 *
6568 * LOCKING:
6569 * Inherited from PCI layer (may sleep).
6570 */
f0d36efd 6571void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4
LT
6572{
6573 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 6574 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6575
b878ca5d 6576 ata_host_detach(host);
1da177e4
LT
6577}
6578
6579/* move to PCI subsystem */
057ace5e 6580int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6581{
6582 unsigned long tmp = 0;
6583
6584 switch (bits->width) {
6585 case 1: {
6586 u8 tmp8 = 0;
6587 pci_read_config_byte(pdev, bits->reg, &tmp8);
6588 tmp = tmp8;
6589 break;
6590 }
6591 case 2: {
6592 u16 tmp16 = 0;
6593 pci_read_config_word(pdev, bits->reg, &tmp16);
6594 tmp = tmp16;
6595 break;
6596 }
6597 case 4: {
6598 u32 tmp32 = 0;
6599 pci_read_config_dword(pdev, bits->reg, &tmp32);
6600 tmp = tmp32;
6601 break;
6602 }
6603
6604 default:
6605 return -EINVAL;
6606 }
6607
6608 tmp &= bits->mask;
6609
6610 return (tmp == bits->val) ? 1 : 0;
6611}
9b847548 6612
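/*
 * Sketch of a caller, following the port-enable checks PCI IDE drivers
 * perform (offset 0x41, byte-wide read, test bit 7; the values are
 * illustrative only):
 *
 *	static const struct pci_bits my_enable_bits = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		return -ENOENT;
 */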
6ffa01d8 6613#ifdef CONFIG_PM
3c5100c1 6614void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6615{
6616 pci_save_state(pdev);
4c90d971 6617 pci_disable_device(pdev);
500530f6 6618
4c90d971 6619 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6620 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6621}
6622
553c4aa6 6623int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6624{
553c4aa6
TH
6625 int rc;
6626
9b847548
JA
6627 pci_set_power_state(pdev, PCI_D0);
6628 pci_restore_state(pdev);
553c4aa6 6629
b878ca5d 6630 rc = pcim_enable_device(pdev);
553c4aa6
TH
6631 if (rc) {
6632 dev_printk(KERN_ERR, &pdev->dev,
6633 "failed to enable device after resume (%d)\n", rc);
6634 return rc;
6635 }
6636
9b847548 6637 pci_set_master(pdev);
553c4aa6 6638 return 0;
500530f6
TH
6639}
6640
3c5100c1 6641int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6642{
cca3974e 6643 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6644 int rc = 0;
6645
cca3974e 6646 rc = ata_host_suspend(host, mesg);
500530f6
TH
6647 if (rc)
6648 return rc;
6649
3c5100c1 6650 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6651
6652 return 0;
6653}
6654
6655int ata_pci_device_resume(struct pci_dev *pdev)
6656{
cca3974e 6657 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6658 int rc;
500530f6 6659
553c4aa6
TH
6660 rc = ata_pci_device_do_resume(pdev);
6661 if (rc == 0)
6662 ata_host_resume(host);
6663 return rc;
9b847548 6664}
6ffa01d8
TH
6665#endif /* CONFIG_PM */
6666
1da177e4
LT
6667#endif /* CONFIG_PCI */
6668
6669
1da177e4
LT
6670static int __init ata_init(void)
6671{
a8601e5f 6672 ata_probe_timeout *= HZ;
1da177e4
LT
6673 ata_wq = create_workqueue("ata");
6674 if (!ata_wq)
6675 return -ENOMEM;
6676
453b07ac
TH
6677 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6678 if (!ata_aux_wq) {
6679 destroy_workqueue(ata_wq);
6680 return -ENOMEM;
6681 }
6682
1da177e4
LT
6683 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6684 return 0;
6685}
6686
6687static void __exit ata_exit(void)
6688{
6689 destroy_workqueue(ata_wq);
453b07ac 6690 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6691}
6692
a4625085 6693subsys_initcall(ata_init);
1da177e4
LT
6694module_exit(ata_exit);
6695
67846b30 6696static unsigned long ratelimit_time;
34af946a 6697static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6698
6699int ata_ratelimit(void)
6700{
6701 int rc;
6702 unsigned long flags;
6703
6704 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6705
6706 if (time_after(jiffies, ratelimit_time)) {
6707 rc = 1;
6708 ratelimit_time = jiffies + (HZ/5);
6709 } else
6710 rc = 0;
6711
6712 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6713
6714 return rc;
6715}
6716
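/*
 * Sketch: gate a noisy interrupt-path warning to roughly five prints a
 * second, the way libata's own hot paths use this helper.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
 */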
c22daff4
TH
6717/**
6718 * ata_wait_register - wait until register value changes
6719 * @reg: IO-mapped register
6720 * @mask: Mask to apply to read register value
6721 * @val: Wait condition
6722 * @interval_msec: polling interval in milliseconds
6723 * @timeout_msec: timeout in milliseconds
6724 *
6725 * Waiting for some bits of register to change is a common
6726 * operation for ATA controllers. This function reads 32bit LE
6727 * IO-mapped register @reg and tests for the following condition.
6728 *
6729 * (*@reg & mask) != val
6730 *
6731 * If the condition is met, it returns; otherwise, the process is
6732 * repeated after @interval_msec until timeout.
6733 *
6734 * LOCKING:
6735 * Kernel thread context (may sleep)
6736 *
6737 * RETURNS:
6738 * The final register value.
6739 */
6740u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6741 unsigned long interval_msec,
6742 unsigned long timeout_msec)
6743{
6744 unsigned long timeout;
6745 u32 tmp;
6746
6747 tmp = ioread32(reg);
6748
6749 /* Calculate timeout _after_ the first read to make sure
6750 * preceding writes reach the controller before starting to
6751 * eat away the timeout.
6752 */
6753 timeout = jiffies + (timeout_msec * HZ) / 1000;
6754
6755 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6756 msleep(interval_msec);
6757 tmp = ioread32(reg);
6758 }
6759
6760 return tmp;
6761}
6762
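/*
 * Sketch: wait up to a second for BSY to clear in a 32-bit MMIO status
 * register, polling every 10ms (mmio and MY_STATUS are made-up names).
 * The helper returns as soon as (status & ATA_BUSY) != ATA_BUSY.
 *
 *	u32 status = ata_wait_register(mmio + MY_STATUS,
 *				       ATA_BUSY, ATA_BUSY, 10, 1000);
 *	if (status & ATA_BUSY)
 *		return -EBUSY;
 */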
dd5b06c4
TH
6763/*
6764 * Dummy port_ops
6765 */
6766static void ata_dummy_noret(struct ata_port *ap) { }
6767static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6768static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6769
6770static u8 ata_dummy_check_status(struct ata_port *ap)
6771{
6772 return ATA_DRDY;
6773}
6774
6775static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6776{
6777 return AC_ERR_SYSTEM;
6778}
6779
6780const struct ata_port_operations ata_dummy_port_ops = {
6781 .port_disable = ata_port_disable,
6782 .check_status = ata_dummy_check_status,
6783 .check_altstatus = ata_dummy_check_status,
6784 .dev_select = ata_noop_dev_select,
6785 .qc_prep = ata_noop_qc_prep,
6786 .qc_issue = ata_dummy_qc_issue,
6787 .freeze = ata_dummy_noret,
6788 .thaw = ata_dummy_noret,
6789 .error_handler = ata_dummy_noret,
6790 .post_internal_cmd = ata_dummy_qc_noret,
6791 .irq_clear = ata_dummy_noret,
6792 .port_start = ata_dummy_ret0,
6793 .port_stop = ata_dummy_noret,
6794};
6795
21b0ad4f
TH
6796const struct ata_port_info ata_dummy_port_info = {
6797 .port_ops = &ata_dummy_port_ops,
6798};
6799
1da177e4
LT
6800/*
6801 * libata is essentially a library of internal helper functions for
6802 * low-level ATA host controller drivers. As such, the API/ABI is
6803 * likely to change as new drivers are added and updated.
6804 * Do not depend on ABI/API stability.
6805 */
6806
e9c83914
TH
6807EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6808EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6809EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6810EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 6811EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
6812EXPORT_SYMBOL_GPL(ata_std_bios_param);
6813EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6814EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 6815EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 6816EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 6817EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 6818EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 6819EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 6820EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
6821EXPORT_SYMBOL_GPL(ata_sg_init);
6822EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6823EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6824EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6825EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6826EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
6827EXPORT_SYMBOL_GPL(ata_tf_load);
6828EXPORT_SYMBOL_GPL(ata_tf_read);
6829EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6830EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 6831EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
6832EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6833EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6834EXPORT_SYMBOL_GPL(ata_check_status);
6835EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
6836EXPORT_SYMBOL_GPL(ata_exec_command);
6837EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 6838EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 6839EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 6840EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
6841EXPORT_SYMBOL_GPL(ata_data_xfer);
6842EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6843EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 6844EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
6845EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6846EXPORT_SYMBOL_GPL(ata_bmdma_start);
6847EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6848EXPORT_SYMBOL_GPL(ata_bmdma_status);
6849EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
6850EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6851EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6852EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6853EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6854EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 6855EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 6856EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6857EXPORT_SYMBOL_GPL(sata_set_spd);
d7bb4cc7
TH
6858EXPORT_SYMBOL_GPL(sata_phy_debounce);
6859EXPORT_SYMBOL_GPL(sata_phy_resume);
1da177e4
LT
6860EXPORT_SYMBOL_GPL(sata_phy_reset);
6861EXPORT_SYMBOL_GPL(__sata_phy_reset);
6862EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 6863EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 6864EXPORT_SYMBOL_GPL(ata_std_softreset);
b6103f6d 6865EXPORT_SYMBOL_GPL(sata_port_hardreset);
c2bd5804
TH
6866EXPORT_SYMBOL_GPL(sata_std_hardreset);
6867EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6868EXPORT_SYMBOL_GPL(ata_dev_classify);
6869EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6870EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6871EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6872EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 6873EXPORT_SYMBOL_GPL(ata_busy_sleep);
d4b2bab4 6874EXPORT_SYMBOL_GPL(ata_wait_ready);
86e45b6b 6875EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
6876EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6877EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6878EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6879EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6880EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 6881EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
6882EXPORT_SYMBOL_GPL(sata_scr_valid);
6883EXPORT_SYMBOL_GPL(sata_scr_read);
6884EXPORT_SYMBOL_GPL(sata_scr_write);
6885EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6886EXPORT_SYMBOL_GPL(ata_port_online);
6887EXPORT_SYMBOL_GPL(ata_port_offline);
6ffa01d8 6888#ifdef CONFIG_PM
cca3974e
JG
6889EXPORT_SYMBOL_GPL(ata_host_suspend);
6890EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6891#endif /* CONFIG_PM */
6a62a04d
TH
6892EXPORT_SYMBOL_GPL(ata_id_string);
6893EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 6894EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6919a0a6 6895EXPORT_SYMBOL_GPL(ata_device_blacklisted);
1da177e4
LT
6896EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6897
1bc4ccff 6898EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
6899EXPORT_SYMBOL_GPL(ata_timing_compute);
6900EXPORT_SYMBOL_GPL(ata_timing_merge);
6901
1da177e4
LT
6902#ifdef CONFIG_PCI
6903EXPORT_SYMBOL_GPL(pci_test_config_bits);
d491b27b 6904EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
1626aeb8 6905EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
21b0ad4f 6906EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
1da177e4
LT
6907EXPORT_SYMBOL_GPL(ata_pci_init_one);
6908EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6909#ifdef CONFIG_PM
500530f6
TH
6910EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6911EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6912EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6913EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6914#endif /* CONFIG_PM */
67951ade
AC
6915EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6916EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 6917#endif /* CONFIG_PCI */
9b847548 6918
ece1d636 6919EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03
TH
6920EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6921EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499
TH
6922EXPORT_SYMBOL_GPL(ata_port_freeze);
6923EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6924EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6925EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6926EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6927EXPORT_SYMBOL_GPL(ata_do_eh);
83625006
AI
6928EXPORT_SYMBOL_GPL(ata_irq_on);
6929EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6930EXPORT_SYMBOL_GPL(ata_irq_ack);
6931EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
a619f981 6932EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
6933
6934EXPORT_SYMBOL_GPL(ata_cable_40wire);
6935EXPORT_SYMBOL_GPL(ata_cable_80wire);
6936EXPORT_SYMBOL_GPL(ata_cable_unknown);
6937EXPORT_SYMBOL_GPL(ata_cable_sata);