[libata] sata_mv: clean up DMA boundary issues, turn on 64-bit DMA
[linux-2.6-block.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
cb48cab7 62#define DRV_VERSION "2.20" /* must be exactly four chars */
fda0efc5
JG
63
64
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/* forward declarations for helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

/* monotonically increasing print ID handed out to ports */
static unsigned int ata_print_id = 1;
/* workqueue used for PIO/port tasks (one task per port at a time) */
static struct workqueue_struct *ata_wq;

/* auxiliary workqueue used by EH and hotplug */
struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
104
0baab86b 105
1da177e4
LT
106/**
107 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
108 * @tf: Taskfile to convert
109 * @fis: Buffer into which data will output
110 * @pmp: Port multiplier port
111 *
112 * Converts a standard ATA taskfile to a Serial ATA
113 * FIS structure (Register - Host to Device).
114 *
115 * LOCKING:
116 * Inherited from caller.
117 */
118
057ace5e 119void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
1da177e4
LT
120{
121 fis[0] = 0x27; /* Register - Host to Device FIS */
122 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
123 bit 7 indicates Command FIS */
124 fis[2] = tf->command;
125 fis[3] = tf->feature;
126
127 fis[4] = tf->lbal;
128 fis[5] = tf->lbam;
129 fis[6] = tf->lbah;
130 fis[7] = tf->device;
131
132 fis[8] = tf->hob_lbal;
133 fis[9] = tf->hob_lbam;
134 fis[10] = tf->hob_lbah;
135 fis[11] = tf->hob_feature;
136
137 fis[12] = tf->nsect;
138 fis[13] = tf->hob_nsect;
139 fis[14] = 0;
140 fis[15] = tf->ctl;
141
142 fis[16] = 0;
143 fis[17] = 0;
144 fis[18] = 0;
145 fis[19] = 0;
146}
147
148/**
149 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
150 * @fis: Buffer from which data will be input
151 * @tf: Taskfile to output
152 *
e12a1be6 153 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
154 *
155 * LOCKING:
156 * Inherited from caller.
157 */
158
057ace5e 159void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
160{
161 tf->command = fis[2]; /* status */
162 tf->feature = fis[3]; /* error */
163
164 tf->lbal = fis[4];
165 tf->lbam = fis[5];
166 tf->lbah = fis[6];
167 tf->device = fis[7];
168
169 tf->hob_lbal = fis[8];
170 tf->hob_lbam = fis[9];
171 tf->hob_lbah = fis[10];
172
173 tf->nsect = fis[12];
174 tf->hob_nsect = fis[13];
175}
176
8cbd6df1
AL
177static const u8 ata_rw_cmds[] = {
178 /* pio multi */
179 ATA_CMD_READ_MULTI,
180 ATA_CMD_WRITE_MULTI,
181 ATA_CMD_READ_MULTI_EXT,
182 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
183 0,
184 0,
185 0,
186 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
187 /* pio */
188 ATA_CMD_PIO_READ,
189 ATA_CMD_PIO_WRITE,
190 ATA_CMD_PIO_READ_EXT,
191 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
192 0,
193 0,
194 0,
195 0,
8cbd6df1
AL
196 /* dma */
197 ATA_CMD_READ,
198 ATA_CMD_WRITE,
199 ATA_CMD_READ_EXT,
9a3dccc4
TH
200 ATA_CMD_WRITE_EXT,
201 0,
202 0,
203 0,
204 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 205};
1da177e4
LT
206
207/**
8cbd6df1 208 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
209 * @tf: command to examine and configure
210 * @dev: device tf belongs to
1da177e4 211 *
2e9edbf8 212 * Examine the device configuration and tf->flags to calculate
8cbd6df1 213 * the proper read/write commands and protocol to use.
1da177e4
LT
214 *
215 * LOCKING:
216 * caller.
217 */
bd056d7e 218static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 219{
9a3dccc4 220 u8 cmd;
1da177e4 221
9a3dccc4 222 int index, fua, lba48, write;
2e9edbf8 223
9a3dccc4 224 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
225 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
226 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 227
8cbd6df1
AL
228 if (dev->flags & ATA_DFLAG_PIO) {
229 tf->protocol = ATA_PROT_PIO;
9a3dccc4 230 index = dev->multi_count ? 0 : 8;
bd056d7e 231 } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
232 /* Unable to use DMA due to host limitation */
233 tf->protocol = ATA_PROT_PIO;
0565c26d 234 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
235 } else {
236 tf->protocol = ATA_PROT_DMA;
9a3dccc4 237 index = 16;
8cbd6df1 238 }
1da177e4 239
9a3dccc4
TH
240 cmd = ata_rw_cmds[index + fua + lba48 + write];
241 if (cmd) {
242 tf->command = cmd;
243 return 0;
244 }
245 return -1;
1da177e4
LT
246}
247
35b649fe
TH
248/**
249 * ata_tf_read_block - Read block address from ATA taskfile
250 * @tf: ATA taskfile of interest
251 * @dev: ATA device @tf belongs to
252 *
253 * LOCKING:
254 * None.
255 *
256 * Read block address from @tf. This function can handle all
257 * three address formats - LBA, LBA48 and CHS. tf->protocol and
258 * flags select the address format to use.
259 *
260 * RETURNS:
261 * Block address read from @tf.
262 */
263u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
264{
265 u64 block = 0;
266
267 if (tf->flags & ATA_TFLAG_LBA) {
268 if (tf->flags & ATA_TFLAG_LBA48) {
269 block |= (u64)tf->hob_lbah << 40;
270 block |= (u64)tf->hob_lbam << 32;
271 block |= tf->hob_lbal << 24;
272 } else
273 block |= (tf->device & 0xf) << 24;
274
275 block |= tf->lbah << 16;
276 block |= tf->lbam << 8;
277 block |= tf->lbal;
278 } else {
279 u32 cyl, head, sect;
280
281 cyl = tf->lbam | (tf->lbah << 8);
282 head = tf->device & 0xf;
283 sect = tf->lbal;
284
285 block = (cyl * dev->heads + head) * dev->sectors + sect;
286 }
287
288 return block;
289}
290
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag goes in nsect bits 3-7; sector count in features */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		/* device bit 6 set per FPDMA command layout; bit 7 carries FUA */
		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
413
cb95d562
TH
414/**
415 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
416 * @pio_mask: pio_mask
417 * @mwdma_mask: mwdma_mask
418 * @udma_mask: udma_mask
419 *
420 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
421 * unsigned int xfer_mask.
422 *
423 * LOCKING:
424 * None.
425 *
426 * RETURNS:
427 * Packed xfer_mask.
428 */
429static unsigned int ata_pack_xfermask(unsigned int pio_mask,
430 unsigned int mwdma_mask,
431 unsigned int udma_mask)
432{
433 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
434 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
435 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
436}
437
c0489e4e
TH
438/**
439 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
440 * @xfer_mask: xfer_mask to unpack
441 * @pio_mask: resulting pio_mask
442 * @mwdma_mask: resulting mwdma_mask
443 * @udma_mask: resulting udma_mask
444 *
445 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
446 * Any NULL distination masks will be ignored.
447 */
448static void ata_unpack_xfermask(unsigned int xfer_mask,
449 unsigned int *pio_mask,
450 unsigned int *mwdma_mask,
451 unsigned int *udma_mask)
452{
453 if (pio_mask)
454 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
455 if (mwdma_mask)
456 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
457 if (udma_mask)
458 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
459}
460
/* Maps each transfer-type bit range within an xfer_mask to the base
 * XFER_* mode code for that range; used by the mask/mode converters below.
 */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and width within xfer_mask */
	u8 base;		/* lowest XFER_* code of this range */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },	/* terminator: negative shift ends iteration */
};
470
471/**
472 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
473 * @xfer_mask: xfer_mask of interest
474 *
475 * Return matching XFER_* value for @xfer_mask. Only the highest
476 * bit of @xfer_mask is considered.
477 *
478 * LOCKING:
479 * None.
480 *
481 * RETURNS:
482 * Matching XFER_* value, 0 if no match found.
483 */
484static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
485{
486 int highbit = fls(xfer_mask) - 1;
487 const struct ata_xfer_ent *ent;
488
489 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
490 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
491 return ent->base + highbit - ent->shift;
492 return 0;
493}
494
495/**
496 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
497 * @xfer_mode: XFER_* of interest
498 *
499 * Return matching xfer_mask for @xfer_mode.
500 *
501 * LOCKING:
502 * None.
503 *
504 * RETURNS:
505 * Matching xfer_mask, 0 if no match found.
506 */
507static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
508{
509 const struct ata_xfer_ent *ent;
510
511 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
512 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
513 return 1 << (ent->shift + xfer_mode - ent->base);
514 return 0;
515}
516
517/**
518 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
519 * @xfer_mode: XFER_* of interest
520 *
521 * Return matching xfer_shift for @xfer_mode.
522 *
523 * LOCKING:
524 * None.
525 *
526 * RETURNS:
527 * Matching xfer_shift, -1 if no match found.
528 */
529static int ata_xfer_mode2shift(unsigned int xfer_mode)
530{
531 const struct ata_xfer_ent *ent;
532
533 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
534 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
535 return ent->shift;
536 return -1;
537}
538
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* indexed by bit position within xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}
584
/* Return a human-readable name for SATA link speed @spd (1-based),
 * or "<unknown>" for 0 or out-of-range values.
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || spd > sizeof(spd_str) / sizeof(spd_str[0]))
		return "<unknown>";

	return spd_str[spd - 1];
}
596
/* Disable @dev: warn (if driver messages are enabled), drop the transfer
 * mode to PIO0 quietly, and bump dev->class to the matching *_UNSUP class.
 * No-op if the device is already disabled.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		/* force PIO0 without the usual warning messages */
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		/* class++ turns ATA_DEV_ATA/ATAPI into the *_UNSUP variant */
		dev->class++;
	}
}
606
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write the 0x55/0xaa pattern twice, then once more before readback;
	 * only the last write matters for the echo check */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
649
1da177e4
LT
650/**
651 * ata_dev_classify - determine device type based on ATA-spec signature
652 * @tf: ATA taskfile register set for device to be identified
653 *
654 * Determine from taskfile register contents whether a device is
655 * ATA or ATAPI, as per "Signature and persistence" section
656 * of ATA/PI spec (volume 1, sect 5.14).
657 *
658 * LOCKING:
659 * None.
660 *
661 * RETURNS:
662 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
663 * the event of failure.
664 */
665
057ace5e 666unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
667{
668 /* Apple's open source Darwin code hints that some devices only
669 * put a proper signature into the LBA mid/high registers,
670 * So, we only check those. It's sufficient for uniqueness.
671 */
672
673 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
674 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
675 DPRINTK("found ATA device by sig\n");
676 return ATA_DEV_ATA;
677 }
678
679 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
680 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
681 DPRINTK("found ATAPI device by sig\n");
682 return ATA_DEV_ATAPI;
683 }
684
685 DPRINTK("unknown device\n");
686 return ATA_DEV_UNKNOWN;
687}
688
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	/* after reset, the feature register holds the diagnostic code */
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* 0x81 on master: slave failed diags, master is fine */
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	/* all-zero status on a claimed ATA device means nothing is there */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
747
/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	while (len > 0) {
		unsigned int word = id[ofs++];

		/* high byte first, then low byte */
		*s++ = word >> 8;
		*s++ = word & 0xff;

		len -= 2;
	}
}
781
0e949ff3 782/**
6a62a04d 783 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
784 * @id: IDENTIFY DEVICE results we will examine
785 * @s: string into which data is output
786 * @ofs: offset into identify device page
787 * @len: length of string to return. must be an odd number.
788 *
6a62a04d 789 * This function is identical to ata_id_string except that it
0e949ff3
TH
790 * trims trailing spaces and terminates the resulting string with
791 * null. @len must be actual maximum length (even number) + 1.
792 *
793 * LOCKING:
794 * caller.
795 */
6a62a04d
TH
796void ata_id_c_string(const u16 *id, unsigned char *s,
797 unsigned int ofs, unsigned int len)
0e949ff3
TH
798{
799 unsigned char *p;
800
801 WARN_ON(!(len & 1));
802
6a62a04d 803 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
804
805 p = s + strnlen(s, len - 1);
806 while (p > s && p[-1] == ' ')
807 p--;
808 *p = '\0';
809}
0baab86b 810
2940740b
TH
811static u64 ata_id_n_sectors(const u16 *id)
812{
813 if (ata_id_has_lba(id)) {
814 if (ata_id_has_lba48(id))
815 return ata_id_u64(id, 100);
816 else
817 return ata_id_u32(id, 60);
818 } else {
819 if (ata_id_current_chs_valid(id))
820 return ata_id_u32(id, 57);
821 else
822 return id[1] * id[3] * id[6];
823 }
824}
825
10305f0f
A
826/**
827 * ata_id_to_dma_mode - Identify DMA mode from id block
828 * @dev: device to identify
cc261267 829 * @unknown: mode to assume if we cannot tell
10305f0f
A
830 *
831 * Set up the timing values for the device based upon the identify
832 * reported values for the DMA mode. This function is used by drivers
833 * which rely upon firmware configured modes, but wish to report the
834 * mode correctly when possible.
835 *
836 * In addition we emit similarly formatted messages to the default
837 * ata_dev_set_mode handler, in order to provide consistency of
838 * presentation.
839 */
840
841void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
842{
843 unsigned int mask;
844 u8 mode;
845
846 /* Pack the DMA modes */
847 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
848 if (dev->id[53] & 0x04)
849 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
850
851 /* Select the mode in use */
852 mode = ata_xfer_mask2mode(mask);
853
854 if (mode != 0) {
855 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
856 ata_mode_string(mask));
857 } else {
858 /* SWDMA perhaps ? */
859 mode = unknown;
860 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
861 }
862
863 /* Configure the device reporting */
864 dev->xfer_mode = mode;
865 dev->xfer_shift = ata_xfer_mode2shift(mode);
866}
867
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty: for hosts with no device-select register */
}

0baab86b 884
1da177e4
LT
885/**
886 * ata_std_dev_select - Select device 0/1 on ATA bus
887 * @ap: ATA channel to manipulate
888 * @device: ATA device (numbered from zero) to select
889 *
890 * Use the method defined in the ATA specification to
891 * make either device 0, or device 1, active on the
0baab86b
EF
892 * ATA channel. Works with both PIO and MMIO.
893 *
894 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
895 *
896 * LOCKING:
897 * caller.
898 */
899
900void ata_std_dev_select (struct ata_port *ap, unsigned int device)
901{
902 u8 tmp;
903
904 if (device == 0)
905 tmp = ATA_DEVICE_OBS;
906 else
907 tmp = ATA_DEVICE_OBS | ATA_DEV1;
908
0d5ff566 909 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
910 ata_pause(ap); /* needed; also flushes, for mmio */
911}
912
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	/* wait for the channel to go idle before switching devices */
	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		/* ATAPI devices may need extra settle time after select */
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
950
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* capability / validity / mode words */
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	/* version / command-set support words */
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	/* UDMA modes and hardware reset result */
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
989
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 lists modes above PIO2; modes 0-2 always supported */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		/* CFA advanced modes map to PIO5/6 and MWDMA3/4 */
		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	/* word 53 bit 2 indicates the UDMA modes word (88) is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1058
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	/* a flush is in progress; it would cancel us anyway */
	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
1094
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* Raise the flag under ap->lock so a concurrently running task
	 * observes it and refrains from re-queueing itself.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		/* cancel failed => the work may already be executing;
		 * flush once more to wait for it to finish.
		 */
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1137
7102d230 1138static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1139{
77853bf2 1140 struct completion *waiting = qc->private_data;
a2a7a662 1141
a2a7a662 1142 complete(waiting);
a2a7a662
TH
1143}
1144
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save the command state we're preempting; restored after the
	 * internal command finishes so normal operation can resume.
	 */
	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		/* total byte count across all sg entries */
		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* a failed qc must carry some error information; synthesize one */
	if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				       "zero err_mask for failed "
				       "internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the preempted command state saved above */
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1312
2432697b 1313/**
33480a0e 1314 * ata_exec_internal - execute libata internal command
2432697b
TH
1315 * @dev: Device to which the command is sent
1316 * @tf: Taskfile registers for the command and the result
1317 * @cdb: CDB for packet command
1318 * @dma_dir: Data tranfer direction of the command
1319 * @buf: Data buffer of the command
1320 * @buflen: Length of data buffer
1321 *
1322 * Wrapper around ata_exec_internal_sg() which takes simple
1323 * buffer instead of sg list.
1324 *
1325 * LOCKING:
1326 * None. Should be called with kernel context, might sleep.
1327 *
1328 * RETURNS:
1329 * Zero on success, AC_ERR_* mask on failure
1330 */
1331unsigned ata_exec_internal(struct ata_device *dev,
1332 struct ata_taskfile *tf, const u8 *cdb,
1333 int dma_dir, void *buf, unsigned int buflen)
1334{
33480a0e
TH
1335 struct scatterlist *psg = NULL, sg;
1336 unsigned int n_elem = 0;
2432697b 1337
33480a0e
TH
1338 if (dma_dir != DMA_NONE) {
1339 WARN_ON(!buf);
1340 sg_init_one(&sg, buf, buflen);
1341 psg = &sg;
1342 n_elem++;
1343 }
2432697b 1344
33480a0e 1345 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1346}
1347
977e6b9f
TH
1348/**
1349 * ata_do_simple_cmd - execute simple internal command
1350 * @dev: Device to which the command is sent
1351 * @cmd: Opcode to execute
1352 *
1353 * Execute a 'simple' command, that only consists of the opcode
1354 * 'cmd' itself, without filling any other registers
1355 *
1356 * LOCKING:
1357 * Kernel thread context (may sleep).
1358 *
1359 * RETURNS:
1360 * Zero on success, AC_ERR_* mask on failure
e58eb583 1361 */
77b08fb5 1362unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1363{
1364 struct ata_taskfile tf;
e58eb583
TH
1365
1366 ata_tf_init(dev, &tf);
1367
1368 tf.command = cmd;
1369 tf.flags |= ATA_TFLAG_DEVICE;
1370 tf.protocol = ATA_PROT_NODATA;
1371
977e6b9f 1372 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1373}
1374
1bc4ccff
AC
1375/**
1376 * ata_pio_need_iordy - check if iordy needed
1377 * @adev: ATA device
1378 *
1379 * Check if the current speed of the device requires IORDY. Used
1380 * by various controllers for chip configuration.
1381 */
1382
1383unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1384{
1385 int pio;
1386 int speed = adev->pio_mode - XFER_PIO_0;
1387
1388 if (speed < 2)
1389 return 0;
1390 if (speed > 2)
1391 return 1;
2e9edbf8 1392
1bc4ccff
AC
1393 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1394
1395 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1396 pio = adev->id[ATA_ID_EIDE_PIO];
1397 /* Is the speed faster than the drive allows non IORDY ? */
1398 if (pio) {
1399 /* This is cycle times not frequency - watch the logic! */
1400 if (pio > 240) /* PIO2 is 240nS per cycle */
1401 return 1;
1402 return 0;
1403 }
1404 }
1405 return 0;
1406}
1407
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY opcode matching the (presumed) device class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the reported type must match the class we asked for */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1532
3373efd8 1533static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1534{
3373efd8 1535 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1536}
1537
a6e6ce8e
TH
1538static void ata_dev_config_ncq(struct ata_device *dev,
1539 char *desc, size_t desc_sz)
1540{
1541 struct ata_port *ap = dev->ap;
1542 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1543
1544 if (!ata_id_has_ncq(dev->id)) {
1545 desc[0] = '\0';
1546 return;
1547 }
6919a0a6
AC
1548 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1549 snprintf(desc, desc_sz, "NCQ (not used)");
1550 return;
1551 }
a6e6ce8e 1552 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1553 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1554 dev->flags |= ATA_DFLAG_NCQ;
1555 }
1556
1557 if (hdepth >= ddepth)
1558 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1559 else
1560 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1561}
1562
e6d902a3
BK
1563static void ata_set_port_max_cmd_len(struct ata_port *ap)
1564{
1565 int i;
1566
cca3974e
JG
1567 if (ap->scsi_host) {
1568 unsigned int len = 0;
1569
e6d902a3 1570 for (i = 0; i < ATA_MAX_DEVICES; i++)
cca3974e
JG
1571 len = max(len, ap->device[i].cdb_len);
1572
1573 ap->scsi_host->max_cmd_len = len;
e6d902a3
BK
1574 }
1575}
1576
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set _SDD (ACPI); failure is logged but not fatal */
	rc = ata_acpi_push_id(ap, dev->devno);
	if (rc) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			rc);
	}

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
				sizeof(modelbuf));

		/* word 59 bit 8 set => low byte is the current multi count */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min(ATA_MAX_SECTORS_128, dev->max_sectors);

	/* limit ATAPI DMA to R/W commands only */
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
		dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;

	/* LLDD gets the last word on per-device configuration */
	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
1808
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* phy_reset may have disabled the port; re-enable before probing */
	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	for(i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc)
		goto fail;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	/* NOTE: dev points at the device whose probe step failed */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through - last-chance slowdown applies here too */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ap);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
1935
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing DISABLED is all it takes to re-enable the port */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
1951
3be680b7
TH
1952/**
1953 * sata_print_link_status - Print SATA link status
1954 * @ap: SATA port to printk link status about
1955 *
1956 * This function prints link speed and status of a SATA link.
1957 *
1958 * LOCKING:
1959 * None.
1960 */
43727fbc 1961void sata_print_link_status(struct ata_port *ap)
3be680b7 1962{
6d5f9732 1963 u32 sstatus, scontrol, tmp;
3be680b7 1964
81952c54 1965 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1966 return;
81952c54 1967 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1968
81952c54 1969 if (ata_port_online(ap)) {
3be680b7 1970 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1971 ata_port_printk(ap, KERN_INFO,
1972 "SATA link up %s (SStatus %X SControl %X)\n",
1973 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1974 } else {
f15a1daf
TH
1975 ata_port_printk(ap, KERN_INFO,
1976 "SATA link down (SStatus %X SControl %X)\n",
1977 sstatus, scontrol);
3be680b7
TH
1978 }
1979}
1980
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary; DET == 1 means
	 * device presence detected but phy not yet established.
	 */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device itself to come out of BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
2035
2036/**
780a87f7
JG
2037 * sata_phy_reset - Reset SATA bus.
2038 * @ap: SATA port associated with target SATA PHY.
1da177e4 2039 *
780a87f7
JG
2040 * This function resets the SATA bus, and then probes
2041 * the bus for devices.
1da177e4
LT
2042 *
2043 * LOCKING:
0cba632b 2044 * PCI/etc. bus probe sem.
1da177e4
LT
2045 *
2046 */
2047void sata_phy_reset(struct ata_port *ap)
2048{
2049 __sata_phy_reset(ap);
198e0fed 2050 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2051 return;
2052 ata_bus_reset(ap);
2053}
2054
ebdfca6e
AC
2055/**
2056 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2057 * @adev: device
2058 *
2059 * Obtain the other device on the same cable, or if none is
2060 * present NULL is returned
2061 */
2e9edbf8 2062
3373efd8 2063struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2064{
3373efd8 2065 struct ata_port *ap = adev->ap;
ebdfca6e 2066 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2067 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2068 return NULL;
2069 return pair;
2070}
2071
1da177e4 2072/**
780a87f7
JG
2073 * ata_port_disable - Disable port.
2074 * @ap: Port to be disabled.
1da177e4 2075 *
780a87f7
JG
2076 * Modify @ap data structure such that the system
2077 * thinks that the entire port is disabled, and should
2078 * never attempt to probe or communicate with devices
2079 * on this port.
2080 *
cca3974e 2081 * LOCKING: host lock, or some other form of
780a87f7 2082 * serialization.
1da177e4
LT
2083 */
2084
2085void ata_port_disable(struct ata_port *ap)
2086{
2087 ap->device[0].class = ATA_DEV_NONE;
2088 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2089 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2090}
2091
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@ap: Port to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @ap downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	/* drop the highest speed bit from the current limit mask;
	 * a mask of 0 or 1 means there is nothing slower to fall back to
	 */
	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* SStatus bits 7:4 hold the current negotiated speed (1-based);
	 * cap the mask to speeds strictly below it
	 */
	spd = (sstatus >> 4) & 0xf;
	if (spd <= 1)
		return -EINVAL;
	spd--;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2136
3c567b7d 2137static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2138{
2139 u32 spd, limit;
2140
2141 if (ap->sata_spd_limit == UINT_MAX)
2142 limit = 0;
2143 else
2144 limit = fls(ap->sata_spd_limit);
2145
2146 spd = (*scontrol >> 4) & 0xf;
2147 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2148
2149 return spd != limit;
2150}
2151
2152/**
3c567b7d 2153 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2154 * @ap: Port in question
2155 *
2156 * Test whether the spd limit in SControl matches
2157 * @ap->sata_spd_limit. This function is used to determine
2158 * whether hardreset is necessary to apply SATA spd
2159 * configuration.
2160 *
2161 * LOCKING:
2162 * Inherited from caller.
2163 *
2164 * RETURNS:
2165 * 1 if SATA spd configuration is needed, 0 otherwise.
2166 */
3c567b7d 2167int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2168{
2169 u32 scontrol;
2170
81952c54 2171 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2172 return 0;
2173
3c567b7d 2174 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2175}
2176
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@ap: Port to set SATA spd for
 *
 *	Set SATA spd of @ap according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	/* __sata_set_spd_needed() updates scontrol in place; skip the
	 * write when the SPD field already matches the limit
	 */
	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
2206
452503f9
AC
2207/*
2208 * This mode timing computation functionality is ported over from
2209 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2210 */
2211/*
b352e57d 2212 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2213 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2214 * for UDMA6, which is currently supported only by Maxtor drives.
2215 *
2216 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2217 */
2218
2219static const struct ata_timing ata_timing[] = {
2220
2221 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2222 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2223 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2224 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2225
b352e57d
AC
2226 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2227 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2228 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2229 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2230 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2231
2232/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2233
452503f9
AC
2234 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2235 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2236 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2237
452503f9
AC
2238 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2239 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2240 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2241
b352e57d
AC
2242 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2243 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2244 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2245 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2246
2247 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2248 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2249 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2250
2251/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2252
2253 { 0xFF }
2254};
2255
/* round (v) up to a whole number of (unit)s; ENOUGH assumes v > 0 */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
/* like ENOUGH but maps 0 (field unused) to 0 instead of rounding up */
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

/* Convert nanosecond timings in @t into bus-clock counts in @q.
 * @T is the bus clock period and @UT the UDMA clock period (both in
 * the same 1/1000-ns scale used by the callers).
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2270
2271void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2272 struct ata_timing *m, unsigned int what)
2273{
2274 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2275 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2276 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2277 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2278 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2279 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2280 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2281 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2282}
2283
2284static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2285{
2286 const struct ata_timing *t;
2287
2288 for (t = ata_timing; t->mode != speed; t++)
91190758 2289 if (t->mode == 0xFF)
452503f9 2290 return NULL;
2e9edbf8 2291 return t;
452503f9
AC
2292}
2293
/* Compute the quantized timing for @adev at transfer mode @speed into
 * @t, using bus clock period @T and UDMA clock period @UT.  Merges in
 * drive-reported EIDE cycle minimums and the device's PIO timing so
 * that DMA never runs a faster cycle than PIO.  Returns 0 on success,
 * -EINVAL if @speed has no table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse on the device's own PIO mode and merge all fields */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
2358
cf176e1a
TH
2359/**
2360 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2361 * @dev: Device to adjust xfer masks
458337db 2362 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2363 *
2364 * Adjust xfer masks of @dev downward. Note that this function
2365 * does not apply the change. Invoking ata_set_mode() afterwards
2366 * will apply the limit.
2367 *
2368 * LOCKING:
2369 * Inherited from caller.
2370 *
2371 * RETURNS:
2372 * 0 on success, negative errno on failure
2373 */
458337db 2374int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2375{
458337db
TH
2376 char buf[32];
2377 unsigned int orig_mask, xfer_mask;
2378 unsigned int pio_mask, mwdma_mask, udma_mask;
2379 int quiet, highbit;
cf176e1a 2380
458337db
TH
2381 quiet = !!(sel & ATA_DNXFER_QUIET);
2382 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2383
458337db
TH
2384 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2385 dev->mwdma_mask,
2386 dev->udma_mask);
2387 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2388
458337db
TH
2389 switch (sel) {
2390 case ATA_DNXFER_PIO:
2391 highbit = fls(pio_mask) - 1;
2392 pio_mask &= ~(1 << highbit);
2393 break;
2394
2395 case ATA_DNXFER_DMA:
2396 if (udma_mask) {
2397 highbit = fls(udma_mask) - 1;
2398 udma_mask &= ~(1 << highbit);
2399 if (!udma_mask)
2400 return -ENOENT;
2401 } else if (mwdma_mask) {
2402 highbit = fls(mwdma_mask) - 1;
2403 mwdma_mask &= ~(1 << highbit);
2404 if (!mwdma_mask)
2405 return -ENOENT;
2406 }
2407 break;
2408
2409 case ATA_DNXFER_40C:
2410 udma_mask &= ATA_UDMA_MASK_40C;
2411 break;
2412
2413 case ATA_DNXFER_FORCE_PIO0:
2414 pio_mask &= 1;
2415 case ATA_DNXFER_FORCE_PIO:
2416 mwdma_mask = 0;
2417 udma_mask = 0;
2418 break;
2419
458337db
TH
2420 default:
2421 BUG();
2422 }
2423
2424 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2425
2426 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2427 return -ENOENT;
2428
2429 if (!quiet) {
2430 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2431 snprintf(buf, sizeof(buf), "%s:%s",
2432 ata_mode_string(xfer_mask),
2433 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2434 else
2435 snprintf(buf, sizeof(buf), "%s",
2436 ata_mode_string(xfer_mask));
2437
2438 ata_dev_printk(dev, KERN_WARNING,
2439 "limiting speed to %s\n", buf);
2440 }
cf176e1a
TH
2441
2442 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2443 &dev->udma_mask);
2444
cf176e1a 2445 return 0;
cf176e1a
TH
2446}
2447
/* Program @dev's transfer mode with SET FEATURES - XFER MODE and
 * revalidate the device afterwards.  Returns 0 on success, -EIO if the
 * device rejected the command, or the revalidation errno.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* tell EH that the revalidation IDENTIFY is a consequence of
	 * the mode change, not a new device event
	 */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2482
1da177e4
LT
2483/**
2484 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2485 * @ap: port on which timings will be programmed
e82cbdb9 2486 * @r_failed_dev: out paramter for failed device
1da177e4 2487 *
e82cbdb9
TH
2488 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2489 * ata_set_mode() fails, pointer to the failing device is
2490 * returned in @r_failed_dev.
780a87f7 2491 *
1da177e4 2492 * LOCKING:
0cba632b 2493 * PCI/etc. bus probe sem.
e82cbdb9
TH
2494 *
2495 * RETURNS:
2496 * 0 on success, negative errno otherwise
1da177e4 2497 */
1ad8e7f9 2498int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2499{
e8e0619f 2500 struct ata_device *dev;
e82cbdb9 2501 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2502
3adcebb2 2503 /* has private set_mode? */
b229a7b0
A
2504 if (ap->ops->set_mode)
2505 return ap->ops->set_mode(ap, r_failed_dev);
3adcebb2 2506
a6d5a51c
TH
2507 /* step 1: calculate xfer_mask */
2508 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2509 unsigned int pio_mask, dma_mask;
a6d5a51c 2510
e8e0619f
TH
2511 dev = &ap->device[i];
2512
e1211e3f 2513 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2514 continue;
2515
3373efd8 2516 ata_dev_xfermask(dev);
1da177e4 2517
acf356b1
TH
2518 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2519 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2520 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2521 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2522
4f65977d 2523 found = 1;
5444a6f4
AC
2524 if (dev->dma_mode)
2525 used_dma = 1;
a6d5a51c 2526 }
4f65977d 2527 if (!found)
e82cbdb9 2528 goto out;
a6d5a51c
TH
2529
2530 /* step 2: always set host PIO timings */
e8e0619f
TH
2531 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2532 dev = &ap->device[i];
2533 if (!ata_dev_enabled(dev))
2534 continue;
2535
2536 if (!dev->pio_mode) {
f15a1daf 2537 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2538 rc = -EINVAL;
e82cbdb9 2539 goto out;
e8e0619f
TH
2540 }
2541
2542 dev->xfer_mode = dev->pio_mode;
2543 dev->xfer_shift = ATA_SHIFT_PIO;
2544 if (ap->ops->set_piomode)
2545 ap->ops->set_piomode(ap, dev);
2546 }
1da177e4 2547
a6d5a51c 2548 /* step 3: set host DMA timings */
e8e0619f
TH
2549 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2550 dev = &ap->device[i];
2551
2552 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2553 continue;
2554
2555 dev->xfer_mode = dev->dma_mode;
2556 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2557 if (ap->ops->set_dmamode)
2558 ap->ops->set_dmamode(ap, dev);
2559 }
1da177e4
LT
2560
2561 /* step 4: update devices' xfer mode */
83206a29 2562 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2563 dev = &ap->device[i];
1da177e4 2564
18d90deb 2565 /* don't update suspended devices' xfer mode */
02670bf3 2566 if (!ata_dev_ready(dev))
83206a29
TH
2567 continue;
2568
3373efd8 2569 rc = ata_dev_set_mode(dev);
5bbc53f4 2570 if (rc)
e82cbdb9 2571 goto out;
83206a29 2572 }
1da177e4 2573
e8e0619f
TH
2574 /* Record simplex status. If we selected DMA then the other
2575 * host channels are not permitted to do so.
5444a6f4 2576 */
cca3974e 2577 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2578 ap->host->simplex_claimed = ap;
5444a6f4 2579
e8e0619f 2580 /* step5: chip specific finalisation */
1da177e4
LT
2581 if (ap->ops->post_set_mode)
2582 ap->ops->post_set_mode(ap);
e82cbdb9
TH
2583 out:
2584 if (rc)
2585 *r_failed_dev = dev;
2586 return rc;
1da177e4
LT
2587}
2588
1fdffbce
JG
2589/**
2590 * ata_tf_to_host - issue ATA taskfile to host controller
2591 * @ap: port to which command is being issued
2592 * @tf: ATA taskfile register set
2593 *
2594 * Issues ATA taskfile register set to ATA host controller,
2595 * with proper synchronization with interrupt handler and
2596 * other threads.
2597 *
2598 * LOCKING:
cca3974e 2599 * spin_lock_irqsave(host lock)
1fdffbce
JG
2600 */
2601
2602static inline void ata_tf_to_host(struct ata_port *ap,
2603 const struct ata_taskfile *tf)
2604{
2605 ap->ops->tf_load(ap, tf);
2606 ap->ops->exec_command(ap, tf);
2607}
2608
1da177e4
LT
2609/**
2610 * ata_busy_sleep - sleep until BSY clears, or timeout
2611 * @ap: port containing status register to be polled
2612 * @tmout_pat: impatience timeout
2613 * @tmout: overall timeout
2614 *
780a87f7
JG
2615 * Sleep until ATA Status register bit BSY clears,
2616 * or a timeout occurs.
2617 *
d1adc1bb
TH
2618 * LOCKING:
2619 * Kernel thread context (may sleep).
2620 *
2621 * RETURNS:
2622 * 0 on success, -errno otherwise.
1da177e4 2623 */
d1adc1bb
TH
2624int ata_busy_sleep(struct ata_port *ap,
2625 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2626{
2627 unsigned long timer_start, timeout;
2628 u8 status;
2629
2630 status = ata_busy_wait(ap, ATA_BUSY, 300);
2631 timer_start = jiffies;
2632 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2633 while (status != 0xff && (status & ATA_BUSY) &&
2634 time_before(jiffies, timeout)) {
1da177e4
LT
2635 msleep(50);
2636 status = ata_busy_wait(ap, ATA_BUSY, 3);
2637 }
2638
d1adc1bb 2639 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2640 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2641 "port is slow to respond, please be patient "
2642 "(Status 0x%x)\n", status);
1da177e4
LT
2643
2644 timeout = timer_start + tmout;
d1adc1bb
TH
2645 while (status != 0xff && (status & ATA_BUSY) &&
2646 time_before(jiffies, timeout)) {
1da177e4
LT
2647 msleep(50);
2648 status = ata_chk_status(ap);
2649 }
2650
d1adc1bb
TH
2651 if (status == 0xff)
2652 return -ENODEV;
2653
1da177e4 2654 if (status & ATA_BUSY) {
f15a1daf 2655 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2656 "(%lu secs, Status 0x%x)\n",
2657 tmout / HZ, status);
d1adc1bb 2658 return -EBUSY;
1da177e4
LT
2659 }
2660
2661 return 0;
2662}
2663
/* Post-reset processing: wait for the devices found in ata_devchk()
 * to come out of BSY and reselect device 0.  @devmask bit 0/1 indicate
 * presence of device 0/1.
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		/* after reset, nsect/lbal read back as 1/1 once the
		 * device's registers become accessible
		 */
		nsect = ioread8(ioaddr->nsect_addr);
		lbal = ioread8(ioaddr->lbal_addr);
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;	/* give up on device 1 */
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
2705
1da177e4
LT
2706static unsigned int ata_bus_softreset(struct ata_port *ap,
2707 unsigned int devmask)
2708{
2709 struct ata_ioports *ioaddr = &ap->ioaddr;
2710
44877b4e 2711 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
2712
2713 /* software reset. causes dev0 to be selected */
0d5ff566
TH
2714 iowrite8(ap->ctl, ioaddr->ctl_addr);
2715 udelay(20); /* FIXME: flush */
2716 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2717 udelay(20); /* FIXME: flush */
2718 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2719
2720 /* spec mandates ">= 2ms" before checking status.
2721 * We wait 150ms, because that was the magic delay used for
2722 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2723 * between when the ATA command register is written, and then
2724 * status is checked. Because waiting for "a while" before
2725 * checking status is fine, post SRST, we perform this magic
2726 * delay here as well.
09c7ad79
AC
2727 *
2728 * Old drivers/ide uses the 2mS rule and then waits for ready
1da177e4
LT
2729 */
2730 msleep(150);
2731
2e9edbf8 2732 /* Before we perform post reset processing we want to see if
298a41ca
TH
2733 * the bus shows 0xFF because the odd clown forgets the D7
2734 * pulldown resistor.
2735 */
d1adc1bb
TH
2736 if (ata_check_status(ap) == 0xFF)
2737 return 0;
09c7ad79 2738
1da177e4
LT
2739 ata_bus_post_reset(ap, devmask);
2740
2741 return 0;
2742}
2743
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 present */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	/* err == 0x81 means device 1 failed diagnostics; skip it */
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2830
d7bb4cc7
TH
2831/**
2832 * sata_phy_debounce - debounce SATA phy status
2833 * @ap: ATA port to debounce SATA phy status for
2834 * @params: timing parameters { interval, duratinon, timeout } in msec
2835 *
2836 * Make sure SStatus of @ap reaches stable state, determined by
2837 * holding the same value where DET is not 1 for @duration polled
2838 * every @interval, before @timeout. Timeout constraints the
2839 * beginning of the stable state. Because, after hot unplugging,
2840 * DET gets stuck at 1 on some controllers, this functions waits
2841 * until timeout then returns 0 if DET is stable at 1.
2842 *
2843 * LOCKING:
2844 * Kernel thread context (may sleep)
2845 *
2846 * RETURNS:
2847 * 0 on success, -errno on failure.
2848 */
2849int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
7a7921e8 2850{
d7bb4cc7
TH
2851 unsigned long interval_msec = params[0];
2852 unsigned long duration = params[1] * HZ / 1000;
2853 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2854 unsigned long last_jiffies;
2855 u32 last, cur;
2856 int rc;
2857
2858 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2859 return rc;
2860 cur &= 0xf;
2861
2862 last = cur;
2863 last_jiffies = jiffies;
2864
2865 while (1) {
2866 msleep(interval_msec);
2867 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2868 return rc;
2869 cur &= 0xf;
2870
2871 /* DET stable? */
2872 if (cur == last) {
2873 if (cur == 1 && time_before(jiffies, timeout))
2874 continue;
2875 if (time_after(jiffies, last_jiffies + duration))
2876 return 0;
2877 continue;
2878 }
2879
2880 /* unstable, start over */
2881 last = cur;
2882 last_jiffies = jiffies;
2883
2884 /* check timeout */
2885 if (time_after(jiffies, timeout))
2886 return -EBUSY;
2887 }
2888}
2889
/**
 *	sata_phy_resume - resume SATA phy
 *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duratinon, timeout } in msec
 *
 *	Resume SATA phy of @ap and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	/* DET = 0 (no reset request), IPM = 3 (no power management) */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params);
}
2923
f5914a46
TH
2924static void ata_wait_spinup(struct ata_port *ap)
2925{
2926 struct ata_eh_context *ehc = &ap->eh_context;
2927 unsigned long end, secs;
2928 int rc;
2929
2930 /* first, debounce phy if SATA */
2931 if (ap->cbl == ATA_CBL_SATA) {
e9c83914 2932 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
f5914a46
TH
2933
2934 /* if debounced successfully and offline, no need to wait */
2935 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2936 return;
2937 }
2938
2939 /* okay, let's give the drive time to spin up */
2940 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2941 secs = ((end - jiffies) + HZ - 1) / HZ;
2942
2943 if (time_after(jiffies, end))
2944 return;
2945
2946 if (secs > 5)
2947 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2948 "(%lu secs)\n", secs);
2949
2950 schedule_timeout_uninterruptible(end - jiffies);
2951}
2952
/**
 *	ata_std_prereset - prepare for reset
 *	@ap: ATA port to be reset
 *
 *	@ap is about to be reset.  Initialize it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume & hotplug spinup */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
		ata_wait_spinup(ap);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_resume(ap, timing);
		/* -EOPNOTSUPP just means no SCR access; not fatal */
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
			return rc;
		}
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	return 0;
}
3003
c2bd5804
TH
3004/**
3005 * ata_std_softreset - reset host port via ATA SRST
3006 * @ap: port to reset
c2bd5804
TH
3007 * @classes: resulting classes of attached devices
3008 *
52783c5d 3009 * Reset host port using ATA SRST.
c2bd5804
TH
3010 *
3011 * LOCKING:
3012 * Kernel thread context (may sleep)
3013 *
3014 * RETURNS:
3015 * 0 on success, -errno otherwise.
3016 */
2bf2cb26 3017int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
c2bd5804
TH
3018{
3019 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3020 unsigned int devmask = 0, err_mask;
3021 u8 err;
3022
3023 DPRINTK("ENTER\n");
3024
81952c54 3025 if (ata_port_offline(ap)) {
3a39746a
TH
3026 classes[0] = ATA_DEV_NONE;
3027 goto out;
3028 }
3029
c2bd5804
TH
3030 /* determine if device 0/1 are present */
3031 if (ata_devchk(ap, 0))
3032 devmask |= (1 << 0);
3033 if (slave_possible && ata_devchk(ap, 1))
3034 devmask |= (1 << 1);
3035
c2bd5804
TH
3036 /* select device 0 again */
3037 ap->ops->dev_select(ap, 0);
3038
3039 /* issue bus reset */
3040 DPRINTK("about to softreset, devmask=%x\n", devmask);
3041 err_mask = ata_bus_softreset(ap, devmask);
3042 if (err_mask) {
f15a1daf
TH
3043 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
3044 err_mask);
c2bd5804
TH
3045 return -EIO;
3046 }
3047
3048 /* determine by signature whether we have ATA or ATAPI devices */
3049 classes[0] = ata_dev_try_classify(ap, 0, &err);
3050 if (slave_possible && err != 0x81)
3051 classes[1] = ata_dev_try_classify(ap, 1, &err);
3052
3a39746a 3053 out:
c2bd5804
TH
3054 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3055 return 0;
3056}
3057
/**
 *	sata_port_hardreset - reset port via SATA phy reset
 *	@ap: port to reset
 *	@timing: timing parameters { interval, duratinon, timeout } in msec
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: disable the SATA phy */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1: perform interface communication initialization */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	rc = sata_phy_resume(ap, timing);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3115
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@class: resulting class of attached device
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
	/* debounce timing comes from the EH context (normal vs hotplug) */
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_port_hardreset(ap, timing);
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
		/* no link after reset - nothing attached, not an error */
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (device not ready)\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	/* read the signature taskfile to tell ATA from ATAPI */
	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3168
/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError - the reset itself raises diagnostic bits */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts (only for drivers without new-style EH,
	 * which manages interrupt state itself)
	 */
	if (!ap->ops->error_handler)
		ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control; some controllers have no ctl register */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3216
623a3128
TH
3217/**
3218 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3219 * @dev: device to compare against
3220 * @new_class: class of the new device
3221 * @new_id: IDENTIFY page of the new device
3222 *
3223 * Compare @new_class and @new_id against @dev and determine
3224 * whether @dev is the device indicated by @new_class and
3225 * @new_id.
3226 *
3227 * LOCKING:
3228 * None.
3229 *
3230 * RETURNS:
3231 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3232 */
3373efd8
TH
3233static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3234 const u16 *new_id)
623a3128
TH
3235{
3236 const u16 *old_id = dev->id;
a0cf733b
TH
3237 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3238 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3239 u64 new_n_sectors;
3240
3241 if (dev->class != new_class) {
f15a1daf
TH
3242 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3243 dev->class, new_class);
623a3128
TH
3244 return 0;
3245 }
3246
a0cf733b
TH
3247 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3248 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3249 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3250 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3251 new_n_sectors = ata_id_n_sectors(new_id);
3252
3253 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3254 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3255 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3256 return 0;
3257 }
3258
3259 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3260 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3261 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3262 return 0;
3263 }
3264
3265 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3266 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3267 "%llu != %llu\n",
3268 (unsigned long long)dev->n_sectors,
3269 (unsigned long long)new_n_sectors);
623a3128
TH
3270 return 0;
3271 }
3272
3273 return 1;
3274}
3275
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* use the port's scratch sector buffer for the fresh ID page */
	u16 *id = (void *)dev->ap->sector_buf;
	int rc;

	if (!ata_dev_enabled(dev)) {
		rc = -ENODEV;
		goto fail;
	}

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		goto fail;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id)) {
		rc = -ENODEV;
		goto fail;
	}

	/* same device confirmed - adopt the freshly read ID page */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc == 0)
		return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3323
6919a0a6
AC
/* one quirk-table entry: model string, optional firmware revision,
 * and the ATA_HORKAGE_* flags to apply when it matches
 */
struct ata_blacklist_entry {
	const char *model_num;		/* IDENTIFY model string to match */
	const char *model_rev;		/* firmware rev, or NULL for any */
	unsigned long horkage;		/* ATA_HORKAGE_* flags to apply */
};

/* quirk table consulted by ata_device_blacklisted(); terminated by
 * an all-NULL sentinel entry
 */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B",	NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC",	NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 |
						ATA_HORKAGE_DMA_RW_ONLY },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor 6L250S0",	"BANC1G10",	ATA_HORKAGE_NONCQ },
	/* NCQ hard hangs device under heavier load, needs hard power cycle */
	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* Devices with NCQ limits */

	/* End Marker */
	{ }
};
2e9edbf8 3388
6919a0a6 3389unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3390{
8bfa79fc
TH
3391 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3392 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3393 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3394
8bfa79fc
TH
3395 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3396 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3397
6919a0a6 3398 while (ad->model_num) {
8bfa79fc 3399 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3400 if (ad->model_rev == NULL)
3401 return ad->horkage;
8bfa79fc 3402 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3403 return ad->horkage;
f4b15fef 3404 }
6919a0a6 3405 ad++;
f4b15fef 3406 }
1da177e4
LT
3407 return 0;
3408}
3409
6919a0a6
AC
3410static int ata_dma_blacklisted(const struct ata_device *dev)
3411{
3412 /* We don't support polling DMA.
3413 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3414 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3415 */
3416 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3417 (dev->flags & ATA_DFLAG_CDB_INTR))
3418 return 1;
3419 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3420}
3421
a6d5a51c
TH
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	The filters are applied in order: controller capability, host
 *	cable, drive-reported cable, device capability, CFA pairing,
 *	DMA blacklist, simplex claim, then the LLDD's mode_filter hook.
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 */
	if (ap->cbl == ATA_CBL_PATA40)
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);	/* cap at UDMA2 */
	/* Apply drive side cable rule. Unknown or 80 pin cables reported
	 * host side are checked drive side as well. Cases where we know a
	 * 40wire cable is used safely for 80 are not checked here.
	 */
	if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);

	/* intersect with what the device itself advertises */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on simplex hosts only one port may do DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	/* let the controller driver veto modes last */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
3491
1da177e4
LT
3492/**
3493 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3494 * @dev: Device to which command will be sent
3495 *
780a87f7
JG
3496 * Issue SET FEATURES - XFER MODE command to device @dev
3497 * on port @ap.
3498 *
1da177e4 3499 * LOCKING:
0cba632b 3500 * PCI/etc. bus probe sem.
83206a29
TH
3501 *
3502 * RETURNS:
3503 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3504 */
3505
3373efd8 3506static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3507{
a0123703 3508 struct ata_taskfile tf;
83206a29 3509 unsigned int err_mask;
1da177e4
LT
3510
3511 /* set up set-features taskfile */
3512 DPRINTK("set features - xfer mode\n");
3513
3373efd8 3514 ata_tf_init(dev, &tf);
a0123703
TH
3515 tf.command = ATA_CMD_SET_FEATURES;
3516 tf.feature = SETFEATURES_XFER;
3517 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3518 tf.protocol = ATA_PROT_NODATA;
3519 tf.nsect = dev->xfer_mode;
1da177e4 3520
3373efd8 3521 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3522
83206a29
TH
3523 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3524 return err_mask;
1da177e4
LT
3525}
3526
8bf62ece
AL
3527/**
3528 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3529 * @dev: Device to which command will be sent
e2a7f77a
RD
3530 * @heads: Number of heads (taskfile parameter)
3531 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3532 *
3533 * LOCKING:
6aff8f1f
TH
3534 * Kernel thread context (may sleep)
3535 *
3536 * RETURNS:
3537 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3538 */
3373efd8
TH
3539static unsigned int ata_dev_init_params(struct ata_device *dev,
3540 u16 heads, u16 sectors)
8bf62ece 3541{
a0123703 3542 struct ata_taskfile tf;
6aff8f1f 3543 unsigned int err_mask;
8bf62ece
AL
3544
3545 /* Number of sectors per track 1-255. Number of heads 1-16 */
3546 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3547 return AC_ERR_INVALID;
8bf62ece
AL
3548
3549 /* set up init dev params taskfile */
3550 DPRINTK("init dev params \n");
3551
3373efd8 3552 ata_tf_init(dev, &tf);
a0123703
TH
3553 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3554 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3555 tf.protocol = ATA_PROT_NODATA;
3556 tf.nsect = sectors;
3557 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3558
3373efd8 3559 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3560
6aff8f1f
TH
3561 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3562 return err_mask;
8bf62ece
AL
3563}
3564
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command,
 *	undo the sg trimming done at setup time, and copy any
 *	read data back out of the per-tag pad buffer.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* n_elem may be 0 if the whole sg collapsed into the pad */
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
3622
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.  Segments that
 *	cross a 64KB physical boundary are split, since a single
 *	PRD entry may not span one.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;	/* split at the 64K line */

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
3674/**
3675 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3676 * @qc: Metadata associated with taskfile to check
3677 *
780a87f7
JG
3678 * Allow low-level driver to filter ATA PACKET commands, returning
3679 * a status indicating whether or not it is OK to use DMA for the
3680 * supplied PACKET command.
3681 *
1da177e4 3682 * LOCKING:
cca3974e 3683 * spin_lock_irqsave(host lock)
0cba632b 3684 *
1da177e4
LT
3685 * RETURNS: 0 when ATAPI DMA can be used
3686 * nonzero otherwise
3687 */
3688int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3689{
3690 struct ata_port *ap = qc->ap;
3691 int rc = 0; /* Assume ATAPI DMA is OK by default */
3692
6f23a31d
AL
3693 /* some drives can only do ATAPI DMA on read/write */
3694 if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
3695 struct scsi_cmnd *cmd = qc->scsicmd;
3696 u8 *scsicmd = cmd->cmnd;
3697
3698 switch (scsicmd[0]) {
3699 case READ_10:
3700 case WRITE_10:
3701 case READ_12:
3702 case WRITE_12:
3703 case READ_6:
3704 case WRITE_6:
3705 /* atapi dma maybe ok */
3706 break;
3707 default:
3708 /* turn off atapi dma */
3709 return 1;
3710 }
3711 }
3712
1da177e4
LT
3713 if (ap->ops->check_atapi_dma)
3714 rc = ap->ops->check_atapi_dma(qc);
3715
3716 return rc;
3717}
3718/**
3719 * ata_qc_prep - Prepare taskfile for submission
3720 * @qc: Metadata associated with taskfile to be prepared
3721 *
780a87f7
JG
3722 * Prepare ATA taskfile for submission.
3723 *
1da177e4 3724 * LOCKING:
cca3974e 3725 * spin_lock_irqsave(host lock)
1da177e4
LT
3726 */
3727void ata_qc_prep(struct ata_queued_cmd *qc)
3728{
3729 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3730 return;
3731
3732 ata_fill_sg(qc);
3733}
3734
e46834cd
BK
/* no-op ->qc_prep for controllers that need no PRD/setup work */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3736
0cba632b
JG
3737/**
3738 * ata_sg_init_one - Associate command with memory buffer
3739 * @qc: Command to be associated
3740 * @buf: Memory buffer
3741 * @buflen: Length of memory buffer, in bytes.
3742 *
3743 * Initialize the data-related elements of queued_cmd @qc
3744 * to point to a single memory buffer, @buf of byte length @buflen.
3745 *
3746 * LOCKING:
cca3974e 3747 * spin_lock_irqsave(host lock)
0cba632b
JG
3748 */
3749
1da177e4
LT
3750void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3751{
1da177e4
LT
3752 qc->flags |= ATA_QCFLAG_SINGLE;
3753
cedc9a47 3754 qc->__sg = &qc->sgent;
1da177e4 3755 qc->n_elem = 1;
cedc9a47 3756 qc->orig_n_elem = 1;
1da177e4 3757 qc->buf_virt = buf;
233277ca 3758 qc->nbytes = buflen;
1da177e4 3759
61c0596c 3760 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3761}
3762
0cba632b
JG
3763/**
3764 * ata_sg_init - Associate command with scatter-gather table.
3765 * @qc: Command to be associated
3766 * @sg: Scatter-gather table.
3767 * @n_elem: Number of elements in s/g table.
3768 *
3769 * Initialize the data-related elements of queued_cmd @qc
3770 * to point to a scatter-gather table @sg, containing @n_elem
3771 * elements.
3772 *
3773 * LOCKING:
cca3974e 3774 * spin_lock_irqsave(host lock)
0cba632b
JG
3775 */
3776
1da177e4
LT
3777void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3778 unsigned int n_elem)
3779{
3780 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3781 qc->__sg = sg;
1da177e4 3782 qc->n_elem = n_elem;
cedc9a47 3783 qc->orig_n_elem = n_elem;
1da177e4
LT
3784}
3785
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *	If the buffer length is not a multiple of 4 the transfer is
 *	lengthened to a 32-bit boundary via the per-tag pad buffer.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI transfers may be oddly sized */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for a write, pre-fill the pad buffer with the tail bytes */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;	/* everything moved into the pad */

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		/* nothing left to map - the pad sg carries it all */
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
3854
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *	The last sg entry is trimmed/extended via the per-tag pad buffer
 *	so the total transfer length ends on a 32-bit boundary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI transfers may be oddly sized */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		/* for a write, pre-fill the pad buffer with the tail bytes */
		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;	/* last entry collapsed into the pad */

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the collapsed trailing entry before mapping */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
3939
0baab86b 3940/**
c893a3ae 3941 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3942 * @buf: Buffer to swap
3943 * @buf_words: Number of 16-bit words in buffer.
3944 *
3945 * Swap halves of 16-bit words if needed to convert from
3946 * little-endian byte order to native cpu byte order, or
3947 * vice-versa.
3948 *
3949 * LOCKING:
6f0ef4fa 3950 * Inherited from caller.
0baab86b 3951 */
1da177e4
LT
3952void swap_buf_le16(u16 *buf, unsigned int buf_words)
3953{
3954#ifdef __BIG_ENDIAN
3955 unsigned int i;
3956
3957 for (i = 0; i < buf_words; i++)
3958 buf[i] = le16_to_cpu(buf[i]);
3959#endif /* __BIG_ENDIAN */
3960}
3961
6ae4cfb5 3962/**
0d5ff566 3963 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 3964 * @adev: device to target
6ae4cfb5
AL
3965 * @buf: data buffer
3966 * @buflen: buffer length
344babaa 3967 * @write_data: read/write
6ae4cfb5
AL
3968 *
3969 * Transfer data from/to the device data register by PIO.
3970 *
3971 * LOCKING:
3972 * Inherited from caller.
6ae4cfb5 3973 */
0d5ff566
TH
3974void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
3975 unsigned int buflen, int write_data)
1da177e4 3976{
a6b2c5d4 3977 struct ata_port *ap = adev->ap;
6ae4cfb5 3978 unsigned int words = buflen >> 1;
1da177e4 3979
6ae4cfb5 3980 /* Transfer multiple of 2 bytes */
1da177e4 3981 if (write_data)
0d5ff566 3982 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 3983 else
0d5ff566 3984 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
3985
3986 /* Transfer trailing 1 byte, if any. */
3987 if (unlikely(buflen & 0x01)) {
3988 u16 align_buf[1] = { 0 };
3989 unsigned char *trailing_buf = buf + buflen - 1;
3990
3991 if (write_data) {
3992 memcpy(align_buf, trailing_buf, 1);
0d5ff566 3993 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 3994 } else {
0d5ff566 3995 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
3996 memcpy(trailing_buf, align_buf, 1);
3997 }
3998 }
1da177e4
LT
3999}
4000
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_flags;

	/* keep local interrupts off for the whole PIO burst */
	local_irq_save(irq_flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_flags);
}
4022
4023
6ae4cfb5
AL
/**
 *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 *	@qc: Command on going
 *
 *	Transfer ATA_SECT_SIZE of data from/to the ATA device,
 *	advancing the qc's sg cursor.  Handles highmem pages by
 *	kmapping them with local interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the command -> move HSM to its final state */
	if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is always mapped - no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
	}

	/* advance the transfer cursor by one sector */
	qc->curbytes += ATA_SECT_SIZE;
	qc->cursg_ofs += ATA_SECT_SIZE;

	/* current sg entry exhausted - step to the next one */
	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
1da177e4 4080
/**
 *	ata_pio_sectors - Transfer one or many 512-byte sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many ATA_SECT_SIZE of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		/* One DRQ block is up to multi_count sectors, capped by
		 * what remains of the request.
		 */
		nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);
}
4107
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* Set up the next HSM state per ATAPI protocol variant. */
	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows the CDB */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		/* no data phase; wait for command completion */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
4143
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* This chunk satisfies the remainder of the request: finish
	 * the HSM after it is transferred.
	 */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* NOTE(review): bytes >> 1 drops an odd trailing byte
		 * from the pad loop — confirm devices never report an
		 * odd byte count here.
		 */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		/* Atomic kmap of highmem page; interrupts masked while
		 * the KM_IRQ0 slot is in use.
		 */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	/* Advance cursors by the amount actually transferred. */
	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	/* More bytes requested by the device than fit in this sg
	 * entry/page: continue with the next chunk.
	 */
	if (bytes)
		goto next_sg;
}
4238
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count the device wants to transfer this DRQ */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	/* bad ireason or direction mismatch: flag an HSM violation
	 * and let the error handler take over
	 */
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
4288
4289/**
c234fb00
AL
4290 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4291 * @ap: the target ata_port
4292 * @qc: qc on going
1da177e4 4293 *
c234fb00
AL
4294 * RETURNS:
4295 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4296 */
c234fb00
AL
4297
4298static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4299{
c234fb00
AL
4300 if (qc->tf.flags & ATA_TFLAG_POLLING)
4301 return 1;
1da177e4 4302
c234fb00
AL
4303 if (ap->hsm_task_state == HSM_ST_FIRST) {
4304 if (qc->tf.protocol == ATA_PROT_PIO &&
4305 (qc->tf.flags & ATA_TFLAG_WRITE))
4306 return 1;
1da177e4 4307
c234fb00
AL
4308 if (is_atapi_taskfile(&qc->tf) &&
4309 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4310 return 1;
fe79e683
AL
4311 }
4312
c234fb00
AL
4313 return 0;
4314}
1da177e4 4315
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new-style EH: freeze the port on HSM violations */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable port interrupt before
					 * handing the qc back
					 */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: just complete; error recovery is the
		 * caller's problem
		 */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}

	ata_altstatus(ap); /* flush */
}
4367
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Drive the PIO/ATAPI host state machine one or more steps based
 *	on the device @status, completing or failing @qc as needed.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
			ata_altstatus(ap); /* flush */
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					/* drain the junk block on reads */
					ata_pio_sectors(qc);
					ata_altstatus(ap);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				ata_altstatus(ap);
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		ata_altstatus(ap); /* flush */
		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
4605
/**
 *	ata_pio_task - workqueue entry for polled PIO state machine
 *	@work: embedded work struct of the port whose HSM to advance
 *
 *	Poll device status and feed it to ata_hsm_move() until the
 *	HSM no longer requests polling; requeues itself with a short
 *	pause when the device stays busy.
 *
 *	LOCKING:
 *	None; runs in workqueue context.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy: reschedule ourselves after a pause */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
4643
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port from which a free qc tag is allocated
 *
 *	RETURNS:
 *	Pointer to an allocated qc, or %NULL if the port is frozen
 *	or no tag is free.
 *
 *	LOCKING:
 *	None.
 */

static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			/* atomically claimed tag i */
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
4674
4675/**
4676 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4677 * @dev: Device from whom we request an available command structure
4678 *
4679 * LOCKING:
0cba632b 4680 * None.
1da177e4
LT
4681 */
4682
3373efd8 4683struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4684{
3373efd8 4685 struct ata_port *ap = dev->ap;
1da177e4
LT
4686 struct ata_queued_cmd *qc;
4687
4688 qc = ata_qc_new(ap);
4689 if (qc) {
1da177e4
LT
4690 qc->scsicmd = NULL;
4691 qc->ap = ap;
4692 qc->dev = dev;
1da177e4 4693
2c13b7ce 4694 ata_qc_reinit(qc);
1da177e4
LT
4695 }
4696
4697 return qc;
4698}
4699
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison the tag before releasing it so stale users trip */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
4724
/**
 *	__ata_qc_complete - finish an active qc unconditionally
 *	@qc: Command to complete
 *
 *	Tear down DMA mappings, clear the qc's active bookkeeping on
 *	the port, and invoke its completion callback.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ap->sactive &= ~(1 << qc->tag);
	else
		ap->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4751
/* Snapshot the device's current taskfile into qc->result_tf,
 * preserving the original taskfile flags.
 */
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->tf_read(ap, &qc->result_tf);
}
4759
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
4819
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This functions is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active is compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	/* bits that changed between old and new active masks */
	done_mask = ap->qc_active ^ qc_active;

	/* a bit may only go active->inactive; the reverse means the
	 * LLDD reported a tag becoming active that we never issued
	 */
	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
4868
1da177e4
LT
4869static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4870{
4871 struct ata_port *ap = qc->ap;
4872
4873 switch (qc->tf.protocol) {
3dc1d881 4874 case ATA_PROT_NCQ:
1da177e4
LT
4875 case ATA_PROT_DMA:
4876 case ATA_PROT_ATAPI_DMA:
4877 return 1;
4878
4879 case ATA_PROT_ATAPI:
4880 case ATA_PROT_PIO:
1da177e4
LT
4881 if (ap->flags & ATA_FLAG_PIO_DMA)
4882 return 1;
4883
4884 /* fall through */
4885
4886 default:
4887 return 0;
4888 }
4889
4890 /* never reached */
4891}
4892
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(ap->sactive & (1 << qc->tag));
		ap->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(ap->sactive);
		ap->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		/* map scatter-gather or single buffer for DMA */
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* S/G setup failed: report as system error and complete */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
4951
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* Some controllers show flaky interrupt behavior after
	 * setting xfer mode.  Use polling instead.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
		     qc->tf.feature == SETFEATURES_XFER) &&
	    (ap->flags & ATA_FLAG_SETXFER_POLLING))
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
5091
1da177e4
LT
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus: reading it does not clear INTRQ, so a busy
	 * device here means the interrupt was not for us yet
	 */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	/* advance the host state machine with interrupt context (in_wq=0) */
	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA status for EH if the DMA command failed */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->irq_ack(ap, 0); /* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
5192
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			/* only dispatch to ata_host_intr() for a command
			 * that is active and not being polled
			 */
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
5237
34bf2170
TH
/**
 *	sata_scr_valid - test whether SCRs are accessible
 *	@ap: ATA port to test SCR accessibility for
 *
 *	Test whether SCRs are accessible for @ap.  SCRs are usable
 *	only when the cable type is SATA and the LLDD supplies a
 *	->scr_read method.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_port *ap)
{
	return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
}
5254
/**
 *	sata_scr_read - read SCR register of the specified port
 *	@ap: ATA port to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @ap into *@val.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
{
	if (sata_scr_valid(ap)) {
		*val = ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}
5279
/**
 *	sata_scr_write - write SCR register of the specified port
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @ap.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		return 0;
	}
	return -EOPNOTSUPP;
}
5304
/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.  The
 *	flush is achieved by reading the register back.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		/* read back to post the write */
		ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}
5329
/**
 *	ata_port_online - test whether the given port is online
 *	@ap: ATA port to test
 *
 *	Test whether @ap is online.  Note that this function returns 0
 *	if online status of @ap cannot be obtained, so
 *	ata_port_online(ap) != !ata_port_offline(ap).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port online status is available and online.
 */
int ata_port_online(struct ata_port *ap)
{
	u32 sstatus;

	/* DET field (low nibble of SStatus) == 0x3 means device
	 * presence detected, PHY communication established
	 */
	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}
5352
/**
 *	ata_port_offline - test whether the given port is offline
 *	@ap: ATA port to test
 *
 *	Test whether @ap is offline.  Note that this function returns
 *	0 if offline status of @ap cannot be obtained, so
 *	ata_port_online(ap) != !ata_port_offline(ap).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port offline status is available and offline.
 */
int ata_port_offline(struct ata_port *ap)
{
	u32 sstatus;

	/* any DET value other than 0x3 counts as offline */
	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}
0baab86b 5375
/**
 *	ata_flush_cache - flush the device's write cache
 *	@dev: device whose cache is to be flushed
 *
 *	Issue FLUSH CACHE (EXT) to @dev if the device advertises a
 *	flushable write cache.  Devices that do not need flushing
 *	(per ata_try_flush_cache()) succeed trivially.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -EIO if the flush command failed.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	/* use the 48-bit opcode when the device requires it */
	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
5397
6ffa01d8 5398#ifdef CONFIG_PM
cca3974e
JG
/**
 *	ata_host_request_pm - schedule PM operation on all ports via EH
 *	@host: host whose ports are to be PM-operated
 *	@mesg: PM message to deliver
 *	@action: EH action to request (e.g. ATA_EH_SOFTRESET)
 *	@ehi_flags: EH info flags to set (e.g. ATA_EHI_QUIET)
 *	@wait: if non-zero, wait for each port's EH to finish and
 *	       propagate its result
 *
 *	For each port, record the PM request in the EH info under the
 *	port lock and kick EH.  When @wait is set, ports are handled
 *	sequentially and the first failure aborts the loop.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno from the first failed port otherwise.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH writes its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
5445
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	/* quiesce all ports via EH, waiting for completion */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

 fail:
	/* undo partial suspension before reporting failure */
	ata_host_resume(host);
	return rc;
}
5496
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallelly
 *	(wait=0 — no waiting for EH completion).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
6ffa01d8 5514#endif
500530f6 5515
c893a3ae
RD
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table (managed via
 *	devres, so no explicit free is needed on teardown).
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -ENOMEM or ata_pad_alloc()'s error on failure.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
5546
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	/* clear everything past ATA_DEVICE_CLEAR_OFFSET; fields before
	 * the offset (e.g. ap, devno) survive re-init
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);

	/* transfer-mode masks start wide open; probing narrows them */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5578
/**
 *	ata_port_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host *host,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = ata_print_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	/* the secondary channel of a legacy pair may carry its own
	 * port info (pinfo2); fall back to the primary info otherwise
	 */
	if (port_no == 1 && ent->pinfo2) {
		ap->pio_mask = ent->pinfo2->pio_mask;
		ap->mwdma_mask = ent->pinfo2->mwdma_mask;
		ap->udma_mask = ent->pinfo2->udma_mask;
		ap->flags |= ent->pinfo2->flags;
		ap->ops = ent->pinfo2->port_ops;
	} else {
		ap->pio_mask = ent->pio_mask;
		ap->mwdma_mask = ent->mwdma_mask;
		ap->udma_mask = ent->udma_mask;
		ap->flags |= ent->port_flags;
		ap->ops = ent->port_ops;
	}
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
5654
/**
 *	ata_port_init_shost - Initialize SCSI host associated with ATA port
 *	@ap: ATA port to initialize SCSI host for
 *	@shost: SCSI host associated with @ap
 *
 *	Initialize SCSI host @shost associated with ATA port @ap.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
{
	ap->scsi_host = shost;

	shost->unique_id = ap->print_id;
	shost->max_id = 16;
	shost->max_lun = 1;
	shost->max_channel = 1;
	/* 12 bytes covers the longest ATAPI CDB libata emits */
	shost->max_cmd_len = 12;
}
5675
/**
 *	ata_port_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.  Allocates a SCSI host
 *	for the port and initializes both the ata_port and its SCSI
 *	host.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, or NULL on error.
 */
static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
				      struct ata_host *host,
				      unsigned int port_no)
{
	struct Scsi_Host *shost;
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	/* drivers without new-style EH must provide some reset path */
	if (!ent->port_ops->error_handler &&
	    !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	/* the ata_port lives in the SCSI host's hostdata area */
	shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!shost)
		return NULL;

	shost->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(shost);

	ata_port_init(ap, host, ent, port_no);
	ata_port_init_shost(ap, shost);

	return ap;
}
5719
f0d36efd
TH
/**
 *	ata_host_release - devres release callback for an ata_host
 *	@gendev: device the host is attached to
 *	@res: devres resource (unused; host is fetched from drvdata)
 *
 *	Stop every port, stop the host, then drop the SCSI host
 *	references.  Ordering matters: all port_stop/host_stop calls
 *	complete before any scsi_host_put() can free a port.
 *
 *	LOCKING:
 *	Inherited from devres core (process context, may sleep).
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	/* first pass: stop ports while their memory is still valid */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);

	/* second pass: release the SCSI hosts (may free the ports) */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap)
			scsi_host_put(ap->scsi_host);

		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
5746
/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */

void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
5767
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int i;
	struct device *dev = ent->dev;
	struct ata_host *host;
	int rc;

	DPRINTK("ENTER\n");

	if (ent->irq == 0) {
		dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
		return 0;
	}

	/* all resources below are devres-managed; releasing the group
	 * on error frees everything acquired since this point
	 */
	if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
		return 0;

	/* alloc a container for our list of ATA ports (buses) */
	host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
			    (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host)
		goto err_out;
	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
	host->n_ports = ent->n_ports;
	host->irq = ent->irq;
	host->irq2 = ent->irq2;
	host->iomap = ent->iomap;
	host->private_data = ent->private_data;

	/* register each port bound to this device */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;
		int irq_line = ent->irq;

		ap = ata_port_add(ent, host, i);
		host->ports[i] = ap;
		if (!ap)
			goto err_out;

		/* dummy? */
		if (ent->dummy_port_mask & (1 << i)) {
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		/* start port */
		rc = ap->ops->port_start(ap);
		if (rc) {
			host->ports[i] = NULL;
			scsi_host_put(ap->scsi_host);
			goto err_out;
		}

		/* Report the secondary IRQ for second channel legacy */
		if (i == 1 && ent->irq2)
			irq_line = ent->irq2;

		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
				"ctl 0x%p bmdma 0x%p irq %d\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				irq_line);

		/* freeze port before requesting IRQ */
		ata_eh_freeze_port(ap);
	}

	/* obtain irq, that may be shared between channels */
	rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
			      ent->irq_flags, DRV_NAME, host);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* do we have a second IRQ for the other channel, eg legacy mode */
	if (ent->irq2) {
		/* We will get weird core code crashes later if this is true
		   so trap it now */
		BUG_ON(ent->irq == ent->irq2);

		rc = devm_request_irq(dev, ent->irq2,
				      ent->port_ops->irq_handler, ent->irq_flags,
				      DRV_NAME, host);
		if (rc) {
			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
				   ent->irq2, rc);
			goto err_out;
		}
	}

	/* resource acquisition complete */
	devres_remove_group(dev, ata_device_add);

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 scontrol;
		int rc;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->scsi_host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

 err_out:
	devres_release_group(dev, ata_device_add);
	/* NOTE(review): rc may be uninitialized here when we bail out
	 * before any assignment (e.g. devres_alloc failure) — the
	 * VPRINTK below would then print garbage; harmless debug-only,
	 * but worth confirming against upstream.
	 */
	VPRINTK("EXIT, returning %d\n", rc);
	return 0;
}
5970
720ba126
TH
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	/* old-style EH drivers have nothing to quiesce */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6028
0529c159
TH
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);
}
6045
f6d950e2
BK
/**
 *	ata_probe_ent_alloc - allocate and prefill a probe entry
 *	@dev: device the probe entry is for
 *	@port: port info template to copy masks/ops from
 *
 *	Allocate a devres-managed ata_probe_ent and populate it from
 *	@port.  The caller fills in device-specific fields (irq,
 *	ioaddr, n_ports, ...) afterwards.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New probe entry on success, NULL on allocation failure.
 */
struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->port_flags = port->flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;
	probe_ent->private_data = port->private_data;

	return probe_ent;
}
6071
1da177e4
LT
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
6097
0baab86b 6098
374b1873
JG
6099#ifdef CONFIG_PCI
6100
1da177e4
LT
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	/* the host was stored in drvdata at probe time */
	ata_host_detach(dev_get_drvdata(pci_dev_to_dev(pdev)));
}
6119
6120/* move to PCI subsystem */
057ace5e 6121int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6122{
6123 unsigned long tmp = 0;
6124
6125 switch (bits->width) {
6126 case 1: {
6127 u8 tmp8 = 0;
6128 pci_read_config_byte(pdev, bits->reg, &tmp8);
6129 tmp = tmp8;
6130 break;
6131 }
6132 case 2: {
6133 u16 tmp16 = 0;
6134 pci_read_config_word(pdev, bits->reg, &tmp16);
6135 tmp = tmp16;
6136 break;
6137 }
6138 case 4: {
6139 u32 tmp32 = 0;
6140 pci_read_config_dword(pdev, bits->reg, &tmp32);
6141 tmp = tmp32;
6142 break;
6143 }
6144
6145 default:
6146 return -EINVAL;
6147 }
6148
6149 tmp &= bits->mask;
6150
6151 return (tmp == bits->val) ? 1 : 0;
6152}
9b847548 6153
6ffa01d8 6154#ifdef CONFIG_PM
/* Save PCI state and power the device down for suspend.  Only a real
 * suspend (PM_EVENT_SUSPEND) drops the device to D3hot; for other PM
 * events the power state is left untouched.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	/* save config space before the device loses power */
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
6163
/* Bring a PCI device back to full power after suspend and re-enable it.
 *
 * Returns 0 on success or the negative error from pcim_enable_device()
 * (which is logged before returning).
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	/* restore D0 power and the config space saved at suspend time */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* managed (devres) enable; without it the device is unusable */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6181
3c5100c1 6182int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6183{
cca3974e 6184 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6185 int rc = 0;
6186
cca3974e 6187 rc = ata_host_suspend(host, mesg);
500530f6
TH
6188 if (rc)
6189 return rc;
6190
3c5100c1 6191 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6192
6193 return 0;
6194}
6195
6196int ata_pci_device_resume(struct pci_dev *pdev)
6197{
cca3974e 6198 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6199 int rc;
500530f6 6200
553c4aa6
TH
6201 rc = ata_pci_device_do_resume(pdev);
6202 if (rc == 0)
6203 ata_host_resume(host);
6204 return rc;
9b847548 6205}
6ffa01d8
TH
6206#endif /* CONFIG_PM */
6207
1da177e4
LT
6208#endif /* CONFIG_PCI */
6209
6210
1da177e4
LT
6211static int __init ata_init(void)
6212{
a8601e5f 6213 ata_probe_timeout *= HZ;
1da177e4
LT
6214 ata_wq = create_workqueue("ata");
6215 if (!ata_wq)
6216 return -ENOMEM;
6217
453b07ac
TH
6218 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6219 if (!ata_aux_wq) {
6220 destroy_workqueue(ata_wq);
6221 return -ENOMEM;
6222 }
6223
1da177e4
LT
6224 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6225 return 0;
6226}
6227
/* Module exit: tear down the workqueues created in ata_init(). */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

/* subsys_initcall runs earlier than regular module_init, so libata's
 * infrastructure is available before controller drivers register.
 */
subsys_initcall(ata_init);
module_exit(ata_exit);
6236
67846b30 6237static unsigned long ratelimit_time;
34af946a 6238static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6239
6240int ata_ratelimit(void)
6241{
6242 int rc;
6243 unsigned long flags;
6244
6245 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6246
6247 if (time_after(jiffies, ratelimit_time)) {
6248 rc = 1;
6249 ratelimit_time = jiffies + (HZ/5);
6250 } else
6251 rc = 0;
6252
6253 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6254
6255 return rc;
6256}
6257
c22daff4
TH
6258/**
6259 * ata_wait_register - wait until register value changes
6260 * @reg: IO-mapped register
6261 * @mask: Mask to apply to read register value
6262 * @val: Wait condition
6263 * @interval_msec: polling interval in milliseconds
6264 * @timeout_msec: timeout in milliseconds
6265 *
6266 * Waiting for some bits of register to change is a common
6267 * operation for ATA controllers. This function reads 32bit LE
6268 * IO-mapped register @reg and tests for the following condition.
6269 *
6270 * (*@reg & mask) != val
6271 *
6272 * If the condition is met, it returns; otherwise, the process is
6273 * repeated after @interval_msec until timeout.
6274 *
6275 * LOCKING:
6276 * Kernel thread context (may sleep)
6277 *
6278 * RETURNS:
6279 * The final register value.
6280 */
6281u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6282 unsigned long interval_msec,
6283 unsigned long timeout_msec)
6284{
6285 unsigned long timeout;
6286 u32 tmp;
6287
6288 tmp = ioread32(reg);
6289
6290 /* Calculate timeout _after_ the first read to make sure
6291 * preceding writes reach the controller before starting to
6292 * eat away the timeout.
6293 */
6294 timeout = jiffies + (timeout_msec * HZ) / 1000;
6295
6296 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6297 msleep(interval_msec);
6298 tmp = ioread32(reg);
6299 }
6300
6301 return tmp;
6302}
6303
dd5b06c4
TH
/*
 * Dummy port_ops - a minimal, do-nothing set of port operations.
 * Command issue always fails with AC_ERR_SYSTEM while the status
 * callbacks report device-ready, so callers terminate promptly.
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* always report BSY clear / device ready */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* reject any queued command outright */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
6336
1da177e4
LT
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* debounce timing tables and the dummy ops defined above */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);

/* host setup and teardown */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_detach);

/* queued-command and scatter-gather handling */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);

/* taskfile and low-level register helpers */
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);

/* BMDMA helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);

/* probing, reset and link management */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);

/* SCSI midlayer glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);

/* SCR (SATA status/control register) access */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
#endif /* CONFIG_PM */

/* error handling */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);

/* IRQ helpers */
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
EXPORT_SYMBOL_GPL(ata_irq_ack);
EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);