sata_promise: simplify port setup
[linux-2.6-block.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5
JG
62#define DRV_VERSION "2.10" /* must be exactly four chars */
63
64
d7bb4cc7 65/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
66const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 69
3373efd8
TH
70static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73static void ata_dev_xfermask(struct ata_device *dev);
1da177e4 74
44877b4e 75static unsigned int ata_print_id = 1;
1da177e4
LT
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
c3c013a2
JG
88int libata_fua = 0;
89module_param_named(fua, libata_fua, int, 0444);
90MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
91
a8601e5f
AM
92static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
93module_param(ata_probe_timeout, int, 0444);
94MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
95
11ef697b
KCA
96int noacpi;
97module_param(noacpi, int, 0444);
98MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
99
1da177e4
LT
100MODULE_AUTHOR("Jeff Garzik");
101MODULE_DESCRIPTION("Library module for ATA devices");
102MODULE_LICENSE("GPL");
103MODULE_VERSION(DRV_VERSION);
104
0baab86b 105
1da177e4
LT
106/**
107 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
108 * @tf: Taskfile to convert
109 * @fis: Buffer into which data will output
110 * @pmp: Port multiplier port
111 *
112 * Converts a standard ATA taskfile to a Serial ATA
113 * FIS structure (Register - Host to Device).
114 *
115 * LOCKING:
116 * Inherited from caller.
117 */
118
057ace5e 119void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
1da177e4
LT
120{
121 fis[0] = 0x27; /* Register - Host to Device FIS */
122 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
123 bit 7 indicates Command FIS */
124 fis[2] = tf->command;
125 fis[3] = tf->feature;
126
127 fis[4] = tf->lbal;
128 fis[5] = tf->lbam;
129 fis[6] = tf->lbah;
130 fis[7] = tf->device;
131
132 fis[8] = tf->hob_lbal;
133 fis[9] = tf->hob_lbam;
134 fis[10] = tf->hob_lbah;
135 fis[11] = tf->hob_feature;
136
137 fis[12] = tf->nsect;
138 fis[13] = tf->hob_nsect;
139 fis[14] = 0;
140 fis[15] = tf->ctl;
141
142 fis[16] = 0;
143 fis[17] = 0;
144 fis[18] = 0;
145 fis[19] = 0;
146}
147
148/**
149 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
150 * @fis: Buffer from which data will be input
151 * @tf: Taskfile to output
152 *
e12a1be6 153 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
154 *
155 * LOCKING:
156 * Inherited from caller.
157 */
158
057ace5e 159void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
160{
161 tf->command = fis[2]; /* status */
162 tf->feature = fis[3]; /* error */
163
164 tf->lbal = fis[4];
165 tf->lbam = fis[5];
166 tf->lbah = fis[6];
167 tf->device = fis[7];
168
169 tf->hob_lbal = fis[8];
170 tf->hob_lbam = fis[9];
171 tf->hob_lbah = fis[10];
172
173 tf->nsect = fis[12];
174 tf->hob_nsect = fis[13];
175}
176
8cbd6df1
AL
/* Read/write opcode table for ata_rwcmd_protocol().  Indexed by
 * base + fua + lba48 + write, where base is 0 (PIO multi-sector),
 * 8 (PIO single-sector) or 16 (DMA), fua is 0 or 4, lba48 is 0 or 2
 * and write is 0 or 1.  A zero entry means the combination is not
 * supported (e.g. FUA is only defined for 48-bit writes).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
206
207/**
8cbd6df1 208 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
209 * @tf: command to examine and configure
210 * @dev: device tf belongs to
1da177e4 211 *
2e9edbf8 212 * Examine the device configuration and tf->flags to calculate
8cbd6df1 213 * the proper read/write commands and protocol to use.
1da177e4
LT
214 *
215 * LOCKING:
216 * caller.
217 */
bd056d7e 218static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 219{
9a3dccc4 220 u8 cmd;
1da177e4 221
9a3dccc4 222 int index, fua, lba48, write;
2e9edbf8 223
9a3dccc4 224 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
225 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
226 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 227
8cbd6df1
AL
228 if (dev->flags & ATA_DFLAG_PIO) {
229 tf->protocol = ATA_PROT_PIO;
9a3dccc4 230 index = dev->multi_count ? 0 : 8;
bd056d7e 231 } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
232 /* Unable to use DMA due to host limitation */
233 tf->protocol = ATA_PROT_PIO;
0565c26d 234 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
235 } else {
236 tf->protocol = ATA_PROT_DMA;
9a3dccc4 237 index = 16;
8cbd6df1 238 }
1da177e4 239
9a3dccc4
TH
240 cmd = ata_rw_cmds[index + fua + lba48 + write];
241 if (cmd) {
242 tf->command = cmd;
243 return 0;
244 }
245 return -1;
1da177e4
LT
246}
247
35b649fe
TH
248/**
249 * ata_tf_read_block - Read block address from ATA taskfile
250 * @tf: ATA taskfile of interest
251 * @dev: ATA device @tf belongs to
252 *
253 * LOCKING:
254 * None.
255 *
256 * Read block address from @tf. This function can handle all
257 * three address formats - LBA, LBA48 and CHS. tf->protocol and
258 * flags select the address format to use.
259 *
260 * RETURNS:
261 * Block address read from @tf.
262 */
263u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
264{
265 u64 block = 0;
266
267 if (tf->flags & ATA_TFLAG_LBA) {
268 if (tf->flags & ATA_TFLAG_LBA48) {
269 block |= (u64)tf->hob_lbah << 40;
270 block |= (u64)tf->hob_lbam << 32;
271 block |= tf->hob_lbal << 24;
272 } else
273 block |= (tf->device & 0xf) << 24;
274
275 block |= tf->lbah << 16;
276 block |= tf->lbam << 8;
277 block |= tf->lbal;
278 } else {
279 u32 cyl, head, sect;
280
281 cyl = tf->lbam | (tf->lbah << 8);
282 head = tf->device & 0xf;
283 sect = tf->lbal;
284
285 block = (cyl * dev->heads + head) * dev->sectors + sect;
286 }
287
288 return block;
289}
290
bd056d7e
TH
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Picks NCQ,
 *	LBA48, LBA28 or CHS addressing in that order of preference,
 *	depending on device capabilities and request size.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	/* internal commands bypass NCQ even when it is enabled */
	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ register layout: tag in nsect bits 7:3, sector
		 * count in the feature/hob_feature pair */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;	/* LBA bit */
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;	/* FUA bit for NCQ */
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
413
cb95d562
TH
414/**
415 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
416 * @pio_mask: pio_mask
417 * @mwdma_mask: mwdma_mask
418 * @udma_mask: udma_mask
419 *
420 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
421 * unsigned int xfer_mask.
422 *
423 * LOCKING:
424 * None.
425 *
426 * RETURNS:
427 * Packed xfer_mask.
428 */
429static unsigned int ata_pack_xfermask(unsigned int pio_mask,
430 unsigned int mwdma_mask,
431 unsigned int udma_mask)
432{
433 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
434 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
435 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
436}
437
c0489e4e
TH
438/**
439 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
440 * @xfer_mask: xfer_mask to unpack
441 * @pio_mask: resulting pio_mask
442 * @mwdma_mask: resulting mwdma_mask
443 * @udma_mask: resulting udma_mask
444 *
445 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
446 * Any NULL distination masks will be ignored.
447 */
448static void ata_unpack_xfermask(unsigned int xfer_mask,
449 unsigned int *pio_mask,
450 unsigned int *mwdma_mask,
451 unsigned int *udma_mask)
452{
453 if (pio_mask)
454 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
455 if (mwdma_mask)
456 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
457 if (udma_mask)
458 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
459}
460
/* Map between the xfer_mask bit ranges and the XFER_* mode base values.
 * Terminated by a negative shift; used by the ata_xfer_* translation
 * helpers below.
 */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and width within xfer_mask */
	u8 base;		/* XFER_* value of the class's mode 0 */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
470
471/**
472 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
473 * @xfer_mask: xfer_mask of interest
474 *
475 * Return matching XFER_* value for @xfer_mask. Only the highest
476 * bit of @xfer_mask is considered.
477 *
478 * LOCKING:
479 * None.
480 *
481 * RETURNS:
482 * Matching XFER_* value, 0 if no match found.
483 */
484static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
485{
486 int highbit = fls(xfer_mask) - 1;
487 const struct ata_xfer_ent *ent;
488
489 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
490 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
491 return ent->base + highbit - ent->shift;
492 return 0;
493}
494
495/**
496 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
497 * @xfer_mode: XFER_* of interest
498 *
499 * Return matching xfer_mask for @xfer_mode.
500 *
501 * LOCKING:
502 * None.
503 *
504 * RETURNS:
505 * Matching xfer_mask, 0 if no match found.
506 */
507static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
508{
509 const struct ata_xfer_ent *ent;
510
511 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
512 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
513 return 1 << (ent->shift + xfer_mode - ent->base);
514 return 0;
515}
516
517/**
518 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
519 * @xfer_mode: XFER_* of interest
520 *
521 * Return matching xfer_shift for @xfer_mode.
522 *
523 * LOCKING:
524 * None.
525 *
526 * RETURNS:
527 * Matching xfer_shift, -1 if no match found.
528 */
529static int ata_xfer_mode2shift(unsigned int xfer_mode)
530{
531 const struct ata_xfer_ent *ent;
532
533 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
534 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
535 return ent->shift;
536 return -1;
537}
538
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	unsigned int mask = xfer_mask;
	int highbit = -1;

	/* locate the most significant set bit (equivalent to fls() - 1,
	 * yielding -1 for an empty mask) */
	while (mask) {
		highbit++;
		mask >>= 1;
	}

	if (highbit >= 0 &&
	    highbit < (int)(sizeof(xfer_mode_str) / sizeof(xfer_mode_str[0])))
		return xfer_mode_str[highbit];

	return "<n/a>";
}
584
4c360c81
TH
/* Translate a SATA link speed number (1-based) into a human-readable
 * string; anything out of range yields "<unknown>".
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	const unsigned int nr_spds = sizeof(spd_str) / sizeof(spd_str[0]);

	if (spd == 0 || (spd - 1) >= nr_spds)
		return "<unknown>";

	return spd_str[spd - 1];
}
596
/* Disable @dev: force the transfer mode down to PIO0 and bump
 * dev->class so the device is no longer treated as enabled
 * (presumably class+1 is the corresponding _UNSUP class value —
 * confirm against the ATA_DEV_* enum in <linux/libata.h>).
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		/* quietly drop to PIO0 before marking unusable */
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		dev->class++;
	}
}
606
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	1 if a device responded, 0 otherwise.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write alternating 0x55/0xaa patterns into two scratch
	 * registers; the write order below is part of the technique */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	/* a present device echoes back the last values written */
	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
649
1da177e4
LT
650/**
651 * ata_dev_classify - determine device type based on ATA-spec signature
652 * @tf: ATA taskfile register set for device to be identified
653 *
654 * Determine from taskfile register contents whether a device is
655 * ATA or ATAPI, as per "Signature and persistence" section
656 * of ATA/PI spec (volume 1, sect 5.14).
657 *
658 * LOCKING:
659 * None.
660 *
661 * RETURNS:
662 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
663 * the event of failure.
664 */
665
057ace5e 666unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
667{
668 /* Apple's open source Darwin code hints that some devices only
669 * put a proper signature into the LBA mid/high registers,
670 * So, we only check those. It's sufficient for uniqueness.
671 */
672
673 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
674 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
675 DPRINTK("found ATA device by sig\n");
676 return ATA_DEV_ATA;
677 }
678
679 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
680 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
681 DPRINTK("found ATAPI device by sig\n");
682 return ATA_DEV_ATAPI;
683 }
684
685 DPRINTK("unknown device\n");
686 return ATA_DEV_UNKNOWN;
687}
688
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	/* after a read, the feature slot carries the error register */
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	/* ATA signature but all-zero status — treat as absent
	 * (NOTE(review): presumably a phantom device; confirm) */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
747
748/**
6a62a04d 749 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
750 * @id: IDENTIFY DEVICE results we will examine
751 * @s: string into which data is output
752 * @ofs: offset into identify device page
753 * @len: length of string to return. must be an even number.
754 *
755 * The strings in the IDENTIFY DEVICE page are broken up into
756 * 16-bit chunks. Run through the string, and output each
757 * 8-bit chunk linearly, regardless of platform.
758 *
759 * LOCKING:
760 * caller.
761 */
762
6a62a04d
TH
763void ata_id_string(const u16 *id, unsigned char *s,
764 unsigned int ofs, unsigned int len)
1da177e4
LT
765{
766 unsigned int c;
767
768 while (len > 0) {
769 c = id[ofs] >> 8;
770 *s = c;
771 s++;
772
773 c = id[ofs] & 0xff;
774 *s = c;
775 s++;
776
777 ofs++;
778 len -= 2;
779 }
780}
781
0e949ff3 782/**
6a62a04d 783 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
784 * @id: IDENTIFY DEVICE results we will examine
785 * @s: string into which data is output
786 * @ofs: offset into identify device page
787 * @len: length of string to return. must be an odd number.
788 *
6a62a04d 789 * This function is identical to ata_id_string except that it
0e949ff3
TH
790 * trims trailing spaces and terminates the resulting string with
791 * null. @len must be actual maximum length (even number) + 1.
792 *
793 * LOCKING:
794 * caller.
795 */
6a62a04d
TH
796void ata_id_c_string(const u16 *id, unsigned char *s,
797 unsigned int ofs, unsigned int len)
0e949ff3
TH
798{
799 unsigned char *p;
800
801 WARN_ON(!(len & 1));
802
6a62a04d 803 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
804
805 p = s + strnlen(s, len - 1);
806 while (p > s && p[-1] == ' ')
807 p--;
808 *p = '\0';
809}
0baab86b 810
2940740b
TH
811static u64 ata_id_n_sectors(const u16 *id)
812{
813 if (ata_id_has_lba(id)) {
814 if (ata_id_has_lba48(id))
815 return ata_id_u64(id, 100);
816 else
817 return ata_id_u32(id, 60);
818 } else {
819 if (ata_id_current_chs_valid(id))
820 return ata_id_u32(id, 57);
821 else
822 return id[1] * id[3] * id[6];
823 }
824}
825
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations
 *	by controllers that have no device-select concept.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty */
}
841
0baab86b 842
1da177e4
LT
/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	/* DEV1 bit in the device register selects the slave device */
	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}
870
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	/* wait for the port to go idle before switching devices */
	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		/* ATAPI devices may need extra settle time after select */
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
908
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.  Debug-build only (compiles away with DPRINTK).
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
947
cb95d562
TH
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 advertises PIO3/4; modes 0-2 always assumed */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However its the speeds not the modes that
		 * are supported...  Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		/* CFA advanced modes map onto PIO5/6 and MWDMA3/4 */
		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	/* word 53 bit 2: UDMA modes word (88) is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1016
86e45b6b
TH
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	/* a flush is in progress; silently drop the request */
	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
1052
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* block further queueing: ata_port_queue_task() checks this flag */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	/* allow queueing again */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1095
7102d230 1096static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1097{
77853bf2 1098 struct completion *waiting = qc->private_data;
a2a7a662 1099
a2a7a662 1100 complete(waiting);
a2a7a662
TH
1101}
1102
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the active-command state so the internal
	 * command runs on an otherwise idle port; restored below */
	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* a failed qc with no err_mask would confuse callers; pick a
	 * generic error code */
	if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				"zero err_mask for failed "
				"internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1270
2432697b 1271/**
33480a0e 1272 * ata_exec_internal - execute libata internal command
2432697b
TH
1273 * @dev: Device to which the command is sent
1274 * @tf: Taskfile registers for the command and the result
1275 * @cdb: CDB for packet command
1276 * @dma_dir: Data tranfer direction of the command
1277 * @buf: Data buffer of the command
1278 * @buflen: Length of data buffer
1279 *
1280 * Wrapper around ata_exec_internal_sg() which takes simple
1281 * buffer instead of sg list.
1282 *
1283 * LOCKING:
1284 * None. Should be called with kernel context, might sleep.
1285 *
1286 * RETURNS:
1287 * Zero on success, AC_ERR_* mask on failure
1288 */
1289unsigned ata_exec_internal(struct ata_device *dev,
1290 struct ata_taskfile *tf, const u8 *cdb,
1291 int dma_dir, void *buf, unsigned int buflen)
1292{
33480a0e
TH
1293 struct scatterlist *psg = NULL, sg;
1294 unsigned int n_elem = 0;
2432697b 1295
33480a0e
TH
1296 if (dma_dir != DMA_NONE) {
1297 WARN_ON(!buf);
1298 sg_init_one(&sg, buf, buflen);
1299 psg = &sg;
1300 n_elem++;
1301 }
2432697b 1302
33480a0e 1303 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1304}
1305
977e6b9f
TH
1306/**
1307 * ata_do_simple_cmd - execute simple internal command
1308 * @dev: Device to which the command is sent
1309 * @cmd: Opcode to execute
1310 *
1311 * Execute a 'simple' command, that only consists of the opcode
1312 * 'cmd' itself, without filling any other registers
1313 *
1314 * LOCKING:
1315 * Kernel thread context (may sleep).
1316 *
1317 * RETURNS:
1318 * Zero on success, AC_ERR_* mask on failure
e58eb583 1319 */
77b08fb5 1320unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1321{
1322 struct ata_taskfile tf;
e58eb583
TH
1323
1324 ata_tf_init(dev, &tf);
1325
1326 tf.command = cmd;
1327 tf.flags |= ATA_TFLAG_DEVICE;
1328 tf.protocol = ATA_PROT_NODATA;
1329
977e6b9f 1330 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1331}
1332
1bc4ccff
AC
1333/**
1334 * ata_pio_need_iordy - check if iordy needed
1335 * @adev: ATA device
1336 *
1337 * Check if the current speed of the device requires IORDY. Used
1338 * by various controllers for chip configuration.
1339 */
1340
1341unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1342{
1343 int pio;
1344 int speed = adev->pio_mode - XFER_PIO_0;
1345
1346 if (speed < 2)
1347 return 0;
1348 if (speed > 2)
1349 return 1;
2e9edbf8 1350
1bc4ccff
AC
1351 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1352
1353 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1354 pio = adev->id[ATA_ID_EIDE_PIO];
1355 /* Is the speed faster than the drive allows non IORDY ? */
1356 if (pio) {
1357 /* This is cycle times not frequency - watch the logic! */
1358 if (pio > 240) /* PIO2 is 240nS per cycle */
1359 return 1;
1360 return 0;
1361 }
1362 }
1363 return 0;
1364}
1365
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY variant matching the presumed class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* IDENTIFY data arrives little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the IDENTIFY data must agree with the class we
	 * asked for */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1490
3373efd8 1491static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1492{
3373efd8 1493 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1494}
1495
a6e6ce8e
TH
1496static void ata_dev_config_ncq(struct ata_device *dev,
1497 char *desc, size_t desc_sz)
1498{
1499 struct ata_port *ap = dev->ap;
1500 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1501
1502 if (!ata_id_has_ncq(dev->id)) {
1503 desc[0] = '\0';
1504 return;
1505 }
6919a0a6
AC
1506 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1507 snprintf(desc, desc_sz, "NCQ (not used)");
1508 return;
1509 }
a6e6ce8e 1510 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1511 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1512 dev->flags |= ATA_DFLAG_NCQ;
1513 }
1514
1515 if (hdepth >= ddepth)
1516 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1517 else
1518 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1519}
1520
e6d902a3
BK
1521static void ata_set_port_max_cmd_len(struct ata_port *ap)
1522{
1523 int i;
1524
cca3974e
JG
1525 if (ap->scsi_host) {
1526 unsigned int len = 0;
1527
e6d902a3 1528 for (i = 0; i < ATA_MAX_DEVICES; i++)
cca3974e
JG
1529 len = max(len, ap->device[i].cdb_len);
1530
1531 ap->scsi_host->max_cmd_len = len;
e6d902a3
BK
1532 }
1533}
1534
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set _SDD (ACPI "Set Device Data"); failure is non-fatal */
	rc = ata_acpi_push_id(ap, dev->devno);
	if (rc) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			rc);
	}

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
				sizeof(modelbuf));

		/* word 59 bit 8 set => low byte is the valid multi count */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH EXT needed only past the 28-bit limit */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	/* give the LLDD a chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			__FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
1759
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* snapshot the classes detected by phy_reset(), then poison
	 * dev->class so stale values can't leak into the IDENTIFY pass */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* phy_reset() may have marked the port disabled; re-enable it
	 * for the probing below */
	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc)
		goto fail;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev points at the device whose probe failed */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ap);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
1874
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing DISABLED is all it takes; per-device classes are
	 * set up separately by the probe/reset paths */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
1890
3be680b7
TH
1891/**
1892 * sata_print_link_status - Print SATA link status
1893 * @ap: SATA port to printk link status about
1894 *
1895 * This function prints link speed and status of a SATA link.
1896 *
1897 * LOCKING:
1898 * None.
1899 */
1900static void sata_print_link_status(struct ata_port *ap)
1901{
6d5f9732 1902 u32 sstatus, scontrol, tmp;
3be680b7 1903
81952c54 1904 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1905 return;
81952c54 1906 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1907
81952c54 1908 if (ata_port_online(ap)) {
3be680b7 1909 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1910 ata_port_printk(ap, KERN_INFO,
1911 "SATA link up %s (SStatus %X SControl %X)\n",
1912 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1913 } else {
f15a1daf
TH
1914 ata_port_printk(ap, KERN_INFO,
1915 "SATA link down (SStatus %X SControl %X)\n",
1916 sstatus, scontrol);
3be680b7
TH
1917 }
1918}
1919
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		/* DET field (bits 3:0) != 1 means detection finished */
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device to come out of BSY; disable on timeout */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
1974
1975/**
780a87f7
JG
1976 * sata_phy_reset - Reset SATA bus.
1977 * @ap: SATA port associated with target SATA PHY.
1da177e4 1978 *
780a87f7
JG
1979 * This function resets the SATA bus, and then probes
1980 * the bus for devices.
1da177e4
LT
1981 *
1982 * LOCKING:
0cba632b 1983 * PCI/etc. bus probe sem.
1da177e4
LT
1984 *
1985 */
1986void sata_phy_reset(struct ata_port *ap)
1987{
1988 __sata_phy_reset(ap);
198e0fed 1989 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1990 return;
1991 ata_bus_reset(ap);
1992}
1993
ebdfca6e
AC
1994/**
1995 * ata_dev_pair - return other device on cable
ebdfca6e
AC
1996 * @adev: device
1997 *
1998 * Obtain the other device on the same cable, or if none is
1999 * present NULL is returned
2000 */
2e9edbf8 2001
3373efd8 2002struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2003{
3373efd8 2004 struct ata_port *ap = adev->ap;
ebdfca6e 2005 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2006 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2007 return NULL;
2008 return pair;
2009}
2010
1da177e4 2011/**
780a87f7
JG
2012 * ata_port_disable - Disable port.
2013 * @ap: Port to be disabled.
1da177e4 2014 *
780a87f7
JG
2015 * Modify @ap data structure such that the system
2016 * thinks that the entire port is disabled, and should
2017 * never attempt to probe or communicate with devices
2018 * on this port.
2019 *
cca3974e 2020 * LOCKING: host lock, or some other form of
780a87f7 2021 * serialization.
1da177e4
LT
2022 */
2023
2024void ata_port_disable(struct ata_port *ap)
2025{
2026 ap->device[0].class = ATA_DEV_NONE;
2027 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2028 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2029}
2030
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@ap: Port to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @ap downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	/* drop the highest speed bit from the current limit mask;
	 * a mask of <= 1 means we're already at the lowest speed */
	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* also cap the mask below the currently negotiated speed
	 * (SStatus SPD field, bits 7:4) */
	spd = (sstatus >> 4) & 0xf;
	if (spd <= 1)
		return -EINVAL;
	spd--;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2075
3c567b7d 2076static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2077{
2078 u32 spd, limit;
2079
2080 if (ap->sata_spd_limit == UINT_MAX)
2081 limit = 0;
2082 else
2083 limit = fls(ap->sata_spd_limit);
2084
2085 spd = (*scontrol >> 4) & 0xf;
2086 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2087
2088 return spd != limit;
2089}
2090
2091/**
3c567b7d 2092 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2093 * @ap: Port in question
2094 *
2095 * Test whether the spd limit in SControl matches
2096 * @ap->sata_spd_limit. This function is used to determine
2097 * whether hardreset is necessary to apply SATA spd
2098 * configuration.
2099 *
2100 * LOCKING:
2101 * Inherited from caller.
2102 *
2103 * RETURNS:
2104 * 1 if SATA spd configuration is needed, 0 otherwise.
2105 */
3c567b7d 2106int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2107{
2108 u32 scontrol;
2109
81952c54 2110 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2111 return 0;
2112
3c567b7d 2113 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2114}
2115
2116/**
3c567b7d 2117 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2118 * @ap: Port to set SATA spd for
2119 *
2120 * Set SATA spd of @ap according to sata_spd_limit.
2121 *
2122 * LOCKING:
2123 * Inherited from caller.
2124 *
2125 * RETURNS:
2126 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2127 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2128 */
3c567b7d 2129int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2130{
2131 u32 scontrol;
81952c54 2132 int rc;
1c3fae4d 2133
81952c54
TH
2134 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2135 return rc;
1c3fae4d 2136
3c567b7d 2137 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2138 return 0;
2139
81952c54
TH
2140 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2141 return rc;
2142
1c3fae4d
TH
2143 return 1;
2144}
2145
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

/* Columns: mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma
 * — presumably matching struct ata_timing's field order; TODO confirm
 * against the declaration in <linux/libata.h>.
 */
static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	/* end-of-table sentinel, tested by ata_timing_find_mode() */
	{ 0xFF }
};
2194
/* ENOUGH(): nanoseconds -> clock count, rounded up; EZ(): same, but a
 * zero timing value means "unspecified" and stays zero.
 */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

/* Convert the nanosecond timings in @t to clock counts in @q, using bus
 * clock period @T for PIO/MWDMA fields and @UT for the UDMA field.
 * @t and @q may alias (see ata_timing_compute()).
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2209
/* Merge timings @a and @b into @m by taking the slower (larger) value of
 * each field selected by the ATA_TIMING_* bits in @what.  Fields not
 * selected are left untouched in @m.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2222
2223static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2224{
2225 const struct ata_timing *t;
2226
2227 for (t = ata_timing; t->mode != speed; t++)
91190758 2228 if (t->mode == 0xFF)
452503f9 2229 return NULL;
2e9edbf8 2230 return t;
452503f9
AC
2231}
2232
/* Compute clock-count timings for @adev at mode @speed into @t, using
 * bus clock period @T (PIO/MWDMA) and @UT (UDMA).  Returns 0 on success,
 * -EINVAL if @speed has no table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* stretch cycle times to at least what the drive reports */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
2297
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* knock off the highest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* knock off the highest DMA mode, UDMA before MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - FORCE_PIO0 additionally clears DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* at least one PIO mode must survive, and something must change */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
2386
/* Issue SET FEATURES - XFER MODE to @dev according to its xfer_shift /
 * xfer_mode, then revalidate the device.  Returns 0 on success,
 * -EIO if the device rejected the command, or the revalidation errno.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* flag the revalidation as post-SET-XFERMODE for EH */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2421
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(ap, r_failed_dev);

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		/* pick the best remaining PIO and DMA modes */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = 1;

	/* step5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
2528
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load registers first, then trigger command execution */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
2548
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* 0xff status means the port is empty/unresponsive (floating bus) */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* keep polling until the overall timeout expires */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
2603
/* Wait for the devices in @devmask to come out of reset: BSY must clear
 * and, for device 1, the nsect/lbal signature registers must become
 * readable.  Re-selects device 0 at the end.
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		nsect = ioread8(ioaddr->nsect_addr);
		lbal = ioread8(ioaddr->lbal_addr);
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
2645
/* Pulse SRST in the device control register, wait the spec-mandated
 * delay, then run post-reset processing for @devmask.  Returns 0;
 * callers interpret the value as an error mask.
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return 0;

	ata_bus_post_reset(ap, devmask);

	return 0;
}
2683
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	/* err == 0x81 indicates device 1 failed diagnostics */
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2770
/**
 *	sata_phy_debounce - debounce SATA phy status
 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Make sure SStatus of @ap reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because, after hot unplugging,
 *	DET gets stuck at 1 on some controllers, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 may be stuck; keep waiting until timeout */
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check timeout */
		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
2829
/**
 *	sata_phy_resume - resume SATA phy
 *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Resume SATA phy of @ap and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	/* DET = 0 (no action), IPM = 3 (no power management) */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params);
}
2863
/* Give a hotplugged device time to spin up before probing: debounce the
 * phy if SATA, then sleep out the remainder of the ATA_SPINUP_WAIT
 * window measured from the hotplug timestamp.
 */
static void ata_wait_spinup(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned long end, secs;
	int rc;

	/* first, debounce phy if SATA */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);

		/* if debounced successfully and offline, no need to wait */
		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
			return;
	}

	/* okay, let's give the drive time to spin up */
	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
	secs = ((end - jiffies) + HZ - 1) / HZ;

	if (time_after(jiffies, end))
		return;

	if (secs > 5)
		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
				"(%lu secs)\n", secs);

	schedule_timeout_uninterruptible(end - jiffies);
}
2892
/**
 *	ata_std_prereset - prepare for reset
 *	@ap: ATA port to be reset
 *
 *	@ap is about to be reset.  Initialize it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume & hotplug spinup */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
		ata_wait_spinup(ap);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_resume(ap, timing);
		/* -EOPNOTSUPP just means no SCR access; not fatal here */
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
			return rc;
		}
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	return 0;
}
2943
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@ap: port to reset
 *	@classes: resulting classes of attached devices
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	/* link offline: nothing attached, skip the reset entirely */
	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	/* err == 0x81 indicates device 1 failed diagnostics */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
2997
/**
 *	sata_port_hardreset - reset port via SATA phy reset
 *	@ap: port to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: put phy in offline mode */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1: perform interface communication initialization */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	rc = sata_phy_resume(ap, timing);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3055
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@class: resulting class of attached device
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_port_hardreset(ap, timing);
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (device not ready)\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3108
/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError (write-1-to-clear) */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts */
	if (!ap->ops->error_handler)
		ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3156
623a3128
TH
3157/**
3158 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3159 * @dev: device to compare against
3160 * @new_class: class of the new device
3161 * @new_id: IDENTIFY page of the new device
3162 *
3163 * Compare @new_class and @new_id against @dev and determine
3164 * whether @dev is the device indicated by @new_class and
3165 * @new_id.
3166 *
3167 * LOCKING:
3168 * None.
3169 *
3170 * RETURNS:
3171 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3172 */
3373efd8
TH
3173static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3174 const u16 *new_id)
623a3128
TH
3175{
3176 const u16 *old_id = dev->id;
a0cf733b
TH
3177 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3178 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3179 u64 new_n_sectors;
3180
3181 if (dev->class != new_class) {
f15a1daf
TH
3182 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3183 dev->class, new_class);
623a3128
TH
3184 return 0;
3185 }
3186
a0cf733b
TH
3187 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3188 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3189 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3190 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3191 new_n_sectors = ata_id_n_sectors(new_id);
3192
3193 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3194 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3195 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3196 return 0;
3197 }
3198
3199 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3200 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3201 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3202 return 0;
3203 }
3204
3205 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3206 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3207 "%llu != %llu\n",
3208 (unsigned long long)dev->n_sectors,
3209 (unsigned long long)new_n_sectors);
623a3128
TH
3210 return 0;
3211 }
3212
3213 return 1;
3214}
3215
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* scratch buffer owned by the port, reused for IDENTIFY data */
	u16 *id = (void *)dev->ap->sector_buf;
	int rc;

	if (!ata_dev_enabled(dev)) {
		rc = -ENODEV;
		goto fail;
	}

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		goto fail;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id)) {
		rc = -ENODEV;
		goto fail;
	}

	/* commit the new ID page only after the identity check passed */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc == 0)
		return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3263
6919a0a6
AC
/* One quirk ("horkage") entry: a device is matched by model number and,
 * optionally, by firmware revision (NULL model_rev matches any revision).
 */
struct ata_blacklist_entry {
	const char *model_num;	/* IDENTIFY model string to match */
	const char *model_rev;	/* firmware revision, or NULL for any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply */
};

/* Table scanned by ata_device_blacklisted(); terminated by the empty
 * end-marker entry.
 */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B",	NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC",	NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },

	/* Devices with NCQ limits */

	/* End Marker */
	{ }
};
2e9edbf8 3313
6919a0a6 3314unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3315{
8bfa79fc
TH
3316 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3317 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3318 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3319
8bfa79fc
TH
3320 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3321 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3322
6919a0a6 3323 while (ad->model_num) {
8bfa79fc 3324 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3325 if (ad->model_rev == NULL)
3326 return ad->horkage;
8bfa79fc 3327 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3328 return ad->horkage;
f4b15fef 3329 }
6919a0a6 3330 ad++;
f4b15fef 3331 }
1da177e4
LT
3332 return 0;
3333}
3334
6919a0a6
AC
3335static int ata_dma_blacklisted(const struct ata_device *dev)
3336{
3337 /* We don't support polling DMA.
3338 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3339 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3340 */
3341 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3342 (dev->flags & ATA_DFLAG_CDB_INTR))
3343 return 1;
3344 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3345}
3346
a6d5a51c
TH
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 */
	if (ap->cbl == ATA_CBL_PATA40)
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);	/* cap at UDMA/33 */
	/* Apply drive side cable rule. Unknown or 80 pin cables reported
	 * host side are checked drive side as well. Cases where we know a
	 * 40wire cable is used safely for 80 are not checked here.
	 */
	if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);


	/* intersect with what the device itself advertises */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* simplex hosts can run DMA on one channel only; if another
	 * device already claimed it, fall back to PIO here
	 */
	if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	/* give the LLDD a final say via its optional mode filter */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
3415
1da177e4
LT
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	/* the desired transfer mode id is passed in the sector count reg */
	tf.nsect = dev->xfer_mode;

	/* synchronous internal command, no data phase */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
3450
8bf62ece
AL
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	Programs the device's CHS translation geometry.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	/* synchronous internal command, no data phase */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
3488
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command,
 *	undoing ata_sg_setup()/ata_sg_setup_one(): unmap the sg
 *	entries, restore any length trimmed for 32-bit padding, and
 *	copy read data back out of the pad buffer.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* n_elem may be 0 if the whole last sg was turned into pad */
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
3546
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.  Segments that
 *	cross a 64K physical boundary are split, since the BMDMA
 *	hardware cannot cross it within one PRD entry.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;	/* clip at 64K edge */

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry so the controller stops there */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
3598/**
3599 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3600 * @qc: Metadata associated with taskfile to check
3601 *
780a87f7
JG
3602 * Allow low-level driver to filter ATA PACKET commands, returning
3603 * a status indicating whether or not it is OK to use DMA for the
3604 * supplied PACKET command.
3605 *
1da177e4 3606 * LOCKING:
cca3974e 3607 * spin_lock_irqsave(host lock)
0cba632b 3608 *
1da177e4
LT
3609 * RETURNS: 0 when ATAPI DMA can be used
3610 * nonzero otherwise
3611 */
3612int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3613{
3614 struct ata_port *ap = qc->ap;
3615 int rc = 0; /* Assume ATAPI DMA is OK by default */
3616
3617 if (ap->ops->check_atapi_dma)
3618 rc = ap->ops->check_atapi_dma(qc);
3619
3620 return rc;
3621}
3622/**
3623 * ata_qc_prep - Prepare taskfile for submission
3624 * @qc: Metadata associated with taskfile to be prepared
3625 *
780a87f7
JG
3626 * Prepare ATA taskfile for submission.
3627 *
1da177e4 3628 * LOCKING:
cca3974e 3629 * spin_lock_irqsave(host lock)
1da177e4
LT
3630 */
3631void ata_qc_prep(struct ata_queued_cmd *qc)
3632{
3633 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3634 return;
3635
3636 ata_fill_sg(qc);
3637}
3638
/* qc_prep callback for controllers that need no PRD/SG preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3640
0cba632b
JG
3641/**
3642 * ata_sg_init_one - Associate command with memory buffer
3643 * @qc: Command to be associated
3644 * @buf: Memory buffer
3645 * @buflen: Length of memory buffer, in bytes.
3646 *
3647 * Initialize the data-related elements of queued_cmd @qc
3648 * to point to a single memory buffer, @buf of byte length @buflen.
3649 *
3650 * LOCKING:
cca3974e 3651 * spin_lock_irqsave(host lock)
0cba632b
JG
3652 */
3653
1da177e4
LT
3654void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3655{
1da177e4
LT
3656 qc->flags |= ATA_QCFLAG_SINGLE;
3657
cedc9a47 3658 qc->__sg = &qc->sgent;
1da177e4 3659 qc->n_elem = 1;
cedc9a47 3660 qc->orig_n_elem = 1;
1da177e4 3661 qc->buf_virt = buf;
233277ca 3662 qc->nbytes = buflen;
1da177e4 3663
61c0596c 3664 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3665}
3666
0cba632b
JG
3667/**
3668 * ata_sg_init - Associate command with scatter-gather table.
3669 * @qc: Command to be associated
3670 * @sg: Scatter-gather table.
3671 * @n_elem: Number of elements in s/g table.
3672 *
3673 * Initialize the data-related elements of queued_cmd @qc
3674 * to point to a scatter-gather table @sg, containing @n_elem
3675 * elements.
3676 *
3677 * LOCKING:
cca3974e 3678 * spin_lock_irqsave(host lock)
0cba632b
JG
3679 */
3680
1da177e4
LT
3681void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3682 unsigned int n_elem)
3683{
3684 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3685 qc->__sg = sg;
1da177e4 3686 qc->n_elem = n_elem;
cedc9a47 3687 qc->orig_n_elem = n_elem;
1da177e4
LT
3688}
3689
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *	If the buffer length is not a multiple of 4, the trailing
 *	bytes are moved to the per-tag pad buffer so the transfer
 *	ends on a 32-bit boundary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI commands are expected to need padding here */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, stage the tail bytes into the pad buffer */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;	/* whole buffer fit in the pad */

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
3758
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *	As in ata_sg_setup_one(), a transfer whose total length is not
 *	a multiple of 4 has its tail moved into the per-tag pad buffer.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI commands are expected to need padding here */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;	/* last sg entirely moved to pad */

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the now-empty last sg entry before mapping, if any */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
3843
0baab86b 3844/**
c893a3ae 3845 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3846 * @buf: Buffer to swap
3847 * @buf_words: Number of 16-bit words in buffer.
3848 *
3849 * Swap halves of 16-bit words if needed to convert from
3850 * little-endian byte order to native cpu byte order, or
3851 * vice-versa.
3852 *
3853 * LOCKING:
6f0ef4fa 3854 * Inherited from caller.
0baab86b 3855 */
1da177e4
LT
3856void swap_buf_le16(u16 *buf, unsigned int buf_words)
3857{
3858#ifdef __BIG_ENDIAN
3859 unsigned int i;
3860
3861 for (i = 0; i < buf_words; i++)
3862 buf[i] = le16_to_cpu(buf[i]);
3863#endif /* __BIG_ENDIAN */
3864}
3865
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any.  The data register is
	 * 16 bits wide, so the odd byte goes through an aligned
	 * bounce word.
	 */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3904
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;
	/* disable local interrupts around the PIO burst */
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
3926
3927
6ae4cfb5
AL
/**
 *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 *	@qc: Command on going
 *
 *	Transfer ATA_SECT_SIZE of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* entering the last sector -> advance the HSM state machine */
	if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page: directly addressable, no mapping needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
	}

	qc->curbytes += ATA_SECT_SIZE;
	qc->cursg_ofs += ATA_SECT_SIZE;

	/* current sg exhausted -> move to the next entry */
	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
1da177e4 3984
07f6f7d0
AL
3985/**
3986 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3987 * @qc: Command on going
3988 *
c81e29b4 3989 * Transfer one or many ATA_SECT_SIZE of data from/to the
07f6f7d0
AL
3990 * ATA device for the DRQ request.
3991 *
3992 * LOCKING:
3993 * Inherited from caller.
3994 */
1da177e4 3995
07f6f7d0
AL
3996static void ata_pio_sectors(struct ata_queued_cmd *qc)
3997{
3998 if (is_multi_taskfile(&qc->tf)) {
3999 /* READ/WRITE MULTIPLE */
4000 unsigned int nsect;
4001
587005de 4002 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4003
726f0785
TH
4004 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
4005 qc->dev->multi_count);
07f6f7d0
AL
4006 while (nsect--)
4007 ata_pio_sector(qc);
4008 } else
4009 ata_pio_sector(qc);
4010}
4011
c71c1857
AL
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB and then
 *	advance the HSM state according to the command's protocol.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		/* no data phase; next event is command completion */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
4047
6ae4cfb5
AL
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the command's
 *	scatter-gather list one page-bounded chunk at a time.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this DRQ block finishes the command -> advance HSM */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* drain/pad word by word through a zeroed bounce word */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page: directly addressable, no mapping needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* current sg exhausted -> move to the next entry */
	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	/* more bytes requested by the device for this DRQ block */
	if (bytes)
		goto next_sg;
}
4142
6ae4cfb5
AL
4143/**
4144 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4145 * @qc: Command on going
4146 *
4147 * Transfer Transfer data from/to the ATAPI device.
4148 *
4149 * LOCKING:
4150 * Inherited from caller.
6ae4cfb5
AL
4151 */
4152
1da177e4
LT
4153static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4154{
4155 struct ata_port *ap = qc->ap;
4156 struct ata_device *dev = qc->dev;
4157 unsigned int ireason, bc_lo, bc_hi, bytes;
4158 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4159
eec4c3f3
AL
4160 /* Abuse qc->result_tf for temp storage of intermediate TF
4161 * here to save some kernel stack usage.
4162 * For normal completion, qc->result_tf is not relevant. For
4163 * error, qc->result_tf is later overwritten by ata_qc_complete().
4164 * So, the correctness of qc->result_tf is not affected.
4165 */
4166 ap->ops->tf_read(ap, &qc->result_tf);
4167 ireason = qc->result_tf.nsect;
4168 bc_lo = qc->result_tf.lbam;
4169 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4170 bytes = (bc_hi << 8) | bc_lo;
4171
4172 /* shall be cleared to zero, indicating xfer of data */
4173 if (ireason & (1 << 0))
4174 goto err_out;
4175
4176 /* make sure transfer direction matches expected */
4177 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4178 if (do_write != i_write)
4179 goto err_out;
4180
44877b4e 4181 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 4182
1da177e4
LT
4183 __atapi_pio_bytes(qc, bytes);
4184
4185 return;
4186
4187err_out:
f15a1daf 4188 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4189 qc->err_mask |= AC_ERR_HSM;
14be71f4 4190 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4191}
4192
4193/**
c234fb00
AL
4194 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4195 * @ap: the target ata_port
4196 * @qc: qc on going
1da177e4 4197 *
c234fb00
AL
4198 * RETURNS:
4199 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4200 */
c234fb00
AL
4201
4202static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4203{
c234fb00
AL
4204 if (qc->tf.flags & ATA_TFLAG_POLLING)
4205 return 1;
1da177e4 4206
c234fb00
AL
4207 if (ap->hsm_task_state == HSM_ST_FIRST) {
4208 if (qc->tf.protocol == ATA_PROT_PIO &&
4209 (qc->tf.flags & ATA_TFLAG_WRITE))
4210 return 1;
1da177e4 4211
c234fb00
AL
4212 if (is_atapi_taskfile(&qc->tf) &&
4213 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4214 return 1;
fe79e683
AL
4215 }
4216
c234fb00
AL
4217 return 0;
4218}
1da177e4 4219
c17ea20d
TH
4220/**
4221 * ata_hsm_qc_complete - finish a qc running on standard HSM
4222 * @qc: Command to complete
4223 * @in_wq: 1 if called from workqueue, 0 otherwise
4224 *
4225 * Finish @qc which is running on standard HSM.
4226 *
4227 * LOCKING:
cca3974e 4228 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4229 * Otherwise, none on entry and grabs host lock.
4230 */
4231static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4232{
4233 struct ata_port *ap = qc->ap;
4234 unsigned long flags;
4235
4236 if (ap->ops->error_handler) {
4237 if (in_wq) {
ba6a1308 4238 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4239
cca3974e
JG
4240 /* EH might have kicked in while host lock is
4241 * released.
c17ea20d
TH
4242 */
4243 qc = ata_qc_from_tag(ap, qc->tag);
4244 if (qc) {
4245 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4246 ap->ops->irq_on(ap);
c17ea20d
TH
4247 ata_qc_complete(qc);
4248 } else
4249 ata_port_freeze(ap);
4250 }
4251
ba6a1308 4252 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4253 } else {
4254 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4255 ata_qc_complete(qc);
4256 else
4257 ata_port_freeze(ap);
4258 }
4259 } else {
4260 if (in_wq) {
ba6a1308 4261 spin_lock_irqsave(ap->lock, flags);
83625006 4262 ap->ops->irq_on(ap);
c17ea20d 4263 ata_qc_complete(qc);
ba6a1308 4264 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4265 } else
4266 ata_qc_complete(qc);
4267 }
1da177e4 4268
c81e29b4 4269 ata_altstatus(ap); /* flush */
c17ea20d
TH
4270}
4271
bb5cb290
AL
4272/**
4273 * ata_hsm_move - move the HSM to the next state.
4274 * @ap: the target ata_port
4275 * @qc: qc on going
4276 * @status: current device status
4277 * @in_wq: 1 if called from workqueue, 0 otherwise
4278 *
4279 * RETURNS:
4280 * 1 when poll next status needed, 0 otherwise.
4281 */
9a1004d0
TH
4282int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4283 u8 status, int in_wq)
e2cec771 4284{
bb5cb290
AL
4285 unsigned long flags = 0;
4286 int poll_next;
4287
6912ccd5
AL
4288 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4289
bb5cb290
AL
4290 /* Make sure ata_qc_issue_prot() does not throw things
4291 * like DMA polling into the workqueue. Notice that
4292 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4293 */
c234fb00 4294 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4295
e2cec771 4296fsm_start:
999bb6f4 4297 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 4298 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 4299
e2cec771
AL
4300 switch (ap->hsm_task_state) {
4301 case HSM_ST_FIRST:
bb5cb290
AL
4302 /* Send first data block or PACKET CDB */
4303
4304 /* If polling, we will stay in the work queue after
4305 * sending the data. Otherwise, interrupt handler
4306 * takes over after sending the data.
4307 */
4308 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4309
e2cec771 4310 /* check device status */
3655d1d3
AL
4311 if (unlikely((status & ATA_DRQ) == 0)) {
4312 /* handle BSY=0, DRQ=0 as error */
4313 if (likely(status & (ATA_ERR | ATA_DF)))
4314 /* device stops HSM for abort/error */
4315 qc->err_mask |= AC_ERR_DEV;
4316 else
4317 /* HSM violation. Let EH handle this */
4318 qc->err_mask |= AC_ERR_HSM;
4319
14be71f4 4320 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4321 goto fsm_start;
1da177e4
LT
4322 }
4323
71601958
AL
4324 /* Device should not ask for data transfer (DRQ=1)
4325 * when it finds something wrong.
eee6c32f
AL
4326 * We ignore DRQ here and stop the HSM by
4327 * changing hsm_task_state to HSM_ST_ERR and
4328 * let the EH abort the command or reset the device.
71601958
AL
4329 */
4330 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4331 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4332 "error, dev_stat 0x%X\n", status);
3655d1d3 4333 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4334 ap->hsm_task_state = HSM_ST_ERR;
4335 goto fsm_start;
71601958 4336 }
1da177e4 4337
bb5cb290
AL
4338 /* Send the CDB (atapi) or the first data block (ata pio out).
4339 * During the state transition, interrupt handler shouldn't
4340 * be invoked before the data transfer is complete and
4341 * hsm_task_state is changed. Hence, the following locking.
4342 */
4343 if (in_wq)
ba6a1308 4344 spin_lock_irqsave(ap->lock, flags);
1da177e4 4345
bb5cb290
AL
4346 if (qc->tf.protocol == ATA_PROT_PIO) {
4347 /* PIO data out protocol.
4348 * send first data block.
4349 */
0565c26d 4350
bb5cb290
AL
4351 /* ata_pio_sectors() might change the state
4352 * to HSM_ST_LAST. so, the state is changed here
4353 * before ata_pio_sectors().
4354 */
4355 ap->hsm_task_state = HSM_ST;
4356 ata_pio_sectors(qc);
4357 ata_altstatus(ap); /* flush */
4358 } else
4359 /* send CDB */
4360 atapi_send_cdb(ap, qc);
4361
4362 if (in_wq)
ba6a1308 4363 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4364
4365 /* if polling, ata_pio_task() handles the rest.
4366 * otherwise, interrupt handler takes over from here.
4367 */
e2cec771 4368 break;
1c848984 4369
e2cec771
AL
4370 case HSM_ST:
4371 /* complete command or read/write the data register */
4372 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4373 /* ATAPI PIO protocol */
4374 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4375 /* No more data to transfer or device error.
4376 * Device error will be tagged in HSM_ST_LAST.
4377 */
e2cec771
AL
4378 ap->hsm_task_state = HSM_ST_LAST;
4379 goto fsm_start;
4380 }
1da177e4 4381
71601958
AL
4382 /* Device should not ask for data transfer (DRQ=1)
4383 * when it finds something wrong.
eee6c32f
AL
4384 * We ignore DRQ here and stop the HSM by
4385 * changing hsm_task_state to HSM_ST_ERR and
4386 * let the EH abort the command or reset the device.
71601958
AL
4387 */
4388 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4389 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4390 "device error, dev_stat 0x%X\n",
4391 status);
3655d1d3 4392 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4393 ap->hsm_task_state = HSM_ST_ERR;
4394 goto fsm_start;
71601958 4395 }
1da177e4 4396
e2cec771 4397 atapi_pio_bytes(qc);
7fb6ec28 4398
e2cec771
AL
4399 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4400 /* bad ireason reported by device */
4401 goto fsm_start;
1da177e4 4402
e2cec771
AL
4403 } else {
4404 /* ATA PIO protocol */
4405 if (unlikely((status & ATA_DRQ) == 0)) {
4406 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4407 if (likely(status & (ATA_ERR | ATA_DF)))
4408 /* device stops HSM for abort/error */
4409 qc->err_mask |= AC_ERR_DEV;
4410 else
55a8e2c8
TH
4411 /* HSM violation. Let EH handle this.
4412 * Phantom devices also trigger this
4413 * condition. Mark hint.
4414 */
4415 qc->err_mask |= AC_ERR_HSM |
4416 AC_ERR_NODEV_HINT;
3655d1d3 4417
e2cec771
AL
4418 ap->hsm_task_state = HSM_ST_ERR;
4419 goto fsm_start;
4420 }
1da177e4 4421
eee6c32f
AL
4422 /* For PIO reads, some devices may ask for
4423 * data transfer (DRQ=1) alone with ERR=1.
4424 * We respect DRQ here and transfer one
4425 * block of junk data before changing the
4426 * hsm_task_state to HSM_ST_ERR.
4427 *
4428 * For PIO writes, ERR=1 DRQ=1 doesn't make
4429 * sense since the data block has been
4430 * transferred to the device.
71601958
AL
4431 */
4432 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4433 /* data might be corrputed */
4434 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4435
4436 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4437 ata_pio_sectors(qc);
4438 ata_altstatus(ap);
4439 status = ata_wait_idle(ap);
4440 }
4441
3655d1d3
AL
4442 if (status & (ATA_BUSY | ATA_DRQ))
4443 qc->err_mask |= AC_ERR_HSM;
4444
eee6c32f
AL
4445 /* ata_pio_sectors() might change the
4446 * state to HSM_ST_LAST. so, the state
4447 * is changed after ata_pio_sectors().
4448 */
4449 ap->hsm_task_state = HSM_ST_ERR;
4450 goto fsm_start;
71601958
AL
4451 }
4452
e2cec771
AL
4453 ata_pio_sectors(qc);
4454
4455 if (ap->hsm_task_state == HSM_ST_LAST &&
4456 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4457 /* all data read */
4458 ata_altstatus(ap);
52a32205 4459 status = ata_wait_idle(ap);
e2cec771
AL
4460 goto fsm_start;
4461 }
4462 }
4463
4464 ata_altstatus(ap); /* flush */
bb5cb290 4465 poll_next = 1;
1da177e4
LT
4466 break;
4467
14be71f4 4468 case HSM_ST_LAST:
6912ccd5
AL
4469 if (unlikely(!ata_ok(status))) {
4470 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4471 ap->hsm_task_state = HSM_ST_ERR;
4472 goto fsm_start;
4473 }
4474
4475 /* no more data to transfer */
4332a771 4476 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 4477 ap->print_id, qc->dev->devno, status);
e2cec771 4478
6912ccd5
AL
4479 WARN_ON(qc->err_mask);
4480
e2cec771 4481 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4482
e2cec771 4483 /* complete taskfile transaction */
c17ea20d 4484 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4485
4486 poll_next = 0;
1da177e4
LT
4487 break;
4488
14be71f4 4489 case HSM_ST_ERR:
e2cec771
AL
4490 /* make sure qc->err_mask is available to
4491 * know what's wrong and recover
4492 */
4493 WARN_ON(qc->err_mask == 0);
4494
4495 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4496
999bb6f4 4497 /* complete taskfile transaction */
c17ea20d 4498 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4499
4500 poll_next = 0;
e2cec771
AL
4501 break;
4502 default:
bb5cb290 4503 poll_next = 0;
6912ccd5 4504 BUG();
1da177e4
LT
4505 }
4506
bb5cb290 4507 return poll_next;
1da177e4
LT
4508}
4509
65f27f38 4510static void ata_pio_task(struct work_struct *work)
8061f5f0 4511{
65f27f38
DH
4512 struct ata_port *ap =
4513 container_of(work, struct ata_port, port_task.work);
4514 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 4515 u8 status;
a1af3734 4516 int poll_next;
8061f5f0 4517
7fb6ec28 4518fsm_start:
a1af3734 4519 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4520
a1af3734
AL
4521 /*
4522 * This is purely heuristic. This is a fast path.
4523 * Sometimes when we enter, BSY will be cleared in
4524 * a chk-status or two. If not, the drive is probably seeking
4525 * or something. Snooze for a couple msecs, then
4526 * chk-status again. If still busy, queue delayed work.
4527 */
4528 status = ata_busy_wait(ap, ATA_BUSY, 5);
4529 if (status & ATA_BUSY) {
4530 msleep(2);
4531 status = ata_busy_wait(ap, ATA_BUSY, 10);
4532 if (status & ATA_BUSY) {
31ce6dae 4533 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4534 return;
4535 }
8061f5f0
TH
4536 }
4537
a1af3734
AL
4538 /* move the HSM */
4539 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4540
a1af3734
AL
4541 /* another command or interrupt handler
4542 * may be running at this point.
4543 */
4544 if (poll_next)
7fb6ec28 4545 goto fsm_start;
8061f5f0
TH
4546}
4547
1da177e4
LT
4548/**
4549 * ata_qc_new - Request an available ATA command, for queueing
4550 * @ap: Port associated with device @dev
4551 * @dev: Device from whom we request an available command structure
4552 *
4553 * LOCKING:
0cba632b 4554 * None.
1da177e4
LT
4555 */
4556
4557static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4558{
4559 struct ata_queued_cmd *qc = NULL;
4560 unsigned int i;
4561
e3180499 4562 /* no command while frozen */
b51e9e5d 4563 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4564 return NULL;
4565
2ab7db1f
TH
4566 /* the last tag is reserved for internal command. */
4567 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4568 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4569 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4570 break;
4571 }
4572
4573 if (qc)
4574 qc->tag = i;
4575
4576 return qc;
4577}
4578
4579/**
4580 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4581 * @dev: Device from whom we request an available command structure
4582 *
4583 * LOCKING:
0cba632b 4584 * None.
1da177e4
LT
4585 */
4586
3373efd8 4587struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4588{
3373efd8 4589 struct ata_port *ap = dev->ap;
1da177e4
LT
4590 struct ata_queued_cmd *qc;
4591
4592 qc = ata_qc_new(ap);
4593 if (qc) {
1da177e4
LT
4594 qc->scsicmd = NULL;
4595 qc->ap = ap;
4596 qc->dev = dev;
1da177e4 4597
2c13b7ce 4598 ata_qc_reinit(qc);
1da177e4
LT
4599 }
4600
4601 return qc;
4602}
4603
1da177e4
LT
4604/**
4605 * ata_qc_free - free unused ata_queued_cmd
4606 * @qc: Command to complete
4607 *
4608 * Designed to free unused ata_queued_cmd object
4609 * in case something prevents using it.
4610 *
4611 * LOCKING:
cca3974e 4612 * spin_lock_irqsave(host lock)
1da177e4
LT
4613 */
4614void ata_qc_free(struct ata_queued_cmd *qc)
4615{
4ba946e9
TH
4616 struct ata_port *ap = qc->ap;
4617 unsigned int tag;
4618
a4631474 4619 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4620
4ba946e9
TH
4621 qc->flags = 0;
4622 tag = qc->tag;
4623 if (likely(ata_tag_valid(tag))) {
4ba946e9 4624 qc->tag = ATA_TAG_POISON;
6cec4a39 4625 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4626 }
1da177e4
LT
4627}
4628
76014427 4629void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4630{
dedaf2b0
TH
4631 struct ata_port *ap = qc->ap;
4632
a4631474
TH
4633 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4634 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4635
4636 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4637 ata_sg_clean(qc);
4638
7401abf2 4639 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
4640 if (qc->tf.protocol == ATA_PROT_NCQ)
4641 ap->sactive &= ~(1 << qc->tag);
4642 else
4643 ap->active_tag = ATA_TAG_POISON;
7401abf2 4644
3f3791d3
AL
4645 /* atapi: mark qc as inactive to prevent the interrupt handler
4646 * from completing the command twice later, before the error handler
4647 * is called. (when rc != 0 and atapi request sense is needed)
4648 */
4649 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4650 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4651
1da177e4 4652 /* call completion callback */
77853bf2 4653 qc->complete_fn(qc);
1da177e4
LT
4654}
4655
39599a53
TH
4656static void fill_result_tf(struct ata_queued_cmd *qc)
4657{
4658 struct ata_port *ap = qc->ap;
4659
4660 ap->ops->tf_read(ap, &qc->result_tf);
4661 qc->result_tf.flags = qc->tf.flags;
4662}
4663
f686bcb8
TH
4664/**
4665 * ata_qc_complete - Complete an active ATA command
4666 * @qc: Command to complete
4667 * @err_mask: ATA Status register contents
4668 *
4669 * Indicate to the mid and upper layers that an ATA
4670 * command has completed, with either an ok or not-ok status.
4671 *
4672 * LOCKING:
cca3974e 4673 * spin_lock_irqsave(host lock)
f686bcb8
TH
4674 */
4675void ata_qc_complete(struct ata_queued_cmd *qc)
4676{
4677 struct ata_port *ap = qc->ap;
4678
4679 /* XXX: New EH and old EH use different mechanisms to
4680 * synchronize EH with regular execution path.
4681 *
4682 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4683 * Normal execution path is responsible for not accessing a
4684 * failed qc. libata core enforces the rule by returning NULL
4685 * from ata_qc_from_tag() for failed qcs.
4686 *
4687 * Old EH depends on ata_qc_complete() nullifying completion
4688 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4689 * not synchronize with interrupt handler. Only PIO task is
4690 * taken care of.
4691 */
4692 if (ap->ops->error_handler) {
b51e9e5d 4693 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4694
4695 if (unlikely(qc->err_mask))
4696 qc->flags |= ATA_QCFLAG_FAILED;
4697
4698 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4699 if (!ata_tag_internal(qc->tag)) {
4700 /* always fill result TF for failed qc */
39599a53 4701 fill_result_tf(qc);
f686bcb8
TH
4702 ata_qc_schedule_eh(qc);
4703 return;
4704 }
4705 }
4706
4707 /* read result TF if requested */
4708 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4709 fill_result_tf(qc);
f686bcb8
TH
4710
4711 __ata_qc_complete(qc);
4712 } else {
4713 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4714 return;
4715
4716 /* read result TF if failed or requested */
4717 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4718 fill_result_tf(qc);
f686bcb8
TH
4719
4720 __ata_qc_complete(qc);
4721 }
4722}
4723
dedaf2b0
TH
4724/**
4725 * ata_qc_complete_multiple - Complete multiple qcs successfully
4726 * @ap: port in question
4727 * @qc_active: new qc_active mask
4728 * @finish_qc: LLDD callback invoked before completing a qc
4729 *
4730 * Complete in-flight commands. This functions is meant to be
4731 * called from low-level driver's interrupt routine to complete
4732 * requests normally. ap->qc_active and @qc_active is compared
4733 * and commands are completed accordingly.
4734 *
4735 * LOCKING:
cca3974e 4736 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4737 *
4738 * RETURNS:
4739 * Number of completed commands on success, -errno otherwise.
4740 */
4741int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4742 void (*finish_qc)(struct ata_queued_cmd *))
4743{
4744 int nr_done = 0;
4745 u32 done_mask;
4746 int i;
4747
4748 done_mask = ap->qc_active ^ qc_active;
4749
4750 if (unlikely(done_mask & qc_active)) {
4751 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4752 "(%08x->%08x)\n", ap->qc_active, qc_active);
4753 return -EINVAL;
4754 }
4755
4756 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4757 struct ata_queued_cmd *qc;
4758
4759 if (!(done_mask & (1 << i)))
4760 continue;
4761
4762 if ((qc = ata_qc_from_tag(ap, i))) {
4763 if (finish_qc)
4764 finish_qc(qc);
4765 ata_qc_complete(qc);
4766 nr_done++;
4767 }
4768 }
4769
4770 return nr_done;
4771}
4772
1da177e4
LT
4773static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4774{
4775 struct ata_port *ap = qc->ap;
4776
4777 switch (qc->tf.protocol) {
3dc1d881 4778 case ATA_PROT_NCQ:
1da177e4
LT
4779 case ATA_PROT_DMA:
4780 case ATA_PROT_ATAPI_DMA:
4781 return 1;
4782
4783 case ATA_PROT_ATAPI:
4784 case ATA_PROT_PIO:
1da177e4
LT
4785 if (ap->flags & ATA_FLAG_PIO_DMA)
4786 return 1;
4787
4788 /* fall through */
4789
4790 default:
4791 return 0;
4792 }
4793
4794 /* never reached */
4795}
4796
4797/**
4798 * ata_qc_issue - issue taskfile to device
4799 * @qc: command to issue to device
4800 *
4801 * Prepare an ATA command to submission to device.
4802 * This includes mapping the data into a DMA-able
4803 * area, filling in the S/G table, and finally
4804 * writing the taskfile to hardware, starting the command.
4805 *
4806 * LOCKING:
cca3974e 4807 * spin_lock_irqsave(host lock)
1da177e4 4808 */
8e0e694a 4809void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4810{
4811 struct ata_port *ap = qc->ap;
4812
dedaf2b0
TH
4813 /* Make sure only one non-NCQ command is outstanding. The
4814 * check is skipped for old EH because it reuses active qc to
4815 * request ATAPI sense.
4816 */
4817 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4818
4819 if (qc->tf.protocol == ATA_PROT_NCQ) {
4820 WARN_ON(ap->sactive & (1 << qc->tag));
4821 ap->sactive |= 1 << qc->tag;
4822 } else {
4823 WARN_ON(ap->sactive);
4824 ap->active_tag = qc->tag;
4825 }
4826
e4a70e76 4827 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4828 ap->qc_active |= 1 << qc->tag;
e4a70e76 4829
1da177e4
LT
4830 if (ata_should_dma_map(qc)) {
4831 if (qc->flags & ATA_QCFLAG_SG) {
4832 if (ata_sg_setup(qc))
8e436af9 4833 goto sg_err;
1da177e4
LT
4834 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4835 if (ata_sg_setup_one(qc))
8e436af9 4836 goto sg_err;
1da177e4
LT
4837 }
4838 } else {
4839 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4840 }
4841
4842 ap->ops->qc_prep(qc);
4843
8e0e694a
TH
4844 qc->err_mask |= ap->ops->qc_issue(qc);
4845 if (unlikely(qc->err_mask))
4846 goto err;
4847 return;
1da177e4 4848
8e436af9
TH
4849sg_err:
4850 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
4851 qc->err_mask |= AC_ERR_SYSTEM;
4852err:
4853 ata_qc_complete(qc);
1da177e4
LT
4854}
4855
4856/**
4857 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4858 * @qc: command to issue to device
4859 *
4860 * Using various libata functions and hooks, this function
4861 * starts an ATA command. ATA commands are grouped into
4862 * classes called "protocols", and issuing each type of protocol
4863 * is slightly different.
4864 *
0baab86b
EF
4865 * May be used as the qc_issue() entry in ata_port_operations.
4866 *
1da177e4 4867 * LOCKING:
cca3974e 4868 * spin_lock_irqsave(host lock)
1da177e4
LT
4869 *
4870 * RETURNS:
9a3d9eb0 4871 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
4872 */
4873
9a3d9eb0 4874unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
4875{
4876 struct ata_port *ap = qc->ap;
4877
e50362ec
AL
4878 /* Use polling pio if the LLD doesn't handle
4879 * interrupt driven pio and atapi CDB interrupt.
4880 */
4881 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4882 switch (qc->tf.protocol) {
4883 case ATA_PROT_PIO:
e3472cbe 4884 case ATA_PROT_NODATA:
e50362ec
AL
4885 case ATA_PROT_ATAPI:
4886 case ATA_PROT_ATAPI_NODATA:
4887 qc->tf.flags |= ATA_TFLAG_POLLING;
4888 break;
4889 case ATA_PROT_ATAPI_DMA:
4890 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 4891 /* see ata_dma_blacklisted() */
e50362ec
AL
4892 BUG();
4893 break;
4894 default:
4895 break;
4896 }
4897 }
4898
3d3cca37
TH
4899 /* Some controllers show flaky interrupt behavior after
4900 * setting xfer mode. Use polling instead.
4901 */
4902 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4903 qc->tf.feature == SETFEATURES_XFER) &&
4904 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4905 qc->tf.flags |= ATA_TFLAG_POLLING;
4906
312f7da2 4907 /* select the device */
1da177e4
LT
4908 ata_dev_select(ap, qc->dev->devno, 1, 0);
4909
312f7da2 4910 /* start the command */
1da177e4
LT
4911 switch (qc->tf.protocol) {
4912 case ATA_PROT_NODATA:
312f7da2
AL
4913 if (qc->tf.flags & ATA_TFLAG_POLLING)
4914 ata_qc_set_polling(qc);
4915
e5338254 4916 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
4917 ap->hsm_task_state = HSM_ST_LAST;
4918
4919 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4920 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 4921
1da177e4
LT
4922 break;
4923
4924 case ATA_PROT_DMA:
587005de 4925 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4926
1da177e4
LT
4927 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4928 ap->ops->bmdma_setup(qc); /* set up bmdma */
4929 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 4930 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4931 break;
4932
312f7da2
AL
4933 case ATA_PROT_PIO:
4934 if (qc->tf.flags & ATA_TFLAG_POLLING)
4935 ata_qc_set_polling(qc);
1da177e4 4936
e5338254 4937 ata_tf_to_host(ap, &qc->tf);
312f7da2 4938
54f00389
AL
4939 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4940 /* PIO data out protocol */
4941 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 4942 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4943
4944 /* always send first data block using
e27486db 4945 * the ata_pio_task() codepath.
54f00389 4946 */
312f7da2 4947 } else {
54f00389
AL
4948 /* PIO data in protocol */
4949 ap->hsm_task_state = HSM_ST;
4950
4951 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4952 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4953
4954 /* if polling, ata_pio_task() handles the rest.
4955 * otherwise, interrupt handler takes over from here.
4956 */
312f7da2
AL
4957 }
4958
1da177e4
LT
4959 break;
4960
1da177e4 4961 case ATA_PROT_ATAPI:
1da177e4 4962 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
4963 if (qc->tf.flags & ATA_TFLAG_POLLING)
4964 ata_qc_set_polling(qc);
4965
e5338254 4966 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 4967
312f7da2
AL
4968 ap->hsm_task_state = HSM_ST_FIRST;
4969
4970 /* send cdb by polling if no cdb interrupt */
4971 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4972 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 4973 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4974 break;
4975
4976 case ATA_PROT_ATAPI_DMA:
587005de 4977 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4978
1da177e4
LT
4979 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4980 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
4981 ap->hsm_task_state = HSM_ST_FIRST;
4982
4983 /* send cdb by polling if no cdb interrupt */
4984 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 4985 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4986 break;
4987
4988 default:
4989 WARN_ON(1);
9a3d9eb0 4990 return AC_ERR_SYSTEM;
1da177e4
LT
4991 }
4992
4993 return 0;
4994}
4995
1da177e4
LT
4996/**
4997 * ata_host_intr - Handle host interrupt for given (port, task)
4998 * @ap: Port on which interrupt arrived (possibly...)
4999 * @qc: Taskfile currently active in engine
5000 *
5001 * Handle host interrupt for given queued command. Currently,
5002 * only DMA interrupts are handled. All other commands are
5003 * handled via polling with interrupts disabled (nIEN bit).
5004 *
5005 * LOCKING:
cca3974e 5006 * spin_lock_irqsave(host lock)
1da177e4
LT
5007 *
5008 * RETURNS:
5009 * One if interrupt was handled, zero if not (shared irq).
5010 */
5011
5012inline unsigned int ata_host_intr (struct ata_port *ap,
5013 struct ata_queued_cmd *qc)
5014{
ea54763f 5015 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 5016 u8 status, host_stat = 0;
1da177e4 5017
312f7da2 5018 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5019 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5020
312f7da2
AL
5021 /* Check whether we are expecting interrupt in this state */
5022 switch (ap->hsm_task_state) {
5023 case HSM_ST_FIRST:
6912ccd5
AL
5024 /* Some pre-ATAPI-4 devices assert INTRQ
5025 * at this state when ready to receive CDB.
5026 */
1da177e4 5027
312f7da2
AL
5028 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
5029 * The flag was turned on only for atapi devices.
5030 * No need to check is_atapi_taskfile(&qc->tf) again.
5031 */
5032 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5033 goto idle_irq;
1da177e4 5034 break;
312f7da2
AL
5035 case HSM_ST_LAST:
5036 if (qc->tf.protocol == ATA_PROT_DMA ||
5037 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5038 /* check status of DMA engine */
5039 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
5040 VPRINTK("ata%u: host_stat 0x%X\n",
5041 ap->print_id, host_stat);
312f7da2
AL
5042
5043 /* if it's not our irq... */
5044 if (!(host_stat & ATA_DMA_INTR))
5045 goto idle_irq;
5046
5047 /* before we do anything else, clear DMA-Start bit */
5048 ap->ops->bmdma_stop(qc);
a4f16610
AL
5049
5050 if (unlikely(host_stat & ATA_DMA_ERR)) {
5051 /* error when transfering data to/from memory */
5052 qc->err_mask |= AC_ERR_HOST_BUS;
5053 ap->hsm_task_state = HSM_ST_ERR;
5054 }
312f7da2
AL
5055 }
5056 break;
5057 case HSM_ST:
5058 break;
1da177e4
LT
5059 default:
5060 goto idle_irq;
5061 }
5062
312f7da2
AL
5063 /* check altstatus */
5064 status = ata_altstatus(ap);
5065 if (status & ATA_BUSY)
5066 goto idle_irq;
1da177e4 5067
312f7da2
AL
5068 /* check main status, clearing INTRQ */
5069 status = ata_chk_status(ap);
5070 if (unlikely(status & ATA_BUSY))
5071 goto idle_irq;
1da177e4 5072
312f7da2
AL
5073 /* ack bmdma irq events */
5074 ap->ops->irq_clear(ap);
1da177e4 5075
bb5cb290 5076 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5077
5078 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5079 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5080 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5081
1da177e4
LT
5082 return 1; /* irq handled */
5083
5084idle_irq:
5085 ap->stats.idle_irq++;
5086
5087#ifdef ATA_IRQ_TRAP
5088 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5089 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5090 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5091 return 1;
1da177e4
LT
5092 }
5093#endif
5094 return 0; /* irq not handled */
5095}
5096
5097/**
5098 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5099 * @irq: irq line (unused)
cca3974e 5100 * @dev_instance: pointer to our ata_host information structure
1da177e4 5101 *
0cba632b
JG
5102 * Default interrupt handler for PCI IDE devices. Calls
5103 * ata_host_intr() for each port that is not disabled.
5104 *
1da177e4 5105 * LOCKING:
cca3974e 5106 * Obtains host lock during operation.
1da177e4
LT
5107 *
5108 * RETURNS:
0cba632b 5109 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5110 */
5111
7d12e780 5112irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5113{
cca3974e 5114 struct ata_host *host = dev_instance;
1da177e4
LT
5115 unsigned int i;
5116 unsigned int handled = 0;
5117 unsigned long flags;
5118
5119 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5120 spin_lock_irqsave(&host->lock, flags);
1da177e4 5121
cca3974e 5122 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5123 struct ata_port *ap;
5124
cca3974e 5125 ap = host->ports[i];
c1389503 5126 if (ap &&
029f5468 5127 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5128 struct ata_queued_cmd *qc;
5129
5130 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5131 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5132 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5133 handled |= ata_host_intr(ap, qc);
5134 }
5135 }
5136
cca3974e 5137 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5138
5139 return IRQ_RETVAL(handled);
5140}
5141
34bf2170
TH
5142/**
5143 * sata_scr_valid - test whether SCRs are accessible
5144 * @ap: ATA port to test SCR accessibility for
5145 *
5146 * Test whether SCRs are accessible for @ap.
5147 *
5148 * LOCKING:
5149 * None.
5150 *
5151 * RETURNS:
5152 * 1 if SCRs are accessible, 0 otherwise.
5153 */
5154int sata_scr_valid(struct ata_port *ap)
5155{
5156 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5157}
5158
5159/**
5160 * sata_scr_read - read SCR register of the specified port
5161 * @ap: ATA port to read SCR for
5162 * @reg: SCR to read
5163 * @val: Place to store read value
5164 *
5165 * Read SCR register @reg of @ap into *@val. This function is
5166 * guaranteed to succeed if the cable type of the port is SATA
5167 * and the port implements ->scr_read.
5168 *
5169 * LOCKING:
5170 * None.
5171 *
5172 * RETURNS:
5173 * 0 on success, negative errno on failure.
5174 */
5175int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5176{
5177 if (sata_scr_valid(ap)) {
5178 *val = ap->ops->scr_read(ap, reg);
5179 return 0;
5180 }
5181 return -EOPNOTSUPP;
5182}
5183
5184/**
5185 * sata_scr_write - write SCR register of the specified port
5186 * @ap: ATA port to write SCR for
5187 * @reg: SCR to write
5188 * @val: value to write
5189 *
5190 * Write @val to SCR register @reg of @ap. This function is
5191 * guaranteed to succeed if the cable type of the port is SATA
5192 * and the port implements ->scr_read.
5193 *
5194 * LOCKING:
5195 * None.
5196 *
5197 * RETURNS:
5198 * 0 on success, negative errno on failure.
5199 */
5200int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5201{
5202 if (sata_scr_valid(ap)) {
5203 ap->ops->scr_write(ap, reg, val);
5204 return 0;
5205 }
5206 return -EOPNOTSUPP;
5207}
5208
5209/**
5210 * sata_scr_write_flush - write SCR register of the specified port and flush
5211 * @ap: ATA port to write SCR for
5212 * @reg: SCR to write
5213 * @val: value to write
5214 *
5215 * This function is identical to sata_scr_write() except that this
5216 * function performs flush after writing to the register.
5217 *
5218 * LOCKING:
5219 * None.
5220 *
5221 * RETURNS:
5222 * 0 on success, negative errno on failure.
5223 */
5224int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5225{
5226 if (sata_scr_valid(ap)) {
5227 ap->ops->scr_write(ap, reg, val);
5228 ap->ops->scr_read(ap, reg);
5229 return 0;
5230 }
5231 return -EOPNOTSUPP;
5232}
5233
5234/**
5235 * ata_port_online - test whether the given port is online
5236 * @ap: ATA port to test
5237 *
5238 * Test whether @ap is online. Note that this function returns 0
5239 * if online status of @ap cannot be obtained, so
5240 * ata_port_online(ap) != !ata_port_offline(ap).
5241 *
5242 * LOCKING:
5243 * None.
5244 *
5245 * RETURNS:
5246 * 1 if the port online status is available and online.
5247 */
5248int ata_port_online(struct ata_port *ap)
5249{
5250 u32 sstatus;
5251
5252 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5253 return 1;
5254 return 0;
5255}
5256
5257/**
5258 * ata_port_offline - test whether the given port is offline
5259 * @ap: ATA port to test
5260 *
5261 * Test whether @ap is offline. Note that this function returns
5262 * 0 if offline status of @ap cannot be obtained, so
5263 * ata_port_online(ap) != !ata_port_offline(ap).
5264 *
5265 * LOCKING:
5266 * None.
5267 *
5268 * RETURNS:
5269 * 1 if the port offline status is available and offline.
5270 */
5271int ata_port_offline(struct ata_port *ap)
5272{
5273 u32 sstatus;
5274
5275 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5276 return 1;
5277 return 0;
5278}
0baab86b 5279
/* Issue a cache-flush command to @dev when the device needs/supports
 * one (ata_try_flush_cache() decides).  Picks FLUSH CACHE EXT when the
 * device carries ATA_DFLAG_FLUSH_EXT — presumably LBA48-capable drives,
 * TODO confirm — and plain FLUSH CACHE otherwise.  Returns 0 on success
 * or when no flush is required, -EIO if the command failed.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	/* nothing to do for devices that don't need/support flushing */
	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
5301
/**
 *	ata_host_request_pm - request a PM operation from EH on every port
 *	@host: host whose ports should perform the PM operation
 *	@mesg: PM message to record in each port
 *	@action: EH action mask to request
 *	@ehi_flags: EH info flags to set
 *	@wait: if non-zero, wait for EH on each port and stop at the
 *	       first failure
 *
 *	Helper shared by ata_host_suspend() and ata_host_resume().  The
 *	actual PM work is done by EH; this only records the request and
 *	schedules EH, optionally waiting for completion per port.
 *
 *	RETURNS:
 *	0 on success; when @wait is set, the first non-zero per-port
 *	result otherwise.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* EH reports back through *ap->pm_result */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
5348
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	/* wait==1: don't return until EH has suspended every port */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

 fail:
	/* roll the already-suspended ports back to operational state */
	ata_host_resume(host);
	return rc;
}
5399
5400/**
cca3974e
JG
5401 * ata_host_resume - resume host
5402 * @host: host to resume
500530f6 5403 *
cca3974e 5404 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5405 * function requests EH to perform PM operations and returns.
5406 * Note that all resume operations are performed parallely.
5407 *
5408 * LOCKING:
5409 * Kernel thread context (may sleep).
5410 */
cca3974e 5411void ata_host_resume(struct ata_host *host)
500530f6 5412{
cca3974e
JG
5413 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5414 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5415 host->dev->power.power_state = PMSG_ON;
500530f6
TH
5416}
5417
c893a3ae
RD
5418/**
5419 * ata_port_start - Set port up for dma.
5420 * @ap: Port to initialize
5421 *
5422 * Called just after data structures for each port are
5423 * initialized. Allocates space for PRD table.
5424 *
5425 * May be used as the port_start() entry in ata_port_operations.
5426 *
5427 * LOCKING:
5428 * Inherited from caller.
5429 */
f0d36efd 5430int ata_port_start(struct ata_port *ap)
1da177e4 5431{
2f1f610b 5432 struct device *dev = ap->dev;
6037d6bb 5433 int rc;
1da177e4 5434
f0d36efd
TH
5435 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5436 GFP_KERNEL);
1da177e4
LT
5437 if (!ap->prd)
5438 return -ENOMEM;
5439
6037d6bb 5440 rc = ata_pad_alloc(ap, dev);
f0d36efd 5441 if (rc)
6037d6bb 5442 return rc;
1da177e4 5443
f0d36efd
TH
5444 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5445 (unsigned long long)ap->prd_dma);
1da177e4
LT
5446 return 0;
5447}
5448
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Clear only the region past ATA_DEVICE_CLEAR_OFFSET; fields
	 * before the offset (e.g. the flag bits handled above) are
	 * meant to survive re-initialization. */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);

	/* no transfer-mode restrictions until probing learns better */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5480
/**
 *	ata_port_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host *host,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;	/* port stays disabled until probed */
	ap->print_id = ata_print_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	/* the second port may carry its own port_info via pinfo2 —
	 * presumably for controllers with asymmetric channels, TODO
	 * confirm; otherwise fall back to the shared probe-ent values */
	if (port_no == 1 && ent->pinfo2) {
		ap->pio_mask = ent->pinfo2->pio_mask;
		ap->mwdma_mask = ent->pinfo2->mwdma_mask;
		ap->udma_mask = ent->pinfo2->udma_mask;
		ap->flags |= ent->pinfo2->flags;
		ap->ops = ent->pinfo2->port_ops;
	} else {
		ap->pio_mask = ent->pio_mask;
		ap->mwdma_mask = ent->mwdma_mask;
		ap->udma_mask = ent->udma_mask;
		ap->flags |= ent->port_flags;
		ap->ops = ent->port_ops;
	}
	/* no SATA speed limit yet; ata_device_add() narrows this from
	 * SCR_CONTROL once the port is started */
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
5556
155a8a9c 5557/**
4608c160
TH
5558 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5559 * @ap: ATA port to initialize SCSI host for
5560 * @shost: SCSI host associated with @ap
155a8a9c 5561 *
4608c160 5562 * Initialize SCSI host @shost associated with ATA port @ap.
155a8a9c
BK
5563 *
5564 * LOCKING:
5565 * Inherited from caller.
5566 */
4608c160 5567static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
155a8a9c 5568{
cca3974e 5569 ap->scsi_host = shost;
155a8a9c 5570
44877b4e 5571 shost->unique_id = ap->print_id;
4608c160
TH
5572 shost->max_id = 16;
5573 shost->max_lun = 1;
5574 shost->max_channel = 1;
5575 shost->max_cmd_len = 12;
155a8a9c
BK
5576}
5577
1da177e4 5578/**
996139f1 5579 * ata_port_add - Attach low-level ATA driver to system
1da177e4 5580 * @ent: Information provided by low-level driver
cca3974e 5581 * @host: Collections of ports to which we add
1da177e4
LT
5582 * @port_no: Port number associated with this host
5583 *
0cba632b
JG
5584 * Attach low-level ATA driver to system.
5585 *
1da177e4 5586 * LOCKING:
0cba632b 5587 * PCI/etc. bus probe sem.
1da177e4
LT
5588 *
5589 * RETURNS:
0cba632b 5590 * New ata_port on success, for NULL on error.
1da177e4 5591 */
996139f1 5592static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
cca3974e 5593 struct ata_host *host,
1da177e4
LT
5594 unsigned int port_no)
5595{
996139f1 5596 struct Scsi_Host *shost;
1da177e4 5597 struct ata_port *ap;
1da177e4
LT
5598
5599 DPRINTK("ENTER\n");
aec5c3c1 5600
52783c5d 5601 if (!ent->port_ops->error_handler &&
cca3974e 5602 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
aec5c3c1
TH
5603 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5604 port_no);
5605 return NULL;
5606 }
5607
996139f1
JG
5608 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5609 if (!shost)
1da177e4
LT
5610 return NULL;
5611
996139f1 5612 shost->transportt = &ata_scsi_transport_template;
30afc84c 5613
996139f1 5614 ap = ata_shost_to_port(shost);
1da177e4 5615
cca3974e 5616 ata_port_init(ap, host, ent, port_no);
996139f1 5617 ata_port_init_shost(ap, shost);
1da177e4 5618
1da177e4 5619 return ap;
1da177e4
LT
5620}
5621
f0d36efd
TH
5622static void ata_host_release(struct device *gendev, void *res)
5623{
5624 struct ata_host *host = dev_get_drvdata(gendev);
5625 int i;
5626
5627 for (i = 0; i < host->n_ports; i++) {
5628 struct ata_port *ap = host->ports[i];
5629
5630 if (!ap)
5631 continue;
5632
5633 if (ap->ops->port_stop)
5634 ap->ops->port_stop(ap);
5635
5636 scsi_host_put(ap->scsi_host);
5637 }
5638
5639 if (host->ops->host_stop)
5640 host->ops->host_stop(host);
5641}
5642
b03732f0 5643/**
cca3974e
JG
5644 * ata_sas_host_init - Initialize a host struct
5645 * @host: host to initialize
5646 * @dev: device host is attached to
5647 * @flags: host flags
5648 * @ops: port_ops
b03732f0
BK
5649 *
5650 * LOCKING:
5651 * PCI/etc. bus probe sem.
5652 *
5653 */
5654
cca3974e
JG
5655void ata_host_init(struct ata_host *host, struct device *dev,
5656 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 5657{
cca3974e
JG
5658 spin_lock_init(&host->lock);
5659 host->dev = dev;
5660 host->flags = flags;
5661 host->ops = ops;
b03732f0
BK
5662}
5663
1da177e4 5664/**
0cba632b
JG
5665 * ata_device_add - Register hardware device with ATA and SCSI layers
5666 * @ent: Probe information describing hardware device to be registered
5667 *
5668 * This function processes the information provided in the probe
5669 * information struct @ent, allocates the necessary ATA and SCSI
5670 * host information structures, initializes them, and registers
5671 * everything with requisite kernel subsystems.
5672 *
5673 * This function requests irqs, probes the ATA bus, and probes
5674 * the SCSI bus.
1da177e4
LT
5675 *
5676 * LOCKING:
0cba632b 5677 * PCI/etc. bus probe sem.
1da177e4
LT
5678 *
5679 * RETURNS:
0cba632b 5680 * Number of ports registered. Zero on error (no ports registered).
1da177e4 5681 */
057ace5e 5682int ata_device_add(const struct ata_probe_ent *ent)
1da177e4 5683{
6d0500df 5684 unsigned int i;
1da177e4 5685 struct device *dev = ent->dev;
cca3974e 5686 struct ata_host *host;
39b07ce6 5687 int rc;
1da177e4
LT
5688
5689 DPRINTK("ENTER\n");
f20b16ff 5690
02f076aa
AC
5691 if (ent->irq == 0) {
5692 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5693 return 0;
5694 }
f0d36efd
TH
5695
5696 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
5697 return 0;
5698
1da177e4 5699 /* alloc a container for our list of ATA ports (buses) */
f0d36efd
TH
5700 host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
5701 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
cca3974e 5702 if (!host)
f0d36efd
TH
5703 goto err_out;
5704 devres_add(dev, host);
5705 dev_set_drvdata(dev, host);
1da177e4 5706
cca3974e
JG
5707 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5708 host->n_ports = ent->n_ports;
5709 host->irq = ent->irq;
5710 host->irq2 = ent->irq2;
0d5ff566 5711 host->iomap = ent->iomap;
cca3974e 5712 host->private_data = ent->private_data;
1da177e4
LT
5713
5714 /* register each port bound to this device */
cca3974e 5715 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5716 struct ata_port *ap;
5717 unsigned long xfer_mode_mask;
2ec7df04 5718 int irq_line = ent->irq;
1da177e4 5719
cca3974e 5720 ap = ata_port_add(ent, host, i);
c38778c3 5721 host->ports[i] = ap;
1da177e4
LT
5722 if (!ap)
5723 goto err_out;
5724
dd5b06c4
TH
5725 /* dummy? */
5726 if (ent->dummy_port_mask & (1 << i)) {
5727 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5728 ap->ops = &ata_dummy_port_ops;
5729 continue;
5730 }
5731
5732 /* start port */
5733 rc = ap->ops->port_start(ap);
5734 if (rc) {
cca3974e
JG
5735 host->ports[i] = NULL;
5736 scsi_host_put(ap->scsi_host);
dd5b06c4
TH
5737 goto err_out;
5738 }
5739
2ec7df04
AC
5740 /* Report the secondary IRQ for second channel legacy */
5741 if (i == 1 && ent->irq2)
5742 irq_line = ent->irq2;
5743
1da177e4
LT
5744 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
5745 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5746 (ap->pio_mask << ATA_SHIFT_PIO);
5747
5748 /* print per-port info to dmesg */
0d5ff566
TH
5749 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5750 "ctl 0x%p bmdma 0x%p irq %d\n",
f15a1daf
TH
5751 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5752 ata_mode_string(xfer_mode_mask),
5753 ap->ioaddr.cmd_addr,
5754 ap->ioaddr.ctl_addr,
5755 ap->ioaddr.bmdma_addr,
2ec7df04 5756 irq_line);
1da177e4 5757
0f0a3ad3
TH
5758 /* freeze port before requesting IRQ */
5759 ata_eh_freeze_port(ap);
1da177e4
LT
5760 }
5761
2ec7df04 5762 /* obtain irq, that may be shared between channels */
f0d36efd
TH
5763 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
5764 ent->irq_flags, DRV_NAME, host);
39b07ce6
JG
5765 if (rc) {
5766 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5767 ent->irq, rc);
1da177e4 5768 goto err_out;
39b07ce6 5769 }
1da177e4 5770
2ec7df04
AC
5771 /* do we have a second IRQ for the other channel, eg legacy mode */
5772 if (ent->irq2) {
5773 /* We will get weird core code crashes later if this is true
5774 so trap it now */
5775 BUG_ON(ent->irq == ent->irq2);
5776
f0d36efd
TH
5777 rc = devm_request_irq(dev, ent->irq2,
5778 ent->port_ops->irq_handler, ent->irq_flags,
5779 DRV_NAME, host);
2ec7df04
AC
5780 if (rc) {
5781 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5782 ent->irq2, rc);
f0d36efd 5783 goto err_out;
2ec7df04
AC
5784 }
5785 }
5786
f0d36efd 5787 /* resource acquisition complete */
b878ca5d 5788 devres_remove_group(dev, ata_device_add);
f0d36efd 5789
1da177e4
LT
5790 /* perform each probe synchronously */
5791 DPRINTK("probe begin\n");
cca3974e
JG
5792 for (i = 0; i < host->n_ports; i++) {
5793 struct ata_port *ap = host->ports[i];
5a04bf4b 5794 u32 scontrol;
1da177e4
LT
5795 int rc;
5796
5a04bf4b
TH
5797 /* init sata_spd_limit to the current value */
5798 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5799 int spd = (scontrol >> 4) & 0xf;
5800 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5801 }
5802 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5803
cca3974e 5804 rc = scsi_add_host(ap->scsi_host, dev);
1da177e4 5805 if (rc) {
f15a1daf 5806 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
1da177e4
LT
5807 /* FIXME: do something useful here */
5808 /* FIXME: handle unconditional calls to
5809 * scsi_scan_host and ata_host_remove, below,
5810 * at the very least
5811 */
5812 }
3e706399 5813
52783c5d 5814 if (ap->ops->error_handler) {
1cdaf534 5815 struct ata_eh_info *ehi = &ap->eh_info;
3e706399
TH
5816 unsigned long flags;
5817
5818 ata_port_probe(ap);
5819
5820 /* kick EH for boot probing */
ba6a1308 5821 spin_lock_irqsave(ap->lock, flags);
3e706399 5822
1cdaf534
TH
5823 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5824 ehi->action |= ATA_EH_SOFTRESET;
5825 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
3e706399 5826
b51e9e5d 5827 ap->pflags |= ATA_PFLAG_LOADING;
3e706399
TH
5828 ata_port_schedule_eh(ap);
5829
ba6a1308 5830 spin_unlock_irqrestore(ap->lock, flags);
3e706399
TH
5831
5832 /* wait for EH to finish */
5833 ata_port_wait_eh(ap);
5834 } else {
44877b4e 5835 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
3e706399 5836 rc = ata_bus_probe(ap);
44877b4e 5837 DPRINTK("ata%u: bus probe end\n", ap->print_id);
3e706399
TH
5838
5839 if (rc) {
5840 /* FIXME: do something useful here?
5841 * Current libata behavior will
5842 * tear down everything when
5843 * the module is removed
5844 * or the h/w is unplugged.
5845 */
5846 }
5847 }
1da177e4
LT
5848 }
5849
5850 /* probes are done, now scan each port's disk(s) */
c893a3ae 5851 DPRINTK("host probe begin\n");
cca3974e
JG
5852 for (i = 0; i < host->n_ports; i++) {
5853 struct ata_port *ap = host->ports[i];
1da177e4 5854
644dd0cc 5855 ata_scsi_scan_host(ap);
1da177e4
LT
5856 }
5857
1da177e4
LT
5858 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5859 return ent->n_ports; /* success */
5860
f0d36efd
TH
5861 err_out:
5862 devres_release_group(dev, ata_device_add);
5863 dev_set_drvdata(dev, NULL);
5864 VPRINTK("EXIT, returning %d\n", rc);
1da177e4
LT
5865 return 0;
5866}
5867
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	/* ports without new-style EH go straight to SCSI host removal */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);	/* flush anything already running */
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);	/* flush anything queued meanwhile */

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
5925
0529c159
TH
5926/**
5927 * ata_host_detach - Detach all ports of an ATA host
5928 * @host: Host to detach
5929 *
5930 * Detach all ports of @host.
5931 *
5932 * LOCKING:
5933 * Kernel thread context (may sleep).
5934 */
5935void ata_host_detach(struct ata_host *host)
5936{
5937 int i;
5938
5939 for (i = 0; i < host->n_ports; i++)
5940 ata_port_detach(host->ports[i]);
5941}
5942
f6d950e2
BK
5943struct ata_probe_ent *
5944ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5945{
5946 struct ata_probe_ent *probe_ent;
5947
4d05447e 5948 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
f6d950e2
BK
5949 if (!probe_ent) {
5950 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5951 kobject_name(&(dev->kobj)));
5952 return NULL;
5953 }
5954
5955 INIT_LIST_HEAD(&probe_ent->node);
5956 probe_ent->dev = dev;
5957
5958 probe_ent->sht = port->sht;
cca3974e 5959 probe_ent->port_flags = port->flags;
f6d950e2
BK
5960 probe_ent->pio_mask = port->pio_mask;
5961 probe_ent->mwdma_mask = port->mwdma_mask;
5962 probe_ent->udma_mask = port->udma_mask;
5963 probe_ent->port_ops = port->port_ops;
d639ca94 5964 probe_ent->private_data = port->private_data;
f6d950e2
BK
5965
5966 return probe_ent;
5967}
5968
1da177e4
LT
5969/**
5970 * ata_std_ports - initialize ioaddr with standard port offsets.
5971 * @ioaddr: IO address structure to be initialized
0baab86b
EF
5972 *
5973 * Utility function which initializes data_addr, error_addr,
5974 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5975 * device_addr, status_addr, and command_addr to standard offsets
5976 * relative to cmd_addr.
5977 *
5978 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 5979 */
0baab86b 5980
1da177e4
LT
5981void ata_std_ports(struct ata_ioports *ioaddr)
5982{
5983 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5984 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5985 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5986 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5987 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5988 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5989 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5990 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5991 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5992 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5993}
5994
0baab86b 5995
374b1873
JG
5996#ifdef CONFIG_PCI
5997
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(pci_dev_to_dev(pdev));

	ata_host_detach(host);
}
6016
6017/* move to PCI subsystem */
057ace5e 6018int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6019{
6020 unsigned long tmp = 0;
6021
6022 switch (bits->width) {
6023 case 1: {
6024 u8 tmp8 = 0;
6025 pci_read_config_byte(pdev, bits->reg, &tmp8);
6026 tmp = tmp8;
6027 break;
6028 }
6029 case 2: {
6030 u16 tmp16 = 0;
6031 pci_read_config_word(pdev, bits->reg, &tmp16);
6032 tmp = tmp16;
6033 break;
6034 }
6035 case 4: {
6036 u32 tmp32 = 0;
6037 pci_read_config_dword(pdev, bits->reg, &tmp32);
6038 tmp = tmp32;
6039 break;
6040 }
6041
6042 default:
6043 return -EINVAL;
6044 }
6045
6046 tmp &= bits->mask;
6047
6048 return (tmp == bits->val) ? 1 : 0;
6049}
9b847548 6050
3c5100c1 6051void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6052{
6053 pci_save_state(pdev);
4c90d971 6054 pci_disable_device(pdev);
500530f6 6055
4c90d971 6056 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6057 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6058}
6059
553c4aa6 6060int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6061{
553c4aa6
TH
6062 int rc;
6063
9b847548
JA
6064 pci_set_power_state(pdev, PCI_D0);
6065 pci_restore_state(pdev);
553c4aa6 6066
b878ca5d 6067 rc = pcim_enable_device(pdev);
553c4aa6
TH
6068 if (rc) {
6069 dev_printk(KERN_ERR, &pdev->dev,
6070 "failed to enable device after resume (%d)\n", rc);
6071 return rc;
6072 }
6073
9b847548 6074 pci_set_master(pdev);
553c4aa6 6075 return 0;
500530f6
TH
6076}
6077
3c5100c1 6078int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6079{
cca3974e 6080 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6081 int rc = 0;
6082
cca3974e 6083 rc = ata_host_suspend(host, mesg);
500530f6
TH
6084 if (rc)
6085 return rc;
6086
3c5100c1 6087 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6088
6089 return 0;
6090}
6091
6092int ata_pci_device_resume(struct pci_dev *pdev)
6093{
cca3974e 6094 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6095 int rc;
500530f6 6096
553c4aa6
TH
6097 rc = ata_pci_device_do_resume(pdev);
6098 if (rc == 0)
6099 ata_host_resume(host);
6100 return rc;
9b847548 6101}
1da177e4
LT
6102#endif /* CONFIG_PCI */
6103
6104
1da177e4
LT
6105static int __init ata_init(void)
6106{
a8601e5f 6107 ata_probe_timeout *= HZ;
1da177e4
LT
6108 ata_wq = create_workqueue("ata");
6109 if (!ata_wq)
6110 return -ENOMEM;
6111
453b07ac
TH
6112 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6113 if (!ata_aux_wq) {
6114 destroy_workqueue(ata_wq);
6115 return -ENOMEM;
6116 }
6117
1da177e4
LT
6118 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6119 return 0;
6120}
6121
6122static void __exit ata_exit(void)
6123{
6124 destroy_workqueue(ata_wq);
453b07ac 6125 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6126}
6127
/* Module entry/exit points.  Registered via subsys_initcall so libata
 * initializes early during boot — presumably before the drivers that
 * depend on it; confirm against init ordering docs. */
subsys_initcall(ata_init);
module_exit(ata_exit);
6130
67846b30 6131static unsigned long ratelimit_time;
34af946a 6132static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6133
6134int ata_ratelimit(void)
6135{
6136 int rc;
6137 unsigned long flags;
6138
6139 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6140
6141 if (time_after(jiffies, ratelimit_time)) {
6142 rc = 1;
6143 ratelimit_time = jiffies + (HZ/5);
6144 } else
6145 rc = 0;
6146
6147 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6148
6149 return rc;
6150}
6151
c22daff4
TH
6152/**
6153 * ata_wait_register - wait until register value changes
6154 * @reg: IO-mapped register
6155 * @mask: Mask to apply to read register value
6156 * @val: Wait condition
6157 * @interval_msec: polling interval in milliseconds
6158 * @timeout_msec: timeout in milliseconds
6159 *
6160 * Waiting for some bits of register to change is a common
6161 * operation for ATA controllers. This function reads 32bit LE
6162 * IO-mapped register @reg and tests for the following condition.
6163 *
6164 * (*@reg & mask) != val
6165 *
6166 * If the condition is met, it returns; otherwise, the process is
6167 * repeated after @interval_msec until timeout.
6168 *
6169 * LOCKING:
6170 * Kernel thread context (may sleep)
6171 *
6172 * RETURNS:
6173 * The final register value.
6174 */
6175u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6176 unsigned long interval_msec,
6177 unsigned long timeout_msec)
6178{
6179 unsigned long timeout;
6180 u32 tmp;
6181
6182 tmp = ioread32(reg);
6183
6184 /* Calculate timeout _after_ the first read to make sure
6185 * preceding writes reach the controller before starting to
6186 * eat away the timeout.
6187 */
6188 timeout = jiffies + (timeout_msec * HZ) / 1000;
6189
6190 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6191 msleep(interval_msec);
6192 tmp = ioread32(reg);
6193 }
6194
6195 return tmp;
6196}
6197
dd5b06c4
TH
/*
 * Dummy port_ops
 *
 * Every callback here is a no-op; the status accessors always report
 * "device ready" (ATA_DRDY) and qc_issue fails every command with
 * AC_ERR_SYSTEM, so nothing ever executes on a port using these ops.
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* Always report ready-and-idle so status pollers terminate quickly. */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* Reject any queued command outright. */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
6230
1da177e4
LT
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* SATA PHY debounce timing tables */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);

/* Host and device setup/teardown */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_detach);

/* Command setup, issue and completion */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);

/* Taskfile and register-level helpers */
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);

/* BMDMA helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);

/* Probe, reset and link management */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);

/* Misc utilities */
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);

/* SCSI midlayer glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);

/* SATA SCR register access */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);

/* Host power management */
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);

/* IDENTIFY-data string helpers and device blacklist */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* Transfer-mode timing computation */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
/* PCI host helpers, only built when PCI support is configured */
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

/* Error handling */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);

/* IRQ on/ack helpers */
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
EXPORT_SYMBOL_GPL(ata_irq_ack);
EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);