pata_hpt37x: Updates from drivers/ide work
[linux-2.6-block.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
cb48cab7 62#define DRV_VERSION "2.20" /* must be exactly four chars */
fda0efc5
JG
63
64
d7bb4cc7 65/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
66const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 69
3373efd8
TH
70static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73static void ata_dev_xfermask(struct ata_device *dev);
1da177e4 74
44877b4e 75static unsigned int ata_print_id = 1;
1da177e4
LT
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
c3c013a2
JG
88int libata_fua = 0;
89module_param_named(fua, libata_fua, int, 0444);
90MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
91
a8601e5f
AM
92static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
93module_param(ata_probe_timeout, int, 0444);
94MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
95
d7d0dad6
JG
96int libata_noacpi = 1;
97module_param_named(noacpi, libata_noacpi, int, 0444);
11ef697b
KCA
98MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
99
1da177e4
LT
100MODULE_AUTHOR("Jeff Garzik");
101MODULE_DESCRIPTION("Library module for ATA devices");
102MODULE_LICENSE("GPL");
103MODULE_VERSION(DRV_VERSION);
104
0baab86b 105
1da177e4
LT
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	/* LBA low/mid/high and device registers */
	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	/* "previous" (HOB) register contents for 48-bit commands */
	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	/* bytes 16-19 are reserved in a Register H2D FIS */
	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
147
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *	Used to decode a Register - Device to Host FIS; in that FIS the
 *	command/feature positions carry the Status and Error registers.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	/* HOB ("previous") register contents for 48-bit commands */
	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
176
/*
 * Read/write command lookup table, indexed as:
 *
 *	index = base + fua + lba48 + write
 *
 * where base is 0 (PIO multi), 8 (PIO) or 16 (DMA), fua adds 4,
 * lba48 adds 2 and write adds 1 (see ata_rwcmd_protocol()).
 * A zero entry means "no such command exists" (e.g. FUA is only
 * defined for LBA48 writes).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
206
207/**
8cbd6df1 208 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
209 * @tf: command to examine and configure
210 * @dev: device tf belongs to
1da177e4 211 *
2e9edbf8 212 * Examine the device configuration and tf->flags to calculate
8cbd6df1 213 * the proper read/write commands and protocol to use.
1da177e4
LT
214 *
215 * LOCKING:
216 * caller.
217 */
bd056d7e 218static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 219{
9a3dccc4 220 u8 cmd;
1da177e4 221
9a3dccc4 222 int index, fua, lba48, write;
2e9edbf8 223
9a3dccc4 224 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
225 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
226 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 227
8cbd6df1
AL
228 if (dev->flags & ATA_DFLAG_PIO) {
229 tf->protocol = ATA_PROT_PIO;
9a3dccc4 230 index = dev->multi_count ? 0 : 8;
bd056d7e 231 } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
232 /* Unable to use DMA due to host limitation */
233 tf->protocol = ATA_PROT_PIO;
0565c26d 234 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
235 } else {
236 tf->protocol = ATA_PROT_DMA;
9a3dccc4 237 index = 16;
8cbd6df1 238 }
1da177e4 239
9a3dccc4
TH
240 cmd = ata_rw_cmds[index + fua + lba48 + write];
241 if (cmd) {
242 tf->command = cmd;
243 return 0;
244 }
245 return -1;
1da177e4
LT
246}
247
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			/* LBA48: bits 47:24 come from the HOB registers */
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			/* LBA28: bits 27:24 come from the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		/* CHS: convert cylinder/head/sector to a linear address */
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
290
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Chooses NCQ,
 *	LBA48, LBA28 or CHS addressing, in that order of preference,
 *	based on device capabilities and the size of the request.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	/* internal commands bypass NCQ even on NCQ-capable devices */
	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* FPDMA: tag goes in nsect bits 7:3, count in feature regs */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
413
cb95d562
TH
414/**
415 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
416 * @pio_mask: pio_mask
417 * @mwdma_mask: mwdma_mask
418 * @udma_mask: udma_mask
419 *
420 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
421 * unsigned int xfer_mask.
422 *
423 * LOCKING:
424 * None.
425 *
426 * RETURNS:
427 * Packed xfer_mask.
428 */
429static unsigned int ata_pack_xfermask(unsigned int pio_mask,
430 unsigned int mwdma_mask,
431 unsigned int udma_mask)
432{
433 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
434 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
435 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
436}
437
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
460
/* Maps each transfer class's bit range in an xfer_mask to the XFER_*
 * value of that class's mode 0; used by the mask<->mode converters below.
 */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position / width within xfer_mask */
	u8 base;		/* XFER_* value for mode 0 of this class */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },		/* shift == -1 terminates the table */
};
470
471/**
472 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
473 * @xfer_mask: xfer_mask of interest
474 *
475 * Return matching XFER_* value for @xfer_mask. Only the highest
476 * bit of @xfer_mask is considered.
477 *
478 * LOCKING:
479 * None.
480 *
481 * RETURNS:
482 * Matching XFER_* value, 0 if no match found.
483 */
484static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
485{
486 int highbit = fls(xfer_mask) - 1;
487 const struct ata_xfer_ent *ent;
488
489 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
490 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
491 return ent->base + highbit - ent->shift;
492 return 0;
493}
494
495/**
496 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
497 * @xfer_mode: XFER_* of interest
498 *
499 * Return matching xfer_mask for @xfer_mode.
500 *
501 * LOCKING:
502 * None.
503 *
504 * RETURNS:
505 * Matching xfer_mask, 0 if no match found.
506 */
507static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
508{
509 const struct ata_xfer_ent *ent;
510
511 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
512 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
513 return 1 << (ent->shift + xfer_mode - ent->base);
514 return 0;
515}
516
517/**
518 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
519 * @xfer_mode: XFER_* of interest
520 *
521 * Return matching xfer_shift for @xfer_mode.
522 *
523 * LOCKING:
524 * None.
525 *
526 * RETURNS:
527 * Matching xfer_shift, -1 if no match found.
528 */
529static int ata_xfer_mode2shift(unsigned int xfer_mode)
530{
531 const struct ata_xfer_ent *ent;
532
533 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
534 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
535 return ent->shift;
536 return -1;
537}
538
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @modemask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mode_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* indexed by bit position in xfer_mask; must stay in sync with
	 * the ATA_SHIFT_*/ATA_BITS_* layout used by ata_xfer_tbl */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
584
/* Return a human-readable name for SATA link speed @spd (1-based),
 * or "<unknown>" for zero / out-of-range values.
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int idx = spd - 1;

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[idx];
}
596
/* Take @dev out of service: warn, drop its transfer mode to PIO0 and
 * bump dev->class (presumably to the corresponding *_UNSUP class --
 * NOTE(review): confirm against the ATA_DEV_* enum ordering).
 *
 * NOTE(review): the whole operation, not just the printk, is gated on
 * ata_msg_drv(); verify that this gating is intentional.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		dev->class++;
	}
}
606
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write alternating test patterns into two shadow registers */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	/* read back the last pattern written */
	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
649
1da177e4
LT
650/**
651 * ata_dev_classify - determine device type based on ATA-spec signature
652 * @tf: ATA taskfile register set for device to be identified
653 *
654 * Determine from taskfile register contents whether a device is
655 * ATA or ATAPI, as per "Signature and persistence" section
656 * of ATA/PI spec (volume 1, sect 5.14).
657 *
658 * LOCKING:
659 * None.
660 *
661 * RETURNS:
662 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
663 * the event of failure.
664 */
665
057ace5e 666unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
667{
668 /* Apple's open source Darwin code hints that some devices only
669 * put a proper signature into the LBA mid/high registers,
670 * So, we only check those. It's sufficient for uniqueness.
671 */
672
673 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
674 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
675 DPRINTK("found ATA device by sig\n");
676 return ATA_DEV_ATA;
677 }
678
679 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
680 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
681 DPRINTK("found ATAPI device by sig\n");
682 return ATA_DEV_ATAPI;
683 }
684
685 DPRINTK("unknown device\n");
686 return ATA_DEV_UNKNOWN;
687}
688
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;	/* feature position carries the Error reg */
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;	/* any other error: no device */

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	/* all-zero status on a claimed ATA device means nothing's there */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
747
748/**
6a62a04d 749 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
750 * @id: IDENTIFY DEVICE results we will examine
751 * @s: string into which data is output
752 * @ofs: offset into identify device page
753 * @len: length of string to return. must be an even number.
754 *
755 * The strings in the IDENTIFY DEVICE page are broken up into
756 * 16-bit chunks. Run through the string, and output each
757 * 8-bit chunk linearly, regardless of platform.
758 *
759 * LOCKING:
760 * caller.
761 */
762
6a62a04d
TH
763void ata_id_string(const u16 *id, unsigned char *s,
764 unsigned int ofs, unsigned int len)
1da177e4
LT
765{
766 unsigned int c;
767
768 while (len > 0) {
769 c = id[ofs] >> 8;
770 *s = c;
771 s++;
772
773 c = id[ofs] & 0xff;
774 *s = c;
775 s++;
776
777 ofs++;
778 len -= 2;
779 }
780}
781
0e949ff3 782/**
6a62a04d 783 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
784 * @id: IDENTIFY DEVICE results we will examine
785 * @s: string into which data is output
786 * @ofs: offset into identify device page
787 * @len: length of string to return. must be an odd number.
788 *
6a62a04d 789 * This function is identical to ata_id_string except that it
0e949ff3
TH
790 * trims trailing spaces and terminates the resulting string with
791 * null. @len must be actual maximum length (even number) + 1.
792 *
793 * LOCKING:
794 * caller.
795 */
6a62a04d
TH
796void ata_id_c_string(const u16 *id, unsigned char *s,
797 unsigned int ofs, unsigned int len)
0e949ff3
TH
798{
799 unsigned char *p;
800
801 WARN_ON(!(len & 1));
802
6a62a04d 803 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
804
805 p = s + strnlen(s, len - 1);
806 while (p > s && p[-1] == ' ')
807 p--;
808 *p = '\0';
809}
0baab86b 810
2940740b
TH
811static u64 ata_id_n_sectors(const u16 *id)
812{
813 if (ata_id_has_lba(id)) {
814 if (ata_id_has_lba48(id))
815 return ata_id_u64(id, 100);
816 else
817 return ata_id_u32(id, 60);
818 } else {
819 if (ata_id_current_chs_valid(id))
820 return ata_id_u32(id, 57);
821 else
822 return id[1] * id[3] * id[6];
823 }
824}
825
/**
 *	ata_id_to_dma_mode - Identify DMA mode from id block
 *	@dev: device to identify
 *	@unknown: mode to assume if we cannot tell
 *
 *	Set up the timing values for the device based upon the identify
 *	reported values for the DMA mode.  This function is used by drivers
 *	which rely upon firmware configured modes, but wish to report the
 *	mode correctly when possible.
 *
 *	In addition we emit similarly formatted messages to the default
 *	ata_dev_set_mode handler, in order to provide consistency of
 *	presentation.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	/* word 53 bit 2 set means the UDMA fields (word 88) are valid */
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}
867
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
883
/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	/* set or clear the DEV bit in the device register */
	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}
912
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		/* ATAPI devices may need extra settle time after select */
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
950
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* capability / transfer-mode related words only */
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
989
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 always supported */
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		/* CFA advanced modes map onto PIO5/6 and MWDMA3/4 */
		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1058
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	/* a flush is in progress; don't requeue (see ata_port_flush_task) */
	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
1094
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* block further port_task submissions (see ata_port_queue_task()) */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		/* cancel failed => the work may already be executing;
		 * a second workqueue flush waits for it to finish */
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	/* re-enable port_task submissions */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1137
/* Completion callback for internal commands: wakes up the waiter
 * sleeping in ata_exec_internal_sg().  qc->private_data holds the
 * on-stack struct completion that caller is blocked on.
 */
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
1144
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the internal tag must be free; anything else is a core bug */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the port's active-command bookkeeping so the
	 * internal command runs alone; restored in the finish-up path */
	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		/* total byte count of the sg list */
		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* a failed qc must carry an error mask; synthesize one if the
	 * LLDD didn't set any */
	if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				"zero err_mask for failed "
				"internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1312
2432697b 1313/**
33480a0e 1314 * ata_exec_internal - execute libata internal command
2432697b
TH
1315 * @dev: Device to which the command is sent
1316 * @tf: Taskfile registers for the command and the result
1317 * @cdb: CDB for packet command
1318 * @dma_dir: Data tranfer direction of the command
1319 * @buf: Data buffer of the command
1320 * @buflen: Length of data buffer
1321 *
1322 * Wrapper around ata_exec_internal_sg() which takes simple
1323 * buffer instead of sg list.
1324 *
1325 * LOCKING:
1326 * None. Should be called with kernel context, might sleep.
1327 *
1328 * RETURNS:
1329 * Zero on success, AC_ERR_* mask on failure
1330 */
1331unsigned ata_exec_internal(struct ata_device *dev,
1332 struct ata_taskfile *tf, const u8 *cdb,
1333 int dma_dir, void *buf, unsigned int buflen)
1334{
33480a0e
TH
1335 struct scatterlist *psg = NULL, sg;
1336 unsigned int n_elem = 0;
2432697b 1337
33480a0e
TH
1338 if (dma_dir != DMA_NONE) {
1339 WARN_ON(!buf);
1340 sg_init_one(&sg, buf, buflen);
1341 psg = &sg;
1342 n_elem++;
1343 }
2432697b 1344
33480a0e 1345 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1346}
1347
977e6b9f
TH
1348/**
1349 * ata_do_simple_cmd - execute simple internal command
1350 * @dev: Device to which the command is sent
1351 * @cmd: Opcode to execute
1352 *
1353 * Execute a 'simple' command, that only consists of the opcode
1354 * 'cmd' itself, without filling any other registers
1355 *
1356 * LOCKING:
1357 * Kernel thread context (may sleep).
1358 *
1359 * RETURNS:
1360 * Zero on success, AC_ERR_* mask on failure
e58eb583 1361 */
77b08fb5 1362unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1363{
1364 struct ata_taskfile tf;
e58eb583
TH
1365
1366 ata_tf_init(dev, &tf);
1367
1368 tf.command = cmd;
1369 tf.flags |= ATA_TFLAG_DEVICE;
1370 tf.protocol = ATA_PROT_NODATA;
1371
977e6b9f 1372 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1373}
1374
1bc4ccff
AC
1375/**
1376 * ata_pio_need_iordy - check if iordy needed
1377 * @adev: ATA device
1378 *
1379 * Check if the current speed of the device requires IORDY. Used
1380 * by various controllers for chip configuration.
1381 */
1382
1383unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1384{
1385 int pio;
1386 int speed = adev->pio_mode - XFER_PIO_0;
1387
1388 if (speed < 2)
1389 return 0;
1390 if (speed > 2)
1391 return 1;
2e9edbf8 1392
1bc4ccff
AC
1393 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1394
1395 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1396 pio = adev->id[ATA_ID_EIDE_PIO];
1397 /* Is the speed faster than the drive allows non IORDY ? */
1398 if (pio) {
1399 /* This is cycle times not frequency - watch the logic! */
1400 if (pio > 240) /* PIO2 is 240nS per cycle */
1401 return 1;
1402 return 0;
1403 }
1404 }
1405 return 0;
1406}
1407
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY variant matching the expected class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			/* polling says no device is there after all */
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1532
3373efd8 1533static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1534{
3373efd8 1535 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1536}
1537
a6e6ce8e
TH
1538static void ata_dev_config_ncq(struct ata_device *dev,
1539 char *desc, size_t desc_sz)
1540{
1541 struct ata_port *ap = dev->ap;
1542 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1543
1544 if (!ata_id_has_ncq(dev->id)) {
1545 desc[0] = '\0';
1546 return;
1547 }
6919a0a6
AC
1548 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1549 snprintf(desc, desc_sz, "NCQ (not used)");
1550 return;
1551 }
a6e6ce8e 1552 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1553 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1554 dev->flags |= ATA_DFLAG_NCQ;
1555 }
1556
1557 if (hdepth >= ddepth)
1558 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1559 else
1560 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1561}
1562
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set _SDD */
	rc = ata_acpi_push_id(ap, dev->devno);
	if (rc) {
		/* ACPI failure is logged but not fatal */
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			rc);
	}

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
				sizeof(modelbuf));

		/* word 59 bit 8 set => multi-sector count in low byte valid */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH EXT only matters beyond 28-bit LBA */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min(ATA_MAX_SECTORS_128, dev->max_sectors);

	/* limit ATAPI DMA to R/W commands only */
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
		dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;

	/* give the LLDD a chance to apply its own fixups last */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
1792
/**
 *	ata_cable_40wire - return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.  Suitable for use as the ->cable_detect hook.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
1805
/**
 *	ata_cable_80wire - return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.  Suitable for use as the ->cable_detect hook.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
1818
/**
 *	ata_cable_unknown - return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *	Suitable for use as the ->cable_detect hook.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
1830
/**
 *	ata_cable_sata - return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.  Suitable for
 *	use as the ->cable_detect hook.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
1842
1da177e4
LT
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* each device gets a bounded number of probe attempts */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* phy_reset may have marked the port disabled; re-enable it */
	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	for(i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc)
		goto fail;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev is the device whose probe step failed above */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ap);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
1973
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing DISABLED re-enables probing/communication on @ap */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
1989
3be680b7
TH
1990/**
1991 * sata_print_link_status - Print SATA link status
1992 * @ap: SATA port to printk link status about
1993 *
1994 * This function prints link speed and status of a SATA link.
1995 *
1996 * LOCKING:
1997 * None.
1998 */
43727fbc 1999void sata_print_link_status(struct ata_port *ap)
3be680b7 2000{
6d5f9732 2001 u32 sstatus, scontrol, tmp;
3be680b7 2002
81952c54 2003 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 2004 return;
81952c54 2005 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 2006
81952c54 2007 if (ata_port_online(ap)) {
3be680b7 2008 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
2009 ata_port_printk(ap, KERN_INFO,
2010 "SATA link up %s (SStatus %X SControl %X)\n",
2011 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2012 } else {
f15a1daf
TH
2013 ata_port_printk(ap, KERN_INFO,
2014 "SATA link down (SStatus %X SControl %X)\n",
2015 sstatus, scontrol);
3be680b7
TH
2016 }
2017}
2018
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary; DET == 1 means
	 * device presence detected but phy not yet established */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device behind the link to come out of BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
2073
2074/**
780a87f7
JG
2075 * sata_phy_reset - Reset SATA bus.
2076 * @ap: SATA port associated with target SATA PHY.
1da177e4 2077 *
780a87f7
JG
2078 * This function resets the SATA bus, and then probes
2079 * the bus for devices.
1da177e4
LT
2080 *
2081 * LOCKING:
0cba632b 2082 * PCI/etc. bus probe sem.
1da177e4
LT
2083 *
2084 */
2085void sata_phy_reset(struct ata_port *ap)
2086{
2087 __sata_phy_reset(ap);
198e0fed 2088 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2089 return;
2090 ata_bus_reset(ap);
2091}
2092
ebdfca6e
AC
2093/**
2094 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2095 * @adev: device
2096 *
2097 * Obtain the other device on the same cable, or if none is
2098 * present NULL is returned
2099 */
2e9edbf8 2100
3373efd8 2101struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2102{
3373efd8 2103 struct ata_port *ap = adev->ap;
ebdfca6e 2104 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2105 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2106 return NULL;
2107 return pair;
2108}
2109
1da177e4 2110/**
780a87f7
JG
2111 * ata_port_disable - Disable port.
2112 * @ap: Port to be disabled.
1da177e4 2113 *
780a87f7
JG
2114 * Modify @ap data structure such that the system
2115 * thinks that the entire port is disabled, and should
2116 * never attempt to probe or communicate with devices
2117 * on this port.
2118 *
cca3974e 2119 * LOCKING: host lock, or some other form of
780a87f7 2120 * serialization.
1da177e4
LT
2121 */
2122
2123void ata_port_disable(struct ata_port *ap)
2124{
2125 ap->device[0].class = ATA_DEV_NONE;
2126 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2127 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2128}
2129
1c3fae4d 2130/**
3c567b7d 2131 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2132 * @ap: Port to adjust SATA spd limit for
2133 *
2134 * Adjust SATA spd limit of @ap downward. Note that this
2135 * function only adjusts the limit. The change must be applied
3c567b7d 2136 * using sata_set_spd().
1c3fae4d
TH
2137 *
2138 * LOCKING:
2139 * Inherited from caller.
2140 *
2141 * RETURNS:
2142 * 0 on success, negative errno on failure
2143 */
3c567b7d 2144int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2145{
81952c54
TH
2146 u32 sstatus, spd, mask;
2147 int rc, highbit;
1c3fae4d 2148
81952c54
TH
2149 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2150 if (rc)
2151 return rc;
1c3fae4d
TH
2152
2153 mask = ap->sata_spd_limit;
2154 if (mask <= 1)
2155 return -EINVAL;
2156 highbit = fls(mask) - 1;
2157 mask &= ~(1 << highbit);
2158
81952c54 2159 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2160 if (spd <= 1)
2161 return -EINVAL;
2162 spd--;
2163 mask &= (1 << spd) - 1;
2164 if (!mask)
2165 return -EINVAL;
2166
2167 ap->sata_spd_limit = mask;
2168
f15a1daf
TH
2169 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2170 sata_spd_string(fls(mask)));
1c3fae4d
TH
2171
2172 return 0;
2173}
2174
3c567b7d 2175static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2176{
2177 u32 spd, limit;
2178
2179 if (ap->sata_spd_limit == UINT_MAX)
2180 limit = 0;
2181 else
2182 limit = fls(ap->sata_spd_limit);
2183
2184 spd = (*scontrol >> 4) & 0xf;
2185 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2186
2187 return spd != limit;
2188}
2189
2190/**
3c567b7d 2191 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2192 * @ap: Port in question
2193 *
2194 * Test whether the spd limit in SControl matches
2195 * @ap->sata_spd_limit. This function is used to determine
2196 * whether hardreset is necessary to apply SATA spd
2197 * configuration.
2198 *
2199 * LOCKING:
2200 * Inherited from caller.
2201 *
2202 * RETURNS:
2203 * 1 if SATA spd configuration is needed, 0 otherwise.
2204 */
3c567b7d 2205int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2206{
2207 u32 scontrol;
2208
81952c54 2209 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2210 return 0;
2211
3c567b7d 2212 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2213}
2214
2215/**
3c567b7d 2216 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2217 * @ap: Port to set SATA spd for
2218 *
2219 * Set SATA spd of @ap according to sata_spd_limit.
2220 *
2221 * LOCKING:
2222 * Inherited from caller.
2223 *
2224 * RETURNS:
2225 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2226 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2227 */
3c567b7d 2228int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2229{
2230 u32 scontrol;
81952c54 2231 int rc;
1c3fae4d 2232
81952c54
TH
2233 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2234 return rc;
1c3fae4d 2235
3c567b7d 2236 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2237 return 0;
2238
81952c54
TH
2239 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2240 return rc;
2241
1c3fae4d
TH
2242 return 1;
2243}
2244
452503f9
AC
2245/*
2246 * This mode timing computation functionality is ported over from
2247 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2248 */
2249/*
b352e57d 2250 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2251 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2252 * for UDMA6, which is currently supported only by Maxtor drives.
2253 *
2254 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2255 */
2256
/*
 * Each entry lists mode timings in nanoseconds; 0 means not applicable
 * for that mode.  Field order is assumed to follow struct ata_timing
 * (setup, act8b, rec8b, cyc8b, active, recover, cycle, udma) as used by
 * ata_timing_quantize() below — confirm against <linux/libata.h>.
 */
static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	/* sentinel: terminates the lookup in ata_timing_find_mode() */
	{ 0xFF }
};
2293
/* round v up to a whole number of time units (ceiling division) */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
/* like ENOUGH, but preserves 0 ("unspecified") as 0 */
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

/*
 * ata_timing_quantize - convert nanosecond timings into clock counts
 * @t: input timing in nanoseconds
 * @q: output timing in clock units; may alias @t (callers pass t,t)
 * @T: clock period for PIO/MWDMA fields
 * @UT: clock period for the UDMA field
 *
 * NOTE(review): ns values are scaled by 1000 before dividing by @T/@UT,
 * so the periods are presumably in picoseconds — confirm against callers.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2308
2309void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2310 struct ata_timing *m, unsigned int what)
2311{
2312 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2313 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2314 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2315 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2316 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2317 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2318 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2319 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2320}
2321
2322static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2323{
2324 const struct ata_timing *t;
2325
2326 for (t = ata_timing; t->mode != speed; t++)
91190758 2327 if (t->mode == 0xFF)
452503f9 2328 return NULL;
2e9edbf8 2329 return t;
452503f9
AC
2330}
2331
/**
 *	ata_timing_compute - compute quantized bus timings for a mode
 *	@adev: device (its ->id[] EIDE words may stretch cycle times)
 *	@speed: XFER_* mode to compute timings for
 *	@t: output timing
 *	@T: clock period passed through to ata_timing_quantize()
 *	@UT: clock period for the UDMA field
 *
 *	RETURNS: 0 on success, -EINVAL if @speed has no table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	/* start from the table's nanosecond timings */
	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* drives report separate minimums with and
			 * without IORDY flow control */
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* take the slower of table vs drive-reported cycles */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse once with the device's current PIO mode and
		 * merge, so PIO-issued commands stay safe */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		/* split the 8-bit slack roughly evenly, remainder to rec8b */
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
2396
cf176e1a
TH
2397/**
2398 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2399 * @dev: Device to adjust xfer masks
458337db 2400 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2401 *
2402 * Adjust xfer masks of @dev downward. Note that this function
2403 * does not apply the change. Invoking ata_set_mode() afterwards
2404 * will apply the limit.
2405 *
2406 * LOCKING:
2407 * Inherited from caller.
2408 *
2409 * RETURNS:
2410 * 0 on success, negative errno on failure
2411 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is OR'd into the selector to suppress the warning */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* knock off the highest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* step UDMA down first; only touch MWDMA once UDMA is gone */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to modes safe on a 40-conductor cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fallthrough: FORCE_PIO0 also kills all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* must keep at least one PIO mode and must actually change something */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	/* commit the reduced masks back to the device */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
2485
/* Program a single device's transfer mode via SET FEATURES - XFER and
 * revalidate it afterwards.  Called from ata_set_mode() step 4.
 * Returns 0 on success, -errno on failure.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	/* keep ATA_DFLAG_PIO in sync with the mode being programmed */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* revalidate with POST_SETMODE set so EH knows the context */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2520
1da177e4
LT
2521/**
2522 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2523 * @ap: port on which timings will be programmed
e82cbdb9 2524 * @r_failed_dev: out paramter for failed device
1da177e4 2525 *
e82cbdb9
TH
2526 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2527 * ata_set_mode() fails, pointer to the failing device is
2528 * returned in @r_failed_dev.
780a87f7 2529 *
1da177e4 2530 * LOCKING:
0cba632b 2531 * PCI/etc. bus probe sem.
e82cbdb9
TH
2532 *
2533 * RETURNS:
2534 * 0 on success, negative errno otherwise
1da177e4 2535 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(ap, r_failed_dev);

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		/* apply host/cable/device restrictions to the masks */
		ata_dev_xfermask(dev);

		/* pick the best remaining PIO and DMA modes separately */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		/* a device with no usable PIO mode is a hard error */
		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

	/* step5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

 out:
	/* on failure, dev still points at the device the loop stopped on */
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
2626
1fdffbce
JG
2627/**
2628 * ata_tf_to_host - issue ATA taskfile to host controller
2629 * @ap: port to which command is being issued
2630 * @tf: ATA taskfile register set
2631 *
2632 * Issues ATA taskfile register set to ATA host controller,
2633 * with proper synchronization with interrupt handler and
2634 * other threads.
2635 *
2636 * LOCKING:
cca3974e 2637 * spin_lock_irqsave(host lock)
1fdffbce
JG
2638 */
2639
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load the taskfile registers first, then write the command
	 * register to actually kick off execution */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
2646
1da177e4
LT
2647/**
2648 * ata_busy_sleep - sleep until BSY clears, or timeout
2649 * @ap: port containing status register to be polled
2650 * @tmout_pat: impatience timeout
2651 * @tmout: overall timeout
2652 *
780a87f7
JG
2653 * Sleep until ATA Status register bit BSY clears,
2654 * or a timeout occurs.
2655 *
d1adc1bb
TH
2656 * LOCKING:
2657 * Kernel thread context (may sleep).
2658 *
2659 * RETURNS:
2660 * 0 on success, -errno otherwise.
1da177e4 2661 */
d1adc1bb
TH
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* phase 1: poll until BSY clears or the "impatience" timeout;
	 * a status of 0xff means nothing is responding on the bus */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	/* still busy after the patience window: warn once, keep waiting */
	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* phase 2: poll until the overall timeout (measured from start) */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	/* 0xff status: no device present */
	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
2701
/* Wait for devices found by ata_devchk() to come out of reset.
 * @devmask: bit 0/1 set if device 0/1 was detected.
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		nsect = ioread8(ioaddr->nsect_addr);
		lbal = ioread8(ioaddr->lbal_addr);
		/* nsect == lbal == 1 is the post-reset signature,
		 * meaning device 1 is accepting register access */
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;	/* give up on device 1 */
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
2743
1da177e4
LT
/* Pulse SRST in the device control register and wait out the reset.
 * Returns 0 (the err_mask-style return is always success here).
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return 0;

	/* wait for the detected devices to finish coming out of reset */
	ata_bus_post_reset(ap, devmask);

	return 0;
}
2781
2782/**
2783 * ata_bus_reset - reset host port and associated ATA channel
2784 * @ap: port to reset
2785 *
2786 * This is typically the first time we actually start issuing
2787 * commands to the ATA channel. We wait for BSY to clear, then
2788 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2789 * result. Determine what devices, if any, are on the channel
2790 * by looking at the device 0/1 error register. Look at the signature
2791 * stored in each device's taskfile registers, to determine if
2792 * the device is ATA or ATAPI.
2793 *
2794 * LOCKING:
0cba632b 2795 * PCI/etc. bus probe sem.
cca3974e 2796 * Obtains host lock.
1da177e4
LT
2797 *
2798 * SIDE EFFECTS:
198e0fed 2799 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
2800 */
2801
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 present */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 * (err == 0x81 presumably means device 1 failed diagnostics,
	 * so its classification is skipped — see ATA spec)
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2868
d7bb4cc7
TH
2869/**
2870 * sata_phy_debounce - debounce SATA phy status
2871 * @ap: ATA port to debounce SATA phy status for
2872 * @params: timing parameters { interval, duratinon, timeout } in msec
2873 *
2874 * Make sure SStatus of @ap reaches stable state, determined by
2875 * holding the same value where DET is not 1 for @duration polled
2876 * every @interval, before @timeout. Timeout constraints the
2877 * beginning of the stable state. Because, after hot unplugging,
2878 * DET gets stuck at 1 on some controllers, this functions waits
2879 * until timeout then returns 0 if DET is stable at 1.
2880 *
2881 * LOCKING:
2882 * Kernel thread context (may sleep)
2883 *
2884 * RETURNS:
2885 * 0 on success, -errno on failure.
2886 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	/* params = { poll interval (ms), required stable time, timeout } */
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field of SStatus matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET stuck at 1: some controllers do this after
			 * hot unplug, so keep waiting until timeout and
			 * then report success (see function comment) */
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			/* stable for the required duration: done */
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check timeout */
		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
2927
2928/**
2929 * sata_phy_resume - resume SATA phy
2930 * @ap: ATA port to resume SATA phy for
2931 * @params: timing parameters { interval, duratinon, timeout } in msec
2932 *
2933 * Resume SATA phy of @ap and debounce it.
2934 *
2935 * LOCKING:
2936 * Kernel thread context (may sleep)
2937 *
2938 * RETURNS:
2939 * 0 on success, -errno on failure.
2940 */
2941int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2942{
2943 u32 scontrol;
81952c54
TH
2944 int rc;
2945
2946 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2947 return rc;
7a7921e8 2948
852ee16a 2949 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
2950
2951 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2952 return rc;
7a7921e8 2953
d7bb4cc7
TH
2954 /* Some PHYs react badly if SStatus is pounded immediately
2955 * after resuming. Delay 200ms before debouncing.
2956 */
2957 msleep(200);
7a7921e8 2958
d7bb4cc7 2959 return sata_phy_debounce(ap, params);
7a7921e8
TH
2960}
2961
f5914a46
TH
/* Give a freshly hotplugged device time to spin up before probing. */
static void ata_wait_spinup(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned long end, secs;
	int rc;

	/* first, debounce phy if SATA */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);

		/* if debounced successfully and offline, no need to wait */
		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
			return;
	}

	/* okay, let's give the drive time to spin up */
	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
	secs = ((end - jiffies) + HZ - 1) / HZ;	/* round up to seconds */

	/* spin-up window already elapsed */
	if (time_after(jiffies, end))
		return;

	if (secs > 5)
		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
				"(%lu secs)\n", secs);

	schedule_timeout_uninterruptible(end - jiffies);
}
2990
2991/**
2992 * ata_std_prereset - prepare for reset
2993 * @ap: ATA port to be reset
2994 *
2995 * @ap is about to be reset. Initialize it.
2996 *
2997 * LOCKING:
2998 * Kernel thread context (may sleep)
2999 *
3000 * RETURNS:
3001 * 0 on success, -errno otherwise.
3002 */
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume & hotplug spinup */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
		ata_wait_spinup(ap);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_resume(ap, timing);
		/* -EOPNOTSUPP means no SCR access; proceed anyway */
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
			return rc;
		}
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	return 0;
}
3041
c2bd5804
TH
3042/**
3043 * ata_std_softreset - reset host port via ATA SRST
3044 * @ap: port to reset
c2bd5804
TH
3045 * @classes: resulting classes of attached devices
3046 *
52783c5d 3047 * Reset host port using ATA SRST.
c2bd5804
TH
3048 *
3049 * LOCKING:
3050 * Kernel thread context (may sleep)
3051 *
3052 * RETURNS:
3053 * 0 on success, -errno otherwise.
3054 */
int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing attached, skip the reset entirely */
	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices
	 * (err == 0x81 presumably flags a failed device 1 diagnostic,
	 * matching the check in ata_bus_reset() — see ATA spec) */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3095
3096/**
b6103f6d 3097 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 3098 * @ap: port to reset
b6103f6d 3099 * @timing: timing parameters { interval, duratinon, timeout } in msec
c2bd5804
TH
3100 *
3101 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
3102 *
3103 * LOCKING:
3104 * Kernel thread context (may sleep)
3105 *
3106 * RETURNS:
3107 * 0 on success, -errno otherwise.
3108 */
b6103f6d 3109int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
c2bd5804 3110{
852ee16a 3111 u32 scontrol;
81952c54 3112 int rc;
852ee16a 3113
c2bd5804
TH
3114 DPRINTK("ENTER\n");
3115
3c567b7d 3116 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
3117 /* SATA spec says nothing about how to reconfigure
3118 * spd. To be on the safe side, turn off phy during
3119 * reconfiguration. This works for at least ICH7 AHCI
3120 * and Sil3124.
3121 */
81952c54 3122 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3123 goto out;
81952c54 3124
a34b6fc0 3125 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
3126
3127 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 3128 goto out;
1c3fae4d 3129
3c567b7d 3130 sata_set_spd(ap);
1c3fae4d
TH
3131 }
3132
3133 /* issue phy wake/reset */
81952c54 3134 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3135 goto out;
81952c54 3136
852ee16a 3137 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
3138
3139 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 3140 goto out;
c2bd5804 3141
1c3fae4d 3142 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3143 * 10.4.2 says at least 1 ms.
3144 */
3145 msleep(1);
3146
1c3fae4d 3147 /* bring phy back */
b6103f6d
TH
3148 rc = sata_phy_resume(ap, timing);
3149 out:
3150 DPRINTK("EXIT, rc=%d\n", rc);
3151 return rc;
3152}
3153
3154/**
3155 * sata_std_hardreset - reset host port via SATA phy reset
3156 * @ap: port to reset
3157 * @class: resulting class of attached device
3158 *
3159 * SATA phy-reset host port using DET bits of SControl register,
3160 * wait for !BSY and classify the attached device.
3161 *
3162 * LOCKING:
3163 * Kernel thread context (may sleep)
3164 *
3165 * RETURNS:
3166 * 0 on success, -errno otherwise.
3167 */
3168int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3169{
3170 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3171 int rc;
3172
3173 DPRINTK("ENTER\n");
3174
3175 /* do hardreset */
3176 rc = sata_port_hardreset(ap, timing);
3177 if (rc) {
3178 ata_port_printk(ap, KERN_ERR,
3179 "COMRESET failed (errno=%d)\n", rc);
3180 return rc;
3181 }
c2bd5804 3182
c2bd5804 3183 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3184 if (ata_port_offline(ap)) {
c2bd5804
TH
3185 *class = ATA_DEV_NONE;
3186 DPRINTK("EXIT, link offline\n");
3187 return 0;
3188 }
3189
34fee227
TH
3190 /* wait a while before checking status, see SRST for more info */
3191 msleep(150);
3192
c2bd5804 3193 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
f15a1daf
TH
3194 ata_port_printk(ap, KERN_ERR,
3195 "COMRESET failed (device not ready)\n");
c2bd5804
TH
3196 return -EIO;
3197 }
3198
3a39746a
TH
3199 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3200
c2bd5804
TH
3201 *class = ata_dev_try_classify(ap, 0, NULL);
3202
3203 DPRINTK("EXIT, class=%u\n", *class);
3204 return 0;
3205}
3206
3207/**
3208 * ata_std_postreset - standard postreset callback
3209 * @ap: the target ata_port
3210 * @classes: classes of attached devices
3211 *
3212 * This function is invoked after a successful reset. Note that
3213 * the device might have been reset more than once using
3214 * different reset methods before postreset is invoked.
c2bd5804 3215 *
c2bd5804
TH
3216 * LOCKING:
3217 * Kernel thread context (may sleep)
3218 */
3219void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3220{
dc2b3515
TH
3221 u32 serror;
3222
c2bd5804
TH
3223 DPRINTK("ENTER\n");
3224
c2bd5804 3225 /* print link status */
81952c54 3226 sata_print_link_status(ap);
c2bd5804 3227
dc2b3515
TH
3228 /* clear SError */
3229 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3230 sata_scr_write(ap, SCR_ERROR, serror);
3231
3a39746a 3232 /* re-enable interrupts */
83625006
AI
3233 if (!ap->ops->error_handler)
3234 ap->ops->irq_on(ap);
c2bd5804
TH
3235
3236 /* is double-select really necessary? */
3237 if (classes[0] != ATA_DEV_NONE)
3238 ap->ops->dev_select(ap, 1);
3239 if (classes[1] != ATA_DEV_NONE)
3240 ap->ops->dev_select(ap, 0);
3241
3a39746a
TH
3242 /* bail out if no device is present */
3243 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3244 DPRINTK("EXIT, no device\n");
3245 return;
3246 }
3247
3248 /* set up device control */
0d5ff566
TH
3249 if (ap->ioaddr.ctl_addr)
3250 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3251
3252 DPRINTK("EXIT\n");
3253}
3254
623a3128
TH
3255/**
3256 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3257 * @dev: device to compare against
3258 * @new_class: class of the new device
3259 * @new_id: IDENTIFY page of the new device
3260 *
3261 * Compare @new_class and @new_id against @dev and determine
3262 * whether @dev is the device indicated by @new_class and
3263 * @new_id.
3264 *
3265 * LOCKING:
3266 * None.
3267 *
3268 * RETURNS:
3269 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3270 */
3373efd8
TH
3271static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3272 const u16 *new_id)
623a3128
TH
3273{
3274 const u16 *old_id = dev->id;
a0cf733b
TH
3275 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3276 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3277 u64 new_n_sectors;
3278
3279 if (dev->class != new_class) {
f15a1daf
TH
3280 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3281 dev->class, new_class);
623a3128
TH
3282 return 0;
3283 }
3284
a0cf733b
TH
3285 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3286 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3287 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3288 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3289 new_n_sectors = ata_id_n_sectors(new_id);
3290
3291 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3292 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3293 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3294 return 0;
3295 }
3296
3297 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3298 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3299 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3300 return 0;
3301 }
3302
3303 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3304 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3305 "%llu != %llu\n",
3306 (unsigned long long)dev->n_sectors,
3307 (unsigned long long)new_n_sectors);
623a3128
TH
3308 return 0;
3309 }
3310
3311 return 1;
3312}
3313
3314/**
3315 * ata_dev_revalidate - Revalidate ATA device
623a3128 3316 * @dev: device to revalidate
bff04647 3317 * @readid_flags: read ID flags
623a3128
TH
3318 *
3319 * Re-read IDENTIFY page and make sure @dev is still attached to
3320 * the port.
3321 *
3322 * LOCKING:
3323 * Kernel thread context (may sleep)
3324 *
3325 * RETURNS:
3326 * 0 on success, negative errno otherwise
3327 */
bff04647 3328int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3329{
5eb45c02 3330 unsigned int class = dev->class;
f15a1daf 3331 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3332 int rc;
3333
5eb45c02
TH
3334 if (!ata_dev_enabled(dev)) {
3335 rc = -ENODEV;
3336 goto fail;
3337 }
623a3128 3338
fe635c7e 3339 /* read ID data */
bff04647 3340 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3341 if (rc)
3342 goto fail;
3343
3344 /* is the device still there? */
3373efd8 3345 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3346 rc = -ENODEV;
3347 goto fail;
3348 }
3349
fe635c7e 3350 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3351
3352 /* configure device according to the new ID */
efdaedc4 3353 rc = ata_dev_configure(dev);
5eb45c02
TH
3354 if (rc == 0)
3355 return 0;
623a3128
TH
3356
3357 fail:
f15a1daf 3358 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3359 return rc;
3360}
3361
6919a0a6
AC
3362struct ata_blacklist_entry {
3363 const char *model_num;
3364 const char *model_rev;
3365 unsigned long horkage;
3366};
3367
3368static const struct ata_blacklist_entry ata_device_blacklist [] = {
3369 /* Devices with DMA related problems under Linux */
3370 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3371 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3372 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3373 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3374 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3375 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3376 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3377 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3378 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3379 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3380 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3381 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3382 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3383 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3384 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3385 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3386 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3387 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3388 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3389 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3390 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3391 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3392 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3393 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3394 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3395 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3396 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3397 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3398 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3399
18d6e9d5 3400 /* Weird ATAPI devices */
6f23a31d
AL
3401 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 |
3402 ATA_HORKAGE_DMA_RW_ONLY },
18d6e9d5 3403
6919a0a6
AC
3404 /* Devices we expect to fail diagnostics */
3405
3406 /* Devices where NCQ should be avoided */
3407 /* NCQ is slow */
3408 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3409 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3410 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30
PR
3411 /* NCQ is broken */
3412 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
96442925
JA
3413 /* NCQ hard hangs device under heavier load, needs hard power cycle */
3414 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
36e337d0
RH
3415 /* Blacklist entries taken from Silicon Image 3124/3132
3416 Windows driver .inf file - also several Linux problem reports */
3417 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3418 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3419 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6
AC
3420
3421 /* Devices with NCQ limits */
3422
3423 /* End Marker */
3424 { }
1da177e4 3425};
2e9edbf8 3426
6919a0a6 3427unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3428{
8bfa79fc
TH
3429 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3430 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3431 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3432
8bfa79fc
TH
3433 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3434 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3435
6919a0a6 3436 while (ad->model_num) {
8bfa79fc 3437 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3438 if (ad->model_rev == NULL)
3439 return ad->horkage;
8bfa79fc 3440 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3441 return ad->horkage;
f4b15fef 3442 }
6919a0a6 3443 ad++;
f4b15fef 3444 }
1da177e4
LT
3445 return 0;
3446}
3447
6919a0a6
AC
3448static int ata_dma_blacklisted(const struct ata_device *dev)
3449{
3450 /* We don't support polling DMA.
3451 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3452 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3453 */
3454 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3455 (dev->flags & ATA_DFLAG_CDB_INTR))
3456 return 1;
3457 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3458}
3459
a6d5a51c
TH
3460/**
3461 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3462 * @dev: Device to compute xfermask for
3463 *
acf356b1
TH
3464 * Compute supported xfermask of @dev and store it in
3465 * dev->*_mask. This function is responsible for applying all
3466 * known limits including host controller limits, device
3467 * blacklist, etc...
a6d5a51c
TH
3468 *
3469 * LOCKING:
3470 * None.
a6d5a51c 3471 */
3373efd8 3472static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3473{
3373efd8 3474 struct ata_port *ap = dev->ap;
cca3974e 3475 struct ata_host *host = ap->host;
a6d5a51c 3476 unsigned long xfer_mask;
1da177e4 3477
37deecb5 3478 /* controller modes available */
565083e1
TH
3479 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3480 ap->mwdma_mask, ap->udma_mask);
3481
8343f889 3482 /* drive modes available */
37deecb5
TH
3483 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3484 dev->mwdma_mask, dev->udma_mask);
3485 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3486
b352e57d
AC
3487 /*
3488 * CFA Advanced TrueIDE timings are not allowed on a shared
3489 * cable
3490 */
3491 if (ata_dev_pair(dev)) {
3492 /* No PIO5 or PIO6 */
3493 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3494 /* No MWDMA3 or MWDMA 4 */
3495 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3496 }
3497
37deecb5
TH
3498 if (ata_dma_blacklisted(dev)) {
3499 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3500 ata_dev_printk(dev, KERN_WARNING,
3501 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3502 }
a6d5a51c 3503
14d66ab7
PV
3504 if ((host->flags & ATA_HOST_SIMPLEX) &&
3505 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
3506 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3507 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3508 "other device, disabling DMA\n");
5444a6f4 3509 }
565083e1 3510
5444a6f4
AC
3511 if (ap->ops->mode_filter)
3512 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3513
8343f889
RH
3514 /* Apply cable rule here. Don't apply it early because when
3515 * we handle hot plug the cable type can itself change.
3516 * Check this last so that we know if the transfer rate was
3517 * solely limited by the cable.
3518 * Unknown or 80 wire cables reported host side are checked
3519 * drive side as well. Cases where we know a 40wire cable
3520 * is used safely for 80 are not checked here.
3521 */
3522 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3523 /* UDMA/44 or higher would be available */
3524 if((ap->cbl == ATA_CBL_PATA40) ||
3525 (ata_drive_40wire(dev->id) &&
3526 (ap->cbl == ATA_CBL_PATA_UNK ||
3527 ap->cbl == ATA_CBL_PATA80))) {
3528 ata_dev_printk(dev, KERN_WARNING,
3529 "limited to UDMA/33 due to 40-wire cable\n");
3530 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3531 }
3532
565083e1
TH
3533 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3534 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3535}
3536
1da177e4
LT
3537/**
3538 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3539 * @dev: Device to which command will be sent
3540 *
780a87f7
JG
3541 * Issue SET FEATURES - XFER MODE command to device @dev
3542 * on port @ap.
3543 *
1da177e4 3544 * LOCKING:
0cba632b 3545 * PCI/etc. bus probe sem.
83206a29
TH
3546 *
3547 * RETURNS:
3548 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3549 */
3550
3373efd8 3551static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3552{
a0123703 3553 struct ata_taskfile tf;
83206a29 3554 unsigned int err_mask;
1da177e4
LT
3555
3556 /* set up set-features taskfile */
3557 DPRINTK("set features - xfer mode\n");
3558
3373efd8 3559 ata_tf_init(dev, &tf);
a0123703
TH
3560 tf.command = ATA_CMD_SET_FEATURES;
3561 tf.feature = SETFEATURES_XFER;
3562 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3563 tf.protocol = ATA_PROT_NODATA;
3564 tf.nsect = dev->xfer_mode;
1da177e4 3565
3373efd8 3566 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3567
83206a29
TH
3568 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3569 return err_mask;
1da177e4
LT
3570}
3571
8bf62ece
AL
3572/**
3573 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3574 * @dev: Device to which command will be sent
e2a7f77a
RD
3575 * @heads: Number of heads (taskfile parameter)
3576 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3577 *
3578 * LOCKING:
6aff8f1f
TH
3579 * Kernel thread context (may sleep)
3580 *
3581 * RETURNS:
3582 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3583 */
3373efd8
TH
3584static unsigned int ata_dev_init_params(struct ata_device *dev,
3585 u16 heads, u16 sectors)
8bf62ece 3586{
a0123703 3587 struct ata_taskfile tf;
6aff8f1f 3588 unsigned int err_mask;
8bf62ece
AL
3589
3590 /* Number of sectors per track 1-255. Number of heads 1-16 */
3591 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3592 return AC_ERR_INVALID;
8bf62ece
AL
3593
3594 /* set up init dev params taskfile */
3595 DPRINTK("init dev params \n");
3596
3373efd8 3597 ata_tf_init(dev, &tf);
a0123703
TH
3598 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3599 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3600 tf.protocol = ATA_PROT_NODATA;
3601 tf.nsect = sectors;
3602 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3603
3373efd8 3604 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3605
6aff8f1f
TH
3606 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3607 return err_mask;
8bf62ece
AL
3608}
3609
1da177e4 3610/**
0cba632b
JG
3611 * ata_sg_clean - Unmap DMA memory associated with command
3612 * @qc: Command containing DMA memory to be released
3613 *
3614 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3615 *
3616 * LOCKING:
cca3974e 3617 * spin_lock_irqsave(host lock)
1da177e4 3618 */
70e6ad0c 3619void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
3620{
3621 struct ata_port *ap = qc->ap;
cedc9a47 3622 struct scatterlist *sg = qc->__sg;
1da177e4 3623 int dir = qc->dma_dir;
cedc9a47 3624 void *pad_buf = NULL;
1da177e4 3625
a4631474
TH
3626 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3627 WARN_ON(sg == NULL);
1da177e4
LT
3628
3629 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 3630 WARN_ON(qc->n_elem > 1);
1da177e4 3631
2c13b7ce 3632 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 3633
cedc9a47
JG
3634 /* if we padded the buffer out to 32-bit bound, and data
3635 * xfer direction is from-device, we must copy from the
3636 * pad buffer back into the supplied buffer
3637 */
3638 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3639 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3640
3641 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 3642 if (qc->n_elem)
2f1f610b 3643 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
3644 /* restore last sg */
3645 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3646 if (pad_buf) {
3647 struct scatterlist *psg = &qc->pad_sgent;
3648 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3649 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 3650 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3651 }
3652 } else {
2e242fa9 3653 if (qc->n_elem)
2f1f610b 3654 dma_unmap_single(ap->dev,
e1410f2d
JG
3655 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3656 dir);
cedc9a47
JG
3657 /* restore sg */
3658 sg->length += qc->pad_len;
3659 if (pad_buf)
3660 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3661 pad_buf, qc->pad_len);
3662 }
1da177e4
LT
3663
3664 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 3665 qc->__sg = NULL;
1da177e4
LT
3666}
3667
3668/**
3669 * ata_fill_sg - Fill PCI IDE PRD table
3670 * @qc: Metadata associated with taskfile to be transferred
3671 *
780a87f7
JG
3672 * Fill PCI IDE PRD (scatter-gather) table with segments
3673 * associated with the current disk command.
3674 *
1da177e4 3675 * LOCKING:
cca3974e 3676 * spin_lock_irqsave(host lock)
1da177e4
LT
3677 *
3678 */
3679static void ata_fill_sg(struct ata_queued_cmd *qc)
3680{
1da177e4 3681 struct ata_port *ap = qc->ap;
cedc9a47
JG
3682 struct scatterlist *sg;
3683 unsigned int idx;
1da177e4 3684
a4631474 3685 WARN_ON(qc->__sg == NULL);
f131883e 3686 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
3687
3688 idx = 0;
cedc9a47 3689 ata_for_each_sg(sg, qc) {
1da177e4
LT
3690 u32 addr, offset;
3691 u32 sg_len, len;
3692
3693 /* determine if physical DMA addr spans 64K boundary.
3694 * Note h/w doesn't support 64-bit, so we unconditionally
3695 * truncate dma_addr_t to u32.
3696 */
3697 addr = (u32) sg_dma_address(sg);
3698 sg_len = sg_dma_len(sg);
3699
3700 while (sg_len) {
3701 offset = addr & 0xffff;
3702 len = sg_len;
3703 if ((offset + sg_len) > 0x10000)
3704 len = 0x10000 - offset;
3705
3706 ap->prd[idx].addr = cpu_to_le32(addr);
3707 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3708 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3709
3710 idx++;
3711 sg_len -= len;
3712 addr += len;
3713 }
3714 }
3715
3716 if (idx)
3717 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3718}
3719/**
3720 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3721 * @qc: Metadata associated with taskfile to check
3722 *
780a87f7
JG
3723 * Allow low-level driver to filter ATA PACKET commands, returning
3724 * a status indicating whether or not it is OK to use DMA for the
3725 * supplied PACKET command.
3726 *
1da177e4 3727 * LOCKING:
cca3974e 3728 * spin_lock_irqsave(host lock)
0cba632b 3729 *
1da177e4
LT
3730 * RETURNS: 0 when ATAPI DMA can be used
3731 * nonzero otherwise
3732 */
3733int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3734{
3735 struct ata_port *ap = qc->ap;
3736 int rc = 0; /* Assume ATAPI DMA is OK by default */
3737
6f23a31d
AL
3738 /* some drives can only do ATAPI DMA on read/write */
3739 if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
3740 struct scsi_cmnd *cmd = qc->scsicmd;
3741 u8 *scsicmd = cmd->cmnd;
3742
3743 switch (scsicmd[0]) {
3744 case READ_10:
3745 case WRITE_10:
3746 case READ_12:
3747 case WRITE_12:
3748 case READ_6:
3749 case WRITE_6:
3750 /* atapi dma maybe ok */
3751 break;
3752 default:
3753 /* turn off atapi dma */
3754 return 1;
3755 }
3756 }
3757
1da177e4
LT
3758 if (ap->ops->check_atapi_dma)
3759 rc = ap->ops->check_atapi_dma(qc);
3760
3761 return rc;
3762}
3763/**
3764 * ata_qc_prep - Prepare taskfile for submission
3765 * @qc: Metadata associated with taskfile to be prepared
3766 *
780a87f7
JG
3767 * Prepare ATA taskfile for submission.
3768 *
1da177e4 3769 * LOCKING:
cca3974e 3770 * spin_lock_irqsave(host lock)
1da177e4
LT
3771 */
3772void ata_qc_prep(struct ata_queued_cmd *qc)
3773{
3774 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3775 return;
3776
3777 ata_fill_sg(qc);
3778}
3779
e46834cd
BK
3780void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3781
0cba632b
JG
3782/**
3783 * ata_sg_init_one - Associate command with memory buffer
3784 * @qc: Command to be associated
3785 * @buf: Memory buffer
3786 * @buflen: Length of memory buffer, in bytes.
3787 *
3788 * Initialize the data-related elements of queued_cmd @qc
3789 * to point to a single memory buffer, @buf of byte length @buflen.
3790 *
3791 * LOCKING:
cca3974e 3792 * spin_lock_irqsave(host lock)
0cba632b
JG
3793 */
3794
1da177e4
LT
3795void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3796{
1da177e4
LT
3797 qc->flags |= ATA_QCFLAG_SINGLE;
3798
cedc9a47 3799 qc->__sg = &qc->sgent;
1da177e4 3800 qc->n_elem = 1;
cedc9a47 3801 qc->orig_n_elem = 1;
1da177e4 3802 qc->buf_virt = buf;
233277ca 3803 qc->nbytes = buflen;
1da177e4 3804
61c0596c 3805 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3806}
3807
0cba632b
JG
3808/**
3809 * ata_sg_init - Associate command with scatter-gather table.
3810 * @qc: Command to be associated
3811 * @sg: Scatter-gather table.
3812 * @n_elem: Number of elements in s/g table.
3813 *
3814 * Initialize the data-related elements of queued_cmd @qc
3815 * to point to a scatter-gather table @sg, containing @n_elem
3816 * elements.
3817 *
3818 * LOCKING:
cca3974e 3819 * spin_lock_irqsave(host lock)
0cba632b
JG
3820 */
3821
1da177e4
LT
3822void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3823 unsigned int n_elem)
3824{
3825 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3826 qc->__sg = sg;
1da177e4 3827 qc->n_elem = n_elem;
cedc9a47 3828 qc->orig_n_elem = n_elem;
1da177e4
LT
3829}
3830
3831/**
0cba632b
JG
3832 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3833 * @qc: Command with memory buffer to be mapped.
3834 *
3835 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
3836 *
3837 * LOCKING:
cca3974e 3838 * spin_lock_irqsave(host lock)
1da177e4
LT
3839 *
3840 * RETURNS:
0cba632b 3841 * Zero on success, negative on error.
1da177e4
LT
3842 */
3843
3844static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3845{
3846 struct ata_port *ap = qc->ap;
3847 int dir = qc->dma_dir;
cedc9a47 3848 struct scatterlist *sg = qc->__sg;
1da177e4 3849 dma_addr_t dma_address;
2e242fa9 3850 int trim_sg = 0;
1da177e4 3851
cedc9a47
JG
3852 /* we must lengthen transfers to end on a 32-bit boundary */
3853 qc->pad_len = sg->length & 3;
3854 if (qc->pad_len) {
3855 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3856 struct scatterlist *psg = &qc->pad_sgent;
3857
a4631474 3858 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3859
3860 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3861
3862 if (qc->tf.flags & ATA_TFLAG_WRITE)
3863 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3864 qc->pad_len);
3865
3866 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3867 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3868 /* trim sg */
3869 sg->length -= qc->pad_len;
2e242fa9
TH
3870 if (sg->length == 0)
3871 trim_sg = 1;
cedc9a47
JG
3872
3873 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3874 sg->length, qc->pad_len);
3875 }
3876
2e242fa9
TH
3877 if (trim_sg) {
3878 qc->n_elem--;
e1410f2d
JG
3879 goto skip_map;
3880 }
3881
2f1f610b 3882 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 3883 sg->length, dir);
537a95d9
TH
3884 if (dma_mapping_error(dma_address)) {
3885 /* restore sg */
3886 sg->length += qc->pad_len;
1da177e4 3887 return -1;
537a95d9 3888 }
1da177e4
LT
3889
3890 sg_dma_address(sg) = dma_address;
32529e01 3891 sg_dma_len(sg) = sg->length;
1da177e4 3892
2e242fa9 3893skip_map:
1da177e4
LT
3894 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3895 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3896
3897 return 0;
3898}
3899
3900/**
0cba632b
JG
3901 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3902 * @qc: Command with scatter-gather table to be mapped.
3903 *
3904 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
3905 *
3906 * LOCKING:
cca3974e 3907 * spin_lock_irqsave(host lock)
1da177e4
LT
3908 *
3909 * RETURNS:
0cba632b 3910 * Zero on success, negative on error.
1da177e4
LT
3911 *
3912 */
3913
3914static int ata_sg_setup(struct ata_queued_cmd *qc)
3915{
3916 struct ata_port *ap = qc->ap;
cedc9a47
JG
3917 struct scatterlist *sg = qc->__sg;
3918 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 3919 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 3920
44877b4e 3921 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 3922 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 3923
cedc9a47
JG
3924 /* we must lengthen transfers to end on a 32-bit boundary */
3925 qc->pad_len = lsg->length & 3;
3926 if (qc->pad_len) {
3927 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3928 struct scatterlist *psg = &qc->pad_sgent;
3929 unsigned int offset;
3930
a4631474 3931 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3932
3933 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3934
3935 /*
3936 * psg->page/offset are used to copy to-be-written
3937 * data in this function or read data in ata_sg_clean.
3938 */
3939 offset = lsg->offset + lsg->length - qc->pad_len;
3940 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3941 psg->offset = offset_in_page(offset);
3942
3943 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3944 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3945 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 3946 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3947 }
3948
3949 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3950 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3951 /* trim last sg */
3952 lsg->length -= qc->pad_len;
e1410f2d
JG
3953 if (lsg->length == 0)
3954 trim_sg = 1;
cedc9a47
JG
3955
3956 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3957 qc->n_elem - 1, lsg->length, qc->pad_len);
3958 }
3959
e1410f2d
JG
3960 pre_n_elem = qc->n_elem;
3961 if (trim_sg && pre_n_elem)
3962 pre_n_elem--;
3963
3964 if (!pre_n_elem) {
3965 n_elem = 0;
3966 goto skip_map;
3967 }
3968
1da177e4 3969 dir = qc->dma_dir;
2f1f610b 3970 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
3971 if (n_elem < 1) {
3972 /* restore last sg */
3973 lsg->length += qc->pad_len;
1da177e4 3974 return -1;
537a95d9 3975 }
1da177e4
LT
3976
3977 DPRINTK("%d sg elements mapped\n", n_elem);
3978
e1410f2d 3979skip_map:
1da177e4
LT
3980 qc->n_elem = n_elem;
3981
3982 return 0;
3983}
3984
0baab86b 3985/**
c893a3ae 3986 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3987 * @buf: Buffer to swap
3988 * @buf_words: Number of 16-bit words in buffer.
3989 *
3990 * Swap halves of 16-bit words if needed to convert from
3991 * little-endian byte order to native cpu byte order, or
3992 * vice-versa.
3993 *
3994 * LOCKING:
6f0ef4fa 3995 * Inherited from caller.
0baab86b 3996 */
1da177e4
LT
3997void swap_buf_le16(u16 *buf, unsigned int buf_words)
3998{
3999#ifdef __BIG_ENDIAN
4000 unsigned int i;
4001
4002 for (i = 0; i < buf_words; i++)
4003 buf[i] = le16_to_cpu(buf[i]);
4004#endif /* __BIG_ENDIAN */
4005}
4006
6ae4cfb5 4007/**
0d5ff566 4008 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4009 * @adev: device to target
6ae4cfb5
AL
4010 * @buf: data buffer
4011 * @buflen: buffer length
344babaa 4012 * @write_data: read/write
6ae4cfb5
AL
4013 *
4014 * Transfer data from/to the device data register by PIO.
4015 *
4016 * LOCKING:
4017 * Inherited from caller.
6ae4cfb5 4018 */
0d5ff566
TH
4019void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4020 unsigned int buflen, int write_data)
1da177e4 4021{
a6b2c5d4 4022 struct ata_port *ap = adev->ap;
6ae4cfb5 4023 unsigned int words = buflen >> 1;
1da177e4 4024
6ae4cfb5 4025 /* Transfer multiple of 2 bytes */
1da177e4 4026 if (write_data)
0d5ff566 4027 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4028 else
0d5ff566 4029 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4030
4031 /* Transfer trailing 1 byte, if any. */
4032 if (unlikely(buflen & 0x01)) {
4033 u16 align_buf[1] = { 0 };
4034 unsigned char *trailing_buf = buf + buflen - 1;
4035
4036 if (write_data) {
4037 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4038 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4039 } else {
0d5ff566 4040 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4041 memcpy(trailing_buf, align_buf, 1);
4042 }
4043 }
1da177e4
LT
4044}
4045
/**
 *	ata_data_xfer_noirq - Transfer data by PIO with interrupts off
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: non-zero for a write, zero for a read
 *
 *	Same contract as ata_data_xfer(), but the whole transfer runs
 *	with local interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_flags);
}
4067
4068
6ae4cfb5
AL
4069/**
4070 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
4071 * @qc: Command on going
4072 *
4073 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
4074 *
4075 * LOCKING:
4076 * Inherited from caller.
4077 */
4078
1da177e4
LT
4079static void ata_pio_sector(struct ata_queued_cmd *qc)
4080{
4081 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4082 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4083 struct ata_port *ap = qc->ap;
4084 struct page *page;
4085 unsigned int offset;
4086 unsigned char *buf;
4087
726f0785 4088 if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
14be71f4 4089 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4090
4091 page = sg[qc->cursg].page;
726f0785 4092 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4093
4094 /* get the current page and offset */
4095 page = nth_page(page, (offset >> PAGE_SHIFT));
4096 offset %= PAGE_SIZE;
4097
1da177e4
LT
4098 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4099
91b8b313
AL
4100 if (PageHighMem(page)) {
4101 unsigned long flags;
4102
a6b2c5d4 4103 /* FIXME: use a bounce buffer */
91b8b313
AL
4104 local_irq_save(flags);
4105 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4106
91b8b313 4107 /* do the actual data transfer */
a6b2c5d4 4108 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
1da177e4 4109
91b8b313
AL
4110 kunmap_atomic(buf, KM_IRQ0);
4111 local_irq_restore(flags);
4112 } else {
4113 buf = page_address(page);
a6b2c5d4 4114 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
91b8b313 4115 }
1da177e4 4116
726f0785
TH
4117 qc->curbytes += ATA_SECT_SIZE;
4118 qc->cursg_ofs += ATA_SECT_SIZE;
1da177e4 4119
726f0785 4120 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4121 qc->cursg++;
4122 qc->cursg_ofs = 0;
4123 }
1da177e4 4124}
1da177e4 4125
07f6f7d0
AL
4126/**
4127 * ata_pio_sectors - Transfer one or many 512-byte sectors.
4128 * @qc: Command on going
4129 *
c81e29b4 4130 * Transfer one or many ATA_SECT_SIZE of data from/to the
07f6f7d0
AL
4131 * ATA device for the DRQ request.
4132 *
4133 * LOCKING:
4134 * Inherited from caller.
4135 */
1da177e4 4136
07f6f7d0
AL
4137static void ata_pio_sectors(struct ata_queued_cmd *qc)
4138{
4139 if (is_multi_taskfile(&qc->tf)) {
4140 /* READ/WRITE MULTIPLE */
4141 unsigned int nsect;
4142
587005de 4143 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4144
726f0785
TH
4145 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
4146 qc->dev->multi_count);
07f6f7d0
AL
4147 while (nsect--)
4148 ata_pio_sector(qc);
4149 } else
4150 ata_pio_sector(qc);
4151}
4152
c71c1857
AL
4153/**
4154 * atapi_send_cdb - Write CDB bytes to hardware
4155 * @ap: Port to which ATAPI device is attached.
4156 * @qc: Taskfile currently active
4157 *
4158 * When device has indicated its readiness to accept
4159 * a CDB, this function is called. Send the CDB.
4160 *
4161 * LOCKING:
4162 * caller.
4163 */
4164
4165static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4166{
4167 /* send SCSI cdb */
4168 DPRINTK("send cdb\n");
db024d53 4169 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4170
a6b2c5d4 4171 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4172 ata_altstatus(ap); /* flush */
4173
4174 switch (qc->tf.protocol) {
4175 case ATA_PROT_ATAPI:
4176 ap->hsm_task_state = HSM_ST;
4177 break;
4178 case ATA_PROT_ATAPI_NODATA:
4179 ap->hsm_task_state = HSM_ST_LAST;
4180 break;
4181 case ATA_PROT_ATAPI_DMA:
4182 ap->hsm_task_state = HSM_ST_LAST;
4183 /* initiate bmdma */
4184 ap->ops->bmdma_start(qc);
4185 break;
4186 }
1da177e4
LT
4187}
4188
6ae4cfb5
AL
4189/**
4190 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4191 * @qc: Command on going
4192 * @bytes: number of bytes
4193 *
4194 * Transfer Transfer data from/to the ATAPI device.
4195 *
4196 * LOCKING:
4197 * Inherited from caller.
4198 *
4199 */
4200
1da177e4
LT
4201static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4202{
4203 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4204 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4205 struct ata_port *ap = qc->ap;
4206 struct page *page;
4207 unsigned char *buf;
4208 unsigned int offset, count;
4209
563a6e1f 4210 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4211 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4212
4213next_sg:
563a6e1f 4214 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4215 /*
563a6e1f
AL
4216 * The end of qc->sg is reached and the device expects
4217 * more data to transfer. In order not to overrun qc->sg
4218 * and fulfill length specified in the byte count register,
4219 * - for read case, discard trailing data from the device
4220 * - for write case, padding zero data to the device
4221 */
4222 u16 pad_buf[1] = { 0 };
4223 unsigned int words = bytes >> 1;
4224 unsigned int i;
4225
4226 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4227 ata_dev_printk(qc->dev, KERN_WARNING,
4228 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4229
4230 for (i = 0; i < words; i++)
a6b2c5d4 4231 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4232
14be71f4 4233 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4234 return;
4235 }
4236
cedc9a47 4237 sg = &qc->__sg[qc->cursg];
1da177e4 4238
1da177e4
LT
4239 page = sg->page;
4240 offset = sg->offset + qc->cursg_ofs;
4241
4242 /* get the current page and offset */
4243 page = nth_page(page, (offset >> PAGE_SHIFT));
4244 offset %= PAGE_SIZE;
4245
6952df03 4246 /* don't overrun current sg */
32529e01 4247 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4248
4249 /* don't cross page boundaries */
4250 count = min(count, (unsigned int)PAGE_SIZE - offset);
4251
7282aa4b
AL
4252 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4253
91b8b313
AL
4254 if (PageHighMem(page)) {
4255 unsigned long flags;
4256
a6b2c5d4 4257 /* FIXME: use bounce buffer */
91b8b313
AL
4258 local_irq_save(flags);
4259 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4260
91b8b313 4261 /* do the actual data transfer */
a6b2c5d4 4262 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4263
91b8b313
AL
4264 kunmap_atomic(buf, KM_IRQ0);
4265 local_irq_restore(flags);
4266 } else {
4267 buf = page_address(page);
a6b2c5d4 4268 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4269 }
1da177e4
LT
4270
4271 bytes -= count;
4272 qc->curbytes += count;
4273 qc->cursg_ofs += count;
4274
32529e01 4275 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4276 qc->cursg++;
4277 qc->cursg_ofs = 0;
4278 }
4279
563a6e1f 4280 if (bytes)
1da177e4 4281 goto next_sg;
1da177e4
LT
4282}
4283
6ae4cfb5
AL
4284/**
4285 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4286 * @qc: Command on going
4287 *
4288 * Transfer Transfer data from/to the ATAPI device.
4289 *
4290 * LOCKING:
4291 * Inherited from caller.
6ae4cfb5
AL
4292 */
4293
1da177e4
LT
4294static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4295{
4296 struct ata_port *ap = qc->ap;
4297 struct ata_device *dev = qc->dev;
4298 unsigned int ireason, bc_lo, bc_hi, bytes;
4299 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4300
eec4c3f3
AL
4301 /* Abuse qc->result_tf for temp storage of intermediate TF
4302 * here to save some kernel stack usage.
4303 * For normal completion, qc->result_tf is not relevant. For
4304 * error, qc->result_tf is later overwritten by ata_qc_complete().
4305 * So, the correctness of qc->result_tf is not affected.
4306 */
4307 ap->ops->tf_read(ap, &qc->result_tf);
4308 ireason = qc->result_tf.nsect;
4309 bc_lo = qc->result_tf.lbam;
4310 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4311 bytes = (bc_hi << 8) | bc_lo;
4312
4313 /* shall be cleared to zero, indicating xfer of data */
4314 if (ireason & (1 << 0))
4315 goto err_out;
4316
4317 /* make sure transfer direction matches expected */
4318 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4319 if (do_write != i_write)
4320 goto err_out;
4321
44877b4e 4322 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 4323
1da177e4
LT
4324 __atapi_pio_bytes(qc, bytes);
4325
4326 return;
4327
4328err_out:
f15a1daf 4329 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4330 qc->err_mask |= AC_ERR_HSM;
14be71f4 4331 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4332}
4333
4334/**
c234fb00
AL
4335 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4336 * @ap: the target ata_port
4337 * @qc: qc on going
1da177e4 4338 *
c234fb00
AL
4339 * RETURNS:
4340 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4341 */
c234fb00
AL
4342
4343static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4344{
c234fb00
AL
4345 if (qc->tf.flags & ATA_TFLAG_POLLING)
4346 return 1;
1da177e4 4347
c234fb00
AL
4348 if (ap->hsm_task_state == HSM_ST_FIRST) {
4349 if (qc->tf.protocol == ATA_PROT_PIO &&
4350 (qc->tf.flags & ATA_TFLAG_WRITE))
4351 return 1;
1da177e4 4352
c234fb00
AL
4353 if (is_atapi_taskfile(&qc->tf) &&
4354 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4355 return 1;
fe79e683
AL
4356 }
4357
c234fb00
AL
4358 return 0;
4359}
1da177e4 4360
c17ea20d
TH
4361/**
4362 * ata_hsm_qc_complete - finish a qc running on standard HSM
4363 * @qc: Command to complete
4364 * @in_wq: 1 if called from workqueue, 0 otherwise
4365 *
4366 * Finish @qc which is running on standard HSM.
4367 *
4368 * LOCKING:
cca3974e 4369 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4370 * Otherwise, none on entry and grabs host lock.
4371 */
4372static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4373{
4374 struct ata_port *ap = qc->ap;
4375 unsigned long flags;
4376
4377 if (ap->ops->error_handler) {
4378 if (in_wq) {
ba6a1308 4379 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4380
cca3974e
JG
4381 /* EH might have kicked in while host lock is
4382 * released.
c17ea20d
TH
4383 */
4384 qc = ata_qc_from_tag(ap, qc->tag);
4385 if (qc) {
4386 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4387 ap->ops->irq_on(ap);
c17ea20d
TH
4388 ata_qc_complete(qc);
4389 } else
4390 ata_port_freeze(ap);
4391 }
4392
ba6a1308 4393 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4394 } else {
4395 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4396 ata_qc_complete(qc);
4397 else
4398 ata_port_freeze(ap);
4399 }
4400 } else {
4401 if (in_wq) {
ba6a1308 4402 spin_lock_irqsave(ap->lock, flags);
83625006 4403 ap->ops->irq_on(ap);
c17ea20d 4404 ata_qc_complete(qc);
ba6a1308 4405 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4406 } else
4407 ata_qc_complete(qc);
4408 }
1da177e4 4409
c81e29b4 4410 ata_altstatus(ap); /* flush */
c17ea20d
TH
4411}
4412
bb5cb290
AL
4413/**
4414 * ata_hsm_move - move the HSM to the next state.
4415 * @ap: the target ata_port
4416 * @qc: qc on going
4417 * @status: current device status
4418 * @in_wq: 1 if called from workqueue, 0 otherwise
4419 *
4420 * RETURNS:
4421 * 1 when poll next status needed, 0 otherwise.
4422 */
9a1004d0
TH
4423int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4424 u8 status, int in_wq)
e2cec771 4425{
bb5cb290
AL
4426 unsigned long flags = 0;
4427 int poll_next;
4428
6912ccd5
AL
4429 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4430
bb5cb290
AL
4431 /* Make sure ata_qc_issue_prot() does not throw things
4432 * like DMA polling into the workqueue. Notice that
4433 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4434 */
c234fb00 4435 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4436
e2cec771 4437fsm_start:
999bb6f4 4438 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 4439 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 4440
e2cec771
AL
4441 switch (ap->hsm_task_state) {
4442 case HSM_ST_FIRST:
bb5cb290
AL
4443 /* Send first data block or PACKET CDB */
4444
4445 /* If polling, we will stay in the work queue after
4446 * sending the data. Otherwise, interrupt handler
4447 * takes over after sending the data.
4448 */
4449 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4450
e2cec771 4451 /* check device status */
3655d1d3
AL
4452 if (unlikely((status & ATA_DRQ) == 0)) {
4453 /* handle BSY=0, DRQ=0 as error */
4454 if (likely(status & (ATA_ERR | ATA_DF)))
4455 /* device stops HSM for abort/error */
4456 qc->err_mask |= AC_ERR_DEV;
4457 else
4458 /* HSM violation. Let EH handle this */
4459 qc->err_mask |= AC_ERR_HSM;
4460
14be71f4 4461 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4462 goto fsm_start;
1da177e4
LT
4463 }
4464
71601958
AL
4465 /* Device should not ask for data transfer (DRQ=1)
4466 * when it finds something wrong.
eee6c32f
AL
4467 * We ignore DRQ here and stop the HSM by
4468 * changing hsm_task_state to HSM_ST_ERR and
4469 * let the EH abort the command or reset the device.
71601958
AL
4470 */
4471 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4472 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4473 "error, dev_stat 0x%X\n", status);
3655d1d3 4474 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4475 ap->hsm_task_state = HSM_ST_ERR;
4476 goto fsm_start;
71601958 4477 }
1da177e4 4478
bb5cb290
AL
4479 /* Send the CDB (atapi) or the first data block (ata pio out).
4480 * During the state transition, interrupt handler shouldn't
4481 * be invoked before the data transfer is complete and
4482 * hsm_task_state is changed. Hence, the following locking.
4483 */
4484 if (in_wq)
ba6a1308 4485 spin_lock_irqsave(ap->lock, flags);
1da177e4 4486
bb5cb290
AL
4487 if (qc->tf.protocol == ATA_PROT_PIO) {
4488 /* PIO data out protocol.
4489 * send first data block.
4490 */
0565c26d 4491
bb5cb290
AL
4492 /* ata_pio_sectors() might change the state
4493 * to HSM_ST_LAST. so, the state is changed here
4494 * before ata_pio_sectors().
4495 */
4496 ap->hsm_task_state = HSM_ST;
4497 ata_pio_sectors(qc);
4498 ata_altstatus(ap); /* flush */
4499 } else
4500 /* send CDB */
4501 atapi_send_cdb(ap, qc);
4502
4503 if (in_wq)
ba6a1308 4504 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4505
4506 /* if polling, ata_pio_task() handles the rest.
4507 * otherwise, interrupt handler takes over from here.
4508 */
e2cec771 4509 break;
1c848984 4510
e2cec771
AL
4511 case HSM_ST:
4512 /* complete command or read/write the data register */
4513 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4514 /* ATAPI PIO protocol */
4515 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4516 /* No more data to transfer or device error.
4517 * Device error will be tagged in HSM_ST_LAST.
4518 */
e2cec771
AL
4519 ap->hsm_task_state = HSM_ST_LAST;
4520 goto fsm_start;
4521 }
1da177e4 4522
71601958
AL
4523 /* Device should not ask for data transfer (DRQ=1)
4524 * when it finds something wrong.
eee6c32f
AL
4525 * We ignore DRQ here and stop the HSM by
4526 * changing hsm_task_state to HSM_ST_ERR and
4527 * let the EH abort the command or reset the device.
71601958
AL
4528 */
4529 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4530 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4531 "device error, dev_stat 0x%X\n",
4532 status);
3655d1d3 4533 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4534 ap->hsm_task_state = HSM_ST_ERR;
4535 goto fsm_start;
71601958 4536 }
1da177e4 4537
e2cec771 4538 atapi_pio_bytes(qc);
7fb6ec28 4539
e2cec771
AL
4540 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4541 /* bad ireason reported by device */
4542 goto fsm_start;
1da177e4 4543
e2cec771
AL
4544 } else {
4545 /* ATA PIO protocol */
4546 if (unlikely((status & ATA_DRQ) == 0)) {
4547 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4548 if (likely(status & (ATA_ERR | ATA_DF)))
4549 /* device stops HSM for abort/error */
4550 qc->err_mask |= AC_ERR_DEV;
4551 else
55a8e2c8
TH
4552 /* HSM violation. Let EH handle this.
4553 * Phantom devices also trigger this
4554 * condition. Mark hint.
4555 */
4556 qc->err_mask |= AC_ERR_HSM |
4557 AC_ERR_NODEV_HINT;
3655d1d3 4558
e2cec771
AL
4559 ap->hsm_task_state = HSM_ST_ERR;
4560 goto fsm_start;
4561 }
1da177e4 4562
eee6c32f
AL
4563 /* For PIO reads, some devices may ask for
4564 * data transfer (DRQ=1) alone with ERR=1.
4565 * We respect DRQ here and transfer one
4566 * block of junk data before changing the
4567 * hsm_task_state to HSM_ST_ERR.
4568 *
4569 * For PIO writes, ERR=1 DRQ=1 doesn't make
4570 * sense since the data block has been
4571 * transferred to the device.
71601958
AL
4572 */
4573 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4574 /* data might be corrputed */
4575 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4576
4577 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4578 ata_pio_sectors(qc);
4579 ata_altstatus(ap);
4580 status = ata_wait_idle(ap);
4581 }
4582
3655d1d3
AL
4583 if (status & (ATA_BUSY | ATA_DRQ))
4584 qc->err_mask |= AC_ERR_HSM;
4585
eee6c32f
AL
4586 /* ata_pio_sectors() might change the
4587 * state to HSM_ST_LAST. so, the state
4588 * is changed after ata_pio_sectors().
4589 */
4590 ap->hsm_task_state = HSM_ST_ERR;
4591 goto fsm_start;
71601958
AL
4592 }
4593
e2cec771
AL
4594 ata_pio_sectors(qc);
4595
4596 if (ap->hsm_task_state == HSM_ST_LAST &&
4597 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4598 /* all data read */
4599 ata_altstatus(ap);
52a32205 4600 status = ata_wait_idle(ap);
e2cec771
AL
4601 goto fsm_start;
4602 }
4603 }
4604
4605 ata_altstatus(ap); /* flush */
bb5cb290 4606 poll_next = 1;
1da177e4
LT
4607 break;
4608
14be71f4 4609 case HSM_ST_LAST:
6912ccd5
AL
4610 if (unlikely(!ata_ok(status))) {
4611 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4612 ap->hsm_task_state = HSM_ST_ERR;
4613 goto fsm_start;
4614 }
4615
4616 /* no more data to transfer */
4332a771 4617 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 4618 ap->print_id, qc->dev->devno, status);
e2cec771 4619
6912ccd5
AL
4620 WARN_ON(qc->err_mask);
4621
e2cec771 4622 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4623
e2cec771 4624 /* complete taskfile transaction */
c17ea20d 4625 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4626
4627 poll_next = 0;
1da177e4
LT
4628 break;
4629
14be71f4 4630 case HSM_ST_ERR:
e2cec771
AL
4631 /* make sure qc->err_mask is available to
4632 * know what's wrong and recover
4633 */
4634 WARN_ON(qc->err_mask == 0);
4635
4636 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4637
999bb6f4 4638 /* complete taskfile transaction */
c17ea20d 4639 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4640
4641 poll_next = 0;
e2cec771
AL
4642 break;
4643 default:
bb5cb290 4644 poll_next = 0;
6912ccd5 4645 BUG();
1da177e4
LT
4646 }
4647
bb5cb290 4648 return poll_next;
1da177e4
LT
4649}
4650
65f27f38 4651static void ata_pio_task(struct work_struct *work)
8061f5f0 4652{
65f27f38
DH
4653 struct ata_port *ap =
4654 container_of(work, struct ata_port, port_task.work);
4655 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 4656 u8 status;
a1af3734 4657 int poll_next;
8061f5f0 4658
7fb6ec28 4659fsm_start:
a1af3734 4660 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4661
a1af3734
AL
4662 /*
4663 * This is purely heuristic. This is a fast path.
4664 * Sometimes when we enter, BSY will be cleared in
4665 * a chk-status or two. If not, the drive is probably seeking
4666 * or something. Snooze for a couple msecs, then
4667 * chk-status again. If still busy, queue delayed work.
4668 */
4669 status = ata_busy_wait(ap, ATA_BUSY, 5);
4670 if (status & ATA_BUSY) {
4671 msleep(2);
4672 status = ata_busy_wait(ap, ATA_BUSY, 10);
4673 if (status & ATA_BUSY) {
31ce6dae 4674 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4675 return;
4676 }
8061f5f0
TH
4677 }
4678
a1af3734
AL
4679 /* move the HSM */
4680 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4681
a1af3734
AL
4682 /* another command or interrupt handler
4683 * may be running at this point.
4684 */
4685 if (poll_next)
7fb6ec28 4686 goto fsm_start;
8061f5f0
TH
4687}
4688
1da177e4
LT
4689/**
4690 * ata_qc_new - Request an available ATA command, for queueing
4691 * @ap: Port associated with device @dev
4692 * @dev: Device from whom we request an available command structure
4693 *
4694 * LOCKING:
0cba632b 4695 * None.
1da177e4
LT
4696 */
4697
4698static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4699{
4700 struct ata_queued_cmd *qc = NULL;
4701 unsigned int i;
4702
e3180499 4703 /* no command while frozen */
b51e9e5d 4704 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4705 return NULL;
4706
2ab7db1f
TH
4707 /* the last tag is reserved for internal command. */
4708 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4709 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4710 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4711 break;
4712 }
4713
4714 if (qc)
4715 qc->tag = i;
4716
4717 return qc;
4718}
4719
4720/**
4721 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4722 * @dev: Device from whom we request an available command structure
4723 *
4724 * LOCKING:
0cba632b 4725 * None.
1da177e4
LT
4726 */
4727
3373efd8 4728struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4729{
3373efd8 4730 struct ata_port *ap = dev->ap;
1da177e4
LT
4731 struct ata_queued_cmd *qc;
4732
4733 qc = ata_qc_new(ap);
4734 if (qc) {
1da177e4
LT
4735 qc->scsicmd = NULL;
4736 qc->ap = ap;
4737 qc->dev = dev;
1da177e4 4738
2c13b7ce 4739 ata_qc_reinit(qc);
1da177e4
LT
4740 }
4741
4742 return qc;
4743}
4744
1da177e4
LT
4745/**
4746 * ata_qc_free - free unused ata_queued_cmd
4747 * @qc: Command to complete
4748 *
4749 * Designed to free unused ata_queued_cmd object
4750 * in case something prevents using it.
4751 *
4752 * LOCKING:
cca3974e 4753 * spin_lock_irqsave(host lock)
1da177e4
LT
4754 */
4755void ata_qc_free(struct ata_queued_cmd *qc)
4756{
4ba946e9
TH
4757 struct ata_port *ap = qc->ap;
4758 unsigned int tag;
4759
a4631474 4760 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4761
4ba946e9
TH
4762 qc->flags = 0;
4763 tag = qc->tag;
4764 if (likely(ata_tag_valid(tag))) {
4ba946e9 4765 qc->tag = ATA_TAG_POISON;
6cec4a39 4766 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4767 }
1da177e4
LT
4768}
4769
76014427 4770void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4771{
dedaf2b0
TH
4772 struct ata_port *ap = qc->ap;
4773
a4631474
TH
4774 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4775 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4776
4777 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4778 ata_sg_clean(qc);
4779
7401abf2 4780 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
4781 if (qc->tf.protocol == ATA_PROT_NCQ)
4782 ap->sactive &= ~(1 << qc->tag);
4783 else
4784 ap->active_tag = ATA_TAG_POISON;
7401abf2 4785
3f3791d3
AL
4786 /* atapi: mark qc as inactive to prevent the interrupt handler
4787 * from completing the command twice later, before the error handler
4788 * is called. (when rc != 0 and atapi request sense is needed)
4789 */
4790 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4791 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4792
1da177e4 4793 /* call completion callback */
77853bf2 4794 qc->complete_fn(qc);
1da177e4
LT
4795}
4796
39599a53
TH
4797static void fill_result_tf(struct ata_queued_cmd *qc)
4798{
4799 struct ata_port *ap = qc->ap;
4800
39599a53 4801 qc->result_tf.flags = qc->tf.flags;
4742d54f 4802 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
4803}
4804
f686bcb8
TH
4805/**
4806 * ata_qc_complete - Complete an active ATA command
4807 * @qc: Command to complete
4808 * @err_mask: ATA Status register contents
4809 *
4810 * Indicate to the mid and upper layers that an ATA
4811 * command has completed, with either an ok or not-ok status.
4812 *
4813 * LOCKING:
cca3974e 4814 * spin_lock_irqsave(host lock)
f686bcb8
TH
4815 */
4816void ata_qc_complete(struct ata_queued_cmd *qc)
4817{
4818 struct ata_port *ap = qc->ap;
4819
4820 /* XXX: New EH and old EH use different mechanisms to
4821 * synchronize EH with regular execution path.
4822 *
4823 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4824 * Normal execution path is responsible for not accessing a
4825 * failed qc. libata core enforces the rule by returning NULL
4826 * from ata_qc_from_tag() for failed qcs.
4827 *
4828 * Old EH depends on ata_qc_complete() nullifying completion
4829 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4830 * not synchronize with interrupt handler. Only PIO task is
4831 * taken care of.
4832 */
4833 if (ap->ops->error_handler) {
b51e9e5d 4834 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4835
4836 if (unlikely(qc->err_mask))
4837 qc->flags |= ATA_QCFLAG_FAILED;
4838
4839 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4840 if (!ata_tag_internal(qc->tag)) {
4841 /* always fill result TF for failed qc */
39599a53 4842 fill_result_tf(qc);
f686bcb8
TH
4843 ata_qc_schedule_eh(qc);
4844 return;
4845 }
4846 }
4847
4848 /* read result TF if requested */
4849 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4850 fill_result_tf(qc);
f686bcb8
TH
4851
4852 __ata_qc_complete(qc);
4853 } else {
4854 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4855 return;
4856
4857 /* read result TF if failed or requested */
4858 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4859 fill_result_tf(qc);
f686bcb8
TH
4860
4861 __ata_qc_complete(qc);
4862 }
4863}
4864
dedaf2b0
TH
4865/**
4866 * ata_qc_complete_multiple - Complete multiple qcs successfully
4867 * @ap: port in question
4868 * @qc_active: new qc_active mask
4869 * @finish_qc: LLDD callback invoked before completing a qc
4870 *
4871 * Complete in-flight commands. This functions is meant to be
4872 * called from low-level driver's interrupt routine to complete
4873 * requests normally. ap->qc_active and @qc_active is compared
4874 * and commands are completed accordingly.
4875 *
4876 * LOCKING:
cca3974e 4877 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4878 *
4879 * RETURNS:
4880 * Number of completed commands on success, -errno otherwise.
4881 */
4882int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4883 void (*finish_qc)(struct ata_queued_cmd *))
4884{
4885 int nr_done = 0;
4886 u32 done_mask;
4887 int i;
4888
4889 done_mask = ap->qc_active ^ qc_active;
4890
4891 if (unlikely(done_mask & qc_active)) {
4892 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4893 "(%08x->%08x)\n", ap->qc_active, qc_active);
4894 return -EINVAL;
4895 }
4896
4897 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4898 struct ata_queued_cmd *qc;
4899
4900 if (!(done_mask & (1 << i)))
4901 continue;
4902
4903 if ((qc = ata_qc_from_tag(ap, i))) {
4904 if (finish_qc)
4905 finish_qc(qc);
4906 ata_qc_complete(qc);
4907 nr_done++;
4908 }
4909 }
4910
4911 return nr_done;
4912}
4913
1da177e4
LT
4914static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4915{
4916 struct ata_port *ap = qc->ap;
4917
4918 switch (qc->tf.protocol) {
3dc1d881 4919 case ATA_PROT_NCQ:
1da177e4
LT
4920 case ATA_PROT_DMA:
4921 case ATA_PROT_ATAPI_DMA:
4922 return 1;
4923
4924 case ATA_PROT_ATAPI:
4925 case ATA_PROT_PIO:
1da177e4
LT
4926 if (ap->flags & ATA_FLAG_PIO_DMA)
4927 return 1;
4928
4929 /* fall through */
4930
4931 default:
4932 return 0;
4933 }
4934
4935 /* never reached */
4936}
4937
4938/**
4939 * ata_qc_issue - issue taskfile to device
4940 * @qc: command to issue to device
4941 *
4942 * Prepare an ATA command to submission to device.
4943 * This includes mapping the data into a DMA-able
4944 * area, filling in the S/G table, and finally
4945 * writing the taskfile to hardware, starting the command.
4946 *
4947 * LOCKING:
cca3974e 4948 * spin_lock_irqsave(host lock)
1da177e4 4949 */
8e0e694a 4950void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4951{
4952 struct ata_port *ap = qc->ap;
4953
dedaf2b0
TH
4954 /* Make sure only one non-NCQ command is outstanding. The
4955 * check is skipped for old EH because it reuses active qc to
4956 * request ATAPI sense.
4957 */
4958 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4959
4960 if (qc->tf.protocol == ATA_PROT_NCQ) {
4961 WARN_ON(ap->sactive & (1 << qc->tag));
4962 ap->sactive |= 1 << qc->tag;
4963 } else {
4964 WARN_ON(ap->sactive);
4965 ap->active_tag = qc->tag;
4966 }
4967
e4a70e76 4968 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4969 ap->qc_active |= 1 << qc->tag;
e4a70e76 4970
1da177e4
LT
4971 if (ata_should_dma_map(qc)) {
4972 if (qc->flags & ATA_QCFLAG_SG) {
4973 if (ata_sg_setup(qc))
8e436af9 4974 goto sg_err;
1da177e4
LT
4975 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4976 if (ata_sg_setup_one(qc))
8e436af9 4977 goto sg_err;
1da177e4
LT
4978 }
4979 } else {
4980 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4981 }
4982
4983 ap->ops->qc_prep(qc);
4984
8e0e694a
TH
4985 qc->err_mask |= ap->ops->qc_issue(qc);
4986 if (unlikely(qc->err_mask))
4987 goto err;
4988 return;
1da177e4 4989
8e436af9
TH
4990sg_err:
4991 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
4992 qc->err_mask |= AC_ERR_SYSTEM;
4993err:
4994 ata_qc_complete(qc);
1da177e4
LT
4995}
4996
4997/**
4998 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4999 * @qc: command to issue to device
5000 *
5001 * Using various libata functions and hooks, this function
5002 * starts an ATA command. ATA commands are grouped into
5003 * classes called "protocols", and issuing each type of protocol
5004 * is slightly different.
5005 *
0baab86b
EF
5006 * May be used as the qc_issue() entry in ata_port_operations.
5007 *
1da177e4 5008 * LOCKING:
cca3974e 5009 * spin_lock_irqsave(host lock)
1da177e4
LT
5010 *
5011 * RETURNS:
9a3d9eb0 5012 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5013 */
5014
9a3d9eb0 5015unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
5016{
5017 struct ata_port *ap = qc->ap;
5018
e50362ec
AL
5019 /* Use polling pio if the LLD doesn't handle
5020 * interrupt driven pio and atapi CDB interrupt.
5021 */
5022 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5023 switch (qc->tf.protocol) {
5024 case ATA_PROT_PIO:
e3472cbe 5025 case ATA_PROT_NODATA:
e50362ec
AL
5026 case ATA_PROT_ATAPI:
5027 case ATA_PROT_ATAPI_NODATA:
5028 qc->tf.flags |= ATA_TFLAG_POLLING;
5029 break;
5030 case ATA_PROT_ATAPI_DMA:
5031 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 5032 /* see ata_dma_blacklisted() */
e50362ec
AL
5033 BUG();
5034 break;
5035 default:
5036 break;
5037 }
5038 }
5039
3d3cca37
TH
5040 /* Some controllers show flaky interrupt behavior after
5041 * setting xfer mode. Use polling instead.
5042 */
5043 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
5044 qc->tf.feature == SETFEATURES_XFER) &&
5045 (ap->flags & ATA_FLAG_SETXFER_POLLING))
5046 qc->tf.flags |= ATA_TFLAG_POLLING;
5047
312f7da2 5048 /* select the device */
1da177e4
LT
5049 ata_dev_select(ap, qc->dev->devno, 1, 0);
5050
312f7da2 5051 /* start the command */
1da177e4
LT
5052 switch (qc->tf.protocol) {
5053 case ATA_PROT_NODATA:
312f7da2
AL
5054 if (qc->tf.flags & ATA_TFLAG_POLLING)
5055 ata_qc_set_polling(qc);
5056
e5338254 5057 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
5058 ap->hsm_task_state = HSM_ST_LAST;
5059
5060 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5061 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5062
1da177e4
LT
5063 break;
5064
5065 case ATA_PROT_DMA:
587005de 5066 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5067
1da177e4
LT
5068 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5069 ap->ops->bmdma_setup(qc); /* set up bmdma */
5070 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5071 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5072 break;
5073
312f7da2
AL
5074 case ATA_PROT_PIO:
5075 if (qc->tf.flags & ATA_TFLAG_POLLING)
5076 ata_qc_set_polling(qc);
1da177e4 5077
e5338254 5078 ata_tf_to_host(ap, &qc->tf);
312f7da2 5079
54f00389
AL
5080 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5081 /* PIO data out protocol */
5082 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5083 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5084
5085 /* always send first data block using
e27486db 5086 * the ata_pio_task() codepath.
54f00389 5087 */
312f7da2 5088 } else {
54f00389
AL
5089 /* PIO data in protocol */
5090 ap->hsm_task_state = HSM_ST;
5091
5092 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5093 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5094
5095 /* if polling, ata_pio_task() handles the rest.
5096 * otherwise, interrupt handler takes over from here.
5097 */
312f7da2
AL
5098 }
5099
1da177e4
LT
5100 break;
5101
1da177e4 5102 case ATA_PROT_ATAPI:
1da177e4 5103 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5104 if (qc->tf.flags & ATA_TFLAG_POLLING)
5105 ata_qc_set_polling(qc);
5106
e5338254 5107 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5108
312f7da2
AL
5109 ap->hsm_task_state = HSM_ST_FIRST;
5110
5111 /* send cdb by polling if no cdb interrupt */
5112 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5113 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5114 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5115 break;
5116
5117 case ATA_PROT_ATAPI_DMA:
587005de 5118 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5119
1da177e4
LT
5120 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5121 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5122 ap->hsm_task_state = HSM_ST_FIRST;
5123
5124 /* send cdb by polling if no cdb interrupt */
5125 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5126 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5127 break;
5128
5129 default:
5130 WARN_ON(1);
9a3d9eb0 5131 return AC_ERR_SYSTEM;
1da177e4
LT
5132 }
5133
5134 return 0;
5135}
5136
1da177e4
LT
5137/**
5138 * ata_host_intr - Handle host interrupt for given (port, task)
5139 * @ap: Port on which interrupt arrived (possibly...)
5140 * @qc: Taskfile currently active in engine
5141 *
5142 * Handle host interrupt for given queued command. Currently,
5143 * only DMA interrupts are handled. All other commands are
5144 * handled via polling with interrupts disabled (nIEN bit).
5145 *
5146 * LOCKING:
cca3974e 5147 * spin_lock_irqsave(host lock)
1da177e4
LT
5148 *
5149 * RETURNS:
5150 * One if interrupt was handled, zero if not (shared irq).
5151 */
5152
5153inline unsigned int ata_host_intr (struct ata_port *ap,
5154 struct ata_queued_cmd *qc)
5155{
ea54763f 5156 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 5157 u8 status, host_stat = 0;
1da177e4 5158
312f7da2 5159 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5160 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5161
312f7da2
AL
5162 /* Check whether we are expecting interrupt in this state */
5163 switch (ap->hsm_task_state) {
5164 case HSM_ST_FIRST:
6912ccd5
AL
5165 /* Some pre-ATAPI-4 devices assert INTRQ
5166 * at this state when ready to receive CDB.
5167 */
1da177e4 5168
312f7da2
AL
5169 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
5170 * The flag was turned on only for atapi devices.
5171 * No need to check is_atapi_taskfile(&qc->tf) again.
5172 */
5173 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5174 goto idle_irq;
1da177e4 5175 break;
312f7da2
AL
5176 case HSM_ST_LAST:
5177 if (qc->tf.protocol == ATA_PROT_DMA ||
5178 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5179 /* check status of DMA engine */
5180 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
5181 VPRINTK("ata%u: host_stat 0x%X\n",
5182 ap->print_id, host_stat);
312f7da2
AL
5183
5184 /* if it's not our irq... */
5185 if (!(host_stat & ATA_DMA_INTR))
5186 goto idle_irq;
5187
5188 /* before we do anything else, clear DMA-Start bit */
5189 ap->ops->bmdma_stop(qc);
a4f16610
AL
5190
5191 if (unlikely(host_stat & ATA_DMA_ERR)) {
5192 /* error when transfering data to/from memory */
5193 qc->err_mask |= AC_ERR_HOST_BUS;
5194 ap->hsm_task_state = HSM_ST_ERR;
5195 }
312f7da2
AL
5196 }
5197 break;
5198 case HSM_ST:
5199 break;
1da177e4
LT
5200 default:
5201 goto idle_irq;
5202 }
5203
312f7da2
AL
5204 /* check altstatus */
5205 status = ata_altstatus(ap);
5206 if (status & ATA_BUSY)
5207 goto idle_irq;
1da177e4 5208
312f7da2
AL
5209 /* check main status, clearing INTRQ */
5210 status = ata_chk_status(ap);
5211 if (unlikely(status & ATA_BUSY))
5212 goto idle_irq;
1da177e4 5213
312f7da2
AL
5214 /* ack bmdma irq events */
5215 ap->ops->irq_clear(ap);
1da177e4 5216
bb5cb290 5217 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5218
5219 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5220 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5221 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5222
1da177e4
LT
5223 return 1; /* irq handled */
5224
5225idle_irq:
5226 ap->stats.idle_irq++;
5227
5228#ifdef ATA_IRQ_TRAP
5229 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5230 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5231 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5232 return 1;
1da177e4
LT
5233 }
5234#endif
5235 return 0; /* irq not handled */
5236}
5237
5238/**
5239 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5240 * @irq: irq line (unused)
cca3974e 5241 * @dev_instance: pointer to our ata_host information structure
1da177e4 5242 *
0cba632b
JG
5243 * Default interrupt handler for PCI IDE devices. Calls
5244 * ata_host_intr() for each port that is not disabled.
5245 *
1da177e4 5246 * LOCKING:
cca3974e 5247 * Obtains host lock during operation.
1da177e4
LT
5248 *
5249 * RETURNS:
0cba632b 5250 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5251 */
5252
7d12e780 5253irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5254{
cca3974e 5255 struct ata_host *host = dev_instance;
1da177e4
LT
5256 unsigned int i;
5257 unsigned int handled = 0;
5258 unsigned long flags;
5259
5260 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5261 spin_lock_irqsave(&host->lock, flags);
1da177e4 5262
cca3974e 5263 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5264 struct ata_port *ap;
5265
cca3974e 5266 ap = host->ports[i];
c1389503 5267 if (ap &&
029f5468 5268 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5269 struct ata_queued_cmd *qc;
5270
5271 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5272 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5273 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5274 handled |= ata_host_intr(ap, qc);
5275 }
5276 }
5277
cca3974e 5278 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5279
5280 return IRQ_RETVAL(handled);
5281}
5282
34bf2170
TH
5283/**
5284 * sata_scr_valid - test whether SCRs are accessible
5285 * @ap: ATA port to test SCR accessibility for
5286 *
5287 * Test whether SCRs are accessible for @ap.
5288 *
5289 * LOCKING:
5290 * None.
5291 *
5292 * RETURNS:
5293 * 1 if SCRs are accessible, 0 otherwise.
5294 */
5295int sata_scr_valid(struct ata_port *ap)
5296{
5297 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5298}
5299
5300/**
5301 * sata_scr_read - read SCR register of the specified port
5302 * @ap: ATA port to read SCR for
5303 * @reg: SCR to read
5304 * @val: Place to store read value
5305 *
5306 * Read SCR register @reg of @ap into *@val. This function is
5307 * guaranteed to succeed if the cable type of the port is SATA
5308 * and the port implements ->scr_read.
5309 *
5310 * LOCKING:
5311 * None.
5312 *
5313 * RETURNS:
5314 * 0 on success, negative errno on failure.
5315 */
5316int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5317{
5318 if (sata_scr_valid(ap)) {
5319 *val = ap->ops->scr_read(ap, reg);
5320 return 0;
5321 }
5322 return -EOPNOTSUPP;
5323}
5324
5325/**
5326 * sata_scr_write - write SCR register of the specified port
5327 * @ap: ATA port to write SCR for
5328 * @reg: SCR to write
5329 * @val: value to write
5330 *
5331 * Write @val to SCR register @reg of @ap. This function is
5332 * guaranteed to succeed if the cable type of the port is SATA
5333 * and the port implements ->scr_read.
5334 *
5335 * LOCKING:
5336 * None.
5337 *
5338 * RETURNS:
5339 * 0 on success, negative errno on failure.
5340 */
5341int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5342{
5343 if (sata_scr_valid(ap)) {
5344 ap->ops->scr_write(ap, reg, val);
5345 return 0;
5346 }
5347 return -EOPNOTSUPP;
5348}
5349
5350/**
5351 * sata_scr_write_flush - write SCR register of the specified port and flush
5352 * @ap: ATA port to write SCR for
5353 * @reg: SCR to write
5354 * @val: value to write
5355 *
5356 * This function is identical to sata_scr_write() except that this
5357 * function performs flush after writing to the register.
5358 *
5359 * LOCKING:
5360 * None.
5361 *
5362 * RETURNS:
5363 * 0 on success, negative errno on failure.
5364 */
5365int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5366{
5367 if (sata_scr_valid(ap)) {
5368 ap->ops->scr_write(ap, reg, val);
5369 ap->ops->scr_read(ap, reg);
5370 return 0;
5371 }
5372 return -EOPNOTSUPP;
5373}
5374
5375/**
5376 * ata_port_online - test whether the given port is online
5377 * @ap: ATA port to test
5378 *
5379 * Test whether @ap is online. Note that this function returns 0
5380 * if online status of @ap cannot be obtained, so
5381 * ata_port_online(ap) != !ata_port_offline(ap).
5382 *
5383 * LOCKING:
5384 * None.
5385 *
5386 * RETURNS:
5387 * 1 if the port online status is available and online.
5388 */
5389int ata_port_online(struct ata_port *ap)
5390{
5391 u32 sstatus;
5392
5393 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5394 return 1;
5395 return 0;
5396}
5397
5398/**
5399 * ata_port_offline - test whether the given port is offline
5400 * @ap: ATA port to test
5401 *
5402 * Test whether @ap is offline. Note that this function returns
5403 * 0 if offline status of @ap cannot be obtained, so
5404 * ata_port_online(ap) != !ata_port_offline(ap).
5405 *
5406 * LOCKING:
5407 * None.
5408 *
5409 * RETURNS:
5410 * 1 if the port offline status is available and offline.
5411 */
5412int ata_port_offline(struct ata_port *ap)
5413{
5414 u32 sstatus;
5415
5416 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5417 return 1;
5418 return 0;
5419}
0baab86b 5420
77b08fb5 5421int ata_flush_cache(struct ata_device *dev)
9b847548 5422{
977e6b9f 5423 unsigned int err_mask;
9b847548
JA
5424 u8 cmd;
5425
5426 if (!ata_try_flush_cache(dev))
5427 return 0;
5428
6fc49adb 5429 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5430 cmd = ATA_CMD_FLUSH_EXT;
5431 else
5432 cmd = ATA_CMD_FLUSH;
5433
977e6b9f
TH
5434 err_mask = ata_do_simple_cmd(dev, cmd);
5435 if (err_mask) {
5436 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5437 return -EIO;
5438 }
5439
5440 return 0;
9b847548
JA
5441}
5442
6ffa01d8 5443#ifdef CONFIG_PM
cca3974e
JG
5444static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5445 unsigned int action, unsigned int ehi_flags,
5446 int wait)
500530f6
TH
5447{
5448 unsigned long flags;
5449 int i, rc;
5450
cca3974e
JG
5451 for (i = 0; i < host->n_ports; i++) {
5452 struct ata_port *ap = host->ports[i];
500530f6
TH
5453
5454 /* Previous resume operation might still be in
5455 * progress. Wait for PM_PENDING to clear.
5456 */
5457 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5458 ata_port_wait_eh(ap);
5459 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5460 }
5461
5462 /* request PM ops to EH */
5463 spin_lock_irqsave(ap->lock, flags);
5464
5465 ap->pm_mesg = mesg;
5466 if (wait) {
5467 rc = 0;
5468 ap->pm_result = &rc;
5469 }
5470
5471 ap->pflags |= ATA_PFLAG_PM_PENDING;
5472 ap->eh_info.action |= action;
5473 ap->eh_info.flags |= ehi_flags;
5474
5475 ata_port_schedule_eh(ap);
5476
5477 spin_unlock_irqrestore(ap->lock, flags);
5478
5479 /* wait and check result */
5480 if (wait) {
5481 ata_port_wait_eh(ap);
5482 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5483 if (rc)
5484 return rc;
5485 }
5486 }
5487
5488 return 0;
5489}
5490
5491/**
cca3974e
JG
5492 * ata_host_suspend - suspend host
5493 * @host: host to suspend
500530f6
TH
5494 * @mesg: PM message
5495 *
cca3974e 5496 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
5497 * function requests EH to perform PM operations and waits for EH
5498 * to finish.
5499 *
5500 * LOCKING:
5501 * Kernel thread context (may sleep).
5502 *
5503 * RETURNS:
5504 * 0 on success, -errno on failure.
5505 */
cca3974e 5506int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6
TH
5507{
5508 int i, j, rc;
5509
cca3974e 5510 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
500530f6
TH
5511 if (rc)
5512 goto fail;
5513
5514 /* EH is quiescent now. Fail if we have any ready device.
5515 * This happens if hotplug occurs between completion of device
5516 * suspension and here.
5517 */
cca3974e
JG
5518 for (i = 0; i < host->n_ports; i++) {
5519 struct ata_port *ap = host->ports[i];
500530f6
TH
5520
5521 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5522 struct ata_device *dev = &ap->device[j];
5523
5524 if (ata_dev_ready(dev)) {
5525 ata_port_printk(ap, KERN_WARNING,
5526 "suspend failed, device %d "
5527 "still active\n", dev->devno);
5528 rc = -EBUSY;
5529 goto fail;
5530 }
5531 }
5532 }
5533
cca3974e 5534 host->dev->power.power_state = mesg;
500530f6
TH
5535 return 0;
5536
5537 fail:
cca3974e 5538 ata_host_resume(host);
500530f6
TH
5539 return rc;
5540}
5541
5542/**
cca3974e
JG
5543 * ata_host_resume - resume host
5544 * @host: host to resume
500530f6 5545 *
cca3974e 5546 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5547 * function requests EH to perform PM operations and returns.
5548 * Note that all resume operations are performed parallely.
5549 *
5550 * LOCKING:
5551 * Kernel thread context (may sleep).
5552 */
cca3974e 5553void ata_host_resume(struct ata_host *host)
500530f6 5554{
cca3974e
JG
5555 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5556 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5557 host->dev->power.power_state = PMSG_ON;
500530f6 5558}
6ffa01d8 5559#endif
500530f6 5560
c893a3ae
RD
5561/**
5562 * ata_port_start - Set port up for dma.
5563 * @ap: Port to initialize
5564 *
5565 * Called just after data structures for each port are
5566 * initialized. Allocates space for PRD table.
5567 *
5568 * May be used as the port_start() entry in ata_port_operations.
5569 *
5570 * LOCKING:
5571 * Inherited from caller.
5572 */
f0d36efd 5573int ata_port_start(struct ata_port *ap)
1da177e4 5574{
2f1f610b 5575 struct device *dev = ap->dev;
6037d6bb 5576 int rc;
1da177e4 5577
f0d36efd
TH
5578 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5579 GFP_KERNEL);
1da177e4
LT
5580 if (!ap->prd)
5581 return -ENOMEM;
5582
6037d6bb 5583 rc = ata_pad_alloc(ap, dev);
f0d36efd 5584 if (rc)
6037d6bb 5585 return rc;
1da177e4 5586
f0d36efd
TH
5587 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5588 (unsigned long long)ap->prd_dma);
1da177e4
LT
5589 return 0;
5590}
5591
3ef3b43d
TH
5592/**
5593 * ata_dev_init - Initialize an ata_device structure
5594 * @dev: Device structure to initialize
5595 *
5596 * Initialize @dev in preparation for probing.
5597 *
5598 * LOCKING:
5599 * Inherited from caller.
5600 */
5601void ata_dev_init(struct ata_device *dev)
5602{
5603 struct ata_port *ap = dev->ap;
72fa4b74
TH
5604 unsigned long flags;
5605
5a04bf4b
TH
5606 /* SATA spd limit is bound to the first device */
5607 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5608
72fa4b74
TH
5609 /* High bits of dev->flags are used to record warm plug
5610 * requests which occur asynchronously. Synchronize using
cca3974e 5611 * host lock.
72fa4b74 5612 */
ba6a1308 5613 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5614 dev->flags &= ~ATA_DFLAG_INIT_MASK;
ba6a1308 5615 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5616
72fa4b74
TH
5617 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5618 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
5619 dev->pio_mask = UINT_MAX;
5620 dev->mwdma_mask = UINT_MAX;
5621 dev->udma_mask = UINT_MAX;
5622}
5623
1da177e4 5624/**
155a8a9c 5625 * ata_port_init - Initialize an ata_port structure
1da177e4 5626 * @ap: Structure to initialize
cca3974e 5627 * @host: Collection of hosts to which @ap belongs
1da177e4
LT
5628 * @ent: Probe information provided by low-level driver
5629 * @port_no: Port number associated with this ata_port
5630 *
155a8a9c 5631 * Initialize a new ata_port structure.
0cba632b 5632 *
1da177e4 5633 * LOCKING:
0cba632b 5634 * Inherited from caller.
1da177e4 5635 */
cca3974e 5636void ata_port_init(struct ata_port *ap, struct ata_host *host,
155a8a9c 5637 const struct ata_probe_ent *ent, unsigned int port_no)
1da177e4
LT
5638{
5639 unsigned int i;
5640
cca3974e 5641 ap->lock = &host->lock;
198e0fed 5642 ap->flags = ATA_FLAG_DISABLED;
44877b4e 5643 ap->print_id = ata_print_id++;
1da177e4 5644 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 5645 ap->host = host;
2f1f610b 5646 ap->dev = ent->dev;
1da177e4 5647 ap->port_no = port_no;
fea63e38
TH
5648 if (port_no == 1 && ent->pinfo2) {
5649 ap->pio_mask = ent->pinfo2->pio_mask;
5650 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5651 ap->udma_mask = ent->pinfo2->udma_mask;
5652 ap->flags |= ent->pinfo2->flags;
5653 ap->ops = ent->pinfo2->port_ops;
5654 } else {
5655 ap->pio_mask = ent->pio_mask;
5656 ap->mwdma_mask = ent->mwdma_mask;
5657 ap->udma_mask = ent->udma_mask;
5658 ap->flags |= ent->port_flags;
5659 ap->ops = ent->port_ops;
5660 }
5a04bf4b 5661 ap->hw_sata_spd_limit = UINT_MAX;
1da177e4
LT
5662 ap->active_tag = ATA_TAG_POISON;
5663 ap->last_ctl = 0xFF;
bd5d825c
BP
5664
5665#if defined(ATA_VERBOSE_DEBUG)
5666 /* turn on all debugging levels */
5667 ap->msg_enable = 0x00FF;
5668#elif defined(ATA_DEBUG)
5669 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5670#else
0dd4b21f 5671 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5672#endif
1da177e4 5673
65f27f38
DH
5674 INIT_DELAYED_WORK(&ap->port_task, NULL);
5675 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5676 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5677 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5678 init_waitqueue_head(&ap->eh_wait_q);
1da177e4 5679
838df628
TH
5680 /* set cable type */
5681 ap->cbl = ATA_CBL_NONE;
5682 if (ap->flags & ATA_FLAG_SATA)
5683 ap->cbl = ATA_CBL_SATA;
5684
acf356b1
TH
5685 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5686 struct ata_device *dev = &ap->device[i];
38d87234 5687 dev->ap = ap;
72fa4b74 5688 dev->devno = i;
3ef3b43d 5689 ata_dev_init(dev);
acf356b1 5690 }
1da177e4
LT
5691
5692#ifdef ATA_IRQ_TRAP
5693 ap->stats.unhandled_irq = 1;
5694 ap->stats.idle_irq = 1;
5695#endif
5696
5697 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5698}
5699
155a8a9c 5700/**
4608c160
TH
5701 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5702 * @ap: ATA port to initialize SCSI host for
5703 * @shost: SCSI host associated with @ap
155a8a9c 5704 *
4608c160 5705 * Initialize SCSI host @shost associated with ATA port @ap.
155a8a9c
BK
5706 *
5707 * LOCKING:
5708 * Inherited from caller.
5709 */
4608c160 5710static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
155a8a9c 5711{
cca3974e 5712 ap->scsi_host = shost;
155a8a9c 5713
44877b4e 5714 shost->unique_id = ap->print_id;
4608c160
TH
5715 shost->max_id = 16;
5716 shost->max_lun = 1;
5717 shost->max_channel = 1;
f0ef88ed 5718 shost->max_cmd_len = 16;
155a8a9c
BK
5719}
5720
1da177e4 5721/**
996139f1 5722 * ata_port_add - Attach low-level ATA driver to system
1da177e4 5723 * @ent: Information provided by low-level driver
cca3974e 5724 * @host: Collections of ports to which we add
1da177e4
LT
5725 * @port_no: Port number associated with this host
5726 *
0cba632b
JG
5727 * Attach low-level ATA driver to system.
5728 *
1da177e4 5729 * LOCKING:
0cba632b 5730 * PCI/etc. bus probe sem.
1da177e4
LT
5731 *
5732 * RETURNS:
0cba632b 5733 * New ata_port on success, for NULL on error.
1da177e4 5734 */
996139f1 5735static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
cca3974e 5736 struct ata_host *host,
1da177e4
LT
5737 unsigned int port_no)
5738{
996139f1 5739 struct Scsi_Host *shost;
1da177e4 5740 struct ata_port *ap;
1da177e4
LT
5741
5742 DPRINTK("ENTER\n");
aec5c3c1 5743
52783c5d 5744 if (!ent->port_ops->error_handler &&
cca3974e 5745 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
aec5c3c1
TH
5746 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5747 port_no);
5748 return NULL;
5749 }
5750
996139f1
JG
5751 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5752 if (!shost)
1da177e4
LT
5753 return NULL;
5754
996139f1 5755 shost->transportt = &ata_scsi_transport_template;
30afc84c 5756
996139f1 5757 ap = ata_shost_to_port(shost);
1da177e4 5758
cca3974e 5759 ata_port_init(ap, host, ent, port_no);
996139f1 5760 ata_port_init_shost(ap, shost);
1da177e4 5761
1da177e4 5762 return ap;
1da177e4
LT
5763}
5764
f0d36efd
TH
5765static void ata_host_release(struct device *gendev, void *res)
5766{
5767 struct ata_host *host = dev_get_drvdata(gendev);
5768 int i;
5769
5770 for (i = 0; i < host->n_ports; i++) {
5771 struct ata_port *ap = host->ports[i];
5772
1aa506e4 5773 if (ap && ap->ops->port_stop)
f0d36efd 5774 ap->ops->port_stop(ap);
f0d36efd
TH
5775 }
5776
5777 if (host->ops->host_stop)
5778 host->ops->host_stop(host);
1aa56cca 5779
1aa506e4
TH
5780 for (i = 0; i < host->n_ports; i++) {
5781 struct ata_port *ap = host->ports[i];
5782
5783 if (ap)
5784 scsi_host_put(ap->scsi_host);
5785
5786 host->ports[i] = NULL;
5787 }
5788
1aa56cca 5789 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
5790}
5791
b03732f0 5792/**
cca3974e
JG
5793 * ata_sas_host_init - Initialize a host struct
5794 * @host: host to initialize
5795 * @dev: device host is attached to
5796 * @flags: host flags
5797 * @ops: port_ops
b03732f0
BK
5798 *
5799 * LOCKING:
5800 * PCI/etc. bus probe sem.
5801 *
5802 */
5803
cca3974e
JG
5804void ata_host_init(struct ata_host *host, struct device *dev,
5805 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 5806{
cca3974e
JG
5807 spin_lock_init(&host->lock);
5808 host->dev = dev;
5809 host->flags = flags;
5810 host->ops = ops;
b03732f0
BK
5811}
5812
1da177e4 5813/**
0cba632b
JG
5814 * ata_device_add - Register hardware device with ATA and SCSI layers
5815 * @ent: Probe information describing hardware device to be registered
5816 *
5817 * This function processes the information provided in the probe
5818 * information struct @ent, allocates the necessary ATA and SCSI
5819 * host information structures, initializes them, and registers
5820 * everything with requisite kernel subsystems.
5821 *
5822 * This function requests irqs, probes the ATA bus, and probes
5823 * the SCSI bus.
1da177e4
LT
5824 *
5825 * LOCKING:
0cba632b 5826 * PCI/etc. bus probe sem.
1da177e4
LT
5827 *
5828 * RETURNS:
0cba632b 5829 * Number of ports registered. Zero on error (no ports registered).
1da177e4 5830 */
057ace5e 5831int ata_device_add(const struct ata_probe_ent *ent)
1da177e4 5832{
6d0500df 5833 unsigned int i;
1da177e4 5834 struct device *dev = ent->dev;
cca3974e 5835 struct ata_host *host;
39b07ce6 5836 int rc;
1da177e4
LT
5837
5838 DPRINTK("ENTER\n");
f20b16ff 5839
02f076aa
AC
5840 if (ent->irq == 0) {
5841 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5842 return 0;
5843 }
f0d36efd
TH
5844
5845 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
5846 return 0;
5847
1da177e4 5848 /* alloc a container for our list of ATA ports (buses) */
f0d36efd
TH
5849 host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
5850 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
cca3974e 5851 if (!host)
f0d36efd
TH
5852 goto err_out;
5853 devres_add(dev, host);
5854 dev_set_drvdata(dev, host);
1da177e4 5855
cca3974e
JG
5856 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5857 host->n_ports = ent->n_ports;
5858 host->irq = ent->irq;
5859 host->irq2 = ent->irq2;
0d5ff566 5860 host->iomap = ent->iomap;
cca3974e 5861 host->private_data = ent->private_data;
1da177e4
LT
5862
5863 /* register each port bound to this device */
cca3974e 5864 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5865 struct ata_port *ap;
5866 unsigned long xfer_mode_mask;
2ec7df04 5867 int irq_line = ent->irq;
1da177e4 5868
cca3974e 5869 ap = ata_port_add(ent, host, i);
c38778c3 5870 host->ports[i] = ap;
1da177e4
LT
5871 if (!ap)
5872 goto err_out;
5873
dd5b06c4
TH
5874 /* dummy? */
5875 if (ent->dummy_port_mask & (1 << i)) {
5876 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5877 ap->ops = &ata_dummy_port_ops;
5878 continue;
5879 }
5880
5881 /* start port */
5882 rc = ap->ops->port_start(ap);
5883 if (rc) {
cca3974e
JG
5884 host->ports[i] = NULL;
5885 scsi_host_put(ap->scsi_host);
dd5b06c4
TH
5886 goto err_out;
5887 }
5888
2ec7df04
AC
5889 /* Report the secondary IRQ for second channel legacy */
5890 if (i == 1 && ent->irq2)
5891 irq_line = ent->irq2;
5892
1da177e4
LT
5893 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
5894 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5895 (ap->pio_mask << ATA_SHIFT_PIO);
5896
5897 /* print per-port info to dmesg */
0d5ff566
TH
5898 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5899 "ctl 0x%p bmdma 0x%p irq %d\n",
f15a1daf
TH
5900 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5901 ata_mode_string(xfer_mode_mask),
5902 ap->ioaddr.cmd_addr,
5903 ap->ioaddr.ctl_addr,
5904 ap->ioaddr.bmdma_addr,
2ec7df04 5905 irq_line);
1da177e4 5906
0f0a3ad3
TH
5907 /* freeze port before requesting IRQ */
5908 ata_eh_freeze_port(ap);
1da177e4
LT
5909 }
5910
2ec7df04 5911 /* obtain irq, that may be shared between channels */
f0d36efd
TH
5912 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
5913 ent->irq_flags, DRV_NAME, host);
39b07ce6
JG
5914 if (rc) {
5915 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5916 ent->irq, rc);
1da177e4 5917 goto err_out;
39b07ce6 5918 }
1da177e4 5919
2ec7df04
AC
5920 /* do we have a second IRQ for the other channel, eg legacy mode */
5921 if (ent->irq2) {
5922 /* We will get weird core code crashes later if this is true
5923 so trap it now */
5924 BUG_ON(ent->irq == ent->irq2);
5925
f0d36efd
TH
5926 rc = devm_request_irq(dev, ent->irq2,
5927 ent->port_ops->irq_handler, ent->irq_flags,
5928 DRV_NAME, host);
2ec7df04
AC
5929 if (rc) {
5930 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5931 ent->irq2, rc);
f0d36efd 5932 goto err_out;
2ec7df04
AC
5933 }
5934 }
5935
f0d36efd 5936 /* resource acquisition complete */
b878ca5d 5937 devres_remove_group(dev, ata_device_add);
f0d36efd 5938
1da177e4
LT
5939 /* perform each probe synchronously */
5940 DPRINTK("probe begin\n");
cca3974e
JG
5941 for (i = 0; i < host->n_ports; i++) {
5942 struct ata_port *ap = host->ports[i];
5a04bf4b 5943 u32 scontrol;
1da177e4
LT
5944 int rc;
5945
5a04bf4b
TH
5946 /* init sata_spd_limit to the current value */
5947 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5948 int spd = (scontrol >> 4) & 0xf;
5949 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5950 }
5951 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5952
cca3974e 5953 rc = scsi_add_host(ap->scsi_host, dev);
1da177e4 5954 if (rc) {
f15a1daf 5955 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
1da177e4
LT
5956 /* FIXME: do something useful here */
5957 /* FIXME: handle unconditional calls to
5958 * scsi_scan_host and ata_host_remove, below,
5959 * at the very least
5960 */
5961 }
3e706399 5962
52783c5d 5963 if (ap->ops->error_handler) {
1cdaf534 5964 struct ata_eh_info *ehi = &ap->eh_info;
3e706399
TH
5965 unsigned long flags;
5966
5967 ata_port_probe(ap);
5968
5969 /* kick EH for boot probing */
ba6a1308 5970 spin_lock_irqsave(ap->lock, flags);
3e706399 5971
1cdaf534
TH
5972 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5973 ehi->action |= ATA_EH_SOFTRESET;
5974 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
3e706399 5975
b51e9e5d 5976 ap->pflags |= ATA_PFLAG_LOADING;
3e706399
TH
5977 ata_port_schedule_eh(ap);
5978
ba6a1308 5979 spin_unlock_irqrestore(ap->lock, flags);
3e706399
TH
5980
5981 /* wait for EH to finish */
5982 ata_port_wait_eh(ap);
5983 } else {
44877b4e 5984 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
3e706399 5985 rc = ata_bus_probe(ap);
44877b4e 5986 DPRINTK("ata%u: bus probe end\n", ap->print_id);
3e706399
TH
5987
5988 if (rc) {
5989 /* FIXME: do something useful here?
5990 * Current libata behavior will
5991 * tear down everything when
5992 * the module is removed
5993 * or the h/w is unplugged.
5994 */
5995 }
5996 }
1da177e4
LT
5997 }
5998
5999 /* probes are done, now scan each port's disk(s) */
c893a3ae 6000 DPRINTK("host probe begin\n");
cca3974e
JG
6001 for (i = 0; i < host->n_ports; i++) {
6002 struct ata_port *ap = host->ports[i];
1da177e4 6003
644dd0cc 6004 ata_scsi_scan_host(ap);
1da177e4
LT
6005 }
6006
1da177e4
LT
6007 VPRINTK("EXIT, returning %u\n", ent->n_ports);
6008 return ent->n_ports; /* success */
6009
f0d36efd
TH
6010 err_out:
6011 devres_release_group(dev, ata_device_add);
f0d36efd 6012 VPRINTK("EXIT, returning %d\n", rc);
1da177e4
LT
6013 return 0;
6014}
6015
720ba126
TH
6016/**
6017 * ata_port_detach - Detach ATA port in prepration of device removal
6018 * @ap: ATA port to be detached
6019 *
6020 * Detach all ATA devices and the associated SCSI devices of @ap;
6021 * then, remove the associated SCSI host. @ap is guaranteed to
6022 * be quiescent on return from this function.
6023 *
6024 * LOCKING:
6025 * Kernel thread context (may sleep).
6026 */
6027void ata_port_detach(struct ata_port *ap)
6028{
6029 unsigned long flags;
6030 int i;
6031
6032 if (!ap->ops->error_handler)
c3cf30a9 6033 goto skip_eh;
720ba126
TH
6034
6035 /* tell EH we're leaving & flush EH */
ba6a1308 6036 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 6037 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 6038 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6039
6040 ata_port_wait_eh(ap);
6041
6042 /* EH is now guaranteed to see UNLOADING, so no new device
6043 * will be attached. Disable all existing devices.
6044 */
ba6a1308 6045 spin_lock_irqsave(ap->lock, flags);
720ba126
TH
6046
6047 for (i = 0; i < ATA_MAX_DEVICES; i++)
6048 ata_dev_disable(&ap->device[i]);
6049
ba6a1308 6050 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6051
6052 /* Final freeze & EH. All in-flight commands are aborted. EH
6053 * will be skipped and retrials will be terminated with bad
6054 * target.
6055 */
ba6a1308 6056 spin_lock_irqsave(ap->lock, flags);
720ba126 6057 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 6058 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6059
6060 ata_port_wait_eh(ap);
6061
6062 /* Flush hotplug task. The sequence is similar to
6063 * ata_port_flush_task().
6064 */
6065 flush_workqueue(ata_aux_wq);
6066 cancel_delayed_work(&ap->hotplug_task);
6067 flush_workqueue(ata_aux_wq);
6068
c3cf30a9 6069 skip_eh:
720ba126 6070 /* remove the associated SCSI host */
cca3974e 6071 scsi_remove_host(ap->scsi_host);
720ba126
TH
6072}
6073
0529c159
TH
6074/**
6075 * ata_host_detach - Detach all ports of an ATA host
6076 * @host: Host to detach
6077 *
6078 * Detach all ports of @host.
6079 *
6080 * LOCKING:
6081 * Kernel thread context (may sleep).
6082 */
6083void ata_host_detach(struct ata_host *host)
6084{
6085 int i;
6086
6087 for (i = 0; i < host->n_ports; i++)
6088 ata_port_detach(host->ports[i]);
6089}
6090
f6d950e2
BK
6091struct ata_probe_ent *
6092ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
6093{
6094 struct ata_probe_ent *probe_ent;
6095
4d05447e 6096 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
f6d950e2
BK
6097 if (!probe_ent) {
6098 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
6099 kobject_name(&(dev->kobj)));
6100 return NULL;
6101 }
6102
6103 INIT_LIST_HEAD(&probe_ent->node);
6104 probe_ent->dev = dev;
6105
6106 probe_ent->sht = port->sht;
cca3974e 6107 probe_ent->port_flags = port->flags;
f6d950e2
BK
6108 probe_ent->pio_mask = port->pio_mask;
6109 probe_ent->mwdma_mask = port->mwdma_mask;
6110 probe_ent->udma_mask = port->udma_mask;
6111 probe_ent->port_ops = port->port_ops;
d639ca94 6112 probe_ent->private_data = port->private_data;
f6d950e2
BK
6113
6114 return probe_ent;
6115}
6116
1da177e4
LT
6117/**
6118 * ata_std_ports - initialize ioaddr with standard port offsets.
6119 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6120 *
6121 * Utility function which initializes data_addr, error_addr,
6122 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6123 * device_addr, status_addr, and command_addr to standard offsets
6124 * relative to cmd_addr.
6125 *
6126 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6127 */
0baab86b 6128
1da177e4
LT
6129void ata_std_ports(struct ata_ioports *ioaddr)
6130{
6131 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6132 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6133 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6134 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6135 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6136 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6137 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6138 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6139 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6140 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6141}
6142
0baab86b 6143
374b1873
JG
6144#ifdef CONFIG_PCI
6145
1da177e4
LT
6146/**
6147 * ata_pci_remove_one - PCI layer callback for device removal
6148 * @pdev: PCI device that was removed
6149 *
b878ca5d
TH
6150 * PCI layer indicates to libata via this hook that hot-unplug or
6151 * module unload event has occurred. Detach all ports. Resource
6152 * release is handled via devres.
1da177e4
LT
6153 *
6154 * LOCKING:
6155 * Inherited from PCI layer (may sleep).
6156 */
f0d36efd 6157void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4
LT
6158{
6159 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 6160 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6161
b878ca5d 6162 ata_host_detach(host);
1da177e4
LT
6163}
6164
6165/* move to PCI subsystem */
057ace5e 6166int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6167{
6168 unsigned long tmp = 0;
6169
6170 switch (bits->width) {
6171 case 1: {
6172 u8 tmp8 = 0;
6173 pci_read_config_byte(pdev, bits->reg, &tmp8);
6174 tmp = tmp8;
6175 break;
6176 }
6177 case 2: {
6178 u16 tmp16 = 0;
6179 pci_read_config_word(pdev, bits->reg, &tmp16);
6180 tmp = tmp16;
6181 break;
6182 }
6183 case 4: {
6184 u32 tmp32 = 0;
6185 pci_read_config_dword(pdev, bits->reg, &tmp32);
6186 tmp = tmp32;
6187 break;
6188 }
6189
6190 default:
6191 return -EINVAL;
6192 }
6193
6194 tmp &= bits->mask;
6195
6196 return (tmp == bits->val) ? 1 : 0;
6197}
9b847548 6198
6ffa01d8 6199#ifdef CONFIG_PM
3c5100c1 6200void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6201{
6202 pci_save_state(pdev);
4c90d971 6203 pci_disable_device(pdev);
500530f6 6204
4c90d971 6205 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6206 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6207}
6208
553c4aa6 6209int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6210{
553c4aa6
TH
6211 int rc;
6212
9b847548
JA
6213 pci_set_power_state(pdev, PCI_D0);
6214 pci_restore_state(pdev);
553c4aa6 6215
b878ca5d 6216 rc = pcim_enable_device(pdev);
553c4aa6
TH
6217 if (rc) {
6218 dev_printk(KERN_ERR, &pdev->dev,
6219 "failed to enable device after resume (%d)\n", rc);
6220 return rc;
6221 }
6222
9b847548 6223 pci_set_master(pdev);
553c4aa6 6224 return 0;
500530f6
TH
6225}
6226
3c5100c1 6227int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6228{
cca3974e 6229 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6230 int rc = 0;
6231
cca3974e 6232 rc = ata_host_suspend(host, mesg);
500530f6
TH
6233 if (rc)
6234 return rc;
6235
3c5100c1 6236 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6237
6238 return 0;
6239}
6240
6241int ata_pci_device_resume(struct pci_dev *pdev)
6242{
cca3974e 6243 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6244 int rc;
500530f6 6245
553c4aa6
TH
6246 rc = ata_pci_device_do_resume(pdev);
6247 if (rc == 0)
6248 ata_host_resume(host);
6249 return rc;
9b847548 6250}
6ffa01d8
TH
6251#endif /* CONFIG_PM */
6252
1da177e4
LT
6253#endif /* CONFIG_PCI */
6254
6255
1da177e4
LT
6256static int __init ata_init(void)
6257{
a8601e5f 6258 ata_probe_timeout *= HZ;
1da177e4
LT
6259 ata_wq = create_workqueue("ata");
6260 if (!ata_wq)
6261 return -ENOMEM;
6262
453b07ac
TH
6263 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6264 if (!ata_aux_wq) {
6265 destroy_workqueue(ata_wq);
6266 return -ENOMEM;
6267 }
6268
1da177e4
LT
6269 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6270 return 0;
6271}
6272
/* Module exit: tear down the workqueues created in ata_init(). */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

/* subsys_initcall so libata is ready before LLD drivers probe */
subsys_initcall(ata_init);
module_exit(ata_exit);
6281
67846b30 6282static unsigned long ratelimit_time;
34af946a 6283static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6284
6285int ata_ratelimit(void)
6286{
6287 int rc;
6288 unsigned long flags;
6289
6290 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6291
6292 if (time_after(jiffies, ratelimit_time)) {
6293 rc = 1;
6294 ratelimit_time = jiffies + (HZ/5);
6295 } else
6296 rc = 0;
6297
6298 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6299
6300 return rc;
6301}
6302
c22daff4
TH
6303/**
6304 * ata_wait_register - wait until register value changes
6305 * @reg: IO-mapped register
6306 * @mask: Mask to apply to read register value
6307 * @val: Wait condition
6308 * @interval_msec: polling interval in milliseconds
6309 * @timeout_msec: timeout in milliseconds
6310 *
6311 * Waiting for some bits of register to change is a common
6312 * operation for ATA controllers. This function reads 32bit LE
6313 * IO-mapped register @reg and tests for the following condition.
6314 *
6315 * (*@reg & mask) != val
6316 *
6317 * If the condition is met, it returns; otherwise, the process is
6318 * repeated after @interval_msec until timeout.
6319 *
6320 * LOCKING:
6321 * Kernel thread context (may sleep)
6322 *
6323 * RETURNS:
6324 * The final register value.
6325 */
6326u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6327 unsigned long interval_msec,
6328 unsigned long timeout_msec)
6329{
6330 unsigned long timeout;
6331 u32 tmp;
6332
6333 tmp = ioread32(reg);
6334
6335 /* Calculate timeout _after_ the first read to make sure
6336 * preceding writes reach the controller before starting to
6337 * eat away the timeout.
6338 */
6339 timeout = jiffies + (timeout_msec * HZ) / 1000;
6340
6341 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6342 msleep(interval_msec);
6343 tmp = ioread32(reg);
6344 }
6345
6346 return tmp;
6347}
6348
dd5b06c4
TH
/*
 * Dummy port_ops
 *
 * NOTE(review): presumably installed on ports that are disabled or
 * otherwise non-functional — confirm at the call sites that assign
 * ata_dummy_port_ops.  Every operation is a no-op except status
 * checks (always "device ready") and command issue (always fails).
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* always report device-ready so status pollers terminate quickly */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* fail every queued command with a host-side (system) error */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
6381
1da177e4
LT
6382/*
6383 * libata is essentially a library of internal helper functions for
6384 * low-level ATA host controller drivers. As such, the API/ABI is
6385 * likely to change as new drivers are added and updated.
6386 * Do not depend on ABI/API stability.
6387 */
6388
e9c83914
TH
6389EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6390EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6391EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6392EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
1da177e4
LT
6393EXPORT_SYMBOL_GPL(ata_std_bios_param);
6394EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6395EXPORT_SYMBOL_GPL(ata_host_init);
1da177e4 6396EXPORT_SYMBOL_GPL(ata_device_add);
0529c159 6397EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
6398EXPORT_SYMBOL_GPL(ata_sg_init);
6399EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6400EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6401EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6402EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6403EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
6404EXPORT_SYMBOL_GPL(ata_tf_load);
6405EXPORT_SYMBOL_GPL(ata_tf_read);
6406EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6407EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 6408EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
6409EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6410EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6411EXPORT_SYMBOL_GPL(ata_check_status);
6412EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
6413EXPORT_SYMBOL_GPL(ata_exec_command);
6414EXPORT_SYMBOL_GPL(ata_port_start);
1da177e4 6415EXPORT_SYMBOL_GPL(ata_interrupt);
0d5ff566
TH
6416EXPORT_SYMBOL_GPL(ata_data_xfer);
6417EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6418EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 6419EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
6420EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6421EXPORT_SYMBOL_GPL(ata_bmdma_start);
6422EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6423EXPORT_SYMBOL_GPL(ata_bmdma_status);
6424EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
6425EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6426EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6427EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6428EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6429EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 6430EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 6431EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6432EXPORT_SYMBOL_GPL(sata_set_spd);
d7bb4cc7
TH
6433EXPORT_SYMBOL_GPL(sata_phy_debounce);
6434EXPORT_SYMBOL_GPL(sata_phy_resume);
1da177e4
LT
6435EXPORT_SYMBOL_GPL(sata_phy_reset);
6436EXPORT_SYMBOL_GPL(__sata_phy_reset);
6437EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 6438EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 6439EXPORT_SYMBOL_GPL(ata_std_softreset);
b6103f6d 6440EXPORT_SYMBOL_GPL(sata_port_hardreset);
c2bd5804
TH
6441EXPORT_SYMBOL_GPL(sata_std_hardreset);
6442EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6443EXPORT_SYMBOL_GPL(ata_dev_classify);
6444EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6445EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6446EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6447EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 6448EXPORT_SYMBOL_GPL(ata_busy_sleep);
86e45b6b 6449EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
6450EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6451EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6452EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6453EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6454EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 6455EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
6456EXPORT_SYMBOL_GPL(sata_scr_valid);
6457EXPORT_SYMBOL_GPL(sata_scr_read);
6458EXPORT_SYMBOL_GPL(sata_scr_write);
6459EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6460EXPORT_SYMBOL_GPL(ata_port_online);
6461EXPORT_SYMBOL_GPL(ata_port_offline);
6ffa01d8 6462#ifdef CONFIG_PM
cca3974e
JG
6463EXPORT_SYMBOL_GPL(ata_host_suspend);
6464EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6465#endif /* CONFIG_PM */
6a62a04d
TH
6466EXPORT_SYMBOL_GPL(ata_id_string);
6467EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 6468EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6919a0a6 6469EXPORT_SYMBOL_GPL(ata_device_blacklisted);
1da177e4
LT
6470EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6471
1bc4ccff 6472EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
6473EXPORT_SYMBOL_GPL(ata_timing_compute);
6474EXPORT_SYMBOL_GPL(ata_timing_merge);
6475
1da177e4
LT
6476#ifdef CONFIG_PCI
6477EXPORT_SYMBOL_GPL(pci_test_config_bits);
6478EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6479EXPORT_SYMBOL_GPL(ata_pci_init_one);
6480EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6481#ifdef CONFIG_PM
500530f6
TH
6482EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6483EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6484EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6485EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6486#endif /* CONFIG_PM */
67951ade
AC
6487EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6488EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 6489#endif /* CONFIG_PCI */
9b847548 6490
6ffa01d8 6491#ifdef CONFIG_PM
9b847548
JA
6492EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6493EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6ffa01d8 6494#endif /* CONFIG_PM */
ece1d636 6495
ece1d636 6496EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03
TH
6497EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6498EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499
TH
6499EXPORT_SYMBOL_GPL(ata_port_freeze);
6500EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6501EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6502EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6503EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6504EXPORT_SYMBOL_GPL(ata_do_eh);
83625006
AI
6505EXPORT_SYMBOL_GPL(ata_irq_on);
6506EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6507EXPORT_SYMBOL_GPL(ata_irq_ack);
6508EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
a619f981 6509EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
6510
6511EXPORT_SYMBOL_GPL(ata_cable_40wire);
6512EXPORT_SYMBOL_GPL(ata_cable_80wire);
6513EXPORT_SYMBOL_GPL(ata_cable_unknown);
6514EXPORT_SYMBOL_GPL(ata_cable_sata);