libata: HPA support
drivers/ata/libata-core.c
/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION	"2.20"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7);	/* Port multiplier number,
						   bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
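/*
 * Layout note: the 20-byte buffer filled above is the Register -
 * Host to Device FIS defined by the SATA spec: byte 0 is the FIS
 * type (0x27), byte 1 carries the PM port number and the Command
 * bit, bytes 2-13 mirror the shadow taskfile registers (including
 * the HOB copies used by 48-bit commands), byte 15 is the control
 * register, and the remaining bytes are reserved and cleared.
 */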

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
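/*
 * The table above is indexed as base + fua*4 + lba48*2 + write,
 * where base is 0 for PIO multi, 8 for plain PIO and 16 for DMA
 * (see ata_rwcmd_protocol() below).  For example, a FUA write on
 * an LBA48 DMA-capable device selects entry 16 + 4 + 2 + 1 = 23,
 * ATA_CMD_WRITE_FUA_EXT; a zero entry marks an invalid combination
 * and makes ata_rwcmd_protocol() fail.
 */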

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

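/*
 * Worked example for the LBA-to-CHS conversion above, assuming a
 * geometry of 16 heads and 63 sectors per track: block 4097 gives
 * track = 4097 / 63 = 65, cyl = 65 / 16 = 4, head = 65 % 16 = 1
 * and sect = 4097 % 63 + 1 = 3, all within the cylinder/head/sector
 * limits checked before the registers are written.
 */
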
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

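/*
 * Example: ata_pack_xfermask(0x1f, 0x07, 0x3f) encodes PIO0-4,
 * MWDMA0-2 and UDMA0-5 in one word; ata_unpack_xfermask() below
 * is the exact inverse.
 */
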
/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

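/*
 * Example: if the highest bit set in @xfer_mask is five bits above
 * ATA_SHIFT_UDMA, the lookup above returns XFER_UDMA_0 + 5, i.e.
 * XFER_UDMA_5.
 */
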
/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
		dev->class++;
	}
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

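/*
 * Note: 0x55 and 0xaa are complementary bit patterns, so a floating
 * or pulled-up bus is unlikely to echo the final 0x55/0xaa pair back
 * by accident; only a device actually latching the shadow registers
 * passes the check above.
 */
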
/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

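/*
 * Example: the 40-character model string occupies IDENTIFY words
 * 27-46, two big-endian characters per word, so
 * ata_id_c_string(id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN + 1)
 * extracts it - exactly what ata_dev_configure() does below.
 */
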
static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

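/*
 * Note for the two helpers above: READ/SET NATIVE MAX commands
 * return the address of the last addressable sector in the LBA
 * registers, so the trailing "++sectors" converts that highest
 * LBA into a sector count.
 */
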
/**
 * ata_read_native_max_address_ext - LBA48 native max query
 * @dev: Device to query
 *
 * Perform an LBA48 size query upon the device in question. Return the
 * actual LBA48 size or zero if the command fails.
 */

static u64 ata_read_native_max_address_ext(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba48(&tf);
}

/**
 * ata_read_native_max_address - LBA28 native max query
 * @dev: Device to query
 *
 * Perform an LBA28 size query upon the device in question. Return the
 * actual LBA28 size or zero if the command fails.
 */

static u64 ata_read_native_max_address(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba(&tf);
}

/**
 * ata_set_native_max_address_ext - LBA48 native max set
 * @dev: Device to set
 * @new_sectors: new max sectors value to set for the device
 *
 * Perform an LBA48 size set max upon the device in question. Return the
 * actual LBA48 size or zero if the command fails.
 */

static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	tf.hob_lbal = (new_sectors >> 24) & 0xff;
	tf.hob_lbam = (new_sectors >> 32) & 0xff;
	tf.hob_lbah = (new_sectors >> 40) & 0xff;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba48(&tf);
}

/**
 * ata_set_native_max_address - LBA28 native max set
 * @dev: Device to set
 * @new_sectors: new max sectors value to set for the device
 *
 * Perform an LBA28 size set max upon the device in question. Return the
 * actual LBA28 size or zero if the command fails.
 */

static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;
	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba(&tf);
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 */

static u64 ata_hpa_resize(struct ata_device *dev)
{
	u64 sectors = dev->n_sectors;
	u64 hpa_sectors;

	if (ata_id_has_lba48(dev->id))
		hpa_sectors = ata_read_native_max_address_ext(dev);
	else
		hpa_sectors = ata_read_native_max_address(dev);

	/* if no hpa, both should be equal */
	ata_dev_printk(dev, KERN_INFO, "%s 1: sectors = %lld, hpa_sectors = %lld\n",
			__FUNCTION__, sectors, hpa_sectors);

	if (hpa_sectors > sectors) {
		ata_dev_printk(dev, KERN_INFO,
			"Host Protected Area detected:\n"
			"\tcurrent size: %lld sectors\n"
			"\tnative size: %lld sectors\n",
			sectors, hpa_sectors);

		if (ata_ignore_hpa) {
			if (ata_id_has_lba48(dev->id))
				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
			else
				hpa_sectors = ata_set_native_max_address(dev, hpa_sectors);

			if (hpa_sectors) {
				ata_dev_printk(dev, KERN_INFO,
					"native size increased to %lld sectors\n", hpa_sectors);
				return hpa_sectors;
			}
		}
	}
	return sectors;
}

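/*
 * Usage note: ata_hpa_resize() is invoked from ata_dev_configure()
 * when ata_id_hpa_enabled() reports an active host protected area;
 * with the ignore_hpa module parameter set, it uses the SET MAX
 * helpers above to unlock the full native capacity.
 */
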
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

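/*
 * Capacity sources used above, in order of preference: IDENTIFY
 * words 100-103 (LBA48 capacity), words 60-61 (LBA28 capacity),
 * words 57-58 (current CHS capacity), and finally the default
 * cylinders * heads * sectors geometry from words 1, 3 and 6.
 */
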
/**
 * ata_id_to_dma_mode - Identify DMA mode from id block
 * @dev: device to identify
 * @unknown: mode to assume if we cannot tell
 *
 * Set up the timing values for the device based upon the identify
 * reported values for the DMA mode. This function is used by drivers
 * which rely upon firmware configured modes, but wish to report the
 * mode correctly when possible.
 *
 * In addition we emit similarly formatted messages to the default
 * ata_dev_set_mode handler, in order to provide consistency of
 * presentation.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds, not the modes, that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

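/*
 * Example: a drive with id[ATA_ID_FIELD_VALID] bit 1 set and
 * id[ATA_ID_PIO_MODES] == 0x0003 (PIO3 and PIO4 supported) gets
 * pio_mask = (0x03 << 3) | 0x7 = 0x1f, i.e. PIO0 through PIO4.
 */
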
/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user(low level driver)'s responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sg: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy. Return
 * -1 if no iordy mode is available.
 */

static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

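/*
 * The depth reported above is the smaller of what the drive
 * advertises in its IDENTIFY data and what the SCSI host can queue
 * (capped at ATA_MAX_QUEUE - 1), so "NCQ (depth X/Y)" means the
 * drive supports Y tags but only X will actually be used.
 */
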
/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @dev: Target device to configure
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set _SDD */
	rc = ata_acpi_push_id(ap, dev->devno);
	if (rc) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			rc);
	}

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);
		dev->n_sectors_boot = dev->n_sectors;

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
				sizeof(modelbuf));

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			if (ata_id_hpa_enabled(dev->id))
				dev->n_sectors = ata_hpa_resize(dev);

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
1979 ata_dev_printk(dev, KERN_WARNING,
1980"fault or invalid emulation. Contact drive vendor for information.\n");
1981 }
1982 }
1983
4b2f3ede 1984 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 1985 if (ata_dev_knobble(dev)) {
5afc8142 1986 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
1987 ata_dev_printk(dev, KERN_INFO,
1988 "applying bridge limits\n");
5a529139 1989 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
1990 dev->max_sectors = ATA_MAX_SECTORS;
1991 }
1992
18d6e9d5 1993 if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
1994 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
1995 dev->max_sectors);
18d6e9d5 1996
6f23a31d
AL
1997 /* limit ATAPI DMA to R/W commands only */
1998 if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
1999 dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;
2000
4b2f3ede 2001 if (ap->ops->dev_config)
cd0d3bbc 2002 ap->ops->dev_config(dev);
4b2f3ede 2003
0dd4b21f
BP
2004 if (ata_msg_probe(ap))
2005 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2006 __FUNCTION__, ata_chk_status(ap));
ffeae418 2007 return 0;
1da177e4
LT
2008
2009err_out_nosup:
0dd4b21f 2010 if (ata_msg_probe(ap))
88574551
TH
2011 ata_dev_printk(dev, KERN_DEBUG,
2012 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2013 return rc;
1da177e4
LT
2014}
2015
be0d18df 2016/**
2e41e8e6 2017 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2018 * @ap: port
2019 *
2e41e8e6 2020 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2021 * detection.
2022 */
2023
2024int ata_cable_40wire(struct ata_port *ap)
2025{
2026 return ATA_CBL_PATA40;
2027}
2028
2029/**
2e41e8e6 2030 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2031 * @ap: port
2032 *
2e41e8e6 2033 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2034 * detection.
2035 */
2036
2037int ata_cable_80wire(struct ata_port *ap)
2038{
2039 return ATA_CBL_PATA80;
2040}
2041
2042/**
2043 * ata_cable_unknown - return unknown PATA cable
2044 * @ap: port
2045 *
2046 * Helper method for drivers which have no PATA cable detection.
2047 */
2048
2049int ata_cable_unknown(struct ata_port *ap)
2050{
2051 return ATA_CBL_PATA_UNK;
2052}
2053
2054/**
2055 * ata_cable_sata - return SATA cable type
2056 * @ap: port
2057 *
2058 * Helper method for drivers which have SATA cables
2059 */
2060
2061int ata_cable_sata(struct ata_port *ap)
2062{
2063 return ATA_CBL_SATA;
2064}
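/*
 * Usage sketch (hypothetical LLDD, not part of this file): a driver
 * that knows its board wiring can plug one of the helpers above
 * straight into its ata_port_operations instead of implementing
 * detection itself:
 *
 *	static struct ata_port_operations my_ops = {
 *		...
 *		.cable_detect	= ata_cable_40wire,
 *		...
 *	};
 *
 * ata_bus_probe() below then records the result through
 * ap->cbl = ap->ops->cable_detect(ap) once PDIAG- has been released.
 */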
2065
1da177e4
LT
2066/**
2067 * ata_bus_probe - Reset and probe ATA bus
2068 * @ap: Bus to probe
2069 *
0cba632b
JG
2070 * Master ATA bus probing function. Initiates a hardware-dependent
2071 * bus reset, then attempts to identify any devices found on
2072 * the bus.
2073 *
1da177e4 2074 * LOCKING:
0cba632b 2075 * PCI/etc. bus probe sem.
1da177e4
LT
2076 *
2077 * RETURNS:
96072e69 2078 * Zero on success, negative errno otherwise.
1da177e4
LT
2079 */
2080
80289167 2081int ata_bus_probe(struct ata_port *ap)
1da177e4 2082{
28ca5c57 2083 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2084 int tries[ATA_MAX_DEVICES];
4ae72a1e 2085 int i, rc;
e82cbdb9 2086 struct ata_device *dev;
1da177e4 2087
28ca5c57 2088 ata_port_probe(ap);
c19ba8af 2089
14d2bac1
TH
2090 for (i = 0; i < ATA_MAX_DEVICES; i++)
2091 tries[i] = ATA_PROBE_MAX_TRIES;
2092
2093 retry:
2044470c 2094 /* reset and determine device classes */
52783c5d 2095 ap->ops->phy_reset(ap);
2061a47a 2096
52783c5d
TH
2097 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2098 dev = &ap->device[i];
c19ba8af 2099
52783c5d
TH
2100 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2101 dev->class != ATA_DEV_UNKNOWN)
2102 classes[dev->devno] = dev->class;
2103 else
2104 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2105
52783c5d 2106 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2107 }
1da177e4 2108
52783c5d 2109 ata_port_probe(ap);
2044470c 2110
b6079ca4
AC
2111 /* after the reset the device state is PIO 0 and the controller
2112 state is undefined. Record the mode */
2113
2114 for (i = 0; i < ATA_MAX_DEVICES; i++)
2115 ap->device[i].pio_mode = XFER_PIO_0;
2116
f31f0cc2
JG
2117 /* read IDENTIFY page and configure devices. We have to do the identify
2118 specific sequence bass-ackwards so that PDIAG- is released by
2119 the slave device */
2120
2121 for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
e82cbdb9 2122 dev = &ap->device[i];
28ca5c57 2123
ec573755
TH
2124 if (tries[i])
2125 dev->class = classes[i];
ffeae418 2126
14d2bac1 2127 if (!ata_dev_enabled(dev))
ffeae418 2128 continue;
ffeae418 2129
bff04647
TH
2130 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2131 dev->id);
14d2bac1
TH
2132 if (rc)
2133 goto fail;
f31f0cc2
JG
2134 }
2135
be0d18df
AC
2136 /* Now ask for the cable type as PDIAG- should have been released */
2137 if (ap->ops->cable_detect)
2138 ap->cbl = ap->ops->cable_detect(ap);
2139
f31f0cc2
JG
2140 /* After the identify sequence we can now set up the devices. We do
2141 this in the normal order so that the user doesn't get confused */
2142
2143 for(i = 0; i < ATA_MAX_DEVICES; i++) {
2144 dev = &ap->device[i];
2145 if (!ata_dev_enabled(dev))
2146 continue;
14d2bac1 2147
efdaedc4
TH
2148 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
2149 rc = ata_dev_configure(dev);
2150 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2151 if (rc)
2152 goto fail;
1da177e4
LT
2153 }
2154
e82cbdb9 2155 /* configure transfer mode */
3adcebb2 2156 rc = ata_set_mode(ap, &dev);
4ae72a1e 2157 if (rc)
51713d35 2158 goto fail;
1da177e4 2159
e82cbdb9
TH
2160 for (i = 0; i < ATA_MAX_DEVICES; i++)
2161 if (ata_dev_enabled(&ap->device[i]))
2162 return 0;
1da177e4 2163
e82cbdb9
TH
2164 /* no device present, disable port */
2165 ata_port_disable(ap);
1da177e4 2166 ap->ops->port_disable(ap);
96072e69 2167 return -ENODEV;
14d2bac1
TH
2168
2169 fail:
4ae72a1e
TH
2170 tries[dev->devno]--;
2171
14d2bac1
TH
2172 switch (rc) {
2173 case -EINVAL:
4ae72a1e 2174 /* eeek, something went very wrong, give up */
14d2bac1
TH
2175 tries[dev->devno] = 0;
2176 break;
4ae72a1e
TH
2177
2178 case -ENODEV:
2179 /* give it just one more chance */
2180 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2181 case -EIO:
4ae72a1e
TH
2182 if (tries[dev->devno] == 1) {
2183 /* This is the last chance, better to slow
2184 * down than lose it.
2185 */
2186 sata_down_spd_limit(ap);
2187 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2188 }
14d2bac1
TH
2189 }
2190
4ae72a1e 2191 if (!tries[dev->devno])
3373efd8 2192 ata_dev_disable(dev);
ec573755 2193
14d2bac1 2194 goto retry;
1da177e4
LT
2195}
2196
2197/**
0cba632b
JG
2198 * ata_port_probe - Mark port as enabled
2199 * @ap: Port for which we indicate enablement
1da177e4 2200 *
0cba632b
JG
2201 * Modify @ap data structure such that the system
2202 * thinks that the entire port is enabled.
2203 *
cca3974e 2204 * LOCKING: host lock, or some other form of
0cba632b 2205 * serialization.
1da177e4
LT
2206 */
2207
2208void ata_port_probe(struct ata_port *ap)
2209{
198e0fed 2210 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2211}
2212
3be680b7
TH
2213/**
2214 * sata_print_link_status - Print SATA link status
2215 * @ap: SATA port to printk link status about
2216 *
2217 * This function prints link speed and status of a SATA link.
2218 *
2219 * LOCKING:
2220 * None.
2221 */
43727fbc 2222void sata_print_link_status(struct ata_port *ap)
3be680b7 2223{
6d5f9732 2224 u32 sstatus, scontrol, tmp;
3be680b7 2225
81952c54 2226 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 2227 return;
81952c54 2228 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 2229
81952c54 2230 if (ata_port_online(ap)) {
3be680b7 2231 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
2232 ata_port_printk(ap, KERN_INFO,
2233 "SATA link up %s (SStatus %X SControl %X)\n",
2234 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2235 } else {
f15a1daf
TH
2236 ata_port_printk(ap, KERN_INFO,
2237 "SATA link down (SStatus %X SControl %X)\n",
2238 sstatus, scontrol);
3be680b7
TH
2239 }
2240}
2241
1da177e4 2242/**
780a87f7
JG
2243 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2244 * @ap: SATA port associated with target SATA PHY.
1da177e4 2245 *
780a87f7
JG
2246 * This function issues commands to standard SATA Sxxx
2247 * PHY registers, to wake up the phy (and device), and
2248 * clear any reset condition.
1da177e4
LT
2249 *
2250 * LOCKING:
0cba632b 2251 * PCI/etc. bus probe sem.
1da177e4
LT
2252 *
2253 */
2254void __sata_phy_reset(struct ata_port *ap)
2255{
2256 u32 sstatus;
2257 unsigned long timeout = jiffies + (HZ * 5);
2258
2259 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 2260 /* issue phy wake/reset */
81952c54 2261 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
2262 /* Couldn't find anything in SATA I/II specs, but
2263 * AHCI-1.1 10.4.2 says at least 1 ms. */
2264 mdelay(1);
1da177e4 2265 }
81952c54
TH
2266 /* phy wake/clear reset */
2267 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
2268
2269 /* wait for phy to become ready, if necessary */
2270 do {
2271 msleep(200);
81952c54 2272 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
2273 if ((sstatus & 0xf) != 1)
2274 break;
2275 } while (time_before(jiffies, timeout));
2276
3be680b7
TH
2277 /* print link status */
2278 sata_print_link_status(ap);
656563e3 2279
3be680b7 2280 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 2281 if (!ata_port_offline(ap))
1da177e4 2282 ata_port_probe(ap);
3be680b7 2283 else
1da177e4 2284 ata_port_disable(ap);
1da177e4 2285
198e0fed 2286 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2287 return;
2288
2289 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2290 ata_port_disable(ap);
2291 return;
2292 }
2293
2294 ap->cbl = ATA_CBL_SATA;
2295}
2296
2297/**
780a87f7
JG
2298 * sata_phy_reset - Reset SATA bus.
2299 * @ap: SATA port associated with target SATA PHY.
1da177e4 2300 *
780a87f7
JG
2301 * This function resets the SATA bus, and then probes
2302 * the bus for devices.
1da177e4
LT
2303 *
2304 * LOCKING:
0cba632b 2305 * PCI/etc. bus probe sem.
1da177e4
LT
2306 *
2307 */
2308void sata_phy_reset(struct ata_port *ap)
2309{
2310 __sata_phy_reset(ap);
198e0fed 2311 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2312 return;
2313 ata_bus_reset(ap);
2314}
2315
ebdfca6e
AC
2316/**
2317 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2318 * @adev: device
2319 *
2320 * Obtain the other device on the same cable; if none is
2321 * present, NULL is returned.
2322 */
2e9edbf8 2323
3373efd8 2324struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2325{
3373efd8 2326 struct ata_port *ap = adev->ap;
ebdfca6e 2327 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2328 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2329 return NULL;
2330 return pair;
2331}
2332
1da177e4 2333/**
780a87f7
JG
2334 * ata_port_disable - Disable port.
2335 * @ap: Port to be disabled.
1da177e4 2336 *
780a87f7
JG
2337 * Modify @ap data structure such that the system
2338 * thinks that the entire port is disabled, and should
2339 * never attempt to probe or communicate with devices
2340 * on this port.
2341 *
cca3974e 2342 * LOCKING: host lock, or some other form of
780a87f7 2343 * serialization.
1da177e4
LT
2344 */
2345
2346void ata_port_disable(struct ata_port *ap)
2347{
2348 ap->device[0].class = ATA_DEV_NONE;
2349 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2350 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2351}
2352
1c3fae4d 2353/**
3c567b7d 2354 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2355 * @ap: Port to adjust SATA spd limit for
2356 *
2357 * Adjust SATA spd limit of @ap downward. Note that this
2358 * function only adjusts the limit. The change must be applied
3c567b7d 2359 * using sata_set_spd().
1c3fae4d
TH
2360 *
2361 * LOCKING:
2362 * Inherited from caller.
2363 *
2364 * RETURNS:
2365 * 0 on success, negative errno on failure
2366 */
3c567b7d 2367int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2368{
81952c54
TH
2369 u32 sstatus, spd, mask;
2370 int rc, highbit;
1c3fae4d 2371
81952c54
TH
2372 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2373 if (rc)
2374 return rc;
1c3fae4d
TH
2375
2376 mask = ap->sata_spd_limit;
2377 if (mask <= 1)
2378 return -EINVAL;
2379 highbit = fls(mask) - 1;
2380 mask &= ~(1 << highbit);
2381
81952c54 2382 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2383 if (spd <= 1)
2384 return -EINVAL;
2385 spd--;
2386 mask &= (1 << spd) - 1;
2387 if (!mask)
2388 return -EINVAL;
2389
2390 ap->sata_spd_limit = mask;
2391
f15a1daf
TH
2392 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2393 sata_spd_string(fls(mask)));
1c3fae4d
TH
2394
2395 return 0;
2396}
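/*
 * Worked example (illustrative): with ap->sata_spd_limit == 0x3 (Gen1
 * and Gen2 allowed) and SStatus reporting a 3.0 Gbps link (spd == 2),
 * fls(0x3) - 1 == 1 clears bit 1, leaving mask == 0x1; the
 * "mask &= (1 << spd) - 1" step keeps it at 0x1, so the port is now
 * limited to 1.5 Gbps.  The new limit takes effect on the next
 * hardreset via sata_set_spd().
 */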
2397
3c567b7d 2398static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2399{
2400 u32 spd, limit;
2401
2402 if (ap->sata_spd_limit == UINT_MAX)
2403 limit = 0;
2404 else
2405 limit = fls(ap->sata_spd_limit);
2406
2407 spd = (*scontrol >> 4) & 0xf;
2408 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2409
2410 return spd != limit;
2411}
2412
2413/**
3c567b7d 2414 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2415 * @ap: Port in question
2416 *
2417 * Test whether the spd limit in SControl matches
2418 * @ap->sata_spd_limit. This function is used to determine
2419 * whether hardreset is necessary to apply SATA spd
2420 * configuration.
2421 *
2422 * LOCKING:
2423 * Inherited from caller.
2424 *
2425 * RETURNS:
2426 * 1 if SATA spd configuration is needed, 0 otherwise.
2427 */
3c567b7d 2428int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2429{
2430 u32 scontrol;
2431
81952c54 2432 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2433 return 0;
2434
3c567b7d 2435 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2436}
2437
2438/**
3c567b7d 2439 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2440 * @ap: Port to set SATA spd for
2441 *
2442 * Set SATA spd of @ap according to sata_spd_limit.
2443 *
2444 * LOCKING:
2445 * Inherited from caller.
2446 *
2447 * RETURNS:
2448 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2449 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2450 */
3c567b7d 2451int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2452{
2453 u32 scontrol;
81952c54 2454 int rc;
1c3fae4d 2455
81952c54
TH
2456 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2457 return rc;
1c3fae4d 2458
3c567b7d 2459 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2460 return 0;
2461
81952c54
TH
2462 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2463 return rc;
2464
1c3fae4d
TH
2465 return 1;
2466}
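/*
 * Usage sketch (hypothetical caller): since this function only
 * rewrites SControl, a caller seeing 1 must still schedule a
 * hardreset for the new limit to reach the wire, e.g. with
 * ehc = &ap->eh_context:
 *
 *	if (sata_set_spd(ap) == 1)
 *		ehc->i.action |= ATA_EH_HARDRESET;
 */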
2467
452503f9
AC
2468/*
2469 * This mode timing computation functionality is ported over from
2470 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2471 */
2472/*
b352e57d 2473 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2474 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2475 * for UDMA6, which is currently supported only by Maxtor drives.
2476 *
2477 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2478 */
2479
2480static const struct ata_timing ata_timing[] = {
2481
2482 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2483 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2484 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2485 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2486
b352e57d
AC
2487 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2488 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2489 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2490 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2491 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2492
2493/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2494
452503f9
AC
2495 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2496 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2497 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2498
452503f9
AC
2499 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2500 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2501 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2502
b352e57d
AC
2503 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2504 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2505 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2506 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2507
2508 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2509 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2510 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2511
2512/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2513
2514 { 0xFF }
2515};
2516
2517#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2518#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2519
2520static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2521{
2522 q->setup = EZ(t->setup * 1000, T);
2523 q->act8b = EZ(t->act8b * 1000, T);
2524 q->rec8b = EZ(t->rec8b * 1000, T);
2525 q->cyc8b = EZ(t->cyc8b * 1000, T);
2526 q->active = EZ(t->active * 1000, T);
2527 q->recover = EZ(t->recover * 1000, T);
2528 q->cycle = EZ(t->cycle * 1000, T);
2529 q->udma = EZ(t->udma * 1000, UT);
2530}
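/*
 * Worked example (illustrative): ENOUGH() is a ceiling division and
 * EZ() maps 0 ("not specified") to 0.  The table above is in
 * nanoseconds, and the * 1000 against it implies T and UT are clock
 * periods in picoseconds, so for the PIO_0 setup time of 70 ns on a
 * 33 MHz bus clock (T == 30000):
 *
 *	EZ(70 * 1000, 30000) == ENOUGH(70000, 30000)
 *			     == (70000 - 1) / 30000 + 1 == 3 clocks
 */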
2531
2532void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2533 struct ata_timing *m, unsigned int what)
2534{
2535 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2536 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2537 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2538 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2539 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2540 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2541 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2542 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2543}
2544
2545static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2546{
2547 const struct ata_timing *t;
2548
2549 for (t = ata_timing; t->mode != speed; t++)
91190758 2550 if (t->mode == 0xFF)
452503f9 2551 return NULL;
2e9edbf8 2552 return t;
452503f9
AC
2553}
2554
2555int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2556 struct ata_timing *t, int T, int UT)
2557{
2558 const struct ata_timing *s;
2559 struct ata_timing p;
2560
2561 /*
2e9edbf8 2562 * Find the mode.
75b1f2f8 2563 */
452503f9
AC
2564
2565 if (!(s = ata_timing_find_mode(speed)))
2566 return -EINVAL;
2567
75b1f2f8
AL
2568 memcpy(t, s, sizeof(*s));
2569
452503f9
AC
2570 /*
2571 * If the drive is an EIDE drive, it can tell us it needs extended
2572 * PIO/MW_DMA cycle timing.
2573 */
2574
2575 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2576 memset(&p, 0, sizeof(p));
2577 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2578 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2579 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2580 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2581 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2582 }
2583 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2584 }
2585
2586 /*
2587 * Convert the timing to bus clock counts.
2588 */
2589
75b1f2f8 2590 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2591
2592 /*
c893a3ae
RD
2593 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2594 * S.M.A.R.T. and some other commands. We have to ensure that the
2595 * DMA cycle timing is slower than or equal to the fastest PIO timing.
452503f9
AC
2596 */
2597
fd3367af 2598 if (speed > XFER_PIO_6) {
452503f9
AC
2599 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2600 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2601 }
2602
2603 /*
c893a3ae 2604 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2605 */
2606
2607 if (t->act8b + t->rec8b < t->cyc8b) {
2608 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2609 t->rec8b = t->cyc8b - t->act8b;
2610 }
2611
2612 if (t->active + t->recover < t->cycle) {
2613 t->active += (t->cycle - (t->active + t->recover)) / 2;
2614 t->recover = t->cycle - t->active;
2615 }
2616
2617 return 0;
2618}
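/*
 * Worked example for the lengthening step above (illustrative
 * numbers): with act8b == 3, rec8b == 3 and cyc8b == 8 clocks, the
 * two clocks of slack are split evenly: act8b += (8 - 6) / 2 -> 4,
 * then rec8b = 8 - 4 -> 4, so active plus recovery again fills the
 * whole 8-bit cycle.  The same adjustment is applied to
 * active/recover/cycle just below it.
 */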
2619
cf176e1a
TH
2620/**
2621 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2622 * @dev: Device to adjust xfer masks
458337db 2623 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2624 *
2625 * Adjust xfer masks of @dev downward. Note that this function
2626 * does not apply the change. Invoking ata_set_mode() afterwards
2627 * will apply the limit.
2628 *
2629 * LOCKING:
2630 * Inherited from caller.
2631 *
2632 * RETURNS:
2633 * 0 on success, negative errno on failure
2634 */
458337db 2635int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2636{
458337db
TH
2637 char buf[32];
2638 unsigned int orig_mask, xfer_mask;
2639 unsigned int pio_mask, mwdma_mask, udma_mask;
2640 int quiet, highbit;
cf176e1a 2641
458337db
TH
2642 quiet = !!(sel & ATA_DNXFER_QUIET);
2643 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2644
458337db
TH
2645 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2646 dev->mwdma_mask,
2647 dev->udma_mask);
2648 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2649
458337db
TH
2650 switch (sel) {
2651 case ATA_DNXFER_PIO:
2652 highbit = fls(pio_mask) - 1;
2653 pio_mask &= ~(1 << highbit);
2654 break;
2655
2656 case ATA_DNXFER_DMA:
2657 if (udma_mask) {
2658 highbit = fls(udma_mask) - 1;
2659 udma_mask &= ~(1 << highbit);
2660 if (!udma_mask)
2661 return -ENOENT;
2662 } else if (mwdma_mask) {
2663 highbit = fls(mwdma_mask) - 1;
2664 mwdma_mask &= ~(1 << highbit);
2665 if (!mwdma_mask)
2666 return -ENOENT;
2667 }
2668 break;
2669
2670 case ATA_DNXFER_40C:
2671 udma_mask &= ATA_UDMA_MASK_40C;
2672 break;
2673
2674 case ATA_DNXFER_FORCE_PIO0:
2675 pio_mask &= 1;
2676 case ATA_DNXFER_FORCE_PIO:
2677 mwdma_mask = 0;
2678 udma_mask = 0;
2679 break;
2680
458337db
TH
2681 default:
2682 BUG();
2683 }
2684
2685 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2686
2687 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2688 return -ENOENT;
2689
2690 if (!quiet) {
2691 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2692 snprintf(buf, sizeof(buf), "%s:%s",
2693 ata_mode_string(xfer_mask),
2694 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2695 else
2696 snprintf(buf, sizeof(buf), "%s",
2697 ata_mode_string(xfer_mask));
2698
2699 ata_dev_printk(dev, KERN_WARNING,
2700 "limiting speed to %s\n", buf);
2701 }
cf176e1a
TH
2702
2703 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2704 &dev->udma_mask);
2705
cf176e1a 2706 return 0;
cf176e1a
TH
2707}
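/*
 * Worked example (illustrative): a device allowing PIO0-4
 * (pio_mask == 0x1f) that is handed ATA_DNXFER_PIO loses its top
 * mode: fls(0x1f) - 1 == 4, so pio_mask becomes 0x0f (PIO0-3).
 * Passing ATA_DNXFER_PIO | ATA_DNXFER_QUIET does the same without
 * emitting the "limiting speed to ..." warning.
 */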
2708
3373efd8 2709static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2710{
baa1e78a 2711 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2712 unsigned int err_mask;
2713 int rc;
1da177e4 2714
e8384607 2715 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2716 if (dev->xfer_shift == ATA_SHIFT_PIO)
2717 dev->flags |= ATA_DFLAG_PIO;
2718
3373efd8 2719 err_mask = ata_dev_set_xfermode(dev);
11750a40
A
2720 /* Old CFA may refuse this command, which is just fine */
2721 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2722 err_mask &= ~AC_ERR_DEV;
2723
83206a29 2724 if (err_mask) {
f15a1daf
TH
2725 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2726 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2727 return -EIO;
2728 }
1da177e4 2729
baa1e78a 2730 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2731 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2732 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2733 if (rc)
83206a29 2734 return rc;
48a8a14f 2735
23e71c3d
TH
2736 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2737 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2738
f15a1daf
TH
2739 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2740 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2741 return 0;
1da177e4
LT
2742}
2743
1da177e4 2744/**
04351821 2745 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
1da177e4 2746 * @ap: port on which timings will be programmed
e82cbdb9 2747 * @r_failed_dev: out parameter for failed device
1da177e4 2748 *
04351821
A
2749 * Standard implementation of the function used to tune and set
2750 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2751 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2752 * returned in @r_failed_dev.
780a87f7 2753 *
1da177e4 2754 * LOCKING:
0cba632b 2755 * PCI/etc. bus probe sem.
e82cbdb9
TH
2756 *
2757 * RETURNS:
2758 * 0 on success, negative errno otherwise
1da177e4 2759 */
04351821
A
2760
2761int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2762{
e8e0619f 2763 struct ata_device *dev;
e82cbdb9 2764 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2765
3adcebb2 2766
a6d5a51c
TH
2767 /* step 1: calculate xfer_mask */
2768 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2769 unsigned int pio_mask, dma_mask;
a6d5a51c 2770
e8e0619f
TH
2771 dev = &ap->device[i];
2772
e1211e3f 2773 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2774 continue;
2775
3373efd8 2776 ata_dev_xfermask(dev);
1da177e4 2777
acf356b1
TH
2778 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2779 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2780 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2781 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2782
4f65977d 2783 found = 1;
5444a6f4
AC
2784 if (dev->dma_mode)
2785 used_dma = 1;
a6d5a51c 2786 }
4f65977d 2787 if (!found)
e82cbdb9 2788 goto out;
a6d5a51c
TH
2789
2790 /* step 2: always set host PIO timings */
e8e0619f
TH
2791 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2792 dev = &ap->device[i];
2793 if (!ata_dev_enabled(dev))
2794 continue;
2795
2796 if (!dev->pio_mode) {
f15a1daf 2797 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2798 rc = -EINVAL;
e82cbdb9 2799 goto out;
e8e0619f
TH
2800 }
2801
2802 dev->xfer_mode = dev->pio_mode;
2803 dev->xfer_shift = ATA_SHIFT_PIO;
2804 if (ap->ops->set_piomode)
2805 ap->ops->set_piomode(ap, dev);
2806 }
1da177e4 2807
a6d5a51c 2808 /* step 3: set host DMA timings */
e8e0619f
TH
2809 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2810 dev = &ap->device[i];
2811
2812 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2813 continue;
2814
2815 dev->xfer_mode = dev->dma_mode;
2816 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2817 if (ap->ops->set_dmamode)
2818 ap->ops->set_dmamode(ap, dev);
2819 }
1da177e4
LT
2820
2821 /* step 4: update devices' xfer mode */
83206a29 2822 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2823 dev = &ap->device[i];
1da177e4 2824
18d90deb 2825 /* don't update suspended devices' xfer mode */
02670bf3 2826 if (!ata_dev_ready(dev))
83206a29
TH
2827 continue;
2828
3373efd8 2829 rc = ata_dev_set_mode(dev);
5bbc53f4 2830 if (rc)
e82cbdb9 2831 goto out;
83206a29 2832 }
1da177e4 2833
e8e0619f
TH
2834 /* Record simplex status. If we selected DMA then the other
2835 * host channels are not permitted to do so.
5444a6f4 2836 */
cca3974e 2837 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2838 ap->host->simplex_claimed = ap;
5444a6f4 2839
e8e0619f 2840 /* step 5: chip-specific finalisation */
1da177e4
LT
2841 if (ap->ops->post_set_mode)
2842 ap->ops->post_set_mode(ap);
e82cbdb9
TH
2843 out:
2844 if (rc)
2845 *r_failed_dev = dev;
2846 return rc;
1da177e4
LT
2847}
2848
04351821
A
2849/**
2850 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2851 * @ap: port on which timings will be programmed
2852 * @r_failed_dev: out parameter for failed device
2853 *
2854 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2855 * ata_set_mode() fails, pointer to the failing device is
2856 * returned in @r_failed_dev.
2857 *
2858 * LOCKING:
2859 * PCI/etc. bus probe sem.
2860 *
2861 * RETURNS:
2862 * 0 on success, negative errno otherwise
2863 */
2864int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2865{
2866 /* has private set_mode? */
2867 if (ap->ops->set_mode)
2868 return ap->ops->set_mode(ap, r_failed_dev);
2869 return ata_do_set_mode(ap, r_failed_dev);
2870}
2871
1fdffbce
JG
2872/**
2873 * ata_tf_to_host - issue ATA taskfile to host controller
2874 * @ap: port to which command is being issued
2875 * @tf: ATA taskfile register set
2876 *
2877 * Issues ATA taskfile register set to ATA host controller,
2878 * with proper synchronization with interrupt handler and
2879 * other threads.
2880 *
2881 * LOCKING:
cca3974e 2882 * spin_lock_irqsave(host lock)
1fdffbce
JG
2883 */
2884
2885static inline void ata_tf_to_host(struct ata_port *ap,
2886 const struct ata_taskfile *tf)
2887{
2888 ap->ops->tf_load(ap, tf);
2889 ap->ops->exec_command(ap, tf);
2890}
2891
1da177e4
LT
2892/**
2893 * ata_busy_sleep - sleep until BSY clears, or timeout
2894 * @ap: port containing status register to be polled
2895 * @tmout_pat: impatience timeout
2896 * @tmout: overall timeout
2897 *
780a87f7
JG
2898 * Sleep until ATA Status register bit BSY clears,
2899 * or a timeout occurs.
2900 *
d1adc1bb
TH
2901 * LOCKING:
2902 * Kernel thread context (may sleep).
2903 *
2904 * RETURNS:
2905 * 0 on success, -errno otherwise.
1da177e4 2906 */
d1adc1bb
TH
2907int ata_busy_sleep(struct ata_port *ap,
2908 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2909{
2910 unsigned long timer_start, timeout;
2911 u8 status;
2912
2913 status = ata_busy_wait(ap, ATA_BUSY, 300);
2914 timer_start = jiffies;
2915 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2916 while (status != 0xff && (status & ATA_BUSY) &&
2917 time_before(jiffies, timeout)) {
1da177e4
LT
2918 msleep(50);
2919 status = ata_busy_wait(ap, ATA_BUSY, 3);
2920 }
2921
d1adc1bb 2922 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2923 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2924 "port is slow to respond, please be patient "
2925 "(Status 0x%x)\n", status);
1da177e4
LT
2926
2927 timeout = timer_start + tmout;
d1adc1bb
TH
2928 while (status != 0xff && (status & ATA_BUSY) &&
2929 time_before(jiffies, timeout)) {
1da177e4
LT
2930 msleep(50);
2931 status = ata_chk_status(ap);
2932 }
2933
d1adc1bb
TH
2934 if (status == 0xff)
2935 return -ENODEV;
2936
1da177e4 2937 if (status & ATA_BUSY) {
f15a1daf 2938 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2939 "(%lu secs, Status 0x%x)\n",
2940 tmout / HZ, status);
d1adc1bb 2941 return -EBUSY;
1da177e4
LT
2942 }
2943
2944 return 0;
2945}
2946
2947static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2948{
2949 struct ata_ioports *ioaddr = &ap->ioaddr;
2950 unsigned int dev0 = devmask & (1 << 0);
2951 unsigned int dev1 = devmask & (1 << 1);
2952 unsigned long timeout;
2953
2954 /* if device 0 was found in ata_devchk, wait for its
2955 * BSY bit to clear
2956 */
2957 if (dev0)
2958 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2959
2960 /* if device 1 was found in ata_devchk, wait for
2961 * register access, then wait for BSY to clear
2962 */
2963 timeout = jiffies + ATA_TMOUT_BOOT;
2964 while (dev1) {
2965 u8 nsect, lbal;
2966
2967 ap->ops->dev_select(ap, 1);
0d5ff566
TH
2968 nsect = ioread8(ioaddr->nsect_addr);
2969 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
2970 if ((nsect == 1) && (lbal == 1))
2971 break;
2972 if (time_after(jiffies, timeout)) {
2973 dev1 = 0;
2974 break;
2975 }
2976 msleep(50); /* give drive a breather */
2977 }
2978 if (dev1)
2979 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2980
2981 /* is all this really necessary? */
2982 ap->ops->dev_select(ap, 0);
2983 if (dev1)
2984 ap->ops->dev_select(ap, 1);
2985 if (dev0)
2986 ap->ops->dev_select(ap, 0);
2987}
2988
1da177e4
LT
2989static unsigned int ata_bus_softreset(struct ata_port *ap,
2990 unsigned int devmask)
2991{
2992 struct ata_ioports *ioaddr = &ap->ioaddr;
2993
44877b4e 2994 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
2995
2996 /* software reset. causes dev0 to be selected */
0d5ff566
TH
2997 iowrite8(ap->ctl, ioaddr->ctl_addr);
2998 udelay(20); /* FIXME: flush */
2999 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3000 udelay(20); /* FIXME: flush */
3001 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3002
3003 /* spec mandates ">= 2ms" before checking status.
3004 * We wait 150ms, because that was the magic delay used for
3005 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3006 * between when the ATA command register is written, and then
3007 * status is checked. Because waiting for "a while" before
3008 * checking status is fine, post SRST, we perform this magic
3009 * delay here as well.
09c7ad79
AC
3010 *
3011 * Old drivers/ide uses the 2mS rule and then waits for ready
1da177e4
LT
3012 */
3013 msleep(150);
3014
2e9edbf8 3015 /* Before we perform post reset processing we want to see if
298a41ca
TH
3016 * the bus shows 0xFF because the odd clown forgets the D7
3017 * pulldown resistor.
3018 */
d1adc1bb
TH
3019 if (ata_check_status(ap) == 0xFF)
3020 return 0;
09c7ad79 3021
1da177e4
LT
3022 ata_bus_post_reset(ap, devmask);
3023
3024 return 0;
3025}
3026
3027/**
3028 * ata_bus_reset - reset host port and associated ATA channel
3029 * @ap: port to reset
3030 *
3031 * This is typically the first time we actually start issuing
3032 * commands to the ATA channel. We wait for BSY to clear, then
3033 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3034 * result. Determine what devices, if any, are on the channel
3035 * by looking at the device 0/1 error register. Look at the signature
3036 * stored in each device's taskfile registers, to determine if
3037 * the device is ATA or ATAPI.
3038 *
3039 * LOCKING:
0cba632b 3040 * PCI/etc. bus probe sem.
cca3974e 3041 * Obtains host lock.
1da177e4
LT
3042 *
3043 * SIDE EFFECTS:
198e0fed 3044 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3045 */
3046
3047void ata_bus_reset(struct ata_port *ap)
3048{
3049 struct ata_ioports *ioaddr = &ap->ioaddr;
3050 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3051 u8 err;
aec5c3c1 3052 unsigned int dev0, dev1 = 0, devmask = 0;
1da177e4 3053
44877b4e 3054 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3055
3056 /* determine if device 0/1 are present */
3057 if (ap->flags & ATA_FLAG_SATA_RESET)
3058 dev0 = 1;
3059 else {
3060 dev0 = ata_devchk(ap, 0);
3061 if (slave_possible)
3062 dev1 = ata_devchk(ap, 1);
3063 }
3064
3065 if (dev0)
3066 devmask |= (1 << 0);
3067 if (dev1)
3068 devmask |= (1 << 1);
3069
3070 /* select device 0 again */
3071 ap->ops->dev_select(ap, 0);
3072
3073 /* issue bus reset */
3074 if (ap->flags & ATA_FLAG_SRST)
aec5c3c1
TH
3075 if (ata_bus_softreset(ap, devmask))
3076 goto err_out;
1da177e4
LT
3077
3078 /*
3079 * determine by signature whether we have ATA or ATAPI devices
3080 */
b4dc7623 3081 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 3082 if ((slave_possible) && (err != 0x81))
b4dc7623 3083 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
3084
3085 /* re-enable interrupts */
83625006 3086 ap->ops->irq_on(ap);
1da177e4
LT
3087
3088 /* is double-select really necessary? */
3089 if (ap->device[1].class != ATA_DEV_NONE)
3090 ap->ops->dev_select(ap, 1);
3091 if (ap->device[0].class != ATA_DEV_NONE)
3092 ap->ops->dev_select(ap, 0);
3093
3094 /* if no devices were detected, disable this port */
3095 if ((ap->device[0].class == ATA_DEV_NONE) &&
3096 (ap->device[1].class == ATA_DEV_NONE))
3097 goto err_out;
3098
3099 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3100 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3101 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3102 }
3103
3104 DPRINTK("EXIT\n");
3105 return;
3106
3107err_out:
f15a1daf 3108 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
3109 ap->ops->port_disable(ap);
3110
3111 DPRINTK("EXIT\n");
3112}
3113
d7bb4cc7
TH
3114/**
3115 * sata_phy_debounce - debounce SATA phy status
3116 * @ap: ATA port to debounce SATA phy status for
3117 * @params: timing parameters { interval, duration, timeout } in msec
3118 *
3119 * Make sure SStatus of @ap reaches stable state, determined by
3120 * holding the same value where DET is not 1 for @duration polled
3121 * every @interval, before @timeout. The timeout constrains the
3122 * beginning of the stable state. Because, after hot unplugging,
3123 * DET gets stuck at 1 on some controllers, this function waits
3124 * until timeout and then returns 0 if DET is stable at 1.
3125 *
3126 * LOCKING:
3127 * Kernel thread context (may sleep)
3128 *
3129 * RETURNS:
3130 * 0 on success, -errno on failure.
3131 */
3132int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
7a7921e8 3133{
d7bb4cc7
TH
3134 unsigned long interval_msec = params[0];
3135 unsigned long duration = params[1] * HZ / 1000;
3136 unsigned long timeout = jiffies + params[2] * HZ / 1000;
3137 unsigned long last_jiffies;
3138 u32 last, cur;
3139 int rc;
3140
3141 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3142 return rc;
3143 cur &= 0xf;
3144
3145 last = cur;
3146 last_jiffies = jiffies;
3147
3148 while (1) {
3149 msleep(interval_msec);
3150 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3151 return rc;
3152 cur &= 0xf;
3153
3154 /* DET stable? */
3155 if (cur == last) {
3156 if (cur == 1 && time_before(jiffies, timeout))
3157 continue;
3158 if (time_after(jiffies, last_jiffies + duration))
3159 return 0;
3160 continue;
3161 }
3162
3163 /* unstable, start over */
3164 last = cur;
3165 last_jiffies = jiffies;
3166
3167 /* check timeout */
3168 if (time_after(jiffies, timeout))
3169 return -EBUSY;
3170 }
3171}
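/*
 * Example with hypothetical parameters { 10, 200, 1000 }: DET is
 * sampled every 10 ms and 0 is returned as soon as the value has
 * been stable (and not stuck at 1) for 200 ms; a value stuck at 1 is
 * tolerated until the 1000 ms timeout and then also reported as 0,
 * while a link that never stabilizes fails with -EBUSY after the
 * timeout.
 */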
3172
3173/**
3174 * sata_phy_resume - resume SATA phy
3175 * @ap: ATA port to resume SATA phy for
3176 * @params: timing parameters { interval, duration, timeout } in msec
3177 *
3178 * Resume SATA phy of @ap and debounce it.
3179 *
3180 * LOCKING:
3181 * Kernel thread context (may sleep)
3182 *
3183 * RETURNS:
3184 * 0 on success, -errno on failure.
3185 */
3186int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
3187{
3188 u32 scontrol;
81952c54
TH
3189 int rc;
3190
3191 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3192 return rc;
7a7921e8 3193
852ee16a 3194 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
3195
3196 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3197 return rc;
7a7921e8 3198
d7bb4cc7
TH
3199 /* Some PHYs react badly if SStatus is pounded immediately
3200 * after resuming. Delay 200ms before debouncing.
3201 */
3202 msleep(200);
7a7921e8 3203
d7bb4cc7 3204 return sata_phy_debounce(ap, params);
7a7921e8
TH
3205}
3206
f5914a46
TH
3207static void ata_wait_spinup(struct ata_port *ap)
3208{
3209 struct ata_eh_context *ehc = &ap->eh_context;
3210 unsigned long end, secs;
3211 int rc;
3212
3213 /* first, debounce phy if SATA */
3214 if (ap->cbl == ATA_CBL_SATA) {
e9c83914 3215 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
f5914a46
TH
3216
3217 /* if debounced successfully and offline, no need to wait */
3218 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
3219 return;
3220 }
3221
3222 /* okay, let's give the drive time to spin up */
3223 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
3224 secs = ((end - jiffies) + HZ - 1) / HZ;
3225
3226 if (time_after(jiffies, end))
3227 return;
3228
3229 if (secs > 5)
3230 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
3231 "(%lu secs)\n", secs);
3232
3233 schedule_timeout_uninterruptible(end - jiffies);
3234}
3235
3236/**
3237 * ata_std_prereset - prepare for reset
3238 * @ap: ATA port to be reset
3239 *
3240 * @ap is about to be reset. Initialize it.
3241 *
3242 * LOCKING:
3243 * Kernel thread context (may sleep)
3244 *
3245 * RETURNS:
3246 * 0 on success, -errno otherwise.
3247 */
3248int ata_std_prereset(struct ata_port *ap)
3249{
3250 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 3251 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3252 int rc;
3253
28324304
TH
3254 /* handle link resume & hotplug spinup */
3255 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3256 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3257 ehc->i.action |= ATA_EH_HARDRESET;
3258
3259 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
3260 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
3261 ata_wait_spinup(ap);
f5914a46
TH
3262
3263 /* if we're about to do hardreset, nothing more to do */
3264 if (ehc->i.action & ATA_EH_HARDRESET)
3265 return 0;
3266
3267 /* if SATA, resume phy */
3268 if (ap->cbl == ATA_CBL_SATA) {
f5914a46
TH
3269 rc = sata_phy_resume(ap, timing);
3270 if (rc && rc != -EOPNOTSUPP) {
3271 /* phy resume failed */
3272 ata_port_printk(ap, KERN_WARNING, "failed to resume "
3273 "link for reset (errno=%d)\n", rc);
3274 return rc;
3275 }
3276 }
3277
3278 /* Wait for !BSY if the controller can wait for the first D2H
3279 * Reg FIS and we don't know that no device is attached.
3280 */
3281 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
3282 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
3283
3284 return 0;
3285}
3286
c2bd5804
TH
3287/**
3288 * ata_std_softreset - reset host port via ATA SRST
3289 * @ap: port to reset
c2bd5804
TH
3290 * @classes: resulting classes of attached devices
3291 *
52783c5d 3292 * Reset host port using ATA SRST.
c2bd5804
TH
3293 *
3294 * LOCKING:
3295 * Kernel thread context (may sleep)
3296 *
3297 * RETURNS:
3298 * 0 on success, -errno otherwise.
3299 */
2bf2cb26 3300int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
c2bd5804
TH
3301{
3302 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3303 unsigned int devmask = 0, err_mask;
3304 u8 err;
3305
3306 DPRINTK("ENTER\n");
3307
81952c54 3308 if (ata_port_offline(ap)) {
3a39746a
TH
3309 classes[0] = ATA_DEV_NONE;
3310 goto out;
3311 }
3312
c2bd5804
TH
3313 /* determine if device 0/1 are present */
3314 if (ata_devchk(ap, 0))
3315 devmask |= (1 << 0);
3316 if (slave_possible && ata_devchk(ap, 1))
3317 devmask |= (1 << 1);
3318
c2bd5804
TH
3319 /* select device 0 again */
3320 ap->ops->dev_select(ap, 0);
3321
3322 /* issue bus reset */
3323 DPRINTK("about to softreset, devmask=%x\n", devmask);
3324 err_mask = ata_bus_softreset(ap, devmask);
3325 if (err_mask) {
f15a1daf
TH
3326 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
3327 err_mask);
c2bd5804
TH
3328 return -EIO;
3329 }
3330
3331 /* determine by signature whether we have ATA or ATAPI devices */
3332 classes[0] = ata_dev_try_classify(ap, 0, &err);
3333 if (slave_possible && err != 0x81)
3334 classes[1] = ata_dev_try_classify(ap, 1, &err);
3335
3a39746a 3336 out:
c2bd5804
TH
3337 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3338 return 0;
3339}
3340
3341/**
b6103f6d 3342 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 3343 * @ap: port to reset
b6103f6d 3344 * @timing: timing parameters { interval, duration, timeout } in msec
c2bd5804
TH
3345 *
3346 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
3347 *
3348 * LOCKING:
3349 * Kernel thread context (may sleep)
3350 *
3351 * RETURNS:
3352 * 0 on success, -errno otherwise.
3353 */
b6103f6d 3354int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
c2bd5804 3355{
852ee16a 3356 u32 scontrol;
81952c54 3357 int rc;
852ee16a 3358
c2bd5804
TH
3359 DPRINTK("ENTER\n");
3360
3c567b7d 3361 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
3362 /* SATA spec says nothing about how to reconfigure
3363 * spd. To be on the safe side, turn off phy during
3364 * reconfiguration. This works for at least ICH7 AHCI
3365 * and Sil3124.
3366 */
81952c54 3367 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3368 goto out;
81952c54 3369
a34b6fc0 3370 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
3371
3372 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 3373 goto out;
1c3fae4d 3374
3c567b7d 3375 sata_set_spd(ap);
1c3fae4d
TH
3376 }
3377
3378 /* issue phy wake/reset */
81952c54 3379 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3380 goto out;
81952c54 3381
852ee16a 3382 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
3383
3384 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 3385 goto out;
c2bd5804 3386
1c3fae4d 3387 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3388 * 10.4.2 says at least 1 ms.
3389 */
3390 msleep(1);
3391
1c3fae4d 3392 /* bring phy back */
b6103f6d
TH
3393 rc = sata_phy_resume(ap, timing);
3394 out:
3395 DPRINTK("EXIT, rc=%d\n", rc);
3396 return rc;
3397}
3398
3399/**
3400 * sata_std_hardreset - reset host port via SATA phy reset
3401 * @ap: port to reset
3402 * @class: resulting class of attached device
3403 *
3404 * SATA phy-reset host port using DET bits of SControl register,
3405 * wait for !BSY and classify the attached device.
3406 *
3407 * LOCKING:
3408 * Kernel thread context (may sleep)
3409 *
3410 * RETURNS:
3411 * 0 on success, -errno otherwise.
3412 */
3413int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3414{
3415 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3416 int rc;
3417
3418 DPRINTK("ENTER\n");
3419
3420 /* do hardreset */
3421 rc = sata_port_hardreset(ap, timing);
3422 if (rc) {
3423 ata_port_printk(ap, KERN_ERR,
3424 "COMRESET failed (errno=%d)\n", rc);
3425 return rc;
3426 }
c2bd5804 3427
c2bd5804 3428 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3429 if (ata_port_offline(ap)) {
c2bd5804
TH
3430 *class = ATA_DEV_NONE;
3431 DPRINTK("EXIT, link offline\n");
3432 return 0;
3433 }
3434
34fee227
TH
3435 /* wait a while before checking status, see SRST for more info */
3436 msleep(150);
3437
c2bd5804 3438 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
f15a1daf
TH
3439 ata_port_printk(ap, KERN_ERR,
3440 "COMRESET failed (device not ready)\n");
c2bd5804
TH
3441 return -EIO;
3442 }
3443
3a39746a
TH
3444 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3445
c2bd5804
TH
3446 *class = ata_dev_try_classify(ap, 0, NULL);
3447
3448 DPRINTK("EXIT, class=%u\n", *class);
3449 return 0;
3450}
3451
3452/**
3453 * ata_std_postreset - standard postreset callback
3454 * @ap: the target ata_port
3455 * @classes: classes of attached devices
3456 *
3457 * This function is invoked after a successful reset. Note that
3458 * the device might have been reset more than once using
3459 * different reset methods before postreset is invoked.
c2bd5804 3460 *
c2bd5804
TH
3461 * LOCKING:
3462 * Kernel thread context (may sleep)
3463 */
3464void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3465{
dc2b3515
TH
3466 u32 serror;
3467
c2bd5804
TH
3468 DPRINTK("ENTER\n");
3469
c2bd5804 3470 /* print link status */
81952c54 3471 sata_print_link_status(ap);
c2bd5804 3472
dc2b3515
TH
3473 /* clear SError */
3474 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3475 sata_scr_write(ap, SCR_ERROR, serror);
3476
3a39746a 3477 /* re-enable interrupts */
83625006
AI
3478 if (!ap->ops->error_handler)
3479 ap->ops->irq_on(ap);
c2bd5804
TH
3480
3481 /* is double-select really necessary? */
3482 if (classes[0] != ATA_DEV_NONE)
3483 ap->ops->dev_select(ap, 1);
3484 if (classes[1] != ATA_DEV_NONE)
3485 ap->ops->dev_select(ap, 0);
3486
3a39746a
TH
3487 /* bail out if no device is present */
3488 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3489 DPRINTK("EXIT, no device\n");
3490 return;
3491 }
3492
3493 /* set up device control */
0d5ff566
TH
3494 if (ap->ioaddr.ctl_addr)
3495 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3496
3497 DPRINTK("EXIT\n");
3498}
3499
623a3128
TH
3500/**
3501 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3502 * @dev: device to compare against
3503 * @new_class: class of the new device
3504 * @new_id: IDENTIFY page of the new device
3505 *
3506 * Compare @new_class and @new_id against @dev and determine
3507 * whether @dev is the device indicated by @new_class and
3508 * @new_id.
3509 *
3510 * LOCKING:
3511 * None.
3512 *
3513 * RETURNS:
3514 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3515 */
3373efd8
TH
3516static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3517 const u16 *new_id)
623a3128
TH
3518{
3519 const u16 *old_id = dev->id;
a0cf733b
TH
3520 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3521 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3522 u64 new_n_sectors;
3523
3524 if (dev->class != new_class) {
f15a1daf
TH
3525 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3526 dev->class, new_class);
623a3128
TH
3527 return 0;
3528 }
3529
a0cf733b
TH
3530 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3531 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3532 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3533 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3534 new_n_sectors = ata_id_n_sectors(new_id);
3535
3536 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3537 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3538 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3539 return 0;
3540 }
3541
3542 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3543 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3544 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3545 return 0;
3546 }
3547
3548 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3549 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3550 "%llu != %llu\n",
3551 (unsigned long long)dev->n_sectors,
3552 (unsigned long long)new_n_sectors);
1e999736
AC
3553 /* Are we the boot time size - if so we appear to be the
3554 same disk at this point and our HPA got reapplied */
3555 if (ata_ignore_hpa && dev->n_sectors_boot == new_n_sectors
3556 && ata_id_hpa_enabled(new_id))
3557 return 1;
623a3128
TH
3558 return 0;
3559 }
3560
3561 return 1;
3562}
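/*
 * Illustrative note: the n_sectors escape hatch above is what keeps
 * an ata_ignore_hpa setup working across a BIOS re-applying the host
 * protected area.  The re-read IDENTIFY then reports the clipped
 * boot-time size again; since that matches n_sectors_boot and HPA is
 * enabled in the new ID, the disk is still treated as the same
 * device instead of being dropped.
 */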
3563
3564/**
3565 * ata_dev_revalidate - Revalidate ATA device
623a3128 3566 * @dev: device to revalidate
bff04647 3567 * @readid_flags: read ID flags
623a3128
TH
3568 *
3569 * Re-read IDENTIFY page and make sure @dev is still attached to
3570 * the port.
3571 *
3572 * LOCKING:
3573 * Kernel thread context (may sleep)
3574 *
3575 * RETURNS:
3576 * 0 on success, negative errno otherwise
3577 */
bff04647 3578int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3579{
5eb45c02 3580 unsigned int class = dev->class;
f15a1daf 3581 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3582 int rc;
3583
5eb45c02
TH
3584 if (!ata_dev_enabled(dev)) {
3585 rc = -ENODEV;
3586 goto fail;
3587 }
623a3128 3588
fe635c7e 3589 /* read ID data */
bff04647 3590 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3591 if (rc)
3592 goto fail;
3593
3594 /* is the device still there? */
3373efd8 3595 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3596 rc = -ENODEV;
3597 goto fail;
3598 }
3599
fe635c7e 3600 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3601
3602 /* configure device according to the new ID */
efdaedc4 3603 rc = ata_dev_configure(dev);
5eb45c02
TH
3604 if (rc == 0)
3605 return 0;
623a3128
TH
3606
3607 fail:
f15a1daf 3608 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3609 return rc;
3610}
3611
6919a0a6
AC
3612struct ata_blacklist_entry {
3613 const char *model_num;
3614 const char *model_rev;
3615 unsigned long horkage;
3616};
3617
3618static const struct ata_blacklist_entry ata_device_blacklist [] = {
3619 /* Devices with DMA related problems under Linux */
3620 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3621 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3622 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3623 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3624 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3625 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3626 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3627 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3628 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3629 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3630 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3631 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3632 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3633 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3634 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3635 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3636 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3637 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3638 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3639 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3640 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3641 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3642 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3643 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3644 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3645 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3646 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3647 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3648 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3649
18d6e9d5 3650 /* Weird ATAPI devices */
6f23a31d
AL
3651 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 |
3652 ATA_HORKAGE_DMA_RW_ONLY },
18d6e9d5 3653
6919a0a6
AC
3654 /* Devices we expect to fail diagnostics */
3655
3656 /* Devices where NCQ should be avoided */
3657 /* NCQ is slow */
3658 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3659 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3660 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30
PR
3661 /* NCQ is broken */
3662 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
96442925
JA
3663 /* NCQ hard hangs device under heavier load, needs hard power cycle */
3664 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
36e337d0
RH
3665 /* Blacklist entries taken from Silicon Image 3124/3132
3666 Windows driver .inf file - also several Linux problem reports */
3667 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3668 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3669 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6
AC
3670
3671 /* Devices with NCQ limits */
3672
3673 /* End Marker */
3674 { }
1da177e4 3675};
2e9edbf8 3676
6919a0a6 3677unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3678{
8bfa79fc
TH
3679 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3680 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3681 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3682
8bfa79fc
TH
3683 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3684 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3685
6919a0a6 3686 while (ad->model_num) {
8bfa79fc 3687 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3688 if (ad->model_rev == NULL)
3689 return ad->horkage;
8bfa79fc 3690 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3691 return ad->horkage;
f4b15fef 3692 }
6919a0a6 3693 ad++;
f4b15fef 3694 }
1da177e4
LT
3695 return 0;
3696}
3697
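
/* Editor's sketch, not part of the source: how the horkage mask returned
 * by ata_device_blacklisted() is typically tested. ATA_HORKAGE_NONCQ is
 * one of the quirk bits used in the table above; the helper name is
 * hypothetical.
 */
static inline int example_dev_noncq(const struct ata_device *dev)
{
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) != 0;
}
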
6919a0a6
AC
3698static int ata_dma_blacklisted(const struct ata_device *dev)
3699{
3700 /* We don't support polling DMA.
3701	 * DMA-blacklist those ATAPI devices with CDB-intr (and use PIO)
3702	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
3703 */
3704 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3705 (dev->flags & ATA_DFLAG_CDB_INTR))
3706 return 1;
3707 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3708}
3709
a6d5a51c
TH
3710/**
3711 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3712 * @dev: Device to compute xfermask for
3713 *
acf356b1
TH
3714 * Compute supported xfermask of @dev and store it in
3715 * dev->*_mask. This function is responsible for applying all
3716 * known limits including host controller limits, device
3717 * blacklist, etc...
a6d5a51c
TH
3718 *
3719 * LOCKING:
3720 * None.
a6d5a51c 3721 */
3373efd8 3722static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3723{
3373efd8 3724 struct ata_port *ap = dev->ap;
cca3974e 3725 struct ata_host *host = ap->host;
a6d5a51c 3726 unsigned long xfer_mask;
1da177e4 3727
37deecb5 3728 /* controller modes available */
565083e1
TH
3729 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3730 ap->mwdma_mask, ap->udma_mask);
3731
8343f889 3732 /* drive modes available */
37deecb5
TH
3733 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3734 dev->mwdma_mask, dev->udma_mask);
3735 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3736
b352e57d
AC
3737 /*
3738 * CFA Advanced TrueIDE timings are not allowed on a shared
3739 * cable
3740 */
3741 if (ata_dev_pair(dev)) {
3742 /* No PIO5 or PIO6 */
3743 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3744		/* No MWDMA3 or MWDMA4 */
3745 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3746 }
3747
37deecb5
TH
3748 if (ata_dma_blacklisted(dev)) {
3749 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3750 ata_dev_printk(dev, KERN_WARNING,
3751 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3752 }
a6d5a51c 3753
14d66ab7
PV
3754 if ((host->flags & ATA_HOST_SIMPLEX) &&
3755 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
3756 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3757 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3758 "other device, disabling DMA\n");
5444a6f4 3759 }
565083e1 3760
e424675f
JG
3761 if (ap->flags & ATA_FLAG_NO_IORDY)
3762 xfer_mask &= ata_pio_mask_no_iordy(dev);
3763
5444a6f4 3764 if (ap->ops->mode_filter)
a76b62ca 3765 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 3766
8343f889
RH
3767 /* Apply cable rule here. Don't apply it early because when
3768 * we handle hot plug the cable type can itself change.
3769 * Check this last so that we know if the transfer rate was
3770 * solely limited by the cable.
3771	 * Unknown or 80-wire cables reported on the host side are checked
3772	 * on the drive side as well. Cases where we know a 40-wire cable
3773	 * is safely used at 80-wire speeds are not checked here.
3774 */
3775 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3776 /* UDMA/44 or higher would be available */
3777		if ((ap->cbl == ATA_CBL_PATA40) ||
3778 (ata_drive_40wire(dev->id) &&
3779 (ap->cbl == ATA_CBL_PATA_UNK ||
3780 ap->cbl == ATA_CBL_PATA80))) {
3781 ata_dev_printk(dev, KERN_WARNING,
3782 "limited to UDMA/33 due to 40-wire cable\n");
3783 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3784 }
3785
565083e1
TH
3786 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3787 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3788}
3789
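
/* Editor's worked example, not in the source: ata_pack_xfermask() and
 * ata_unpack_xfermask() fold the three per-class masks (PIO, MWDMA,
 * UDMA) into a single value so that each limit becomes one mask
 * operation. For instance, (0xF8 << ATA_SHIFT_UDMA) above selects the
 * bits for UDMA3 (UDMA/44) and higher; clearing them for a 40-wire
 * cable leaves UDMA/33 as the fastest advertised UDMA mode.
 */
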
1da177e4
LT
3790/**
3791 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3792 * @dev: Device to which command will be sent
3793 *
780a87f7
JG
3794 * Issue SET FEATURES - XFER MODE command to device @dev
3795	 * on its port.
3796 *
1da177e4 3797 * LOCKING:
0cba632b 3798 * PCI/etc. bus probe sem.
83206a29
TH
3799 *
3800 * RETURNS:
3801 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3802 */
3803
3373efd8 3804static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3805{
a0123703 3806 struct ata_taskfile tf;
83206a29 3807 unsigned int err_mask;
1da177e4
LT
3808
3809 /* set up set-features taskfile */
3810 DPRINTK("set features - xfer mode\n");
3811
3373efd8 3812 ata_tf_init(dev, &tf);
a0123703
TH
3813 tf.command = ATA_CMD_SET_FEATURES;
3814 tf.feature = SETFEATURES_XFER;
3815 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3816 tf.protocol = ATA_PROT_NODATA;
3817 tf.nsect = dev->xfer_mode;
1da177e4 3818
3373efd8 3819 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3820
83206a29
TH
3821 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3822 return err_mask;
1da177e4
LT
3823}
3824
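
/* Editor's sketch, a hypothetical caller not present in the source:
 * the AC_ERR_* mask returned by ata_dev_set_xfermode() is checked along
 * these lines in the mode-setting path.
 */
static int example_set_xfermode(struct ata_device *dev)
{
	unsigned int err_mask = ata_dev_set_xfermode(dev);

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR,
			       "failed to set xfermode (err_mask=0x%x)\n",
			       err_mask);
		return -EIO;
	}
	return 0;
}
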
8bf62ece
AL
3825/**
3826 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3827 * @dev: Device to which command will be sent
e2a7f77a
RD
3828 * @heads: Number of heads (taskfile parameter)
3829 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3830 *
3831 * LOCKING:
6aff8f1f
TH
3832 * Kernel thread context (may sleep)
3833 *
3834 * RETURNS:
3835 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3836 */
3373efd8
TH
3837static unsigned int ata_dev_init_params(struct ata_device *dev,
3838 u16 heads, u16 sectors)
8bf62ece 3839{
a0123703 3840 struct ata_taskfile tf;
6aff8f1f 3841 unsigned int err_mask;
8bf62ece
AL
3842
3843 /* Number of sectors per track 1-255. Number of heads 1-16 */
3844 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3845 return AC_ERR_INVALID;
8bf62ece
AL
3846
3847 /* set up init dev params taskfile */
3848	DPRINTK("init dev params\n");
3849
3373efd8 3850 ata_tf_init(dev, &tf);
a0123703
TH
3851 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3852 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3853 tf.protocol = ATA_PROT_NODATA;
3854 tf.nsect = sectors;
3855 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3856
3373efd8 3857 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3858
6aff8f1f
TH
3859 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3860 return err_mask;
8bf62ece
AL
3861}
3862
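
/* Editor's worked example, not in the source: for a legacy CHS drive
 * reporting 16 heads and 63 sectors/track, the taskfile built above
 * carries tf.nsect = 63 and tf.device |= (16 - 1) & 0x0f = 0x0f,
 * following the INIT DEVICE PARAMETERS convention that the device
 * field holds "number of heads minus one".
 */
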
1da177e4 3863/**
0cba632b
JG
3864 * ata_sg_clean - Unmap DMA memory associated with command
3865 * @qc: Command containing DMA memory to be released
3866 *
3867 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3868 *
3869 * LOCKING:
cca3974e 3870 * spin_lock_irqsave(host lock)
1da177e4 3871 */
70e6ad0c 3872void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
3873{
3874 struct ata_port *ap = qc->ap;
cedc9a47 3875 struct scatterlist *sg = qc->__sg;
1da177e4 3876 int dir = qc->dma_dir;
cedc9a47 3877 void *pad_buf = NULL;
1da177e4 3878
a4631474
TH
3879 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3880 WARN_ON(sg == NULL);
1da177e4
LT
3881
3882 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 3883 WARN_ON(qc->n_elem > 1);
1da177e4 3884
2c13b7ce 3885 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 3886
cedc9a47
JG
3887 /* if we padded the buffer out to 32-bit bound, and data
3888 * xfer direction is from-device, we must copy from the
3889 * pad buffer back into the supplied buffer
3890 */
3891 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3892 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3893
3894 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 3895 if (qc->n_elem)
2f1f610b 3896 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
3897 /* restore last sg */
3898 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3899 if (pad_buf) {
3900 struct scatterlist *psg = &qc->pad_sgent;
3901 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3902 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 3903 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3904 }
3905 } else {
2e242fa9 3906 if (qc->n_elem)
2f1f610b 3907 dma_unmap_single(ap->dev,
e1410f2d
JG
3908 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3909 dir);
cedc9a47
JG
3910 /* restore sg */
3911 sg->length += qc->pad_len;
3912 if (pad_buf)
3913 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3914 pad_buf, qc->pad_len);
3915 }
1da177e4
LT
3916
3917 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 3918 qc->__sg = NULL;
1da177e4
LT
3919}
3920
3921/**
3922 * ata_fill_sg - Fill PCI IDE PRD table
3923 * @qc: Metadata associated with taskfile to be transferred
3924 *
780a87f7
JG
3925 * Fill PCI IDE PRD (scatter-gather) table with segments
3926 * associated with the current disk command.
3927 *
1da177e4 3928 * LOCKING:
cca3974e 3929 * spin_lock_irqsave(host lock)
1da177e4
LT
3930 *
3931 */
3932static void ata_fill_sg(struct ata_queued_cmd *qc)
3933{
1da177e4 3934 struct ata_port *ap = qc->ap;
cedc9a47
JG
3935 struct scatterlist *sg;
3936 unsigned int idx;
1da177e4 3937
a4631474 3938 WARN_ON(qc->__sg == NULL);
f131883e 3939 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
3940
3941 idx = 0;
cedc9a47 3942 ata_for_each_sg(sg, qc) {
1da177e4
LT
3943 u32 addr, offset;
3944 u32 sg_len, len;
3945
3946 /* determine if physical DMA addr spans 64K boundary.
3947 * Note h/w doesn't support 64-bit, so we unconditionally
3948 * truncate dma_addr_t to u32.
3949 */
3950 addr = (u32) sg_dma_address(sg);
3951 sg_len = sg_dma_len(sg);
3952
3953 while (sg_len) {
3954 offset = addr & 0xffff;
3955 len = sg_len;
3956 if ((offset + sg_len) > 0x10000)
3957 len = 0x10000 - offset;
3958
3959 ap->prd[idx].addr = cpu_to_le32(addr);
3960 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3961 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3962
3963 idx++;
3964 sg_len -= len;
3965 addr += len;
3966 }
3967 }
3968
3969 if (idx)
3970 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3971}
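
/* Editor's worked example, not in the source: a 10 KiB segment with bus
 * address 0xFFFF0 crosses a 64K boundary, so the loop above emits two
 * PRD entries,
 *
 *	PRD[n]   = (0x000FFFF0, 0x0010)    16 bytes up to the boundary
 *	PRD[n+1] = (0x00100000, 0x27F0)    the remaining 10224 bytes
 *
 * and ATA_PRD_EOT is OR'd into the last entry written.
 */
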
3972/**
3973 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3974 * @qc: Metadata associated with taskfile to check
3975 *
780a87f7
JG
3976 * Allow low-level driver to filter ATA PACKET commands, returning
3977 * a status indicating whether or not it is OK to use DMA for the
3978 * supplied PACKET command.
3979 *
1da177e4 3980 * LOCKING:
cca3974e 3981 * spin_lock_irqsave(host lock)
0cba632b 3982 *
1da177e4
LT
3983 * RETURNS: 0 when ATAPI DMA can be used,
3984 *               nonzero otherwise
3985 */
3986int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3987{
3988 struct ata_port *ap = qc->ap;
3989 int rc = 0; /* Assume ATAPI DMA is OK by default */
3990
6f23a31d
AL
3991 /* some drives can only do ATAPI DMA on read/write */
3992 if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
3993 struct scsi_cmnd *cmd = qc->scsicmd;
3994 u8 *scsicmd = cmd->cmnd;
3995
3996 switch (scsicmd[0]) {
3997 case READ_10:
3998 case WRITE_10:
3999 case READ_12:
4000 case WRITE_12:
4001 case READ_6:
4002 case WRITE_6:
4003 /* atapi dma maybe ok */
4004 break;
4005 default:
4006 /* turn off atapi dma */
4007 return 1;
4008 }
4009 }
4010
1da177e4
LT
4011 if (ap->ops->check_atapi_dma)
4012 rc = ap->ops->check_atapi_dma(qc);
4013
4014 return rc;
4015}
4016/**
4017 * ata_qc_prep - Prepare taskfile for submission
4018 * @qc: Metadata associated with taskfile to be prepared
4019 *
780a87f7
JG
4020 * Prepare ATA taskfile for submission.
4021 *
1da177e4 4022 * LOCKING:
cca3974e 4023 * spin_lock_irqsave(host lock)
1da177e4
LT
4024 */
4025void ata_qc_prep(struct ata_queued_cmd *qc)
4026{
4027 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4028 return;
4029
4030 ata_fill_sg(qc);
4031}
4032
e46834cd
BK
4033void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4034
0cba632b
JG
4035/**
4036 * ata_sg_init_one - Associate command with memory buffer
4037 * @qc: Command to be associated
4038 * @buf: Memory buffer
4039 * @buflen: Length of memory buffer, in bytes.
4040 *
4041 * Initialize the data-related elements of queued_cmd @qc
4042 * to point to a single memory buffer, @buf of byte length @buflen.
4043 *
4044 * LOCKING:
cca3974e 4045 * spin_lock_irqsave(host lock)
0cba632b
JG
4046 */
4047
1da177e4
LT
4048void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4049{
1da177e4
LT
4050 qc->flags |= ATA_QCFLAG_SINGLE;
4051
cedc9a47 4052 qc->__sg = &qc->sgent;
1da177e4 4053 qc->n_elem = 1;
cedc9a47 4054 qc->orig_n_elem = 1;
1da177e4 4055 qc->buf_virt = buf;
233277ca 4056 qc->nbytes = buflen;
1da177e4 4057
61c0596c 4058 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4059}
4060
0cba632b
JG
4061/**
4062 * ata_sg_init - Associate command with scatter-gather table.
4063 * @qc: Command to be associated
4064 * @sg: Scatter-gather table.
4065 * @n_elem: Number of elements in s/g table.
4066 *
4067 * Initialize the data-related elements of queued_cmd @qc
4068 * to point to a scatter-gather table @sg, containing @n_elem
4069 * elements.
4070 *
4071 * LOCKING:
cca3974e 4072 * spin_lock_irqsave(host lock)
0cba632b
JG
4073 */
4074
1da177e4
LT
4075void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4076 unsigned int n_elem)
4077{
4078 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4079 qc->__sg = sg;
1da177e4 4080 qc->n_elem = n_elem;
cedc9a47 4081 qc->orig_n_elem = n_elem;
1da177e4
LT
4082}
4083
4084/**
0cba632b
JG
4085 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4086 * @qc: Command with memory buffer to be mapped.
4087 *
4088 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4089 *
4090 * LOCKING:
cca3974e 4091 * spin_lock_irqsave(host lock)
1da177e4
LT
4092 *
4093 * RETURNS:
0cba632b 4094 * Zero on success, negative on error.
1da177e4
LT
4095 */
4096
4097static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4098{
4099 struct ata_port *ap = qc->ap;
4100 int dir = qc->dma_dir;
cedc9a47 4101 struct scatterlist *sg = qc->__sg;
1da177e4 4102 dma_addr_t dma_address;
2e242fa9 4103 int trim_sg = 0;
1da177e4 4104
cedc9a47
JG
4105 /* we must lengthen transfers to end on a 32-bit boundary */
4106 qc->pad_len = sg->length & 3;
4107 if (qc->pad_len) {
4108 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4109 struct scatterlist *psg = &qc->pad_sgent;
4110
a4631474 4111 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4112
4113 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4114
4115 if (qc->tf.flags & ATA_TFLAG_WRITE)
4116 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4117 qc->pad_len);
4118
4119 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4120 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4121 /* trim sg */
4122 sg->length -= qc->pad_len;
2e242fa9
TH
4123 if (sg->length == 0)
4124 trim_sg = 1;
cedc9a47
JG
4125
4126 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4127 sg->length, qc->pad_len);
4128 }
4129
2e242fa9
TH
4130 if (trim_sg) {
4131 qc->n_elem--;
e1410f2d
JG
4132 goto skip_map;
4133 }
4134
2f1f610b 4135 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 4136 sg->length, dir);
537a95d9
TH
4137 if (dma_mapping_error(dma_address)) {
4138 /* restore sg */
4139 sg->length += qc->pad_len;
1da177e4 4140 return -1;
537a95d9 4141 }
1da177e4
LT
4142
4143 sg_dma_address(sg) = dma_address;
32529e01 4144 sg_dma_len(sg) = sg->length;
1da177e4 4145
2e242fa9 4146skip_map:
1da177e4
LT
4147 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4148 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4149
4150 return 0;
4151}
4152
4153/**
0cba632b
JG
4154 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4155 * @qc: Command with scatter-gather table to be mapped.
4156 *
4157 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4158 *
4159 * LOCKING:
cca3974e 4160 * spin_lock_irqsave(host lock)
1da177e4
LT
4161 *
4162 * RETURNS:
0cba632b 4163 * Zero on success, negative on error.
1da177e4
LT
4164 *
4165 */
4166
4167static int ata_sg_setup(struct ata_queued_cmd *qc)
4168{
4169 struct ata_port *ap = qc->ap;
cedc9a47
JG
4170 struct scatterlist *sg = qc->__sg;
4171 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 4172 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 4173
44877b4e 4174 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 4175 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 4176
cedc9a47
JG
4177 /* we must lengthen transfers to end on a 32-bit boundary */
4178 qc->pad_len = lsg->length & 3;
4179 if (qc->pad_len) {
4180 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4181 struct scatterlist *psg = &qc->pad_sgent;
4182 unsigned int offset;
4183
a4631474 4184 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4185
4186 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4187
4188 /*
4189 * psg->page/offset are used to copy to-be-written
4190 * data in this function or read data in ata_sg_clean.
4191 */
4192 offset = lsg->offset + lsg->length - qc->pad_len;
4193 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4194 psg->offset = offset_in_page(offset);
4195
4196 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4197 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4198 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 4199 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4200 }
4201
4202 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4203 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4204 /* trim last sg */
4205 lsg->length -= qc->pad_len;
e1410f2d
JG
4206 if (lsg->length == 0)
4207 trim_sg = 1;
cedc9a47
JG
4208
4209 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4210 qc->n_elem - 1, lsg->length, qc->pad_len);
4211 }
4212
e1410f2d
JG
4213 pre_n_elem = qc->n_elem;
4214 if (trim_sg && pre_n_elem)
4215 pre_n_elem--;
4216
4217 if (!pre_n_elem) {
4218 n_elem = 0;
4219 goto skip_map;
4220 }
4221
1da177e4 4222 dir = qc->dma_dir;
2f1f610b 4223 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
4224 if (n_elem < 1) {
4225 /* restore last sg */
4226 lsg->length += qc->pad_len;
1da177e4 4227 return -1;
537a95d9 4228 }
1da177e4
LT
4229
4230 DPRINTK("%d sg elements mapped\n", n_elem);
4231
e1410f2d 4232skip_map:
1da177e4
LT
4233 qc->n_elem = n_elem;
4234
4235 return 0;
4236}
4237
0baab86b 4238/**
c893a3ae 4239 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4240 * @buf: Buffer to swap
4241 * @buf_words: Number of 16-bit words in buffer.
4242 *
4243 * Swap halves of 16-bit words if needed to convert from
4244 * little-endian byte order to native cpu byte order, or
4245 * vice-versa.
4246 *
4247 * LOCKING:
6f0ef4fa 4248 * Inherited from caller.
0baab86b 4249 */
1da177e4
LT
4250void swap_buf_le16(u16 *buf, unsigned int buf_words)
4251{
4252#ifdef __BIG_ENDIAN
4253 unsigned int i;
4254
4255 for (i = 0; i < buf_words; i++)
4256 buf[i] = le16_to_cpu(buf[i]);
4257#endif /* __BIG_ENDIAN */
4258}
4259
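
/* Editor's worked example, not in the source: IDENTIFY data is a stream
 * of little-endian 16-bit words. The byte pair {0xFF, 0x3F} read as a
 * native u16 on a big-endian CPU yields 0xFF3F; le16_to_cpu() above
 * restores the intended value 0x3FFF. On little-endian builds the loop
 * compiles away entirely.
 */
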
6ae4cfb5 4260/**
0d5ff566 4261 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4262 * @adev: device to target
6ae4cfb5
AL
4263 * @buf: data buffer
4264 * @buflen: buffer length
344babaa 4265 * @write_data: read/write
6ae4cfb5
AL
4266 *
4267 * Transfer data from/to the device data register by PIO.
4268 *
4269 * LOCKING:
4270 * Inherited from caller.
6ae4cfb5 4271 */
0d5ff566
TH
4272void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4273 unsigned int buflen, int write_data)
1da177e4 4274{
a6b2c5d4 4275 struct ata_port *ap = adev->ap;
6ae4cfb5 4276 unsigned int words = buflen >> 1;
1da177e4 4277
6ae4cfb5 4278 /* Transfer multiple of 2 bytes */
1da177e4 4279 if (write_data)
0d5ff566 4280 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4281 else
0d5ff566 4282 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4283
4284 /* Transfer trailing 1 byte, if any. */
4285 if (unlikely(buflen & 0x01)) {
4286 u16 align_buf[1] = { 0 };
4287 unsigned char *trailing_buf = buf + buflen - 1;
4288
4289 if (write_data) {
4290 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4291 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4292 } else {
0d5ff566 4293 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4294 memcpy(trailing_buf, align_buf, 1);
4295 }
4296 }
1da177e4
LT
4297}
4298
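
/* Editor's worked example, not in the source: a 7-byte PIO transfer
 * moves words = 7 >> 1 = 3 full 16-bit words through
 * ioread16_rep()/iowrite16_rep(), then bounces the trailing byte
 * through the u16 align_buf so the device only ever sees whole
 * 16-bit accesses.
 */
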
75e99585 4299/**
0d5ff566 4300 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
4301 * @adev: device to target
4302 * @buf: data buffer
4303 * @buflen: buffer length
4304 * @write_data: read/write
4305 *
88574551 4306 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
4307 * transfer with interrupts disabled.
4308 *
4309 * LOCKING:
4310 * Inherited from caller.
4311 */
0d5ff566
TH
4312void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4313 unsigned int buflen, int write_data)
75e99585
AC
4314{
4315 unsigned long flags;
4316 local_irq_save(flags);
0d5ff566 4317 ata_data_xfer(adev, buf, buflen, write_data);
75e99585
AC
4318 local_irq_restore(flags);
4319}
4320
4321
6ae4cfb5 4322/**
5a5dbd18 4323 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4324 * @qc: Command in progress
4325 *
5a5dbd18 4326 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4327 *
4328 * LOCKING:
4329 * Inherited from caller.
4330 */
4331
1da177e4
LT
4332static void ata_pio_sector(struct ata_queued_cmd *qc)
4333{
4334 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4335 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4336 struct ata_port *ap = qc->ap;
4337 struct page *page;
4338 unsigned int offset;
4339 unsigned char *buf;
4340
5a5dbd18 4341 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 4342 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4343
4344 page = sg[qc->cursg].page;
726f0785 4345 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4346
4347 /* get the current page and offset */
4348 page = nth_page(page, (offset >> PAGE_SHIFT));
4349 offset %= PAGE_SIZE;
4350
1da177e4
LT
4351 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4352
91b8b313
AL
4353 if (PageHighMem(page)) {
4354 unsigned long flags;
4355
a6b2c5d4 4356 /* FIXME: use a bounce buffer */
91b8b313
AL
4357 local_irq_save(flags);
4358 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4359
91b8b313 4360 /* do the actual data transfer */
5a5dbd18 4361 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 4362
91b8b313
AL
4363 kunmap_atomic(buf, KM_IRQ0);
4364 local_irq_restore(flags);
4365 } else {
4366 buf = page_address(page);
5a5dbd18 4367 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 4368 }
1da177e4 4369
5a5dbd18
ML
4370 qc->curbytes += qc->sect_size;
4371 qc->cursg_ofs += qc->sect_size;
1da177e4 4372
726f0785 4373 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4374 qc->cursg++;
4375 qc->cursg_ofs = 0;
4376 }
1da177e4 4377}
1da177e4 4378
07f6f7d0 4379/**
5a5dbd18 4380 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4381 * @qc: Command in progress
4382 *
5a5dbd18 4383 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4384 * ATA device for the DRQ request.
4385 *
4386 * LOCKING:
4387 * Inherited from caller.
4388 */
1da177e4 4389
07f6f7d0
AL
4390static void ata_pio_sectors(struct ata_queued_cmd *qc)
4391{
4392 if (is_multi_taskfile(&qc->tf)) {
4393 /* READ/WRITE MULTIPLE */
4394 unsigned int nsect;
4395
587005de 4396 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4397
5a5dbd18 4398 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4399 qc->dev->multi_count);
07f6f7d0
AL
4400 while (nsect--)
4401 ata_pio_sector(qc);
4402 } else
4403 ata_pio_sector(qc);
4404}
4405
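
/* Editor's worked example, not in the source: for a READ MULTIPLE with
 * qc->nbytes = 16384, qc->curbytes = 0, qc->sect_size = 512 and
 * dev->multi_count = 16, the computation above yields
 * nsect = min(32, 16) = 16, i.e. one 16-sector block per DRQ event.
 */
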
c71c1857
AL
4406/**
4407 * atapi_send_cdb - Write CDB bytes to hardware
4408 * @ap: Port to which ATAPI device is attached.
4409 * @qc: Taskfile currently active
4410 *
4411 * When device has indicated its readiness to accept
4412 * a CDB, this function is called. Send the CDB.
4413 *
4414 * LOCKING:
4415 * caller.
4416 */
4417
4418static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4419{
4420 /* send SCSI cdb */
4421 DPRINTK("send cdb\n");
db024d53 4422 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4423
a6b2c5d4 4424 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4425 ata_altstatus(ap); /* flush */
4426
4427 switch (qc->tf.protocol) {
4428 case ATA_PROT_ATAPI:
4429 ap->hsm_task_state = HSM_ST;
4430 break;
4431 case ATA_PROT_ATAPI_NODATA:
4432 ap->hsm_task_state = HSM_ST_LAST;
4433 break;
4434 case ATA_PROT_ATAPI_DMA:
4435 ap->hsm_task_state = HSM_ST_LAST;
4436 /* initiate bmdma */
4437 ap->ops->bmdma_start(qc);
4438 break;
4439 }
1da177e4
LT
4440}
4441
6ae4cfb5
AL
4442/**
4443 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4444 * @qc: Command in progress
4445 * @bytes: number of bytes
4446 *
4447 * Transfer data from/to the ATAPI device.
4448 *
4449 * LOCKING:
4450 * Inherited from caller.
4451 *
4452 */
4453
1da177e4
LT
4454static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4455{
4456 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4457 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4458 struct ata_port *ap = qc->ap;
4459 struct page *page;
4460 unsigned char *buf;
4461 unsigned int offset, count;
4462
563a6e1f 4463 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4464 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4465
4466next_sg:
563a6e1f 4467 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4468 /*
563a6e1f
AL
4469 * The end of qc->sg is reached and the device expects
4470 * more data to transfer. In order not to overrun qc->sg
4471 * and fulfill length specified in the byte count register,
4472 * - for read case, discard trailing data from the device
4473 * - for write case, padding zero data to the device
4474 */
4475 u16 pad_buf[1] = { 0 };
4476 unsigned int words = bytes >> 1;
4477 unsigned int i;
4478
4479 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4480 ata_dev_printk(qc->dev, KERN_WARNING,
4481 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4482
4483 for (i = 0; i < words; i++)
a6b2c5d4 4484 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4485
14be71f4 4486 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4487 return;
4488 }
4489
cedc9a47 4490 sg = &qc->__sg[qc->cursg];
1da177e4 4491
1da177e4
LT
4492 page = sg->page;
4493 offset = sg->offset + qc->cursg_ofs;
4494
4495 /* get the current page and offset */
4496 page = nth_page(page, (offset >> PAGE_SHIFT));
4497 offset %= PAGE_SIZE;
4498
6952df03 4499 /* don't overrun current sg */
32529e01 4500 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4501
4502 /* don't cross page boundaries */
4503 count = min(count, (unsigned int)PAGE_SIZE - offset);
4504
7282aa4b
AL
4505 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4506
91b8b313
AL
4507 if (PageHighMem(page)) {
4508 unsigned long flags;
4509
a6b2c5d4 4510 /* FIXME: use bounce buffer */
91b8b313
AL
4511 local_irq_save(flags);
4512 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4513
91b8b313 4514 /* do the actual data transfer */
a6b2c5d4 4515 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4516
91b8b313
AL
4517 kunmap_atomic(buf, KM_IRQ0);
4518 local_irq_restore(flags);
4519 } else {
4520 buf = page_address(page);
a6b2c5d4 4521 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4522 }
1da177e4
LT
4523
4524 bytes -= count;
4525 qc->curbytes += count;
4526 qc->cursg_ofs += count;
4527
32529e01 4528 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4529 qc->cursg++;
4530 qc->cursg_ofs = 0;
4531 }
4532
563a6e1f 4533 if (bytes)
1da177e4 4534 goto next_sg;
1da177e4
LT
4535}
4536
6ae4cfb5
AL
4537/**
4538 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4539 * @qc: Command in progress
4540 *
4541 * Transfer data from/to the ATAPI device.
4542 *
4543 * LOCKING:
4544 * Inherited from caller.
6ae4cfb5
AL
4545 */
4546
1da177e4
LT
4547static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4548{
4549 struct ata_port *ap = qc->ap;
4550 struct ata_device *dev = qc->dev;
4551 unsigned int ireason, bc_lo, bc_hi, bytes;
4552 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4553
eec4c3f3
AL
4554 /* Abuse qc->result_tf for temp storage of intermediate TF
4555 * here to save some kernel stack usage.
4556 * For normal completion, qc->result_tf is not relevant. For
4557 * error, qc->result_tf is later overwritten by ata_qc_complete().
4558 * So, the correctness of qc->result_tf is not affected.
4559 */
4560 ap->ops->tf_read(ap, &qc->result_tf);
4561 ireason = qc->result_tf.nsect;
4562 bc_lo = qc->result_tf.lbam;
4563 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4564 bytes = (bc_hi << 8) | bc_lo;
4565
4566 /* shall be cleared to zero, indicating xfer of data */
4567 if (ireason & (1 << 0))
4568 goto err_out;
4569
4570 /* make sure transfer direction matches expected */
4571 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4572 if (do_write != i_write)
4573 goto err_out;
4574
44877b4e 4575 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 4576
1da177e4
LT
4577 __atapi_pio_bytes(qc, bytes);
4578
4579 return;
4580
4581err_out:
f15a1daf 4582 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4583 qc->err_mask |= AC_ERR_HSM;
14be71f4 4584 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4585}
4586
4587/**
c234fb00
AL
4588 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4589 * @ap: the target ata_port
4590 * @qc: qc in progress
1da177e4 4591 *
c234fb00
AL
4592 * RETURNS:
4593 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4594 */
c234fb00
AL
4595
4596static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4597{
c234fb00
AL
4598 if (qc->tf.flags & ATA_TFLAG_POLLING)
4599 return 1;
1da177e4 4600
c234fb00
AL
4601 if (ap->hsm_task_state == HSM_ST_FIRST) {
4602 if (qc->tf.protocol == ATA_PROT_PIO &&
4603 (qc->tf.flags & ATA_TFLAG_WRITE))
4604 return 1;
1da177e4 4605
c234fb00
AL
4606 if (is_atapi_taskfile(&qc->tf) &&
4607 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4608 return 1;
fe79e683
AL
4609 }
4610
c234fb00
AL
4611 return 0;
4612}
1da177e4 4613
c17ea20d
TH
4614/**
4615 * ata_hsm_qc_complete - finish a qc running on standard HSM
4616 * @qc: Command to complete
4617 * @in_wq: 1 if called from workqueue, 0 otherwise
4618 *
4619 * Finish @qc which is running on standard HSM.
4620 *
4621 * LOCKING:
cca3974e 4622 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4623 * Otherwise, none on entry and grabs host lock.
4624 */
4625static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4626{
4627 struct ata_port *ap = qc->ap;
4628 unsigned long flags;
4629
4630 if (ap->ops->error_handler) {
4631 if (in_wq) {
ba6a1308 4632 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4633
cca3974e
JG
4634 /* EH might have kicked in while host lock is
4635 * released.
c17ea20d
TH
4636 */
4637 qc = ata_qc_from_tag(ap, qc->tag);
4638 if (qc) {
4639 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4640 ap->ops->irq_on(ap);
c17ea20d
TH
4641 ata_qc_complete(qc);
4642 } else
4643 ata_port_freeze(ap);
4644 }
4645
ba6a1308 4646 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4647 } else {
4648 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4649 ata_qc_complete(qc);
4650 else
4651 ata_port_freeze(ap);
4652 }
4653 } else {
4654 if (in_wq) {
ba6a1308 4655 spin_lock_irqsave(ap->lock, flags);
83625006 4656 ap->ops->irq_on(ap);
c17ea20d 4657 ata_qc_complete(qc);
ba6a1308 4658 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4659 } else
4660 ata_qc_complete(qc);
4661 }
1da177e4 4662
c81e29b4 4663 ata_altstatus(ap); /* flush */
c17ea20d
TH
4664}
4665
bb5cb290
AL
4666/**
4667 * ata_hsm_move - move the HSM to the next state.
4668 * @ap: the target ata_port
4669 * @qc: qc in progress
4670 * @status: current device status
4671 * @in_wq: 1 if called from workqueue, 0 otherwise
4672 *
4673 * RETURNS:
4674 * 1 when poll next status needed, 0 otherwise.
4675 */
9a1004d0
TH
4676int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4677 u8 status, int in_wq)
e2cec771 4678{
bb5cb290
AL
4679 unsigned long flags = 0;
4680 int poll_next;
4681
6912ccd5
AL
4682 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4683
bb5cb290
AL
4684 /* Make sure ata_qc_issue_prot() does not throw things
4685 * like DMA polling into the workqueue. Notice that
4686 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4687 */
c234fb00 4688 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4689
e2cec771 4690fsm_start:
999bb6f4 4691 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 4692 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 4693
e2cec771
AL
4694 switch (ap->hsm_task_state) {
4695 case HSM_ST_FIRST:
bb5cb290
AL
4696 /* Send first data block or PACKET CDB */
4697
4698 /* If polling, we will stay in the work queue after
4699 * sending the data. Otherwise, interrupt handler
4700 * takes over after sending the data.
4701 */
4702 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4703
e2cec771 4704 /* check device status */
3655d1d3
AL
4705 if (unlikely((status & ATA_DRQ) == 0)) {
4706 /* handle BSY=0, DRQ=0 as error */
4707 if (likely(status & (ATA_ERR | ATA_DF)))
4708 /* device stops HSM for abort/error */
4709 qc->err_mask |= AC_ERR_DEV;
4710 else
4711 /* HSM violation. Let EH handle this */
4712 qc->err_mask |= AC_ERR_HSM;
4713
14be71f4 4714 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4715 goto fsm_start;
1da177e4
LT
4716 }
4717
71601958
AL
4718 /* Device should not ask for data transfer (DRQ=1)
4719 * when it finds something wrong.
eee6c32f
AL
4720 * We ignore DRQ here and stop the HSM by
4721 * changing hsm_task_state to HSM_ST_ERR and
4722 * let the EH abort the command or reset the device.
71601958
AL
4723 */
4724 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4725 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4726 "error, dev_stat 0x%X\n", status);
3655d1d3 4727 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4728 ap->hsm_task_state = HSM_ST_ERR;
4729 goto fsm_start;
71601958 4730 }
1da177e4 4731
bb5cb290
AL
4732 /* Send the CDB (atapi) or the first data block (ata pio out).
4733 * During the state transition, interrupt handler shouldn't
4734 * be invoked before the data transfer is complete and
4735 * hsm_task_state is changed. Hence, the following locking.
4736 */
4737 if (in_wq)
ba6a1308 4738 spin_lock_irqsave(ap->lock, flags);
1da177e4 4739
bb5cb290
AL
4740 if (qc->tf.protocol == ATA_PROT_PIO) {
4741 /* PIO data out protocol.
4742 * send first data block.
4743 */
0565c26d 4744
bb5cb290
AL
4745 /* ata_pio_sectors() might change the state
4746 * to HSM_ST_LAST. so, the state is changed here
4747 * before ata_pio_sectors().
4748 */
4749 ap->hsm_task_state = HSM_ST;
4750 ata_pio_sectors(qc);
4751 ata_altstatus(ap); /* flush */
4752 } else
4753 /* send CDB */
4754 atapi_send_cdb(ap, qc);
4755
4756 if (in_wq)
ba6a1308 4757 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4758
4759 /* if polling, ata_pio_task() handles the rest.
4760 * otherwise, interrupt handler takes over from here.
4761 */
e2cec771 4762 break;
1c848984 4763
e2cec771
AL
4764 case HSM_ST:
4765 /* complete command or read/write the data register */
4766 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4767 /* ATAPI PIO protocol */
4768 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4769 /* No more data to transfer or device error.
4770 * Device error will be tagged in HSM_ST_LAST.
4771 */
e2cec771
AL
4772 ap->hsm_task_state = HSM_ST_LAST;
4773 goto fsm_start;
4774 }
1da177e4 4775
71601958
AL
4776 /* Device should not ask for data transfer (DRQ=1)
4777 * when it finds something wrong.
eee6c32f
AL
4778 * We ignore DRQ here and stop the HSM by
4779 * changing hsm_task_state to HSM_ST_ERR and
4780 * let the EH abort the command or reset the device.
71601958
AL
4781 */
4782 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4783 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4784 "device error, dev_stat 0x%X\n",
4785 status);
3655d1d3 4786 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4787 ap->hsm_task_state = HSM_ST_ERR;
4788 goto fsm_start;
71601958 4789 }
1da177e4 4790
e2cec771 4791 atapi_pio_bytes(qc);
7fb6ec28 4792
e2cec771
AL
4793 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4794 /* bad ireason reported by device */
4795 goto fsm_start;
1da177e4 4796
e2cec771
AL
4797 } else {
4798 /* ATA PIO protocol */
4799 if (unlikely((status & ATA_DRQ) == 0)) {
4800 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4801 if (likely(status & (ATA_ERR | ATA_DF)))
4802 /* device stops HSM for abort/error */
4803 qc->err_mask |= AC_ERR_DEV;
4804 else
55a8e2c8
TH
4805 /* HSM violation. Let EH handle this.
4806 * Phantom devices also trigger this
4807 * condition. Mark hint.
4808 */
4809 qc->err_mask |= AC_ERR_HSM |
4810 AC_ERR_NODEV_HINT;
3655d1d3 4811
e2cec771
AL
4812 ap->hsm_task_state = HSM_ST_ERR;
4813 goto fsm_start;
4814 }
1da177e4 4815
eee6c32f
AL
4816 /* For PIO reads, some devices may ask for
4817			 * data transfer (DRQ=1) along with ERR=1.
4818 * We respect DRQ here and transfer one
4819 * block of junk data before changing the
4820 * hsm_task_state to HSM_ST_ERR.
4821 *
4822 * For PIO writes, ERR=1 DRQ=1 doesn't make
4823 * sense since the data block has been
4824 * transferred to the device.
71601958
AL
4825 */
4826 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4827				/* data might be corrupted */
4828 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4829
4830 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4831 ata_pio_sectors(qc);
4832 ata_altstatus(ap);
4833 status = ata_wait_idle(ap);
4834 }
4835
3655d1d3
AL
4836 if (status & (ATA_BUSY | ATA_DRQ))
4837 qc->err_mask |= AC_ERR_HSM;
4838
eee6c32f
AL
4839 /* ata_pio_sectors() might change the
4840 * state to HSM_ST_LAST. so, the state
4841 * is changed after ata_pio_sectors().
4842 */
4843 ap->hsm_task_state = HSM_ST_ERR;
4844 goto fsm_start;
71601958
AL
4845 }
4846
e2cec771
AL
4847 ata_pio_sectors(qc);
4848
4849 if (ap->hsm_task_state == HSM_ST_LAST &&
4850 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4851 /* all data read */
4852 ata_altstatus(ap);
52a32205 4853 status = ata_wait_idle(ap);
e2cec771
AL
4854 goto fsm_start;
4855 }
4856 }
4857
4858 ata_altstatus(ap); /* flush */
bb5cb290 4859 poll_next = 1;
1da177e4
LT
4860 break;
4861
14be71f4 4862 case HSM_ST_LAST:
6912ccd5
AL
4863 if (unlikely(!ata_ok(status))) {
4864 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4865 ap->hsm_task_state = HSM_ST_ERR;
4866 goto fsm_start;
4867 }
4868
4869 /* no more data to transfer */
4332a771 4870 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 4871 ap->print_id, qc->dev->devno, status);
e2cec771 4872
6912ccd5
AL
4873 WARN_ON(qc->err_mask);
4874
e2cec771 4875 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4876
e2cec771 4877 /* complete taskfile transaction */
c17ea20d 4878 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4879
4880 poll_next = 0;
1da177e4
LT
4881 break;
4882
14be71f4 4883 case HSM_ST_ERR:
e2cec771
AL
4884 /* make sure qc->err_mask is available to
4885 * know what's wrong and recover
4886 */
4887 WARN_ON(qc->err_mask == 0);
4888
4889 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4890
999bb6f4 4891 /* complete taskfile transaction */
c17ea20d 4892 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4893
4894 poll_next = 0;
e2cec771
AL
4895 break;
4896 default:
bb5cb290 4897 poll_next = 0;
6912ccd5 4898 BUG();
1da177e4
LT
4899 }
4900
bb5cb290 4901 return poll_next;
1da177e4
LT
4902}
4903
65f27f38 4904static void ata_pio_task(struct work_struct *work)
8061f5f0 4905{
65f27f38
DH
4906 struct ata_port *ap =
4907 container_of(work, struct ata_port, port_task.work);
4908 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 4909 u8 status;
a1af3734 4910 int poll_next;
8061f5f0 4911
7fb6ec28 4912fsm_start:
a1af3734 4913 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4914
a1af3734
AL
4915 /*
4916 * This is purely heuristic. This is a fast path.
4917 * Sometimes when we enter, BSY will be cleared in
4918 * a chk-status or two. If not, the drive is probably seeking
4919 * or something. Snooze for a couple msecs, then
4920 * chk-status again. If still busy, queue delayed work.
4921 */
4922 status = ata_busy_wait(ap, ATA_BUSY, 5);
4923 if (status & ATA_BUSY) {
4924 msleep(2);
4925 status = ata_busy_wait(ap, ATA_BUSY, 10);
4926 if (status & ATA_BUSY) {
31ce6dae 4927 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4928 return;
4929 }
8061f5f0
TH
4930 }
4931
a1af3734
AL
4932 /* move the HSM */
4933 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4934
a1af3734
AL
4935 /* another command or interrupt handler
4936 * may be running at this point.
4937 */
4938 if (poll_next)
7fb6ec28 4939 goto fsm_start;
8061f5f0
TH
4940}
4941
1da177e4
LT
4942/**
4943 * ata_qc_new - Request an available ATA command, for queueing
4944 * @ap: Port from which an available command is requested
4946 *
4947 * LOCKING:
0cba632b 4948 * None.
1da177e4
LT
4949 */
4950
4951static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4952{
4953 struct ata_queued_cmd *qc = NULL;
4954 unsigned int i;
4955
e3180499 4956 /* no command while frozen */
b51e9e5d 4957 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4958 return NULL;
4959
2ab7db1f
TH
4960	/* the last tag is reserved for the internal command. */
4961 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4962 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4963 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4964 break;
4965 }
4966
4967 if (qc)
4968 qc->tag = i;
4969
4970 return qc;
4971}
4972
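
/* Editor's note, assuming the ATA_MAX_QUEUE = 32 definition of this
 * era: the loop above scans tags 0..30 with test_and_set_bit(), leaving
 * the last tag (ATA_MAX_QUEUE - 1, a.k.a. ATA_TAG_INTERNAL) for
 * commands issued internally via ata_exec_internal().
 */
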
4973/**
4974 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4975 * @dev: Device from which we request an available command structure
4976 *
4977 * LOCKING:
0cba632b 4978 * None.
1da177e4
LT
4979 */
4980
3373efd8 4981struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4982{
3373efd8 4983 struct ata_port *ap = dev->ap;
1da177e4
LT
4984 struct ata_queued_cmd *qc;
4985
4986 qc = ata_qc_new(ap);
4987 if (qc) {
1da177e4
LT
4988 qc->scsicmd = NULL;
4989 qc->ap = ap;
4990 qc->dev = dev;
1da177e4 4991
2c13b7ce 4992 ata_qc_reinit(qc);
1da177e4
LT
4993 }
4994
4995 return qc;
4996}
4997
1da177e4
LT
4998/**
4999 * ata_qc_free - free unused ata_queued_cmd
5000 * @qc: Command to complete
5001 *
5002 * Designed to free unused ata_queued_cmd object
5003 * in case something prevents using it.
5004 *
5005 * LOCKING:
cca3974e 5006 * spin_lock_irqsave(host lock)
1da177e4
LT
5007 */
5008void ata_qc_free(struct ata_queued_cmd *qc)
5009{
4ba946e9
TH
5010 struct ata_port *ap = qc->ap;
5011 unsigned int tag;
5012
a4631474 5013 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5014
4ba946e9
TH
5015 qc->flags = 0;
5016 tag = qc->tag;
5017 if (likely(ata_tag_valid(tag))) {
4ba946e9 5018 qc->tag = ATA_TAG_POISON;
6cec4a39 5019 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5020 }
1da177e4
LT
5021}
5022
76014427 5023void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5024{
dedaf2b0
TH
5025 struct ata_port *ap = qc->ap;
5026
a4631474
TH
5027 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5028 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
5029
5030 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5031 ata_sg_clean(qc);
5032
7401abf2 5033 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
5034 if (qc->tf.protocol == ATA_PROT_NCQ)
5035 ap->sactive &= ~(1 << qc->tag);
5036 else
5037 ap->active_tag = ATA_TAG_POISON;
7401abf2 5038
3f3791d3
AL
5039 /* atapi: mark qc as inactive to prevent the interrupt handler
5040 * from completing the command twice later, before the error handler
5041 * is called. (when rc != 0 and atapi request sense is needed)
5042 */
5043 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 5044 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 5045
1da177e4 5046 /* call completion callback */
77853bf2 5047 qc->complete_fn(qc);
1da177e4
LT
5048}
5049
39599a53
TH
5050static void fill_result_tf(struct ata_queued_cmd *qc)
5051{
5052 struct ata_port *ap = qc->ap;
5053
39599a53 5054 qc->result_tf.flags = qc->tf.flags;
4742d54f 5055 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5056}
5057
f686bcb8
TH
5058/**
5059 * ata_qc_complete - Complete an active ATA command
5060 * @qc: Command to complete
5062 *
5063 * Indicate to the mid and upper layers that an ATA
5064 * command has completed, with either an ok or not-ok status.
5065 *
5066 * LOCKING:
cca3974e 5067 * spin_lock_irqsave(host lock)
f686bcb8
TH
5068 */
5069void ata_qc_complete(struct ata_queued_cmd *qc)
5070{
5071 struct ata_port *ap = qc->ap;
5072
5073 /* XXX: New EH and old EH use different mechanisms to
5074 * synchronize EH with regular execution path.
5075 *
5076 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5077 * Normal execution path is responsible for not accessing a
5078 * failed qc. libata core enforces the rule by returning NULL
5079 * from ata_qc_from_tag() for failed qcs.
5080 *
5081 * Old EH depends on ata_qc_complete() nullifying completion
5082 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5083 * not synchronize with interrupt handler. Only PIO task is
5084 * taken care of.
5085 */
5086 if (ap->ops->error_handler) {
b51e9e5d 5087 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
5088
5089 if (unlikely(qc->err_mask))
5090 qc->flags |= ATA_QCFLAG_FAILED;
5091
5092 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5093 if (!ata_tag_internal(qc->tag)) {
5094 /* always fill result TF for failed qc */
39599a53 5095 fill_result_tf(qc);
f686bcb8
TH
5096 ata_qc_schedule_eh(qc);
5097 return;
5098 }
5099 }
5100
5101 /* read result TF if requested */
5102 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5103 fill_result_tf(qc);
f686bcb8
TH
5104
5105 __ata_qc_complete(qc);
5106 } else {
5107 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5108 return;
5109
5110 /* read result TF if failed or requested */
5111 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5112 fill_result_tf(qc);
f686bcb8
TH
5113
5114 __ata_qc_complete(qc);
5115 }
5116}
5117
dedaf2b0
TH
5118/**
5119 * ata_qc_complete_multiple - Complete multiple qcs successfully
5120 * @ap: port in question
5121 * @qc_active: new qc_active mask
5122 * @finish_qc: LLDD callback invoked before completing a qc
5123 *
5124 *	Complete in-flight commands. This function is meant to be
5125 *	called from the low-level driver's interrupt routine to complete
5126 *	requests normally. ap->qc_active and @qc_active are compared
5127 *	and commands are completed accordingly.
5128 *
5129 * LOCKING:
cca3974e 5130 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5131 *
5132 * RETURNS:
5133 * Number of completed commands on success, -errno otherwise.
5134 */
5135int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5136 void (*finish_qc)(struct ata_queued_cmd *))
5137{
5138 int nr_done = 0;
5139 u32 done_mask;
5140 int i;
5141
5142 done_mask = ap->qc_active ^ qc_active;
5143
5144 if (unlikely(done_mask & qc_active)) {
5145 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5146 "(%08x->%08x)\n", ap->qc_active, qc_active);
5147 return -EINVAL;
5148 }
5149
5150 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5151 struct ata_queued_cmd *qc;
5152
5153 if (!(done_mask & (1 << i)))
5154 continue;
5155
5156 if ((qc = ata_qc_from_tag(ap, i))) {
5157 if (finish_qc)
5158 finish_qc(qc);
5159 ata_qc_complete(qc);
5160 nr_done++;
5161 }
5162 }
5163
5164 return nr_done;
5165}
5166
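
/* Editor's worked example, not in the source: with ap->qc_active = 0x0D
 * (tags 0, 2 and 3 in flight) and the controller reporting qc_active =
 * 0x09 (tags 0 and 3 still busy), done_mask = 0x0D ^ 0x09 = 0x04, so
 * only tag 2 is completed. A bit set in done_mask that is also set in
 * the new qc_active would mean a tag appeared out of nowhere; that is
 * the illegal transition rejected above.
 */
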
1da177e4
LT
5167static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5168{
5169 struct ata_port *ap = qc->ap;
5170
5171 switch (qc->tf.protocol) {
3dc1d881 5172 case ATA_PROT_NCQ:
1da177e4
LT
5173 case ATA_PROT_DMA:
5174 case ATA_PROT_ATAPI_DMA:
5175 return 1;
5176
5177 case ATA_PROT_ATAPI:
5178 case ATA_PROT_PIO:
1da177e4
LT
5179 if (ap->flags & ATA_FLAG_PIO_DMA)
5180 return 1;
5181
5182 /* fall through */
5183
5184 default:
5185 return 0;
5186 }
5187
5188 /* never reached */
5189}
5190
5191/**
5192 * ata_qc_issue - issue taskfile to device
5193 * @qc: command to issue to device
5194 *
5195 *	Prepare an ATA command for submission to the device.
5196 * This includes mapping the data into a DMA-able
5197 * area, filling in the S/G table, and finally
5198 * writing the taskfile to hardware, starting the command.
5199 *
5200 * LOCKING:
cca3974e 5201 * spin_lock_irqsave(host lock)
1da177e4 5202 */
8e0e694a 5203void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5204{
5205 struct ata_port *ap = qc->ap;
5206
dedaf2b0
TH
5207 /* Make sure only one non-NCQ command is outstanding. The
5208 * check is skipped for old EH because it reuses active qc to
5209 * request ATAPI sense.
5210 */
5211 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5212
5213 if (qc->tf.protocol == ATA_PROT_NCQ) {
5214 WARN_ON(ap->sactive & (1 << qc->tag));
5215 ap->sactive |= 1 << qc->tag;
5216 } else {
5217 WARN_ON(ap->sactive);
5218 ap->active_tag = qc->tag;
5219 }
5220
e4a70e76 5221 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5222 ap->qc_active |= 1 << qc->tag;
e4a70e76 5223
1da177e4
LT
5224 if (ata_should_dma_map(qc)) {
5225 if (qc->flags & ATA_QCFLAG_SG) {
5226 if (ata_sg_setup(qc))
8e436af9 5227 goto sg_err;
1da177e4
LT
5228 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5229 if (ata_sg_setup_one(qc))
8e436af9 5230 goto sg_err;
1da177e4
LT
5231 }
5232 } else {
5233 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5234 }
5235
5236 ap->ops->qc_prep(qc);
5237
8e0e694a
TH
5238 qc->err_mask |= ap->ops->qc_issue(qc);
5239 if (unlikely(qc->err_mask))
5240 goto err;
5241 return;
1da177e4 5242
8e436af9
TH
5243sg_err:
5244 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
5245 qc->err_mask |= AC_ERR_SYSTEM;
5246err:
5247 ata_qc_complete(qc);
1da177e4
LT
5248}
5249
5250/**
5251 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5252 * @qc: command to issue to device
5253 *
5254 * Using various libata functions and hooks, this function
5255 * starts an ATA command. ATA commands are grouped into
5256 * classes called "protocols", and issuing each type of protocol
5257 * is slightly different.
5258 *
0baab86b
EF
5259 * May be used as the qc_issue() entry in ata_port_operations.
5260 *
1da177e4 5261 * LOCKING:
cca3974e 5262 * spin_lock_irqsave(host lock)
1da177e4
LT
5263 *
5264 * RETURNS:
9a3d9eb0 5265 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5266 */
5267
9a3d9eb0 5268unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
5269{
5270 struct ata_port *ap = qc->ap;
5271
e50362ec
AL
5272 /* Use polling pio if the LLD doesn't handle
5273 * interrupt driven pio and atapi CDB interrupt.
5274 */
5275 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5276 switch (qc->tf.protocol) {
5277 case ATA_PROT_PIO:
e3472cbe 5278 case ATA_PROT_NODATA:
e50362ec
AL
5279 case ATA_PROT_ATAPI:
5280 case ATA_PROT_ATAPI_NODATA:
5281 qc->tf.flags |= ATA_TFLAG_POLLING;
5282 break;
5283 case ATA_PROT_ATAPI_DMA:
5284 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 5285 /* see ata_dma_blacklisted() */
e50362ec
AL
5286 BUG();
5287 break;
5288 default:
5289 break;
5290 }
5291 }
5292
3d3cca37
TH
5293 /* Some controllers show flaky interrupt behavior after
5294 * setting xfer mode. Use polling instead.
5295 */
5296 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
5297 qc->tf.feature == SETFEATURES_XFER) &&
5298 (ap->flags & ATA_FLAG_SETXFER_POLLING))
5299 qc->tf.flags |= ATA_TFLAG_POLLING;
5300
312f7da2 5301 /* select the device */
1da177e4
LT
5302 ata_dev_select(ap, qc->dev->devno, 1, 0);
5303
312f7da2 5304 /* start the command */
1da177e4
LT
5305 switch (qc->tf.protocol) {
5306 case ATA_PROT_NODATA:
312f7da2
AL
5307 if (qc->tf.flags & ATA_TFLAG_POLLING)
5308 ata_qc_set_polling(qc);
5309
e5338254 5310 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
5311 ap->hsm_task_state = HSM_ST_LAST;
5312
5313 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5314 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5315
1da177e4
LT
5316 break;
5317
5318 case ATA_PROT_DMA:
587005de 5319 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5320
1da177e4
LT
5321 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5322 ap->ops->bmdma_setup(qc); /* set up bmdma */
5323 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5324 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5325 break;
5326
312f7da2
AL
5327 case ATA_PROT_PIO:
5328 if (qc->tf.flags & ATA_TFLAG_POLLING)
5329 ata_qc_set_polling(qc);
1da177e4 5330
e5338254 5331 ata_tf_to_host(ap, &qc->tf);
312f7da2 5332
54f00389
AL
5333 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5334 /* PIO data out protocol */
5335 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5336 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5337
5338 /* always send first data block using
e27486db 5339 * the ata_pio_task() codepath.
54f00389 5340 */
312f7da2 5341 } else {
54f00389
AL
5342 /* PIO data in protocol */
5343 ap->hsm_task_state = HSM_ST;
5344
5345 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5346 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5347
5348 /* if polling, ata_pio_task() handles the rest.
5349 * otherwise, interrupt handler takes over from here.
5350 */
312f7da2
AL
5351 }
5352
1da177e4
LT
5353 break;
5354
1da177e4 5355 case ATA_PROT_ATAPI:
1da177e4 5356 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5357 if (qc->tf.flags & ATA_TFLAG_POLLING)
5358 ata_qc_set_polling(qc);
5359
e5338254 5360 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5361
312f7da2
AL
5362 ap->hsm_task_state = HSM_ST_FIRST;
5363
5364 /* send cdb by polling if no cdb interrupt */
5365 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5366 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5367 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5368 break;
5369
5370 case ATA_PROT_ATAPI_DMA:
587005de 5371 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5372
1da177e4
LT
5373 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5374 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5375 ap->hsm_task_state = HSM_ST_FIRST;
5376
5377 /* send cdb by polling if no cdb interrupt */
5378 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5379 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5380 break;
5381
5382 default:
5383 WARN_ON(1);
9a3d9eb0 5384 return AC_ERR_SYSTEM;
1da177e4
LT
5385 }
5386
5387 return 0;
5388}
5389
1da177e4
LT
5390/**
5391 * ata_host_intr - Handle host interrupt for given (port, task)
5392 * @ap: Port on which interrupt arrived (possibly...)
5393 * @qc: Taskfile currently active in engine
5394 *
5395 * Handle host interrupt for given queued command. Currently,
5396 * only DMA interrupts are handled. All other commands are
5397 * handled via polling with interrupts disabled (nIEN bit).
5398 *
5399 * LOCKING:
cca3974e 5400 * spin_lock_irqsave(host lock)
1da177e4
LT
5401 *
5402 * RETURNS:
5403 * One if interrupt was handled, zero if not (shared irq).
5404 */
5405
5406inline unsigned int ata_host_intr(struct ata_port *ap,
5407 struct ata_queued_cmd *qc)
5408{
ea54763f 5409 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 5410 u8 status, host_stat = 0;
1da177e4 5411
312f7da2 5412 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5413 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5414
312f7da2
AL
5415 /* Check whether we are expecting interrupt in this state */
5416 switch (ap->hsm_task_state) {
5417 case HSM_ST_FIRST:
6912ccd5
AL
5418 /* Some pre-ATAPI-4 devices assert INTRQ
5419 * at this state when ready to receive CDB.
5420 */
1da177e4 5421
312f7da2
AL
5422		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5423 * The flag was turned on only for atapi devices.
5424 * No need to check is_atapi_taskfile(&qc->tf) again.
5425 */
5426 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5427 goto idle_irq;
1da177e4 5428 break;
312f7da2
AL
5429 case HSM_ST_LAST:
5430 if (qc->tf.protocol == ATA_PROT_DMA ||
5431 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5432 /* check status of DMA engine */
5433 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
5434 VPRINTK("ata%u: host_stat 0x%X\n",
5435 ap->print_id, host_stat);
312f7da2
AL
5436
5437 /* if it's not our irq... */
5438 if (!(host_stat & ATA_DMA_INTR))
5439 goto idle_irq;
5440
5441 /* before we do anything else, clear DMA-Start bit */
5442 ap->ops->bmdma_stop(qc);
a4f16610
AL
5443
5444 if (unlikely(host_stat & ATA_DMA_ERR)) {
5445 /* error when transfering data to/from memory */
5446 qc->err_mask |= AC_ERR_HOST_BUS;
5447 ap->hsm_task_state = HSM_ST_ERR;
5448 }
312f7da2
AL
5449 }
5450 break;
5451 case HSM_ST:
5452 break;
1da177e4
LT
5453 default:
5454 goto idle_irq;
5455 }
5456
312f7da2
AL
5457 /* check altstatus */
5458 status = ata_altstatus(ap);
5459 if (status & ATA_BUSY)
5460 goto idle_irq;
1da177e4 5461
312f7da2
AL
5462 /* check main status, clearing INTRQ */
5463 status = ata_chk_status(ap);
5464 if (unlikely(status & ATA_BUSY))
5465 goto idle_irq;
1da177e4 5466
312f7da2
AL
5467 /* ack bmdma irq events */
5468 ap->ops->irq_clear(ap);
1da177e4 5469
bb5cb290 5470 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5471
5472 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5473 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5474 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5475
1da177e4
LT
5476 return 1; /* irq handled */
5477
5478idle_irq:
5479 ap->stats.idle_irq++;
5480
5481#ifdef ATA_IRQ_TRAP
5482 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5483 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5484 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5485 return 1;
1da177e4
LT
5486 }
5487#endif
5488 return 0; /* irq not handled */
5489}
5490
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 *	sata_scr_valid - test whether SCRs are accessible
 *	@ap: ATA port to test SCR accessibility for
 *
 *	Test whether SCRs are accessible for @ap.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_port *ap)
{
	return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
}

/**
 *	sata_scr_read - read SCR register of the specified port
 *	@ap: ATA port to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @ap into *@val.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
{
	if (sata_scr_valid(ap)) {
		*val = ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}

/**
 *	sata_scr_write - write SCR register of the specified port
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @ap.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_write.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		return 0;
	}
	return -EOPNOTSUPP;
}

/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that it
 *	performs a flush after writing to the register.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}
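
/*
 * Illustrative sketch (not part of libata proper): how a caller
 * typically combines the SCR helpers above, e.g. to cap the link
 * speed to 1.5 Gbps via the SControl SPD field (bits 7:4) and read
 * back SStatus.  The function name is hypothetical.
 */
static void example_limit_link_speed(struct ata_port *ap)
{
	u32 scontrol, sstatus;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return;		/* no SCRs - nothing to do */

	scontrol = (scontrol & ~0xf0) | 0x10;	/* SPD = 1, limit to 1.5 Gbps */
	sata_scr_write_flush(ap, SCR_CONTROL, scontrol);

	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0)
		ata_port_printk(ap, KERN_INFO, "SStatus 0x%x\n", sstatus);
}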

/**
 *	ata_port_online - test whether the given port is online
 *	@ap: ATA port to test
 *
 *	Test whether @ap is online.  Note that this function returns 0
 *	if online status of @ap cannot be obtained, so
 *	ata_port_online(ap) != !ata_port_offline(ap).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port online status is available and online.
 */
int ata_port_online(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}

/**
 *	ata_port_offline - test whether the given port is offline
 *	@ap: ATA port to test
 *
 *	Test whether @ap is offline.  Note that this function returns
 *	0 if offline status of @ap cannot be obtained, so
 *	ata_port_online(ap) != !ata_port_offline(ap).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port offline status is available and offline.
 */
int ata_port_offline(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}
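
/*
 * Illustrative sketch: because both helpers above return 0 when SCRs
 * are unavailable, "not online" must not be treated as "offline".
 * Callers distinguish three cases; the function name is hypothetical.
 */
static const char *example_link_state(struct ata_port *ap)
{
	if (ata_port_online(ap))
		return "online";
	if (ata_port_offline(ap))
		return "offline";
	return "unknown";	/* e.g. port without SCR access */
}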

int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}

#ifdef CONFIG_PM
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}

/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

 fail:
	ata_host_resume(host);
	return rc;
}

/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
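
/*
 * Illustrative sketch: a platform-style LLD would wire the two
 * helpers above into its PM callbacks roughly as follows.  The
 * my_platform_* names are hypothetical and <linux/platform_device.h>
 * is assumed; PCI drivers should instead use the
 * ata_pci_device_suspend()/resume() wrappers defined further below.
 */
static int my_platform_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	return ata_host_suspend(host, mesg);
}

static int my_platform_resume(struct platform_device *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	ata_host_resume(host);
	return 0;
}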
#endif

/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}

/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}

/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocated ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;
	unsigned int i;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;

	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	ap->cbl = ATA_CBL_NONE;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}

static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}

/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  An LLD calls
 *	this function to allocate a host, initializes it fully, and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}

/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->ops = pi->port_ops;

		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
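
/*
 * Illustrative sketch: a typical LLD builds a port_info array and
 * lets ata_host_alloc_pinfo() propagate it to all ports.  The
 * my_port_ops/my_driver_pi/my_ppi names are hypothetical.
 */
static const struct ata_port_operations my_port_ops = {
	/* LLD taskfile/bmdma/EH hooks would go here */
};

static const struct ata_port_info my_driver_pi = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= 0x1f,	/* PIO 0-4 */
	.mwdma_mask	= 0x07,	/* MWDMA 0-2 */
	.udma_mask	= 0x3f,	/* UDMA 0-5 */
	.port_ops	= &my_port_ops,
};

/* NULL termination: both ports of a two-port host reuse the last entry */
static const struct ata_port_info *my_ppi[] = { &my_driver_pi, NULL };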

/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, it's set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}

/**
 *	ata_sas_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}

/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probes registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int irq_line;
		u32 scontrol;
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		/* report the secondary IRQ for second channel legacy */
		irq_line = host->irq;
		if (i == 1 && host->irq2)
			irq_line = host->irq2;

		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		/* print per-port info to dmesg */
		if (!ata_port_is_dummy(ap))
			ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
					"ctl 0x%p bmdma 0x%p irq %d\n",
					ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->ioaddr.cmd_addr,
					ap->ioaddr.ctl_addr,
					ap->ioaddr.bmdma_addr,
					irq_line);
		else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int rc;

		/* probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	return 0;
}

/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessary
 *	arguments and performs the three steps in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
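
/*
 * Illustrative sketch: the tail of a typical LLD probe routine using
 * the helpers above.  my_ppi is the hypothetical port_info array from
 * the earlier sketch; my_sht is a hypothetical SCSI host template the
 * LLD fills with the standard libata fields elsewhere.
 */
static struct scsi_host_template my_sht;	/* hypothetical, filled elsewhere */

static int my_probe_tail(struct device *dev, void __iomem *mmio, int irq)
{
	struct ata_host *host;

	host = ata_host_alloc_pinfo(dev, my_ppi, 2);
	if (!host)
		return -ENOMEM;

	/* ... LLD fills host->ports[i]->ioaddr from mmio here ... */

	/* start ports, grab the (possibly shared) IRQ, register */
	return ata_host_activate(host, irq, ata_interrupt,
				 IRQF_SHARED, &my_sht);
}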

/**
 *	ata_port_detach - Detach ATA port in preparation for device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retries will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}

/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);
}

/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
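
/*
 * Illustrative sketch: an LLD typically maps the command block once
 * and lets ata_std_ports() derive the per-register addresses.  The
 * function name and the 0x10 control-block offset are hypothetical
 * example values; mmio is the LLD's mapped register base.
 */
static void example_setup_ioaddr(struct ata_port *ap, void __iomem *mmio)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = mmio;		/* taskfile block base */
	ioaddr->ctl_addr = mmio + 0x10;		/* device control (example offset) */
	ioaddr->altstatus_addr = ioaddr->ctl_addr;
	ata_std_ports(ioaddr);			/* fill in the rest */
}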


#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that a hot-unplug
 *	or module unload event has occurred.  Detach all ports.
 *	Resource release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
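
/*
 * Illustrative sketch: pci_test_config_bits() is typically used to
 * check per-channel enable bits in a controller's PCI config space,
 * e.g. from a prereset hook.  The register offsets and masks below
 * are made-up example values, not taken from any real controller.
 */
static const struct pci_bits example_enable_bits[] = {
	{ 0x41, 1, 0x80, 0x80 },	/* reg, width (bytes), mask, val: port 0 */
	{ 0x43, 1, 0x80, 0x80 },	/* port 1 */
};

static int example_port_enabled(struct pci_dev *pdev, int port)
{
	/* returns 1 if enabled, 0 if disabled, -EINVAL on bad width */
	return pci_test_config_bits(pdev, &example_enable_bits[port]);
}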

#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
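
/*
 * Illustrative sketch: a PCI LLD with no special PM needs can point
 * its pci_driver callbacks straight at the generic helpers above.
 * The my_* names, ID table and probe stub are all hypothetical.
 */
static const struct pci_device_id my_pci_ids[] = { { } };	/* hypothetical */

static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return -ENODEV;		/* real probe logic lives here */
}

static struct pci_driver my_pci_driver = {
	.name		= "my_ata_driver",
	.id_table	= my_pci_ids,
	.probe		= my_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};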
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */


static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
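
/*
 * Illustrative sketch: ata_ratelimit() gates noisy messages to at
 * most one per HZ/5 jiffies (200ms).  Typical use is an
 * interrupt-time error path; the function name is hypothetical.
 */
static void example_report_phy_event(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"PHY event, ignoring (rate limited)\n");
}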

/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
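
/*
 * Illustrative sketch: waiting up to one second for a controller to
 * clear a busy bit in a 32-bit MMIO status register.  EXAMPLE_BUSY
 * and the register layout are hypothetical.  Note the mask/val pair:
 * the helper polls while (reg & mask) == val, i.e. while still busy.
 */
#define EXAMPLE_BUSY	(1U << 31)

static int example_wait_idle(void __iomem *status_reg)
{
	u32 tmp;

	/* poll every 10ms, give up after 1000ms */
	tmp = ata_wait_register(status_reg, EXAMPLE_BUSY, EXAMPLE_BUSY,
				10, 1000);
	return (tmp & EXAMPLE_BUSY) ? -EBUSY : 0;
}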

/*
 * Dummy port_ops
 */
static void ata_dummy_noret(struct ata_port *ap)	{ }
static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
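
/*
 * Illustrative sketch: dummy ports are how an LLD represents a
 * channel that exists in hardware but must not be used, while
 * keeping port numbering stable.  my_driver_pi is the hypothetical
 * port_info from the earlier sketch.
 */
static const struct ata_port_info *example_ppi_one_channel[] = {
	&my_driver_pi,		/* port 0: real */
	&ata_dummy_port_info,	/* port 1: present but unusable */
};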

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
#endif /* CONFIG_PM */

EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
EXPORT_SYMBOL_GPL(ata_irq_ack);
EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);