Merge branch 'upstream-2.6.17'
[linux-block.git] / drivers / scsi / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
35#include <linux/config.h>
36#include <linux/kernel.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/init.h>
40#include <linux/list.h>
41#include <linux/mm.h>
42#include <linux/highmem.h>
43#include <linux/spinlock.h>
44#include <linux/blkdev.h>
45#include <linux/delay.h>
46#include <linux/timer.h>
47#include <linux/interrupt.h>
48#include <linux/completion.h>
49#include <linux/suspend.h>
50#include <linux/workqueue.h>
67846b30 51#include <linux/jiffies.h>
378f058c 52#include <linux/scatterlist.h>
1da177e4 53#include <scsi/scsi.h>
1da177e4 54#include "scsi_priv.h"
193515d5 55#include <scsi/scsi_cmnd.h>
1da177e4
LT
56#include <scsi/scsi_host.h>
57#include <linux/libata.h>
58#include <asm/io.h>
59#include <asm/semaphore.h>
60#include <asm/byteorder.h>
61
62#include "libata.h"
63
59a10b17 64static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
8bf62ece 65static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
1da177e4
LT
66static void ata_set_mode(struct ata_port *ap);
67static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
057ace5e 68static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
1da177e4 69static int fgb(u32 bitmap);
057ace5e 70static int ata_choose_xfer_mode(const struct ata_port *ap,
1da177e4
LT
71 u8 *xfer_mode_out,
72 unsigned int *xfer_shift_out);
e33b9dfa 73static void ata_pio_error(struct ata_port *ap);
1da177e4
LT
74
75static unsigned int ata_unique_id = 1;
76static struct workqueue_struct *ata_wq;
77
1623c81e
JG
78int atapi_enabled = 0;
79module_param(atapi_enabled, int, 0444);
80MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
81
1da177e4
LT
82MODULE_AUTHOR("Jeff Garzik");
83MODULE_DESCRIPTION("Library module for ATA devices");
84MODULE_LICENSE("GPL");
85MODULE_VERSION(DRV_VERSION);
86
87/**
6f0ef4fa 88 * ata_tf_load_pio - send taskfile registers to host controller
1da177e4
LT
89 * @ap: Port to which output is sent
90 * @tf: ATA taskfile register set
91 *
92 * Outputs ATA taskfile to standard ATA host controller.
93 *
94 * LOCKING:
95 * Inherited from caller.
96 */
97
057ace5e 98static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
1da177e4
LT
99{
100 struct ata_ioports *ioaddr = &ap->ioaddr;
101 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
102
103 if (tf->ctl != ap->last_ctl) {
104 outb(tf->ctl, ioaddr->ctl_addr);
105 ap->last_ctl = tf->ctl;
106 ata_wait_idle(ap);
107 }
108
109 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
110 outb(tf->hob_feature, ioaddr->feature_addr);
111 outb(tf->hob_nsect, ioaddr->nsect_addr);
112 outb(tf->hob_lbal, ioaddr->lbal_addr);
113 outb(tf->hob_lbam, ioaddr->lbam_addr);
114 outb(tf->hob_lbah, ioaddr->lbah_addr);
115 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
116 tf->hob_feature,
117 tf->hob_nsect,
118 tf->hob_lbal,
119 tf->hob_lbam,
120 tf->hob_lbah);
121 }
122
123 if (is_addr) {
124 outb(tf->feature, ioaddr->feature_addr);
125 outb(tf->nsect, ioaddr->nsect_addr);
126 outb(tf->lbal, ioaddr->lbal_addr);
127 outb(tf->lbam, ioaddr->lbam_addr);
128 outb(tf->lbah, ioaddr->lbah_addr);
129 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
130 tf->feature,
131 tf->nsect,
132 tf->lbal,
133 tf->lbam,
134 tf->lbah);
135 }
136
137 if (tf->flags & ATA_TFLAG_DEVICE) {
138 outb(tf->device, ioaddr->device_addr);
139 VPRINTK("device 0x%X\n", tf->device);
140 }
141
142 ata_wait_idle(ap);
143}
144
145/**
146 * ata_tf_load_mmio - send taskfile registers to host controller
147 * @ap: Port to which output is sent
148 * @tf: ATA taskfile register set
149 *
150 * Outputs ATA taskfile to standard ATA host controller using MMIO.
151 *
152 * LOCKING:
153 * Inherited from caller.
154 */
155
057ace5e 156static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
1da177e4
LT
157{
158 struct ata_ioports *ioaddr = &ap->ioaddr;
159 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
160
161 if (tf->ctl != ap->last_ctl) {
162 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
163 ap->last_ctl = tf->ctl;
164 ata_wait_idle(ap);
165 }
166
167 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
168 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
169 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
170 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
171 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
172 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
173 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
174 tf->hob_feature,
175 tf->hob_nsect,
176 tf->hob_lbal,
177 tf->hob_lbam,
178 tf->hob_lbah);
179 }
180
181 if (is_addr) {
182 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
183 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
184 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
185 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
186 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
187 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
188 tf->feature,
189 tf->nsect,
190 tf->lbal,
191 tf->lbam,
192 tf->lbah);
193 }
194
195 if (tf->flags & ATA_TFLAG_DEVICE) {
196 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
197 VPRINTK("device 0x%X\n", tf->device);
198 }
199
200 ata_wait_idle(ap);
201}
202
0baab86b
EF
203
204/**
205 * ata_tf_load - send taskfile registers to host controller
206 * @ap: Port to which output is sent
207 * @tf: ATA taskfile register set
208 *
209 * Outputs ATA taskfile to standard ATA host controller using MMIO
210 * or PIO as indicated by the ATA_FLAG_MMIO flag.
211 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
212 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
213 * hob_lbal, hob_lbam, and hob_lbah.
214 *
215 * This function waits for idle (!BUSY and !DRQ) after writing
216 * registers. If the control register has a new value, this
217 * function also waits for idle after writing control and before
218 * writing the remaining registers.
219 *
220 * May be used as the tf_load() entry in ata_port_operations.
221 *
222 * LOCKING:
223 * Inherited from caller.
224 */
057ace5e 225void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
1da177e4
LT
226{
227 if (ap->flags & ATA_FLAG_MMIO)
228 ata_tf_load_mmio(ap, tf);
229 else
230 ata_tf_load_pio(ap, tf);
231}
232
233/**
0baab86b 234 * ata_exec_command_pio - issue ATA command to host controller
1da177e4
LT
235 * @ap: port to which command is being issued
236 * @tf: ATA taskfile register set
237 *
0baab86b 238 * Issues PIO write to ATA command register, with proper
1da177e4
LT
239 * synchronization with interrupt handler / other threads.
240 *
241 * LOCKING:
242 * spin_lock_irqsave(host_set lock)
243 */
244
057ace5e 245static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
1da177e4
LT
246{
247 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
248
249 outb(tf->command, ap->ioaddr.command_addr);
250 ata_pause(ap);
251}
252
253
254/**
255 * ata_exec_command_mmio - issue ATA command to host controller
256 * @ap: port to which command is being issued
257 * @tf: ATA taskfile register set
258 *
259 * Issues MMIO write to ATA command register, with proper
260 * synchronization with interrupt handler / other threads.
261 *
262 * LOCKING:
263 * spin_lock_irqsave(host_set lock)
264 */
265
057ace5e 266static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
1da177e4
LT
267{
268 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
269
270 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
271 ata_pause(ap);
272}
273
0baab86b
EF
274
275/**
276 * ata_exec_command - issue ATA command to host controller
277 * @ap: port to which command is being issued
278 * @tf: ATA taskfile register set
279 *
280 * Issues PIO/MMIO write to ATA command register, with proper
281 * synchronization with interrupt handler / other threads.
282 *
283 * LOCKING:
284 * spin_lock_irqsave(host_set lock)
285 */
057ace5e 286void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
1da177e4
LT
287{
288 if (ap->flags & ATA_FLAG_MMIO)
289 ata_exec_command_mmio(ap, tf);
290 else
291 ata_exec_command_pio(ap, tf);
292}
293
1da177e4
LT
294/**
295 * ata_tf_to_host - issue ATA taskfile to host controller
296 * @ap: port to which command is being issued
297 * @tf: ATA taskfile register set
298 *
299 * Issues ATA taskfile register set to ATA host controller,
300 * with proper synchronization with interrupt handler and
301 * other threads.
302 *
303 * LOCKING:
1da177e4
LT
304 * spin_lock_irqsave(host_set lock)
305 */
306
e5338254
JG
307static inline void ata_tf_to_host(struct ata_port *ap,
308 const struct ata_taskfile *tf)
1da177e4
LT
309{
310 ap->ops->tf_load(ap, tf);
311 ap->ops->exec_command(ap, tf);
312}
313
314/**
0baab86b 315 * ata_tf_read_pio - input device's ATA taskfile shadow registers
1da177e4
LT
316 * @ap: Port from which input is read
317 * @tf: ATA taskfile register set for storing input
318 *
319 * Reads ATA taskfile registers for currently-selected device
320 * into @tf.
321 *
322 * LOCKING:
323 * Inherited from caller.
324 */
325
326static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
327{
328 struct ata_ioports *ioaddr = &ap->ioaddr;
329
ac19bff2 330 tf->command = ata_check_status(ap);
0169e284 331 tf->feature = inb(ioaddr->error_addr);
1da177e4
LT
332 tf->nsect = inb(ioaddr->nsect_addr);
333 tf->lbal = inb(ioaddr->lbal_addr);
334 tf->lbam = inb(ioaddr->lbam_addr);
335 tf->lbah = inb(ioaddr->lbah_addr);
336 tf->device = inb(ioaddr->device_addr);
337
338 if (tf->flags & ATA_TFLAG_LBA48) {
339 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
340 tf->hob_feature = inb(ioaddr->error_addr);
341 tf->hob_nsect = inb(ioaddr->nsect_addr);
342 tf->hob_lbal = inb(ioaddr->lbal_addr);
343 tf->hob_lbam = inb(ioaddr->lbam_addr);
344 tf->hob_lbah = inb(ioaddr->lbah_addr);
345 }
346}
347
348/**
349 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
350 * @ap: Port from which input is read
351 * @tf: ATA taskfile register set for storing input
352 *
353 * Reads ATA taskfile registers for currently-selected device
354 * into @tf via MMIO.
355 *
356 * LOCKING:
357 * Inherited from caller.
358 */
359
360static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
361{
362 struct ata_ioports *ioaddr = &ap->ioaddr;
363
ac19bff2 364 tf->command = ata_check_status(ap);
0169e284 365 tf->feature = readb((void __iomem *)ioaddr->error_addr);
1da177e4
LT
366 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
367 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
368 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
369 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
370 tf->device = readb((void __iomem *)ioaddr->device_addr);
371
372 if (tf->flags & ATA_TFLAG_LBA48) {
373 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
374 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
375 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
376 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
377 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
378 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
379 }
380}
381
0baab86b
EF
382
383/**
384 * ata_tf_read - input device's ATA taskfile shadow registers
385 * @ap: Port from which input is read
386 * @tf: ATA taskfile register set for storing input
387 *
388 * Reads ATA taskfile registers for currently-selected device
389 * into @tf.
390 *
391 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
392 * is set, also reads the hob registers.
393 *
394 * May be used as the tf_read() entry in ata_port_operations.
395 *
396 * LOCKING:
397 * Inherited from caller.
398 */
1da177e4
LT
399void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
400{
401 if (ap->flags & ATA_FLAG_MMIO)
402 ata_tf_read_mmio(ap, tf);
403 else
404 ata_tf_read_pio(ap, tf);
405}
406
407/**
408 * ata_check_status_pio - Read device status reg & clear interrupt
409 * @ap: port where the device is
410 *
411 * Reads ATA taskfile status register for currently-selected device
0baab86b 412 * and return its value. This also clears pending interrupts
1da177e4
LT
413 * from this device
414 *
415 * LOCKING:
416 * Inherited from caller.
417 */
418static u8 ata_check_status_pio(struct ata_port *ap)
419{
420 return inb(ap->ioaddr.status_addr);
421}
422
423/**
424 * ata_check_status_mmio - Read device status reg & clear interrupt
425 * @ap: port where the device is
426 *
427 * Reads ATA taskfile status register for currently-selected device
0baab86b 428 * via MMIO and return its value. This also clears pending interrupts
1da177e4
LT
429 * from this device
430 *
431 * LOCKING:
432 * Inherited from caller.
433 */
434static u8 ata_check_status_mmio(struct ata_port *ap)
435{
436 return readb((void __iomem *) ap->ioaddr.status_addr);
437}
438
0baab86b
EF
439
440/**
441 * ata_check_status - Read device status reg & clear interrupt
442 * @ap: port where the device is
443 *
444 * Reads ATA taskfile status register for currently-selected device
445 * and return its value. This also clears pending interrupts
446 * from this device
447 *
448 * May be used as the check_status() entry in ata_port_operations.
449 *
450 * LOCKING:
451 * Inherited from caller.
452 */
1da177e4
LT
453u8 ata_check_status(struct ata_port *ap)
454{
455 if (ap->flags & ATA_FLAG_MMIO)
456 return ata_check_status_mmio(ap);
457 return ata_check_status_pio(ap);
458}
459
0baab86b
EF
460
461/**
462 * ata_altstatus - Read device alternate status reg
463 * @ap: port where the device is
464 *
465 * Reads ATA taskfile alternate status register for
466 * currently-selected device and return its value.
467 *
468 * Note: may NOT be used as the check_altstatus() entry in
469 * ata_port_operations.
470 *
471 * LOCKING:
472 * Inherited from caller.
473 */
1da177e4
LT
474u8 ata_altstatus(struct ata_port *ap)
475{
476 if (ap->ops->check_altstatus)
477 return ap->ops->check_altstatus(ap);
478
479 if (ap->flags & ATA_FLAG_MMIO)
480 return readb((void __iomem *)ap->ioaddr.altstatus_addr);
481 return inb(ap->ioaddr.altstatus_addr);
482}
483
0baab86b 484
1da177e4
LT
485/**
486 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
487 * @tf: Taskfile to convert
488 * @fis: Buffer into which data will output
489 * @pmp: Port multiplier port
490 *
491 * Converts a standard ATA taskfile to a Serial ATA
492 * FIS structure (Register - Host to Device).
493 *
494 * LOCKING:
495 * Inherited from caller.
496 */
497
057ace5e 498void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
1da177e4
LT
499{
500 fis[0] = 0x27; /* Register - Host to Device FIS */
501 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
502 bit 7 indicates Command FIS */
503 fis[2] = tf->command;
504 fis[3] = tf->feature;
505
506 fis[4] = tf->lbal;
507 fis[5] = tf->lbam;
508 fis[6] = tf->lbah;
509 fis[7] = tf->device;
510
511 fis[8] = tf->hob_lbal;
512 fis[9] = tf->hob_lbam;
513 fis[10] = tf->hob_lbah;
514 fis[11] = tf->hob_feature;
515
516 fis[12] = tf->nsect;
517 fis[13] = tf->hob_nsect;
518 fis[14] = 0;
519 fis[15] = tf->ctl;
520
521 fis[16] = 0;
522 fis[17] = 0;
523 fis[18] = 0;
524 fis[19] = 0;
525}
526
527/**
528 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
529 * @fis: Buffer from which data will be input
530 * @tf: Taskfile to output
531 *
e12a1be6 532 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
533 *
534 * LOCKING:
535 * Inherited from caller.
536 */
537
057ace5e 538void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
539{
540 tf->command = fis[2]; /* status */
541 tf->feature = fis[3]; /* error */
542
543 tf->lbal = fis[4];
544 tf->lbam = fis[5];
545 tf->lbah = fis[6];
546 tf->device = fis[7];
547
548 tf->hob_lbal = fis[8];
549 tf->hob_lbam = fis[9];
550 tf->hob_lbah = fis[10];
551
552 tf->nsect = fis[12];
553 tf->hob_nsect = fis[13];
554}
555
8cbd6df1
AL
556static const u8 ata_rw_cmds[] = {
557 /* pio multi */
558 ATA_CMD_READ_MULTI,
559 ATA_CMD_WRITE_MULTI,
560 ATA_CMD_READ_MULTI_EXT,
561 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
562 0,
563 0,
564 0,
565 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
566 /* pio */
567 ATA_CMD_PIO_READ,
568 ATA_CMD_PIO_WRITE,
569 ATA_CMD_PIO_READ_EXT,
570 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
571 0,
572 0,
573 0,
574 0,
8cbd6df1
AL
575 /* dma */
576 ATA_CMD_READ,
577 ATA_CMD_WRITE,
578 ATA_CMD_READ_EXT,
9a3dccc4
TH
579 ATA_CMD_WRITE_EXT,
580 0,
581 0,
582 0,
583 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 584};
1da177e4
LT
585
586/**
8cbd6df1
AL
587 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
588 * @qc: command to examine and configure
1da177e4 589 *
8cbd6df1
AL
590 * Examine the device configuration and tf->flags to calculate
591 * the proper read/write commands and protocol to use.
1da177e4
LT
592 *
593 * LOCKING:
594 * caller.
595 */
9a3dccc4 596int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
1da177e4 597{
8cbd6df1
AL
598 struct ata_taskfile *tf = &qc->tf;
599 struct ata_device *dev = qc->dev;
9a3dccc4 600 u8 cmd;
1da177e4 601
9a3dccc4 602 int index, fua, lba48, write;
8cbd6df1 603
9a3dccc4 604 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
605 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
606 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 607
8cbd6df1
AL
608 if (dev->flags & ATA_DFLAG_PIO) {
609 tf->protocol = ATA_PROT_PIO;
9a3dccc4 610 index = dev->multi_count ? 0 : 8;
8d238e01
AC
611 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
612 /* Unable to use DMA due to host limitation */
613 tf->protocol = ATA_PROT_PIO;
614 index = dev->multi_count ? 0 : 4;
8cbd6df1
AL
615 } else {
616 tf->protocol = ATA_PROT_DMA;
9a3dccc4 617 index = 16;
8cbd6df1 618 }
1da177e4 619
9a3dccc4
TH
620 cmd = ata_rw_cmds[index + fua + lba48 + write];
621 if (cmd) {
622 tf->command = cmd;
623 return 0;
624 }
625 return -1;
1da177e4
LT
626}
627
98ac62de 628static const char * const xfer_mode_str[] = {
1da177e4
LT
629 "UDMA/16",
630 "UDMA/25",
631 "UDMA/33",
632 "UDMA/44",
633 "UDMA/66",
634 "UDMA/100",
635 "UDMA/133",
636 "UDMA7",
637 "MWDMA0",
638 "MWDMA1",
639 "MWDMA2",
640 "PIO0",
641 "PIO1",
642 "PIO2",
643 "PIO3",
644 "PIO4",
645};
646
647/**
648 * ata_udma_string - convert UDMA bit offset to string
649 * @mask: mask of bits supported; only highest bit counts.
650 *
651 * Determine string which represents the highest speed
652 * (highest bit in @udma_mask).
653 *
654 * LOCKING:
655 * None.
656 *
657 * RETURNS:
658 * Constant C string representing highest speed listed in
659 * @udma_mask, or the constant C string "<n/a>".
660 */
661
662static const char *ata_mode_string(unsigned int mask)
663{
664 int i;
665
666 for (i = 7; i >= 0; i--)
667 if (mask & (1 << i))
668 goto out;
669 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
670 if (mask & (1 << i))
671 goto out;
672 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
673 if (mask & (1 << i))
674 goto out;
675
676 return "<n/a>";
677
678out:
679 return xfer_mode_str[i];
680}
681
682/**
683 * ata_pio_devchk - PATA device presence detection
684 * @ap: ATA channel to examine
685 * @device: Device to examine (starting at zero)
686 *
687 * This technique was originally described in
688 * Hale Landis's ATADRVR (www.ata-atapi.com), and
689 * later found its way into the ATA/ATAPI spec.
690 *
691 * Write a pattern to the ATA shadow registers,
692 * and if a device is present, it will respond by
693 * correctly storing and echoing back the
694 * ATA shadow register contents.
695 *
696 * LOCKING:
697 * caller.
698 */
699
700static unsigned int ata_pio_devchk(struct ata_port *ap,
701 unsigned int device)
702{
703 struct ata_ioports *ioaddr = &ap->ioaddr;
704 u8 nsect, lbal;
705
706 ap->ops->dev_select(ap, device);
707
708 outb(0x55, ioaddr->nsect_addr);
709 outb(0xaa, ioaddr->lbal_addr);
710
711 outb(0xaa, ioaddr->nsect_addr);
712 outb(0x55, ioaddr->lbal_addr);
713
714 outb(0x55, ioaddr->nsect_addr);
715 outb(0xaa, ioaddr->lbal_addr);
716
717 nsect = inb(ioaddr->nsect_addr);
718 lbal = inb(ioaddr->lbal_addr);
719
720 if ((nsect == 0x55) && (lbal == 0xaa))
721 return 1; /* we found a device */
722
723 return 0; /* nothing found */
724}
725
726/**
727 * ata_mmio_devchk - PATA device presence detection
728 * @ap: ATA channel to examine
729 * @device: Device to examine (starting at zero)
730 *
731 * This technique was originally described in
732 * Hale Landis's ATADRVR (www.ata-atapi.com), and
733 * later found its way into the ATA/ATAPI spec.
734 *
735 * Write a pattern to the ATA shadow registers,
736 * and if a device is present, it will respond by
737 * correctly storing and echoing back the
738 * ATA shadow register contents.
739 *
740 * LOCKING:
741 * caller.
742 */
743
744static unsigned int ata_mmio_devchk(struct ata_port *ap,
745 unsigned int device)
746{
747 struct ata_ioports *ioaddr = &ap->ioaddr;
748 u8 nsect, lbal;
749
750 ap->ops->dev_select(ap, device);
751
752 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
753 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
754
755 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
756 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
757
758 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
759 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
760
761 nsect = readb((void __iomem *) ioaddr->nsect_addr);
762 lbal = readb((void __iomem *) ioaddr->lbal_addr);
763
764 if ((nsect == 0x55) && (lbal == 0xaa))
765 return 1; /* we found a device */
766
767 return 0; /* nothing found */
768}
769
770/**
771 * ata_devchk - PATA device presence detection
772 * @ap: ATA channel to examine
773 * @device: Device to examine (starting at zero)
774 *
775 * Dispatch ATA device presence detection, depending
776 * on whether we are using PIO or MMIO to talk to the
777 * ATA shadow registers.
778 *
779 * LOCKING:
780 * caller.
781 */
782
783static unsigned int ata_devchk(struct ata_port *ap,
784 unsigned int device)
785{
786 if (ap->flags & ATA_FLAG_MMIO)
787 return ata_mmio_devchk(ap, device);
788 return ata_pio_devchk(ap, device);
789}
790
791/**
792 * ata_dev_classify - determine device type based on ATA-spec signature
793 * @tf: ATA taskfile register set for device to be identified
794 *
795 * Determine from taskfile register contents whether a device is
796 * ATA or ATAPI, as per "Signature and persistence" section
797 * of ATA/PI spec (volume 1, sect 5.14).
798 *
799 * LOCKING:
800 * None.
801 *
802 * RETURNS:
803 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
804 * the event of failure.
805 */
806
057ace5e 807unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
808{
809 /* Apple's open source Darwin code hints that some devices only
810 * put a proper signature into the LBA mid/high registers,
811 * So, we only check those. It's sufficient for uniqueness.
812 */
813
814 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
815 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
816 DPRINTK("found ATA device by sig\n");
817 return ATA_DEV_ATA;
818 }
819
820 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
821 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
822 DPRINTK("found ATAPI device by sig\n");
823 return ATA_DEV_ATAPI;
824 }
825
826 DPRINTK("unknown device\n");
827 return ATA_DEV_UNKNOWN;
828}
829
830/**
831 * ata_dev_try_classify - Parse returned ATA device signature
832 * @ap: ATA channel to examine
833 * @device: Device to examine (starting at zero)
b4dc7623 834 * @r_err: Value of error register on completion
1da177e4
LT
835 *
836 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
837 * an ATA/ATAPI-defined set of values is placed in the ATA
838 * shadow registers, indicating the results of device detection
839 * and diagnostics.
840 *
841 * Select the ATA device, and read the values from the ATA shadow
842 * registers. Then parse according to the Error register value,
843 * and the spec-defined values examined by ata_dev_classify().
844 *
845 * LOCKING:
846 * caller.
b4dc7623
TH
847 *
848 * RETURNS:
849 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4
LT
850 */
851
b4dc7623
TH
852static unsigned int
853ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
1da177e4 854{
1da177e4
LT
855 struct ata_taskfile tf;
856 unsigned int class;
857 u8 err;
858
859 ap->ops->dev_select(ap, device);
860
861 memset(&tf, 0, sizeof(tf));
862
1da177e4 863 ap->ops->tf_read(ap, &tf);
0169e284 864 err = tf.feature;
b4dc7623
TH
865 if (r_err)
866 *r_err = err;
1da177e4
LT
867
868 /* see if device passed diags */
869 if (err == 1)
870 /* do nothing */ ;
871 else if ((device == 0) && (err == 0x81))
872 /* do nothing */ ;
873 else
b4dc7623 874 return ATA_DEV_NONE;
1da177e4 875
b4dc7623 876 /* determine if device is ATA or ATAPI */
1da177e4 877 class = ata_dev_classify(&tf);
b4dc7623 878
1da177e4 879 if (class == ATA_DEV_UNKNOWN)
b4dc7623 880 return ATA_DEV_NONE;
1da177e4 881 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
b4dc7623
TH
882 return ATA_DEV_NONE;
883 return class;
1da177e4
LT
884}
885
886/**
887 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
888 * @id: IDENTIFY DEVICE results we will examine
889 * @s: string into which data is output
890 * @ofs: offset into identify device page
891 * @len: length of string to return. must be an even number.
892 *
893 * The strings in the IDENTIFY DEVICE page are broken up into
894 * 16-bit chunks. Run through the string, and output each
895 * 8-bit chunk linearly, regardless of platform.
896 *
897 * LOCKING:
898 * caller.
899 */
900
057ace5e 901void ata_dev_id_string(const u16 *id, unsigned char *s,
1da177e4
LT
902 unsigned int ofs, unsigned int len)
903{
904 unsigned int c;
905
906 while (len > 0) {
907 c = id[ofs] >> 8;
908 *s = c;
909 s++;
910
911 c = id[ofs] & 0xff;
912 *s = c;
913 s++;
914
915 ofs++;
916 len -= 2;
917 }
918}
919
0baab86b
EF
920
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Intentionally does nothing; for hosts that do not need an
 *	explicit device-select step.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* deliberately empty */
}
936
0baab86b 937
1da177e4
LT
938/**
939 * ata_std_dev_select - Select device 0/1 on ATA bus
940 * @ap: ATA channel to manipulate
941 * @device: ATA device (numbered from zero) to select
942 *
943 * Use the method defined in the ATA specification to
944 * make either device 0, or device 1, active on the
0baab86b
EF
945 * ATA channel. Works with both PIO and MMIO.
946 *
947 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
948 *
949 * LOCKING:
950 * caller.
951 */
952
953void ata_std_dev_select (struct ata_port *ap, unsigned int device)
954{
955 u8 tmp;
956
957 if (device == 0)
958 tmp = ATA_DEVICE_OBS;
959 else
960 tmp = ATA_DEVICE_OBS | ATA_DEV1;
961
962 if (ap->flags & ATA_FLAG_MMIO) {
963 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
964 } else {
965 outb(tmp, ap->ioaddr.device_addr);
966 }
967 ata_pause(ap); /* needed; also flushes, for mmio */
968}
969
970/**
971 * ata_dev_select - Select device 0/1 on ATA bus
972 * @ap: ATA channel to manipulate
973 * @device: ATA device (numbered from zero) to select
974 * @wait: non-zero to wait for Status register BSY bit to clear
975 * @can_sleep: non-zero if context allows sleeping
976 *
977 * Use the method defined in the ATA specification to
978 * make either device 0, or device 1, active on the
979 * ATA channel.
980 *
981 * This is a high-level version of ata_std_dev_select(),
982 * which additionally provides the services of inserting
983 * the proper pauses and status polling, where needed.
984 *
985 * LOCKING:
986 * caller.
987 */
988
989void ata_dev_select(struct ata_port *ap, unsigned int device,
990 unsigned int wait, unsigned int can_sleep)
991{
992 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
993 ap->id, device, wait);
994
995 if (wait)
996 ata_wait_idle(ap);
997
998 ap->ops->dev_select(ap, device);
999
1000 if (wait) {
1001 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
1002 msleep(150);
1003 ata_wait_idle(ap);
1004 }
1005}
1006
1007/**
1008 * ata_dump_id - IDENTIFY DEVICE info debugging output
1009 * @dev: Device whose IDENTIFY DEVICE page we will dump
1010 *
1011 * Dump selected 16-bit words from a detected device's
1012 * IDENTIFY PAGE page.
1013 *
1014 * LOCKING:
1015 * caller.
1016 */
1017
057ace5e 1018static inline void ata_dump_id(const struct ata_device *dev)
1da177e4
LT
1019{
1020 DPRINTK("49==0x%04x "
1021 "53==0x%04x "
1022 "63==0x%04x "
1023 "64==0x%04x "
1024 "75==0x%04x \n",
1025 dev->id[49],
1026 dev->id[53],
1027 dev->id[63],
1028 dev->id[64],
1029 dev->id[75]);
1030 DPRINTK("80==0x%04x "
1031 "81==0x%04x "
1032 "82==0x%04x "
1033 "83==0x%04x "
1034 "84==0x%04x \n",
1035 dev->id[80],
1036 dev->id[81],
1037 dev->id[82],
1038 dev->id[83],
1039 dev->id[84]);
1040 DPRINTK("88==0x%04x "
1041 "93==0x%04x\n",
1042 dev->id[88],
1043 dev->id[93]);
1044}
1045
11e29e21
AC
1046/*
1047 * Compute the PIO modes available for this device. This is not as
1048 * trivial as it seems if we must consider early devices correctly.
1049 *
1050 * FIXME: pre IDE drive timing (do we care ?).
1051 */
1052
057ace5e 1053static unsigned int ata_pio_modes(const struct ata_device *adev)
11e29e21
AC
1054{
1055 u16 modes;
1056
ffa29456
AC
1057 /* Usual case. Word 53 indicates word 64 is valid */
1058 if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) {
11e29e21
AC
1059 modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
1060 modes <<= 3;
1061 modes |= 0x7;
1062 return modes;
1063 }
1064
ffa29456
AC
1065 /* If word 64 isn't valid then Word 51 high byte holds the PIO timing
1066 number for the maximum. Turn it into a mask and return it */
1067 modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ;
11e29e21 1068 return modes;
ffa29456
AC
1069 /* But wait.. there's more. Design your standards by committee and
1070 you too can get a free iordy field to process. However its the
1071 speeds not the modes that are supported... Note drivers using the
1072 timing API will get this right anyway */
11e29e21
AC
1073}
1074
95064379
TH
1075static inline void
1076ata_queue_packet_task(struct ata_port *ap)
1077{
1078 queue_work(ata_wq, &ap->packet_task);
1079}
1080
1081static inline void
1082ata_queue_pio_task(struct ata_port *ap)
1083{
1084 queue_work(ata_wq, &ap->pio_task);
1085}
1086
/* Run the port's PIO state machine after @delay jiffies have elapsed. */
static inline void
ata_queue_delayed_pio_task(struct ata_port *ap, unsigned long delay)
{
	queue_delayed_work(ata_wq, &ap->pio_task, delay);
}
1092
77853bf2 1093void ata_qc_complete_internal(struct ata_queued_cmd *qc)
64f043d8 1094{
77853bf2 1095 struct completion *waiting = qc->private_data;
64f043d8 1096
77853bf2 1097 qc->ap->ops->tf_read(qc->ap, &qc->tf);
a2a7a662 1098 complete(waiting);
a2a7a662
TH
1099}
1100
/**
 *	ata_exec_internal - execute libata internal command
 *	@ap: Port to which the command is sent
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@dma_dir: Data tranfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 */

static unsigned
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
		  struct ata_taskfile *tf,
		  int dma_dir, void *buf, unsigned int buflen)
{
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	/* on-stack completion; ata_qc_complete_internal() fires it */
	DECLARE_COMPLETION(wait);
	unsigned long flags;
	unsigned int err_mask;

	spin_lock_irqsave(&ap->host_set->lock, flags);

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf = *tf;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	/* route the qc's completion back to our on-stack completion */
	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	/* immediate issue failure: complete the qc ourselves so the
	 * result/err_mask bookkeeping below still works */
	qc->err_mask = ata_qc_issue(qc);
	if (qc->err_mask)
		ata_qc_complete(qc);

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
		spin_lock_irqsave(&ap->host_set->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * again.  If completion irq occurs after here but
		 * before the caller cleans up, it will result in a
		 * spurious interrupt.  We can live with that.
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask = AC_ERR_TIMEOUT;
			ata_qc_complete(qc);
			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
			       ap->id, command);
		}

		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}

	/* copy the result taskfile and error mask out before freeing
	 * the qc (ata_qc_free() recycles it) */
	*tf = qc->tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);

	return err_mask;
}
1178
1bc4ccff
AC
1179/**
1180 * ata_pio_need_iordy - check if iordy needed
1181 * @adev: ATA device
1182 *
1183 * Check if the current speed of the device requires IORDY. Used
1184 * by various controllers for chip configuration.
1185 */
1186
1187unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1188{
1189 int pio;
1190 int speed = adev->pio_mode - XFER_PIO_0;
1191
1192 if (speed < 2)
1193 return 0;
1194 if (speed > 2)
1195 return 1;
1196
1197 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1198
1199 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1200 pio = adev->id[ATA_ID_EIDE_PIO];
1201 /* Is the speed faster than the drive allows non IORDY ? */
1202 if (pio) {
1203 /* This is cycle times not frequency - watch the logic! */
1204 if (pio > 240) /* PIO2 is 240nS per cycle */
1205 return 1;
1206 return 0;
1207 }
1208 }
1209 return 0;
1210}
1211
1da177e4
LT
/**
 *	ata_dev_identify - obtain IDENTIFY x DEVICE page
 *	@ap: port on which device we wish to probe resides
 *	@device: device bus address, starting at zero
 *
 *	Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
 *	command, and read back the 512-byte device information page.
 *	The device information page is fed to us via the standard
 *	PIO-IN protocol, but we hand-code it here. (TODO: investigate
 *	using standard PIO-IN paths)
 *
 *	After reading the device information page, we use several
 *	bits of information from it to initialize data structures
 *	that will be used during the lifetime of the ata_device.
 *	Other data from the info page is used to disqualify certain
 *	older ATA devices we do not wish to support.
 *
 *	LOCKING:
 *	Inherited from caller.  Some functions called by this function
 *	obtain the host_set lock.
 */

static void ata_dev_identify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	unsigned int major_version;
	u16 tmp;
	unsigned long xfer_modes;
	unsigned int using_edd;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, device);
		return;
	}

	/* EDD (EXECUTE DEVICE DIAGNOSTIC) is only used when no form of
	 * software/SATA reset is in effect on this port */
	if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);

	assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
		dev->class == ATA_DEV_NONE);

	ata_dev_select(ap, device, 1, 1); /* select device 0/1 */

retry:
	ata_tf_init(ap, &tf, device);

	/* pick IDENTIFY DEVICE vs. IDENTIFY PACKET DEVICE by the class
	 * determined at reset time (possibly revised below) */
	if (dev->class == ATA_DEV_ATA) {
		tf.command = ATA_CMD_ID_ATA;
		DPRINTK("do ATA identify\n");
	} else {
		tf.command = ATA_CMD_ID_ATAPI;
		DPRINTK("do ATAPI identify\n");
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
				     dev->id, sizeof(dev->id));

	if (err_mask) {
		/* anything other than a device-reported error is fatal */
		if (err_mask & ~AC_ERR_DEV)
			goto err_out;

		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (dev->class == ATA_DEV_ATA)) {
			u8 err = tf.feature;
			if (err & ATA_ABORTED) {
				dev->class = ATA_DEV_ATAPI;
				goto retry;
			}
		}
		goto err_out;
	}

	/* IDENTIFY data is little-endian 16-bit words on the wire */
	swap_buf_le16(dev->id, ATA_ID_WORDS);

	/* print device capabilities */
	printk(KERN_DEBUG "ata%u: dev %u cfg "
	       "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
	       ap->id, device, dev->id[49],
	       dev->id[82], dev->id[83], dev->id[84],
	       dev->id[85], dev->id[86], dev->id[87],
	       dev->id[88]);

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* we require DMA support (bits 8 of word 49) */
	if (!ata_id_has_dma(dev->id)) {
		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
		goto err_out_nosup;
	}

	/* quick-n-dirty find max transfer mode; for printk only */
	xfer_modes = dev->id[ATA_ID_UDMA_MODES];
	if (!xfer_modes)
		xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
	if (!xfer_modes)
		xfer_modes = ata_pio_modes(dev);

	ata_dump_id(dev);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(dev->id))	/* sanity check */
			goto err_out_nosup;

		/* get major version: highest set bit in word 80 */
		tmp = dev->id[ATA_ID_MAJOR_VER];
		for (major_version = 14; major_version >= 1; major_version--)
			if (tmp & (1 << major_version))
				break;

		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
			ata_dev_init_params(ap, dev);

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			ata_dev_reread_id(ap, dev);
		}

		if (ata_id_has_lba(dev->id)) {
			dev->flags |= ATA_DFLAG_LBA;

			if (ata_id_has_lba48(dev->id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				/* LBA48: 64-bit sector count at word 100 */
				dev->n_sectors = ata_id_u64(dev->id, 100);
			} else {
				/* LBA28: 32-bit sector count at word 60 */
				dev->n_sectors = ata_id_u32(dev->id, 60);
			}

			/* print device info to dmesg */
			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
			       ap->id, device,
			       major_version,
			       ata_mode_string(xfer_modes),
			       (unsigned long long)dev->n_sectors,
			       dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= dev->id[1];
			dev->heads	= dev->id[3];
			dev->sectors	= dev->id[6];
			dev->n_sectors = dev->cylinders * dev->heads * dev->sectors;

			if (ata_id_current_chs_valid(dev->id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = dev->id[54];
				dev->heads    = dev->id[55];
				dev->sectors  = dev->id[56];

				dev->n_sectors = ata_id_u32(dev->id, 57);
			}

			/* print device info to dmesg */
			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
			       ap->id, device,
			       major_version,
			       ata_mode_string(xfer_modes),
			       (unsigned long long)dev->n_sectors,
			       (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);

		}

		/* word 59 bit 8 set => low byte is the current multi count */
		if (dev->id[59] & 0x100) {
			dev->multi_count = dev->id[59] & 0xff;
			DPRINTK("ata%u: dev %u multi count %u\n",
				ap->id, device, dev->multi_count);
		}

		ap->host->max_cmd_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		if (ata_id_is_ata(dev->id))		/* sanity check */
			goto err_out_nosup;

		rc = atapi_cdb_len(dev->id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			goto err_out_nosup;
		}
		ap->cdb_len = (unsigned int) rc;
		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;

		/* device asserts INTRQ when it is ready for the CDB */
		if (ata_id_cdb_intr(dev->id))
			dev->flags |= ATA_DFLAG_CDB_INTR;

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
		       ap->id, device,
		       ata_mode_string(xfer_modes));
	}

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, device);
err_out:
	dev->class++;	/* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
	DPRINTK("EXIT, err\n");
}
1447
6f2f3812 1448
057ace5e 1449static inline u8 ata_dev_knobble(const struct ata_port *ap)
6f2f3812
BC
1450{
1451 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
1452}
1453
1454/**
1455 * ata_dev_config - Run device specific handlers and check for
1456 * SATA->PATA bridges
8a60a071 1457 * @ap: Bus
6f2f3812
BC
1458 * @i: Device
1459 *
1460 * LOCKING:
1461 */
8a60a071 1462
6f2f3812
BC
1463void ata_dev_config(struct ata_port *ap, unsigned int i)
1464{
1465 /* limit bridge transfers to udma5, 200 sectors */
1466 if (ata_dev_knobble(ap)) {
1467 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1468 ap->id, ap->device->devno);
1469 ap->udma_mask &= ATA_UDMA5;
1470 ap->host->max_sectors = ATA_MAX_SECTORS;
1471 ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
9d824d07 1472 ap->device[i].flags |= ATA_DFLAG_LOCK_SECTORS;
6f2f3812
BC
1473 }
1474
1475 if (ap->ops->dev_config)
1476 ap->ops->dev_config(ap, &ap->device[i]);
1477}
1478
1da177e4
LT
1479/**
1480 * ata_bus_probe - Reset and probe ATA bus
1481 * @ap: Bus to probe
1482 *
0cba632b
JG
1483 * Master ATA bus probing function. Initiates a hardware-dependent
1484 * bus reset, then attempts to identify any devices found on
1485 * the bus.
1486 *
1da177e4 1487 * LOCKING:
0cba632b 1488 * PCI/etc. bus probe sem.
1da177e4
LT
1489 *
1490 * RETURNS:
1491 * Zero on success, non-zero on error.
1492 */
1493
1494static int ata_bus_probe(struct ata_port *ap)
1495{
1496 unsigned int i, found = 0;
1497
c19ba8af
TH
1498 if (ap->ops->probe_reset) {
1499 unsigned int classes[ATA_MAX_DEVICES];
1500 int rc;
1501
1502 ata_port_probe(ap);
1503
1504 rc = ap->ops->probe_reset(ap, classes);
1505 if (rc == 0) {
1506 for (i = 0; i < ATA_MAX_DEVICES; i++)
1507 ap->device[i].class = classes[i];
1508 } else {
1509 printk(KERN_ERR "ata%u: probe reset failed, "
1510 "disabling port\n", ap->id);
1511 ata_port_disable(ap);
1512 }
1513 } else
1514 ap->ops->phy_reset(ap);
1515
1da177e4
LT
1516 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1517 goto err_out;
1518
1519 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1520 ata_dev_identify(ap, i);
1521 if (ata_dev_present(&ap->device[i])) {
1522 found = 1;
6f2f3812 1523 ata_dev_config(ap,i);
1da177e4
LT
1524 }
1525 }
1526
1527 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1528 goto err_out_disable;
1529
1530 ata_set_mode(ap);
1531 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1532 goto err_out_disable;
1533
1534 return 0;
1535
1536err_out_disable:
1537 ap->ops->port_disable(ap);
1538err_out:
1539 return -1;
1540}
1541
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing this flag re-enables probing/communication on the port */
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}
1557
3be680b7
TH
/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *	Silently does nothing on hosts that cannot read SCRs.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, tmp;
	const char *speed;

	if (!ap->ops->scr_read)
		return;

	sstatus = scr_read(ap, SCR_STATUS);

	if (sata_dev_present(ap)) {
		/* SPD field of SStatus, bits 7:4.
		 * NOTE(review): this bit-tests SPD rather than comparing
		 * for equality; per the SATA spec SPD is a value (1 =
		 * Gen1/1.5Gbps, 2 = Gen2/3.0Gbps), so a future value of
		 * 3 would be reported as "1.5" -- confirm against spec. */
		tmp = (sstatus >> 4) & 0xf;
		if (tmp & (1 << 0))
			speed = "1.5";
		else if (tmp & (1 << 1))
			speed = "3.0";
		else
			speed = "<unknown>";
		printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
		       ap->id, speed, sstatus);
	} else {
		printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
		       ap->id, sstatus);
	}
}
1592
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary: poll SStatus DET
	 * until it leaves state 1 (device detected, no phy comm) or we
	 * give up after the 5 second timeout */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	/* wait for the device behind the phy to clear BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
1646
1647/**
780a87f7
JG
1648 * sata_phy_reset - Reset SATA bus.
1649 * @ap: SATA port associated with target SATA PHY.
1da177e4 1650 *
780a87f7
JG
1651 * This function resets the SATA bus, and then probes
1652 * the bus for devices.
1da177e4
LT
1653 *
1654 * LOCKING:
0cba632b 1655 * PCI/etc. bus probe sem.
1da177e4
LT
1656 *
1657 */
1658void sata_phy_reset(struct ata_port *ap)
1659{
1660 __sata_phy_reset(ap);
1661 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1662 return;
1663 ata_bus_reset(ap);
1664}
1665
1666/**
780a87f7
JG
1667 * ata_port_disable - Disable port.
1668 * @ap: Port to be disabled.
1da177e4 1669 *
780a87f7
JG
1670 * Modify @ap data structure such that the system
1671 * thinks that the entire port is disabled, and should
1672 * never attempt to probe or communicate with devices
1673 * on this port.
1674 *
1675 * LOCKING: host_set lock, or some other form of
1676 * serialization.
1da177e4
LT
1677 */
1678
1679void ata_port_disable(struct ata_port *ap)
1680{
1681 ap->device[0].class = ATA_DEV_NONE;
1682 ap->device[1].class = ATA_DEV_NONE;
1683 ap->flags |= ATA_FLAG_PORT_DISABLED;
1684}
1685
452503f9
AC
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 *
 * Columns: mode, setup, act8b, rec8b, cyc8b, active, recover,
 * cycle, udma -- matching struct ata_timing field order.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },

	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },

/*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */

	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },

	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },

/*	{ XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },

	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */

	/* end-of-table sentinel; ata_timing_find_mode() stops here */
	{ 0xFF }
};
1730
/* ENOUGH(): round @v nanoseconds up to a whole number of @unit-ns clocks.
 * EZ(): same, but keep 0 ("not specified") as 0 instead of one clock. */
#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)

/* Quantize timing @t (nanoseconds) into bus clock counts in @q:
 * PIO/MWDMA fields use clock period T, the UDMA field its own UT.
 * (@t and @q may alias -- all reads of t precede the write of the
 * corresponding q field.) */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
1745
/* Merge timings @a and @b into @m: for every field selected by the
 * @what mask, take the slower (larger) of the two values. */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
}
1758
1759static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1760{
1761 const struct ata_timing *t;
1762
1763 for (t = ata_timing; t->mode != speed; t++)
91190758 1764 if (t->mode == 0xFF)
452503f9
AC
1765 return NULL;
1766 return t;
1767}
1768
/* Compute the final bus-clock timing @t for @adev running at mode
 * @speed, given PIO/MWDMA clock period @T and UDMA clock period @UT
 * (both in picoseconds-per-1000ns units as used by the quantizer).
 * Returns 0 on success, -EINVAL for an unknown mode. */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* never go faster than what the drive reports it can take */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T
	 * and some other commands. We have to ensure that the DMA cycle timing is
	 * slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		/* recurse once for the device's PIO mode, then merge */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
1833
/* Maps each transfer-class bit shift (UDMA/MWDMA/PIO) to the base
 * XFER_* opcode of that class; consumed by base_from_shift(). */
static const struct {
	unsigned int shift;
	u8 base;
} xfer_mode_classes[] = {
	{ ATA_SHIFT_UDMA, XFER_UDMA_0 },
	{ ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_PIO, XFER_PIO_0 },
};
1842
858119e1 1843static u8 base_from_shift(unsigned int shift)
1da177e4
LT
1844{
1845 int i;
1846
1847 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1848 if (xfer_mode_classes[i].shift == shift)
1849 return xfer_mode_classes[i].base;
1850
1851 return 0xff;
1852}
1853
1854static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1855{
1856 int ofs, idx;
1857 u8 base;
1858
1859 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1860 return;
1861
1862 if (dev->xfer_shift == ATA_SHIFT_PIO)
1863 dev->flags |= ATA_DFLAG_PIO;
1864
1865 ata_dev_set_xfermode(ap, dev);
1866
1867 base = base_from_shift(dev->xfer_shift);
1868 ofs = dev->xfer_mode - base;
1869 idx = ofs + dev->xfer_shift;
1870 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
1871
1872 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1873 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
1874
1875 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1876 ap->id, dev->devno, xfer_mode_str[idx]);
1877}
1878
1879static int ata_host_set_pio(struct ata_port *ap)
1880{
1881 unsigned int mask;
1882 int x, i;
1883 u8 base, xfer_mode;
1884
1885 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1886 x = fgb(mask);
1887 if (x < 0) {
1888 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1889 return -1;
1890 }
1891
1892 base = base_from_shift(ATA_SHIFT_PIO);
1893 xfer_mode = base + x;
1894
1895 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1896 (int)base, (int)xfer_mode, mask, x);
1897
1898 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1899 struct ata_device *dev = &ap->device[i];
1900 if (ata_dev_present(dev)) {
1901 dev->pio_mode = xfer_mode;
1902 dev->xfer_mode = xfer_mode;
1903 dev->xfer_shift = ATA_SHIFT_PIO;
1904 if (ap->ops->set_piomode)
1905 ap->ops->set_piomode(ap, dev);
1906 }
1907 }
1908
1909 return 0;
1910}
1911
1912static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1913 unsigned int xfer_shift)
1914{
1915 int i;
1916
1917 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1918 struct ata_device *dev = &ap->device[i];
1919 if (ata_dev_present(dev)) {
1920 dev->dma_mode = xfer_mode;
1921 dev->xfer_mode = xfer_mode;
1922 dev->xfer_shift = xfer_shift;
1923 if (ap->ops->set_dmamode)
1924 ap->ops->set_dmamode(ap, dev);
1925 }
1926 }
1927}
1928
1929/**
1930 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1931 * @ap: port on which timings will be programmed
1932 *
780a87f7
JG
1933 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1934 *
1da177e4 1935 * LOCKING:
0cba632b 1936 * PCI/etc. bus probe sem.
1da177e4
LT
1937 *
1938 */
1939static void ata_set_mode(struct ata_port *ap)
1940{
8cbd6df1 1941 unsigned int xfer_shift;
1da177e4
LT
1942 u8 xfer_mode;
1943 int rc;
1944
1945 /* step 1: always set host PIO timings */
1946 rc = ata_host_set_pio(ap);
1947 if (rc)
1948 goto err_out;
1949
1950 /* step 2: choose the best data xfer mode */
1951 xfer_mode = xfer_shift = 0;
1952 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1953 if (rc)
1954 goto err_out;
1955
1956 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1957 if (xfer_shift != ATA_SHIFT_PIO)
1958 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1959
1960 /* step 4: update devices' xfer mode */
1961 ata_dev_set_mode(ap, &ap->device[0]);
1962 ata_dev_set_mode(ap, &ap->device[1]);
1963
1964 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1965 return;
1966
1967 if (ap->ops->post_set_mode)
1968 ap->ops->post_set_mode(ap);
1969
1da177e4
LT
1970 return;
1971
1972err_out:
1973 ata_port_disable(ap);
1974}
1975
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (warn after this long)
 *	@tmout: overall timeout (give up after this long)
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	RETURNS:
 *	0 when BSY cleared, 1 if the device was still busy at @tmout.
 *
 *	LOCKING: None.
 *
 */

unsigned int ata_busy_sleep (struct ata_port *ap,
			     unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* first, a quick busy-wait before falling back to sleeping */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	/* still busy past the "impatience" deadline: warn, keep polling */
	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}
2021
/* Wait for the devices found by ata_devchk() to come back after a bus
 * reset: device 0 just needs BSY to clear; device 1 must first respond
 * to register writes (nsect/lbal read back the 0x01 signature values)
 * before its BSY wait.  Finishes by reselecting device 0. */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		/* post-reset signature: nsect == lbal == 1 */
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;	/* give up on device 1 */
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
2068
/**
 *	ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
 *	@ap: Port to reset and probe
 *
 *	Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
 *	probe the bus.  Not often used these days.
 *
 *	RETURNS:
 *	ata_busy_sleep() result: 0 when BSY cleared, 1 on timeout.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 */

static unsigned int ata_bus_edd(struct ata_port *ap)
{
	struct ata_taskfile tf;
	unsigned long flags;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.ctl |= ATA_NIEN;
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* do bus reset */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_tf_to_host(ap, &tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */
	msleep(150);

	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}
2107
/* Perform an ATA software reset (SRST) by pulsing the SRST bit in the
 * device control register, then wait for the devices in @devmask to
 * come back.  Always returns 0. */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 */
	msleep(150);

	ata_bus_post_reset(ap, devmask);

	return 0;
}
2144
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present; SATA ports assume dev0 */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset: SRST if supported, else EDD (unless the
	 * controller does its own SATA reset)
	 */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	}

	if (rc)
		goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	/* err == 0x81 means dev0 OK, dev1 failed diagnostics */
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2245
057ace5e
JG
/* Log that DMA has been disabled for a blacklisted device. */
static void ata_pr_blacklisted(const struct ata_port *ap,
			       const struct ata_device *dev)
{
	printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
		ap->id, dev->devno);
}
2252
/* Model strings of devices with known-broken DMA.  Entries are compared
 * with strncmp() against the blank-trimmed IDENTIFY model string in
 * ata_dma_blacklisted().  NOTE(review): "SanDisk SDP3B-64" appears twice —
 * presumably a historical duplicate; harmless but redundant.
 */
static const char * const ata_dma_blacklist [] = {
	"WDC AC11000H",
	"WDC AC22100H",
	"WDC AC32500H",
	"WDC AC33100H",
	"WDC AC31600H",
	"WDC AC32100H",
	"WDC AC23200L",
	"Compaq CRD-8241B",
	"CRD-8400B",
	"CRD-8480B",
	"CRD-8482B",
	"CRD-84",
	"SanDisk SDP3B",
	"SanDisk SDP3B-64",
	"SANYO CD-ROM CRD",
	"HITACHI CDR-8",
	"HITACHI CDR-8335",
	"HITACHI CDR-8435",
	"Toshiba CD-ROM XM-6202B",
	"TOSHIBA CD-ROM XM-1702BC",
	"CD-532E-A",
	"E-IDE CD-ROM CR-840",
	"CD-ROM Drive/F5A",
	"WPI CDD-820",
	"SAMSUNG CD-ROM SC-148C",
	"SAMSUNG CD-ROM SC",
	"SanDisk SDP3B-64",
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",
	"_NEC DV5800A",
};
2284
057ace5e 2285static int ata_dma_blacklisted(const struct ata_device *dev)
1da177e4
LT
2286{
2287 unsigned char model_num[40];
2288 char *s;
2289 unsigned int len;
2290 int i;
2291
2292 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2293 sizeof(model_num));
2294 s = &model_num[0];
2295 len = strnlen(s, sizeof(model_num));
2296
2297 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2298 while ((len > 0) && (s[len - 1] == ' ')) {
2299 len--;
2300 s[len] = 0;
2301 }
2302
2303 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
2304 if (!strncmp(ata_dma_blacklist[i], s, len))
2305 return 1;
2306
2307 return 0;
2308}
2309
057ace5e 2310static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
1da177e4 2311{
057ace5e 2312 const struct ata_device *master, *slave;
1da177e4
LT
2313 unsigned int mask;
2314
2315 master = &ap->device[0];
2316 slave = &ap->device[1];
2317
2318 assert (ata_dev_present(master) || ata_dev_present(slave));
2319
2320 if (shift == ATA_SHIFT_UDMA) {
2321 mask = ap->udma_mask;
2322 if (ata_dev_present(master)) {
2323 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
057ace5e 2324 if (ata_dma_blacklisted(master)) {
1da177e4
LT
2325 mask = 0;
2326 ata_pr_blacklisted(ap, master);
2327 }
2328 }
2329 if (ata_dev_present(slave)) {
2330 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
057ace5e 2331 if (ata_dma_blacklisted(slave)) {
1da177e4
LT
2332 mask = 0;
2333 ata_pr_blacklisted(ap, slave);
2334 }
2335 }
2336 }
2337 else if (shift == ATA_SHIFT_MWDMA) {
2338 mask = ap->mwdma_mask;
2339 if (ata_dev_present(master)) {
2340 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
057ace5e 2341 if (ata_dma_blacklisted(master)) {
1da177e4
LT
2342 mask = 0;
2343 ata_pr_blacklisted(ap, master);
2344 }
2345 }
2346 if (ata_dev_present(slave)) {
2347 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
057ace5e 2348 if (ata_dma_blacklisted(slave)) {
1da177e4
LT
2349 mask = 0;
2350 ata_pr_blacklisted(ap, slave);
2351 }
2352 }
2353 }
2354 else if (shift == ATA_SHIFT_PIO) {
2355 mask = ap->pio_mask;
2356 if (ata_dev_present(master)) {
2357 /* spec doesn't return explicit support for
2358 * PIO0-2, so we fake it
2359 */
2360 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2361 tmp_mode <<= 3;
2362 tmp_mode |= 0x7;
2363 mask &= tmp_mode;
2364 }
2365 if (ata_dev_present(slave)) {
2366 /* spec doesn't return explicit support for
2367 * PIO0-2, so we fake it
2368 */
2369 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
2370 tmp_mode <<= 3;
2371 tmp_mode |= 0x7;
2372 mask &= tmp_mode;
2373 }
2374 }
2375 else {
2376 mask = 0xffffffff; /* shut up compiler warning */
2377 BUG();
2378 }
2379
2380 return mask;
2381}
2382
2383/* find greatest bit */
2384static int fgb(u32 bitmap)
2385{
2386 unsigned int i;
2387 int x = -1;
2388
2389 for (i = 0; i < 32; i++)
2390 if (bitmap & (1 << i))
2391 x = i;
2392
2393 return x;
2394}
2395
2396/**
2397 * ata_choose_xfer_mode - attempt to find best transfer mode
2398 * @ap: Port for which an xfer mode will be selected
2399 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
2400 * @xfer_shift_out: (output) bit shift that selects this mode
2401 *
0cba632b
JG
2402 * Based on host and device capabilities, determine the
2403 * maximum transfer mode that is amenable to all.
2404 *
1da177e4 2405 * LOCKING:
0cba632b 2406 * PCI/etc. bus probe sem.
1da177e4
LT
2407 *
2408 * RETURNS:
2409 * Zero on success, negative on error.
2410 */
2411
057ace5e 2412static int ata_choose_xfer_mode(const struct ata_port *ap,
1da177e4
LT
2413 u8 *xfer_mode_out,
2414 unsigned int *xfer_shift_out)
2415{
2416 unsigned int mask, shift;
2417 int x, i;
2418
2419 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
2420 shift = xfer_mode_classes[i].shift;
2421 mask = ata_get_mode_mask(ap, shift);
2422
2423 x = fgb(mask);
2424 if (x >= 0) {
2425 *xfer_mode_out = xfer_mode_classes[i].base + x;
2426 *xfer_shift_out = shift;
2427 return 0;
2428 }
2429 }
2430
2431 return -1;
2432}
2433
2434/**
2435 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2436 * @ap: Port associated with device @dev
2437 * @dev: Device to which command will be sent
2438 *
780a87f7
JG
2439 * Issue SET FEATURES - XFER MODE command to device @dev
2440 * on port @ap.
2441 *
1da177e4 2442 * LOCKING:
0cba632b 2443 * PCI/etc. bus probe sem.
1da177e4
LT
2444 */
2445
2446static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2447{
a0123703 2448 struct ata_taskfile tf;
1da177e4
LT
2449
2450 /* set up set-features taskfile */
2451 DPRINTK("set features - xfer mode\n");
2452
a0123703
TH
2453 ata_tf_init(ap, &tf, dev->devno);
2454 tf.command = ATA_CMD_SET_FEATURES;
2455 tf.feature = SETFEATURES_XFER;
2456 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2457 tf.protocol = ATA_PROT_NODATA;
2458 tf.nsect = dev->xfer_mode;
1da177e4 2459
a0123703
TH
2460 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
2461 printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
2462 ap->id);
1da177e4 2463 ata_port_disable(ap);
a0123703 2464 }
1da177e4
LT
2465
2466 DPRINTK("EXIT\n");
2467}
2468
59a10b17
AL
2469/**
2470 * ata_dev_reread_id - Reread the device identify device info
2471 * @ap: port where the device is
2472 * @dev: device to reread the identify device info
2473 *
2474 * LOCKING:
2475 */
2476
2477static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
2478{
a0123703 2479 struct ata_taskfile tf;
59a10b17 2480
a0123703 2481 ata_tf_init(ap, &tf, dev->devno);
59a10b17
AL
2482
2483 if (dev->class == ATA_DEV_ATA) {
a0123703 2484 tf.command = ATA_CMD_ID_ATA;
59a10b17
AL
2485 DPRINTK("do ATA identify\n");
2486 } else {
a0123703 2487 tf.command = ATA_CMD_ID_ATAPI;
59a10b17
AL
2488 DPRINTK("do ATAPI identify\n");
2489 }
2490
a0123703
TH
2491 tf.flags |= ATA_TFLAG_DEVICE;
2492 tf.protocol = ATA_PROT_PIO;
59a10b17 2493
a0123703
TH
2494 if (ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
2495 dev->id, sizeof(dev->id)))
59a10b17
AL
2496 goto err_out;
2497
59a10b17
AL
2498 swap_buf_le16(dev->id, ATA_ID_WORDS);
2499
2500 ata_dump_id(dev);
2501
2502 DPRINTK("EXIT\n");
2503
2504 return;
2505err_out:
a0123703 2506 printk(KERN_ERR "ata%u: failed to reread ID, disabled\n", ap->id);
59a10b17
AL
2507 ata_port_disable(ap);
2508}
2509
8bf62ece
AL
2510/**
2511 * ata_dev_init_params - Issue INIT DEV PARAMS command
2512 * @ap: Port associated with device @dev
2513 * @dev: Device to which command will be sent
2514 *
2515 * LOCKING:
2516 */
2517
2518static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2519{
a0123703 2520 struct ata_taskfile tf;
8bf62ece
AL
2521 u16 sectors = dev->id[6];
2522 u16 heads = dev->id[3];
2523
2524 /* Number of sectors per track 1-255. Number of heads 1-16 */
2525 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2526 return;
2527
2528 /* set up init dev params taskfile */
2529 DPRINTK("init dev params \n");
2530
a0123703
TH
2531 ata_tf_init(ap, &tf, dev->devno);
2532 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2533 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2534 tf.protocol = ATA_PROT_NODATA;
2535 tf.nsect = sectors;
2536 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 2537
a0123703
TH
2538 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
2539 printk(KERN_ERR "ata%u: failed to init parameters, disabled\n",
2540 ap->id);
8bf62ece 2541 ata_port_disable(ap);
a0123703 2542 }
8bf62ece
AL
2543
2544 DPRINTK("EXIT\n");
2545}
2546
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command,
 *	restore any scatterlist length trimmed for the 32-bit pad,
 *	and copy device-to-host pad bytes back to the caller's buffer.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	assert(qc->flags & ATA_QCFLAG_DMAMAP);
	assert(sg != NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		assert(qc->n_elem == 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* n_elem may be 0 when the whole transfer fit in the pad */
		if (qc->n_elem)
			dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		/* single-buffer case; length 0 means it was never mapped */
		if (sg_dma_len(&sg[0]) > 0)
			dma_unmap_single(ap->host_set->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
2605
2606/**
2607 * ata_fill_sg - Fill PCI IDE PRD table
2608 * @qc: Metadata associated with taskfile to be transferred
2609 *
780a87f7
JG
2610 * Fill PCI IDE PRD (scatter-gather) table with segments
2611 * associated with the current disk command.
2612 *
1da177e4 2613 * LOCKING:
780a87f7 2614 * spin_lock_irqsave(host_set lock)
1da177e4
LT
2615 *
2616 */
2617static void ata_fill_sg(struct ata_queued_cmd *qc)
2618{
1da177e4 2619 struct ata_port *ap = qc->ap;
cedc9a47
JG
2620 struct scatterlist *sg;
2621 unsigned int idx;
1da177e4 2622
cedc9a47 2623 assert(qc->__sg != NULL);
1da177e4
LT
2624 assert(qc->n_elem > 0);
2625
2626 idx = 0;
cedc9a47 2627 ata_for_each_sg(sg, qc) {
1da177e4
LT
2628 u32 addr, offset;
2629 u32 sg_len, len;
2630
2631 /* determine if physical DMA addr spans 64K boundary.
2632 * Note h/w doesn't support 64-bit, so we unconditionally
2633 * truncate dma_addr_t to u32.
2634 */
2635 addr = (u32) sg_dma_address(sg);
2636 sg_len = sg_dma_len(sg);
2637
2638 while (sg_len) {
2639 offset = addr & 0xffff;
2640 len = sg_len;
2641 if ((offset + sg_len) > 0x10000)
2642 len = 0x10000 - offset;
2643
2644 ap->prd[idx].addr = cpu_to_le32(addr);
2645 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2646 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2647
2648 idx++;
2649 sg_len -= len;
2650 addr += len;
2651 }
2652 }
2653
2654 if (idx)
2655 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2656}
2657/**
2658 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2659 * @qc: Metadata associated with taskfile to check
2660 *
780a87f7
JG
2661 * Allow low-level driver to filter ATA PACKET commands, returning
2662 * a status indicating whether or not it is OK to use DMA for the
2663 * supplied PACKET command.
2664 *
1da177e4 2665 * LOCKING:
0cba632b
JG
2666 * spin_lock_irqsave(host_set lock)
2667 *
1da177e4
LT
2668 * RETURNS: 0 when ATAPI DMA can be used
2669 * nonzero otherwise
2670 */
2671int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2672{
2673 struct ata_port *ap = qc->ap;
2674 int rc = 0; /* Assume ATAPI DMA is OK by default */
2675
2676 if (ap->ops->check_atapi_dma)
2677 rc = ap->ops->check_atapi_dma(qc);
2678
2679 return rc;
2680}
2681/**
2682 * ata_qc_prep - Prepare taskfile for submission
2683 * @qc: Metadata associated with taskfile to be prepared
2684 *
780a87f7
JG
2685 * Prepare ATA taskfile for submission.
2686 *
1da177e4
LT
2687 * LOCKING:
2688 * spin_lock_irqsave(host_set lock)
2689 */
2690void ata_qc_prep(struct ata_queued_cmd *qc)
2691{
2692 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2693 return;
2694
2695 ata_fill_sg(qc);
2696}
2697
0cba632b
JG
2698/**
2699 * ata_sg_init_one - Associate command with memory buffer
2700 * @qc: Command to be associated
2701 * @buf: Memory buffer
2702 * @buflen: Length of memory buffer, in bytes.
2703 *
2704 * Initialize the data-related elements of queued_cmd @qc
2705 * to point to a single memory buffer, @buf of byte length @buflen.
2706 *
2707 * LOCKING:
2708 * spin_lock_irqsave(host_set lock)
2709 */
2710
1da177e4
LT
2711void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2712{
2713 struct scatterlist *sg;
2714
2715 qc->flags |= ATA_QCFLAG_SINGLE;
2716
2717 memset(&qc->sgent, 0, sizeof(qc->sgent));
cedc9a47 2718 qc->__sg = &qc->sgent;
1da177e4 2719 qc->n_elem = 1;
cedc9a47 2720 qc->orig_n_elem = 1;
1da177e4
LT
2721 qc->buf_virt = buf;
2722
cedc9a47 2723 sg = qc->__sg;
f0612bbc 2724 sg_init_one(sg, buf, buflen);
1da177e4
LT
2725}
2726
0cba632b
JG
2727/**
2728 * ata_sg_init - Associate command with scatter-gather table.
2729 * @qc: Command to be associated
2730 * @sg: Scatter-gather table.
2731 * @n_elem: Number of elements in s/g table.
2732 *
2733 * Initialize the data-related elements of queued_cmd @qc
2734 * to point to a scatter-gather table @sg, containing @n_elem
2735 * elements.
2736 *
2737 * LOCKING:
2738 * spin_lock_irqsave(host_set lock)
2739 */
2740
1da177e4
LT
2741void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2742 unsigned int n_elem)
2743{
2744 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 2745 qc->__sg = sg;
1da177e4 2746 qc->n_elem = n_elem;
cedc9a47 2747 qc->orig_n_elem = n_elem;
1da177e4
LT
2748}
2749
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *	Transfers that do not end on a 32-bit boundary are trimmed
 *	and their tail bytes routed through the per-tag pad buffer.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI commands may need this padding */
		assert(qc->dev->class == ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, the tail bytes must be staged into the pad
		 * buffer before the transfer starts
		 */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* the whole transfer may have fit in the pad buffer */
	if (!sg->length) {
		sg_dma_address(sg) = 0;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
skip_map:
	sg_dma_len(sg) = sg->length;

	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
2815
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *	The last element is trimmed to a 32-bit boundary, with the tail
 *	bytes routed through the per-tag pad buffer; an element trimmed
 *	to zero length is dropped from the mapping entirely.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->id);
	assert(qc->flags & ATA_QCFLAG_SG);

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI commands may need this padding */
		assert(qc->dev->class == ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		/* for writes, stage the tail bytes into the pad buffer now */
		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop a fully-trimmed last element from the mapping */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
2900
40e8c82c
TH
/**
 *	ata_poll_qc_complete - turn irq back on and finish qc
 *	@qc: Command to complete
 *
 *	Re-enables the port interrupt (disabled for polled PIO) and
 *	completes the command, all under the host_set lock.
 *	(NOTE: the previous kernel-doc documented an @err_mask
 *	parameter this function does not take.)
 *
 *	LOCKING:
 *	None.  (grabs host lock)
 */

void ata_poll_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_irq_on(ap);
	ata_qc_complete(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}
2920
1da177e4
LT
/**
 *	ata_pio_poll -
 *	@ap: the target ata_port
 *
 *	Poll the status register while the device is busy, advancing
 *	the PIO host state machine: stay in the *_POLL state while BSY
 *	is set, fall back to the corresponding non-poll state when it
 *	clears, and flag HSM_ST_TMOUT with AC_ERR_TIMEOUT on timeout.
 *
 *	LOCKING:
 *	None.  (executing in kernel thread context)
 *
 *	RETURNS:
 *	timeout value to use
 */

static unsigned long ata_pio_poll(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 status;
	unsigned int poll_state = HSM_ST_UNKNOWN;
	unsigned int reg_state = HSM_ST_UNKNOWN;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	assert(qc != NULL);

	/* map the current HSM state to its poll / non-poll pair */
	switch (ap->hsm_task_state) {
	case HSM_ST:
	case HSM_ST_POLL:
		poll_state = HSM_ST_POLL;
		reg_state = HSM_ST;
		break;
	case HSM_ST_LAST:
	case HSM_ST_LAST_POLL:
		poll_state = HSM_ST_LAST_POLL;
		reg_state = HSM_ST_LAST;
		break;
	default:
		BUG();
		break;
	}

	status = ata_chk_status(ap);
	if (status & ATA_BUSY) {
		if (time_after(jiffies, ap->pio_task_timeout)) {
			qc->err_mask |= AC_ERR_TIMEOUT;
			ap->hsm_task_state = HSM_ST_TMOUT;
			return 0;
		}
		/* still busy: re-poll after a short pause */
		ap->hsm_task_state = poll_state;
		return ATA_SHORT_PAUSE;
	}

	ap->hsm_task_state = reg_state;
	return 0;
}
2972
/**
 *	ata_pio_complete - check if drive is busy or idle
 *	@ap: the target ata_port
 *
 *	LOCKING:
 *	None.  (executing in kernel thread context)
 *
 *	RETURNS:
 *	Zero if qc completed.
 *	Non-zero if has next.
 */

static int ata_pio_complete (struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 drv_stat;

	/*
	 * This is purely heuristic.  This is a fast path.  Sometimes when
	 * we enter, BSY will be cleared in a chk-status or two.  If not,
	 * the drive is probably seeking or something.  Snooze for a couple
	 * msecs, then chk-status again.  If still busy, fall back to
	 * HSM_ST_LAST_POLL state.
	 */
	drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
	if (drv_stat & ATA_BUSY) {
		msleep(2);
		drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
		if (drv_stat & ATA_BUSY) {
			/* still busy: hand off to the polling path */
			ap->hsm_task_state = HSM_ST_LAST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
			return 1;
		}
	}

	qc = ata_qc_from_tag(ap, ap->active_tag);
	assert(qc != NULL);

	drv_stat = ata_wait_idle(ap);
	if (!ata_ok(drv_stat)) {
		qc->err_mask |= __ac_err_mask(drv_stat);
		ap->hsm_task_state = HSM_ST_ERR;
		return 1;
	}

	ap->hsm_task_state = HSM_ST_IDLE;

	assert(qc->err_mask == 0);
	ata_poll_qc_complete(qc);

	/* another command may start at this point */

	return 0;
}
3027
0baab86b
EF
3028
3029/**
6f0ef4fa 3030 * swap_buf_le16 - swap halves of 16-words in place
0baab86b
EF
3031 * @buf: Buffer to swap
3032 * @buf_words: Number of 16-bit words in buffer.
3033 *
3034 * Swap halves of 16-bit words if needed to convert from
3035 * little-endian byte order to native cpu byte order, or
3036 * vice-versa.
3037 *
3038 * LOCKING:
6f0ef4fa 3039 * Inherited from caller.
0baab86b 3040 */
1da177e4
LT
3041void swap_buf_le16(u16 *buf, unsigned int buf_words)
3042{
3043#ifdef __BIG_ENDIAN
3044 unsigned int i;
3045
3046 for (i = 0; i < buf_words; i++)
3047 buf[i] = le16_to_cpu(buf[i]);
3048#endif /* __BIG_ENDIAN */
3049}
3050
6ae4cfb5
AL
/**
 *	ata_mmio_data_xfer - Transfer data by MMIO
 *	@ap: port to read/write
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by MMIO.
 *	An odd trailing byte is handled via a zero-padded 16-bit
 *	bounce word.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
			       unsigned int buflen, int write_data)
{
	unsigned int i;
	unsigned int words = buflen >> 1;
	u16 *buf16 = (u16 *) buf;
	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;

	/* Transfer multiple of 2 bytes */
	if (write_data) {
		for (i = 0; i < words; i++)
			writew(le16_to_cpu(buf16[i]), mmio);
	} else {
		for (i = 0; i < words; i++)
			buf16[i] = cpu_to_le16(readw(mmio));
	}

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			writew(le16_to_cpu(align_buf[0]), mmio);
		} else {
			align_buf[0] = cpu_to_le16(readw(mmio));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3095
6ae4cfb5
AL
/**
 *	ata_pio_data_xfer - Transfer data by PIO
 *	@ap: port to read/write
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO
 *	(insw/outsw).  An odd trailing byte is handled via a
 *	zero-padded 16-bit bounce word.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
			      unsigned int buflen, int write_data)
{
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		outsw(ap->ioaddr.data_addr, buf, words);
	else
		insw(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3134
6ae4cfb5
AL
3135/**
3136 * ata_data_xfer - Transfer data from/to the data register.
3137 * @ap: port to read/write
3138 * @buf: data buffer
3139 * @buflen: buffer length
3140 * @do_write: read/write
3141 *
3142 * Transfer data from/to the device data register.
3143 *
3144 * LOCKING:
3145 * Inherited from caller.
6ae4cfb5
AL
3146 */
3147
1da177e4
LT
3148static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3149 unsigned int buflen, int do_write)
3150{
a1bd9e68
AC
3151 /* Make the crap hardware pay the costs not the good stuff */
3152 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3153 unsigned long flags;
3154 local_irq_save(flags);
3155 if (ap->flags & ATA_FLAG_MMIO)
3156 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3157 else
3158 ata_pio_data_xfer(ap, buf, buflen, do_write);
3159 local_irq_restore(flags);
3160 } else {
3161 if (ap->flags & ATA_FLAG_MMIO)
3162 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3163 else
3164 ata_pio_data_xfer(ap, buf, buflen, do_write);
3165 }
1da177e4
LT
3166}
3167
6ae4cfb5
AL
/**
 *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 *	@qc: Command on going
 *
 *	Transfer ATA_SECT_SIZE of data from/to the ATA device,
 *	advancing the command's scatterlist cursor.  Highmem pages
 *	are kmapped with local interrupts disabled for the copy.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the transfer moves the HSM to its final state */
	if (qc->cursect == (qc->nsect - 1))
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* irqs off: KM_IRQ0 mapping must not be preempted */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
	}

	/* advance the scatterlist cursor by one sector */
	qc->cursect++;
	qc->cursg_ofs++;

	if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
3223
07f6f7d0
AL
3224/**
3225 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3226 * @qc: Command on going
3227 *
3228 * Transfer one or many ATA_SECT_SIZE of data from/to the
3229 * ATA device for the DRQ request.
3230 *
3231 * LOCKING:
3232 * Inherited from caller.
3233 */
3234
3235static void ata_pio_sectors(struct ata_queued_cmd *qc)
3236{
3237 if (is_multi_taskfile(&qc->tf)) {
3238 /* READ/WRITE MULTIPLE */
3239 unsigned int nsect;
3240
3241 assert(qc->dev->multi_count);
3242
3243 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3244 while (nsect--)
3245 ata_pio_sector(qc);
3246 } else
3247 ata_pio_sector(qc);
3248}
3249
c71c1857
AL
3250/**
3251 * atapi_send_cdb - Write CDB bytes to hardware
3252 * @ap: Port to which ATAPI device is attached.
3253 * @qc: Taskfile currently active
3254 *
3255 * When device has indicated its readiness to accept
3256 * a CDB, this function is called. Send the CDB.
3257 *
3258 * LOCKING:
3259 * caller.
3260 */
3261
3262static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3263{
3264 /* send SCSI cdb */
3265 DPRINTK("send cdb\n");
3266 assert(ap->cdb_len >= 12);
3267
3268 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3269 ata_altstatus(ap); /* flush */
3270
3271 switch (qc->tf.protocol) {
3272 case ATA_PROT_ATAPI:
3273 ap->hsm_task_state = HSM_ST;
3274 break;
3275 case ATA_PROT_ATAPI_NODATA:
3276 ap->hsm_task_state = HSM_ST_LAST;
3277 break;
3278 case ATA_PROT_ATAPI_DMA:
3279 ap->hsm_task_state = HSM_ST_LAST;
3280 /* initiate bmdma */
3281 ap->ops->bmdma_start(qc);
3282 break;
3283 }
3284}
3285
3286/**
e27486db
AL
3287 * ata_pio_first_block - Write first data block to hardware
3288 * @ap: Port to which ATA/ATAPI device is attached.
c71c1857
AL
3289 *
3290 * When device has indicated its readiness to accept
3291 * the data, this function sends out the CDB or
3292 * the first data block by PIO.
3293 * After this,
3294 * - If polling, ata_pio_task() handles the rest.
3295 * - Otherwise, interrupt handler takes over.
3296 *
3297 * LOCKING:
3298 * Kernel thread context (may sleep)
fbcdd80b
AL
3299 *
3300 * RETURNS:
3301 * Zero if irq handler takes over
3302 * Non-zero if has next (polling).
c71c1857
AL
3303 */
3304
fbcdd80b 3305static int ata_pio_first_block(struct ata_port *ap)
c71c1857 3306{
c71c1857
AL
3307 struct ata_queued_cmd *qc;
3308 u8 status;
3309 unsigned long flags;
fbcdd80b 3310 int has_next;
c71c1857
AL
3311
3312 qc = ata_qc_from_tag(ap, ap->active_tag);
3313 assert(qc != NULL);
3314 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3315
fbcdd80b
AL
3316 /* if polling, we will stay in the work queue after sending the data.
3317 * otherwise, interrupt handler takes over after sending the data.
3318 */
3319 has_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3320
c71c1857
AL
3321 /* sleep-wait for BSY to clear */
3322 DPRINTK("busy wait\n");
fbcdd80b 3323 if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT)) {
a4f16610 3324 qc->err_mask |= AC_ERR_ATA_BUS;
fbcdd80b 3325 ap->hsm_task_state = HSM_ST_TMOUT;
c71c1857 3326 goto err_out;
fbcdd80b 3327 }
c71c1857
AL
3328
3329 /* make sure DRQ is set */
3330 status = ata_chk_status(ap);
fbcdd80b
AL
3331 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3332 /* device status error */
a4f16610 3333 qc->err_mask |= AC_ERR_ATA_BUS;
fbcdd80b 3334 ap->hsm_task_state = HSM_ST_ERR;
c71c1857 3335 goto err_out;
fbcdd80b 3336 }
c71c1857
AL
3337
3338 /* Send the CDB (atapi) or the first data block (ata pio out).
3339 * During the state transition, interrupt handler shouldn't
3340 * be invoked before the data transfer is complete and
3341 * hsm_task_state is changed. Hence, the following locking.
3342 */
3343 spin_lock_irqsave(&ap->host_set->lock, flags);
3344
3345 if (qc->tf.protocol == ATA_PROT_PIO) {
3346 /* PIO data out protocol.
3347 * send first data block.
3348 */
3349
07f6f7d0
AL
3350 /* ata_pio_sectors() might change the state to HSM_ST_LAST.
3351 * so, the state is changed here before ata_pio_sectors().
c71c1857
AL
3352 */
3353 ap->hsm_task_state = HSM_ST;
07f6f7d0 3354 ata_pio_sectors(qc);
c71c1857
AL
3355 ata_altstatus(ap); /* flush */
3356 } else
3357 /* send CDB */
3358 atapi_send_cdb(ap, qc);
3359
fbcdd80b
AL
3360 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3361
c71c1857
AL
3362 /* if polling, ata_pio_task() handles the rest.
3363 * otherwise, interrupt handler takes over from here.
3364 */
fbcdd80b 3365 return has_next;
c71c1857
AL
3366
3367err_out:
fbcdd80b 3368 return 1; /* has next */
c71c1857
AL
3369}
3370
6ae4cfb5
AL
3371/**
3372 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3373 * @qc: Command on going
3374 * @bytes: number of bytes
3375 *
3376 * Transfer Transfer data from/to the ATAPI device.
3377 *
3378 * LOCKING:
3379 * Inherited from caller.
3380 *
3381 */
3382
1da177e4
LT
3383static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3384{
3385 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 3386 struct scatterlist *sg = qc->__sg;
1da177e4
LT
3387 struct ata_port *ap = qc->ap;
3388 struct page *page;
3389 unsigned char *buf;
3390 unsigned int offset, count;
3391
563a6e1f 3392 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 3393 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
3394
3395next_sg:
563a6e1f 3396 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 3397 /*
563a6e1f
AL
3398 * The end of qc->sg is reached and the device expects
3399 * more data to transfer. In order not to overrun qc->sg
3400 * and fulfill length specified in the byte count register,
3401 * - for read case, discard trailing data from the device
3402 * - for write case, padding zero data to the device
3403 */
3404 u16 pad_buf[1] = { 0 };
3405 unsigned int words = bytes >> 1;
3406 unsigned int i;
3407
3408 if (words) /* warning if bytes > 1 */
7fb6ec28 3409 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
563a6e1f
AL
3410 ap->id, bytes);
3411
3412 for (i = 0; i < words; i++)
3413 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3414
14be71f4 3415 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
3416 return;
3417 }
3418
cedc9a47 3419 sg = &qc->__sg[qc->cursg];
1da177e4 3420
1da177e4
LT
3421 page = sg->page;
3422 offset = sg->offset + qc->cursg_ofs;
3423
3424 /* get the current page and offset */
3425 page = nth_page(page, (offset >> PAGE_SHIFT));
3426 offset %= PAGE_SIZE;
3427
6952df03 3428 /* don't overrun current sg */
32529e01 3429 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
3430
3431 /* don't cross page boundaries */
3432 count = min(count, (unsigned int)PAGE_SIZE - offset);
3433
7282aa4b
AL
3434 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3435
91b8b313
AL
3436 if (PageHighMem(page)) {
3437 unsigned long flags;
3438
3439 local_irq_save(flags);
3440 buf = kmap_atomic(page, KM_IRQ0);
083958d3 3441
91b8b313
AL
3442 /* do the actual data transfer */
3443 ata_data_xfer(ap, buf + offset, count, do_write);
7282aa4b 3444
91b8b313
AL
3445 kunmap_atomic(buf, KM_IRQ0);
3446 local_irq_restore(flags);
3447 } else {
3448 buf = page_address(page);
3449 ata_data_xfer(ap, buf + offset, count, do_write);
3450 }
7282aa4b 3451
1da177e4
LT
3452 bytes -= count;
3453 qc->curbytes += count;
3454 qc->cursg_ofs += count;
3455
32529e01 3456 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
3457 qc->cursg++;
3458 qc->cursg_ofs = 0;
3459 }
3460
563a6e1f 3461 if (bytes)
1da177e4 3462 goto next_sg;
1da177e4
LT
3463}
3464
6ae4cfb5
AL
3465/**
3466 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3467 * @qc: Command on going
3468 *
3469 * Transfer Transfer data from/to the ATAPI device.
3470 *
3471 * LOCKING:
3472 * Inherited from caller.
6ae4cfb5
AL
3473 */
3474
1da177e4
LT
3475static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3476{
3477 struct ata_port *ap = qc->ap;
3478 struct ata_device *dev = qc->dev;
3479 unsigned int ireason, bc_lo, bc_hi, bytes;
3480 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3481
3482 ap->ops->tf_read(ap, &qc->tf);
3483 ireason = qc->tf.nsect;
3484 bc_lo = qc->tf.lbam;
3485 bc_hi = qc->tf.lbah;
3486 bytes = (bc_hi << 8) | bc_lo;
3487
3488 /* shall be cleared to zero, indicating xfer of data */
3489 if (ireason & (1 << 0))
3490 goto err_out;
3491
3492 /* make sure transfer direction matches expected */
3493 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3494 if (do_write != i_write)
3495 goto err_out;
3496
312f7da2
AL
3497 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3498
1da177e4
LT
3499 __atapi_pio_bytes(qc, bytes);
3500
3501 return;
3502
3503err_out:
3504 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3505 ap->id, dev->devno);
11a56d24 3506 qc->err_mask |= AC_ERR_HSM;
14be71f4 3507 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
3508}
3509
3510/**
6f0ef4fa
RD
3511 * ata_pio_block - start PIO on a block
3512 * @ap: the target ata_port
1da177e4
LT
3513 *
3514 * LOCKING:
0cba632b 3515 * None. (executing in kernel thread context)
1da177e4
LT
3516 */
3517
3518static void ata_pio_block(struct ata_port *ap)
3519{
3520 struct ata_queued_cmd *qc;
3521 u8 status;
3522
3523 /*
6f0ef4fa 3524 * This is purely heuristic. This is a fast path.
1da177e4
LT
3525 * Sometimes when we enter, BSY will be cleared in
3526 * a chk-status or two. If not, the drive is probably seeking
3527 * or something. Snooze for a couple msecs, then
3528 * chk-status again. If still busy, fall back to
14be71f4 3529 * HSM_ST_POLL state.
1da177e4
LT
3530 */
3531 status = ata_busy_wait(ap, ATA_BUSY, 5);
3532 if (status & ATA_BUSY) {
3533 msleep(2);
3534 status = ata_busy_wait(ap, ATA_BUSY, 10);
3535 if (status & ATA_BUSY) {
14be71f4 3536 ap->hsm_task_state = HSM_ST_POLL;
1da177e4
LT
3537 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3538 return;
3539 }
3540 }
3541
3542 qc = ata_qc_from_tag(ap, ap->active_tag);
3543 assert(qc != NULL);
3544
fe79e683
AL
3545 /* check error */
3546 if (status & (ATA_ERR | ATA_DF)) {
3547 qc->err_mask |= AC_ERR_DEV;
3548 ap->hsm_task_state = HSM_ST_ERR;
3549 return;
3550 }
3551
3552 /* transfer data if any */
1da177e4 3553 if (is_atapi_taskfile(&qc->tf)) {
fe79e683 3554 /* DRQ=0 means no more data to transfer */
1da177e4 3555 if ((status & ATA_DRQ) == 0) {
14be71f4 3556 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
3557 return;
3558 }
3559
3560 atapi_pio_bytes(qc);
3561 } else {
3562 /* handle BSY=0, DRQ=0 as error */
3563 if ((status & ATA_DRQ) == 0) {
11a56d24 3564 qc->err_mask |= AC_ERR_HSM;
14be71f4 3565 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
3566 return;
3567 }
3568
07f6f7d0 3569 ata_pio_sectors(qc);
1da177e4 3570 }
467b16d4
AL
3571
3572 ata_altstatus(ap); /* flush */
1da177e4
LT
3573}
3574
3575static void ata_pio_error(struct ata_port *ap)
3576{
3577 struct ata_queued_cmd *qc;
a7dac447 3578
1da177e4
LT
3579 qc = ata_qc_from_tag(ap, ap->active_tag);
3580 assert(qc != NULL);
3581
000080c3
AL
3582 if (qc->tf.command != ATA_CMD_PACKET)
3583 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3584
1c848984
AL
3585 /* make sure qc->err_mask is available to
3586 * know what's wrong and recover
3587 */
3588 assert(qc->err_mask);
3589
14be71f4 3590 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 3591
a22e2eb0 3592 ata_poll_qc_complete(qc);
1da177e4
LT
3593}
3594
3595static void ata_pio_task(void *_data)
3596{
3597 struct ata_port *ap = _data;
7fb6ec28 3598 unsigned long timeout;
fbcdd80b 3599 int has_next;
7fb6ec28
JG
3600
3601fsm_start:
3602 timeout = 0;
fbcdd80b 3603 has_next = 1;
1da177e4 3604
14be71f4 3605 switch (ap->hsm_task_state) {
e27486db 3606 case HSM_ST_FIRST:
fbcdd80b
AL
3607 has_next = ata_pio_first_block(ap);
3608 break;
e27486db 3609
14be71f4 3610 case HSM_ST:
1da177e4
LT
3611 ata_pio_block(ap);
3612 break;
3613
14be71f4 3614 case HSM_ST_LAST:
fbcdd80b 3615 has_next = ata_pio_complete(ap);
1da177e4
LT
3616 break;
3617
14be71f4
AL
3618 case HSM_ST_POLL:
3619 case HSM_ST_LAST_POLL:
1da177e4
LT
3620 timeout = ata_pio_poll(ap);
3621 break;
3622
14be71f4
AL
3623 case HSM_ST_TMOUT:
3624 case HSM_ST_ERR:
1da177e4
LT
3625 ata_pio_error(ap);
3626 return;
467b16d4
AL
3627
3628 default:
3629 BUG();
3630 return;
1da177e4
LT
3631 }
3632
3633 if (timeout)
95064379 3634 ata_queue_delayed_pio_task(ap, timeout);
fbcdd80b 3635 else if (has_next)
7fb6ec28 3636 goto fsm_start;
1da177e4
LT
3637}
3638
1da177e4
LT
3639/**
3640 * ata_qc_timeout - Handle timeout of queued command
3641 * @qc: Command that timed out
3642 *
3643 * Some part of the kernel (currently, only the SCSI layer)
3644 * has noticed that the active command on port @ap has not
3645 * completed after a specified length of time. Handle this
3646 * condition by disabling DMA (if necessary) and completing
3647 * transactions, with error if necessary.
3648 *
3649 * This also handles the case of the "lost interrupt", where
3650 * for some reason (possibly hardware bug, possibly driver bug)
3651 * an interrupt was not delivered to the driver, even though the
3652 * transaction completed successfully.
3653 *
3654 * LOCKING:
0cba632b 3655 * Inherited from SCSI layer (none, can sleep)
1da177e4
LT
3656 */
3657
3658static void ata_qc_timeout(struct ata_queued_cmd *qc)
3659{
3660 struct ata_port *ap = qc->ap;
b8f6153e 3661 struct ata_host_set *host_set = ap->host_set;
1da177e4 3662 u8 host_stat = 0, drv_stat;
b8f6153e 3663 unsigned long flags;
1da177e4
LT
3664
3665 DPRINTK("ENTER\n");
3666
b8f6153e
JG
3667 spin_lock_irqsave(&host_set->lock, flags);
3668
1da177e4
LT
3669 switch (qc->tf.protocol) {
3670
3671 case ATA_PROT_DMA:
3672 case ATA_PROT_ATAPI_DMA:
3673 host_stat = ap->ops->bmdma_status(ap);
3674
3675 /* before we do anything else, clear DMA-Start bit */
b73fc89f 3676 ap->ops->bmdma_stop(qc);
1da177e4
LT
3677
3678 /* fall through */
3679
3680 default:
3681 ata_altstatus(ap);
3682 drv_stat = ata_chk_status(ap);
3683
3684 /* ack bmdma irq events */
3685 ap->ops->irq_clear(ap);
3686
3687 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3688 ap->id, qc->tf.command, drv_stat, host_stat);
3689
312f7da2
AL
3690 ap->hsm_task_state = HSM_ST_IDLE;
3691
1da177e4 3692 /* complete taskfile transaction */
a22e2eb0 3693 qc->err_mask |= ac_err_mask(drv_stat);
1da177e4
LT
3694 break;
3695 }
b8f6153e
JG
3696
3697 spin_unlock_irqrestore(&host_set->lock, flags);
3698
a72ec4ce
TH
3699 ata_eh_qc_complete(qc);
3700
1da177e4
LT
3701 DPRINTK("EXIT\n");
3702}
3703
3704/**
3705 * ata_eng_timeout - Handle timeout of queued command
3706 * @ap: Port on which timed-out command is active
3707 *
3708 * Some part of the kernel (currently, only the SCSI layer)
3709 * has noticed that the active command on port @ap has not
3710 * completed after a specified length of time. Handle this
3711 * condition by disabling DMA (if necessary) and completing
3712 * transactions, with error if necessary.
3713 *
3714 * This also handles the case of the "lost interrupt", where
3715 * for some reason (possibly hardware bug, possibly driver bug)
3716 * an interrupt was not delivered to the driver, even though the
3717 * transaction completed successfully.
3718 *
3719 * LOCKING:
3720 * Inherited from SCSI layer (none, can sleep)
3721 */
3722
3723void ata_eng_timeout(struct ata_port *ap)
3724{
3725 struct ata_queued_cmd *qc;
3726
3727 DPRINTK("ENTER\n");
3728
3729 qc = ata_qc_from_tag(ap, ap->active_tag);
e12669e7
JG
3730 if (qc)
3731 ata_qc_timeout(qc);
3732 else {
1da177e4
LT
3733 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3734 ap->id);
3735 goto out;
3736 }
3737
1da177e4
LT
3738out:
3739 DPRINTK("EXIT\n");
3740}
3741
3742/**
3743 * ata_qc_new - Request an available ATA command, for queueing
3744 * @ap: Port associated with device @dev
3745 * @dev: Device from whom we request an available command structure
3746 *
3747 * LOCKING:
0cba632b 3748 * None.
1da177e4
LT
3749 */
3750
3751static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3752{
3753 struct ata_queued_cmd *qc = NULL;
3754 unsigned int i;
3755
3756 for (i = 0; i < ATA_MAX_QUEUE; i++)
3757 if (!test_and_set_bit(i, &ap->qactive)) {
3758 qc = ata_qc_from_tag(ap, i);
3759 break;
3760 }
3761
3762 if (qc)
3763 qc->tag = i;
3764
3765 return qc;
3766}
3767
3768/**
3769 * ata_qc_new_init - Request an available ATA command, and initialize it
3770 * @ap: Port associated with device @dev
3771 * @dev: Device from whom we request an available command structure
3772 *
3773 * LOCKING:
0cba632b 3774 * None.
1da177e4
LT
3775 */
3776
3777struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3778 struct ata_device *dev)
3779{
3780 struct ata_queued_cmd *qc;
3781
3782 qc = ata_qc_new(ap);
3783 if (qc) {
1da177e4
LT
3784 qc->scsicmd = NULL;
3785 qc->ap = ap;
3786 qc->dev = dev;
1da177e4 3787
2c13b7ce 3788 ata_qc_reinit(qc);
1da177e4
LT
3789 }
3790
3791 return qc;
3792}
3793
1da177e4
LT
3794/**
3795 * ata_qc_free - free unused ata_queued_cmd
3796 * @qc: Command to complete
3797 *
3798 * Designed to free unused ata_queued_cmd object
3799 * in case something prevents using it.
3800 *
3801 * LOCKING:
0cba632b 3802 * spin_lock_irqsave(host_set lock)
1da177e4
LT
3803 */
3804void ata_qc_free(struct ata_queued_cmd *qc)
3805{
4ba946e9
TH
3806 struct ata_port *ap = qc->ap;
3807 unsigned int tag;
3808
1da177e4 3809 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 3810
4ba946e9
TH
3811 qc->flags = 0;
3812 tag = qc->tag;
3813 if (likely(ata_tag_valid(tag))) {
3814 if (tag == ap->active_tag)
3815 ap->active_tag = ATA_TAG_POISON;
3816 qc->tag = ATA_TAG_POISON;
3817 clear_bit(tag, &ap->qactive);
3818 }
1da177e4
LT
3819}
3820
3821/**
3822 * ata_qc_complete - Complete an active ATA command
3823 * @qc: Command to complete
8e8b77dd 3824 * @err_mask: ATA Status register contents
0cba632b
JG
3825 *
3826 * Indicate to the mid and upper layers that an ATA
3827 * command has completed, with either an ok or not-ok status.
1da177e4
LT
3828 *
3829 * LOCKING:
0cba632b 3830 * spin_lock_irqsave(host_set lock)
1da177e4
LT
3831 */
3832
a22e2eb0 3833void ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 3834{
1da177e4
LT
3835 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3836 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3837
3838 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3839 ata_sg_clean(qc);
3840
3f3791d3
AL
3841 /* atapi: mark qc as inactive to prevent the interrupt handler
3842 * from completing the command twice later, before the error handler
3843 * is called. (when rc != 0 and atapi request sense is needed)
3844 */
3845 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3846
1da177e4 3847 /* call completion callback */
77853bf2 3848 qc->complete_fn(qc);
1da177e4
LT
3849}
3850
3851static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3852{
3853 struct ata_port *ap = qc->ap;
3854
3855 switch (qc->tf.protocol) {
3856 case ATA_PROT_DMA:
3857 case ATA_PROT_ATAPI_DMA:
3858 return 1;
3859
3860 case ATA_PROT_ATAPI:
3861 case ATA_PROT_PIO:
3862 case ATA_PROT_PIO_MULT:
3863 if (ap->flags & ATA_FLAG_PIO_DMA)
3864 return 1;
3865
3866 /* fall through */
3867
3868 default:
3869 return 0;
3870 }
3871
3872 /* never reached */
3873}
3874
3875/**
3876 * ata_qc_issue - issue taskfile to device
3877 * @qc: command to issue to device
3878 *
3879 * Prepare an ATA command to submission to device.
3880 * This includes mapping the data into a DMA-able
3881 * area, filling in the S/G table, and finally
3882 * writing the taskfile to hardware, starting the command.
3883 *
3884 * LOCKING:
3885 * spin_lock_irqsave(host_set lock)
3886 *
3887 * RETURNS:
9a3d9eb0 3888 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
3889 */
3890
9a3d9eb0 3891unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
3892{
3893 struct ata_port *ap = qc->ap;
3894
3895 if (ata_should_dma_map(qc)) {
3896 if (qc->flags & ATA_QCFLAG_SG) {
3897 if (ata_sg_setup(qc))
8e436af9 3898 goto sg_err;
1da177e4
LT
3899 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3900 if (ata_sg_setup_one(qc))
8e436af9 3901 goto sg_err;
1da177e4
LT
3902 }
3903 } else {
3904 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3905 }
3906
3907 ap->ops->qc_prep(qc);
3908
3909 qc->ap->active_tag = qc->tag;
3910 qc->flags |= ATA_QCFLAG_ACTIVE;
3911
3912 return ap->ops->qc_issue(qc);
3913
8e436af9
TH
3914sg_err:
3915 qc->flags &= ~ATA_QCFLAG_DMAMAP;
9a3d9eb0 3916 return AC_ERR_SYSTEM;
1da177e4
LT
3917}
3918
0baab86b 3919
1da177e4
LT
3920/**
3921 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
3922 * @qc: command to issue to device
3923 *
3924 * Using various libata functions and hooks, this function
3925 * starts an ATA command. ATA commands are grouped into
3926 * classes called "protocols", and issuing each type of protocol
3927 * is slightly different.
3928 *
0baab86b
EF
3929 * May be used as the qc_issue() entry in ata_port_operations.
3930 *
1da177e4
LT
3931 * LOCKING:
3932 * spin_lock_irqsave(host_set lock)
3933 *
3934 * RETURNS:
9a3d9eb0 3935 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
3936 */
3937
9a3d9eb0 3938unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
3939{
3940 struct ata_port *ap = qc->ap;
3941
e50362ec
AL
3942 /* Use polling pio if the LLD doesn't handle
3943 * interrupt driven pio and atapi CDB interrupt.
3944 */
3945 if (ap->flags & ATA_FLAG_PIO_POLLING) {
3946 switch (qc->tf.protocol) {
3947 case ATA_PROT_PIO:
3948 case ATA_PROT_ATAPI:
3949 case ATA_PROT_ATAPI_NODATA:
3950 qc->tf.flags |= ATA_TFLAG_POLLING;
3951 break;
3952 case ATA_PROT_ATAPI_DMA:
3953 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3954 BUG();
3955 break;
3956 default:
3957 break;
3958 }
3959 }
3960
312f7da2 3961 /* select the device */
1da177e4
LT
3962 ata_dev_select(ap, qc->dev->devno, 1, 0);
3963
312f7da2 3964 /* start the command */
1da177e4
LT
3965 switch (qc->tf.protocol) {
3966 case ATA_PROT_NODATA:
312f7da2
AL
3967 if (qc->tf.flags & ATA_TFLAG_POLLING)
3968 ata_qc_set_polling(qc);
3969
e5338254 3970 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
3971 ap->hsm_task_state = HSM_ST_LAST;
3972
3973 if (qc->tf.flags & ATA_TFLAG_POLLING)
3974 queue_work(ata_wq, &ap->pio_task);
3975
1da177e4
LT
3976 break;
3977
3978 case ATA_PROT_DMA:
312f7da2
AL
3979 assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
3980
1da177e4
LT
3981 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3982 ap->ops->bmdma_setup(qc); /* set up bmdma */
3983 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 3984 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
3985 break;
3986
312f7da2
AL
3987 case ATA_PROT_PIO:
3988 if (qc->tf.flags & ATA_TFLAG_POLLING)
3989 ata_qc_set_polling(qc);
3990
e5338254 3991 ata_tf_to_host(ap, &qc->tf);
312f7da2 3992
54f00389
AL
3993 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3994 /* PIO data out protocol */
3995 ap->hsm_task_state = HSM_ST_FIRST;
f6ef65e6 3996 ata_queue_pio_task(ap);
54f00389
AL
3997
3998 /* always send first data block using
e27486db 3999 * the ata_pio_task() codepath.
54f00389 4000 */
312f7da2 4001 } else {
54f00389
AL
4002 /* PIO data in protocol */
4003 ap->hsm_task_state = HSM_ST;
4004
4005 if (qc->tf.flags & ATA_TFLAG_POLLING)
f6ef65e6 4006 ata_queue_pio_task(ap);
54f00389
AL
4007
4008 /* if polling, ata_pio_task() handles the rest.
4009 * otherwise, interrupt handler takes over from here.
4010 */
312f7da2
AL
4011 }
4012
1da177e4
LT
4013 break;
4014
4015 case ATA_PROT_ATAPI:
1da177e4 4016 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
4017 if (qc->tf.flags & ATA_TFLAG_POLLING)
4018 ata_qc_set_polling(qc);
4019
e5338254 4020 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 4021
312f7da2
AL
4022 ap->hsm_task_state = HSM_ST_FIRST;
4023
4024 /* send cdb by polling if no cdb interrupt */
4025 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4026 (qc->tf.flags & ATA_TFLAG_POLLING))
f6ef65e6 4027 ata_queue_packet_task(ap);
1da177e4
LT
4028 break;
4029
4030 case ATA_PROT_ATAPI_DMA:
312f7da2
AL
4031 assert(!(qc->tf.flags & ATA_TFLAG_POLLING));
4032
1da177e4
LT
4033 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4034 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
4035 ap->hsm_task_state = HSM_ST_FIRST;
4036
4037 /* send cdb by polling if no cdb interrupt */
4038 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
f6ef65e6 4039 ata_queue_packet_task(ap);
1da177e4
LT
4040 break;
4041
4042 default:
4043 WARN_ON(1);
9a3d9eb0 4044 return AC_ERR_SYSTEM;
1da177e4
LT
4045 }
4046
4047 return 0;
4048}
4049
4050/**
0baab86b 4051 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
1da177e4
LT
4052 * @qc: Info associated with this ATA transaction.
4053 *
4054 * LOCKING:
4055 * spin_lock_irqsave(host_set lock)
4056 */
4057
4058static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
4059{
4060 struct ata_port *ap = qc->ap;
4061 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
4062 u8 dmactl;
4063 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4064
4065 /* load PRD table addr. */
4066 mb(); /* make sure PRD table writes are visible to controller */
4067 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
4068
4069 /* specify data direction, triple-check start bit is clear */
4070 dmactl = readb(mmio + ATA_DMA_CMD);
4071 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
4072 if (!rw)
4073 dmactl |= ATA_DMA_WR;
4074 writeb(dmactl, mmio + ATA_DMA_CMD);
4075
4076 /* issue r/w command */
4077 ap->ops->exec_command(ap, &qc->tf);
4078}
4079
4080/**
b73fc89f 4081 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
1da177e4
LT
4082 * @qc: Info associated with this ATA transaction.
4083 *
4084 * LOCKING:
4085 * spin_lock_irqsave(host_set lock)
4086 */
4087
4088static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
4089{
4090 struct ata_port *ap = qc->ap;
4091 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4092 u8 dmactl;
4093
4094 /* start host DMA transaction */
4095 dmactl = readb(mmio + ATA_DMA_CMD);
4096 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
4097
4098 /* Strictly, one may wish to issue a readb() here, to
4099 * flush the mmio write. However, control also passes
4100 * to the hardware at this point, and it will interrupt
4101 * us when we are to resume control. So, in effect,
4102 * we don't care when the mmio write flushes.
4103 * Further, a read of the DMA status register _immediately_
4104 * following the write may not be what certain flaky hardware
4105 * is expected, so I think it is best to not add a readb()
4106 * without first all the MMIO ATA cards/mobos.
4107 * Or maybe I'm just being paranoid.
4108 */
4109}
4110
4111/**
4112 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
4113 * @qc: Info associated with this ATA transaction.
4114 *
4115 * LOCKING:
4116 * spin_lock_irqsave(host_set lock)
4117 */
4118
4119static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
4120{
4121 struct ata_port *ap = qc->ap;
4122 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
4123 u8 dmactl;
4124
4125 /* load PRD table addr. */
4126 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
4127
4128 /* specify data direction, triple-check start bit is clear */
4129 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4130 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
4131 if (!rw)
4132 dmactl |= ATA_DMA_WR;
4133 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4134
4135 /* issue r/w command */
4136 ap->ops->exec_command(ap, &qc->tf);
4137}
4138
4139/**
4140 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
4141 * @qc: Info associated with this ATA transaction.
4142 *
4143 * LOCKING:
4144 * spin_lock_irqsave(host_set lock)
4145 */
4146
4147static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
4148{
4149 struct ata_port *ap = qc->ap;
4150 u8 dmactl;
4151
4152 /* start host DMA transaction */
4153 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4154 outb(dmactl | ATA_DMA_START,
4155 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4156}
4157
0baab86b
EF
4158
4159/**
4160 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
4161 * @qc: Info associated with this ATA transaction.
4162 *
4163 * Writes the ATA_DMA_START flag to the DMA command register.
4164 *
4165 * May be used as the bmdma_start() entry in ata_port_operations.
4166 *
4167 * LOCKING:
4168 * spin_lock_irqsave(host_set lock)
4169 */
1da177e4
LT
4170void ata_bmdma_start(struct ata_queued_cmd *qc)
4171{
4172 if (qc->ap->flags & ATA_FLAG_MMIO)
4173 ata_bmdma_start_mmio(qc);
4174 else
4175 ata_bmdma_start_pio(qc);
4176}
4177
0baab86b
EF
4178
4179/**
4180 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
4181 * @qc: Info associated with this ATA transaction.
4182 *
4183 * Writes address of PRD table to device's PRD Table Address
4184 * register, sets the DMA control register, and calls
4185 * ops->exec_command() to start the transfer.
4186 *
4187 * May be used as the bmdma_setup() entry in ata_port_operations.
4188 *
4189 * LOCKING:
4190 * spin_lock_irqsave(host_set lock)
4191 */
1da177e4
LT
4192void ata_bmdma_setup(struct ata_queued_cmd *qc)
4193{
4194 if (qc->ap->flags & ATA_FLAG_MMIO)
4195 ata_bmdma_setup_mmio(qc);
4196 else
4197 ata_bmdma_setup_pio(qc);
4198}
4199
0baab86b
EF
4200
4201/**
4202 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
decc6d0b 4203 * @ap: Port associated with this ATA transaction.
0baab86b
EF
4204 *
4205 * Clear interrupt and error flags in DMA status register.
4206 *
4207 * May be used as the irq_clear() entry in ata_port_operations.
4208 *
4209 * LOCKING:
4210 * spin_lock_irqsave(host_set lock)
4211 */
4212
1da177e4
LT
4213void ata_bmdma_irq_clear(struct ata_port *ap)
4214{
4215 if (ap->flags & ATA_FLAG_MMIO) {
4216 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
4217 writeb(readb(mmio), mmio);
4218 } else {
4219 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
4220 outb(inb(addr), addr);
4221 }
4222
4223}
4224
0baab86b
EF
4225
4226/**
4227 * ata_bmdma_status - Read PCI IDE BMDMA status
decc6d0b 4228 * @ap: Port associated with this ATA transaction.
0baab86b
EF
4229 *
4230 * Read and return BMDMA status register.
4231 *
4232 * May be used as the bmdma_status() entry in ata_port_operations.
4233 *
4234 * LOCKING:
4235 * spin_lock_irqsave(host_set lock)
4236 */
4237
1da177e4
LT
4238u8 ata_bmdma_status(struct ata_port *ap)
4239{
4240 u8 host_stat;
4241 if (ap->flags & ATA_FLAG_MMIO) {
4242 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4243 host_stat = readb(mmio + ATA_DMA_STATUS);
4244 } else
ee500aab 4245 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1da177e4
LT
4246 return host_stat;
4247}
4248
0baab86b
EF
4249
4250/**
4251 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
b73fc89f 4252 * @qc: Command we are ending DMA for
0baab86b
EF
4253 *
4254 * Clears the ATA_DMA_START flag in the dma control register
4255 *
4256 * May be used as the bmdma_stop() entry in ata_port_operations.
4257 *
4258 * LOCKING:
4259 * spin_lock_irqsave(host_set lock)
4260 */
4261
b73fc89f 4262void ata_bmdma_stop(struct ata_queued_cmd *qc)
1da177e4 4263{
b73fc89f 4264 struct ata_port *ap = qc->ap;
1da177e4
LT
4265 if (ap->flags & ATA_FLAG_MMIO) {
4266 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4267
4268 /* clear start/stop bit */
4269 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
4270 mmio + ATA_DMA_CMD);
4271 } else {
4272 /* clear start/stop bit */
4273 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
4274 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4275 }
4276
4277 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
4278 ata_altstatus(ap); /* dummy read */
4279}
4280
/**
 * ata_host_intr - Handle host interrupt for given (port, task)
 * @ap: Port on which interrupt arrived (possibly...)
 * @qc: Taskfile currently active in engine
 *
 * Handle host interrupt for given queued command. Currently,
 * only DMA interrupts are handled. All other commands are
 * handled via polling with interrupts disabled (nIEN bit).
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 *
 * RETURNS:
 * One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state.
	 * States not listed below never legitimately raise INTRQ,
	 * so a hit there is treated as a spurious/shared irq.
	 */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus: reading it does not clear INTRQ, so a
	 * still-busy device can be left alone without losing the irq */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->id, qc->tf.protocol, ap->hsm_task_state, status);

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	/* check error */
	if (unlikely(status & (ATA_ERR | ATA_DF))) {
		qc->err_mask |= AC_ERR_DEV;
		ap->hsm_task_state = HSM_ST_ERR;
	}

	/* Host state machine: each case may retarget hsm_task_state and
	 * jump back to fsm_start to run the next state in the same irq. */
fsm_start:
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* check device status */
		if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
			/* Wrong status. Let EH handle this */
			qc->err_mask |= AC_ERR_ATA_BUS;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		atapi_send_cdb(ap, qc);

		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* no more data to transfer */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				qc->err_mask |= AC_ERR_ATA_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read: re-sample status and fall
				 * through to HSM_ST_LAST in this same irq */
				ata_altstatus(ap);
				status = ata_chk_status(ap);
				goto fsm_start;
			}
		}

		ata_altstatus(ap); /* flush */
		break;

	case HSM_ST_LAST:
		if (unlikely(status & ATA_DRQ)) {
			/* handle DRQ=1 as error */
			qc->err_mask |= AC_ERR_ATA_BUS;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
			ap->id, status);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		break;

	case HSM_ST_ERR:
		/* ATAPI errors are routine (sense follows), so only log
		 * for non-PACKET commands */
		if (qc->tf.command != ATA_CMD_PACKET)
			printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n",
			       ap->id, status, host_stat);

		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		assert(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;
		ata_qc_complete(qc);
		break;
	default:
		goto idle_irq;
	}

	return 1;	/* irq handled */

idle_irq:
	/* not ours (or device not ready): count it for diagnostics */
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		handled = 1;
		ata_irq_ack(ap, 0); /* debug trap */
		printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
	}
#endif
	return 0;	/* irq not handled */
}
4472
4473/**
4474 * ata_interrupt - Default ATA host interrupt handler
0cba632b
JG
4475 * @irq: irq line (unused)
4476 * @dev_instance: pointer to our ata_host_set information structure
1da177e4
LT
4477 * @regs: unused
4478 *
0cba632b
JG
4479 * Default interrupt handler for PCI IDE devices. Calls
4480 * ata_host_intr() for each port that is not disabled.
4481 *
1da177e4 4482 * LOCKING:
0cba632b 4483 * Obtains host_set lock during operation.
1da177e4
LT
4484 *
4485 * RETURNS:
0cba632b 4486 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
4487 */
4488
4489irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4490{
4491 struct ata_host_set *host_set = dev_instance;
4492 unsigned int i;
4493 unsigned int handled = 0;
4494 unsigned long flags;
4495
4496 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4497 spin_lock_irqsave(&host_set->lock, flags);
4498
4499 for (i = 0; i < host_set->n_ports; i++) {
4500 struct ata_port *ap;
4501
4502 ap = host_set->ports[i];
c1389503 4503 if (ap &&
312f7da2 4504 !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
1da177e4
LT
4505 struct ata_queued_cmd *qc;
4506
4507 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 4508 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 4509 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
4510 handled |= ata_host_intr(ap, qc);
4511 }
4512 }
4513
4514 spin_unlock_irqrestore(&host_set->lock, flags);
4515
4516 return IRQ_RETVAL(handled);
4517}
4518
0baab86b
EF
4519/**
4520 * ata_port_start - Set port up for dma.
4521 * @ap: Port to initialize
4522 *
4523 * Called just after data structures for each port are
4524 * initialized. Allocates space for PRD table.
4525 *
4526 * May be used as the port_start() entry in ata_port_operations.
4527 *
4528 * LOCKING:
6f0ef4fa 4529 * Inherited from caller.
0baab86b
EF
4530 */
4531
9b847548
JA
4532/*
4533 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4534 * without filling any other registers
4535 */
4536static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4537 u8 cmd)
4538{
4539 struct ata_taskfile tf;
4540 int err;
4541
4542 ata_tf_init(ap, &tf, dev->devno);
4543
4544 tf.command = cmd;
4545 tf.flags |= ATA_TFLAG_DEVICE;
4546 tf.protocol = ATA_PROT_NODATA;
4547
4548 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4549 if (err)
4550 printk(KERN_ERR "%s: ata command failed: %d\n",
4551 __FUNCTION__, err);
4552
4553 return err;
4554}
4555
4556static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4557{
4558 u8 cmd;
4559
4560 if (!ata_try_flush_cache(dev))
4561 return 0;
4562
4563 if (ata_id_has_flush_ext(dev->id))
4564 cmd = ATA_CMD_FLUSH_EXT;
4565 else
4566 cmd = ATA_CMD_FLUSH;
4567
4568 return ata_do_simple_cmd(ap, dev, cmd);
4569}
4570
/* Spin the drive down by issuing STANDBY IMMEDIATE. */
static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
}
4575
/* Bring the drive out of standby by issuing IDLE IMMEDIATE. */
static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
}
4580
/**
 * ata_device_resume - wakeup a previously suspended devices
 * @ap: port the device is connected to
 * @dev: device to resume
 *
 * Kick the drive back into action, by sending it an idle immediate
 * command and making sure its transfer mode matches between drive
 * and host.
 *
 * RETURNS:
 * Always 0 (errors from the drive commands are not propagated).
 */
int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
{
	/* redo transfer-mode setup once per port after a suspend */
	if (ap->flags & ATA_FLAG_SUSPENDED) {
		ap->flags &= ~ATA_FLAG_SUSPENDED;
		ata_set_mode(ap);
	}
	if (!ata_dev_present(dev))
		return 0;
	/* only disks need the explicit IDLE IMMEDIATE wakeup */
	if (dev->class == ATA_DEV_ATA)
		ata_start_drive(ap, dev);

	return 0;
}
4602
/**
 * ata_device_suspend - prepare a device for suspend
 * @ap: port the device is connected to
 * @dev: device to suspend
 *
 * Flush the cache on the drive, if appropriate, then issue a
 * standbynow command.
 *
 * RETURNS:
 * Always 0 (errors from the drive commands are not propagated).
 */
int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
{
	if (!ata_dev_present(dev))
		return 0;
	/* flush only makes sense for disks, not ATAPI devices */
	if (dev->class == ATA_DEV_ATA)
		ata_flush_cache(ap, dev);

	ata_standby_drive(ap, dev);
	/* remembered so that resume redoes the transfer-mode setup */
	ap->flags |= ATA_FLAG_SUSPENDED;
	return 0;
}
4621
1da177e4
LT
4622int ata_port_start (struct ata_port *ap)
4623{
4624 struct device *dev = ap->host_set->dev;
6037d6bb 4625 int rc;
1da177e4
LT
4626
4627 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4628 if (!ap->prd)
4629 return -ENOMEM;
4630
6037d6bb
JG
4631 rc = ata_pad_alloc(ap, dev);
4632 if (rc) {
cedc9a47 4633 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
6037d6bb 4634 return rc;
cedc9a47
JG
4635 }
4636
1da177e4
LT
4637 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4638
4639 return 0;
4640}
4641
0baab86b
EF
4642
/**
 * ata_port_stop - Undo ata_port_start()
 * @ap: Port to shut down
 *
 * Frees the PRD table.
 *
 * May be used as the port_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;

	/* release PRD table and pad buffer allocated in ata_port_start() */
	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}
4662
aa8f0dc6
JG
/* Default host_stop callback: unmap the MMIO window, if one was mapped. */
void ata_host_stop (struct ata_host_set *host_set)
{
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);
}
4668
4669
1da177e4
LT
/**
 * ata_host_remove - Unregister SCSI host structure with upper layers
 * @ap: Port to unregister
 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
	struct Scsi_Host *sh = ap->host;

	DPRINTK("ENTER\n");

	/* detach from the SCSI midlayer first, then stop the port */
	if (do_unregister)
		scsi_remove_host(sh);

	ap->ops->port_stop(ap);
}
4690
/**
 * ata_host_init - Initialize an ata_port structure
 * @ap: Structure to initialize
 * @host: associated SCSI mid-layer structure
 * @host_set: Collection of hosts to which @ap belongs
 * @ent: Probe information provided by low-level driver
 * @port_no: Port number associated with this ata_port
 *
 * Initialize a new ata_port structure, and its associated
 * scsi_host.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
			  struct ata_host_set *host_set,
			  const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	/* SCSI midlayer limits for this (single-channel) host */
	host->max_id = 16;
	host->max_lun = 1;
	host->max_channel = 1;
	host->unique_id = ata_unique_id++;
	host->max_cmd_len = 12;

	/* port starts disabled; probe enables it */
	ap->flags = ATA_FLAG_PORT_DISABLED;
	ap->id = host->unique_id;
	ap->host = host;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host_set = host_set;
	ap->port_no = port_no;
	/* in legacy mode the hardware port number comes from the probe
	 * entry rather than matching our logical index */
	ap->hard_port_no =
		ent->legacy_mode ? ent->hard_port_no : port_no;
	ap->pio_mask = ent->pio_mask;
	ap->mwdma_mask = ent->mwdma_mask;
	ap->udma_mask = ent->udma_mask;
	ap->flags |= ent->host_flags;
	ap->ops = ent->port_ops;
	ap->cbl = ATA_CBL_NONE;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

	INIT_WORK(&ap->pio_task, ata_pio_task, ap);
	INIT_LIST_HEAD(&ap->eh_done_q);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].devno = i;

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
4748
/**
 * ata_host_add - Attach low-level ATA driver to system
 * @ent: Information provided by low-level driver
 * @host_set: Collections of ports to which we add
 * @port_no: Port number associated with this host
 *
 * Attach low-level ATA driver to system.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * New ata_port on success, for NULL on error.
 */

static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
				      struct ata_host_set *host_set,
				      unsigned int port_no)
{
	struct Scsi_Host *host;
	struct ata_port *ap;
	int rc;

	DPRINTK("ENTER\n");
	/* the ata_port lives in the scsi_host's hostdata area */
	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!host)
		return NULL;

	ap = (struct ata_port *) &host->hostdata[0];

	ata_host_init(ap, host, host_set, ent, port_no);

	rc = ap->ops->port_start(ap);
	if (rc)
		goto err_out;

	return ap;

err_out:
	/* dropping the scsi_host also frees the embedded ata_port */
	scsi_host_put(host);
	return NULL;
}
4791
/**
 * ata_device_add - Register hardware device with ATA and SCSI layers
 * @ent: Probe information describing hardware device to be registered
 *
 * This function processes the information provided in the probe
 * information struct @ent, allocates the necessary ATA and SCSI
 * host information structures, initializes them, and registers
 * everything with requisite kernel subsystems.
 *
 * This function requests irqs, probes the ATA bus, and probes
 * the SCSI bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Number of ports registered. Zero on error (no ports registered).
 */

int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses);
	 * the ports[] pointer array is tacked onto the end */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
				 "bmdma 0x%lX irq %lu\n",
			ap->id,
			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
			ata_mode_string(xfer_mode_mask),
			ap->ioaddr.cmd_addr,
			ap->ioaddr.ctl_addr,
			ap->ioaddr.bmdma_addr,
			ent->irq);

		/* clear any stale interrupt condition before requesting irq */
		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		count++;
	}

	if (!count)
		goto err_free_ret;

	/* obtain irq, that is shared between channels */
	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			DRV_NAME, host_set))
		goto err_out;

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		int rc;

		ap = host_set->ports[i];

		DPRINTK("ata%u: probe begin\n", ap->id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: probe end\n", ap->id);

		if (rc) {
			/* FIXME: do something useful here?
			 * Current libata behavior will
			 * tear down everything when
			 * the module is removed
			 * or the h/w is unplugged.
			 */
		}

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
			       ap->id);
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_scan_host(ap);
	}

	/* stash host_set so ata_pci_remove_one() et al can find it */
	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out:
	/* tear down the ports registered so far */
	for (i = 0; i < count; i++) {
		ata_host_remove(host_set->ports[i], 1);
		scsi_host_put(host_set->ports[i]->host);
	}
err_free_ret:
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
4926
17b14451
AC
/**
 * ata_host_set_remove - PCI layer callback for device removal
 * @host_set: ATA host set that was removed
 *
 * Unregister all objects associated with this host set. Free those
 * objects.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */

void ata_host_set_remove(struct ata_host_set *host_set)
{
	struct ata_port *ap;
	unsigned int i;

	/* detach every port from the SCSI midlayer before freeing the
	 * shared irq, so no new commands can arrive */
	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];
		scsi_remove_host(ap->host);
	}

	free_irq(host_set->irq, host_set);

	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];

		ata_scsi_release(ap->host);

		/* give back the legacy ISA port ranges we claimed */
		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}

		scsi_host_put(ap->host);
	}

	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);

	kfree(host_set);
}
4972
1da177e4
LT
/**
 * ata_scsi_release - SCSI layer callback hook for host unload
 * @host: libata host to be unloaded
 *
 * Performs all duties necessary to shut down a libata port...
 * Kill port kthread, disable port, and release resources.
 *
 * LOCKING:
 * Inherited from SCSI layer.
 *
 * RETURNS:
 * One.
 */

int ata_scsi_release(struct Scsi_Host *host)
{
	/* ata_port is embedded in the scsi_host's hostdata area */
	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ata_host_remove(ap, 0);

	DPRINTK("EXIT\n");
	return 1;
}
4999
5000/**
5001 * ata_std_ports - initialize ioaddr with standard port offsets.
5002 * @ioaddr: IO address structure to be initialized
0baab86b
EF
5003 *
5004 * Utility function which initializes data_addr, error_addr,
5005 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5006 * device_addr, status_addr, and command_addr to standard offsets
5007 * relative to cmd_addr.
5008 *
5009 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 5010 */
0baab86b 5011
1da177e4
LT
5012void ata_std_ports(struct ata_ioports *ioaddr)
5013{
5014 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5015 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5016 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5017 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5018 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5019 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5020 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5021 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5022 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5023 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5024}
5025
5026static struct ata_probe_ent *
057ace5e 5027ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
1da177e4
LT
5028{
5029 struct ata_probe_ent *probe_ent;
5030
57f3bda8 5031 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
1da177e4
LT
5032 if (!probe_ent) {
5033 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5034 kobject_name(&(dev->kobj)));
5035 return NULL;
5036 }
5037
1da177e4
LT
5038 INIT_LIST_HEAD(&probe_ent->node);
5039 probe_ent->dev = dev;
5040
5041 probe_ent->sht = port->sht;
5042 probe_ent->host_flags = port->host_flags;
5043 probe_ent->pio_mask = port->pio_mask;
5044 probe_ent->mwdma_mask = port->mwdma_mask;
5045 probe_ent->udma_mask = port->udma_mask;
5046 probe_ent->port_ops = port->port_ops;
5047
5048 return probe_ent;
5049}
5050
0baab86b
EF
5051
5052
374b1873
JG
5053#ifdef CONFIG_PCI
5054
/* host_stop callback for PCI drivers: unmap the BAR via the PCI layer. */
void ata_pci_host_stop (struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	pci_iounmap(pdev, host_set->mmio_base);
}
5061
0baab86b
EF
/**
 * ata_pci_init_native_mode - Initialize native-mode driver
 * @pdev: pci device to be initialized
 * @port: array[2] of pointers to port info structures.
 * @ports: bitmap of ports present
 *
 * Utility function which allocates and initializes an
 * ata_probe_ent structure for a standard dual-port
 * PIO-based IDE controller. The returned ata_probe_ent
 * structure can be passed to ata_device_add(). The returned
 * ata_probe_ent structure should then be freed with kfree().
 *
 * The caller need only pass the address of the primary port, the
 * secondary will be deduced automatically. If the device has non
 * standard secondary port mappings this function can be called twice,
 * once for each interface.
 */

struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
{
	struct ata_probe_ent *probe_ent =
		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
	int p = 0;

	if (!probe_ent)
		return NULL;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->private_data = port[0]->private_data;

	/* primary channel: BAR 0 = taskfile, BAR 1 = control, BAR 4 = bmdma */
	if (ports & ATA_PORT_PRIMARY) {
		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
		probe_ent->port[p].altstatus_addr =
		probe_ent->port[p].ctl_addr =
			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
		ata_std_ports(&probe_ent->port[p]);
		p++;
	}

	/* secondary channel: BAR 2/3, bmdma block offset by 8 bytes */
	if (ports & ATA_PORT_SECONDARY) {
		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
		probe_ent->port[p].altstatus_addr =
		probe_ent->port[p].ctl_addr =
			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
		ata_std_ports(&probe_ent->port[p]);
		p++;
	}

	probe_ent->n_ports = p;
	return probe_ent;
}
5117
/* Build a probe entry for one legacy-mode channel at the fixed ISA
 * addresses/irqs.  Only ever called with port_num 0 (primary) or 1
 * (secondary) by ata_pci_init_one(); other values leave the irq and
 * taskfile addresses zero.
 */
static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
	if (!probe_ent)
		return NULL;

	probe_ent->legacy_mode = 1;
	probe_ent->n_ports = 1;
	probe_ent->hard_port_no = port_num;
	probe_ent->private_data = port->private_data;

	switch(port_num)
	{
		case 0:
			/* primary channel: classic 0x1f0/0x3f6, irq 14 */
			probe_ent->irq = 14;
			probe_ent->port[0].cmd_addr = 0x1f0;
			probe_ent->port[0].altstatus_addr =
			probe_ent->port[0].ctl_addr = 0x3f6;
			break;
		case 1:
			/* secondary channel: classic 0x170/0x376, irq 15 */
			probe_ent->irq = 15;
			probe_ent->port[0].cmd_addr = 0x170;
			probe_ent->port[0].altstatus_addr =
			probe_ent->port[0].ctl_addr = 0x376;
			break;
	}
	/* bmdma registers still come from BAR 4, 8 bytes per channel */
	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
	ata_std_ports(&probe_ent->port[0]);
	return probe_ent;
}
5150
/**
 * ata_pci_init_one - Initialize/register PCI IDE host controller
 * @pdev: Controller to be initialized
 * @port_info: Information from low-level host driver
 * @n_ports: Number of ports attached to host controller
 *
 * This is a helper function which can be called from a driver's
 * xxx_init_one() probe function if the hardware uses traditional
 * IDE taskfile registers.
 *
 * This function calls pci_enable_device(), reserves its register
 * regions, sets the dma mask, enables bus master mode, and calls
 * ata_device_add()
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, negative on errno-based value on error.
 */

int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
		      unsigned int n_ports)
{
	struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
	struct ata_port_info *port[2];
	u8 tmp8, mask;
	/* legacy_mode bits: (1<<0)=primary claimed, (1<<1)=secondary
	 * claimed, (1<<3)=controller is in legacy (compatibility) mode */
	unsigned int legacy_mode = 0;
	int disable_dev_on_err = 1;
	int rc;

	DPRINTK("ENTER\n");

	port[0] = port_info[0];
	if (n_ports > 1)
		port[1] = port_info[1];
	else
		port[1] = port[0];

	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = (1 << 3);
	}

	/* FIXME... */
	if ((!legacy_mode) && (n_ports > 2)) {
		printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
		n_ports = 2;
		/* For now */
	}

	/* FIXME: Really for ATA it isn't safe because the device may be
	   multi-purpose and we want to leave it alone if it was already
	   enabled. Secondly for shared use as Arjan says we want refcounting

	   Checking dev->is_enabled is insufficient as this is not set at
	   boot for the primary video which is BIOS enabled
	  */

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		disable_dev_on_err = 0;
		goto err_out;
	}

	/* FIXME: Should use platform specific mappers for legacy port ranges */
	if (legacy_mode) {
		if (!request_region(0x1f0, 8, "libata")) {
			struct resource *conflict, res;
			res.start = 0x1f0;
			res.end = 0x1f0 + 8 - 1;
			/* if the conflicting claim is our own earlier one,
			 * treat the range as available to us anyway */
			conflict = ____request_resource(&ioport_resource, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= (1 << 0);
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
			}
		} else
			legacy_mode |= (1 << 0);

		if (!request_region(0x170, 8, "libata")) {
			struct resource *conflict, res;
			res.start = 0x170;
			res.end = 0x170 + 8 - 1;
			conflict = ____request_resource(&ioport_resource, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= (1 << 1);
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
			}
		} else
			legacy_mode |= (1 << 1);
	}

	/* we have legacy mode, but all ports are unavailable */
	if (legacy_mode == (1 << 3)) {
		rc = -EBUSY;
		goto err_out_regions;
	}

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	/* legacy mode gets one probe entry per claimed channel;
	 * native mode gets a single entry covering both */
	if (legacy_mode) {
		if (legacy_mode & (1 << 0))
			probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
		if (legacy_mode & (1 << 1))
			probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
	} else {
		if (n_ports == 2)
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
		else
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
	}
	if (!probe_ent && !probe_ent2) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	pci_set_master(pdev);

	/* FIXME: check ata_device_add return */
	if (legacy_mode) {
		if (legacy_mode & (1 << 0))
			ata_device_add(probe_ent);
		if (legacy_mode & (1 << 1))
			ata_device_add(probe_ent2);
	} else
		ata_device_add(probe_ent);

	/* probe entries are consumed by ata_device_add(); kfree(NULL) ok */
	kfree(probe_ent);
	kfree(probe_ent2);

	return 0;

err_out_regions:
	if (legacy_mode & (1 << 0))
		release_region(0x1f0, 8);
	if (legacy_mode & (1 << 1))
		release_region(0x170, 8);
	pci_release_regions(pdev);
err_out:
	if (disable_dev_on_err)
		pci_disable_device(pdev);
	return rc;
}
5311
/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that
 * hot-unplug or module unload event has occurred.
 * Handle this by unregistering all objects associated
 * with this PCI device. Free those objects. Then finally
 * release PCI resources and disable device.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */

void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	/* host_set was stashed here by ata_device_add() */
	struct ata_host_set *host_set = dev_get_drvdata(dev);

	ata_host_set_remove(host_set);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
5336
/* move to PCI subsystem */
/* Read the config register described by @bits (1/2/4 bytes wide),
 * mask it, and report whether it matches the expected value.
 * Returns 1 on match, 0 on mismatch, -EINVAL for a bad width.
 */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
9b847548
JA
5370
/*
 * ata_pci_device_suspend - standard PCI suspend helper for libata drivers
 * @pdev: PCI device being suspended
 * @state: target PM message (unused here; D3hot is always entered)
 *
 * Saves PCI config space, disables the device, then drops it to D3hot.
 * Statement order matters: state must be saved while the device is
 * still accessible in D0.  Always returns 0.
 */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}
5378
/*
 * ata_pci_device_resume - standard PCI resume helper for libata drivers
 * @pdev: PCI device being resumed
 *
 * Mirror of ata_pci_device_suspend(): bring the device back to D0,
 * restore its saved config space, re-enable it and restore bus
 * mastering for DMA.  Always returns 0.
 *
 * NOTE(review): the return value of pci_enable_device() is ignored
 * here — presumably acceptable in this kernel's resume path, but a
 * failure would leave the device unusable silently; verify upstream.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
	return 0;
}
1da177e4
LT
5387#endif /* CONFIG_PCI */
5388
5389
1da177e4
LT
5390static int __init ata_init(void)
5391{
5392 ata_wq = create_workqueue("ata");
5393 if (!ata_wq)
5394 return -ENOMEM;
5395
5396 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5397 return 0;
5398}
5399
/*
 * ata_exit - libata module teardown
 *
 * Flushes and destroys the shared "ata" workqueue created in ata_init().
 */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
}

module_init(ata_init);
module_exit(ata_exit);
5407
67846b30
JG
5408static unsigned long ratelimit_time;
5409static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5410
5411int ata_ratelimit(void)
5412{
5413 int rc;
5414 unsigned long flags;
5415
5416 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5417
5418 if (time_after(jiffies, ratelimit_time)) {
5419 rc = 1;
5420 ratelimit_time = jiffies + (HZ/5);
5421 } else
5422 rc = 0;
5423
5424 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5425
5426 return rc;
5427}
5428
1da177e4
LT
5429/*
5430 * libata is essentially a library of internal helper functions for
5431 * low-level ATA host controller drivers. As such, the API/ABI is
5432 * likely to change as new drivers are added and updated.
5433 * Do not depend on ABI/API stability.
5434 */
5435
5436EXPORT_SYMBOL_GPL(ata_std_bios_param);
5437EXPORT_SYMBOL_GPL(ata_std_ports);
5438EXPORT_SYMBOL_GPL(ata_device_add);
17b14451 5439EXPORT_SYMBOL_GPL(ata_host_set_remove);
1da177e4
LT
5440EXPORT_SYMBOL_GPL(ata_sg_init);
5441EXPORT_SYMBOL_GPL(ata_sg_init_one);
5442EXPORT_SYMBOL_GPL(ata_qc_complete);
5443EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5444EXPORT_SYMBOL_GPL(ata_eng_timeout);
5445EXPORT_SYMBOL_GPL(ata_tf_load);
5446EXPORT_SYMBOL_GPL(ata_tf_read);
5447EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5448EXPORT_SYMBOL_GPL(ata_std_dev_select);
5449EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5450EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5451EXPORT_SYMBOL_GPL(ata_check_status);
5452EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
5453EXPORT_SYMBOL_GPL(ata_exec_command);
5454EXPORT_SYMBOL_GPL(ata_port_start);
5455EXPORT_SYMBOL_GPL(ata_port_stop);
aa8f0dc6 5456EXPORT_SYMBOL_GPL(ata_host_stop);
1da177e4
LT
5457EXPORT_SYMBOL_GPL(ata_interrupt);
5458EXPORT_SYMBOL_GPL(ata_qc_prep);
5459EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5460EXPORT_SYMBOL_GPL(ata_bmdma_start);
5461EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5462EXPORT_SYMBOL_GPL(ata_bmdma_status);
5463EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5464EXPORT_SYMBOL_GPL(ata_port_probe);
5465EXPORT_SYMBOL_GPL(sata_phy_reset);
5466EXPORT_SYMBOL_GPL(__sata_phy_reset);
5467EXPORT_SYMBOL_GPL(ata_bus_reset);
5468EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 5469EXPORT_SYMBOL_GPL(ata_ratelimit);
6f8b9958 5470EXPORT_SYMBOL_GPL(ata_busy_sleep);
1da177e4
LT
5471EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5472EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5473EXPORT_SYMBOL_GPL(ata_scsi_error);
5474EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5475EXPORT_SYMBOL_GPL(ata_scsi_release);
5476EXPORT_SYMBOL_GPL(ata_host_intr);
5477EXPORT_SYMBOL_GPL(ata_dev_classify);
5478EXPORT_SYMBOL_GPL(ata_dev_id_string);
6f2f3812 5479EXPORT_SYMBOL_GPL(ata_dev_config);
1da177e4 5480EXPORT_SYMBOL_GPL(ata_scsi_simulate);
a72ec4ce
TH
5481EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5482EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
1da177e4 5483
1bc4ccff 5484EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
5485EXPORT_SYMBOL_GPL(ata_timing_compute);
5486EXPORT_SYMBOL_GPL(ata_timing_merge);
5487
1da177e4
LT
5488#ifdef CONFIG_PCI
5489EXPORT_SYMBOL_GPL(pci_test_config_bits);
374b1873 5490EXPORT_SYMBOL_GPL(ata_pci_host_stop);
1da177e4
LT
5491EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5492EXPORT_SYMBOL_GPL(ata_pci_init_one);
5493EXPORT_SYMBOL_GPL(ata_pci_remove_one);
9b847548
JA
5494EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5495EXPORT_SYMBOL_GPL(ata_pci_device_resume);
1da177e4 5496#endif /* CONFIG_PCI */
9b847548
JA
5497
5498EXPORT_SYMBOL_GPL(ata_device_suspend);
5499EXPORT_SYMBOL_GPL(ata_device_resume);
5500EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5501EXPORT_SYMBOL_GPL(ata_scsi_device_resume);