Merge branch 'akpm' (patchbomb from Andrew Morton)
[linux-2.6-block.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
8c3d3d4b 4 * Maintained by: Tejun Heo <tj@kernel.org>
af36d7f0
JG
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
1da177e4
LT
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
378f058c 57#include <linux/scatterlist.h>
2dcb407e 58#include <linux/io.h>
79318057 59#include <linux/async.h>
e18086d6 60#include <linux/log2.h>
5a0e3ad6 61#include <linux/slab.h>
428ac5fc 62#include <linux/glob.h>
1da177e4 63#include <scsi/scsi.h>
193515d5 64#include <scsi/scsi_cmnd.h>
1da177e4
LT
65#include <scsi/scsi_host.h>
66#include <linux/libata.h>
1da177e4 67#include <asm/byteorder.h>
140b5e59 68#include <linux/cdrom.h>
9990b6f3 69#include <linux/ratelimit.h>
9ee4f393 70#include <linux/pm_runtime.h>
b7db04d9 71#include <linux/platform_device.h>
1da177e4
LT
72
73#include "libata.h"
d9027470 74#include "libata-transport.h"
fda0efc5 75
d7bb4cc7 76/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
77const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
78const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
79const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 80
029cfd6b 81const struct ata_port_operations ata_base_port_ops = {
0aa1113d 82 .prereset = ata_std_prereset,
203c75b8 83 .postreset = ata_std_postreset,
a1efdaba 84 .error_handler = ata_std_error_handler,
e4a9c373
DW
85 .sched_eh = ata_std_sched_eh,
86 .end_eh = ata_std_end_eh,
029cfd6b
TH
87};
88
89const struct ata_port_operations sata_port_ops = {
90 .inherits = &ata_base_port_ops,
91
92 .qc_defer = ata_std_qc_defer,
57c9efdf 93 .hardreset = sata_std_hardreset,
029cfd6b
TH
94};
95
3373efd8
TH
96static unsigned int ata_dev_init_params(struct ata_device *dev,
97 u16 heads, u16 sectors);
98static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
99static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 100static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 101
a78f57af 102atomic_t ata_print_id = ATOMIC_INIT(0);
1da177e4 103
33267325
TH
104struct ata_force_param {
105 const char *name;
106 unsigned int cbl;
107 int spd_limit;
108 unsigned long xfer_mask;
109 unsigned int horkage_on;
110 unsigned int horkage_off;
05944bdf 111 unsigned int lflags;
33267325
TH
112};
113
114struct ata_force_ent {
115 int port;
116 int device;
117 struct ata_force_param param;
118};
119
120static struct ata_force_ent *ata_force_tbl;
121static int ata_force_tbl_size;
122
123static char ata_force_param_buf[PAGE_SIZE] __initdata;
7afb4222
TH
124/* param_buf is thrown away after initialization, disallow read */
125module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
33267325
TH
126MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
127
2486fa56 128static int atapi_enabled = 1;
1623c81e 129module_param(atapi_enabled, int, 0444);
ad5d8eac 130MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
1623c81e 131
c5c61bda 132static int atapi_dmadir = 0;
95de719a 133module_param(atapi_dmadir, int, 0444);
ad5d8eac 134MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
95de719a 135
baf4fdfa
ML
136int atapi_passthru16 = 1;
137module_param(atapi_passthru16, int, 0444);
ad5d8eac 138MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
baf4fdfa 139
c3c013a2
JG
140int libata_fua = 0;
141module_param_named(fua, libata_fua, int, 0444);
ad5d8eac 142MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
c3c013a2 143
2dcb407e 144static int ata_ignore_hpa;
1e999736
AC
145module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
146MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
147
b3a70601
AC
148static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
149module_param_named(dma, libata_dma_mask, int, 0444);
150MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
151
87fbc5a0 152static int ata_probe_timeout;
a8601e5f
AM
153module_param(ata_probe_timeout, int, 0444);
154MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
155
6ebe9d86 156int libata_noacpi = 0;
d7d0dad6 157module_param_named(noacpi, libata_noacpi, int, 0444);
ad5d8eac 158MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
11ef697b 159
ae8d4ee7
AC
160int libata_allow_tpm = 0;
161module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
ad5d8eac 162MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
ae8d4ee7 163
e7ecd435
TH
164static int atapi_an;
165module_param(atapi_an, int, 0444);
166MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
167
1da177e4
LT
168MODULE_AUTHOR("Jeff Garzik");
169MODULE_DESCRIPTION("Library module for ATA devices");
170MODULE_LICENSE("GPL");
171MODULE_VERSION(DRV_VERSION);
172
0baab86b 173
9913ff8a
TH
174static bool ata_sstatus_online(u32 sstatus)
175{
176 return (sstatus & 0xf) == 0x3;
177}
178
1eca4365
TH
179/**
180 * ata_link_next - link iteration helper
181 * @link: the previous link, NULL to start
182 * @ap: ATA port containing links to iterate
183 * @mode: iteration mode, one of ATA_LITER_*
184 *
185 * LOCKING:
186 * Host lock or EH context.
aadffb68 187 *
1eca4365
TH
188 * RETURNS:
189 * Pointer to the next link.
aadffb68 190 */
1eca4365
TH
191struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
192 enum ata_link_iter_mode mode)
aadffb68 193{
1eca4365
TH
194 BUG_ON(mode != ATA_LITER_EDGE &&
195 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
196
aadffb68 197 /* NULL link indicates start of iteration */
1eca4365
TH
198 if (!link)
199 switch (mode) {
200 case ATA_LITER_EDGE:
201 case ATA_LITER_PMP_FIRST:
202 if (sata_pmp_attached(ap))
203 return ap->pmp_link;
204 /* fall through */
205 case ATA_LITER_HOST_FIRST:
206 return &ap->link;
207 }
aadffb68 208
1eca4365
TH
209 /* we just iterated over the host link, what's next? */
210 if (link == &ap->link)
211 switch (mode) {
212 case ATA_LITER_HOST_FIRST:
213 if (sata_pmp_attached(ap))
214 return ap->pmp_link;
215 /* fall through */
216 case ATA_LITER_PMP_FIRST:
217 if (unlikely(ap->slave_link))
b1c72916 218 return ap->slave_link;
1eca4365
TH
219 /* fall through */
220 case ATA_LITER_EDGE:
aadffb68 221 return NULL;
b1c72916 222 }
aadffb68 223
b1c72916
TH
224 /* slave_link excludes PMP */
225 if (unlikely(link == ap->slave_link))
226 return NULL;
227
1eca4365 228 /* we were over a PMP link */
aadffb68
TH
229 if (++link < ap->pmp_link + ap->nr_pmp_links)
230 return link;
1eca4365
TH
231
232 if (mode == ATA_LITER_PMP_FIRST)
233 return &ap->link;
234
aadffb68
TH
235 return NULL;
236}
237
1eca4365
TH
238/**
239 * ata_dev_next - device iteration helper
240 * @dev: the previous device, NULL to start
241 * @link: ATA link containing devices to iterate
242 * @mode: iteration mode, one of ATA_DITER_*
243 *
244 * LOCKING:
245 * Host lock or EH context.
246 *
247 * RETURNS:
248 * Pointer to the next device.
249 */
250struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
251 enum ata_dev_iter_mode mode)
252{
253 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
254 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
255
256 /* NULL dev indicates start of iteration */
257 if (!dev)
258 switch (mode) {
259 case ATA_DITER_ENABLED:
260 case ATA_DITER_ALL:
261 dev = link->device;
262 goto check;
263 case ATA_DITER_ENABLED_REVERSE:
264 case ATA_DITER_ALL_REVERSE:
265 dev = link->device + ata_link_max_devices(link) - 1;
266 goto check;
267 }
268
269 next:
270 /* move to the next one */
271 switch (mode) {
272 case ATA_DITER_ENABLED:
273 case ATA_DITER_ALL:
274 if (++dev < link->device + ata_link_max_devices(link))
275 goto check;
276 return NULL;
277 case ATA_DITER_ENABLED_REVERSE:
278 case ATA_DITER_ALL_REVERSE:
279 if (--dev >= link->device)
280 goto check;
281 return NULL;
282 }
283
284 check:
285 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
286 !ata_dev_enabled(dev))
287 goto next;
288 return dev;
289}
290
b1c72916
TH
291/**
292 * ata_dev_phys_link - find physical link for a device
293 * @dev: ATA device to look up physical link for
294 *
295 * Look up physical link which @dev is attached to. Note that
296 * this is different from @dev->link only when @dev is on slave
297 * link. For all other cases, it's the same as @dev->link.
298 *
299 * LOCKING:
300 * Don't care.
301 *
302 * RETURNS:
303 * Pointer to the found physical link.
304 */
305struct ata_link *ata_dev_phys_link(struct ata_device *dev)
306{
307 struct ata_port *ap = dev->link->ap;
308
309 if (!ap->slave_link)
310 return dev->link;
311 if (!dev->devno)
312 return &ap->link;
313 return ap->slave_link;
314}
315
33267325
TH
316/**
317 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 318 * @ap: ATA port of interest
33267325
TH
319 *
320 * Force cable type according to libata.force and whine about it.
321 * The last entry which has matching port number is used, so it
322 * can be specified as part of device force parameters. For
323 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
324 * same effect.
325 *
326 * LOCKING:
327 * EH context.
328 */
329void ata_force_cbl(struct ata_port *ap)
330{
331 int i;
332
333 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
334 const struct ata_force_ent *fe = &ata_force_tbl[i];
335
336 if (fe->port != -1 && fe->port != ap->print_id)
337 continue;
338
339 if (fe->param.cbl == ATA_CBL_NONE)
340 continue;
341
342 ap->cbl = fe->param.cbl;
a9a79dfe 343 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
33267325
TH
344 return;
345 }
346}
347
348/**
05944bdf 349 * ata_force_link_limits - force link limits according to libata.force
33267325
TH
350 * @link: ATA link of interest
351 *
05944bdf
TH
352 * Force link flags and SATA spd limit according to libata.force
353 * and whine about it. When only the port part is specified
354 * (e.g. 1:), the limit applies to all links connected to both
355 * the host link and all fan-out ports connected via PMP. If the
356 * device part is specified as 0 (e.g. 1.00:), it specifies the
357 * first fan-out link not the host link. Device number 15 always
b1c72916
TH
358 * points to the host link whether PMP is attached or not. If the
359 * controller has slave link, device number 16 points to it.
33267325
TH
360 *
361 * LOCKING:
362 * EH context.
363 */
05944bdf 364static void ata_force_link_limits(struct ata_link *link)
33267325 365{
05944bdf 366 bool did_spd = false;
b1c72916
TH
367 int linkno = link->pmp;
368 int i;
33267325
TH
369
370 if (ata_is_host_link(link))
b1c72916 371 linkno += 15;
33267325
TH
372
373 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
374 const struct ata_force_ent *fe = &ata_force_tbl[i];
375
376 if (fe->port != -1 && fe->port != link->ap->print_id)
377 continue;
378
379 if (fe->device != -1 && fe->device != linkno)
380 continue;
381
05944bdf
TH
382 /* only honor the first spd limit */
383 if (!did_spd && fe->param.spd_limit) {
384 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
a9a79dfe 385 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
05944bdf
TH
386 fe->param.name);
387 did_spd = true;
388 }
33267325 389
05944bdf
TH
390 /* let lflags stack */
391 if (fe->param.lflags) {
392 link->flags |= fe->param.lflags;
a9a79dfe 393 ata_link_notice(link,
05944bdf
TH
394 "FORCE: link flag 0x%x forced -> 0x%x\n",
395 fe->param.lflags, link->flags);
396 }
33267325
TH
397 }
398}
399
400/**
401 * ata_force_xfermask - force xfermask according to libata.force
402 * @dev: ATA device of interest
403 *
404 * Force xfer_mask according to libata.force and whine about it.
405 * For consistency with link selection, device number 15 selects
406 * the first device connected to the host link.
407 *
408 * LOCKING:
409 * EH context.
410 */
411static void ata_force_xfermask(struct ata_device *dev)
412{
413 int devno = dev->link->pmp + dev->devno;
414 int alt_devno = devno;
415 int i;
416
b1c72916
TH
417 /* allow n.15/16 for devices attached to host port */
418 if (ata_is_host_link(dev->link))
419 alt_devno += 15;
33267325
TH
420
421 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
422 const struct ata_force_ent *fe = &ata_force_tbl[i];
423 unsigned long pio_mask, mwdma_mask, udma_mask;
424
425 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
426 continue;
427
428 if (fe->device != -1 && fe->device != devno &&
429 fe->device != alt_devno)
430 continue;
431
432 if (!fe->param.xfer_mask)
433 continue;
434
435 ata_unpack_xfermask(fe->param.xfer_mask,
436 &pio_mask, &mwdma_mask, &udma_mask);
437 if (udma_mask)
438 dev->udma_mask = udma_mask;
439 else if (mwdma_mask) {
440 dev->udma_mask = 0;
441 dev->mwdma_mask = mwdma_mask;
442 } else {
443 dev->udma_mask = 0;
444 dev->mwdma_mask = 0;
445 dev->pio_mask = pio_mask;
446 }
447
a9a79dfe
JP
448 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
449 fe->param.name);
33267325
TH
450 return;
451 }
452}
453
454/**
455 * ata_force_horkage - force horkage according to libata.force
456 * @dev: ATA device of interest
457 *
458 * Force horkage according to libata.force and whine about it.
459 * For consistency with link selection, device number 15 selects
460 * the first device connected to the host link.
461 *
462 * LOCKING:
463 * EH context.
464 */
465static void ata_force_horkage(struct ata_device *dev)
466{
467 int devno = dev->link->pmp + dev->devno;
468 int alt_devno = devno;
469 int i;
470
b1c72916
TH
471 /* allow n.15/16 for devices attached to host port */
472 if (ata_is_host_link(dev->link))
473 alt_devno += 15;
33267325
TH
474
475 for (i = 0; i < ata_force_tbl_size; i++) {
476 const struct ata_force_ent *fe = &ata_force_tbl[i];
477
478 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
479 continue;
480
481 if (fe->device != -1 && fe->device != devno &&
482 fe->device != alt_devno)
483 continue;
484
485 if (!(~dev->horkage & fe->param.horkage_on) &&
486 !(dev->horkage & fe->param.horkage_off))
487 continue;
488
489 dev->horkage |= fe->param.horkage_on;
490 dev->horkage &= ~fe->param.horkage_off;
491
a9a79dfe
JP
492 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
493 fe->param.name);
33267325
TH
494 }
495}
496
436d34b3
TH
497/**
498 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
499 * @opcode: SCSI opcode
500 *
501 * Determine ATAPI command type from @opcode.
502 *
503 * LOCKING:
504 * None.
505 *
506 * RETURNS:
507 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
508 */
509int atapi_cmd_type(u8 opcode)
510{
511 switch (opcode) {
512 case GPCMD_READ_10:
513 case GPCMD_READ_12:
514 return ATAPI_READ;
515
516 case GPCMD_WRITE_10:
517 case GPCMD_WRITE_12:
518 case GPCMD_WRITE_AND_VERIFY_10:
519 return ATAPI_WRITE;
520
521 case GPCMD_READ_CD:
522 case GPCMD_READ_CD_MSF:
523 return ATAPI_READ_CD;
524
e52dcc48
TH
525 case ATA_16:
526 case ATA_12:
527 if (atapi_passthru16)
528 return ATAPI_PASS_THRU;
529 /* fall thru */
436d34b3
TH
530 default:
531 return ATAPI_MISC;
532 }
533}
534
1da177e4
LT
535/**
536 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
537 * @tf: Taskfile to convert
1da177e4 538 * @pmp: Port multiplier port
9977126c
TH
539 * @is_cmd: This FIS is for command
540 * @fis: Buffer into which data will output
1da177e4
LT
541 *
542 * Converts a standard ATA taskfile to a Serial ATA
543 * FIS structure (Register - Host to Device).
544 *
545 * LOCKING:
546 * Inherited from caller.
547 */
9977126c 548void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 549{
9977126c
TH
550 fis[0] = 0x27; /* Register - Host to Device FIS */
551 fis[1] = pmp & 0xf; /* Port multiplier number*/
552 if (is_cmd)
553 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
554
1da177e4
LT
555 fis[2] = tf->command;
556 fis[3] = tf->feature;
557
558 fis[4] = tf->lbal;
559 fis[5] = tf->lbam;
560 fis[6] = tf->lbah;
561 fis[7] = tf->device;
562
563 fis[8] = tf->hob_lbal;
564 fis[9] = tf->hob_lbam;
565 fis[10] = tf->hob_lbah;
566 fis[11] = tf->hob_feature;
567
568 fis[12] = tf->nsect;
569 fis[13] = tf->hob_nsect;
570 fis[14] = 0;
571 fis[15] = tf->ctl;
572
86a565e6
MC
573 fis[16] = tf->auxiliary & 0xff;
574 fis[17] = (tf->auxiliary >> 8) & 0xff;
575 fis[18] = (tf->auxiliary >> 16) & 0xff;
576 fis[19] = (tf->auxiliary >> 24) & 0xff;
1da177e4
LT
577}
578
579/**
580 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
581 * @fis: Buffer from which data will be input
582 * @tf: Taskfile to output
583 *
e12a1be6 584 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
585 *
586 * LOCKING:
587 * Inherited from caller.
588 */
589
057ace5e 590void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
591{
592 tf->command = fis[2]; /* status */
593 tf->feature = fis[3]; /* error */
594
595 tf->lbal = fis[4];
596 tf->lbam = fis[5];
597 tf->lbah = fis[6];
598 tf->device = fis[7];
599
600 tf->hob_lbal = fis[8];
601 tf->hob_lbam = fis[9];
602 tf->hob_lbah = fis[10];
603
604 tf->nsect = fis[12];
605 tf->hob_nsect = fis[13];
606}
607
8cbd6df1
AL
608static const u8 ata_rw_cmds[] = {
609 /* pio multi */
610 ATA_CMD_READ_MULTI,
611 ATA_CMD_WRITE_MULTI,
612 ATA_CMD_READ_MULTI_EXT,
613 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
614 0,
615 0,
616 0,
617 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
618 /* pio */
619 ATA_CMD_PIO_READ,
620 ATA_CMD_PIO_WRITE,
621 ATA_CMD_PIO_READ_EXT,
622 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
623 0,
624 0,
625 0,
626 0,
8cbd6df1
AL
627 /* dma */
628 ATA_CMD_READ,
629 ATA_CMD_WRITE,
630 ATA_CMD_READ_EXT,
9a3dccc4
TH
631 ATA_CMD_WRITE_EXT,
632 0,
633 0,
634 0,
635 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 636};
1da177e4
LT
637
638/**
8cbd6df1 639 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
640 * @tf: command to examine and configure
641 * @dev: device tf belongs to
1da177e4 642 *
2e9edbf8 643 * Examine the device configuration and tf->flags to calculate
8cbd6df1 644 * the proper read/write commands and protocol to use.
1da177e4
LT
645 *
646 * LOCKING:
647 * caller.
648 */
bd056d7e 649static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 650{
9a3dccc4 651 u8 cmd;
1da177e4 652
9a3dccc4 653 int index, fua, lba48, write;
2e9edbf8 654
9a3dccc4 655 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
656 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
657 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 658
8cbd6df1
AL
659 if (dev->flags & ATA_DFLAG_PIO) {
660 tf->protocol = ATA_PROT_PIO;
9a3dccc4 661 index = dev->multi_count ? 0 : 8;
9af5c9c9 662 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
663 /* Unable to use DMA due to host limitation */
664 tf->protocol = ATA_PROT_PIO;
0565c26d 665 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
666 } else {
667 tf->protocol = ATA_PROT_DMA;
9a3dccc4 668 index = 16;
8cbd6df1 669 }
1da177e4 670
9a3dccc4
TH
671 cmd = ata_rw_cmds[index + fua + lba48 + write];
672 if (cmd) {
673 tf->command = cmd;
674 return 0;
675 }
676 return -1;
1da177e4
LT
677}
678
35b649fe
TH
679/**
680 * ata_tf_read_block - Read block address from ATA taskfile
681 * @tf: ATA taskfile of interest
682 * @dev: ATA device @tf belongs to
683 *
684 * LOCKING:
685 * None.
686 *
687 * Read block address from @tf. This function can handle all
688 * three address formats - LBA, LBA48 and CHS. tf->protocol and
689 * flags select the address format to use.
690 *
691 * RETURNS:
692 * Block address read from @tf.
693 */
694u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
695{
696 u64 block = 0;
697
698 if (tf->flags & ATA_TFLAG_LBA) {
699 if (tf->flags & ATA_TFLAG_LBA48) {
700 block |= (u64)tf->hob_lbah << 40;
701 block |= (u64)tf->hob_lbam << 32;
44901a96 702 block |= (u64)tf->hob_lbal << 24;
35b649fe
TH
703 } else
704 block |= (tf->device & 0xf) << 24;
705
706 block |= tf->lbah << 16;
707 block |= tf->lbam << 8;
708 block |= tf->lbal;
709 } else {
710 u32 cyl, head, sect;
711
712 cyl = tf->lbam | (tf->lbah << 8);
713 head = tf->device & 0xf;
714 sect = tf->lbal;
715
ac8672ea 716 if (!sect) {
a9a79dfe
JP
717 ata_dev_warn(dev,
718 "device reported invalid CHS sector 0\n");
ac8672ea
TH
719 sect = 1; /* oh well */
720 }
721
722 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
35b649fe
TH
723 }
724
725 return block;
726}
727
bd056d7e
TH
728/**
729 * ata_build_rw_tf - Build ATA taskfile for given read/write request
730 * @tf: Target ATA taskfile
731 * @dev: ATA device @tf belongs to
732 * @block: Block address
733 * @n_block: Number of blocks
734 * @tf_flags: RW/FUA etc...
735 * @tag: tag
736 *
737 * LOCKING:
738 * None.
739 *
740 * Build ATA taskfile @tf for read/write request described by
741 * @block, @n_block, @tf_flags and @tag on @dev.
742 *
743 * RETURNS:
744 *
745 * 0 on success, -ERANGE if the request is too large for @dev,
746 * -EINVAL if the request is invalid.
747 */
748int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
749 u64 block, u32 n_block, unsigned int tf_flags,
750 unsigned int tag)
751{
752 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
753 tf->flags |= tf_flags;
754
6d1245bf 755 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
756 /* yay, NCQ */
757 if (!lba_48_ok(block, n_block))
758 return -ERANGE;
759
760 tf->protocol = ATA_PROT_NCQ;
761 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
762
763 if (tf->flags & ATA_TFLAG_WRITE)
764 tf->command = ATA_CMD_FPDMA_WRITE;
765 else
766 tf->command = ATA_CMD_FPDMA_READ;
767
768 tf->nsect = tag << 3;
769 tf->hob_feature = (n_block >> 8) & 0xff;
770 tf->feature = n_block & 0xff;
771
772 tf->hob_lbah = (block >> 40) & 0xff;
773 tf->hob_lbam = (block >> 32) & 0xff;
774 tf->hob_lbal = (block >> 24) & 0xff;
775 tf->lbah = (block >> 16) & 0xff;
776 tf->lbam = (block >> 8) & 0xff;
777 tf->lbal = block & 0xff;
778
9ca7cfa4 779 tf->device = ATA_LBA;
bd056d7e
TH
780 if (tf->flags & ATA_TFLAG_FUA)
781 tf->device |= 1 << 7;
782 } else if (dev->flags & ATA_DFLAG_LBA) {
783 tf->flags |= ATA_TFLAG_LBA;
784
785 if (lba_28_ok(block, n_block)) {
786 /* use LBA28 */
787 tf->device |= (block >> 24) & 0xf;
788 } else if (lba_48_ok(block, n_block)) {
789 if (!(dev->flags & ATA_DFLAG_LBA48))
790 return -ERANGE;
791
792 /* use LBA48 */
793 tf->flags |= ATA_TFLAG_LBA48;
794
795 tf->hob_nsect = (n_block >> 8) & 0xff;
796
797 tf->hob_lbah = (block >> 40) & 0xff;
798 tf->hob_lbam = (block >> 32) & 0xff;
799 tf->hob_lbal = (block >> 24) & 0xff;
800 } else
801 /* request too large even for LBA48 */
802 return -ERANGE;
803
804 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
805 return -EINVAL;
806
807 tf->nsect = n_block & 0xff;
808
809 tf->lbah = (block >> 16) & 0xff;
810 tf->lbam = (block >> 8) & 0xff;
811 tf->lbal = block & 0xff;
812
813 tf->device |= ATA_LBA;
814 } else {
815 /* CHS */
816 u32 sect, head, cyl, track;
817
818 /* The request -may- be too large for CHS addressing. */
819 if (!lba_28_ok(block, n_block))
820 return -ERANGE;
821
822 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
823 return -EINVAL;
824
825 /* Convert LBA to CHS */
826 track = (u32)block / dev->sectors;
827 cyl = track / dev->heads;
828 head = track % dev->heads;
829 sect = (u32)block % dev->sectors + 1;
830
831 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
832 (u32)block, track, cyl, head, sect);
833
834 /* Check whether the converted CHS can fit.
835 Cylinder: 0-65535
836 Head: 0-15
837 Sector: 1-255*/
838 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
839 return -ERANGE;
840
841 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
842 tf->lbal = sect;
843 tf->lbam = cyl;
844 tf->lbah = cyl >> 8;
845 tf->device |= head;
846 }
847
848 return 0;
849}
850
cb95d562
TH
851/**
852 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
853 * @pio_mask: pio_mask
854 * @mwdma_mask: mwdma_mask
855 * @udma_mask: udma_mask
856 *
857 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
858 * unsigned int xfer_mask.
859 *
860 * LOCKING:
861 * None.
862 *
863 * RETURNS:
864 * Packed xfer_mask.
865 */
7dc951ae
TH
866unsigned long ata_pack_xfermask(unsigned long pio_mask,
867 unsigned long mwdma_mask,
868 unsigned long udma_mask)
cb95d562
TH
869{
870 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
871 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
872 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
873}
874
c0489e4e
TH
875/**
876 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
877 * @xfer_mask: xfer_mask to unpack
878 * @pio_mask: resulting pio_mask
879 * @mwdma_mask: resulting mwdma_mask
880 * @udma_mask: resulting udma_mask
881 *
882 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
883 * Any NULL distination masks will be ignored.
884 */
7dc951ae
TH
885void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
886 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
887{
888 if (pio_mask)
889 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
890 if (mwdma_mask)
891 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
892 if (udma_mask)
893 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
894}
895
cb95d562 896static const struct ata_xfer_ent {
be9a50c8 897 int shift, bits;
cb95d562
TH
898 u8 base;
899} ata_xfer_tbl[] = {
70cd071e
TH
900 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
901 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
902 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
cb95d562
TH
903 { -1, },
904};
905
906/**
907 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
908 * @xfer_mask: xfer_mask of interest
909 *
910 * Return matching XFER_* value for @xfer_mask. Only the highest
911 * bit of @xfer_mask is considered.
912 *
913 * LOCKING:
914 * None.
915 *
916 * RETURNS:
70cd071e 917 * Matching XFER_* value, 0xff if no match found.
cb95d562 918 */
7dc951ae 919u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
920{
921 int highbit = fls(xfer_mask) - 1;
922 const struct ata_xfer_ent *ent;
923
924 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
925 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
926 return ent->base + highbit - ent->shift;
70cd071e 927 return 0xff;
cb95d562
TH
928}
929
930/**
931 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
932 * @xfer_mode: XFER_* of interest
933 *
934 * Return matching xfer_mask for @xfer_mode.
935 *
936 * LOCKING:
937 * None.
938 *
939 * RETURNS:
940 * Matching xfer_mask, 0 if no match found.
941 */
7dc951ae 942unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
943{
944 const struct ata_xfer_ent *ent;
945
946 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
947 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
948 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
949 & ~((1 << ent->shift) - 1);
cb95d562
TH
950 return 0;
951}
952
953/**
954 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
955 * @xfer_mode: XFER_* of interest
956 *
957 * Return matching xfer_shift for @xfer_mode.
958 *
959 * LOCKING:
960 * None.
961 *
962 * RETURNS:
963 * Matching xfer_shift, -1 if no match found.
964 */
7dc951ae 965int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
966{
967 const struct ata_xfer_ent *ent;
968
969 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
970 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
971 return ent->shift;
972 return -1;
973}
974
1da177e4 975/**
1da7b0d0
TH
976 * ata_mode_string - convert xfer_mask to string
977 * @xfer_mask: mask of bits supported; only highest bit counts.
1da177e4
LT
978 *
979 * Determine string which represents the highest speed
1da7b0d0 980 * (highest bit in @modemask).
1da177e4
LT
981 *
982 * LOCKING:
983 * None.
984 *
985 * RETURNS:
986 * Constant C string representing highest speed listed in
1da7b0d0 987 * @mode_mask, or the constant C string "<n/a>".
1da177e4 988 */
7dc951ae 989const char *ata_mode_string(unsigned long xfer_mask)
1da177e4 990{
75f554bc
TH
991 static const char * const xfer_mode_str[] = {
992 "PIO0",
993 "PIO1",
994 "PIO2",
995 "PIO3",
996 "PIO4",
b352e57d
AC
997 "PIO5",
998 "PIO6",
75f554bc
TH
999 "MWDMA0",
1000 "MWDMA1",
1001 "MWDMA2",
b352e57d
AC
1002 "MWDMA3",
1003 "MWDMA4",
75f554bc
TH
1004 "UDMA/16",
1005 "UDMA/25",
1006 "UDMA/33",
1007 "UDMA/44",
1008 "UDMA/66",
1009 "UDMA/100",
1010 "UDMA/133",
1011 "UDMA7",
1012 };
1da7b0d0 1013 int highbit;
1da177e4 1014
1da7b0d0
TH
1015 highbit = fls(xfer_mask) - 1;
1016 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1017 return xfer_mode_str[highbit];
1da177e4 1018 return "<n/a>";
1da177e4
LT
1019}
1020
d9027470 1021const char *sata_spd_string(unsigned int spd)
4c360c81
TH
1022{
1023 static const char * const spd_str[] = {
1024 "1.5 Gbps",
1025 "3.0 Gbps",
8522ee25 1026 "6.0 Gbps",
4c360c81
TH
1027 };
1028
1029 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1030 return "<unknown>";
1031 return spd_str[spd - 1];
1032}
1033
1da177e4
LT
1034/**
1035 * ata_dev_classify - determine device type based on ATA-spec signature
1036 * @tf: ATA taskfile register set for device to be identified
1037 *
1038 * Determine from taskfile register contents whether a device is
1039 * ATA or ATAPI, as per "Signature and persistence" section
1040 * of ATA/PI spec (volume 1, sect 5.14).
1041 *
1042 * LOCKING:
1043 * None.
1044 *
1045 * RETURNS:
633273a3
TH
1046 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1047 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 1048 */
057ace5e 1049unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
1050{
1051 /* Apple's open source Darwin code hints that some devices only
1052 * put a proper signature into the LBA mid/high registers,
1053 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
1054 *
1055 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1056 * signatures for ATA and ATAPI devices attached on SerialATA,
1057 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1058 * spec has never mentioned about using different signatures
1059 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1060 * Multiplier specification began to use 0x69/0x96 to identify
1061 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1062 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1063 * 0x69/0x96 shortly and described them as reserved for
1064 * SerialATA.
1065 *
1066 * We follow the current spec and consider that 0x69/0x96
1067 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
79b42bab
TH
1068 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1069 * SEMB signature. This is worked around in
1070 * ata_dev_read_id().
1da177e4 1071 */
633273a3 1072 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
1073 DPRINTK("found ATA device by sig\n");
1074 return ATA_DEV_ATA;
1075 }
1076
633273a3 1077 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
1078 DPRINTK("found ATAPI device by sig\n");
1079 return ATA_DEV_ATAPI;
1080 }
1081
633273a3
TH
1082 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1083 DPRINTK("found PMP device by sig\n");
1084 return ATA_DEV_PMP;
1085 }
1086
1087 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
79b42bab
TH
1088 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1089 return ATA_DEV_SEMB;
633273a3
TH
1090 }
1091
1da177e4
LT
1092 DPRINTK("unknown device\n");
1093 return ATA_DEV_UNKNOWN;
1094}
1095
1da177e4 1096/**
6a62a04d 1097 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1098 * @id: IDENTIFY DEVICE results we will examine
1099 * @s: string into which data is output
1100 * @ofs: offset into identify device page
1101 * @len: length of string to return. must be an even number.
1102 *
1103 * The strings in the IDENTIFY DEVICE page are broken up into
1104 * 16-bit chunks. Run through the string, and output each
1105 * 8-bit chunk linearly, regardless of platform.
1106 *
1107 * LOCKING:
1108 * caller.
1109 */
1110
6a62a04d
TH
1111void ata_id_string(const u16 *id, unsigned char *s,
1112 unsigned int ofs, unsigned int len)
1da177e4
LT
1113{
1114 unsigned int c;
1115
963e4975
AC
1116 BUG_ON(len & 1);
1117
1da177e4
LT
1118 while (len > 0) {
1119 c = id[ofs] >> 8;
1120 *s = c;
1121 s++;
1122
1123 c = id[ofs] & 0xff;
1124 *s = c;
1125 s++;
1126
1127 ofs++;
1128 len -= 2;
1129 }
1130}
1131
0e949ff3 1132/**
6a62a04d 1133 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1134 * @id: IDENTIFY DEVICE results we will examine
1135 * @s: string into which data is output
1136 * @ofs: offset into identify device page
1137 * @len: length of string to return. must be an odd number.
1138 *
6a62a04d 1139 * This function is identical to ata_id_string except that it
0e949ff3
TH
1140 * trims trailing spaces and terminates the resulting string with
1141 * null. @len must be actual maximum length (even number) + 1.
1142 *
1143 * LOCKING:
1144 * caller.
1145 */
6a62a04d
TH
1146void ata_id_c_string(const u16 *id, unsigned char *s,
1147 unsigned int ofs, unsigned int len)
0e949ff3
TH
1148{
1149 unsigned char *p;
1150
6a62a04d 1151 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1152
1153 p = s + strnlen(s, len - 1);
1154 while (p > s && p[-1] == ' ')
1155 p--;
1156 *p = '\0';
1157}
0baab86b 1158
db6f8759
TH
1159static u64 ata_id_n_sectors(const u16 *id)
1160{
1161 if (ata_id_has_lba(id)) {
1162 if (ata_id_has_lba48(id))
968e594a 1163 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
db6f8759 1164 else
968e594a 1165 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
db6f8759
TH
1166 } else {
1167 if (ata_id_current_chs_valid(id))
968e594a
RH
1168 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1169 id[ATA_ID_CUR_SECTORS];
db6f8759 1170 else
968e594a
RH
1171 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1172 id[ATA_ID_SECTORS];
db6f8759
TH
1173 }
1174}
1175
a5987e0a 1176u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1e999736
AC
1177{
1178 u64 sectors = 0;
1179
1180 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1181 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
ba14a9c2 1182 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1e999736
AC
1183 sectors |= (tf->lbah & 0xff) << 16;
1184 sectors |= (tf->lbam & 0xff) << 8;
1185 sectors |= (tf->lbal & 0xff);
1186
a5987e0a 1187 return sectors;
1e999736
AC
1188}
1189
a5987e0a 1190u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1e999736
AC
1191{
1192 u64 sectors = 0;
1193
1194 sectors |= (tf->device & 0x0f) << 24;
1195 sectors |= (tf->lbah & 0xff) << 16;
1196 sectors |= (tf->lbam & 0xff) << 8;
1197 sectors |= (tf->lbal & 0xff);
1198
a5987e0a 1199 return sectors;
1e999736
AC
1200}
1201
1202/**
c728a914
TH
1203 * ata_read_native_max_address - Read native max address
1204 * @dev: target device
1205 * @max_sectors: out parameter for the result native max address
1e999736 1206 *
c728a914
TH
1207 * Perform an LBA48 or LBA28 native size query upon the device in
1208 * question.
1e999736 1209 *
c728a914
TH
1210 * RETURNS:
1211 * 0 on success, -EACCES if command is aborted by the drive.
1212 * -EIO on other errors.
1e999736 1213 */
c728a914 1214static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1215{
c728a914 1216 unsigned int err_mask;
1e999736 1217 struct ata_taskfile tf;
c728a914 1218 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1219
1220 ata_tf_init(dev, &tf);
1221
c728a914 1222 /* always clear all address registers */
1e999736 1223 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1224
c728a914
TH
1225 if (lba48) {
1226 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1227 tf.flags |= ATA_TFLAG_LBA48;
1228 } else
1229 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1230
1e999736 1231 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
1232 tf.device |= ATA_LBA;
1233
2b789108 1234 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914 1235 if (err_mask) {
a9a79dfe
JP
1236 ata_dev_warn(dev,
1237 "failed to read native max address (err_mask=0x%x)\n",
1238 err_mask);
c728a914
TH
1239 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1240 return -EACCES;
1241 return -EIO;
1242 }
1e999736 1243
c728a914 1244 if (lba48)
a5987e0a 1245 *max_sectors = ata_tf_to_lba48(&tf) + 1;
c728a914 1246 else
a5987e0a 1247 *max_sectors = ata_tf_to_lba(&tf) + 1;
2dcb407e 1248 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1249 (*max_sectors)--;
c728a914 1250 return 0;
1e999736
AC
1251}
1252
1253/**
c728a914
TH
1254 * ata_set_max_sectors - Set max sectors
1255 * @dev: target device
6b38d1d1 1256 * @new_sectors: new max sectors value to set for the device
1e999736 1257 *
c728a914
TH
1258 * Set max sectors of @dev to @new_sectors.
1259 *
1260 * RETURNS:
1261 * 0 on success, -EACCES if command is aborted or denied (due to
1262 * previous non-volatile SET_MAX) by the drive. -EIO on other
1263 * errors.
1e999736 1264 */
05027adc 1265static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 1266{
c728a914 1267 unsigned int err_mask;
1e999736 1268 struct ata_taskfile tf;
c728a914 1269 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1270
1271 new_sectors--;
1272
1273 ata_tf_init(dev, &tf);
1274
1e999736 1275 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
1276
1277 if (lba48) {
1278 tf.command = ATA_CMD_SET_MAX_EXT;
1279 tf.flags |= ATA_TFLAG_LBA48;
1280
1281 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1282 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1283 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 1284 } else {
c728a914
TH
1285 tf.command = ATA_CMD_SET_MAX;
1286
1e582ba4
TH
1287 tf.device |= (new_sectors >> 24) & 0xf;
1288 }
1289
1e999736 1290 tf.protocol |= ATA_PROT_NODATA;
c728a914 1291 tf.device |= ATA_LBA;
1e999736
AC
1292
1293 tf.lbal = (new_sectors >> 0) & 0xff;
1294 tf.lbam = (new_sectors >> 8) & 0xff;
1295 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 1296
2b789108 1297 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914 1298 if (err_mask) {
a9a79dfe
JP
1299 ata_dev_warn(dev,
1300 "failed to set max address (err_mask=0x%x)\n",
1301 err_mask);
c728a914
TH
1302 if (err_mask == AC_ERR_DEV &&
1303 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1304 return -EACCES;
1305 return -EIO;
1306 }
1307
c728a914 1308 return 0;
1e999736
AC
1309}
1310
1311/**
1312 * ata_hpa_resize - Resize a device with an HPA set
1313 * @dev: Device to resize
1314 *
1315 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1316 * it if required to the full size of the media. The caller must check
1317 * the drive has the HPA feature set enabled.
05027adc
TH
1318 *
1319 * RETURNS:
1320 * 0 on success, -errno on failure.
1e999736 1321 */
05027adc 1322static int ata_hpa_resize(struct ata_device *dev)
1e999736 1323{
05027adc
TH
1324 struct ata_eh_context *ehc = &dev->link->eh_context;
1325 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
445d211b 1326 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
05027adc
TH
1327 u64 sectors = ata_id_n_sectors(dev->id);
1328 u64 native_sectors;
c728a914 1329 int rc;
a617c09f 1330
05027adc
TH
1331 /* do we need to do it? */
1332 if (dev->class != ATA_DEV_ATA ||
1333 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1334 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 1335 return 0;
1e999736 1336
05027adc
TH
1337 /* read native max address */
1338 rc = ata_read_native_max_address(dev, &native_sectors);
1339 if (rc) {
dda7aba1
TH
1340 /* If device aborted the command or HPA isn't going to
1341 * be unlocked, skip HPA resizing.
05027adc 1342 */
445d211b 1343 if (rc == -EACCES || !unlock_hpa) {
a9a79dfe
JP
1344 ata_dev_warn(dev,
1345 "HPA support seems broken, skipping HPA handling\n");
05027adc
TH
1346 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1347
1348 /* we can continue if device aborted the command */
1349 if (rc == -EACCES)
1350 rc = 0;
1e999736 1351 }
37301a55 1352
05027adc
TH
1353 return rc;
1354 }
5920dadf 1355 dev->n_native_sectors = native_sectors;
05027adc
TH
1356
1357 /* nothing to do? */
445d211b 1358 if (native_sectors <= sectors || !unlock_hpa) {
05027adc
TH
1359 if (!print_info || native_sectors == sectors)
1360 return 0;
1361
1362 if (native_sectors > sectors)
a9a79dfe 1363 ata_dev_info(dev,
05027adc
TH
1364 "HPA detected: current %llu, native %llu\n",
1365 (unsigned long long)sectors,
1366 (unsigned long long)native_sectors);
1367 else if (native_sectors < sectors)
a9a79dfe
JP
1368 ata_dev_warn(dev,
1369 "native sectors (%llu) is smaller than sectors (%llu)\n",
05027adc
TH
1370 (unsigned long long)native_sectors,
1371 (unsigned long long)sectors);
1372 return 0;
1373 }
1374
1375 /* let's unlock HPA */
1376 rc = ata_set_max_sectors(dev, native_sectors);
1377 if (rc == -EACCES) {
1378 /* if device aborted the command, skip HPA resizing */
a9a79dfe
JP
1379 ata_dev_warn(dev,
1380 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1381 (unsigned long long)sectors,
1382 (unsigned long long)native_sectors);
05027adc
TH
1383 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1384 return 0;
1385 } else if (rc)
1386 return rc;
1387
1388 /* re-read IDENTIFY data */
1389 rc = ata_dev_reread_id(dev, 0);
1390 if (rc) {
a9a79dfe
JP
1391 ata_dev_err(dev,
1392 "failed to re-read IDENTIFY data after HPA resizing\n");
05027adc
TH
1393 return rc;
1394 }
1395
1396 if (print_info) {
1397 u64 new_sectors = ata_id_n_sectors(dev->id);
a9a79dfe 1398 ata_dev_info(dev,
05027adc
TH
1399 "HPA unlocked: %llu -> %llu, native %llu\n",
1400 (unsigned long long)sectors,
1401 (unsigned long long)new_sectors,
1402 (unsigned long long)native_sectors);
1403 }
1404
1405 return 0;
1e999736
AC
1406}
1407
1da177e4
LT
1408/**
1409 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1410 * @id: IDENTIFY DEVICE page to dump
1da177e4 1411 *
0bd3300a
TH
1412 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1413 * page.
1da177e4
LT
1414 *
1415 * LOCKING:
1416 * caller.
1417 */
1418
0bd3300a 1419static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1420{
1421 DPRINTK("49==0x%04x "
1422 "53==0x%04x "
1423 "63==0x%04x "
1424 "64==0x%04x "
1425 "75==0x%04x \n",
0bd3300a
TH
1426 id[49],
1427 id[53],
1428 id[63],
1429 id[64],
1430 id[75]);
1da177e4
LT
1431 DPRINTK("80==0x%04x "
1432 "81==0x%04x "
1433 "82==0x%04x "
1434 "83==0x%04x "
1435 "84==0x%04x \n",
0bd3300a
TH
1436 id[80],
1437 id[81],
1438 id[82],
1439 id[83],
1440 id[84]);
1da177e4
LT
1441 DPRINTK("88==0x%04x "
1442 "93==0x%04x\n",
0bd3300a
TH
1443 id[88],
1444 id[93]);
1da177e4
LT
1445}
1446
cb95d562
TH
1447/**
1448 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1449 * @id: IDENTIFY data to compute xfer mask from
1450 *
1451 * Compute the xfermask for this device. This is not as trivial
1452 * as it seems if we must consider early devices correctly.
1453 *
1454 * FIXME: pre IDE drive timing (do we care ?).
1455 *
1456 * LOCKING:
1457 * None.
1458 *
1459 * RETURNS:
1460 * Computed xfermask
1461 */
7dc951ae 1462unsigned long ata_id_xfermask(const u16 *id)
cb95d562 1463{
7dc951ae 1464 unsigned long pio_mask, mwdma_mask, udma_mask;
cb95d562
TH
1465
1466 /* Usual case. Word 53 indicates word 64 is valid */
1467 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1468 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1469 pio_mask <<= 3;
1470 pio_mask |= 0x7;
1471 } else {
1472 /* If word 64 isn't valid then Word 51 high byte holds
1473 * the PIO timing number for the maximum. Turn it into
1474 * a mask.
1475 */
7a0f1c8a 1476 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1477 if (mode < 5) /* Valid PIO range */
2dcb407e 1478 pio_mask = (2 << mode) - 1;
46767aeb
AC
1479 else
1480 pio_mask = 1;
cb95d562
TH
1481
1482 /* But wait.. there's more. Design your standards by
1483 * committee and you too can get a free iordy field to
1484 * process. However its the speeds not the modes that
1485 * are supported... Note drivers using the timing API
1486 * will get this right anyway
1487 */
1488 }
1489
1490 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1491
b352e57d
AC
1492 if (ata_id_is_cfa(id)) {
1493 /*
1494 * Process compact flash extended modes
1495 */
62afe5d7
SS
1496 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1497 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
b352e57d
AC
1498
1499 if (pio)
1500 pio_mask |= (1 << 5);
1501 if (pio > 1)
1502 pio_mask |= (1 << 6);
1503 if (dma)
1504 mwdma_mask |= (1 << 3);
1505 if (dma > 1)
1506 mwdma_mask |= (1 << 4);
1507 }
1508
fb21f0d0
TH
1509 udma_mask = 0;
1510 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1511 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1512
1513 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1514}
1515
7102d230 1516static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1517{
77853bf2 1518 struct completion *waiting = qc->private_data;
a2a7a662 1519
a2a7a662 1520 complete(waiting);
a2a7a662
TH
1521}
1522
1523/**
2432697b 1524 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1525 * @dev: Device to which the command is sent
1526 * @tf: Taskfile registers for the command and the result
d69cf37d 1527 * @cdb: CDB for packet command
e227867f 1528 * @dma_dir: Data transfer direction of the command
5c1ad8b3 1529 * @sgl: sg list for the data buffer of the command
2432697b 1530 * @n_elem: Number of sg entries
2b789108 1531 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1532 *
1533 * Executes libata internal command with timeout. @tf contains
1534 * command on entry and result on return. Timeout and error
1535 * conditions are reported via return value. No recovery action
1536 * is taken after a command times out. It's caller's duty to
1537 * clean up after timeout.
1538 *
1539 * LOCKING:
1540 * None. Should be called with kernel context, might sleep.
551e8889
TH
1541 *
1542 * RETURNS:
1543 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1544 */
2432697b
TH
1545unsigned ata_exec_internal_sg(struct ata_device *dev,
1546 struct ata_taskfile *tf, const u8 *cdb,
87260216 1547 int dma_dir, struct scatterlist *sgl,
2b789108 1548 unsigned int n_elem, unsigned long timeout)
a2a7a662 1549{
9af5c9c9
TH
1550 struct ata_link *link = dev->link;
1551 struct ata_port *ap = link->ap;
a2a7a662 1552 u8 command = tf->command;
87fbc5a0 1553 int auto_timeout = 0;
a2a7a662 1554 struct ata_queued_cmd *qc;
2ab7db1f 1555 unsigned int tag, preempted_tag;
dedaf2b0 1556 u32 preempted_sactive, preempted_qc_active;
da917d69 1557 int preempted_nr_active_links;
60be6b9a 1558 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1559 unsigned long flags;
77853bf2 1560 unsigned int err_mask;
d95a717f 1561 int rc;
a2a7a662 1562
ba6a1308 1563 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1564
e3180499 1565 /* no internal command while frozen */
b51e9e5d 1566 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1567 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1568 return AC_ERR_SYSTEM;
1569 }
1570
2ab7db1f 1571 /* initialize internal qc */
a2a7a662 1572
2ab7db1f
TH
1573 /* XXX: Tag 0 is used for drivers with legacy EH as some
1574 * drivers choke if any other tag is given. This breaks
1575 * ata_tag_internal() test for those drivers. Don't use new
1576 * EH stuff without converting to it.
1577 */
1578 if (ap->ops->error_handler)
1579 tag = ATA_TAG_INTERNAL;
1580 else
1581 tag = 0;
1582
8a8bc223
TH
1583 if (test_and_set_bit(tag, &ap->qc_allocated))
1584 BUG();
f69499f4 1585 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1586
1587 qc->tag = tag;
1588 qc->scsicmd = NULL;
1589 qc->ap = ap;
1590 qc->dev = dev;
1591 ata_qc_reinit(qc);
1592
9af5c9c9
TH
1593 preempted_tag = link->active_tag;
1594 preempted_sactive = link->sactive;
dedaf2b0 1595 preempted_qc_active = ap->qc_active;
da917d69 1596 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1597 link->active_tag = ATA_TAG_POISON;
1598 link->sactive = 0;
dedaf2b0 1599 ap->qc_active = 0;
da917d69 1600 ap->nr_active_links = 0;
2ab7db1f
TH
1601
1602 /* prepare & issue qc */
a2a7a662 1603 qc->tf = *tf;
d69cf37d
TH
1604 if (cdb)
1605 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e771451c
VP
1606
1607 /* some SATA bridges need us to indicate data xfer direction */
1608 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1609 dma_dir == DMA_FROM_DEVICE)
1610 qc->tf.feature |= ATAPI_DMADIR;
1611
e61e0672 1612 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1613 qc->dma_dir = dma_dir;
1614 if (dma_dir != DMA_NONE) {
2432697b 1615 unsigned int i, buflen = 0;
87260216 1616 struct scatterlist *sg;
2432697b 1617
87260216
JA
1618 for_each_sg(sgl, sg, n_elem, i)
1619 buflen += sg->length;
2432697b 1620
87260216 1621 ata_sg_init(qc, sgl, n_elem);
49c80429 1622 qc->nbytes = buflen;
a2a7a662
TH
1623 }
1624
77853bf2 1625 qc->private_data = &wait;
a2a7a662
TH
1626 qc->complete_fn = ata_qc_complete_internal;
1627
8e0e694a 1628 ata_qc_issue(qc);
a2a7a662 1629
ba6a1308 1630 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1631
87fbc5a0
TH
1632 if (!timeout) {
1633 if (ata_probe_timeout)
1634 timeout = ata_probe_timeout * 1000;
1635 else {
1636 timeout = ata_internal_cmd_timeout(dev, command);
1637 auto_timeout = 1;
1638 }
1639 }
2b789108 1640
c0c362b6
TH
1641 if (ap->ops->error_handler)
1642 ata_eh_release(ap);
1643
2b789108 1644 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
d95a717f 1645
c0c362b6
TH
1646 if (ap->ops->error_handler)
1647 ata_eh_acquire(ap);
1648
c429137a 1649 ata_sff_flush_pio_task(ap);
41ade50c 1650
d95a717f 1651 if (!rc) {
ba6a1308 1652 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1653
1654 /* We're racing with irq here. If we lose, the
1655 * following test prevents us from completing the qc
d95a717f
TH
1656 * twice. If we win, the port is frozen and will be
1657 * cleaned up by ->post_internal_cmd().
a2a7a662 1658 */
77853bf2 1659 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1660 qc->err_mask |= AC_ERR_TIMEOUT;
1661
1662 if (ap->ops->error_handler)
1663 ata_port_freeze(ap);
1664 else
1665 ata_qc_complete(qc);
f15a1daf 1666
0dd4b21f 1667 if (ata_msg_warn(ap))
a9a79dfe
JP
1668 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1669 command);
a2a7a662
TH
1670 }
1671
ba6a1308 1672 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1673 }
1674
d95a717f
TH
1675 /* do post_internal_cmd */
1676 if (ap->ops->post_internal_cmd)
1677 ap->ops->post_internal_cmd(qc);
1678
a51d644a
TH
1679 /* perform minimal error analysis */
1680 if (qc->flags & ATA_QCFLAG_FAILED) {
1681 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1682 qc->err_mask |= AC_ERR_DEV;
1683
1684 if (!qc->err_mask)
1685 qc->err_mask |= AC_ERR_OTHER;
1686
1687 if (qc->err_mask & ~AC_ERR_OTHER)
1688 qc->err_mask &= ~AC_ERR_OTHER;
d95a717f
TH
1689 }
1690
15869303 1691 /* finish up */
ba6a1308 1692 spin_lock_irqsave(ap->lock, flags);
15869303 1693
e61e0672 1694 *tf = qc->result_tf;
77853bf2
TH
1695 err_mask = qc->err_mask;
1696
1697 ata_qc_free(qc);
9af5c9c9
TH
1698 link->active_tag = preempted_tag;
1699 link->sactive = preempted_sactive;
dedaf2b0 1700 ap->qc_active = preempted_qc_active;
da917d69 1701 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1702
ba6a1308 1703 spin_unlock_irqrestore(ap->lock, flags);
15869303 1704
87fbc5a0
TH
1705 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1706 ata_internal_cmd_timed_out(dev, command);
1707
77853bf2 1708 return err_mask;
a2a7a662
TH
1709}
1710
2432697b 1711/**
33480a0e 1712 * ata_exec_internal - execute libata internal command
2432697b
TH
1713 * @dev: Device to which the command is sent
1714 * @tf: Taskfile registers for the command and the result
1715 * @cdb: CDB for packet command
e227867f 1716 * @dma_dir: Data transfer direction of the command
2432697b
TH
1717 * @buf: Data buffer of the command
1718 * @buflen: Length of data buffer
2b789108 1719 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1720 *
1721 * Wrapper around ata_exec_internal_sg() which takes simple
1722 * buffer instead of sg list.
1723 *
1724 * LOCKING:
1725 * None. Should be called with kernel context, might sleep.
1726 *
1727 * RETURNS:
1728 * Zero on success, AC_ERR_* mask on failure
1729 */
1730unsigned ata_exec_internal(struct ata_device *dev,
1731 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1732 int dma_dir, void *buf, unsigned int buflen,
1733 unsigned long timeout)
2432697b 1734{
33480a0e
TH
1735 struct scatterlist *psg = NULL, sg;
1736 unsigned int n_elem = 0;
2432697b 1737
33480a0e
TH
1738 if (dma_dir != DMA_NONE) {
1739 WARN_ON(!buf);
1740 sg_init_one(&sg, buf, buflen);
1741 psg = &sg;
1742 n_elem++;
1743 }
2432697b 1744
2b789108
TH
1745 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1746 timeout);
2432697b
TH
1747}
1748
977e6b9f
TH
1749/**
1750 * ata_do_simple_cmd - execute simple internal command
1751 * @dev: Device to which the command is sent
1752 * @cmd: Opcode to execute
1753 *
1754 * Execute a 'simple' command, that only consists of the opcode
1755 * 'cmd' itself, without filling any other registers
1756 *
1757 * LOCKING:
1758 * Kernel thread context (may sleep).
1759 *
1760 * RETURNS:
1761 * Zero on success, AC_ERR_* mask on failure
e58eb583 1762 */
77b08fb5 1763unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1764{
1765 struct ata_taskfile tf;
e58eb583
TH
1766
1767 ata_tf_init(dev, &tf);
1768
1769 tf.command = cmd;
1770 tf.flags |= ATA_TFLAG_DEVICE;
1771 tf.protocol = ATA_PROT_NODATA;
1772
2b789108 1773 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1774}
1775
1bc4ccff
AC
1776/**
1777 * ata_pio_need_iordy - check if iordy needed
1778 * @adev: ATA device
1779 *
1780 * Check if the current speed of the device requires IORDY. Used
1781 * by various controllers for chip configuration.
1782 */
1bc4ccff
AC
1783unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1784{
0d9e6659
TH
1785 /* Don't set IORDY if we're preparing for reset. IORDY may
1786 * lead to controller lock up on certain controllers if the
1787 * port is not occupied. See bko#11703 for details.
1788 */
1789 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1790 return 0;
1791 /* Controller doesn't support IORDY. Probably a pointless
1792 * check as the caller should know this.
1793 */
9af5c9c9 1794 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1795 return 0;
5c18c4d2
DD
1796 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1797 if (ata_id_is_cfa(adev->id)
1798 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1799 return 0;
432729f0
AC
1800 /* PIO3 and higher it is mandatory */
1801 if (adev->pio_mode > XFER_PIO_2)
1802 return 1;
1803 /* We turn it on when possible */
1804 if (ata_id_has_iordy(adev->id))
1bc4ccff 1805 return 1;
432729f0
AC
1806 return 0;
1807}
2e9edbf8 1808
432729f0
AC
1809/**
1810 * ata_pio_mask_no_iordy - Return the non IORDY mask
1811 * @adev: ATA device
1812 *
1813 * Compute the highest mode possible if we are not using iordy. Return
1814 * -1 if no iordy mode is available.
1815 */
432729f0
AC
1816static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1817{
1bc4ccff 1818 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1819 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1820 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1821 /* Is the speed faster than the drive allows non IORDY ? */
1822 if (pio) {
1823 /* This is cycle times not frequency - watch the logic! */
1824 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1825 return 3 << ATA_SHIFT_PIO;
1826 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1827 }
1828 }
432729f0 1829 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1830}
1831
963e4975
AC
1832/**
1833 * ata_do_dev_read_id - default ID read method
1834 * @dev: device
1835 * @tf: proposed taskfile
1836 * @id: data buffer
1837 *
1838 * Issue the identify taskfile and hand back the buffer containing
1839 * identify data. For some RAID controllers and for pre ATA devices
1840 * this function is wrapped or replaced by the driver
1841 */
1842unsigned int ata_do_dev_read_id(struct ata_device *dev,
1843 struct ata_taskfile *tf, u16 *id)
1844{
1845 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1846 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1847}
1848
1da177e4 1849/**
49016aca 1850 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1851 * @dev: target device
1852 * @p_class: pointer to class of the target device (may be changed)
bff04647 1853 * @flags: ATA_READID_* flags
fe635c7e 1854 * @id: buffer to read IDENTIFY data into
1da177e4 1855 *
49016aca
TH
1856 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1857 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1858 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1859 * for pre-ATA4 drives.
1da177e4 1860 *
50a99018 1861 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1862 * now we abort if we hit that case.
50a99018 1863 *
1da177e4 1864 * LOCKING:
49016aca
TH
1865 * Kernel thread context (may sleep)
1866 *
1867 * RETURNS:
1868 * 0 on success, -errno otherwise.
1da177e4 1869 */
a9beec95 1870int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1871 unsigned int flags, u16 *id)
1da177e4 1872{
9af5c9c9 1873 struct ata_port *ap = dev->link->ap;
49016aca 1874 unsigned int class = *p_class;
a0123703 1875 struct ata_taskfile tf;
49016aca
TH
1876 unsigned int err_mask = 0;
1877 const char *reason;
79b42bab 1878 bool is_semb = class == ATA_DEV_SEMB;
54936f8b 1879 int may_fallback = 1, tried_spinup = 0;
49016aca 1880 int rc;
1da177e4 1881
0dd4b21f 1882 if (ata_msg_ctl(ap))
a9a79dfe 1883 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1da177e4 1884
963e4975 1885retry:
3373efd8 1886 ata_tf_init(dev, &tf);
a0123703 1887
49016aca 1888 switch (class) {
79b42bab
TH
1889 case ATA_DEV_SEMB:
1890 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
49016aca 1891 case ATA_DEV_ATA:
a0123703 1892 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1893 break;
1894 case ATA_DEV_ATAPI:
a0123703 1895 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1896 break;
1897 default:
1898 rc = -ENODEV;
1899 reason = "unsupported class";
1900 goto err_out;
1da177e4
LT
1901 }
1902
a0123703 1903 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1904
1905 /* Some devices choke if TF registers contain garbage. Make
1906 * sure those are properly initialized.
1907 */
1908 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1909
1910 /* Device presence detection is unreliable on some
1911 * controllers. Always poll IDENTIFY if available.
1912 */
1913 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1914
963e4975
AC
1915 if (ap->ops->read_id)
1916 err_mask = ap->ops->read_id(dev, &tf, id);
1917 else
1918 err_mask = ata_do_dev_read_id(dev, &tf, id);
1919
a0123703 1920 if (err_mask) {
800b3996 1921 if (err_mask & AC_ERR_NODEV_HINT) {
a9a79dfe 1922 ata_dev_dbg(dev, "NODEV after polling detection\n");
55a8e2c8
TH
1923 return -ENOENT;
1924 }
1925
79b42bab 1926 if (is_semb) {
a9a79dfe
JP
1927 ata_dev_info(dev,
1928 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
79b42bab
TH
1929 /* SEMB is not supported yet */
1930 *p_class = ATA_DEV_SEMB_UNSUP;
1931 return 0;
1932 }
1933
1ffc151f
TH
1934 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1935 /* Device or controller might have reported
1936 * the wrong device class. Give a shot at the
1937 * other IDENTIFY if the current one is
1938 * aborted by the device.
1939 */
1940 if (may_fallback) {
1941 may_fallback = 0;
1942
1943 if (class == ATA_DEV_ATA)
1944 class = ATA_DEV_ATAPI;
1945 else
1946 class = ATA_DEV_ATA;
1947 goto retry;
1948 }
1949
1950 /* Control reaches here iff the device aborted
1951 * both flavors of IDENTIFYs which happens
1952 * sometimes with phantom devices.
1953 */
a9a79dfe
JP
1954 ata_dev_dbg(dev,
1955 "both IDENTIFYs aborted, assuming NODEV\n");
1ffc151f 1956 return -ENOENT;
54936f8b
TH
1957 }
1958
49016aca
TH
1959 rc = -EIO;
1960 reason = "I/O error";
1da177e4
LT
1961 goto err_out;
1962 }
1963
43c9c591 1964 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
a9a79dfe
JP
1965 ata_dev_dbg(dev, "dumping IDENTIFY data, "
1966 "class=%d may_fallback=%d tried_spinup=%d\n",
1967 class, may_fallback, tried_spinup);
43c9c591
TH
1968 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1969 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1970 }
1971
54936f8b
TH
1972 /* Falling back doesn't make sense if ID data was read
1973 * successfully at least once.
1974 */
1975 may_fallback = 0;
1976
49016aca 1977 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1978
49016aca 1979 /* sanity check */
a4f5749b 1980 rc = -EINVAL;
6070068b 1981 reason = "device reports invalid type";
a4f5749b
TH
1982
1983 if (class == ATA_DEV_ATA) {
1984 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1985 goto err_out;
db63a4c8
AW
1986 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1987 ata_id_is_ata(id)) {
1988 ata_dev_dbg(dev,
1989 "host indicates ignore ATA devices, ignored\n");
1990 return -ENOENT;
1991 }
a4f5749b
TH
1992 } else {
1993 if (ata_id_is_ata(id))
1994 goto err_out;
49016aca
TH
1995 }
1996
169439c2
ML
1997 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1998 tried_spinup = 1;
1999 /*
2000 * Drive powered-up in standby mode, and requires a specific
2001 * SET_FEATURES spin-up subcommand before it will accept
2002 * anything other than the original IDENTIFY command.
2003 */
218f3d30 2004 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 2005 if (err_mask && id[2] != 0x738c) {
169439c2
ML
2006 rc = -EIO;
2007 reason = "SPINUP failed";
2008 goto err_out;
2009 }
2010 /*
2011 * If the drive initially returned incomplete IDENTIFY info,
2012 * we now must reissue the IDENTIFY command.
2013 */
2014 if (id[2] == 0x37c8)
2015 goto retry;
2016 }
2017
bff04647 2018 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
2019 /*
2020 * The exact sequence expected by certain pre-ATA4 drives is:
2021 * SRST RESET
50a99018
AC
2022 * IDENTIFY (optional in early ATA)
2023 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
2024 * anything else..
2025 * Some drives were very specific about that exact sequence.
50a99018
AC
2026 *
2027 * Note that ATA4 says lba is mandatory so the second check
c9404c9c 2028 * should never trigger.
49016aca
TH
2029 */
2030 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2031 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2032 if (err_mask) {
2033 rc = -EIO;
2034 reason = "INIT_DEV_PARAMS failed";
2035 goto err_out;
2036 }
2037
2038 /* current CHS translation info (id[53-58]) might be
2039 * changed. reread the identify device info.
2040 */
bff04647 2041 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2042 goto retry;
2043 }
2044 }
2045
2046 *p_class = class;
fe635c7e 2047
49016aca
TH
2048 return 0;
2049
2050 err_out:
88574551 2051 if (ata_msg_warn(ap))
a9a79dfe
JP
2052 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2053 reason, err_mask);
49016aca
TH
2054 return rc;
2055}
2056
9062712f
TH
2057static int ata_do_link_spd_horkage(struct ata_device *dev)
2058{
2059 struct ata_link *plink = ata_dev_phys_link(dev);
2060 u32 target, target_limit;
2061
2062 if (!sata_scr_valid(plink))
2063 return 0;
2064
2065 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2066 target = 1;
2067 else
2068 return 0;
2069
2070 target_limit = (1 << target) - 1;
2071
2072 /* if already on stricter limit, no need to push further */
2073 if (plink->sata_spd_limit <= target_limit)
2074 return 0;
2075
2076 plink->sata_spd_limit = target_limit;
2077
2078 /* Request another EH round by returning -EAGAIN if link is
2079 * going faster than the target speed. Forward progress is
2080 * guaranteed by setting sata_spd_limit to target_limit above.
2081 */
2082 if (plink->sata_spd > target) {
a9a79dfe
JP
2083 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2084 sata_spd_string(target));
9062712f
TH
2085 return -EAGAIN;
2086 }
2087 return 0;
2088}
2089
3373efd8 2090static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2091{
9af5c9c9 2092 struct ata_port *ap = dev->link->ap;
9ce8e307
JA
2093
2094 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2095 return 0;
2096
9af5c9c9 2097 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2098}
2099
388539f3 2100static int ata_dev_config_ncq(struct ata_device *dev,
a6e6ce8e
TH
2101 char *desc, size_t desc_sz)
2102{
9af5c9c9 2103 struct ata_port *ap = dev->link->ap;
a6e6ce8e 2104 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
388539f3
SL
2105 unsigned int err_mask;
2106 char *aa_desc = "";
a6e6ce8e
TH
2107
2108 if (!ata_id_has_ncq(dev->id)) {
2109 desc[0] = '\0';
388539f3 2110 return 0;
a6e6ce8e 2111 }
75683fe7 2112 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6 2113 snprintf(desc, desc_sz, "NCQ (not used)");
388539f3 2114 return 0;
6919a0a6 2115 }
a6e6ce8e 2116 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2117 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2118 dev->flags |= ATA_DFLAG_NCQ;
2119 }
2120
388539f3
SL
2121 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2122 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2123 ata_id_has_fpdma_aa(dev->id)) {
2124 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2125 SATA_FPDMA_AA);
2126 if (err_mask) {
a9a79dfe
JP
2127 ata_dev_err(dev,
2128 "failed to enable AA (error_mask=0x%x)\n",
2129 err_mask);
388539f3
SL
2130 if (err_mask != AC_ERR_DEV) {
2131 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2132 return -EIO;
2133 }
2134 } else
2135 aa_desc = ", AA";
2136 }
2137
a6e6ce8e 2138 if (hdepth >= ddepth)
388539f3 2139 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
a6e6ce8e 2140 else
388539f3
SL
2141 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2142 ddepth, aa_desc);
ed36911c
MC
2143
2144 if ((ap->flags & ATA_FLAG_FPDMA_AUX) &&
2145 ata_id_has_ncq_send_and_recv(dev->id)) {
2146 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2147 0, ap->sector_buf, 1);
2148 if (err_mask) {
2149 ata_dev_dbg(dev,
2150 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2151 err_mask);
2152 } else {
f78dea06
MC
2153 u8 *cmds = dev->ncq_send_recv_cmds;
2154
ed36911c 2155 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
f78dea06
MC
2156 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2157
2158 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2159 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2160 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2161 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2162 }
ed36911c
MC
2163 }
2164 }
2165
388539f3 2166 return 0;
a6e6ce8e
TH
2167}
2168
49016aca 2169/**
ffeae418 2170 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
2171 * @dev: Target device to configure
2172 *
2173 * Configure @dev according to @dev->id. Generic and low-level
2174 * driver specific fixups are also applied.
49016aca
TH
2175 *
2176 * LOCKING:
ffeae418
TH
2177 * Kernel thread context (may sleep)
2178 *
2179 * RETURNS:
2180 * 0 on success, -errno otherwise
49016aca 2181 */
efdaedc4 2182int ata_dev_configure(struct ata_device *dev)
49016aca 2183{
9af5c9c9
TH
2184 struct ata_port *ap = dev->link->ap;
2185 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 2186 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 2187 const u16 *id = dev->id;
7dc951ae 2188 unsigned long xfer_mask;
65fe1f0f 2189 unsigned int err_mask;
b352e57d 2190 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
2191 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2192 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 2193 int rc;
49016aca 2194
0dd4b21f 2195 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
a9a79dfe 2196 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
ffeae418 2197 return 0;
49016aca
TH
2198 }
2199
0dd4b21f 2200 if (ata_msg_probe(ap))
a9a79dfe 2201 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1da177e4 2202
75683fe7
TH
2203 /* set horkage */
2204 dev->horkage |= ata_dev_blacklisted(dev);
33267325 2205 ata_force_horkage(dev);
75683fe7 2206
50af2fa1 2207 if (dev->horkage & ATA_HORKAGE_DISABLE) {
a9a79dfe 2208 ata_dev_info(dev, "unsupported device, disabling\n");
50af2fa1
TH
2209 ata_dev_disable(dev);
2210 return 0;
2211 }
2212
2486fa56
TH
2213 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2214 dev->class == ATA_DEV_ATAPI) {
a9a79dfe
JP
2215 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2216 atapi_enabled ? "not supported with this driver"
2217 : "disabled");
2486fa56
TH
2218 ata_dev_disable(dev);
2219 return 0;
2220 }
2221
9062712f
TH
2222 rc = ata_do_link_spd_horkage(dev);
2223 if (rc)
2224 return rc;
2225
ecd75ad5
TH
2226 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2227 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2228 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2229 dev->horkage |= ATA_HORKAGE_NOLPM;
2230
2231 if (dev->horkage & ATA_HORKAGE_NOLPM) {
2232 ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2233 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2234 }
2235
6746544c
TH
2236 /* let ACPI work its magic */
2237 rc = ata_acpi_on_devcfg(dev);
2238 if (rc)
2239 return rc;
08573a86 2240
05027adc
TH
2241 /* massage HPA, do it early as it might change IDENTIFY data */
2242 rc = ata_hpa_resize(dev);
2243 if (rc)
2244 return rc;
2245
c39f5ebe 2246 /* print device capabilities */
0dd4b21f 2247 if (ata_msg_probe(ap))
a9a79dfe
JP
2248 ata_dev_dbg(dev,
2249 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2250 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2251 __func__,
2252 id[49], id[82], id[83], id[84],
2253 id[85], id[86], id[87], id[88]);
c39f5ebe 2254
208a9933 2255 /* initialize to-be-configured parameters */
ea1dd4e1 2256 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
2257 dev->max_sectors = 0;
2258 dev->cdb_len = 0;
2259 dev->n_sectors = 0;
2260 dev->cylinders = 0;
2261 dev->heads = 0;
2262 dev->sectors = 0;
e18086d6 2263 dev->multi_count = 0;
208a9933 2264
1da177e4
LT
2265 /*
2266 * common ATA, ATAPI feature tests
2267 */
2268
ff8854b2 2269 /* find max transfer mode; for printk only */
1148c3a7 2270 xfer_mask = ata_id_xfermask(id);
1da177e4 2271
0dd4b21f
BP
2272 if (ata_msg_probe(ap))
2273 ata_dump_id(id);
1da177e4 2274
ef143d57
AL
2275 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2276 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2277 sizeof(fwrevbuf));
2278
2279 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2280 sizeof(modelbuf));
2281
1da177e4
LT
2282 /* ATA-specific feature tests */
2283 if (dev->class == ATA_DEV_ATA) {
b352e57d 2284 if (ata_id_is_cfa(id)) {
62afe5d7
SS
2285 /* CPRM may make this media unusable */
2286 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
a9a79dfe
JP
2287 ata_dev_warn(dev,
2288 "supports DRM functions and may not be fully accessible\n");
b352e57d 2289 snprintf(revbuf, 7, "CFA");
ae8d4ee7 2290 } else {
2dcb407e 2291 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
ae8d4ee7
AC
2292 /* Warn the user if the device has TPM extensions */
2293 if (ata_id_has_tpm(id))
a9a79dfe
JP
2294 ata_dev_warn(dev,
2295 "supports DRM functions and may not be fully accessible\n");
ae8d4ee7 2296 }
b352e57d 2297
1148c3a7 2298 dev->n_sectors = ata_id_n_sectors(id);
2940740b 2299
e18086d6
ML
2300 /* get current R/W Multiple count setting */
2301 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2302 unsigned int max = dev->id[47] & 0xff;
2303 unsigned int cnt = dev->id[59] & 0xff;
2304 /* only recognize/allow powers of two here */
2305 if (is_power_of_2(max) && is_power_of_2(cnt))
2306 if (cnt <= max)
2307 dev->multi_count = cnt;
2308 }
3f64f565 2309
1148c3a7 2310 if (ata_id_has_lba(id)) {
4c2d721a 2311 const char *lba_desc;
388539f3 2312 char ncq_desc[24];
8bf62ece 2313
4c2d721a
TH
2314 lba_desc = "LBA";
2315 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 2316 if (ata_id_has_lba48(id)) {
8bf62ece 2317 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 2318 lba_desc = "LBA48";
6fc49adb
TH
2319
2320 if (dev->n_sectors >= (1UL << 28) &&
2321 ata_id_has_flush_ext(id))
2322 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 2323 }
8bf62ece 2324
a6e6ce8e 2325 /* config NCQ */
388539f3
SL
2326 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2327 if (rc)
2328 return rc;
a6e6ce8e 2329
8bf62ece 2330 /* print device info to dmesg */
3f64f565 2331 if (ata_msg_drv(ap) && print_info) {
a9a79dfe
JP
2332 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2333 revbuf, modelbuf, fwrevbuf,
2334 ata_mode_string(xfer_mask));
2335 ata_dev_info(dev,
2336 "%llu sectors, multi %u: %s %s\n",
f15a1daf 2337 (unsigned long long)dev->n_sectors,
3f64f565
EM
2338 dev->multi_count, lba_desc, ncq_desc);
2339 }
ffeae418 2340 } else {
8bf62ece
AL
2341 /* CHS */
2342
2343 /* Default translation */
1148c3a7
TH
2344 dev->cylinders = id[1];
2345 dev->heads = id[3];
2346 dev->sectors = id[6];
8bf62ece 2347
1148c3a7 2348 if (ata_id_current_chs_valid(id)) {
8bf62ece 2349 /* Current CHS translation is valid. */
1148c3a7
TH
2350 dev->cylinders = id[54];
2351 dev->heads = id[55];
2352 dev->sectors = id[56];
8bf62ece
AL
2353 }
2354
2355 /* print device info to dmesg */
3f64f565 2356 if (ata_msg_drv(ap) && print_info) {
a9a79dfe
JP
2357 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2358 revbuf, modelbuf, fwrevbuf,
2359 ata_mode_string(xfer_mask));
2360 ata_dev_info(dev,
2361 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2362 (unsigned long long)dev->n_sectors,
2363 dev->multi_count, dev->cylinders,
2364 dev->heads, dev->sectors);
3f64f565 2365 }
07f6f7d0
AL
2366 }
2367
803739d2
SH
2368 /* Check and mark DevSlp capability. Get DevSlp timing variables
2369 * from SATA Settings page of Identify Device Data Log.
65fe1f0f 2370 */
803739d2 2371 if (ata_id_has_devslp(dev->id)) {
8e725c7f 2372 u8 *sata_setting = ap->sector_buf;
803739d2
SH
2373 int i, j;
2374
2375 dev->flags |= ATA_DFLAG_DEVSLP;
65fe1f0f
SH
2376 err_mask = ata_read_log_page(dev,
2377 ATA_LOG_SATA_ID_DEV_DATA,
2378 ATA_LOG_SATA_SETTINGS,
803739d2 2379 sata_setting,
65fe1f0f
SH
2380 1);
2381 if (err_mask)
2382 ata_dev_dbg(dev,
2383 "failed to get Identify Device Data, Emask 0x%x\n",
2384 err_mask);
803739d2
SH
2385 else
2386 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2387 j = ATA_LOG_DEVSLP_OFFSET + i;
2388 dev->devslp_timing[i] = sata_setting[j];
2389 }
65fe1f0f
SH
2390 }
2391
6e7846e9 2392 dev->cdb_len = 16;
1da177e4
LT
2393 }
2394
2395 /* ATAPI-specific feature tests */
2c13b7ce 2396 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2397 const char *cdb_intr_string = "";
2398 const char *atapi_an_string = "";
91163006 2399 const char *dma_dir_string = "";
7d77b247 2400 u32 sntf;
08a556db 2401
1148c3a7 2402 rc = atapi_cdb_len(id);
1da177e4 2403 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2404 if (ata_msg_warn(ap))
a9a79dfe 2405 ata_dev_warn(dev, "unsupported CDB len\n");
ffeae418 2406 rc = -EINVAL;
1da177e4
LT
2407 goto err_out_nosup;
2408 }
6e7846e9 2409 dev->cdb_len = (unsigned int) rc;
1da177e4 2410
7d77b247
TH
2411 /* Enable ATAPI AN if both the host and device have
2412 * the support. If PMP is attached, SNTF is required
2413 * to enable ATAPI AN to discern between PHY status
2414 * changed notifications and ATAPI ANs.
9f45cbd3 2415 */
e7ecd435
TH
2416 if (atapi_an &&
2417 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
071f44b1 2418 (!sata_pmp_attached(ap) ||
7d77b247 2419 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
9f45cbd3 2420 /* issue SET feature command to turn this on */
218f3d30
JG
2421 err_mask = ata_dev_set_feature(dev,
2422 SETFEATURES_SATA_ENABLE, SATA_AN);
854c73a2 2423 if (err_mask)
a9a79dfe
JP
2424 ata_dev_err(dev,
2425 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2426 err_mask);
854c73a2 2427 else {
9f45cbd3 2428 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2429 atapi_an_string = ", ATAPI AN";
2430 }
9f45cbd3
KCA
2431 }
2432
08a556db 2433 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2434 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2435 cdb_intr_string = ", CDB intr";
2436 }
312f7da2 2437
966fbe19 2438 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
91163006
TH
2439 dev->flags |= ATA_DFLAG_DMADIR;
2440 dma_dir_string = ", DMADIR";
2441 }
2442
afe75951 2443 if (ata_id_has_da(dev->id)) {
b1354cbb 2444 dev->flags |= ATA_DFLAG_DA;
afe75951
AL
2445 zpodd_init(dev);
2446 }
b1354cbb 2447
1da177e4 2448 /* print device info to dmesg */
5afc8142 2449 if (ata_msg_drv(ap) && print_info)
a9a79dfe
JP
2450 ata_dev_info(dev,
2451 "ATAPI: %s, %s, max %s%s%s%s\n",
2452 modelbuf, fwrevbuf,
2453 ata_mode_string(xfer_mask),
2454 cdb_intr_string, atapi_an_string,
2455 dma_dir_string);
1da177e4
LT
2456 }
2457
914ed354
TH
2458 /* determine max_sectors */
2459 dev->max_sectors = ATA_MAX_SECTORS;
2460 if (dev->flags & ATA_DFLAG_LBA48)
2461 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2462
c5038fc0
AC
2463 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2464 200 sectors */
3373efd8 2465 if (ata_dev_knobble(dev)) {
5afc8142 2466 if (ata_msg_drv(ap) && print_info)
a9a79dfe 2467 ata_dev_info(dev, "applying bridge limits\n");
5a529139 2468 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2469 dev->max_sectors = ATA_MAX_SECTORS;
2470 }
2471
f8d8e579 2472 if ((dev->class == ATA_DEV_ATAPI) &&
f442cd86 2473 (atapi_command_packet_set(id) == TYPE_TAPE)) {
f8d8e579 2474 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
f442cd86
AL
2475 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2476 }
f8d8e579 2477
75683fe7 2478 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2479 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2480 dev->max_sectors);
18d6e9d5 2481
a32450e1
SH
2482 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2483 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2484
4b2f3ede 2485 if (ap->ops->dev_config)
cd0d3bbc 2486 ap->ops->dev_config(dev);
4b2f3ede 2487
c5038fc0
AC
2488 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2489 /* Let the user know. We don't want to disallow opens for
2490 rescue purposes, or in case the vendor is just a blithering
2491 idiot. Do this after the dev_config call as some controllers
2492 with buggy firmware may want to avoid reporting false device
2493 bugs */
2494
2495 if (print_info) {
a9a79dfe 2496 ata_dev_warn(dev,
c5038fc0 2497"Drive reports diagnostics failure. This may indicate a drive\n");
a9a79dfe 2498 ata_dev_warn(dev,
c5038fc0
AC
2499"fault or invalid emulation. Contact drive vendor for information.\n");
2500 }
2501 }
2502
ac70a964 2503 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
a9a79dfe
JP
2504 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2505 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
ac70a964
TH
2506 }
2507
ffeae418 2508 return 0;
1da177e4
LT
2509
2510err_out_nosup:
0dd4b21f 2511 if (ata_msg_probe(ap))
a9a79dfe 2512 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
ffeae418 2513 return rc;
1da177e4
LT
2514}
2515
be0d18df 2516/**
2e41e8e6 2517 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2518 * @ap: port
2519 *
2e41e8e6 2520 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2521 * detection.
2522 */
2523
2524int ata_cable_40wire(struct ata_port *ap)
2525{
2526 return ATA_CBL_PATA40;
2527}
2528
2529/**
2e41e8e6 2530 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2531 * @ap: port
2532 *
2e41e8e6 2533 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2534 * detection.
2535 */
2536
2537int ata_cable_80wire(struct ata_port *ap)
2538{
2539 return ATA_CBL_PATA80;
2540}
2541
2542/**
2543 * ata_cable_unknown - return unknown PATA cable.
2544 * @ap: port
2545 *
2546 * Helper method for drivers which have no PATA cable detection.
2547 */
2548
2549int ata_cable_unknown(struct ata_port *ap)
2550{
2551 return ATA_CBL_PATA_UNK;
2552}
2553
c88f90c3
TH
2554/**
2555 * ata_cable_ignore - return ignored PATA cable.
2556 * @ap: port
2557 *
2558 * Helper method for drivers which don't use cable type to limit
2559 * transfer mode.
2560 */
2561int ata_cable_ignore(struct ata_port *ap)
2562{
2563 return ATA_CBL_PATA_IGN;
2564}
2565
be0d18df
AC
2566/**
2567 * ata_cable_sata - return SATA cable type
2568 * @ap: port
2569 *
2570 * Helper method for drivers which have SATA cables
2571 */
2572
2573int ata_cable_sata(struct ata_port *ap)
2574{
2575 return ATA_CBL_SATA;
2576}
2577
1da177e4
LT
2578/**
2579 * ata_bus_probe - Reset and probe ATA bus
2580 * @ap: Bus to probe
2581 *
0cba632b
JG
2582 * Master ATA bus probing function. Initiates a hardware-dependent
2583 * bus reset, then attempts to identify any devices found on
2584 * the bus.
2585 *
1da177e4 2586 * LOCKING:
0cba632b 2587 * PCI/etc. bus probe sem.
1da177e4
LT
2588 *
2589 * RETURNS:
96072e69 2590 * Zero on success, negative errno otherwise.
1da177e4
LT
2591 */
2592
80289167 2593int ata_bus_probe(struct ata_port *ap)
1da177e4 2594{
28ca5c57 2595 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2596 int tries[ATA_MAX_DEVICES];
f58229f8 2597 int rc;
e82cbdb9 2598 struct ata_device *dev;
1da177e4 2599
1eca4365 2600 ata_for_each_dev(dev, &ap->link, ALL)
f58229f8 2601 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2602
2603 retry:
1eca4365 2604 ata_for_each_dev(dev, &ap->link, ALL) {
cdeab114
TH
2605 /* If we issue an SRST then an ATA drive (not ATAPI)
2606 * may change configuration and be in PIO0 timing. If
2607 * we do a hard reset (or are coming from power on)
2608 * this is true for ATA or ATAPI. Until we've set a
2609 * suitable controller mode we should not touch the
2610 * bus as we may be talking too fast.
2611 */
2612 dev->pio_mode = XFER_PIO_0;
5416912a 2613 dev->dma_mode = 0xff;
cdeab114
TH
2614
2615 /* If the controller has a pio mode setup function
2616 * then use it to set the chipset to rights. Don't
2617 * touch the DMA setup as that will be dealt with when
2618 * configuring devices.
2619 */
2620 if (ap->ops->set_piomode)
2621 ap->ops->set_piomode(ap, dev);
2622 }
2623
2044470c 2624 /* reset and determine device classes */
52783c5d 2625 ap->ops->phy_reset(ap);
2061a47a 2626
1eca4365 2627 ata_for_each_dev(dev, &ap->link, ALL) {
3e4ec344 2628 if (dev->class != ATA_DEV_UNKNOWN)
52783c5d
TH
2629 classes[dev->devno] = dev->class;
2630 else
2631 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2632
52783c5d 2633 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2634 }
1da177e4 2635
f31f0cc2
JG
2636 /* read IDENTIFY page and configure devices. We have to do the identify
2637 specific sequence bass-ackwards so that PDIAG- is released by
2638 the slave device */
2639
1eca4365 2640 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
f58229f8
TH
2641 if (tries[dev->devno])
2642 dev->class = classes[dev->devno];
ffeae418 2643
14d2bac1 2644 if (!ata_dev_enabled(dev))
ffeae418 2645 continue;
ffeae418 2646
bff04647
TH
2647 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2648 dev->id);
14d2bac1
TH
2649 if (rc)
2650 goto fail;
f31f0cc2
JG
2651 }
2652
be0d18df
AC
2653 /* Now ask for the cable type as PDIAG- should have been released */
2654 if (ap->ops->cable_detect)
2655 ap->cbl = ap->ops->cable_detect(ap);
2656
1eca4365
TH
2657 /* We may have SATA bridge glue hiding here irrespective of
2658 * the reported cable types and sensed types. When SATA
2659 * drives indicate we have a bridge, we don't know which end
2660 * of the link the bridge is which is a problem.
2661 */
2662 ata_for_each_dev(dev, &ap->link, ENABLED)
614fe29b
AC
2663 if (ata_id_is_sata(dev->id))
2664 ap->cbl = ATA_CBL_SATA;
614fe29b 2665
f31f0cc2
JG
2666 /* After the identify sequence we can now set up the devices. We do
2667 this in the normal order so that the user doesn't get confused */
2668
1eca4365 2669 ata_for_each_dev(dev, &ap->link, ENABLED) {
9af5c9c9 2670 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2671 rc = ata_dev_configure(dev);
9af5c9c9 2672 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2673 if (rc)
2674 goto fail;
1da177e4
LT
2675 }
2676
e82cbdb9 2677 /* configure transfer mode */
0260731f 2678 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2679 if (rc)
51713d35 2680 goto fail;
1da177e4 2681
1eca4365
TH
2682 ata_for_each_dev(dev, &ap->link, ENABLED)
2683 return 0;
1da177e4 2684
96072e69 2685 return -ENODEV;
14d2bac1
TH
2686
2687 fail:
4ae72a1e
TH
2688 tries[dev->devno]--;
2689
14d2bac1
TH
2690 switch (rc) {
2691 case -EINVAL:
4ae72a1e 2692 /* eeek, something went very wrong, give up */
14d2bac1
TH
2693 tries[dev->devno] = 0;
2694 break;
4ae72a1e
TH
2695
2696 case -ENODEV:
2697 /* give it just one more chance */
2698 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2699 case -EIO:
4ae72a1e
TH
2700 if (tries[dev->devno] == 1) {
2701 /* This is the last chance, better to slow
2702 * down than lose it.
2703 */
a07d499b 2704 sata_down_spd_limit(&ap->link, 0);
4ae72a1e
TH
2705 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2706 }
14d2bac1
TH
2707 }
2708
4ae72a1e 2709 if (!tries[dev->devno])
3373efd8 2710 ata_dev_disable(dev);
ec573755 2711
14d2bac1 2712 goto retry;
1da177e4
LT
2713}
2714
3be680b7
TH
2715/**
2716 * sata_print_link_status - Print SATA link status
936fd732 2717 * @link: SATA link to printk link status about
3be680b7
TH
2718 *
2719 * This function prints link speed and status of a SATA link.
2720 *
2721 * LOCKING:
2722 * None.
2723 */
6bdb4fc9 2724static void sata_print_link_status(struct ata_link *link)
3be680b7 2725{
6d5f9732 2726 u32 sstatus, scontrol, tmp;
3be680b7 2727
936fd732 2728 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2729 return;
936fd732 2730 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2731
b1c72916 2732 if (ata_phys_link_online(link)) {
3be680b7 2733 tmp = (sstatus >> 4) & 0xf;
a9a79dfe
JP
2734 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2735 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2736 } else {
a9a79dfe
JP
2737 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2738 sstatus, scontrol);
3be680b7
TH
2739 }
2740}
2741
ebdfca6e
AC
2742/**
2743 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2744 * @adev: device
2745 *
2746 * Obtain the other device on the same cable, or if none is
2747 * present NULL is returned
2748 */
2e9edbf8 2749
3373efd8 2750struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2751{
9af5c9c9
TH
2752 struct ata_link *link = adev->link;
2753 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2754 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2755 return NULL;
2756 return pair;
2757}
2758
1c3fae4d 2759/**
3c567b7d 2760 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2761 * @link: Link to adjust SATA spd limit for
a07d499b 2762 * @spd_limit: Additional limit
1c3fae4d 2763 *
936fd732 2764 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2765 * function only adjusts the limit. The change must be applied
3c567b7d 2766 * using sata_set_spd().
1c3fae4d 2767 *
a07d499b
TH
2768 * If @spd_limit is non-zero, the speed is limited to equal to or
2769 * lower than @spd_limit if such speed is supported. If
2770 * @spd_limit is slower than any supported speed, only the lowest
2771 * supported speed is allowed.
2772 *
1c3fae4d
TH
2773 * LOCKING:
2774 * Inherited from caller.
2775 *
2776 * RETURNS:
2777 * 0 on success, negative errno on failure
2778 */
a07d499b 2779int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
1c3fae4d 2780{
81952c54 2781 u32 sstatus, spd, mask;
a07d499b 2782 int rc, bit;
1c3fae4d 2783
936fd732 2784 if (!sata_scr_valid(link))
008a7896
TH
2785 return -EOPNOTSUPP;
2786
2787 /* If SCR can be read, use it to determine the current SPD.
936fd732 2788 * If not, use cached value in link->sata_spd.
008a7896 2789 */
936fd732 2790 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
9913ff8a 2791 if (rc == 0 && ata_sstatus_online(sstatus))
008a7896
TH
2792 spd = (sstatus >> 4) & 0xf;
2793 else
936fd732 2794 spd = link->sata_spd;
1c3fae4d 2795
936fd732 2796 mask = link->sata_spd_limit;
1c3fae4d
TH
2797 if (mask <= 1)
2798 return -EINVAL;
008a7896
TH
2799
2800 /* unconditionally mask off the highest bit */
a07d499b
TH
2801 bit = fls(mask) - 1;
2802 mask &= ~(1 << bit);
1c3fae4d 2803
008a7896
TH
2804 /* Mask off all speeds higher than or equal to the current
2805 * one. Force 1.5Gbps if current SPD is not available.
2806 */
2807 if (spd > 1)
2808 mask &= (1 << (spd - 1)) - 1;
2809 else
2810 mask &= 1;
2811
2812 /* were we already at the bottom? */
1c3fae4d
TH
2813 if (!mask)
2814 return -EINVAL;
2815
a07d499b
TH
2816 if (spd_limit) {
2817 if (mask & ((1 << spd_limit) - 1))
2818 mask &= (1 << spd_limit) - 1;
2819 else {
2820 bit = ffs(mask) - 1;
2821 mask = 1 << bit;
2822 }
2823 }
2824
936fd732 2825 link->sata_spd_limit = mask;
1c3fae4d 2826
a9a79dfe
JP
2827 ata_link_warn(link, "limiting SATA link speed to %s\n",
2828 sata_spd_string(fls(mask)));
1c3fae4d
TH
2829
2830 return 0;
2831}
2832
936fd732 2833static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2834{
5270222f
TH
2835 struct ata_link *host_link = &link->ap->link;
2836 u32 limit, target, spd;
1c3fae4d 2837
5270222f
TH
2838 limit = link->sata_spd_limit;
2839
2840 /* Don't configure downstream link faster than upstream link.
2841 * It doesn't speed up anything and some PMPs choke on such
2842 * configuration.
2843 */
2844 if (!ata_is_host_link(link) && host_link->sata_spd)
2845 limit &= (1 << host_link->sata_spd) - 1;
2846
2847 if (limit == UINT_MAX)
2848 target = 0;
1c3fae4d 2849 else
5270222f 2850 target = fls(limit);
1c3fae4d
TH
2851
2852 spd = (*scontrol >> 4) & 0xf;
5270222f 2853 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2854
5270222f 2855 return spd != target;
1c3fae4d
TH
2856}
2857
2858/**
3c567b7d 2859 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2860 * @link: Link in question
1c3fae4d
TH
2861 *
2862 * Test whether the spd limit in SControl matches
936fd732 2863 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2864 * whether hardreset is necessary to apply SATA spd
2865 * configuration.
2866 *
2867 * LOCKING:
2868 * Inherited from caller.
2869 *
2870 * RETURNS:
2871 * 1 if SATA spd configuration is needed, 0 otherwise.
2872 */
1dc55e87 2873static int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2874{
2875 u32 scontrol;
2876
936fd732 2877 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2878 return 1;
1c3fae4d 2879
936fd732 2880 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2881}
2882
2883/**
3c567b7d 2884 * sata_set_spd - set SATA spd according to spd limit
936fd732 2885 * @link: Link to set SATA spd for
1c3fae4d 2886 *
936fd732 2887 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2888 *
2889 * LOCKING:
2890 * Inherited from caller.
2891 *
2892 * RETURNS:
2893 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2894 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2895 */
936fd732 2896int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2897{
2898 u32 scontrol;
81952c54 2899 int rc;
1c3fae4d 2900
936fd732 2901 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2902 return rc;
1c3fae4d 2903
936fd732 2904 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2905 return 0;
2906
936fd732 2907 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2908 return rc;
2909
1c3fae4d
TH
2910 return 1;
2911}
2912
452503f9
AC
2913/*
2914 * This mode timing computation functionality is ported over from
2915 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2916 */
2917/*
b352e57d 2918 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2919 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2920 * for UDMA6, which is currently supported only by Maxtor drives.
2921 *
2922 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2923 */
2924
2925static const struct ata_timing ata_timing[] = {
3ada9c12
DD
2926/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
2927 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
2928 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
2929 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
2930 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
2931 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
2932 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
2933 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
2934
2935 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
2936 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
2937 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
2938
2939 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
2940 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
2941 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
2942 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
2943 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
2944
2945/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2946 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
2947 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
2948 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
2949 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
2950 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
2951 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
2952 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
452503f9
AC
2953
2954 { 0xFF }
2955};
2956
2dcb407e
JG
2957#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2958#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
452503f9
AC
2959
2960static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2961{
3ada9c12
DD
2962 q->setup = EZ(t->setup * 1000, T);
2963 q->act8b = EZ(t->act8b * 1000, T);
2964 q->rec8b = EZ(t->rec8b * 1000, T);
2965 q->cyc8b = EZ(t->cyc8b * 1000, T);
2966 q->active = EZ(t->active * 1000, T);
2967 q->recover = EZ(t->recover * 1000, T);
2968 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
2969 q->cycle = EZ(t->cycle * 1000, T);
2970 q->udma = EZ(t->udma * 1000, UT);
452503f9
AC
2971}
2972
2973void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2974 struct ata_timing *m, unsigned int what)
2975{
2976 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2977 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2978 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2979 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2980 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2981 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3ada9c12 2982 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
452503f9
AC
2983 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2984 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2985}
2986
6357357c 2987const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 2988{
70cd071e
TH
2989 const struct ata_timing *t = ata_timing;
2990
2991 while (xfer_mode > t->mode)
2992 t++;
452503f9 2993
70cd071e
TH
2994 if (xfer_mode == t->mode)
2995 return t;
cd705d5a
BP
2996
2997 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
2998 __func__, xfer_mode);
2999
70cd071e 3000 return NULL;
452503f9
AC
3001}
3002
3003int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3004 struct ata_timing *t, int T, int UT)
3005{
9e8808a9 3006 const u16 *id = adev->id;
452503f9
AC
3007 const struct ata_timing *s;
3008 struct ata_timing p;
3009
3010 /*
2e9edbf8 3011 * Find the mode.
75b1f2f8 3012 */
452503f9
AC
3013
3014 if (!(s = ata_timing_find_mode(speed)))
3015 return -EINVAL;
3016
75b1f2f8
AL
3017 memcpy(t, s, sizeof(*s));
3018
452503f9
AC
3019 /*
3020 * If the drive is an EIDE drive, it can tell us it needs extended
3021 * PIO/MW_DMA cycle timing.
3022 */
3023
9e8808a9 3024 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
452503f9 3025 memset(&p, 0, sizeof(p));
9e8808a9 3026
bff00256 3027 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
9e8808a9
BZ
3028 if (speed <= XFER_PIO_2)
3029 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3030 else if ((speed <= XFER_PIO_4) ||
3031 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3032 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3033 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3034 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3035
452503f9
AC
3036 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3037 }
3038
3039 /*
3040 * Convert the timing to bus clock counts.
3041 */
3042
75b1f2f8 3043 ata_timing_quantize(t, t, T, UT);
452503f9
AC
3044
3045 /*
c893a3ae
RD
3046 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3047 * S.M.A.R.T * and some other commands. We have to ensure that the
3048 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
3049 */
3050
fd3367af 3051 if (speed > XFER_PIO_6) {
452503f9
AC
3052 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3053 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3054 }
3055
3056 /*
c893a3ae 3057 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
3058 */
3059
3060 if (t->act8b + t->rec8b < t->cyc8b) {
3061 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3062 t->rec8b = t->cyc8b - t->act8b;
3063 }
3064
3065 if (t->active + t->recover < t->cycle) {
3066 t->active += (t->cycle - (t->active + t->recover)) / 2;
3067 t->recover = t->cycle - t->active;
3068 }
a617c09f 3069
4f701d1e
AC
3070 /* In a few cases quantisation may produce enough errors to
3071 leave t->cycle too low for the sum of active and recovery
3072 if so we must correct this */
3073 if (t->active + t->recover > t->cycle)
3074 t->cycle = t->active + t->recover;
452503f9
AC
3075
3076 return 0;
3077}
3078
a0f79b92
TH
3079/**
3080 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3081 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3082 * @cycle: cycle duration in ns
3083 *
3084 * Return matching xfer mode for @cycle. The returned mode is of
3085 * the transfer type specified by @xfer_shift. If @cycle is too
3086 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3087 * than the fastest known mode, the fasted mode is returned.
3088 *
3089 * LOCKING:
3090 * None.
3091 *
3092 * RETURNS:
3093 * Matching xfer_mode, 0xff if no match found.
3094 */
3095u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3096{
3097 u8 base_mode = 0xff, last_mode = 0xff;
3098 const struct ata_xfer_ent *ent;
3099 const struct ata_timing *t;
3100
3101 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3102 if (ent->shift == xfer_shift)
3103 base_mode = ent->base;
3104
3105 for (t = ata_timing_find_mode(base_mode);
3106 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3107 unsigned short this_cycle;
3108
3109 switch (xfer_shift) {
3110 case ATA_SHIFT_PIO:
3111 case ATA_SHIFT_MWDMA:
3112 this_cycle = t->cycle;
3113 break;
3114 case ATA_SHIFT_UDMA:
3115 this_cycle = t->udma;
3116 break;
3117 default:
3118 return 0xff;
3119 }
3120
3121 if (cycle > this_cycle)
3122 break;
3123
3124 last_mode = t->mode;
3125 }
3126
3127 return last_mode;
3128}
3129
cf176e1a
TH
3130/**
3131 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 3132 * @dev: Device to adjust xfer masks
458337db 3133 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
3134 *
3135 * Adjust xfer masks of @dev downward. Note that this function
3136 * does not apply the change. Invoking ata_set_mode() afterwards
3137 * will apply the limit.
3138 *
3139 * LOCKING:
3140 * Inherited from caller.
3141 *
3142 * RETURNS:
3143 * 0 on success, negative errno on failure
3144 */
458337db 3145int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 3146{
458337db 3147 char buf[32];
7dc951ae
TH
3148 unsigned long orig_mask, xfer_mask;
3149 unsigned long pio_mask, mwdma_mask, udma_mask;
458337db 3150 int quiet, highbit;
cf176e1a 3151
458337db
TH
3152 quiet = !!(sel & ATA_DNXFER_QUIET);
3153 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 3154
458337db
TH
3155 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3156 dev->mwdma_mask,
3157 dev->udma_mask);
3158 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 3159
458337db
TH
3160 switch (sel) {
3161 case ATA_DNXFER_PIO:
3162 highbit = fls(pio_mask) - 1;
3163 pio_mask &= ~(1 << highbit);
3164 break;
3165
3166 case ATA_DNXFER_DMA:
3167 if (udma_mask) {
3168 highbit = fls(udma_mask) - 1;
3169 udma_mask &= ~(1 << highbit);
3170 if (!udma_mask)
3171 return -ENOENT;
3172 } else if (mwdma_mask) {
3173 highbit = fls(mwdma_mask) - 1;
3174 mwdma_mask &= ~(1 << highbit);
3175 if (!mwdma_mask)
3176 return -ENOENT;
3177 }
3178 break;
3179
3180 case ATA_DNXFER_40C:
3181 udma_mask &= ATA_UDMA_MASK_40C;
3182 break;
3183
3184 case ATA_DNXFER_FORCE_PIO0:
3185 pio_mask &= 1;
3186 case ATA_DNXFER_FORCE_PIO:
3187 mwdma_mask = 0;
3188 udma_mask = 0;
3189 break;
3190
458337db
TH
3191 default:
3192 BUG();
3193 }
3194
3195 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3196
3197 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3198 return -ENOENT;
3199
3200 if (!quiet) {
3201 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3202 snprintf(buf, sizeof(buf), "%s:%s",
3203 ata_mode_string(xfer_mask),
3204 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3205 else
3206 snprintf(buf, sizeof(buf), "%s",
3207 ata_mode_string(xfer_mask));
3208
a9a79dfe 3209 ata_dev_warn(dev, "limiting speed to %s\n", buf);
458337db 3210 }
cf176e1a
TH
3211
3212 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3213 &dev->udma_mask);
3214
cf176e1a 3215 return 0;
cf176e1a
TH
3216}
3217
3373efd8 3218static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 3219{
d0cb43b3 3220 struct ata_port *ap = dev->link->ap;
9af5c9c9 3221 struct ata_eh_context *ehc = &dev->link->eh_context;
d0cb43b3 3222 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
4055dee7
TH
3223 const char *dev_err_whine = "";
3224 int ign_dev_err = 0;
d0cb43b3 3225 unsigned int err_mask = 0;
83206a29 3226 int rc;
1da177e4 3227
e8384607 3228 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
3229 if (dev->xfer_shift == ATA_SHIFT_PIO)
3230 dev->flags |= ATA_DFLAG_PIO;
3231
d0cb43b3
TH
3232 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3233 dev_err_whine = " (SET_XFERMODE skipped)";
3234 else {
3235 if (nosetxfer)
a9a79dfe
JP
3236 ata_dev_warn(dev,
3237 "NOSETXFER but PATA detected - can't "
3238 "skip SETXFER, might malfunction\n");
d0cb43b3
TH
3239 err_mask = ata_dev_set_xfermode(dev);
3240 }
2dcb407e 3241
4055dee7
TH
3242 if (err_mask & ~AC_ERR_DEV)
3243 goto fail;
3244
3245 /* revalidate */
3246 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3247 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3248 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3249 if (rc)
3250 return rc;
3251
b93fda12
AC
3252 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3253 /* Old CFA may refuse this command, which is just fine */
3254 if (ata_id_is_cfa(dev->id))
3255 ign_dev_err = 1;
3256 /* Catch several broken garbage emulations plus some pre
3257 ATA devices */
3258 if (ata_id_major_version(dev->id) == 0 &&
3259 dev->pio_mode <= XFER_PIO_2)
3260 ign_dev_err = 1;
3261 /* Some very old devices and some bad newer ones fail
3262 any kind of SET_XFERMODE request but support PIO0-2
3263 timings and no IORDY */
3264 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3265 ign_dev_err = 1;
3266 }
3acaf94b
AC
3267 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3268 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
c5038fc0 3269 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3acaf94b
AC
3270 dev->dma_mode == XFER_MW_DMA_0 &&
3271 (dev->id[63] >> 8) & 1)
4055dee7 3272 ign_dev_err = 1;
3acaf94b 3273
4055dee7
TH
3274 /* if the device is actually configured correctly, ignore dev err */
3275 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3276 ign_dev_err = 1;
1da177e4 3277
4055dee7
TH
3278 if (err_mask & AC_ERR_DEV) {
3279 if (!ign_dev_err)
3280 goto fail;
3281 else
3282 dev_err_whine = " (device error ignored)";
3283 }
48a8a14f 3284
23e71c3d
TH
3285 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3286 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 3287
a9a79dfe
JP
3288 ata_dev_info(dev, "configured for %s%s\n",
3289 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3290 dev_err_whine);
4055dee7 3291
83206a29 3292 return 0;
4055dee7
TH
3293
3294 fail:
a9a79dfe 3295 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
4055dee7 3296 return -EIO;
1da177e4
LT
3297}
3298
1da177e4 3299/**
04351821 3300 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3301 * @link: link on which timings will be programmed
1967b7ff 3302 * @r_failed_dev: out parameter for failed device
1da177e4 3303 *
04351821
A
3304 * Standard implementation of the function used to tune and set
3305 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3306 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3307 * returned in @r_failed_dev.
780a87f7 3308 *
1da177e4 3309 * LOCKING:
0cba632b 3310 * PCI/etc. bus probe sem.
e82cbdb9
TH
3311 *
3312 * RETURNS:
3313 * 0 on success, negative errno otherwise
1da177e4 3314 */
04351821 3315
0260731f 3316int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3317{
0260731f 3318 struct ata_port *ap = link->ap;
e8e0619f 3319 struct ata_device *dev;
f58229f8 3320 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3321
a6d5a51c 3322 /* step 1: calculate xfer_mask */
1eca4365 3323 ata_for_each_dev(dev, link, ENABLED) {
7dc951ae 3324 unsigned long pio_mask, dma_mask;
b3a70601 3325 unsigned int mode_mask;
a6d5a51c 3326
b3a70601
AC
3327 mode_mask = ATA_DMA_MASK_ATA;
3328 if (dev->class == ATA_DEV_ATAPI)
3329 mode_mask = ATA_DMA_MASK_ATAPI;
3330 else if (ata_id_is_cfa(dev->id))
3331 mode_mask = ATA_DMA_MASK_CFA;
3332
3373efd8 3333 ata_dev_xfermask(dev);
33267325 3334 ata_force_xfermask(dev);
1da177e4 3335
acf356b1 3336 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
b3a70601
AC
3337
3338 if (libata_dma_mask & mode_mask)
80a9c430
SS
3339 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3340 dev->udma_mask);
b3a70601
AC
3341 else
3342 dma_mask = 0;
3343
acf356b1
TH
3344 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3345 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3346
4f65977d 3347 found = 1;
b15b3eba 3348 if (ata_dma_enabled(dev))
5444a6f4 3349 used_dma = 1;
a6d5a51c 3350 }
4f65977d 3351 if (!found)
e82cbdb9 3352 goto out;
a6d5a51c
TH
3353
3354 /* step 2: always set host PIO timings */
1eca4365 3355 ata_for_each_dev(dev, link, ENABLED) {
70cd071e 3356 if (dev->pio_mode == 0xff) {
a9a79dfe 3357 ata_dev_warn(dev, "no PIO support\n");
e8e0619f 3358 rc = -EINVAL;
e82cbdb9 3359 goto out;
e8e0619f
TH
3360 }
3361
3362 dev->xfer_mode = dev->pio_mode;
3363 dev->xfer_shift = ATA_SHIFT_PIO;
3364 if (ap->ops->set_piomode)
3365 ap->ops->set_piomode(ap, dev);
3366 }
1da177e4 3367
a6d5a51c 3368 /* step 3: set host DMA timings */
1eca4365
TH
3369 ata_for_each_dev(dev, link, ENABLED) {
3370 if (!ata_dma_enabled(dev))
e8e0619f
TH
3371 continue;
3372
3373 dev->xfer_mode = dev->dma_mode;
3374 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3375 if (ap->ops->set_dmamode)
3376 ap->ops->set_dmamode(ap, dev);
3377 }
1da177e4
LT
3378
3379 /* step 4: update devices' xfer mode */
1eca4365 3380 ata_for_each_dev(dev, link, ENABLED) {
3373efd8 3381 rc = ata_dev_set_mode(dev);
5bbc53f4 3382 if (rc)
e82cbdb9 3383 goto out;
83206a29 3384 }
1da177e4 3385
e8e0619f
TH
3386 /* Record simplex status. If we selected DMA then the other
3387 * host channels are not permitted to do so.
5444a6f4 3388 */
cca3974e 3389 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3390 ap->host->simplex_claimed = ap;
5444a6f4 3391
e82cbdb9
TH
3392 out:
3393 if (rc)
3394 *r_failed_dev = dev;
3395 return rc;
1da177e4
LT
3396}
3397
aa2731ad
TH
3398/**
3399 * ata_wait_ready - wait for link to become ready
3400 * @link: link to be waited on
3401 * @deadline: deadline jiffies for the operation
3402 * @check_ready: callback to check link readiness
3403 *
3404 * Wait for @link to become ready. @check_ready should return
3405 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3406 * link doesn't seem to be occupied, other errno for other error
3407 * conditions.
3408 *
3409 * Transient -ENODEV conditions are allowed for
3410 * ATA_TMOUT_FF_WAIT.
3411 *
3412 * LOCKING:
3413 * EH context.
3414 *
3415 * RETURNS:
3416 * 0 if @linke is ready before @deadline; otherwise, -errno.
3417 */
3418int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3419 int (*check_ready)(struct ata_link *link))
3420{
3421 unsigned long start = jiffies;
b48d58f5 3422 unsigned long nodev_deadline;
aa2731ad
TH
3423 int warned = 0;
3424
b48d58f5
TH
3425 /* choose which 0xff timeout to use, read comment in libata.h */
3426 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3427 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3428 else
3429 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3430
b1c72916
TH
3431 /* Slave readiness can't be tested separately from master. On
3432 * M/S emulation configuration, this function should be called
3433 * only on the master and it will handle both master and slave.
3434 */
3435 WARN_ON(link == link->ap->slave_link);
3436
aa2731ad
TH
3437 if (time_after(nodev_deadline, deadline))
3438 nodev_deadline = deadline;
3439
3440 while (1) {
3441 unsigned long now = jiffies;
3442 int ready, tmp;
3443
3444 ready = tmp = check_ready(link);
3445 if (ready > 0)
3446 return 0;
3447
b48d58f5
TH
3448 /*
3449 * -ENODEV could be transient. Ignore -ENODEV if link
aa2731ad 3450 * is online. Also, some SATA devices take a long
b48d58f5
TH
3451 * time to clear 0xff after reset. Wait for
3452 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3453 * offline.
aa2731ad
TH
3454 *
3455 * Note that some PATA controllers (pata_ali) explode
3456 * if status register is read more than once when
3457 * there's no device attached.
3458 */
3459 if (ready == -ENODEV) {
3460 if (ata_link_online(link))
3461 ready = 0;
3462 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3463 !ata_link_offline(link) &&
3464 time_before(now, nodev_deadline))
3465 ready = 0;
3466 }
3467
3468 if (ready)
3469 return ready;
3470 if (time_after(now, deadline))
3471 return -EBUSY;
3472
3473 if (!warned && time_after(now, start + 5 * HZ) &&
3474 (deadline - now > 3 * HZ)) {
a9a79dfe 3475 ata_link_warn(link,
aa2731ad
TH
3476 "link is slow to respond, please be patient "
3477 "(ready=%d)\n", tmp);
3478 warned = 1;
3479 }
3480
97750ceb 3481 ata_msleep(link->ap, 50);
aa2731ad
TH
3482 }
3483}
3484
3485/**
3486 * ata_wait_after_reset - wait for link to become ready after reset
3487 * @link: link to be waited on
3488 * @deadline: deadline jiffies for the operation
3489 * @check_ready: callback to check link readiness
3490 *
3491 * Wait for @link to become ready after reset.
3492 *
3493 * LOCKING:
3494 * EH context.
3495 *
3496 * RETURNS:
3497 * 0 if @linke is ready before @deadline; otherwise, -errno.
3498 */
2b4221bb 3499int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
aa2731ad
TH
3500 int (*check_ready)(struct ata_link *link))
3501{
97750ceb 3502 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
aa2731ad
TH
3503
3504 return ata_wait_ready(link, deadline, check_ready);
3505}
3506
d7bb4cc7 3507/**
936fd732
TH
3508 * sata_link_debounce - debounce SATA phy status
3509 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3510 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3511 * @deadline: deadline jiffies for the operation
d7bb4cc7 3512 *
1152b261 3513 * Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3514 * holding the same value where DET is not 1 for @duration polled
3515 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3516 * beginning of the stable state. Because DET gets stuck at 1 on
3517 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3518 * until timeout then returns 0 if DET is stable at 1.
3519 *
d4b2bab4
TH
3520 * @timeout is further limited by @deadline. The sooner of the
3521 * two is used.
3522 *
d7bb4cc7
TH
3523 * LOCKING:
3524 * Kernel thread context (may sleep)
3525 *
3526 * RETURNS:
3527 * 0 on success, -errno on failure.
3528 */
936fd732
TH
3529int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3530 unsigned long deadline)
7a7921e8 3531{
341c2c95
TH
3532 unsigned long interval = params[0];
3533 unsigned long duration = params[1];
d4b2bab4 3534 unsigned long last_jiffies, t;
d7bb4cc7
TH
3535 u32 last, cur;
3536 int rc;
3537
341c2c95 3538 t = ata_deadline(jiffies, params[2]);
d4b2bab4
TH
3539 if (time_before(t, deadline))
3540 deadline = t;
3541
936fd732 3542 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3543 return rc;
3544 cur &= 0xf;
3545
3546 last = cur;
3547 last_jiffies = jiffies;
3548
3549 while (1) {
97750ceb 3550 ata_msleep(link->ap, interval);
936fd732 3551 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3552 return rc;
3553 cur &= 0xf;
3554
3555 /* DET stable? */
3556 if (cur == last) {
d4b2bab4 3557 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7 3558 continue;
341c2c95
TH
3559 if (time_after(jiffies,
3560 ata_deadline(last_jiffies, duration)))
d7bb4cc7
TH
3561 return 0;
3562 continue;
3563 }
3564
3565 /* unstable, start over */
3566 last = cur;
3567 last_jiffies = jiffies;
3568
f1545154
TH
3569 /* Check deadline. If debouncing failed, return
3570 * -EPIPE to tell upper layer to lower link speed.
3571 */
d4b2bab4 3572 if (time_after(jiffies, deadline))
f1545154 3573 return -EPIPE;
d7bb4cc7
TH
3574 }
3575}
3576
3577/**
936fd732
TH
3578 * sata_link_resume - resume SATA link
3579 * @link: ATA link to resume SATA
d7bb4cc7 3580 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3581 * @deadline: deadline jiffies for the operation
d7bb4cc7 3582 *
936fd732 3583 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3584 *
3585 * LOCKING:
3586 * Kernel thread context (may sleep)
3587 *
3588 * RETURNS:
3589 * 0 on success, -errno on failure.
3590 */
936fd732
TH
3591int sata_link_resume(struct ata_link *link, const unsigned long *params,
3592 unsigned long deadline)
d7bb4cc7 3593{
5040ab67 3594 int tries = ATA_LINK_RESUME_TRIES;
ac371987 3595 u32 scontrol, serror;
81952c54
TH
3596 int rc;
3597
936fd732 3598 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3599 return rc;
7a7921e8 3600
5040ab67
TH
3601 /*
3602 * Writes to SControl sometimes get ignored under certain
3603 * controllers (ata_piix SIDPR). Make sure DET actually is
3604 * cleared.
3605 */
3606 do {
3607 scontrol = (scontrol & 0x0f0) | 0x300;
3608 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3609 return rc;
3610 /*
3611 * Some PHYs react badly if SStatus is pounded
3612 * immediately after resuming. Delay 200ms before
3613 * debouncing.
3614 */
97750ceb 3615 ata_msleep(link->ap, 200);
81952c54 3616
5040ab67
TH
3617 /* is SControl restored correctly? */
3618 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3619 return rc;
3620 } while ((scontrol & 0xf0f) != 0x300 && --tries);
7a7921e8 3621
5040ab67 3622 if ((scontrol & 0xf0f) != 0x300) {
38941c95 3623 ata_link_warn(link, "failed to resume link (SControl %X)\n",
a9a79dfe 3624 scontrol);
5040ab67
TH
3625 return 0;
3626 }
3627
3628 if (tries < ATA_LINK_RESUME_TRIES)
a9a79dfe
JP
3629 ata_link_warn(link, "link resume succeeded after %d retries\n",
3630 ATA_LINK_RESUME_TRIES - tries);
7a7921e8 3631
ac371987
TH
3632 if ((rc = sata_link_debounce(link, params, deadline)))
3633 return rc;
3634
f046519f 3635 /* clear SError, some PHYs require this even for SRST to work */
ac371987
TH
3636 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3637 rc = sata_scr_write(link, SCR_ERROR, serror);
ac371987 3638
f046519f 3639 return rc != -EINVAL ? rc : 0;
7a7921e8
TH
3640}
3641
1152b261
TH
3642/**
3643 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3644 * @link: ATA link to manipulate SControl for
3645 * @policy: LPM policy to configure
3646 * @spm_wakeup: initiate LPM transition to active state
3647 *
3648 * Manipulate the IPM field of the SControl register of @link
3649 * according to @policy. If @policy is ATA_LPM_MAX_POWER and
3650 * @spm_wakeup is %true, the SPM field is manipulated to wake up
3651 * the link. This function also clears PHYRDY_CHG before
3652 * returning.
3653 *
3654 * LOCKING:
3655 * EH context.
3656 *
3657 * RETURNS:
3658 * 0 on succes, -errno otherwise.
3659 */
3660int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3661 bool spm_wakeup)
3662{
3663 struct ata_eh_context *ehc = &link->eh_context;
3664 bool woken_up = false;
3665 u32 scontrol;
3666 int rc;
3667
3668 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3669 if (rc)
3670 return rc;
3671
3672 switch (policy) {
3673 case ATA_LPM_MAX_POWER:
3674 /* disable all LPM transitions */
65fe1f0f 3675 scontrol |= (0x7 << 8);
1152b261
TH
3676 /* initiate transition to active state */
3677 if (spm_wakeup) {
3678 scontrol |= (0x4 << 12);
3679 woken_up = true;
3680 }
3681 break;
3682 case ATA_LPM_MED_POWER:
3683 /* allow LPM to PARTIAL */
3684 scontrol &= ~(0x1 << 8);
65fe1f0f 3685 scontrol |= (0x6 << 8);
1152b261
TH
3686 break;
3687 case ATA_LPM_MIN_POWER:
8a745f1f
KCA
3688 if (ata_link_nr_enabled(link) > 0)
3689 /* no restrictions on LPM transitions */
65fe1f0f 3690 scontrol &= ~(0x7 << 8);
8a745f1f
KCA
3691 else {
3692 /* empty port, power off */
3693 scontrol &= ~0xf;
3694 scontrol |= (0x1 << 2);
3695 }
1152b261
TH
3696 break;
3697 default:
3698 WARN_ON(1);
3699 }
3700
3701 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3702 if (rc)
3703 return rc;
3704
3705 /* give the link time to transit out of LPM state */
3706 if (woken_up)
3707 msleep(10);
3708
3709 /* clear PHYRDY_CHG from SError */
3710 ehc->i.serror &= ~SERR_PHYRDY_CHG;
3711 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3712}
3713
f5914a46 3714/**
0aa1113d 3715 * ata_std_prereset - prepare for reset
cc0680a5 3716 * @link: ATA link to be reset
d4b2bab4 3717 * @deadline: deadline jiffies for the operation
f5914a46 3718 *
cc0680a5 3719 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3720 * prereset makes libata abort whole reset sequence and give up
3721 * that port, so prereset should be best-effort. It does its
3722 * best to prepare for reset sequence but if things go wrong, it
3723 * should just whine, not fail.
f5914a46
TH
3724 *
3725 * LOCKING:
3726 * Kernel thread context (may sleep)
3727 *
3728 * RETURNS:
3729 * 0 on success, -errno otherwise.
3730 */
0aa1113d 3731int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3732{
cc0680a5 3733 struct ata_port *ap = link->ap;
936fd732 3734 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3735 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3736 int rc;
3737
f5914a46
TH
3738 /* if we're about to do hardreset, nothing more to do */
3739 if (ehc->i.action & ATA_EH_HARDRESET)
3740 return 0;
3741
936fd732 3742 /* if SATA, resume link */
a16abc0b 3743 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3744 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3745 /* whine about phy resume failure but proceed */
3746 if (rc && rc != -EOPNOTSUPP)
a9a79dfe
JP
3747 ata_link_warn(link,
3748 "failed to resume link for reset (errno=%d)\n",
3749 rc);
f5914a46
TH
3750 }
3751
45db2f6c 3752 /* no point in trying softreset on offline link */
b1c72916 3753 if (ata_phys_link_offline(link))
45db2f6c
TH
3754 ehc->i.action &= ~ATA_EH_SOFTRESET;
3755
f5914a46
TH
3756 return 0;
3757}
3758
c2bd5804 3759/**
624d5c51
TH
3760 * sata_link_hardreset - reset link via SATA phy reset
3761 * @link: link to reset
3762 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3763 * @deadline: deadline jiffies for the operation
9dadd45b
TH
3764 * @online: optional out parameter indicating link onlineness
3765 * @check_ready: optional callback to check link readiness
c2bd5804 3766 *
624d5c51 3767 * SATA phy-reset @link using DET bits of SControl register.
9dadd45b
TH
3768 * After hardreset, link readiness is waited upon using
3769 * ata_wait_ready() if @check_ready is specified. LLDs are
3770 * allowed to not specify @check_ready and wait itself after this
3771 * function returns. Device classification is LLD's
3772 * responsibility.
3773 *
3774 * *@online is set to one iff reset succeeded and @link is online
3775 * after reset.
c2bd5804
TH
3776 *
3777 * LOCKING:
3778 * Kernel thread context (may sleep)
3779 *
3780 * RETURNS:
3781 * 0 on success, -errno otherwise.
3782 */
624d5c51 3783int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
9dadd45b
TH
3784 unsigned long deadline,
3785 bool *online, int (*check_ready)(struct ata_link *))
c2bd5804 3786{
624d5c51 3787 u32 scontrol;
81952c54 3788 int rc;
852ee16a 3789
c2bd5804
TH
3790 DPRINTK("ENTER\n");
3791
9dadd45b
TH
3792 if (online)
3793 *online = false;
3794
936fd732 3795 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
3796 /* SATA spec says nothing about how to reconfigure
3797 * spd. To be on the safe side, turn off phy during
3798 * reconfiguration. This works for at least ICH7 AHCI
3799 * and Sil3124.
3800 */
936fd732 3801 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3802 goto out;
81952c54 3803
a34b6fc0 3804 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 3805
936fd732 3806 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 3807 goto out;
1c3fae4d 3808
936fd732 3809 sata_set_spd(link);
1c3fae4d
TH
3810 }
3811
3812 /* issue phy wake/reset */
936fd732 3813 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3814 goto out;
81952c54 3815
852ee16a 3816 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 3817
936fd732 3818 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 3819 goto out;
c2bd5804 3820
1c3fae4d 3821 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3822 * 10.4.2 says at least 1 ms.
3823 */
97750ceb 3824 ata_msleep(link->ap, 1);
c2bd5804 3825
936fd732
TH
3826 /* bring link back */
3827 rc = sata_link_resume(link, timing, deadline);
9dadd45b
TH
3828 if (rc)
3829 goto out;
3830 /* if link is offline nothing more to do */
b1c72916 3831 if (ata_phys_link_offline(link))
9dadd45b
TH
3832 goto out;
3833
3834 /* Link is online. From this point, -ENODEV too is an error. */
3835 if (online)
3836 *online = true;
3837
071f44b1 3838 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
9dadd45b
TH
3839 /* If PMP is supported, we have to do follow-up SRST.
3840 * Some PMPs don't send D2H Reg FIS after hardreset if
3841 * the first port is empty. Wait only for
3842 * ATA_TMOUT_PMP_SRST_WAIT.
3843 */
3844 if (check_ready) {
3845 unsigned long pmp_deadline;
3846
341c2c95
TH
3847 pmp_deadline = ata_deadline(jiffies,
3848 ATA_TMOUT_PMP_SRST_WAIT);
9dadd45b
TH
3849 if (time_after(pmp_deadline, deadline))
3850 pmp_deadline = deadline;
3851 ata_wait_ready(link, pmp_deadline, check_ready);
3852 }
3853 rc = -EAGAIN;
3854 goto out;
3855 }
3856
3857 rc = 0;
3858 if (check_ready)
3859 rc = ata_wait_ready(link, deadline, check_ready);
b6103f6d 3860 out:
0cbf0711
TH
3861 if (rc && rc != -EAGAIN) {
3862 /* online is set iff link is online && reset succeeded */
3863 if (online)
3864 *online = false;
a9a79dfe 3865 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
0cbf0711 3866 }
b6103f6d
TH
3867 DPRINTK("EXIT, rc=%d\n", rc);
3868 return rc;
3869}
3870
57c9efdf
TH
3871/**
3872 * sata_std_hardreset - COMRESET w/o waiting or classification
3873 * @link: link to reset
3874 * @class: resulting class of attached device
3875 * @deadline: deadline jiffies for the operation
3876 *
3877 * Standard SATA COMRESET w/o waiting or classification.
3878 *
3879 * LOCKING:
3880 * Kernel thread context (may sleep)
3881 *
3882 * RETURNS:
3883 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3884 */
3885int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3886 unsigned long deadline)
3887{
3888 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3889 bool online;
3890 int rc;
3891
3892 /* do hardreset */
3893 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
57c9efdf
TH
3894 return online ? -EAGAIN : rc;
3895}
3896
c2bd5804 3897/**
203c75b8 3898 * ata_std_postreset - standard postreset callback
cc0680a5 3899 * @link: the target ata_link
c2bd5804
TH
3900 * @classes: classes of attached devices
3901 *
3902 * This function is invoked after a successful reset. Note that
3903 * the device might have been reset more than once using
3904 * different reset methods before postreset is invoked.
c2bd5804 3905 *
c2bd5804
TH
3906 * LOCKING:
3907 * Kernel thread context (may sleep)
3908 */
203c75b8 3909void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3910{
f046519f
TH
3911 u32 serror;
3912
c2bd5804
TH
3913 DPRINTK("ENTER\n");
3914
f046519f
TH
3915 /* reset complete, clear SError */
3916 if (!sata_scr_read(link, SCR_ERROR, &serror))
3917 sata_scr_write(link, SCR_ERROR, serror);
3918
c2bd5804 3919 /* print link status */
936fd732 3920 sata_print_link_status(link);
c2bd5804 3921
c2bd5804
TH
3922 DPRINTK("EXIT\n");
3923}
3924
623a3128
TH
3925/**
3926 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3927 * @dev: device to compare against
3928 * @new_class: class of the new device
3929 * @new_id: IDENTIFY page of the new device
3930 *
3931 * Compare @new_class and @new_id against @dev and determine
3932 * whether @dev is the device indicated by @new_class and
3933 * @new_id.
3934 *
3935 * LOCKING:
3936 * None.
3937 *
3938 * RETURNS:
3939 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3940 */
3373efd8
TH
3941static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3942 const u16 *new_id)
623a3128
TH
3943{
3944 const u16 *old_id = dev->id;
a0cf733b
TH
3945 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3946 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3947
3948 if (dev->class != new_class) {
a9a79dfe
JP
3949 ata_dev_info(dev, "class mismatch %d != %d\n",
3950 dev->class, new_class);
623a3128
TH
3951 return 0;
3952 }
3953
a0cf733b
TH
3954 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3955 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3956 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3957 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3958
3959 if (strcmp(model[0], model[1])) {
a9a79dfe
JP
3960 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3961 model[0], model[1]);
623a3128
TH
3962 return 0;
3963 }
3964
3965 if (strcmp(serial[0], serial[1])) {
a9a79dfe
JP
3966 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3967 serial[0], serial[1]);
623a3128
TH
3968 return 0;
3969 }
3970
623a3128
TH
3971 return 1;
3972}
3973
3974/**
fe30911b 3975 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3976 * @dev: target ATA device
bff04647 3977 * @readid_flags: read ID flags
623a3128
TH
3978 *
3979 * Re-read IDENTIFY page and make sure @dev is still attached to
3980 * the port.
3981 *
3982 * LOCKING:
3983 * Kernel thread context (may sleep)
3984 *
3985 * RETURNS:
3986 * 0 on success, negative errno otherwise
3987 */
fe30911b 3988int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3989{
5eb45c02 3990 unsigned int class = dev->class;
9af5c9c9 3991 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3992 int rc;
3993
fe635c7e 3994 /* read ID data */
bff04647 3995 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3996 if (rc)
fe30911b 3997 return rc;
623a3128
TH
3998
3999 /* is the device still there? */
fe30911b
TH
4000 if (!ata_dev_same_device(dev, class, id))
4001 return -ENODEV;
623a3128 4002
fe635c7e 4003 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4004 return 0;
4005}
4006
4007/**
4008 * ata_dev_revalidate - Revalidate ATA device
4009 * @dev: device to revalidate
422c9daa 4010 * @new_class: new class code
fe30911b
TH
4011 * @readid_flags: read ID flags
4012 *
4013 * Re-read IDENTIFY page, make sure @dev is still attached to the
4014 * port and reconfigure it according to the new IDENTIFY page.
4015 *
4016 * LOCKING:
4017 * Kernel thread context (may sleep)
4018 *
4019 * RETURNS:
4020 * 0 on success, negative errno otherwise
4021 */
422c9daa
TH
4022int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4023 unsigned int readid_flags)
fe30911b 4024{
6ddcd3b0 4025 u64 n_sectors = dev->n_sectors;
5920dadf 4026 u64 n_native_sectors = dev->n_native_sectors;
fe30911b
TH
4027 int rc;
4028
4029 if (!ata_dev_enabled(dev))
4030 return -ENODEV;
4031
422c9daa
TH
4032 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4033 if (ata_class_enabled(new_class) &&
f0d0613d
BP
4034 new_class != ATA_DEV_ATA &&
4035 new_class != ATA_DEV_ATAPI &&
4036 new_class != ATA_DEV_SEMB) {
a9a79dfe
JP
4037 ata_dev_info(dev, "class mismatch %u != %u\n",
4038 dev->class, new_class);
422c9daa
TH
4039 rc = -ENODEV;
4040 goto fail;
4041 }
4042
fe30911b
TH
4043 /* re-read ID */
4044 rc = ata_dev_reread_id(dev, readid_flags);
4045 if (rc)
4046 goto fail;
623a3128
TH
4047
4048 /* configure device according to the new ID */
efdaedc4 4049 rc = ata_dev_configure(dev);
6ddcd3b0
TH
4050 if (rc)
4051 goto fail;
4052
4053 /* verify n_sectors hasn't changed */
445d211b
TH
4054 if (dev->class != ATA_DEV_ATA || !n_sectors ||
4055 dev->n_sectors == n_sectors)
4056 return 0;
4057
4058 /* n_sectors has changed */
a9a79dfe
JP
4059 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4060 (unsigned long long)n_sectors,
4061 (unsigned long long)dev->n_sectors);
445d211b
TH
4062
4063 /*
4064 * Something could have caused HPA to be unlocked
4065 * involuntarily. If n_native_sectors hasn't changed and the
4066 * new size matches it, keep the device.
4067 */
4068 if (dev->n_native_sectors == n_native_sectors &&
4069 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
a9a79dfe
JP
4070 ata_dev_warn(dev,
4071 "new n_sectors matches native, probably "
4072 "late HPA unlock, n_sectors updated\n");
68939ce5 4073 /* use the larger n_sectors */
445d211b 4074 return 0;
6ddcd3b0
TH
4075 }
4076
445d211b
TH
4077 /*
4078 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
4079 * unlocking HPA in those cases.
4080 *
4081 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4082 */
4083 if (dev->n_native_sectors == n_native_sectors &&
4084 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4085 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
a9a79dfe
JP
4086 ata_dev_warn(dev,
4087 "old n_sectors matches native, probably "
4088 "late HPA lock, will try to unlock HPA\n");
445d211b
TH
4089 /* try unlocking HPA */
4090 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4091 rc = -EIO;
4092 } else
4093 rc = -ENODEV;
623a3128 4094
445d211b
TH
4095 /* restore original n_[native_]sectors and fail */
4096 dev->n_native_sectors = n_native_sectors;
4097 dev->n_sectors = n_sectors;
623a3128 4098 fail:
a9a79dfe 4099 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
4100 return rc;
4101}
4102
6919a0a6
AC
4103struct ata_blacklist_entry {
4104 const char *model_num;
4105 const char *model_rev;
4106 unsigned long horkage;
4107};
4108
4109static const struct ata_blacklist_entry ata_device_blacklist [] = {
4110 /* Devices with DMA related problems under Linux */
4111 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4112 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4113 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4114 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4115 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4116 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4117 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4118 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4119 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
7da4c935 4120 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4121 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4122 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4123 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4124 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4125 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
7da4c935 4126 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4127 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4128 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4129 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4130 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4131 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4132 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4133 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4134 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4135 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4136 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 4137 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 4138 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
d17d794c 4139 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
3af9a77a 4140 /* Odd clown on sil3726/4726 PMPs */
50af2fa1 4141 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
6919a0a6 4142
18d6e9d5 4143 /* Weird ATAPI devices */
40a1d531 4144 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
6a87e42e 4145 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
a32450e1 4146 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
0523f037 4147 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
18d6e9d5 4148
6919a0a6
AC
4149 /* Devices we expect to fail diagnostics */
4150
4151 /* Devices where NCQ should be avoided */
4152 /* NCQ is slow */
2dcb407e 4153 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
459ad688 4154 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
09125ea6
TH
4155 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4156 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 4157 /* NCQ is broken */
539cc7c7 4158 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 4159 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
da6f0ec2 4160 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
e41bd3e8 4161 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
5ccfca97 4162 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
539cc7c7 4163
ac70a964 4164 /* Seagate NCQ + FLUSH CACHE firmware bug */
4d1f9082 4165 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ac70a964 4166 ATA_HORKAGE_FIRMWARE_WARN },
d10d491f 4167
4d1f9082 4168 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
d10d491f
TH
4169 ATA_HORKAGE_FIRMWARE_WARN },
4170
4d1f9082 4171 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
d10d491f
TH
4172 ATA_HORKAGE_FIRMWARE_WARN },
4173
4d1f9082 4174 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ac70a964
TH
4175 ATA_HORKAGE_FIRMWARE_WARN },
4176
87809942
MB
4177 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
4178 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
b28a613e 4179 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
87809942 4180
36e337d0
RH
4181 /* Blacklist entries taken from Silicon Image 3124/3132
4182 Windows driver .inf file - also several Linux problem reports */
4183 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4184 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4185 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6 4186
68b0ddb2
TH
4187 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4188 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4189
16c55b03
TH
4190 /* devices which puke on READ_NATIVE_MAX */
4191 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4192 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4193 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4194 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 4195
7831387b
TH
4196 /* this one allows HPA unlocking but fails IOs on the area */
4197 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4198
93328e11
AC
4199 /* Devices which report 1 sector over size HPA */
4200 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4201 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
b152fcd3 4202 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
93328e11 4203
6bbfd53d
AC
4204 /* Devices which get the IVB wrong */
4205 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
a79067e5 4206 /* Maybe we should just blacklist TSSTcorp... */
7da4c935 4207 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
6bbfd53d 4208
9ce8e307
JA
4209 /* Devices that do not need bridging limits applied */
4210 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
04d0f1b8 4211 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
9ce8e307 4212
9062712f
TH
4213 /* Devices which aren't very happy with higher link speeds */
4214 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
c531077f 4215 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
9062712f 4216
d0cb43b3
TH
4217 /*
4218 * Devices which choke on SETXFER. Applies only if both the
4219 * device and controller are SATA.
4220 */
cd691876 4221 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
3a25179e
VL
4222 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
4223 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
cd691876
TH
4224 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4225 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
d0cb43b3 4226
f78dea06 4227 /* devices that don't properly handle queued TRIM commands */
3b8d2676
MP
4228 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4229 { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4230 { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4231 { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
f78dea06 4232
ecd75ad5
TH
4233 /*
4234 * Some WD SATA-I drives spin up and down erratically when the link
4235 * is put into the slumber mode. We don't have full list of the
4236 * affected devices. Disable LPM if the device matches one of the
4237 * known prefixes and is SATA-1. As a side effect LPM partial is
4238 * lost too.
4239 *
4240 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4241 */
4242 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4243 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4244 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4245 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4246 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4247 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4248 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4249
6919a0a6
AC
4250 /* End Marker */
4251 { }
1da177e4 4252};
2e9edbf8 4253
75683fe7 4254static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4255{
8bfa79fc
TH
4256 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4257 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4258 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4259
8bfa79fc
TH
4260 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4261 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4262
6919a0a6 4263 while (ad->model_num) {
428ac5fc 4264 if (glob_match(model_num, ad->model_num)) {
6919a0a6
AC
4265 if (ad->model_rev == NULL)
4266 return ad->horkage;
428ac5fc 4267 if (glob_match(model_rev, ad->model_rev))
6919a0a6 4268 return ad->horkage;
f4b15fef 4269 }
6919a0a6 4270 ad++;
f4b15fef 4271 }
1da177e4
LT
4272 return 0;
4273}
4274
6919a0a6
AC
4275static int ata_dma_blacklisted(const struct ata_device *dev)
4276{
4277 /* We don't support polling DMA.
4278 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4279 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4280 */
9af5c9c9 4281 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4282 (dev->flags & ATA_DFLAG_CDB_INTR))
4283 return 1;
75683fe7 4284 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4285}
4286
6bbfd53d
AC
4287/**
4288 * ata_is_40wire - check drive side detection
4289 * @dev: device
4290 *
4291 * Perform drive side detection decoding, allowing for device vendors
4292 * who can't follow the documentation.
4293 */
4294
4295static int ata_is_40wire(struct ata_device *dev)
4296{
4297 if (dev->horkage & ATA_HORKAGE_IVB)
4298 return ata_drive_40wire_relaxed(dev->id);
4299 return ata_drive_40wire(dev->id);
4300}
4301
15a5551c
AC
4302/**
4303 * cable_is_40wire - 40/80/SATA decider
4304 * @ap: port to consider
4305 *
4306 * This function encapsulates the policy for speed management
4307 * in one place. At the moment we don't cache the result but
4308 * there is a good case for setting ap->cbl to the result when
4309 * we are called with unknown cables (and figuring out if it
4310 * impacts hotplug at all).
4311 *
4312 * Return 1 if the cable appears to be 40 wire.
4313 */
4314
4315static int cable_is_40wire(struct ata_port *ap)
4316{
4317 struct ata_link *link;
4318 struct ata_device *dev;
4319
4a9c7b33 4320 /* If the controller thinks we are 40 wire, we are. */
15a5551c
AC
4321 if (ap->cbl == ATA_CBL_PATA40)
4322 return 1;
4a9c7b33
TH
4323
4324 /* If the controller thinks we are 80 wire, we are. */
15a5551c
AC
4325 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4326 return 0;
4a9c7b33
TH
4327
4328 /* If the system is known to be 40 wire short cable (eg
4329 * laptop), then we allow 80 wire modes even if the drive
4330 * isn't sure.
4331 */
f792068e
AC
4332 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4333 return 0;
4a9c7b33
TH
4334
4335 /* If the controller doesn't know, we scan.
4336 *
4337 * Note: We look for all 40 wire detects at this point. Any
4338 * 80 wire detect is taken to be 80 wire cable because
4339 * - in many setups only the one drive (slave if present) will
4340 * give a valid detect
4341 * - if you have a non detect capable drive you don't want it
4342 * to colour the choice
4343 */
1eca4365
TH
4344 ata_for_each_link(link, ap, EDGE) {
4345 ata_for_each_dev(dev, link, ENABLED) {
4346 if (!ata_is_40wire(dev))
15a5551c
AC
4347 return 0;
4348 }
4349 }
4350 return 1;
4351}
4352
a6d5a51c
TH
4353/**
4354 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4355 * @dev: Device to compute xfermask for
4356 *
acf356b1
TH
4357 * Compute supported xfermask of @dev and store it in
4358 * dev->*_mask. This function is responsible for applying all
4359 * known limits including host controller limits, device
4360 * blacklist, etc...
a6d5a51c
TH
4361 *
4362 * LOCKING:
4363 * None.
a6d5a51c 4364 */
3373efd8 4365static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4366{
9af5c9c9
TH
4367 struct ata_link *link = dev->link;
4368 struct ata_port *ap = link->ap;
cca3974e 4369 struct ata_host *host = ap->host;
a6d5a51c 4370 unsigned long xfer_mask;
1da177e4 4371
37deecb5 4372 /* controller modes available */
565083e1
TH
4373 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4374 ap->mwdma_mask, ap->udma_mask);
4375
8343f889 4376 /* drive modes available */
37deecb5
TH
4377 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4378 dev->mwdma_mask, dev->udma_mask);
4379 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4380
b352e57d
AC
4381 /*
4382 * CFA Advanced TrueIDE timings are not allowed on a shared
4383 * cable
4384 */
4385 if (ata_dev_pair(dev)) {
4386 /* No PIO5 or PIO6 */
4387 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4388 /* No MWDMA3 or MWDMA 4 */
4389 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4390 }
4391
37deecb5
TH
4392 if (ata_dma_blacklisted(dev)) {
4393 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
a9a79dfe
JP
4394 ata_dev_warn(dev,
4395 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4396 }
a6d5a51c 4397
14d66ab7 4398 if ((host->flags & ATA_HOST_SIMPLEX) &&
2dcb407e 4399 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5 4400 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
a9a79dfe
JP
4401 ata_dev_warn(dev,
4402 "simplex DMA is claimed by other device, disabling DMA\n");
5444a6f4 4403 }
565083e1 4404
e424675f
JG
4405 if (ap->flags & ATA_FLAG_NO_IORDY)
4406 xfer_mask &= ata_pio_mask_no_iordy(dev);
4407
5444a6f4 4408 if (ap->ops->mode_filter)
a76b62ca 4409 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4410
8343f889
RH
4411 /* Apply cable rule here. Don't apply it early because when
4412 * we handle hot plug the cable type can itself change.
4413 * Check this last so that we know if the transfer rate was
4414 * solely limited by the cable.
4415 * Unknown or 80 wire cables reported host side are checked
4416 * drive side as well. Cases where we know a 40wire cable
4417 * is used safely for 80 are not checked here.
4418 */
4419 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4420 /* UDMA/44 or higher would be available */
15a5551c 4421 if (cable_is_40wire(ap)) {
a9a79dfe
JP
4422 ata_dev_warn(dev,
4423 "limited to UDMA/33 due to 40-wire cable\n");
8343f889
RH
4424 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4425 }
4426
565083e1
TH
4427 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4428 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4429}
4430
1da177e4
LT
4431/**
4432 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4433 * @dev: Device to which command will be sent
4434 *
780a87f7
JG
4435 * Issue SET FEATURES - XFER MODE command to device @dev
4436 * on port @ap.
4437 *
1da177e4 4438 * LOCKING:
0cba632b 4439 * PCI/etc. bus probe sem.
83206a29
TH
4440 *
4441 * RETURNS:
4442 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4443 */
4444
3373efd8 4445static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4446{
a0123703 4447 struct ata_taskfile tf;
83206a29 4448 unsigned int err_mask;
1da177e4
LT
4449
4450 /* set up set-features taskfile */
4451 DPRINTK("set features - xfer mode\n");
4452
464cf177
TH
4453 /* Some controllers and ATAPI devices show flaky interrupt
4454 * behavior after setting xfer mode. Use polling instead.
4455 */
3373efd8 4456 ata_tf_init(dev, &tf);
a0123703
TH
4457 tf.command = ATA_CMD_SET_FEATURES;
4458 tf.feature = SETFEATURES_XFER;
464cf177 4459 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703 4460 tf.protocol = ATA_PROT_NODATA;
b9f8ab2d 4461 /* If we are using IORDY we must send the mode setting command */
11b7becc
JG
4462 if (ata_pio_need_iordy(dev))
4463 tf.nsect = dev->xfer_mode;
b9f8ab2d
AC
4464 /* If the device has IORDY and the controller does not - turn it off */
4465 else if (ata_id_has_iordy(dev->id))
11b7becc 4466 tf.nsect = 0x01;
b9f8ab2d
AC
4467 else /* In the ancient relic department - skip all of this */
4468 return 0;
1da177e4 4469
2b789108 4470 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4471
4472 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4473 return err_mask;
4474}
1152b261 4475
9f45cbd3 4476/**
218f3d30 4477 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4478 * @dev: Device to which command will be sent
4479 * @enable: Whether to enable or disable the feature
218f3d30 4480 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4481 *
4482 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4483 * on port @ap with sector count
9f45cbd3
KCA
4484 *
4485 * LOCKING:
4486 * PCI/etc. bus probe sem.
4487 *
4488 * RETURNS:
4489 * 0 on success, AC_ERR_* mask otherwise.
4490 */
1152b261 4491unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
9f45cbd3
KCA
4492{
4493 struct ata_taskfile tf;
4494 unsigned int err_mask;
4495
4496 /* set up set-features taskfile */
4497 DPRINTK("set features - SATA features\n");
4498
4499 ata_tf_init(dev, &tf);
4500 tf.command = ATA_CMD_SET_FEATURES;
4501 tf.feature = enable;
4502 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4503 tf.protocol = ATA_PROT_NODATA;
218f3d30 4504 tf.nsect = feature;
9f45cbd3 4505
2b789108 4506 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4507
83206a29
TH
4508 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4509 return err_mask;
1da177e4 4510}
633de4cc 4511EXPORT_SYMBOL_GPL(ata_dev_set_feature);
1da177e4 4512
8bf62ece
AL
4513/**
4514 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4515 * @dev: Device to which command will be sent
e2a7f77a
RD
4516 * @heads: Number of heads (taskfile parameter)
4517 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4518 *
4519 * LOCKING:
6aff8f1f
TH
4520 * Kernel thread context (may sleep)
4521 *
4522 * RETURNS:
4523 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4524 */
3373efd8
TH
4525static unsigned int ata_dev_init_params(struct ata_device *dev,
4526 u16 heads, u16 sectors)
8bf62ece 4527{
a0123703 4528 struct ata_taskfile tf;
6aff8f1f 4529 unsigned int err_mask;
8bf62ece
AL
4530
4531 /* Number of sectors per track 1-255. Number of heads 1-16 */
4532 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4533 return AC_ERR_INVALID;
8bf62ece
AL
4534
4535 /* set up init dev params taskfile */
4536 DPRINTK("init dev params \n");
4537
3373efd8 4538 ata_tf_init(dev, &tf);
a0123703
TH
4539 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4540 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4541 tf.protocol = ATA_PROT_NODATA;
4542 tf.nsect = sectors;
4543 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4544
2b789108 4545 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4546 /* A clean abort indicates an original or just out of spec drive
4547 and we should continue as we issue the setup based on the
4548 drive reported working geometry */
4549 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4550 err_mask = 0;
8bf62ece 4551
6aff8f1f
TH
4552 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4553 return err_mask;
8bf62ece
AL
4554}
4555
1da177e4 4556/**
0cba632b
JG
4557 * ata_sg_clean - Unmap DMA memory associated with command
4558 * @qc: Command containing DMA memory to be released
4559 *
4560 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4561 *
4562 * LOCKING:
cca3974e 4563 * spin_lock_irqsave(host lock)
1da177e4 4564 */
70e6ad0c 4565void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4566{
4567 struct ata_port *ap = qc->ap;
ff2aeb1e 4568 struct scatterlist *sg = qc->sg;
1da177e4
LT
4569 int dir = qc->dma_dir;
4570
efcb3cf7 4571 WARN_ON_ONCE(sg == NULL);
1da177e4 4572
dde20207 4573 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4574
dde20207 4575 if (qc->n_elem)
5825627c 4576 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
1da177e4
LT
4577
4578 qc->flags &= ~ATA_QCFLAG_DMAMAP;
ff2aeb1e 4579 qc->sg = NULL;
1da177e4
LT
4580}
4581
1da177e4 4582/**
5895ef9a 4583 * atapi_check_dma - Check whether ATAPI DMA can be supported
1da177e4
LT
4584 * @qc: Metadata associated with taskfile to check
4585 *
780a87f7
JG
4586 * Allow low-level driver to filter ATA PACKET commands, returning
4587 * a status indicating whether or not it is OK to use DMA for the
4588 * supplied PACKET command.
4589 *
1da177e4 4590 * LOCKING:
624d5c51
TH
4591 * spin_lock_irqsave(host lock)
4592 *
4593 * RETURNS: 0 when ATAPI DMA can be used
4594 * nonzero otherwise
4595 */
5895ef9a 4596int atapi_check_dma(struct ata_queued_cmd *qc)
624d5c51
TH
4597{
4598 struct ata_port *ap = qc->ap;
71601958 4599
624d5c51
TH
4600 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4601 * few ATAPI devices choke on such DMA requests.
4602 */
6a87e42e
TH
4603 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4604 unlikely(qc->nbytes & 15))
624d5c51 4605 return 1;
e2cec771 4606
624d5c51
TH
4607 if (ap->ops->check_atapi_dma)
4608 return ap->ops->check_atapi_dma(qc);
e2cec771 4609
624d5c51
TH
4610 return 0;
4611}
1da177e4 4612
624d5c51
TH
4613/**
4614 * ata_std_qc_defer - Check whether a qc needs to be deferred
4615 * @qc: ATA command in question
4616 *
4617 * Non-NCQ commands cannot run with any other command, NCQ or
4618 * not. As upper layer only knows the queue depth, we are
4619 * responsible for maintaining exclusion. This function checks
4620 * whether a new command @qc can be issued.
4621 *
4622 * LOCKING:
4623 * spin_lock_irqsave(host lock)
4624 *
4625 * RETURNS:
4626 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4627 */
4628int ata_std_qc_defer(struct ata_queued_cmd *qc)
4629{
4630 struct ata_link *link = qc->dev->link;
e2cec771 4631
624d5c51
TH
4632 if (qc->tf.protocol == ATA_PROT_NCQ) {
4633 if (!ata_tag_valid(link->active_tag))
4634 return 0;
4635 } else {
4636 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4637 return 0;
4638 }
e2cec771 4639
624d5c51
TH
4640 return ATA_DEFER_LINK;
4641}
6912ccd5 4642
624d5c51 4643void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
1da177e4 4644
624d5c51
TH
4645/**
4646 * ata_sg_init - Associate command with scatter-gather table.
4647 * @qc: Command to be associated
4648 * @sg: Scatter-gather table.
4649 * @n_elem: Number of elements in s/g table.
4650 *
4651 * Initialize the data-related elements of queued_cmd @qc
4652 * to point to a scatter-gather table @sg, containing @n_elem
4653 * elements.
4654 *
4655 * LOCKING:
4656 * spin_lock_irqsave(host lock)
4657 */
4658void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4659 unsigned int n_elem)
4660{
4661 qc->sg = sg;
4662 qc->n_elem = n_elem;
4663 qc->cursg = qc->sg;
4664}
bb5cb290 4665
624d5c51
TH
4666/**
4667 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4668 * @qc: Command with scatter-gather table to be mapped.
4669 *
4670 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4671 *
4672 * LOCKING:
4673 * spin_lock_irqsave(host lock)
4674 *
4675 * RETURNS:
4676 * Zero on success, negative on error.
4677 *
4678 */
4679static int ata_sg_setup(struct ata_queued_cmd *qc)
4680{
4681 struct ata_port *ap = qc->ap;
4682 unsigned int n_elem;
1da177e4 4683
624d5c51 4684 VPRINTK("ENTER, ata%u\n", ap->print_id);
e2cec771 4685
624d5c51
TH
4686 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4687 if (n_elem < 1)
4688 return -1;
bb5cb290 4689
624d5c51 4690 DPRINTK("%d sg elements mapped\n", n_elem);
5825627c 4691 qc->orig_n_elem = qc->n_elem;
624d5c51
TH
4692 qc->n_elem = n_elem;
4693 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4 4694
624d5c51 4695 return 0;
1da177e4
LT
4696}
4697
624d5c51
TH
4698/**
4699 * swap_buf_le16 - swap halves of 16-bit words in place
4700 * @buf: Buffer to swap
4701 * @buf_words: Number of 16-bit words in buffer.
4702 *
4703 * Swap halves of 16-bit words if needed to convert from
4704 * little-endian byte order to native cpu byte order, or
4705 * vice-versa.
4706 *
4707 * LOCKING:
4708 * Inherited from caller.
4709 */
4710void swap_buf_le16(u16 *buf, unsigned int buf_words)
8061f5f0 4711{
624d5c51
TH
4712#ifdef __BIG_ENDIAN
4713 unsigned int i;
8061f5f0 4714
624d5c51
TH
4715 for (i = 0; i < buf_words; i++)
4716 buf[i] = le16_to_cpu(buf[i]);
4717#endif /* __BIG_ENDIAN */
8061f5f0
TH
4718}
4719
8a8bc223
TH
4720/**
4721 * ata_qc_new - Request an available ATA command, for queueing
5eb66fe0 4722 * @ap: target port
8a8bc223 4723 *
1871ee13
KH
4724 * Some ATA host controllers may implement a queue depth which is less
4725 * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
4726 * the hardware limitation.
4727 *
8a8bc223
TH
4728 * LOCKING:
4729 * None.
4730 */
4731
4732static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4733{
4734 struct ata_queued_cmd *qc = NULL;
1a112d10
TH
4735 unsigned int max_queue = ap->host->n_tags;
4736 unsigned int i, tag;
8a8bc223
TH
4737
4738 /* no command while frozen */
4739 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4740 return NULL;
4741
1871ee13
KH
4742 for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
4743 tag = tag < max_queue ? tag : 0;
8a4aeec8
DW
4744
4745 /* the last tag is reserved for internal command. */
4746 if (tag == ATA_TAG_INTERNAL)
4747 continue;
4748
4749 if (!test_and_set_bit(tag, &ap->qc_allocated)) {
4750 qc = __ata_qc_from_tag(ap, tag);
4751 qc->tag = tag;
4752 ap->last_tag = tag;
8a8bc223
TH
4753 break;
4754 }
8a4aeec8 4755 }
8a8bc223
TH
4756
4757 return qc;
4758}
4759
1da177e4
LT
4760/**
4761 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4762 * @dev: Device from whom we request an available command structure
4763 *
4764 * LOCKING:
0cba632b 4765 * None.
1da177e4
LT
4766 */
4767
8a8bc223 4768struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4769{
9af5c9c9 4770 struct ata_port *ap = dev->link->ap;
1da177e4
LT
4771 struct ata_queued_cmd *qc;
4772
8a8bc223 4773 qc = ata_qc_new(ap);
1da177e4 4774 if (qc) {
1da177e4
LT
4775 qc->scsicmd = NULL;
4776 qc->ap = ap;
4777 qc->dev = dev;
1da177e4 4778
2c13b7ce 4779 ata_qc_reinit(qc);
1da177e4
LT
4780 }
4781
4782 return qc;
4783}
4784
8a8bc223
TH
4785/**
4786 * ata_qc_free - free unused ata_queued_cmd
4787 * @qc: Command to complete
4788 *
4789 * Designed to free unused ata_queued_cmd object
4790 * in case something prevents using it.
4791 *
4792 * LOCKING:
4793 * spin_lock_irqsave(host lock)
4794 */
4795void ata_qc_free(struct ata_queued_cmd *qc)
4796{
a1104016 4797 struct ata_port *ap;
8a8bc223
TH
4798 unsigned int tag;
4799
efcb3cf7 4800 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
a1104016 4801 ap = qc->ap;
8a8bc223
TH
4802
4803 qc->flags = 0;
4804 tag = qc->tag;
4805 if (likely(ata_tag_valid(tag))) {
4806 qc->tag = ATA_TAG_POISON;
4807 clear_bit(tag, &ap->qc_allocated);
4808 }
4809}
4810
76014427 4811void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4812{
a1104016
JL
4813 struct ata_port *ap;
4814 struct ata_link *link;
dedaf2b0 4815
efcb3cf7
TH
4816 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4817 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
a1104016
JL
4818 ap = qc->ap;
4819 link = qc->dev->link;
1da177e4
LT
4820
4821 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4822 ata_sg_clean(qc);
4823
7401abf2 4824 /* command should be marked inactive atomically with qc completion */
da917d69 4825 if (qc->tf.protocol == ATA_PROT_NCQ) {
9af5c9c9 4826 link->sactive &= ~(1 << qc->tag);
da917d69
TH
4827 if (!link->sactive)
4828 ap->nr_active_links--;
4829 } else {
9af5c9c9 4830 link->active_tag = ATA_TAG_POISON;
da917d69
TH
4831 ap->nr_active_links--;
4832 }
4833
4834 /* clear exclusive status */
4835 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4836 ap->excl_link == link))
4837 ap->excl_link = NULL;
7401abf2 4838
3f3791d3
AL
4839 /* atapi: mark qc as inactive to prevent the interrupt handler
4840 * from completing the command twice later, before the error handler
4841 * is called. (when rc != 0 and atapi request sense is needed)
4842 */
4843 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4844 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4845
1da177e4 4846 /* call completion callback */
77853bf2 4847 qc->complete_fn(qc);
1da177e4
LT
4848}
4849
39599a53
TH
4850static void fill_result_tf(struct ata_queued_cmd *qc)
4851{
4852 struct ata_port *ap = qc->ap;
4853
39599a53 4854 qc->result_tf.flags = qc->tf.flags;
22183bf5 4855 ap->ops->qc_fill_rtf(qc);
39599a53
TH
4856}
4857
00115e0f
TH
4858static void ata_verify_xfer(struct ata_queued_cmd *qc)
4859{
4860 struct ata_device *dev = qc->dev;
4861
00115e0f
TH
4862 if (ata_is_nodata(qc->tf.protocol))
4863 return;
4864
4865 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4866 return;
4867
4868 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4869}
4870
f686bcb8
TH
4871/**
4872 * ata_qc_complete - Complete an active ATA command
4873 * @qc: Command to complete
f686bcb8 4874 *
1aadf5c3
TH
4875 * Indicate to the mid and upper layers that an ATA command has
4876 * completed, with either an ok or not-ok status.
4877 *
4878 * Refrain from calling this function multiple times when
4879 * successfully completing multiple NCQ commands.
4880 * ata_qc_complete_multiple() should be used instead, which will
4881 * properly update IRQ expect state.
f686bcb8
TH
4882 *
4883 * LOCKING:
cca3974e 4884 * spin_lock_irqsave(host lock)
f686bcb8
TH
4885 */
4886void ata_qc_complete(struct ata_queued_cmd *qc)
4887{
4888 struct ata_port *ap = qc->ap;
4889
4890 /* XXX: New EH and old EH use different mechanisms to
4891 * synchronize EH with regular execution path.
4892 *
4893 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4894 * Normal execution path is responsible for not accessing a
4895 * failed qc. libata core enforces the rule by returning NULL
4896 * from ata_qc_from_tag() for failed qcs.
4897 *
4898 * Old EH depends on ata_qc_complete() nullifying completion
4899 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4900 * not synchronize with interrupt handler. Only PIO task is
4901 * taken care of.
4902 */
4903 if (ap->ops->error_handler) {
4dbfa39b
TH
4904 struct ata_device *dev = qc->dev;
4905 struct ata_eh_info *ehi = &dev->link->eh_info;
4906
f686bcb8
TH
4907 if (unlikely(qc->err_mask))
4908 qc->flags |= ATA_QCFLAG_FAILED;
4909
f08dc1ac
TH
4910 /*
4911 * Finish internal commands without any further processing
4912 * and always with the result TF filled.
4913 */
4914 if (unlikely(ata_tag_internal(qc->tag))) {
f4b31db9 4915 fill_result_tf(qc);
f08dc1ac
TH
4916 __ata_qc_complete(qc);
4917 return;
4918 }
f4b31db9 4919
f08dc1ac
TH
4920 /*
4921 * Non-internal qc has failed. Fill the result TF and
4922 * summon EH.
4923 */
4924 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4925 fill_result_tf(qc);
4926 ata_qc_schedule_eh(qc);
f4b31db9 4927 return;
f686bcb8
TH
4928 }
4929
4dc738ed
TH
4930 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4931
f686bcb8
TH
4932 /* read result TF if requested */
4933 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4934 fill_result_tf(qc);
f686bcb8 4935
4dbfa39b
TH
4936 /* Some commands need post-processing after successful
4937 * completion.
4938 */
4939 switch (qc->tf.command) {
4940 case ATA_CMD_SET_FEATURES:
4941 if (qc->tf.feature != SETFEATURES_WC_ON &&
4942 qc->tf.feature != SETFEATURES_WC_OFF)
4943 break;
4944 /* fall through */
4945 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4946 case ATA_CMD_SET_MULTI: /* multi_count changed */
4947 /* revalidate device */
4948 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4949 ata_port_schedule_eh(ap);
4950 break;
054a5fba
TH
4951
4952 case ATA_CMD_SLEEP:
4953 dev->flags |= ATA_DFLAG_SLEEPING;
4954 break;
4dbfa39b
TH
4955 }
4956
00115e0f
TH
4957 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4958 ata_verify_xfer(qc);
4959
f686bcb8
TH
4960 __ata_qc_complete(qc);
4961 } else {
4962 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4963 return;
4964
4965 /* read result TF if failed or requested */
4966 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4967 fill_result_tf(qc);
f686bcb8
TH
4968
4969 __ata_qc_complete(qc);
4970 }
4971}
4972
dedaf2b0
TH
4973/**
4974 * ata_qc_complete_multiple - Complete multiple qcs successfully
4975 * @ap: port in question
4976 * @qc_active: new qc_active mask
dedaf2b0
TH
4977 *
4978 * Complete in-flight commands. This functions is meant to be
4979 * called from low-level driver's interrupt routine to complete
4980 * requests normally. ap->qc_active and @qc_active is compared
4981 * and commands are completed accordingly.
4982 *
1aadf5c3
TH
4983 * Always use this function when completing multiple NCQ commands
4984 * from IRQ handlers instead of calling ata_qc_complete()
4985 * multiple times to keep IRQ expect status properly in sync.
4986 *
dedaf2b0 4987 * LOCKING:
cca3974e 4988 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4989 *
4990 * RETURNS:
4991 * Number of completed commands on success, -errno otherwise.
4992 */
79f97dad 4993int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
dedaf2b0
TH
4994{
4995 int nr_done = 0;
4996 u32 done_mask;
dedaf2b0
TH
4997
4998 done_mask = ap->qc_active ^ qc_active;
4999
5000 if (unlikely(done_mask & qc_active)) {
a9a79dfe
JP
5001 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5002 ap->qc_active, qc_active);
dedaf2b0
TH
5003 return -EINVAL;
5004 }
5005
43768180 5006 while (done_mask) {
dedaf2b0 5007 struct ata_queued_cmd *qc;
43768180 5008 unsigned int tag = __ffs(done_mask);
dedaf2b0 5009
43768180
JA
5010 qc = ata_qc_from_tag(ap, tag);
5011 if (qc) {
dedaf2b0
TH
5012 ata_qc_complete(qc);
5013 nr_done++;
5014 }
43768180 5015 done_mask &= ~(1 << tag);
dedaf2b0
TH
5016 }
5017
5018 return nr_done;
5019}
5020
1da177e4
LT
5021/**
5022 * ata_qc_issue - issue taskfile to device
5023 * @qc: command to issue to device
5024 *
5025 * Prepare an ATA command to submission to device.
5026 * This includes mapping the data into a DMA-able
5027 * area, filling in the S/G table, and finally
5028 * writing the taskfile to hardware, starting the command.
5029 *
5030 * LOCKING:
cca3974e 5031 * spin_lock_irqsave(host lock)
1da177e4 5032 */
8e0e694a 5033void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5034{
5035 struct ata_port *ap = qc->ap;
9af5c9c9 5036 struct ata_link *link = qc->dev->link;
405e66b3 5037 u8 prot = qc->tf.protocol;
1da177e4 5038
dedaf2b0
TH
5039 /* Make sure only one non-NCQ command is outstanding. The
5040 * check is skipped for old EH because it reuses active qc to
5041 * request ATAPI sense.
5042 */
efcb3cf7 5043 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0 5044
1973a023 5045 if (ata_is_ncq(prot)) {
efcb3cf7 5046 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
da917d69
TH
5047
5048 if (!link->sactive)
5049 ap->nr_active_links++;
9af5c9c9 5050 link->sactive |= 1 << qc->tag;
dedaf2b0 5051 } else {
efcb3cf7 5052 WARN_ON_ONCE(link->sactive);
da917d69
TH
5053
5054 ap->nr_active_links++;
9af5c9c9 5055 link->active_tag = qc->tag;
dedaf2b0
TH
5056 }
5057
e4a70e76 5058 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5059 ap->qc_active |= 1 << qc->tag;
e4a70e76 5060
60f5d6ef
TH
5061 /*
5062 * We guarantee to LLDs that they will have at least one
f92a2636
TH
5063 * non-zero sg if the command is a data command.
5064 */
60f5d6ef
TH
5065 if (WARN_ON_ONCE(ata_is_data(prot) &&
5066 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5067 goto sys_err;
f92a2636 5068
405e66b3 5069 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
f92a2636 5070 (ap->flags & ATA_FLAG_PIO_DMA)))
001102d7 5071 if (ata_sg_setup(qc))
60f5d6ef 5072 goto sys_err;
1da177e4 5073
cf480626 5074 /* if device is sleeping, schedule reset and abort the link */
054a5fba 5075 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
cf480626 5076 link->eh_info.action |= ATA_EH_RESET;
054a5fba
TH
5077 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5078 ata_link_abort(link);
5079 return;
5080 }
5081
1da177e4
LT
5082 ap->ops->qc_prep(qc);
5083
8e0e694a
TH
5084 qc->err_mask |= ap->ops->qc_issue(qc);
5085 if (unlikely(qc->err_mask))
5086 goto err;
5087 return;
1da177e4 5088
60f5d6ef 5089sys_err:
8e0e694a
TH
5090 qc->err_mask |= AC_ERR_SYSTEM;
5091err:
5092 ata_qc_complete(qc);
1da177e4
LT
5093}
5094
34bf2170
TH
5095/**
5096 * sata_scr_valid - test whether SCRs are accessible
936fd732 5097 * @link: ATA link to test SCR accessibility for
34bf2170 5098 *
936fd732 5099 * Test whether SCRs are accessible for @link.
34bf2170
TH
5100 *
5101 * LOCKING:
5102 * None.
5103 *
5104 * RETURNS:
5105 * 1 if SCRs are accessible, 0 otherwise.
5106 */
936fd732 5107int sata_scr_valid(struct ata_link *link)
34bf2170 5108{
936fd732
TH
5109 struct ata_port *ap = link->ap;
5110
a16abc0b 5111 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5112}
5113
5114/**
5115 * sata_scr_read - read SCR register of the specified port
936fd732 5116 * @link: ATA link to read SCR for
34bf2170
TH
5117 * @reg: SCR to read
5118 * @val: Place to store read value
5119 *
936fd732 5120 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
5121 * guaranteed to succeed if @link is ap->link, the cable type of
5122 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
5123 *
5124 * LOCKING:
633273a3 5125 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
5126 *
5127 * RETURNS:
5128 * 0 on success, negative errno on failure.
5129 */
936fd732 5130int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 5131{
633273a3 5132 if (ata_is_host_link(link)) {
633273a3 5133 if (sata_scr_valid(link))
82ef04fb 5134 return link->ap->ops->scr_read(link, reg, val);
633273a3
TH
5135 return -EOPNOTSUPP;
5136 }
5137
5138 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
5139}
5140
5141/**
5142 * sata_scr_write - write SCR register of the specified port
936fd732 5143 * @link: ATA link to write SCR for
34bf2170
TH
5144 * @reg: SCR to write
5145 * @val: value to write
5146 *
936fd732 5147 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
5148 * guaranteed to succeed if @link is ap->link, the cable type of
5149 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
5150 *
5151 * LOCKING:
633273a3 5152 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
5153 *
5154 * RETURNS:
5155 * 0 on success, negative errno on failure.
5156 */
936fd732 5157int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 5158{
633273a3 5159 if (ata_is_host_link(link)) {
633273a3 5160 if (sata_scr_valid(link))
82ef04fb 5161 return link->ap->ops->scr_write(link, reg, val);
633273a3
TH
5162 return -EOPNOTSUPP;
5163 }
936fd732 5164
633273a3 5165 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
5166}
5167
5168/**
5169 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 5170 * @link: ATA link to write SCR for
34bf2170
TH
5171 * @reg: SCR to write
5172 * @val: value to write
5173 *
5174 * This function is identical to sata_scr_write() except that this
5175 * function performs flush after writing to the register.
5176 *
5177 * LOCKING:
633273a3 5178 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
5179 *
5180 * RETURNS:
5181 * 0 on success, negative errno on failure.
5182 */
936fd732 5183int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 5184{
633273a3 5185 if (ata_is_host_link(link)) {
633273a3 5186 int rc;
da3dbb17 5187
633273a3 5188 if (sata_scr_valid(link)) {
82ef04fb 5189 rc = link->ap->ops->scr_write(link, reg, val);
633273a3 5190 if (rc == 0)
82ef04fb 5191 rc = link->ap->ops->scr_read(link, reg, &val);
633273a3
TH
5192 return rc;
5193 }
5194 return -EOPNOTSUPP;
34bf2170 5195 }
633273a3
TH
5196
5197 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
5198}
5199
5200/**
b1c72916 5201 * ata_phys_link_online - test whether the given link is online
936fd732 5202 * @link: ATA link to test
34bf2170 5203 *
936fd732
TH
5204 * Test whether @link is online. Note that this function returns
5205 * 0 if online status of @link cannot be obtained, so
5206 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5207 *
5208 * LOCKING:
5209 * None.
5210 *
5211 * RETURNS:
b5b3fa38 5212 * True if the port online status is available and online.
34bf2170 5213 */
b1c72916 5214bool ata_phys_link_online(struct ata_link *link)
34bf2170
TH
5215{
5216 u32 sstatus;
5217
936fd732 5218 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 5219 ata_sstatus_online(sstatus))
b5b3fa38
TH
5220 return true;
5221 return false;
34bf2170
TH
5222}
5223
5224/**
b1c72916 5225 * ata_phys_link_offline - test whether the given link is offline
936fd732 5226 * @link: ATA link to test
34bf2170 5227 *
936fd732
TH
5228 * Test whether @link is offline. Note that this function
5229 * returns 0 if offline status of @link cannot be obtained, so
5230 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5231 *
5232 * LOCKING:
5233 * None.
5234 *
5235 * RETURNS:
b5b3fa38 5236 * True if the port offline status is available and offline.
34bf2170 5237 */
b1c72916 5238bool ata_phys_link_offline(struct ata_link *link)
34bf2170
TH
5239{
5240 u32 sstatus;
5241
936fd732 5242 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 5243 !ata_sstatus_online(sstatus))
b5b3fa38
TH
5244 return true;
5245 return false;
34bf2170 5246}
0baab86b 5247
b1c72916
TH
5248/**
5249 * ata_link_online - test whether the given link is online
5250 * @link: ATA link to test
5251 *
5252 * Test whether @link is online. This is identical to
5253 * ata_phys_link_online() when there's no slave link. When
5254 * there's a slave link, this function should only be called on
5255 * the master link and will return true if any of M/S links is
5256 * online.
5257 *
5258 * LOCKING:
5259 * None.
5260 *
5261 * RETURNS:
5262 * True if the port online status is available and online.
5263 */
5264bool ata_link_online(struct ata_link *link)
5265{
5266 struct ata_link *slave = link->ap->slave_link;
5267
5268 WARN_ON(link == slave); /* shouldn't be called on slave link */
5269
5270 return ata_phys_link_online(link) ||
5271 (slave && ata_phys_link_online(slave));
5272}
5273
5274/**
5275 * ata_link_offline - test whether the given link is offline
5276 * @link: ATA link to test
5277 *
5278 * Test whether @link is offline. This is identical to
5279 * ata_phys_link_offline() when there's no slave link. When
5280 * there's a slave link, this function should only be called on
5281 * the master link and will return true if both M/S links are
5282 * offline.
5283 *
5284 * LOCKING:
5285 * None.
5286 *
5287 * RETURNS:
5288 * True if the port offline status is available and offline.
5289 */
5290bool ata_link_offline(struct ata_link *link)
5291{
5292 struct ata_link *slave = link->ap->slave_link;
5293
5294 WARN_ON(link == slave); /* shouldn't be called on slave link */
5295
5296 return ata_phys_link_offline(link) &&
5297 (!slave || ata_phys_link_offline(slave));
5298}
5299
6ffa01d8 5300#ifdef CONFIG_PM
bc6e7c4b
DW
5301static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5302 unsigned int action, unsigned int ehi_flags,
5303 bool async)
500530f6 5304{
5ef41082 5305 struct ata_link *link;
500530f6 5306 unsigned long flags;
500530f6 5307
5ef41082
LM
5308 /* Previous resume operation might still be in
5309 * progress. Wait for PM_PENDING to clear.
5310 */
5311 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5312 ata_port_wait_eh(ap);
5313 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5314 }
500530f6 5315
5ef41082
LM
5316 /* request PM ops to EH */
5317 spin_lock_irqsave(ap->lock, flags);
500530f6 5318
5ef41082 5319 ap->pm_mesg = mesg;
5ef41082
LM
5320 ap->pflags |= ATA_PFLAG_PM_PENDING;
5321 ata_for_each_link(link, ap, HOST_FIRST) {
5322 link->eh_info.action |= action;
5323 link->eh_info.flags |= ehi_flags;
5324 }
500530f6 5325
5ef41082 5326 ata_port_schedule_eh(ap);
500530f6 5327
5ef41082 5328 spin_unlock_irqrestore(ap->lock, flags);
500530f6 5329
2fcbdcb4 5330 if (!async) {
5ef41082
LM
5331 ata_port_wait_eh(ap);
5332 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
500530f6 5333 }
500530f6
TH
5334}
5335
bc6e7c4b
DW
5336/*
5337 * On some hardware, device fails to respond after spun down for suspend. As
5338 * the device won't be used before being resumed, we don't need to touch the
5339 * device. Ask EH to skip the usual stuff and proceed directly to suspend.
5340 *
5341 * http://thread.gmane.org/gmane.linux.ide/46764
5342 */
5343static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5344 | ATA_EHI_NO_AUTOPSY
5345 | ATA_EHI_NO_RECOVERY;
5346
5347static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5ef41082 5348{
bc6e7c4b 5349 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5ef41082
LM
5350}
5351
bc6e7c4b 5352static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
2fcbdcb4 5353{
bc6e7c4b 5354 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
2fcbdcb4
DW
5355}
5356
bc6e7c4b 5357static int ata_port_pm_suspend(struct device *dev)
5ef41082 5358{
bc6e7c4b
DW
5359 struct ata_port *ap = to_ata_port(dev);
5360
5ef41082
LM
5361 if (pm_runtime_suspended(dev))
5362 return 0;
5363
bc6e7c4b
DW
5364 ata_port_suspend(ap, PMSG_SUSPEND);
5365 return 0;
33574d68
LM
5366}
5367
bc6e7c4b 5368static int ata_port_pm_freeze(struct device *dev)
33574d68 5369{
bc6e7c4b
DW
5370 struct ata_port *ap = to_ata_port(dev);
5371
33574d68 5372 if (pm_runtime_suspended(dev))
f5e6d0d0 5373 return 0;
33574d68 5374
bc6e7c4b
DW
5375 ata_port_suspend(ap, PMSG_FREEZE);
5376 return 0;
33574d68
LM
5377}
5378
bc6e7c4b 5379static int ata_port_pm_poweroff(struct device *dev)
33574d68 5380{
bc6e7c4b
DW
5381 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5382 return 0;
5ef41082
LM
5383}
5384
bc6e7c4b
DW
5385static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5386 | ATA_EHI_QUIET;
5ef41082 5387
bc6e7c4b
DW
5388static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5389{
5390 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5ef41082
LM
5391}
5392
bc6e7c4b 5393static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
2fcbdcb4 5394{
bc6e7c4b 5395 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
2fcbdcb4
DW
5396}
5397
bc6e7c4b 5398static int ata_port_pm_resume(struct device *dev)
e90b1e5a 5399{
200421a8 5400 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
bc6e7c4b
DW
5401 pm_runtime_disable(dev);
5402 pm_runtime_set_active(dev);
5403 pm_runtime_enable(dev);
5404 return 0;
e90b1e5a
LM
5405}
5406
7e15e9be
AL
5407/*
5408 * For ODDs, the upper layer will poll for media change every few seconds,
5409 * which will make it enter and leave suspend state every few seconds. And
5410 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
5411 * is very little and the ODD may malfunction after constantly being reset.
5412 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5413 * ODD is attached to the port.
5414 */
9ee4f393
LM
5415static int ata_port_runtime_idle(struct device *dev)
5416{
7e15e9be
AL
5417 struct ata_port *ap = to_ata_port(dev);
5418 struct ata_link *link;
5419 struct ata_device *adev;
5420
5421 ata_for_each_link(link, ap, HOST_FIRST) {
5422 ata_for_each_dev(adev, link, ENABLED)
5423 if (adev->class == ATA_DEV_ATAPI &&
5424 !zpodd_dev_enabled(adev))
5425 return -EBUSY;
5426 }
5427
45f0a85c 5428 return 0;
9ee4f393
LM
5429}
5430
a7ff60db
AL
5431static int ata_port_runtime_suspend(struct device *dev)
5432{
bc6e7c4b
DW
5433 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5434 return 0;
a7ff60db
AL
5435}
5436
5437static int ata_port_runtime_resume(struct device *dev)
5438{
bc6e7c4b
DW
5439 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5440 return 0;
a7ff60db
AL
5441}
5442
5ef41082 5443static const struct dev_pm_ops ata_port_pm_ops = {
bc6e7c4b
DW
5444 .suspend = ata_port_pm_suspend,
5445 .resume = ata_port_pm_resume,
5446 .freeze = ata_port_pm_freeze,
5447 .thaw = ata_port_pm_resume,
5448 .poweroff = ata_port_pm_poweroff,
5449 .restore = ata_port_pm_resume,
9ee4f393 5450
a7ff60db
AL
5451 .runtime_suspend = ata_port_runtime_suspend,
5452 .runtime_resume = ata_port_runtime_resume,
9ee4f393 5453 .runtime_idle = ata_port_runtime_idle,
5ef41082
LM
5454};
5455
2fcbdcb4
DW
5456/* sas ports don't participate in pm runtime management of ata_ports,
5457 * and need to resume ata devices at the domain level, not the per-port
5458 * level. sas suspend/resume is async to allow parallel port recovery
5459 * since sas has multiple ata_port instances per Scsi_Host.
5460 */
bc6e7c4b 5461void ata_sas_port_suspend(struct ata_port *ap)
2fcbdcb4 5462{
bc6e7c4b 5463 ata_port_suspend_async(ap, PMSG_SUSPEND);
2fcbdcb4 5464}
bc6e7c4b 5465EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
2fcbdcb4 5466
bc6e7c4b 5467void ata_sas_port_resume(struct ata_port *ap)
2fcbdcb4 5468{
bc6e7c4b 5469 ata_port_resume_async(ap, PMSG_RESUME);
2fcbdcb4 5470}
bc6e7c4b 5471EXPORT_SYMBOL_GPL(ata_sas_port_resume);
2fcbdcb4 5472
500530f6 5473/**
cca3974e
JG
5474 * ata_host_suspend - suspend host
5475 * @host: host to suspend
500530f6
TH
5476 * @mesg: PM message
5477 *
5ef41082 5478 * Suspend @host. Actual operation is performed by port suspend.
500530f6 5479 */
cca3974e 5480int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 5481{
5ef41082
LM
5482 host->dev->power.power_state = mesg;
5483 return 0;
500530f6
TH
5484}
5485
5486/**
cca3974e
JG
5487 * ata_host_resume - resume host
5488 * @host: host to resume
500530f6 5489 *
5ef41082 5490 * Resume @host. Actual operation is performed by port resume.
500530f6 5491 */
cca3974e 5492void ata_host_resume(struct ata_host *host)
500530f6 5493{
72ad6ec4 5494 host->dev->power.power_state = PMSG_ON;
500530f6 5495}
6ffa01d8 5496#endif
500530f6 5497
5ef41082
LM
5498struct device_type ata_port_type = {
5499 .name = "ata_port",
5500#ifdef CONFIG_PM
5501 .pm = &ata_port_pm_ops,
5502#endif
5503};
5504
3ef3b43d
TH
5505/**
5506 * ata_dev_init - Initialize an ata_device structure
5507 * @dev: Device structure to initialize
5508 *
5509 * Initialize @dev in preparation for probing.
5510 *
5511 * LOCKING:
5512 * Inherited from caller.
5513 */
5514void ata_dev_init(struct ata_device *dev)
5515{
b1c72916 5516 struct ata_link *link = ata_dev_phys_link(dev);
9af5c9c9 5517 struct ata_port *ap = link->ap;
72fa4b74
TH
5518 unsigned long flags;
5519
b1c72916 5520 /* SATA spd limit is bound to the attached device, reset together */
9af5c9c9
TH
5521 link->sata_spd_limit = link->hw_sata_spd_limit;
5522 link->sata_spd = 0;
5a04bf4b 5523
72fa4b74
TH
5524 /* High bits of dev->flags are used to record warm plug
5525 * requests which occur asynchronously. Synchronize using
cca3974e 5526 * host lock.
72fa4b74 5527 */
ba6a1308 5528 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5529 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 5530 dev->horkage = 0;
ba6a1308 5531 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5532
99cf610a
TH
5533 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5534 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
3ef3b43d
TH
5535 dev->pio_mask = UINT_MAX;
5536 dev->mwdma_mask = UINT_MAX;
5537 dev->udma_mask = UINT_MAX;
5538}
5539
4fb37a25
TH
5540/**
5541 * ata_link_init - Initialize an ata_link structure
5542 * @ap: ATA port link is attached to
5543 * @link: Link structure to initialize
8989805d 5544 * @pmp: Port multiplier port number
4fb37a25
TH
5545 *
5546 * Initialize @link.
5547 *
5548 * LOCKING:
5549 * Kernel thread context (may sleep)
5550 */
fb7fd614 5551void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
5552{
5553 int i;
5554
5555 /* clear everything except for devices */
d9027470
GG
5556 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5557 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
4fb37a25
TH
5558
5559 link->ap = ap;
8989805d 5560 link->pmp = pmp;
4fb37a25
TH
5561 link->active_tag = ATA_TAG_POISON;
5562 link->hw_sata_spd_limit = UINT_MAX;
5563
5564 /* can't use iterator, ap isn't initialized yet */
5565 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5566 struct ata_device *dev = &link->device[i];
5567
5568 dev->link = link;
5569 dev->devno = dev - link->device;
110f66d2
TH
5570#ifdef CONFIG_ATA_ACPI
5571 dev->gtf_filter = ata_acpi_gtf_filter;
5572#endif
4fb37a25
TH
5573 ata_dev_init(dev);
5574 }
5575}
5576
5577/**
5578 * sata_link_init_spd - Initialize link->sata_spd_limit
5579 * @link: Link to configure sata_spd_limit for
5580 *
5581 * Initialize @link->[hw_]sata_spd_limit to the currently
5582 * configured value.
5583 *
5584 * LOCKING:
5585 * Kernel thread context (may sleep).
5586 *
5587 * RETURNS:
5588 * 0 on success, -errno on failure.
5589 */
fb7fd614 5590int sata_link_init_spd(struct ata_link *link)
4fb37a25 5591{
33267325 5592 u8 spd;
4fb37a25
TH
5593 int rc;
5594
d127ea7b 5595 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
4fb37a25
TH
5596 if (rc)
5597 return rc;
5598
d127ea7b 5599 spd = (link->saved_scontrol >> 4) & 0xf;
4fb37a25
TH
5600 if (spd)
5601 link->hw_sata_spd_limit &= (1 << spd) - 1;
5602
05944bdf 5603 ata_force_link_limits(link);
33267325 5604
4fb37a25
TH
5605 link->sata_spd_limit = link->hw_sata_spd_limit;
5606
5607 return 0;
5608}
5609
1da177e4 5610/**
f3187195
TH
5611 * ata_port_alloc - allocate and initialize basic ATA port resources
5612 * @host: ATA host this allocated port belongs to
1da177e4 5613 *
f3187195
TH
5614 * Allocate and initialize basic ATA port resources.
5615 *
5616 * RETURNS:
5617 * Allocate ATA port on success, NULL on failure.
0cba632b 5618 *
1da177e4 5619 * LOCKING:
f3187195 5620 * Inherited from calling layer (may sleep).
1da177e4 5621 */
f3187195 5622struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 5623{
f3187195 5624 struct ata_port *ap;
1da177e4 5625
f3187195
TH
5626 DPRINTK("ENTER\n");
5627
5628 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5629 if (!ap)
5630 return NULL;
4fca377f 5631
7b3a24c5 5632 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
cca3974e 5633 ap->lock = &host->lock;
f3187195 5634 ap->print_id = -1;
e628dc99 5635 ap->local_port_no = -1;
cca3974e 5636 ap->host = host;
f3187195 5637 ap->dev = host->dev;
bd5d825c
BP
5638
5639#if defined(ATA_VERBOSE_DEBUG)
5640 /* turn on all debugging levels */
5641 ap->msg_enable = 0x00FF;
5642#elif defined(ATA_DEBUG)
5643 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5644#else
0dd4b21f 5645 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5646#endif
1da177e4 5647
ad72cf98 5648 mutex_init(&ap->scsi_scan_mutex);
65f27f38
DH
5649 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5650 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5651 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5652 init_waitqueue_head(&ap->eh_wait_q);
45fabbb7 5653 init_completion(&ap->park_req_pending);
5ddf24c5
TH
5654 init_timer_deferrable(&ap->fastdrain_timer);
5655 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5656 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 5657
838df628 5658 ap->cbl = ATA_CBL_NONE;
838df628 5659
8989805d 5660 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
5661
5662#ifdef ATA_IRQ_TRAP
5663 ap->stats.unhandled_irq = 1;
5664 ap->stats.idle_irq = 1;
5665#endif
270390e1
TH
5666 ata_sff_port_init(ap);
5667
1da177e4 5668 return ap;
1da177e4
LT
5669}
5670
f0d36efd
TH
5671static void ata_host_release(struct device *gendev, void *res)
5672{
5673 struct ata_host *host = dev_get_drvdata(gendev);
5674 int i;
5675
1aa506e4
TH
5676 for (i = 0; i < host->n_ports; i++) {
5677 struct ata_port *ap = host->ports[i];
5678
4911487a
TH
5679 if (!ap)
5680 continue;
5681
5682 if (ap->scsi_host)
1aa506e4
TH
5683 scsi_host_put(ap->scsi_host);
5684
633273a3 5685 kfree(ap->pmp_link);
b1c72916 5686 kfree(ap->slave_link);
4911487a 5687 kfree(ap);
1aa506e4
TH
5688 host->ports[i] = NULL;
5689 }
5690
1aa56cca 5691 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
5692}
5693
f3187195
TH
5694/**
5695 * ata_host_alloc - allocate and init basic ATA host resources
5696 * @dev: generic device this host is associated with
5697 * @max_ports: maximum number of ATA ports associated with this host
5698 *
5699 * Allocate and initialize basic ATA host resources. LLD calls
5700 * this function to allocate a host, initializes it fully and
5701 * attaches it using ata_host_register().
5702 *
5703 * @max_ports ports are allocated and host->n_ports is
5704 * initialized to @max_ports. The caller is allowed to decrease
5705 * host->n_ports before calling ata_host_register(). The unused
5706 * ports will be automatically freed on registration.
5707 *
5708 * RETURNS:
5709 * Allocate ATA host on success, NULL on failure.
5710 *
5711 * LOCKING:
5712 * Inherited from calling layer (may sleep).
5713 */
5714struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5715{
5716 struct ata_host *host;
5717 size_t sz;
5718 int i;
5719
5720 DPRINTK("ENTER\n");
5721
5722 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5723 return NULL;
5724
5725 /* alloc a container for our list of ATA ports (buses) */
5726 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5727 /* alloc a container for our list of ATA ports (buses) */
5728 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5729 if (!host)
5730 goto err_out;
5731
5732 devres_add(dev, host);
5733 dev_set_drvdata(dev, host);
5734
5735 spin_lock_init(&host->lock);
c0c362b6 5736 mutex_init(&host->eh_mutex);
f3187195
TH
5737 host->dev = dev;
5738 host->n_ports = max_ports;
5739
5740 /* allocate ports bound to this host */
5741 for (i = 0; i < max_ports; i++) {
5742 struct ata_port *ap;
5743
5744 ap = ata_port_alloc(host);
5745 if (!ap)
5746 goto err_out;
5747
5748 ap->port_no = i;
5749 host->ports[i] = ap;
5750 }
5751
5752 devres_remove_group(dev, NULL);
5753 return host;
5754
5755 err_out:
5756 devres_release_group(dev, NULL);
5757 return NULL;
5758}
5759
f5cda257
TH
5760/**
5761 * ata_host_alloc_pinfo - alloc host and init with port_info array
5762 * @dev: generic device this host is associated with
5763 * @ppi: array of ATA port_info to initialize host with
5764 * @n_ports: number of ATA ports attached to this host
5765 *
5766 * Allocate ATA host and initialize with info from @ppi. If NULL
5767 * terminated, @ppi may contain fewer entries than @n_ports. The
5768 * last entry will be used for the remaining ports.
5769 *
5770 * RETURNS:
5771 * Allocate ATA host on success, NULL on failure.
5772 *
5773 * LOCKING:
5774 * Inherited from calling layer (may sleep).
5775 */
5776struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5777 const struct ata_port_info * const * ppi,
5778 int n_ports)
5779{
5780 const struct ata_port_info *pi;
5781 struct ata_host *host;
5782 int i, j;
5783
5784 host = ata_host_alloc(dev, n_ports);
5785 if (!host)
5786 return NULL;
5787
5788 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5789 struct ata_port *ap = host->ports[i];
5790
5791 if (ppi[j])
5792 pi = ppi[j++];
5793
5794 ap->pio_mask = pi->pio_mask;
5795 ap->mwdma_mask = pi->mwdma_mask;
5796 ap->udma_mask = pi->udma_mask;
5797 ap->flags |= pi->flags;
0c88758b 5798 ap->link.flags |= pi->link_flags;
f5cda257
TH
5799 ap->ops = pi->port_ops;
5800
5801 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5802 host->ops = pi->port_ops;
f5cda257
TH
5803 }
5804
5805 return host;
5806}
5807
b1c72916
TH
5808/**
5809 * ata_slave_link_init - initialize slave link
5810 * @ap: port to initialize slave link for
5811 *
5812 * Create and initialize slave link for @ap. This enables slave
5813 * link handling on the port.
5814 *
5815 * In libata, a port contains links and a link contains devices.
5816 * There is single host link but if a PMP is attached to it,
5817 * there can be multiple fan-out links. On SATA, there's usually
5818 * a single device connected to a link but PATA and SATA
5819 * controllers emulating TF based interface can have two - master
5820 * and slave.
5821 *
5822 * However, there are a few controllers which don't fit into this
5823 * abstraction too well - SATA controllers which emulate TF
5824 * interface with both master and slave devices but also have
5825 * separate SCR register sets for each device. These controllers
5826 * need separate links for physical link handling
5827 * (e.g. onlineness, link speed) but should be treated like a
5828 * traditional M/S controller for everything else (e.g. command
5829 * issue, softreset).
5830 *
5831 * slave_link is libata's way of handling this class of
5832 * controllers without impacting core layer too much. For
5833 * anything other than physical link handling, the default host
5834 * link is used for both master and slave. For physical link
5835 * handling, separate @ap->slave_link is used. All dirty details
5836 * are implemented inside libata core layer. From LLD's POV, the
5837 * only difference is that prereset, hardreset and postreset are
5838 * called once more for the slave link, so the reset sequence
5839 * looks like the following.
5840 *
5841 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5842 * softreset(M) -> postreset(M) -> postreset(S)
5843 *
5844 * Note that softreset is called only for the master. Softreset
5845 * resets both M/S by definition, so SRST on master should handle
5846 * both (the standard method will work just fine).
5847 *
5848 * LOCKING:
5849 * Should be called before host is registered.
5850 *
5851 * RETURNS:
5852 * 0 on success, -errno on failure.
5853 */
5854int ata_slave_link_init(struct ata_port *ap)
5855{
5856 struct ata_link *link;
5857
5858 WARN_ON(ap->slave_link);
5859 WARN_ON(ap->flags & ATA_FLAG_PMP);
5860
5861 link = kzalloc(sizeof(*link), GFP_KERNEL);
5862 if (!link)
5863 return -ENOMEM;
5864
5865 ata_link_init(ap, link, 1);
5866 ap->slave_link = link;
5867 return 0;
5868}
5869
32ebbc0c
TH
5870static void ata_host_stop(struct device *gendev, void *res)
5871{
5872 struct ata_host *host = dev_get_drvdata(gendev);
5873 int i;
5874
5875 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5876
5877 for (i = 0; i < host->n_ports; i++) {
5878 struct ata_port *ap = host->ports[i];
5879
5880 if (ap->ops->port_stop)
5881 ap->ops->port_stop(ap);
5882 }
5883
5884 if (host->ops->host_stop)
5885 host->ops->host_stop(host);
5886}
5887
029cfd6b
TH
5888/**
5889 * ata_finalize_port_ops - finalize ata_port_operations
5890 * @ops: ata_port_operations to finalize
5891 *
5892 * An ata_port_operations can inherit from another ops and that
5893 * ops can again inherit from another. This can go on as many
5894 * times as necessary as long as there is no loop in the
5895 * inheritance chain.
5896 *
5897 * Ops tables are finalized when the host is started. NULL or
5898 * unspecified entries are inherited from the closet ancestor
5899 * which has the method and the entry is populated with it.
5900 * After finalization, the ops table directly points to all the
5901 * methods and ->inherits is no longer necessary and cleared.
5902 *
5903 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5904 *
5905 * LOCKING:
5906 * None.
5907 */
5908static void ata_finalize_port_ops(struct ata_port_operations *ops)
5909{
2da67659 5910 static DEFINE_SPINLOCK(lock);
029cfd6b
TH
5911 const struct ata_port_operations *cur;
5912 void **begin = (void **)ops;
5913 void **end = (void **)&ops->inherits;
5914 void **pp;
5915
5916 if (!ops || !ops->inherits)
5917 return;
5918
5919 spin_lock(&lock);
5920
5921 for (cur = ops->inherits; cur; cur = cur->inherits) {
5922 void **inherit = (void **)cur;
5923
5924 for (pp = begin; pp < end; pp++, inherit++)
5925 if (!*pp)
5926 *pp = *inherit;
5927 }
5928
5929 for (pp = begin; pp < end; pp++)
5930 if (IS_ERR(*pp))
5931 *pp = NULL;
5932
5933 ops->inherits = NULL;
5934
5935 spin_unlock(&lock);
5936}
5937
ecef7253
TH
5938/**
5939 * ata_host_start - start and freeze ports of an ATA host
5940 * @host: ATA host to start ports for
5941 *
5942 * Start and then freeze ports of @host. Started status is
5943 * recorded in host->flags, so this function can be called
5944 * multiple times. Ports are guaranteed to get started only
f3187195
TH
5945 * once. If host->ops isn't initialized yet, its set to the
5946 * first non-dummy port ops.
ecef7253
TH
5947 *
5948 * LOCKING:
5949 * Inherited from calling layer (may sleep).
5950 *
5951 * RETURNS:
5952 * 0 if all ports are started successfully, -errno otherwise.
5953 */
5954int ata_host_start(struct ata_host *host)
5955{
32ebbc0c
TH
5956 int have_stop = 0;
5957 void *start_dr = NULL;
ecef7253
TH
5958 int i, rc;
5959
5960 if (host->flags & ATA_HOST_STARTED)
5961 return 0;
5962
029cfd6b
TH
5963 ata_finalize_port_ops(host->ops);
5964
ecef7253
TH
5965 for (i = 0; i < host->n_ports; i++) {
5966 struct ata_port *ap = host->ports[i];
5967
029cfd6b
TH
5968 ata_finalize_port_ops(ap->ops);
5969
f3187195
TH
5970 if (!host->ops && !ata_port_is_dummy(ap))
5971 host->ops = ap->ops;
5972
32ebbc0c
TH
5973 if (ap->ops->port_stop)
5974 have_stop = 1;
5975 }
5976
5977 if (host->ops->host_stop)
5978 have_stop = 1;
5979
5980 if (have_stop) {
5981 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5982 if (!start_dr)
5983 return -ENOMEM;
5984 }
5985
5986 for (i = 0; i < host->n_ports; i++) {
5987 struct ata_port *ap = host->ports[i];
5988
ecef7253
TH
5989 if (ap->ops->port_start) {
5990 rc = ap->ops->port_start(ap);
5991 if (rc) {
0f9fe9b7 5992 if (rc != -ENODEV)
a44fec1f
JP
5993 dev_err(host->dev,
5994 "failed to start port %d (errno=%d)\n",
5995 i, rc);
ecef7253
TH
5996 goto err_out;
5997 }
5998 }
ecef7253
TH
5999 ata_eh_freeze_port(ap);
6000 }
6001
32ebbc0c
TH
6002 if (start_dr)
6003 devres_add(host->dev, start_dr);
ecef7253
TH
6004 host->flags |= ATA_HOST_STARTED;
6005 return 0;
6006
6007 err_out:
6008 while (--i >= 0) {
6009 struct ata_port *ap = host->ports[i];
6010
6011 if (ap->ops->port_stop)
6012 ap->ops->port_stop(ap);
6013 }
32ebbc0c 6014 devres_free(start_dr);
ecef7253
TH
6015 return rc;
6016}
6017
b03732f0 6018/**
8d8e7d13 6019 * ata_sas_host_init - Initialize a host struct for sas (ipr, libsas)
cca3974e
JG
6020 * @host: host to initialize
6021 * @dev: device host is attached to
cca3974e 6022 * @ops: port_ops
b03732f0 6023 *
b03732f0 6024 */
cca3974e 6025void ata_host_init(struct ata_host *host, struct device *dev,
8d8e7d13 6026 struct ata_port_operations *ops)
b03732f0 6027{
cca3974e 6028 spin_lock_init(&host->lock);
c0c362b6 6029 mutex_init(&host->eh_mutex);
1a112d10 6030 host->n_tags = ATA_MAX_QUEUE - 1;
cca3974e 6031 host->dev = dev;
cca3974e 6032 host->ops = ops;
b03732f0
BK
6033}
6034
9508a66f 6035void __ata_port_probe(struct ata_port *ap)
79318057 6036{
9508a66f
DW
6037 struct ata_eh_info *ehi = &ap->link.eh_info;
6038 unsigned long flags;
886ad09f 6039
9508a66f
DW
6040 /* kick EH for boot probing */
6041 spin_lock_irqsave(ap->lock, flags);
79318057 6042
9508a66f
DW
6043 ehi->probe_mask |= ATA_ALL_DEVICES;
6044 ehi->action |= ATA_EH_RESET;
6045 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
79318057 6046
9508a66f
DW
6047 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6048 ap->pflags |= ATA_PFLAG_LOADING;
6049 ata_port_schedule_eh(ap);
79318057 6050
9508a66f
DW
6051 spin_unlock_irqrestore(ap->lock, flags);
6052}
79318057 6053
9508a66f
DW
6054int ata_port_probe(struct ata_port *ap)
6055{
6056 int rc = 0;
79318057 6057
9508a66f
DW
6058 if (ap->ops->error_handler) {
6059 __ata_port_probe(ap);
79318057
AV
6060 ata_port_wait_eh(ap);
6061 } else {
6062 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6063 rc = ata_bus_probe(ap);
6064 DPRINTK("ata%u: bus probe end\n", ap->print_id);
79318057 6065 }
238c9cf9
JB
6066 return rc;
6067}
6068
6069
6070static void async_port_probe(void *data, async_cookie_t cookie)
6071{
6072 struct ata_port *ap = data;
4fca377f 6073
238c9cf9
JB
6074 /*
6075 * If we're not allowed to scan this host in parallel,
6076 * we need to wait until all previous scans have completed
6077 * before going further.
6078 * Jeff Garzik says this is only within a controller, so we
6079 * don't need to wait for port 0, only for later ports.
6080 */
6081 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6082 async_synchronize_cookie(cookie);
6083
6084 (void)ata_port_probe(ap);
f29d3b23
AV
6085
6086 /* in order to keep device order, we need to synchronize at this point */
6087 async_synchronize_cookie(cookie);
6088
6089 ata_scsi_scan_host(ap, 1);
79318057 6090}
238c9cf9 6091
f3187195
TH
6092/**
6093 * ata_host_register - register initialized ATA host
6094 * @host: ATA host to register
6095 * @sht: template for SCSI host
6096 *
6097 * Register initialized ATA host. @host is allocated using
6098 * ata_host_alloc() and fully initialized by LLD. This function
6099 * starts ports, registers @host with ATA and SCSI layers and
6100 * probe registered devices.
6101 *
6102 * LOCKING:
6103 * Inherited from calling layer (may sleep).
6104 *
6105 * RETURNS:
6106 * 0 on success, -errno otherwise.
6107 */
6108int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6109{
6110 int i, rc;
6111
1a112d10 6112 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
1871ee13 6113
f3187195
TH
6114 /* host must have been started */
6115 if (!(host->flags & ATA_HOST_STARTED)) {
a44fec1f 6116 dev_err(host->dev, "BUG: trying to register unstarted host\n");
f3187195
TH
6117 WARN_ON(1);
6118 return -EINVAL;
6119 }
6120
6121 /* Blow away unused ports. This happens when LLD can't
6122 * determine the exact number of ports to allocate at
6123 * allocation time.
6124 */
6125 for (i = host->n_ports; host->ports[i]; i++)
6126 kfree(host->ports[i]);
6127
6128 /* give ports names and add SCSI hosts */
e628dc99 6129 for (i = 0; i < host->n_ports; i++) {
85d6725b 6130 host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
e628dc99
DM
6131 host->ports[i]->local_port_no = i + 1;
6132 }
4fca377f 6133
d9027470
GG
6134 /* Create associated sysfs transport objects */
6135 for (i = 0; i < host->n_ports; i++) {
6136 rc = ata_tport_add(host->dev,host->ports[i]);
6137 if (rc) {
6138 goto err_tadd;
6139 }
6140 }
6141
f3187195
TH
6142 rc = ata_scsi_add_hosts(host, sht);
6143 if (rc)
d9027470 6144 goto err_tadd;
f3187195
TH
6145
6146 /* set cable, sata_spd_limit and report */
6147 for (i = 0; i < host->n_ports; i++) {
6148 struct ata_port *ap = host->ports[i];
f3187195
TH
6149 unsigned long xfer_mask;
6150
6151 /* set SATA cable type if still unset */
6152 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6153 ap->cbl = ATA_CBL_SATA;
6154
6155 /* init sata_spd_limit to the current value */
4fb37a25 6156 sata_link_init_spd(&ap->link);
b1c72916
TH
6157 if (ap->slave_link)
6158 sata_link_init_spd(ap->slave_link);
f3187195 6159
cbcdd875 6160 /* print per-port info to dmesg */
f3187195
TH
6161 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6162 ap->udma_mask);
6163
abf6e8ed 6164 if (!ata_port_is_dummy(ap)) {
a9a79dfe
JP
6165 ata_port_info(ap, "%cATA max %s %s\n",
6166 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6167 ata_mode_string(xfer_mask),
6168 ap->link.eh_info.desc);
abf6e8ed
TH
6169 ata_ehi_clear_desc(&ap->link.eh_info);
6170 } else
a9a79dfe 6171 ata_port_info(ap, "DUMMY\n");
f3187195
TH
6172 }
6173
f6005354 6174 /* perform each probe asynchronously */
f3187195
TH
6175 for (i = 0; i < host->n_ports; i++) {
6176 struct ata_port *ap = host->ports[i];
79318057 6177 async_schedule(async_port_probe, ap);
f3187195 6178 }
f3187195
TH
6179
6180 return 0;
d9027470
GG
6181
6182 err_tadd:
6183 while (--i >= 0) {
6184 ata_tport_delete(host->ports[i]);
6185 }
6186 return rc;
6187
f3187195
TH
6188}
6189
f5cda257
TH
6190/**
6191 * ata_host_activate - start host, request IRQ and register it
6192 * @host: target ATA host
6193 * @irq: IRQ to request
6194 * @irq_handler: irq_handler used when requesting IRQ
6195 * @irq_flags: irq_flags used when requesting IRQ
6196 * @sht: scsi_host_template to use when registering the host
6197 *
6198 * After allocating an ATA host and initializing it, most libata
6199 * LLDs perform three steps to activate the host - start host,
6200 * request IRQ and register it. This helper takes necessasry
6201 * arguments and performs the three steps in one go.
6202 *
3d46b2e2
PM
6203 * An invalid IRQ skips the IRQ registration and expects the host to
6204 * have set polling mode on the port. In this case, @irq_handler
6205 * should be NULL.
6206 *
f5cda257
TH
6207 * LOCKING:
6208 * Inherited from calling layer (may sleep).
6209 *
6210 * RETURNS:
6211 * 0 on success, -errno otherwise.
6212 */
6213int ata_host_activate(struct ata_host *host, int irq,
6214 irq_handler_t irq_handler, unsigned long irq_flags,
6215 struct scsi_host_template *sht)
6216{
cbcdd875 6217 int i, rc;
f5cda257
TH
6218
6219 rc = ata_host_start(host);
6220 if (rc)
6221 return rc;
6222
3d46b2e2
PM
6223 /* Special case for polling mode */
6224 if (!irq) {
6225 WARN_ON(irq_handler);
6226 return ata_host_register(host, sht);
6227 }
6228
f5cda257
TH
6229 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6230 dev_driver_string(host->dev), host);
6231 if (rc)
6232 return rc;
6233
cbcdd875
TH
6234 for (i = 0; i < host->n_ports; i++)
6235 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 6236
f5cda257
TH
6237 rc = ata_host_register(host, sht);
6238 /* if failed, just free the IRQ and leave ports alone */
6239 if (rc)
6240 devm_free_irq(host->dev, irq, host);
6241
6242 return rc;
6243}
6244
720ba126
TH
6245/**
6246 * ata_port_detach - Detach ATA port in prepration of device removal
6247 * @ap: ATA port to be detached
6248 *
6249 * Detach all ATA devices and the associated SCSI devices of @ap;
6250 * then, remove the associated SCSI host. @ap is guaranteed to
6251 * be quiescent on return from this function.
6252 *
6253 * LOCKING:
6254 * Kernel thread context (may sleep).
6255 */
741b7763 6256static void ata_port_detach(struct ata_port *ap)
720ba126
TH
6257{
6258 unsigned long flags;
a6f9bf4d
LK
6259 struct ata_link *link;
6260 struct ata_device *dev;
720ba126
TH
6261
6262 if (!ap->ops->error_handler)
c3cf30a9 6263 goto skip_eh;
720ba126
TH
6264
6265 /* tell EH we're leaving & flush EH */
ba6a1308 6266 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 6267 ap->pflags |= ATA_PFLAG_UNLOADING;
ece180d1 6268 ata_port_schedule_eh(ap);
ba6a1308 6269 spin_unlock_irqrestore(ap->lock, flags);
720ba126 6270
ece180d1 6271 /* wait till EH commits suicide */
720ba126
TH
6272 ata_port_wait_eh(ap);
6273
ece180d1
TH
6274 /* it better be dead now */
6275 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
720ba126 6276
afe2c511 6277 cancel_delayed_work_sync(&ap->hotplug_task);
720ba126 6278
c3cf30a9 6279 skip_eh:
a6f9bf4d
LK
6280 /* clean up zpodd on port removal */
6281 ata_for_each_link(link, ap, HOST_FIRST) {
6282 ata_for_each_dev(dev, link, ALL) {
6283 if (zpodd_dev_enabled(dev))
6284 zpodd_exit(dev);
6285 }
6286 }
d9027470
GG
6287 if (ap->pmp_link) {
6288 int i;
6289 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6290 ata_tlink_delete(&ap->pmp_link[i]);
6291 }
720ba126 6292 /* remove the associated SCSI host */
cca3974e 6293 scsi_remove_host(ap->scsi_host);
c5700766 6294 ata_tport_delete(ap);
720ba126
TH
6295}
6296
0529c159
TH
6297/**
6298 * ata_host_detach - Detach all ports of an ATA host
6299 * @host: Host to detach
6300 *
6301 * Detach all ports of @host.
6302 *
6303 * LOCKING:
6304 * Kernel thread context (may sleep).
6305 */
6306void ata_host_detach(struct ata_host *host)
6307{
6308 int i;
6309
6310 for (i = 0; i < host->n_ports; i++)
6311 ata_port_detach(host->ports[i]);
562f0c2d
TH
6312
6313 /* the host is dead now, dissociate ACPI */
6314 ata_acpi_dissociate(host);
0529c159
TH
6315}
6316
374b1873
JG
6317#ifdef CONFIG_PCI
6318
1da177e4
LT
6319/**
6320 * ata_pci_remove_one - PCI layer callback for device removal
6321 * @pdev: PCI device that was removed
6322 *
b878ca5d
TH
6323 * PCI layer indicates to libata via this hook that hot-unplug or
6324 * module unload event has occurred. Detach all ports. Resource
6325 * release is handled via devres.
1da177e4
LT
6326 *
6327 * LOCKING:
6328 * Inherited from PCI layer (may sleep).
6329 */
f0d36efd 6330void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 6331{
04a3f5b7 6332 struct ata_host *host = pci_get_drvdata(pdev);
1da177e4 6333
b878ca5d 6334 ata_host_detach(host);
1da177e4
LT
6335}
6336
6337/* move to PCI subsystem */
057ace5e 6338int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6339{
6340 unsigned long tmp = 0;
6341
6342 switch (bits->width) {
6343 case 1: {
6344 u8 tmp8 = 0;
6345 pci_read_config_byte(pdev, bits->reg, &tmp8);
6346 tmp = tmp8;
6347 break;
6348 }
6349 case 2: {
6350 u16 tmp16 = 0;
6351 pci_read_config_word(pdev, bits->reg, &tmp16);
6352 tmp = tmp16;
6353 break;
6354 }
6355 case 4: {
6356 u32 tmp32 = 0;
6357 pci_read_config_dword(pdev, bits->reg, &tmp32);
6358 tmp = tmp32;
6359 break;
6360 }
6361
6362 default:
6363 return -EINVAL;
6364 }
6365
6366 tmp &= bits->mask;
6367
6368 return (tmp == bits->val) ? 1 : 0;
6369}
9b847548 6370
6ffa01d8 6371#ifdef CONFIG_PM
3c5100c1 6372void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6373{
6374 pci_save_state(pdev);
4c90d971 6375 pci_disable_device(pdev);
500530f6 6376
3a2d5b70 6377 if (mesg.event & PM_EVENT_SLEEP)
500530f6 6378 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6379}
6380
553c4aa6 6381int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6382{
553c4aa6
TH
6383 int rc;
6384
9b847548
JA
6385 pci_set_power_state(pdev, PCI_D0);
6386 pci_restore_state(pdev);
553c4aa6 6387
b878ca5d 6388 rc = pcim_enable_device(pdev);
553c4aa6 6389 if (rc) {
a44fec1f
JP
6390 dev_err(&pdev->dev,
6391 "failed to enable device after resume (%d)\n", rc);
553c4aa6
TH
6392 return rc;
6393 }
6394
9b847548 6395 pci_set_master(pdev);
553c4aa6 6396 return 0;
500530f6
TH
6397}
6398
3c5100c1 6399int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6400{
04a3f5b7 6401 struct ata_host *host = pci_get_drvdata(pdev);
500530f6
TH
6402 int rc = 0;
6403
cca3974e 6404 rc = ata_host_suspend(host, mesg);
500530f6
TH
6405 if (rc)
6406 return rc;
6407
3c5100c1 6408 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6409
6410 return 0;
6411}
6412
6413int ata_pci_device_resume(struct pci_dev *pdev)
6414{
04a3f5b7 6415 struct ata_host *host = pci_get_drvdata(pdev);
553c4aa6 6416 int rc;
500530f6 6417
553c4aa6
TH
6418 rc = ata_pci_device_do_resume(pdev);
6419 if (rc == 0)
6420 ata_host_resume(host);
6421 return rc;
9b847548 6422}
6ffa01d8
TH
6423#endif /* CONFIG_PM */
6424
1da177e4
LT
6425#endif /* CONFIG_PCI */
6426
b7db04d9
BN
6427/**
6428 * ata_platform_remove_one - Platform layer callback for device removal
6429 * @pdev: Platform device that was removed
6430 *
6431 * Platform layer indicates to libata via this hook that hot-unplug or
6432 * module unload event has occurred. Detach all ports. Resource
6433 * release is handled via devres.
6434 *
6435 * LOCKING:
6436 * Inherited from platform layer (may sleep).
6437 */
6438int ata_platform_remove_one(struct platform_device *pdev)
6439{
6440 struct ata_host *host = platform_get_drvdata(pdev);
6441
6442 ata_host_detach(host);
6443
6444 return 0;
6445}
6446
33267325
TH
6447static int __init ata_parse_force_one(char **cur,
6448 struct ata_force_ent *force_ent,
6449 const char **reason)
6450{
6451 /* FIXME: Currently, there's no way to tag init const data and
6452 * using __initdata causes build failure on some versions of
6453 * gcc. Once __initdataconst is implemented, add const to the
6454 * following structure.
6455 */
6456 static struct ata_force_param force_tbl[] __initdata = {
6457 { "40c", .cbl = ATA_CBL_PATA40 },
6458 { "80c", .cbl = ATA_CBL_PATA80 },
6459 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6460 { "unk", .cbl = ATA_CBL_PATA_UNK },
6461 { "ign", .cbl = ATA_CBL_PATA_IGN },
6462 { "sata", .cbl = ATA_CBL_SATA },
6463 { "1.5Gbps", .spd_limit = 1 },
6464 { "3.0Gbps", .spd_limit = 2 },
6465 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6466 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
43c9c591 6467 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
33267325
TH
6468 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6469 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6470 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6471 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6472 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6473 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6474 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6475 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6476 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6477 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6478 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6479 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6480 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6481 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6482 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6483 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6484 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6485 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6486 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6487 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6488 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6489 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6490 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6491 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6492 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6493 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6494 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6495 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6496 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6497 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6498 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6499 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6500 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6501 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
05944bdf
TH
6502 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6503 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6504 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
ca6d43b0 6505 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
966fbe19 6506 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
b8bd6dc3 6507 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
33267325
TH
6508 };
6509 char *start = *cur, *p = *cur;
6510 char *id, *val, *endp;
6511 const struct ata_force_param *match_fp = NULL;
6512 int nr_matches = 0, i;
6513
6514 /* find where this param ends and update *cur */
6515 while (*p != '\0' && *p != ',')
6516 p++;
6517
6518 if (*p == '\0')
6519 *cur = p;
6520 else
6521 *cur = p + 1;
6522
6523 *p = '\0';
6524
6525 /* parse */
6526 p = strchr(start, ':');
6527 if (!p) {
6528 val = strstrip(start);
6529 goto parse_val;
6530 }
6531 *p = '\0';
6532
6533 id = strstrip(start);
6534 val = strstrip(p + 1);
6535
6536 /* parse id */
6537 p = strchr(id, '.');
6538 if (p) {
6539 *p++ = '\0';
6540 force_ent->device = simple_strtoul(p, &endp, 10);
6541 if (p == endp || *endp != '\0') {
6542 *reason = "invalid device";
6543 return -EINVAL;
6544 }
6545 }
6546
6547 force_ent->port = simple_strtoul(id, &endp, 10);
6548 if (p == endp || *endp != '\0') {
6549 *reason = "invalid port/link";
6550 return -EINVAL;
6551 }
6552
6553 parse_val:
6554 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6555 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6556 const struct ata_force_param *fp = &force_tbl[i];
6557
6558 if (strncasecmp(val, fp->name, strlen(val)))
6559 continue;
6560
6561 nr_matches++;
6562 match_fp = fp;
6563
6564 if (strcasecmp(val, fp->name) == 0) {
6565 nr_matches = 1;
6566 break;
6567 }
6568 }
6569
6570 if (!nr_matches) {
6571 *reason = "unknown value";
6572 return -EINVAL;
6573 }
6574 if (nr_matches > 1) {
6575 *reason = "ambigious value";
6576 return -EINVAL;
6577 }
6578
6579 force_ent->param = *match_fp;
6580
6581 return 0;
6582}
6583
6584static void __init ata_parse_force_param(void)
6585{
6586 int idx = 0, size = 1;
6587 int last_port = -1, last_device = -1;
6588 char *p, *cur, *next;
6589
6590 /* calculate maximum number of params and allocate force_tbl */
6591 for (p = ata_force_param_buf; *p; p++)
6592 if (*p == ',')
6593 size++;
6594
6595 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6596 if (!ata_force_tbl) {
6597 printk(KERN_WARNING "ata: failed to extend force table, "
6598 "libata.force ignored\n");
6599 return;
6600 }
6601
6602 /* parse and populate the table */
6603 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6604 const char *reason = "";
6605 struct ata_force_ent te = { .port = -1, .device = -1 };
6606
6607 next = cur;
6608 if (ata_parse_force_one(&next, &te, &reason)) {
6609 printk(KERN_WARNING "ata: failed to parse force "
6610 "parameter \"%s\" (%s)\n",
6611 cur, reason);
6612 continue;
6613 }
6614
6615 if (te.port == -1) {
6616 te.port = last_port;
6617 te.device = last_device;
6618 }
6619
6620 ata_force_tbl[idx++] = te;
6621
6622 last_port = te.port;
6623 last_device = te.device;
6624 }
6625
6626 ata_force_tbl_size = idx;
6627}
1da177e4 6628
1da177e4
LT
6629static int __init ata_init(void)
6630{
d9027470 6631 int rc;
270390e1 6632
33267325
TH
6633 ata_parse_force_param();
6634
270390e1 6635 rc = ata_sff_init();
ad72cf98
TH
6636 if (rc) {
6637 kfree(ata_force_tbl);
6638 return rc;
6639 }
453b07ac 6640
d9027470
GG
6641 libata_transport_init();
6642 ata_scsi_transport_template = ata_attach_transport();
6643 if (!ata_scsi_transport_template) {
6644 ata_sff_exit();
6645 rc = -ENOMEM;
6646 goto err_out;
4fca377f 6647 }
d9027470 6648
1da177e4
LT
6649 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6650 return 0;
d9027470
GG
6651
6652err_out:
6653 return rc;
1da177e4
LT
6654}
6655
6656static void __exit ata_exit(void)
6657{
d9027470
GG
6658 ata_release_transport(ata_scsi_transport_template);
6659 libata_transport_exit();
270390e1 6660 ata_sff_exit();
33267325 6661 kfree(ata_force_tbl);
1da177e4
LT
6662}
6663
a4625085 6664subsys_initcall(ata_init);
1da177e4
LT
6665module_exit(ata_exit);
6666
9990b6f3 6667static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
67846b30
JG
6668
6669int ata_ratelimit(void)
6670{
9990b6f3 6671 return __ratelimit(&ratelimit);
67846b30
JG
6672}
6673
c0c362b6
TH
6674/**
6675 * ata_msleep - ATA EH owner aware msleep
6676 * @ap: ATA port to attribute the sleep to
6677 * @msecs: duration to sleep in milliseconds
6678 *
6679 * Sleeps @msecs. If the current task is owner of @ap's EH, the
6680 * ownership is released before going to sleep and reacquired
6681 * after the sleep is complete. IOW, other ports sharing the
6682 * @ap->host will be allowed to own the EH while this task is
6683 * sleeping.
6684 *
6685 * LOCKING:
6686 * Might sleep.
6687 */
97750ceb
TH
6688void ata_msleep(struct ata_port *ap, unsigned int msecs)
6689{
c0c362b6
TH
6690 bool owns_eh = ap && ap->host->eh_owner == current;
6691
6692 if (owns_eh)
6693 ata_eh_release(ap);
6694
97750ceb 6695 msleep(msecs);
c0c362b6
TH
6696
6697 if (owns_eh)
6698 ata_eh_acquire(ap);
97750ceb
TH
6699}
6700
c22daff4
TH
6701/**
6702 * ata_wait_register - wait until register value changes
97750ceb 6703 * @ap: ATA port to wait register for, can be NULL
c22daff4
TH
6704 * @reg: IO-mapped register
6705 * @mask: Mask to apply to read register value
6706 * @val: Wait condition
341c2c95
TH
6707 * @interval: polling interval in milliseconds
6708 * @timeout: timeout in milliseconds
c22daff4
TH
6709 *
6710 * Waiting for some bits of register to change is a common
6711 * operation for ATA controllers. This function reads 32bit LE
6712 * IO-mapped register @reg and tests for the following condition.
6713 *
6714 * (*@reg & mask) != val
6715 *
6716 * If the condition is met, it returns; otherwise, the process is
6717 * repeated after @interval_msec until timeout.
6718 *
6719 * LOCKING:
6720 * Kernel thread context (may sleep)
6721 *
6722 * RETURNS:
6723 * The final register value.
6724 */
97750ceb 6725u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
341c2c95 6726 unsigned long interval, unsigned long timeout)
c22daff4 6727{
341c2c95 6728 unsigned long deadline;
c22daff4
TH
6729 u32 tmp;
6730
6731 tmp = ioread32(reg);
6732
6733 /* Calculate timeout _after_ the first read to make sure
6734 * preceding writes reach the controller before starting to
6735 * eat away the timeout.
6736 */
341c2c95 6737 deadline = ata_deadline(jiffies, timeout);
c22daff4 6738
341c2c95 6739 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
97750ceb 6740 ata_msleep(ap, interval);
c22daff4
TH
6741 tmp = ioread32(reg);
6742 }
6743
6744 return tmp;
6745}
6746
dd5b06c4
TH
6747/*
6748 * Dummy port_ops
6749 */
182d7bba 6750static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
dd5b06c4 6751{
182d7bba 6752 return AC_ERR_SYSTEM;
dd5b06c4
TH
6753}
6754
182d7bba 6755static void ata_dummy_error_handler(struct ata_port *ap)
dd5b06c4 6756{
182d7bba 6757 /* truly dummy */
dd5b06c4
TH
6758}
6759
029cfd6b 6760struct ata_port_operations ata_dummy_port_ops = {
dd5b06c4
TH
6761 .qc_prep = ata_noop_qc_prep,
6762 .qc_issue = ata_dummy_qc_issue,
182d7bba 6763 .error_handler = ata_dummy_error_handler,
e4a9c373
DW
6764 .sched_eh = ata_std_sched_eh,
6765 .end_eh = ata_std_end_eh,
dd5b06c4
TH
6766};
6767
21b0ad4f
TH
6768const struct ata_port_info ata_dummy_port_info = {
6769 .port_ops = &ata_dummy_port_ops,
6770};
6771
a9a79dfe
JP
6772/*
6773 * Utility print functions
6774 */
6775int ata_port_printk(const struct ata_port *ap, const char *level,
6776 const char *fmt, ...)
6777{
6778 struct va_format vaf;
6779 va_list args;
6780 int r;
6781
6782 va_start(args, fmt);
6783
6784 vaf.fmt = fmt;
6785 vaf.va = &args;
6786
6787 r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
6788
6789 va_end(args);
6790
6791 return r;
6792}
6793EXPORT_SYMBOL(ata_port_printk);
6794
6795int ata_link_printk(const struct ata_link *link, const char *level,
6796 const char *fmt, ...)
6797{
6798 struct va_format vaf;
6799 va_list args;
6800 int r;
6801
6802 va_start(args, fmt);
6803
6804 vaf.fmt = fmt;
6805 vaf.va = &args;
6806
6807 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6808 r = printk("%sata%u.%02u: %pV",
6809 level, link->ap->print_id, link->pmp, &vaf);
6810 else
6811 r = printk("%sata%u: %pV",
6812 level, link->ap->print_id, &vaf);
6813
6814 va_end(args);
6815
6816 return r;
6817}
6818EXPORT_SYMBOL(ata_link_printk);
6819
6820int ata_dev_printk(const struct ata_device *dev, const char *level,
6821 const char *fmt, ...)
6822{
6823 struct va_format vaf;
6824 va_list args;
6825 int r;
6826
6827 va_start(args, fmt);
6828
6829 vaf.fmt = fmt;
6830 vaf.va = &args;
6831
6832 r = printk("%sata%u.%02u: %pV",
6833 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6834 &vaf);
6835
6836 va_end(args);
6837
6838 return r;
6839}
6840EXPORT_SYMBOL(ata_dev_printk);
6841
06296a1e
JP
6842void ata_print_version(const struct device *dev, const char *version)
6843{
6844 dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6845}
6846EXPORT_SYMBOL(ata_print_version);
6847
1da177e4
LT
6848/*
6849 * libata is essentially a library of internal helper functions for
6850 * low-level ATA host controller drivers. As such, the API/ABI is
6851 * likely to change as new drivers are added and updated.
6852 * Do not depend on ABI/API stability.
6853 */
e9c83914
TH
6854EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6855EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6856EXPORT_SYMBOL_GPL(sata_deb_timing_long);
029cfd6b
TH
6857EXPORT_SYMBOL_GPL(ata_base_port_ops);
6858EXPORT_SYMBOL_GPL(sata_port_ops);
dd5b06c4 6859EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 6860EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1eca4365
TH
6861EXPORT_SYMBOL_GPL(ata_link_next);
6862EXPORT_SYMBOL_GPL(ata_dev_next);
1da177e4 6863EXPORT_SYMBOL_GPL(ata_std_bios_param);
d8d9129e 6864EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
cca3974e 6865EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 6866EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 6867EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
b1c72916 6868EXPORT_SYMBOL_GPL(ata_slave_link_init);
ecef7253 6869EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 6870EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 6871EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 6872EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 6873EXPORT_SYMBOL_GPL(ata_sg_init);
f686bcb8 6874EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6875EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
436d34b3 6876EXPORT_SYMBOL_GPL(atapi_cmd_type);
1da177e4
LT
6877EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6878EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
6879EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6880EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6881EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6882EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6883EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6884EXPORT_SYMBOL_GPL(ata_mode_string);
6885EXPORT_SYMBOL_GPL(ata_id_xfermask);
04351821 6886EXPORT_SYMBOL_GPL(ata_do_set_mode);
31cc23b3 6887EXPORT_SYMBOL_GPL(ata_std_qc_defer);
e46834cd 6888EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
10305f0f 6889EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6890EXPORT_SYMBOL_GPL(sata_set_spd);
aa2731ad 6891EXPORT_SYMBOL_GPL(ata_wait_after_reset);
936fd732
TH
6892EXPORT_SYMBOL_GPL(sata_link_debounce);
6893EXPORT_SYMBOL_GPL(sata_link_resume);
1152b261 6894EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
0aa1113d 6895EXPORT_SYMBOL_GPL(ata_std_prereset);
cc0680a5 6896EXPORT_SYMBOL_GPL(sata_link_hardreset);
57c9efdf 6897EXPORT_SYMBOL_GPL(sata_std_hardreset);
203c75b8 6898EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6899EXPORT_SYMBOL_GPL(ata_dev_classify);
6900EXPORT_SYMBOL_GPL(ata_dev_pair);
67846b30 6901EXPORT_SYMBOL_GPL(ata_ratelimit);
97750ceb 6902EXPORT_SYMBOL_GPL(ata_msleep);
c22daff4 6903EXPORT_SYMBOL_GPL(ata_wait_register);
1da177e4 6904EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6905EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6906EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6907EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
f6e67035 6908EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
34bf2170
TH
6909EXPORT_SYMBOL_GPL(sata_scr_valid);
6910EXPORT_SYMBOL_GPL(sata_scr_read);
6911EXPORT_SYMBOL_GPL(sata_scr_write);
6912EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
6913EXPORT_SYMBOL_GPL(ata_link_online);
6914EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 6915#ifdef CONFIG_PM
cca3974e
JG
6916EXPORT_SYMBOL_GPL(ata_host_suspend);
6917EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6918#endif /* CONFIG_PM */
6a62a04d
TH
6919EXPORT_SYMBOL_GPL(ata_id_string);
6920EXPORT_SYMBOL_GPL(ata_id_c_string);
963e4975 6921EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1da177e4
LT
6922EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6923
1bc4ccff 6924EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 6925EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
6926EXPORT_SYMBOL_GPL(ata_timing_compute);
6927EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 6928EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 6929
1da177e4
LT
6930#ifdef CONFIG_PCI
6931EXPORT_SYMBOL_GPL(pci_test_config_bits);
1da177e4 6932EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6933#ifdef CONFIG_PM
500530f6
TH
6934EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6935EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6936EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6937EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6938#endif /* CONFIG_PM */
1da177e4 6939#endif /* CONFIG_PCI */
9b847548 6940
b7db04d9
BN
6941EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6942
b64bbc39
TH
6943EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6944EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6945EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
6946EXPORT_SYMBOL_GPL(ata_port_desc);
6947#ifdef CONFIG_PCI
6948EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6949#endif /* CONFIG_PCI */
7b70fc03 6950EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 6951EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 6952EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 6953EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 6954EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
6955EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6956EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6957EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6958EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
10acf3b0 6959EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
022bdb07 6960EXPORT_SYMBOL_GPL(ata_do_eh);
a1efdaba 6961EXPORT_SYMBOL_GPL(ata_std_error_handler);
be0d18df
AC
6962
6963EXPORT_SYMBOL_GPL(ata_cable_40wire);
6964EXPORT_SYMBOL_GPL(ata_cable_80wire);
6965EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 6966EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 6967EXPORT_SYMBOL_GPL(ata_cable_sata);