Merge tag 's390-5.2-5' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
[linux-2.6-block.git] / drivers / ata / libata-core.c
CommitLineData
c82ee6d3 1// SPDX-License-Identifier: GPL-2.0-or-later
1da177e4 2/*
af36d7f0
JG
3 * libata-core.c - helper library for ATA
4 *
8c3d3d4b 5 * Maintained by: Tejun Heo <tj@kernel.org>
af36d7f0
JG
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
10 * Copyright 2003-2004 Jeff Garzik
11 *
af36d7f0 12 * libata documentation is available via 'make {ps|pdf}docs',
19285f3c 13 * as Documentation/driver-api/libata.rst
af36d7f0
JG
14 *
15 * Hardware documentation available from http://www.t13.org/ and
16 * http://www.sata-io.org/
17 *
92c52c52
AC
18 * Standards documents from:
19 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
20 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
21 * http://www.sata-io.org (SATA)
22 * http://www.compactflash.org (CF)
23 * http://www.qic.org (QIC157 - Tape and DSC)
24 * http://www.ce-ata.org (CE-ATA: not supported)
1da177e4
LT
25 */
26
1da177e4
LT
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/list.h>
32#include <linux/mm.h>
1da177e4
LT
33#include <linux/spinlock.h>
34#include <linux/blkdev.h>
35#include <linux/delay.h>
36#include <linux/timer.h>
848c3920 37#include <linux/time.h>
1da177e4
LT
38#include <linux/interrupt.h>
39#include <linux/completion.h>
40#include <linux/suspend.h>
41#include <linux/workqueue.h>
378f058c 42#include <linux/scatterlist.h>
2dcb407e 43#include <linux/io.h>
79318057 44#include <linux/async.h>
e18086d6 45#include <linux/log2.h>
5a0e3ad6 46#include <linux/slab.h>
428ac5fc 47#include <linux/glob.h>
1da177e4 48#include <scsi/scsi.h>
193515d5 49#include <scsi/scsi_cmnd.h>
1da177e4
LT
50#include <scsi/scsi_host.h>
51#include <linux/libata.h>
1da177e4 52#include <asm/byteorder.h>
fe5af0cc 53#include <asm/unaligned.h>
140b5e59 54#include <linux/cdrom.h>
9990b6f3 55#include <linux/ratelimit.h>
eb25cb99 56#include <linux/leds.h>
9ee4f393 57#include <linux/pm_runtime.h>
b7db04d9 58#include <linux/platform_device.h>
1da177e4 59
255c03d1
HR
60#define CREATE_TRACE_POINTS
61#include <trace/events/libata.h>
62
1da177e4 63#include "libata.h"
d9027470 64#include "libata-transport.h"
fda0efc5 65
/*
 * Debounce timing parameters in msecs { interval, duration, timeout }.
 * interval is the polling period, duration how long the link state must
 * stay stable, timeout the overall deadline (exact use is in the SATA
 * debounce helpers, not visible in this file chunk).
 */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
/*
 * Base port operations inherited by all libata drivers: standard
 * prereset/postreset hooks and the standard error-handling entry points.
 */
const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

/*
 * SATA-specific port operations: base ops plus NCQ-aware command
 * deferral and the standard SATA hardreset.
 */
const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};
/* forward declarations for helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* atomic counter used to hand out unique print IDs */
atomic_t ata_print_id = ATOMIC_INIT(0);
/*
 * One parsed "libata.force" override.  Fields left at 0/none mean
 * "no override of that aspect".
 */
struct ata_force_param {
	const char	*name;		/* human-readable name for log messages */
	unsigned int	cbl;		/* forced cable type (ATA_CBL_*) */
	int		spd_limit;	/* forced SATA PHY speed limit */
	unsigned long	xfer_mask;	/* forced transfer mode mask */
	unsigned int	horkage_on;	/* horkage bits to turn on */
	unsigned int	horkage_off;	/* horkage bits to turn off */
	unsigned int	lflags;		/* link flags to OR in */
};

/* an override plus the port/device it applies to (-1 == wildcard) */
struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
2486fa56 118static int atapi_enabled = 1;
1623c81e 119module_param(atapi_enabled, int, 0444);
ad5d8eac 120MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
1623c81e 121
c5c61bda 122static int atapi_dmadir = 0;
95de719a 123module_param(atapi_dmadir, int, 0444);
ad5d8eac 124MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
95de719a 125
baf4fdfa
ML
126int atapi_passthru16 = 1;
127module_param(atapi_passthru16, int, 0444);
ad5d8eac 128MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
baf4fdfa 129
c3c013a2
JG
130int libata_fua = 0;
131module_param_named(fua, libata_fua, int, 0444);
ad5d8eac 132MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
c3c013a2 133
2dcb407e 134static int ata_ignore_hpa;
1e999736
AC
135module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
136MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
137
b3a70601
AC
138static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
139module_param_named(dma, libata_dma_mask, int, 0444);
140MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
141
87fbc5a0 142static int ata_probe_timeout;
a8601e5f
AM
143module_param(ata_probe_timeout, int, 0444);
144MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
145
6ebe9d86 146int libata_noacpi = 0;
d7d0dad6 147module_param_named(noacpi, libata_noacpi, int, 0444);
ad5d8eac 148MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
11ef697b 149
ae8d4ee7
AC
150int libata_allow_tpm = 0;
151module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
ad5d8eac 152MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
ae8d4ee7 153
e7ecd435
TH
154static int atapi_an;
155module_param(atapi_an, int, 0444);
156MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
157
1da177e4
LT
158MODULE_AUTHOR("Jeff Garzik");
159MODULE_DESCRIPTION("Library module for ATA devices");
160MODULE_LICENSE("GPL");
161MODULE_VERSION(DRV_VERSION);
162
0baab86b 163
9913ff8a
TH
164static bool ata_sstatus_online(u32 sstatus)
165{
166 return (sstatus & 0xf) == 0x3;
167}
168
/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode, one of ATA_LITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			/* start with the fan-out links if a PMP is attached */
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			/* slave link, if any, comes right after the host link */
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* PMP links exhausted; PMP_FIRST still owes the host link */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 * ata_dev_next - device iteration helper
 * @dev: the previous device, NULL to start
 * @link: ATA link containing devices to iterate
 * @mode: iteration mode, one of ATA_DITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			/* forward iteration starts at the first device */
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			/* reverse iteration starts at the last device */
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* ENABLED modes skip devices that are not enabled */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}

b1c72916
TH
281/**
282 * ata_dev_phys_link - find physical link for a device
283 * @dev: ATA device to look up physical link for
284 *
285 * Look up physical link which @dev is attached to. Note that
286 * this is different from @dev->link only when @dev is on slave
287 * link. For all other cases, it's the same as @dev->link.
288 *
289 * LOCKING:
290 * Don't care.
291 *
292 * RETURNS:
293 * Pointer to the found physical link.
294 */
295struct ata_link *ata_dev_phys_link(struct ata_device *dev)
296{
297 struct ata_port *ap = dev->link->ap;
298
299 if (!ap->slave_link)
300 return dev->link;
301 if (!dev->devno)
302 return &ap->link;
303 return ap->slave_link;
304}
305
33267325
TH
306/**
307 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 308 * @ap: ATA port of interest
33267325
TH
309 *
310 * Force cable type according to libata.force and whine about it.
311 * The last entry which has matching port number is used, so it
312 * can be specified as part of device force parameters. For
313 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
314 * same effect.
315 *
316 * LOCKING:
317 * EH context.
318 */
319void ata_force_cbl(struct ata_port *ap)
320{
321 int i;
322
323 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
324 const struct ata_force_ent *fe = &ata_force_tbl[i];
325
326 if (fe->port != -1 && fe->port != ap->print_id)
327 continue;
328
329 if (fe->param.cbl == ATA_CBL_NONE)
330 continue;
331
332 ap->cbl = fe->param.cbl;
a9a79dfe 333 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
33267325
TH
334 return;
335 }
336}
337
338/**
05944bdf 339 * ata_force_link_limits - force link limits according to libata.force
33267325
TH
340 * @link: ATA link of interest
341 *
05944bdf
TH
342 * Force link flags and SATA spd limit according to libata.force
343 * and whine about it. When only the port part is specified
344 * (e.g. 1:), the limit applies to all links connected to both
345 * the host link and all fan-out ports connected via PMP. If the
346 * device part is specified as 0 (e.g. 1.00:), it specifies the
347 * first fan-out link not the host link. Device number 15 always
b1c72916
TH
348 * points to the host link whether PMP is attached or not. If the
349 * controller has slave link, device number 16 points to it.
33267325
TH
350 *
351 * LOCKING:
352 * EH context.
353 */
05944bdf 354static void ata_force_link_limits(struct ata_link *link)
33267325 355{
05944bdf 356 bool did_spd = false;
b1c72916
TH
357 int linkno = link->pmp;
358 int i;
33267325
TH
359
360 if (ata_is_host_link(link))
b1c72916 361 linkno += 15;
33267325
TH
362
363 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
364 const struct ata_force_ent *fe = &ata_force_tbl[i];
365
366 if (fe->port != -1 && fe->port != link->ap->print_id)
367 continue;
368
369 if (fe->device != -1 && fe->device != linkno)
370 continue;
371
05944bdf
TH
372 /* only honor the first spd limit */
373 if (!did_spd && fe->param.spd_limit) {
374 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
a9a79dfe 375 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
05944bdf
TH
376 fe->param.name);
377 did_spd = true;
378 }
33267325 379
05944bdf
TH
380 /* let lflags stack */
381 if (fe->param.lflags) {
382 link->flags |= fe->param.lflags;
a9a79dfe 383 ata_link_notice(link,
05944bdf
TH
384 "FORCE: link flag 0x%x forced -> 0x%x\n",
385 fe->param.lflags, link->flags);
386 }
33267325
TH
387 }
388}
389
390/**
391 * ata_force_xfermask - force xfermask according to libata.force
392 * @dev: ATA device of interest
393 *
394 * Force xfer_mask according to libata.force and whine about it.
395 * For consistency with link selection, device number 15 selects
396 * the first device connected to the host link.
397 *
398 * LOCKING:
399 * EH context.
400 */
401static void ata_force_xfermask(struct ata_device *dev)
402{
403 int devno = dev->link->pmp + dev->devno;
404 int alt_devno = devno;
405 int i;
406
b1c72916
TH
407 /* allow n.15/16 for devices attached to host port */
408 if (ata_is_host_link(dev->link))
409 alt_devno += 15;
33267325
TH
410
411 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
412 const struct ata_force_ent *fe = &ata_force_tbl[i];
413 unsigned long pio_mask, mwdma_mask, udma_mask;
414
415 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
416 continue;
417
418 if (fe->device != -1 && fe->device != devno &&
419 fe->device != alt_devno)
420 continue;
421
422 if (!fe->param.xfer_mask)
423 continue;
424
425 ata_unpack_xfermask(fe->param.xfer_mask,
426 &pio_mask, &mwdma_mask, &udma_mask);
427 if (udma_mask)
428 dev->udma_mask = udma_mask;
429 else if (mwdma_mask) {
430 dev->udma_mask = 0;
431 dev->mwdma_mask = mwdma_mask;
432 } else {
433 dev->udma_mask = 0;
434 dev->mwdma_mask = 0;
435 dev->pio_mask = pio_mask;
436 }
437
a9a79dfe
JP
438 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
439 fe->param.name);
33267325
TH
440 return;
441 }
442}
443
444/**
445 * ata_force_horkage - force horkage according to libata.force
446 * @dev: ATA device of interest
447 *
448 * Force horkage according to libata.force and whine about it.
449 * For consistency with link selection, device number 15 selects
450 * the first device connected to the host link.
451 *
452 * LOCKING:
453 * EH context.
454 */
455static void ata_force_horkage(struct ata_device *dev)
456{
457 int devno = dev->link->pmp + dev->devno;
458 int alt_devno = devno;
459 int i;
460
b1c72916
TH
461 /* allow n.15/16 for devices attached to host port */
462 if (ata_is_host_link(dev->link))
463 alt_devno += 15;
33267325
TH
464
465 for (i = 0; i < ata_force_tbl_size; i++) {
466 const struct ata_force_ent *fe = &ata_force_tbl[i];
467
468 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
469 continue;
470
471 if (fe->device != -1 && fe->device != devno &&
472 fe->device != alt_devno)
473 continue;
474
475 if (!(~dev->horkage & fe->param.horkage_on) &&
476 !(dev->horkage & fe->param.horkage_off))
477 continue;
478
479 dev->horkage |= fe->param.horkage_on;
480 dev->horkage &= ~fe->param.horkage_off;
481
a9a79dfe
JP
482 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
483 fe->param.name);
33267325
TH
484 }
485}
486
436d34b3
TH
487/**
488 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
489 * @opcode: SCSI opcode
490 *
491 * Determine ATAPI command type from @opcode.
492 *
493 * LOCKING:
494 * None.
495 *
496 * RETURNS:
497 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
498 */
499int atapi_cmd_type(u8 opcode)
500{
501 switch (opcode) {
502 case GPCMD_READ_10:
503 case GPCMD_READ_12:
504 return ATAPI_READ;
505
506 case GPCMD_WRITE_10:
507 case GPCMD_WRITE_12:
508 case GPCMD_WRITE_AND_VERIFY_10:
509 return ATAPI_WRITE;
510
511 case GPCMD_READ_CD:
512 case GPCMD_READ_CD_MSF:
513 return ATAPI_READ_CD;
514
e52dcc48
TH
515 case ATA_16:
516 case ATA_12:
517 if (atapi_passthru16)
518 return ATAPI_PASS_THRU;
519 /* fall thru */
436d34b3
TH
520 default:
521 return ATAPI_MISC;
522 }
523}
524
1da177e4
LT
525/**
526 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
527 * @tf: Taskfile to convert
1da177e4 528 * @pmp: Port multiplier port
9977126c
TH
529 * @is_cmd: This FIS is for command
530 * @fis: Buffer into which data will output
1da177e4
LT
531 *
532 * Converts a standard ATA taskfile to a Serial ATA
533 * FIS structure (Register - Host to Device).
534 *
535 * LOCKING:
536 * Inherited from caller.
537 */
9977126c 538void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 539{
9977126c
TH
540 fis[0] = 0x27; /* Register - Host to Device FIS */
541 fis[1] = pmp & 0xf; /* Port multiplier number*/
542 if (is_cmd)
543 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
544
1da177e4
LT
545 fis[2] = tf->command;
546 fis[3] = tf->feature;
547
548 fis[4] = tf->lbal;
549 fis[5] = tf->lbam;
550 fis[6] = tf->lbah;
551 fis[7] = tf->device;
552
553 fis[8] = tf->hob_lbal;
554 fis[9] = tf->hob_lbam;
555 fis[10] = tf->hob_lbah;
556 fis[11] = tf->hob_feature;
557
558 fis[12] = tf->nsect;
559 fis[13] = tf->hob_nsect;
560 fis[14] = 0;
561 fis[15] = tf->ctl;
562
86a565e6
MC
563 fis[16] = tf->auxiliary & 0xff;
564 fis[17] = (tf->auxiliary >> 8) & 0xff;
565 fis[18] = (tf->auxiliary >> 16) & 0xff;
566 fis[19] = (tf->auxiliary >> 24) & 0xff;
1da177e4
LT
567}
568
/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

/*
 * Read/write command lookup table, indexed by ata_rwcmd_protocol() as
 * base + fua(4) + lba48(2) + write(1), where base is 0 for PIO-multi,
 * 8 for plain PIO and 16 for DMA.  A zero entry means the combination
 * is unsupported (e.g. FUA is only available with LBA48 writes).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

628/**
8cbd6df1 629 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
630 * @tf: command to examine and configure
631 * @dev: device tf belongs to
1da177e4 632 *
2e9edbf8 633 * Examine the device configuration and tf->flags to calculate
8cbd6df1 634 * the proper read/write commands and protocol to use.
1da177e4
LT
635 *
636 * LOCKING:
637 * caller.
638 */
bd056d7e 639static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 640{
9a3dccc4 641 u8 cmd;
1da177e4 642
9a3dccc4 643 int index, fua, lba48, write;
2e9edbf8 644
9a3dccc4 645 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
646 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
647 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 648
8cbd6df1
AL
649 if (dev->flags & ATA_DFLAG_PIO) {
650 tf->protocol = ATA_PROT_PIO;
9a3dccc4 651 index = dev->multi_count ? 0 : 8;
9af5c9c9 652 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
653 /* Unable to use DMA due to host limitation */
654 tf->protocol = ATA_PROT_PIO;
0565c26d 655 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
656 } else {
657 tf->protocol = ATA_PROT_DMA;
9a3dccc4 658 index = 16;
8cbd6df1 659 }
1da177e4 660
9a3dccc4
TH
661 cmd = ata_rw_cmds[index + fua + lba48 + write];
662 if (cmd) {
663 tf->command = cmd;
664 return 0;
665 }
666 return -1;
1da177e4
LT
667}
668
35b649fe
TH
669/**
670 * ata_tf_read_block - Read block address from ATA taskfile
671 * @tf: ATA taskfile of interest
672 * @dev: ATA device @tf belongs to
673 *
674 * LOCKING:
675 * None.
676 *
677 * Read block address from @tf. This function can handle all
678 * three address formats - LBA, LBA48 and CHS. tf->protocol and
679 * flags select the address format to use.
680 *
681 * RETURNS:
682 * Block address read from @tf.
683 */
cffd1ee9 684u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
35b649fe
TH
685{
686 u64 block = 0;
687
fe16d4f2 688 if (tf->flags & ATA_TFLAG_LBA) {
35b649fe
TH
689 if (tf->flags & ATA_TFLAG_LBA48) {
690 block |= (u64)tf->hob_lbah << 40;
691 block |= (u64)tf->hob_lbam << 32;
44901a96 692 block |= (u64)tf->hob_lbal << 24;
35b649fe
TH
693 } else
694 block |= (tf->device & 0xf) << 24;
695
696 block |= tf->lbah << 16;
697 block |= tf->lbam << 8;
698 block |= tf->lbal;
699 } else {
700 u32 cyl, head, sect;
701
702 cyl = tf->lbam | (tf->lbah << 8);
703 head = tf->device & 0xf;
704 sect = tf->lbal;
705
ac8672ea 706 if (!sect) {
a9a79dfe
JP
707 ata_dev_warn(dev,
708 "device reported invalid CHS sector 0\n");
cffd1ee9 709 return U64_MAX;
ac8672ea
TH
710 }
711
712 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
35b649fe
TH
713 }
714
715 return block;
716}
717
/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 * @class: IO priority class
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag, int class)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag goes in nsect bits 7:3, count in feature regs */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		/* map RT I/O priority onto the NCQ priority bit */
		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
			if (class == IOPRIO_CLASS_RT)
				tf->hob_nsect |= ATA_PRIO_HIGH <<
						 ATA_SHIFT_PRIO;
		}
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

cb95d562
TH
848/**
849 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
850 * @pio_mask: pio_mask
851 * @mwdma_mask: mwdma_mask
852 * @udma_mask: udma_mask
853 *
854 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
855 * unsigned int xfer_mask.
856 *
857 * LOCKING:
858 * None.
859 *
860 * RETURNS:
861 * Packed xfer_mask.
862 */
7dc951ae
TH
863unsigned long ata_pack_xfermask(unsigned long pio_mask,
864 unsigned long mwdma_mask,
865 unsigned long udma_mask)
cb95d562
TH
866{
867 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
868 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
869 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
870}
871
/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

/*
 * Maps each transfer class to its bit range within an xfer_mask
 * (shift/bits) and the corresponding base XFER_* mode value.
 * Terminated by a sentinel entry with shift == -1.
 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

903/**
904 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
905 * @xfer_mask: xfer_mask of interest
906 *
907 * Return matching XFER_* value for @xfer_mask. Only the highest
908 * bit of @xfer_mask is considered.
909 *
910 * LOCKING:
911 * None.
912 *
913 * RETURNS:
70cd071e 914 * Matching XFER_* value, 0xff if no match found.
cb95d562 915 */
7dc951ae 916u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
917{
918 int highbit = fls(xfer_mask) - 1;
919 const struct ata_xfer_ent *ent;
920
921 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
922 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
923 return ent->base + highbit - ent->shift;
70cd071e 924 return 0xff;
cb95d562
TH
925}
926
927/**
928 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
929 * @xfer_mode: XFER_* of interest
930 *
931 * Return matching xfer_mask for @xfer_mode.
932 *
933 * LOCKING:
934 * None.
935 *
936 * RETURNS:
937 * Matching xfer_mask, 0 if no match found.
938 */
7dc951ae 939unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
940{
941 const struct ata_xfer_ent *ent;
942
943 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
944 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
945 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
946 & ~((1 << ent->shift) - 1);
cb95d562
TH
947 return 0;
948}
949
950/**
951 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
952 * @xfer_mode: XFER_* of interest
953 *
954 * Return matching xfer_shift for @xfer_mode.
955 *
956 * LOCKING:
957 * None.
958 *
959 * RETURNS:
960 * Matching xfer_shift, -1 if no match found.
961 */
7dc951ae 962int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
963{
964 const struct ata_xfer_ent *ent;
965
966 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
967 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
968 return ent->shift;
969 return -1;
970}
971
/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}

/**
 * sata_spd_string - convert SATA PHY speed number to string
 * @spd: 1-based speed (1 = 1.5 Gbps, 2 = 3.0 Gbps, 3 = 6.0 Gbps)
 *
 * RETURNS:
 * Constant C string for @spd, or "<unknown>" for 0 or out-of-range.
 */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};
	unsigned int idx = spd - 1;

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[idx];
}

1da177e4
LT
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
	 * spec has never mentioned about using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multpliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */

	/* lbam/lbah both zero: plain ATA device */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	/* 0x14/0xeb: ATAPI (packet) device */
	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	/* 0x69/0x96: SATA port multiplier */
	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	/* 0x3c/0xc3: SEMB enclosure bridge (may be a lying ATA disk,
	 * see the WDC note above)
	 */
	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	/* 0xcd/0xab: host-managed zoned (ZAC) device */
	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
1097
1da177e4 1098/**
6a62a04d 1099 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1100 * @id: IDENTIFY DEVICE results we will examine
1101 * @s: string into which data is output
1102 * @ofs: offset into identify device page
1103 * @len: length of string to return. must be an even number.
1104 *
1105 * The strings in the IDENTIFY DEVICE page are broken up into
1106 * 16-bit chunks. Run through the string, and output each
1107 * 8-bit chunk linearly, regardless of platform.
1108 *
1109 * LOCKING:
1110 * caller.
1111 */
1112
6a62a04d
TH
1113void ata_id_string(const u16 *id, unsigned char *s,
1114 unsigned int ofs, unsigned int len)
1da177e4
LT
1115{
1116 unsigned int c;
1117
963e4975
AC
1118 BUG_ON(len & 1);
1119
1da177e4
LT
1120 while (len > 0) {
1121 c = id[ofs] >> 8;
1122 *s = c;
1123 s++;
1124
1125 c = id[ofs] & 0xff;
1126 *s = c;
1127 s++;
1128
1129 ofs++;
1130 len -= 2;
1131 }
1132}
1133
0e949ff3 1134/**
6a62a04d 1135 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1136 * @id: IDENTIFY DEVICE results we will examine
1137 * @s: string into which data is output
1138 * @ofs: offset into identify device page
1139 * @len: length of string to return. must be an odd number.
1140 *
6a62a04d 1141 * This function is identical to ata_id_string except that it
0e949ff3
TH
1142 * trims trailing spaces and terminates the resulting string with
1143 * null. @len must be actual maximum length (even number) + 1.
1144 *
1145 * LOCKING:
1146 * caller.
1147 */
6a62a04d
TH
1148void ata_id_c_string(const u16 *id, unsigned char *s,
1149 unsigned int ofs, unsigned int len)
0e949ff3
TH
1150{
1151 unsigned char *p;
1152
6a62a04d 1153 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1154
1155 p = s + strnlen(s, len - 1);
1156 while (p > s && p[-1] == ' ')
1157 p--;
1158 *p = '\0';
1159}
0baab86b 1160
db6f8759
TH
1161static u64 ata_id_n_sectors(const u16 *id)
1162{
1163 if (ata_id_has_lba(id)) {
1164 if (ata_id_has_lba48(id))
968e594a 1165 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
db6f8759 1166 else
968e594a 1167 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
db6f8759
TH
1168 } else {
1169 if (ata_id_current_chs_valid(id))
968e594a
RH
1170 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1171 id[ATA_ID_CUR_SECTORS];
db6f8759 1172 else
968e594a
RH
1173 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1174 id[ATA_ID_SECTORS];
db6f8759
TH
1175 }
1176}
1177
a5987e0a 1178u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1e999736
AC
1179{
1180 u64 sectors = 0;
1181
1182 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1183 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
ba14a9c2 1184 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1e999736
AC
1185 sectors |= (tf->lbah & 0xff) << 16;
1186 sectors |= (tf->lbam & 0xff) << 8;
1187 sectors |= (tf->lbal & 0xff);
1188
a5987e0a 1189 return sectors;
1e999736
AC
1190}
1191
a5987e0a 1192u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1e999736
AC
1193{
1194 u64 sectors = 0;
1195
1196 sectors |= (tf->device & 0x0f) << 24;
1197 sectors |= (tf->lbah & 0xff) << 16;
1198 sectors |= (tf->lbam & 0xff) << 8;
1199 sectors |= (tf->lbal & 0xff);
1200
a5987e0a 1201 return sectors;
1e999736
AC
1202}
1203
1204/**
c728a914
TH
1205 * ata_read_native_max_address - Read native max address
1206 * @dev: target device
1207 * @max_sectors: out parameter for the result native max address
1e999736 1208 *
c728a914
TH
1209 * Perform an LBA48 or LBA28 native size query upon the device in
1210 * question.
1e999736 1211 *
c728a914
TH
1212 * RETURNS:
1213 * 0 on success, -EACCES if command is aborted by the drive.
1214 * -EIO on other errors.
1e999736 1215 */
c728a914 1216static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1217{
c728a914 1218 unsigned int err_mask;
1e999736 1219 struct ata_taskfile tf;
c728a914 1220 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1221
1222 ata_tf_init(dev, &tf);
1223
c728a914 1224 /* always clear all address registers */
1e999736 1225 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1226
c728a914
TH
1227 if (lba48) {
1228 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1229 tf.flags |= ATA_TFLAG_LBA48;
1230 } else
1231 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1232
bd18bc04 1233 tf.protocol = ATA_PROT_NODATA;
c728a914
TH
1234 tf.device |= ATA_LBA;
1235
2b789108 1236 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914 1237 if (err_mask) {
a9a79dfe
JP
1238 ata_dev_warn(dev,
1239 "failed to read native max address (err_mask=0x%x)\n",
1240 err_mask);
c728a914
TH
1241 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1242 return -EACCES;
1243 return -EIO;
1244 }
1e999736 1245
c728a914 1246 if (lba48)
a5987e0a 1247 *max_sectors = ata_tf_to_lba48(&tf) + 1;
c728a914 1248 else
a5987e0a 1249 *max_sectors = ata_tf_to_lba(&tf) + 1;
2dcb407e 1250 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1251 (*max_sectors)--;
c728a914 1252 return 0;
1e999736
AC
1253}
1254
1255/**
c728a914
TH
1256 * ata_set_max_sectors - Set max sectors
1257 * @dev: target device
6b38d1d1 1258 * @new_sectors: new max sectors value to set for the device
1e999736 1259 *
c728a914
TH
1260 * Set max sectors of @dev to @new_sectors.
1261 *
1262 * RETURNS:
1263 * 0 on success, -EACCES if command is aborted or denied (due to
1264 * previous non-volatile SET_MAX) by the drive. -EIO on other
1265 * errors.
1e999736 1266 */
05027adc 1267static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 1268{
c728a914 1269 unsigned int err_mask;
1e999736 1270 struct ata_taskfile tf;
c728a914 1271 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1272
1273 new_sectors--;
1274
1275 ata_tf_init(dev, &tf);
1276
1e999736 1277 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
1278
1279 if (lba48) {
1280 tf.command = ATA_CMD_SET_MAX_EXT;
1281 tf.flags |= ATA_TFLAG_LBA48;
1282
1283 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1284 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1285 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 1286 } else {
c728a914
TH
1287 tf.command = ATA_CMD_SET_MAX;
1288
1e582ba4
TH
1289 tf.device |= (new_sectors >> 24) & 0xf;
1290 }
1291
bd18bc04 1292 tf.protocol = ATA_PROT_NODATA;
c728a914 1293 tf.device |= ATA_LBA;
1e999736
AC
1294
1295 tf.lbal = (new_sectors >> 0) & 0xff;
1296 tf.lbam = (new_sectors >> 8) & 0xff;
1297 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 1298
2b789108 1299 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914 1300 if (err_mask) {
a9a79dfe
JP
1301 ata_dev_warn(dev,
1302 "failed to set max address (err_mask=0x%x)\n",
1303 err_mask);
c728a914
TH
1304 if (err_mask == AC_ERR_DEV &&
1305 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1306 return -EACCES;
1307 return -EIO;
1308 }
1309
c728a914 1310 return 0;
1e999736
AC
1311}
1312
1313/**
1314 * ata_hpa_resize - Resize a device with an HPA set
1315 * @dev: Device to resize
1316 *
1317 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1318 * it if required to the full size of the media. The caller must check
1319 * the drive has the HPA feature set enabled.
05027adc
TH
1320 *
1321 * RETURNS:
1322 * 0 on success, -errno on failure.
1e999736 1323 */
05027adc 1324static int ata_hpa_resize(struct ata_device *dev)
1e999736 1325{
05027adc
TH
1326 struct ata_eh_context *ehc = &dev->link->eh_context;
1327 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
445d211b 1328 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
05027adc
TH
1329 u64 sectors = ata_id_n_sectors(dev->id);
1330 u64 native_sectors;
c728a914 1331 int rc;
a617c09f 1332
05027adc 1333 /* do we need to do it? */
9162c657 1334 if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
05027adc
TH
1335 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1336 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 1337 return 0;
1e999736 1338
05027adc
TH
1339 /* read native max address */
1340 rc = ata_read_native_max_address(dev, &native_sectors);
1341 if (rc) {
dda7aba1
TH
1342 /* If device aborted the command or HPA isn't going to
1343 * be unlocked, skip HPA resizing.
05027adc 1344 */
445d211b 1345 if (rc == -EACCES || !unlock_hpa) {
a9a79dfe
JP
1346 ata_dev_warn(dev,
1347 "HPA support seems broken, skipping HPA handling\n");
05027adc
TH
1348 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1349
1350 /* we can continue if device aborted the command */
1351 if (rc == -EACCES)
1352 rc = 0;
1e999736 1353 }
37301a55 1354
05027adc
TH
1355 return rc;
1356 }
5920dadf 1357 dev->n_native_sectors = native_sectors;
05027adc
TH
1358
1359 /* nothing to do? */
445d211b 1360 if (native_sectors <= sectors || !unlock_hpa) {
05027adc
TH
1361 if (!print_info || native_sectors == sectors)
1362 return 0;
1363
1364 if (native_sectors > sectors)
a9a79dfe 1365 ata_dev_info(dev,
05027adc
TH
1366 "HPA detected: current %llu, native %llu\n",
1367 (unsigned long long)sectors,
1368 (unsigned long long)native_sectors);
1369 else if (native_sectors < sectors)
a9a79dfe
JP
1370 ata_dev_warn(dev,
1371 "native sectors (%llu) is smaller than sectors (%llu)\n",
05027adc
TH
1372 (unsigned long long)native_sectors,
1373 (unsigned long long)sectors);
1374 return 0;
1375 }
1376
1377 /* let's unlock HPA */
1378 rc = ata_set_max_sectors(dev, native_sectors);
1379 if (rc == -EACCES) {
1380 /* if device aborted the command, skip HPA resizing */
a9a79dfe
JP
1381 ata_dev_warn(dev,
1382 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1383 (unsigned long long)sectors,
1384 (unsigned long long)native_sectors);
05027adc
TH
1385 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1386 return 0;
1387 } else if (rc)
1388 return rc;
1389
1390 /* re-read IDENTIFY data */
1391 rc = ata_dev_reread_id(dev, 0);
1392 if (rc) {
a9a79dfe
JP
1393 ata_dev_err(dev,
1394 "failed to re-read IDENTIFY data after HPA resizing\n");
05027adc
TH
1395 return rc;
1396 }
1397
1398 if (print_info) {
1399 u64 new_sectors = ata_id_n_sectors(dev->id);
a9a79dfe 1400 ata_dev_info(dev,
05027adc
TH
1401 "HPA unlocked: %llu -> %llu, native %llu\n",
1402 (unsigned long long)sectors,
1403 (unsigned long long)new_sectors,
1404 (unsigned long long)native_sectors);
1405 }
1406
1407 return 0;
1e999736
AC
1408}
1409
1da177e4
LT
1410/**
1411 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1412 * @id: IDENTIFY DEVICE page to dump
1da177e4 1413 *
0bd3300a
TH
1414 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1415 * page.
1da177e4
LT
1416 *
1417 * LOCKING:
1418 * caller.
1419 */
1420
0bd3300a 1421static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1422{
1423 DPRINTK("49==0x%04x "
1424 "53==0x%04x "
1425 "63==0x%04x "
1426 "64==0x%04x "
1427 "75==0x%04x \n",
0bd3300a
TH
1428 id[49],
1429 id[53],
1430 id[63],
1431 id[64],
1432 id[75]);
1da177e4
LT
1433 DPRINTK("80==0x%04x "
1434 "81==0x%04x "
1435 "82==0x%04x "
1436 "83==0x%04x "
1437 "84==0x%04x \n",
0bd3300a
TH
1438 id[80],
1439 id[81],
1440 id[82],
1441 id[83],
1442 id[84]);
1da177e4
LT
1443 DPRINTK("88==0x%04x "
1444 "93==0x%04x\n",
0bd3300a
TH
1445 id[88],
1446 id[93]);
1da177e4
LT
1447}
1448
cb95d562
TH
1449/**
1450 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1451 * @id: IDENTIFY data to compute xfer mask from
1452 *
1453 * Compute the xfermask for this device. This is not as trivial
1454 * as it seems if we must consider early devices correctly.
1455 *
1456 * FIXME: pre IDE drive timing (do we care ?).
1457 *
1458 * LOCKING:
1459 * None.
1460 *
1461 * RETURNS:
1462 * Computed xfermask
1463 */
7dc951ae 1464unsigned long ata_id_xfermask(const u16 *id)
cb95d562 1465{
7dc951ae 1466 unsigned long pio_mask, mwdma_mask, udma_mask;
cb95d562
TH
1467
1468 /* Usual case. Word 53 indicates word 64 is valid */
1469 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1470 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1471 pio_mask <<= 3;
1472 pio_mask |= 0x7;
1473 } else {
1474 /* If word 64 isn't valid then Word 51 high byte holds
1475 * the PIO timing number for the maximum. Turn it into
1476 * a mask.
1477 */
7a0f1c8a 1478 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1479 if (mode < 5) /* Valid PIO range */
2dcb407e 1480 pio_mask = (2 << mode) - 1;
46767aeb
AC
1481 else
1482 pio_mask = 1;
cb95d562
TH
1483
1484 /* But wait.. there's more. Design your standards by
1485 * committee and you too can get a free iordy field to
1486 * process. However its the speeds not the modes that
1487 * are supported... Note drivers using the timing API
1488 * will get this right anyway
1489 */
1490 }
1491
1492 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1493
b352e57d
AC
1494 if (ata_id_is_cfa(id)) {
1495 /*
1496 * Process compact flash extended modes
1497 */
62afe5d7
SS
1498 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1499 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
b352e57d
AC
1500
1501 if (pio)
1502 pio_mask |= (1 << 5);
1503 if (pio > 1)
1504 pio_mask |= (1 << 6);
1505 if (dma)
1506 mwdma_mask |= (1 << 3);
1507 if (dma > 1)
1508 mwdma_mask |= (1 << 4);
1509 }
1510
fb21f0d0
TH
1511 udma_mask = 0;
1512 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1513 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1514
1515 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1516}
1517
7102d230 1518static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1519{
77853bf2 1520 struct completion *waiting = qc->private_data;
a2a7a662 1521
a2a7a662 1522 complete(waiting);
a2a7a662
TH
1523}
1524
1525/**
2432697b 1526 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1527 * @dev: Device to which the command is sent
1528 * @tf: Taskfile registers for the command and the result
d69cf37d 1529 * @cdb: CDB for packet command
e227867f 1530 * @dma_dir: Data transfer direction of the command
5c1ad8b3 1531 * @sgl: sg list for the data buffer of the command
2432697b 1532 * @n_elem: Number of sg entries
2b789108 1533 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1534 *
1535 * Executes libata internal command with timeout. @tf contains
1536 * command on entry and result on return. Timeout and error
1537 * conditions are reported via return value. No recovery action
1538 * is taken after a command times out. It's caller's duty to
1539 * clean up after timeout.
1540 *
1541 * LOCKING:
1542 * None. Should be called with kernel context, might sleep.
551e8889
TH
1543 *
1544 * RETURNS:
1545 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1546 */
2432697b
TH
1547unsigned ata_exec_internal_sg(struct ata_device *dev,
1548 struct ata_taskfile *tf, const u8 *cdb,
87260216 1549 int dma_dir, struct scatterlist *sgl,
2b789108 1550 unsigned int n_elem, unsigned long timeout)
a2a7a662 1551{
9af5c9c9
TH
1552 struct ata_link *link = dev->link;
1553 struct ata_port *ap = link->ap;
a2a7a662 1554 u8 command = tf->command;
87fbc5a0 1555 int auto_timeout = 0;
a2a7a662 1556 struct ata_queued_cmd *qc;
28361c40 1557 unsigned int preempted_tag;
e3ed8939
JA
1558 u32 preempted_sactive;
1559 u64 preempted_qc_active;
da917d69 1560 int preempted_nr_active_links;
60be6b9a 1561 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1562 unsigned long flags;
77853bf2 1563 unsigned int err_mask;
d95a717f 1564 int rc;
a2a7a662 1565
ba6a1308 1566 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1567
e3180499 1568 /* no internal command while frozen */
b51e9e5d 1569 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1570 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1571 return AC_ERR_SYSTEM;
1572 }
1573
2ab7db1f 1574 /* initialize internal qc */
28361c40 1575 qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
a2a7a662 1576
28361c40
JA
1577 qc->tag = ATA_TAG_INTERNAL;
1578 qc->hw_tag = 0;
2ab7db1f
TH
1579 qc->scsicmd = NULL;
1580 qc->ap = ap;
1581 qc->dev = dev;
1582 ata_qc_reinit(qc);
1583
9af5c9c9
TH
1584 preempted_tag = link->active_tag;
1585 preempted_sactive = link->sactive;
dedaf2b0 1586 preempted_qc_active = ap->qc_active;
da917d69 1587 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1588 link->active_tag = ATA_TAG_POISON;
1589 link->sactive = 0;
dedaf2b0 1590 ap->qc_active = 0;
da917d69 1591 ap->nr_active_links = 0;
2ab7db1f
TH
1592
1593 /* prepare & issue qc */
a2a7a662 1594 qc->tf = *tf;
d69cf37d
TH
1595 if (cdb)
1596 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e771451c
VP
1597
1598 /* some SATA bridges need us to indicate data xfer direction */
1599 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1600 dma_dir == DMA_FROM_DEVICE)
1601 qc->tf.feature |= ATAPI_DMADIR;
1602
e61e0672 1603 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1604 qc->dma_dir = dma_dir;
1605 if (dma_dir != DMA_NONE) {
2432697b 1606 unsigned int i, buflen = 0;
87260216 1607 struct scatterlist *sg;
2432697b 1608
87260216
JA
1609 for_each_sg(sgl, sg, n_elem, i)
1610 buflen += sg->length;
2432697b 1611
87260216 1612 ata_sg_init(qc, sgl, n_elem);
49c80429 1613 qc->nbytes = buflen;
a2a7a662
TH
1614 }
1615
77853bf2 1616 qc->private_data = &wait;
a2a7a662
TH
1617 qc->complete_fn = ata_qc_complete_internal;
1618
8e0e694a 1619 ata_qc_issue(qc);
a2a7a662 1620
ba6a1308 1621 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1622
87fbc5a0
TH
1623 if (!timeout) {
1624 if (ata_probe_timeout)
1625 timeout = ata_probe_timeout * 1000;
1626 else {
1627 timeout = ata_internal_cmd_timeout(dev, command);
1628 auto_timeout = 1;
1629 }
1630 }
2b789108 1631
c0c362b6
TH
1632 if (ap->ops->error_handler)
1633 ata_eh_release(ap);
1634
2b789108 1635 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
d95a717f 1636
c0c362b6
TH
1637 if (ap->ops->error_handler)
1638 ata_eh_acquire(ap);
1639
c429137a 1640 ata_sff_flush_pio_task(ap);
41ade50c 1641
d95a717f 1642 if (!rc) {
ba6a1308 1643 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1644
1645 /* We're racing with irq here. If we lose, the
1646 * following test prevents us from completing the qc
d95a717f
TH
1647 * twice. If we win, the port is frozen and will be
1648 * cleaned up by ->post_internal_cmd().
a2a7a662 1649 */
77853bf2 1650 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1651 qc->err_mask |= AC_ERR_TIMEOUT;
1652
1653 if (ap->ops->error_handler)
1654 ata_port_freeze(ap);
1655 else
1656 ata_qc_complete(qc);
f15a1daf 1657
0dd4b21f 1658 if (ata_msg_warn(ap))
a9a79dfe
JP
1659 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1660 command);
a2a7a662
TH
1661 }
1662
ba6a1308 1663 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1664 }
1665
d95a717f
TH
1666 /* do post_internal_cmd */
1667 if (ap->ops->post_internal_cmd)
1668 ap->ops->post_internal_cmd(qc);
1669
a51d644a
TH
1670 /* perform minimal error analysis */
1671 if (qc->flags & ATA_QCFLAG_FAILED) {
1672 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1673 qc->err_mask |= AC_ERR_DEV;
1674
1675 if (!qc->err_mask)
1676 qc->err_mask |= AC_ERR_OTHER;
1677
1678 if (qc->err_mask & ~AC_ERR_OTHER)
1679 qc->err_mask &= ~AC_ERR_OTHER;
2dae9955
DLM
1680 } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1681 qc->result_tf.command |= ATA_SENSE;
d95a717f
TH
1682 }
1683
15869303 1684 /* finish up */
ba6a1308 1685 spin_lock_irqsave(ap->lock, flags);
15869303 1686
e61e0672 1687 *tf = qc->result_tf;
77853bf2
TH
1688 err_mask = qc->err_mask;
1689
1690 ata_qc_free(qc);
9af5c9c9
TH
1691 link->active_tag = preempted_tag;
1692 link->sactive = preempted_sactive;
dedaf2b0 1693 ap->qc_active = preempted_qc_active;
da917d69 1694 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1695
ba6a1308 1696 spin_unlock_irqrestore(ap->lock, flags);
15869303 1697
87fbc5a0
TH
1698 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1699 ata_internal_cmd_timed_out(dev, command);
1700
77853bf2 1701 return err_mask;
a2a7a662
TH
1702}
1703
2432697b 1704/**
33480a0e 1705 * ata_exec_internal - execute libata internal command
2432697b
TH
1706 * @dev: Device to which the command is sent
1707 * @tf: Taskfile registers for the command and the result
1708 * @cdb: CDB for packet command
e227867f 1709 * @dma_dir: Data transfer direction of the command
2432697b
TH
1710 * @buf: Data buffer of the command
1711 * @buflen: Length of data buffer
2b789108 1712 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1713 *
1714 * Wrapper around ata_exec_internal_sg() which takes simple
1715 * buffer instead of sg list.
1716 *
1717 * LOCKING:
1718 * None. Should be called with kernel context, might sleep.
1719 *
1720 * RETURNS:
1721 * Zero on success, AC_ERR_* mask on failure
1722 */
1723unsigned ata_exec_internal(struct ata_device *dev,
1724 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1725 int dma_dir, void *buf, unsigned int buflen,
1726 unsigned long timeout)
2432697b 1727{
33480a0e
TH
1728 struct scatterlist *psg = NULL, sg;
1729 unsigned int n_elem = 0;
2432697b 1730
33480a0e
TH
1731 if (dma_dir != DMA_NONE) {
1732 WARN_ON(!buf);
1733 sg_init_one(&sg, buf, buflen);
1734 psg = &sg;
1735 n_elem++;
1736 }
2432697b 1737
2b789108
TH
1738 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1739 timeout);
2432697b
TH
1740}
1741
1bc4ccff
AC
1742/**
1743 * ata_pio_need_iordy - check if iordy needed
1744 * @adev: ATA device
1745 *
1746 * Check if the current speed of the device requires IORDY. Used
1747 * by various controllers for chip configuration.
1748 */
1bc4ccff
AC
1749unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1750{
0d9e6659
TH
1751 /* Don't set IORDY if we're preparing for reset. IORDY may
1752 * lead to controller lock up on certain controllers if the
1753 * port is not occupied. See bko#11703 for details.
1754 */
1755 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1756 return 0;
1757 /* Controller doesn't support IORDY. Probably a pointless
1758 * check as the caller should know this.
1759 */
9af5c9c9 1760 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1761 return 0;
5c18c4d2
DD
1762 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1763 if (ata_id_is_cfa(adev->id)
1764 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1765 return 0;
432729f0
AC
1766 /* PIO3 and higher it is mandatory */
1767 if (adev->pio_mode > XFER_PIO_2)
1768 return 1;
1769 /* We turn it on when possible */
1770 if (ata_id_has_iordy(adev->id))
1bc4ccff 1771 return 1;
432729f0
AC
1772 return 0;
1773}
2e9edbf8 1774
432729f0
AC
1775/**
1776 * ata_pio_mask_no_iordy - Return the non IORDY mask
1777 * @adev: ATA device
1778 *
1779 * Compute the highest mode possible if we are not using iordy. Return
1780 * -1 if no iordy mode is available.
1781 */
432729f0
AC
1782static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1783{
1bc4ccff 1784 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1785 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1786 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1787 /* Is the speed faster than the drive allows non IORDY ? */
1788 if (pio) {
1789 /* This is cycle times not frequency - watch the logic! */
1790 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1791 return 3 << ATA_SHIFT_PIO;
1792 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1793 }
1794 }
432729f0 1795 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1796}
1797
963e4975
AC
1798/**
1799 * ata_do_dev_read_id - default ID read method
1800 * @dev: device
1801 * @tf: proposed taskfile
1802 * @id: data buffer
1803 *
1804 * Issue the identify taskfile and hand back the buffer containing
1805 * identify data. For some RAID controllers and for pre ATA devices
1806 * this function is wrapped or replaced by the driver
1807 */
1808unsigned int ata_do_dev_read_id(struct ata_device *dev,
1809 struct ata_taskfile *tf, u16 *id)
1810{
1811 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1812 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1813}
1814
1da177e4 1815/**
49016aca 1816 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1817 * @dev: target device
1818 * @p_class: pointer to class of the target device (may be changed)
bff04647 1819 * @flags: ATA_READID_* flags
fe635c7e 1820 * @id: buffer to read IDENTIFY data into
1da177e4 1821 *
49016aca
TH
1822 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1823 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1824 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1825 * for pre-ATA4 drives.
1da177e4 1826 *
50a99018 1827 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1828 * now we abort if we hit that case.
50a99018 1829 *
1da177e4 1830 * LOCKING:
49016aca
TH
1831 * Kernel thread context (may sleep)
1832 *
1833 * RETURNS:
1834 * 0 on success, -errno otherwise.
1da177e4 1835 */
a9beec95 1836int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1837 unsigned int flags, u16 *id)
1da177e4 1838{
9af5c9c9 1839 struct ata_port *ap = dev->link->ap;
49016aca 1840 unsigned int class = *p_class;
a0123703 1841 struct ata_taskfile tf;
49016aca
TH
1842 unsigned int err_mask = 0;
1843 const char *reason;
79b42bab 1844 bool is_semb = class == ATA_DEV_SEMB;
54936f8b 1845 int may_fallback = 1, tried_spinup = 0;
49016aca 1846 int rc;
1da177e4 1847
0dd4b21f 1848 if (ata_msg_ctl(ap))
a9a79dfe 1849 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1da177e4 1850
963e4975 1851retry:
3373efd8 1852 ata_tf_init(dev, &tf);
a0123703 1853
49016aca 1854 switch (class) {
79b42bab
TH
1855 case ATA_DEV_SEMB:
1856 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
05b83605 1857 /* fall through */
49016aca 1858 case ATA_DEV_ATA:
9162c657 1859 case ATA_DEV_ZAC:
a0123703 1860 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1861 break;
1862 case ATA_DEV_ATAPI:
a0123703 1863 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1864 break;
1865 default:
1866 rc = -ENODEV;
1867 reason = "unsupported class";
1868 goto err_out;
1da177e4
LT
1869 }
1870
a0123703 1871 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1872
1873 /* Some devices choke if TF registers contain garbage. Make
1874 * sure those are properly initialized.
1875 */
1876 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1877
1878 /* Device presence detection is unreliable on some
1879 * controllers. Always poll IDENTIFY if available.
1880 */
1881 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1882
963e4975
AC
1883 if (ap->ops->read_id)
1884 err_mask = ap->ops->read_id(dev, &tf, id);
1885 else
1886 err_mask = ata_do_dev_read_id(dev, &tf, id);
1887
a0123703 1888 if (err_mask) {
800b3996 1889 if (err_mask & AC_ERR_NODEV_HINT) {
a9a79dfe 1890 ata_dev_dbg(dev, "NODEV after polling detection\n");
55a8e2c8
TH
1891 return -ENOENT;
1892 }
1893
79b42bab 1894 if (is_semb) {
a9a79dfe
JP
1895 ata_dev_info(dev,
1896 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
79b42bab
TH
1897 /* SEMB is not supported yet */
1898 *p_class = ATA_DEV_SEMB_UNSUP;
1899 return 0;
1900 }
1901
1ffc151f
TH
1902 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1903 /* Device or controller might have reported
1904 * the wrong device class. Give a shot at the
1905 * other IDENTIFY if the current one is
1906 * aborted by the device.
1907 */
1908 if (may_fallback) {
1909 may_fallback = 0;
1910
1911 if (class == ATA_DEV_ATA)
1912 class = ATA_DEV_ATAPI;
1913 else
1914 class = ATA_DEV_ATA;
1915 goto retry;
1916 }
1917
1918 /* Control reaches here iff the device aborted
1919 * both flavors of IDENTIFYs which happens
1920 * sometimes with phantom devices.
1921 */
a9a79dfe
JP
1922 ata_dev_dbg(dev,
1923 "both IDENTIFYs aborted, assuming NODEV\n");
1ffc151f 1924 return -ENOENT;
54936f8b
TH
1925 }
1926
49016aca
TH
1927 rc = -EIO;
1928 reason = "I/O error";
1da177e4
LT
1929 goto err_out;
1930 }
1931
43c9c591 1932 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
a9a79dfe
JP
1933 ata_dev_dbg(dev, "dumping IDENTIFY data, "
1934 "class=%d may_fallback=%d tried_spinup=%d\n",
1935 class, may_fallback, tried_spinup);
43c9c591
TH
1936 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1937 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1938 }
1939
54936f8b
TH
1940 /* Falling back doesn't make sense if ID data was read
1941 * successfully at least once.
1942 */
1943 may_fallback = 0;
1944
49016aca 1945 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1946
49016aca 1947 /* sanity check */
a4f5749b 1948 rc = -EINVAL;
6070068b 1949 reason = "device reports invalid type";
a4f5749b 1950
9162c657 1951 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
a4f5749b
TH
1952 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1953 goto err_out;
db63a4c8
AW
1954 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1955 ata_id_is_ata(id)) {
1956 ata_dev_dbg(dev,
1957 "host indicates ignore ATA devices, ignored\n");
1958 return -ENOENT;
1959 }
a4f5749b
TH
1960 } else {
1961 if (ata_id_is_ata(id))
1962 goto err_out;
49016aca
TH
1963 }
1964
169439c2
ML
1965 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1966 tried_spinup = 1;
1967 /*
1968 * Drive powered-up in standby mode, and requires a specific
1969 * SET_FEATURES spin-up subcommand before it will accept
1970 * anything other than the original IDENTIFY command.
1971 */
218f3d30 1972 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 1973 if (err_mask && id[2] != 0x738c) {
169439c2
ML
1974 rc = -EIO;
1975 reason = "SPINUP failed";
1976 goto err_out;
1977 }
1978 /*
1979 * If the drive initially returned incomplete IDENTIFY info,
1980 * we now must reissue the IDENTIFY command.
1981 */
1982 if (id[2] == 0x37c8)
1983 goto retry;
1984 }
1985
9162c657
HR
1986 if ((flags & ATA_READID_POSTRESET) &&
1987 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
49016aca
TH
1988 /*
1989 * The exact sequence expected by certain pre-ATA4 drives is:
1990 * SRST RESET
50a99018
AC
1991 * IDENTIFY (optional in early ATA)
1992 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
1993 * anything else..
1994 * Some drives were very specific about that exact sequence.
50a99018
AC
1995 *
1996 * Note that ATA4 says lba is mandatory so the second check
c9404c9c 1997 * should never trigger.
49016aca
TH
1998 */
1999 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2000 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2001 if (err_mask) {
2002 rc = -EIO;
2003 reason = "INIT_DEV_PARAMS failed";
2004 goto err_out;
2005 }
2006
2007 /* current CHS translation info (id[53-58]) might be
2008 * changed. reread the identify device info.
2009 */
bff04647 2010 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2011 goto retry;
2012 }
2013 }
2014
2015 *p_class = class;
fe635c7e 2016
49016aca
TH
2017 return 0;
2018
2019 err_out:
88574551 2020 if (ata_msg_warn(ap))
a9a79dfe
JP
2021 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2022 reason, err_mask);
49016aca
TH
2023 return rc;
2024}
2025
f01f62c2
CH
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@log: log to read
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
			       u8 page, void *buf, unsigned int sectors)
{
	unsigned long ap_flags = dev->link->ap->flags;
	struct ata_taskfile tf;
	unsigned int err_mask;
	bool dma = false;

	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);

	/*
	 * Return error without actually issuing the command on controllers
	 * which e.g. lockup on a read log page.
	 */
	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
		return AC_ERR_DEV;

retry:
	ata_tf_init(dev, &tf);
	/* Prefer the DMA variant when the device advertises it and it has
	 * not already been blacklisted via ATA_HORKAGE_NO_DMA_LOG. */
	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
		tf.protocol = ATA_PROT_DMA;
		dma = true;
	} else {
		tf.command = ATA_CMD_READ_LOG_EXT;
		tf.protocol = ATA_PROT_PIO;
		dma = false;
	}
	tf.lbal = log;
	tf.lbam = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;	/* 16-bit sector count, high byte */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	/* On a DMA failure, mark the device and fall back to PIO.  The
	 * horkage bit guarantees the retry takes the PIO branch, so this
	 * loops at most once. */
	if (err_mask && dma) {
		dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
		goto retry;
	}

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
2089
efe205a3
CH
2090static bool ata_log_supported(struct ata_device *dev, u8 log)
2091{
2092 struct ata_port *ap = dev->link->ap;
2093
2094 if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2095 return false;
2096 return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
2097}
2098
a0fd2454
CH
2099static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2100{
2101 struct ata_port *ap = dev->link->ap;
2102 unsigned int err, i;
2103
2104 if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2105 ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
2106 return false;
2107 }
2108
2109 /*
2110 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2111 * supported.
2112 */
2113 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2114 1);
2115 if (err) {
2116 ata_dev_info(dev,
2117 "failed to get Device Identify Log Emask 0x%x\n",
2118 err);
2119 return false;
2120 }
2121
2122 for (i = 0; i < ap->sector_buf[8]; i++) {
2123 if (ap->sector_buf[9 + i] == page)
2124 return true;
2125 }
2126
2127 return false;
2128}
2129
9062712f
TH
/*
 * Apply a per-device SATA link speed quirk (currently only the 1.5 Gbps
 * cap, ATA_HORKAGE_1_5_GBPS) by tightening the physical link's speed
 * limit.  Returns 0, or -EAGAIN to request another EH round when the
 * link is currently running faster than the new limit.
 */
static int ata_do_link_spd_horkage(struct ata_device *dev)
{
	struct ata_link *plink = ata_dev_phys_link(dev);
	u32 target, target_limit;

	/* No SCR access means we cannot control link speed anyway. */
	if (!sata_scr_valid(plink))
		return 0;

	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
		target = 1;
	else
		return 0;

	/* Limit mask allows all speeds up to and including @target. */
	target_limit = (1 << target) - 1;

	/* if already on stricter limit, no need to push further */
	if (plink->sata_spd_limit <= target_limit)
		return 0;

	plink->sata_spd_limit = target_limit;

	/* Request another EH round by returning -EAGAIN if link is
	 * going faster than the target speed.  Forward progress is
	 * guaranteed by setting sata_spd_limit to target_limit above.
	 */
	if (plink->sata_spd > target) {
		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
			     sata_spd_string(target));
		return -EAGAIN;
	}
	return 0;
}
2162
3373efd8 2163static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2164{
9af5c9c9 2165 struct ata_port *ap = dev->link->ap;
9ce8e307
JA
2166
2167 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2168 return 0;
2169
9af5c9c9 2170 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2171}
2172
5a233551
HR
/*
 * Cache the NCQ Send/Recv log into dev->ncq_send_recv_cmds and set
 * ATA_DFLAG_NCQ_SEND_RECV on success.  A read failure is non-fatal:
 * the flag simply stays clear.
 */
static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;

	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
		return;
	}
	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
				     0, ap->sector_buf, 1);
	if (err_mask) {
		ata_dev_dbg(dev,
			    "failed to get NCQ Send/Recv Log Emask 0x%x\n",
			    err_mask);
	} else {
		u8 *cmds = dev->ncq_send_recv_cmds;

		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);

		/* Quirked devices must not advertise queued TRIM even if
		 * the log claims support for it. */
		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
			ata_dev_dbg(dev, "disabling queued TRIM support\n");
			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
		}
	}
}
2201
284b3b77
HR
2202static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2203{
2204 struct ata_port *ap = dev->link->ap;
2205 unsigned int err_mask;
284b3b77 2206
efe205a3 2207 if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
284b3b77
HR
2208 ata_dev_warn(dev,
2209 "NCQ Send/Recv Log not supported\n");
2210 return;
2211 }
2212 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2213 0, ap->sector_buf, 1);
2214 if (err_mask) {
2215 ata_dev_dbg(dev,
2216 "failed to get NCQ Non-Data Log Emask 0x%x\n",
2217 err_mask);
2218 } else {
2219 u8 *cmds = dev->ncq_non_data_cmds;
2220
2221 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2222 }
2223}
2224
8e061784
AM
/*
 * Probe NCQ priority support from the SATA Settings page of the
 * IDENTIFY DEVICE data log and set/clear ATA_DFLAG_NCQ_PRIO
 * accordingly.  Skipped entirely unless the user has enabled it
 * via ATA_DFLAG_NCQ_PRIO_ENABLE.
 */
static void ata_dev_config_ncq_prio(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;

	if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
		return;
	}

	err_mask = ata_read_log_page(dev,
				     ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_SATA_SETTINGS,
				     ap->sector_buf,
				     1);
	if (err_mask) {
		ata_dev_dbg(dev,
			    "failed to get Identify Device data, Emask 0x%x\n",
			    err_mask);
		return;
	}

	/* BIT(3) of the NCQ priority byte signals priority support —
	 * presumably the "NCQ priority" bit of the SATA Settings page;
	 * confirm against ACS/SATA spec. */
	if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
		dev->flags |= ATA_DFLAG_NCQ_PRIO;
	} else {
		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
		ata_dev_dbg(dev, "SATA page does not support priority\n");
	}

}
2255
388539f3 2256static int ata_dev_config_ncq(struct ata_device *dev,
a6e6ce8e
TH
2257 char *desc, size_t desc_sz)
2258{
9af5c9c9 2259 struct ata_port *ap = dev->link->ap;
a6e6ce8e 2260 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
388539f3
SL
2261 unsigned int err_mask;
2262 char *aa_desc = "";
a6e6ce8e
TH
2263
2264 if (!ata_id_has_ncq(dev->id)) {
2265 desc[0] = '\0';
388539f3 2266 return 0;
a6e6ce8e 2267 }
75683fe7 2268 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6 2269 snprintf(desc, desc_sz, "NCQ (not used)");
388539f3 2270 return 0;
6919a0a6 2271 }
a6e6ce8e 2272 if (ap->flags & ATA_FLAG_NCQ) {
69278f79 2273 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
a6e6ce8e
TH
2274 dev->flags |= ATA_DFLAG_NCQ;
2275 }
2276
388539f3
SL
2277 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2278 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2279 ata_id_has_fpdma_aa(dev->id)) {
2280 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2281 SATA_FPDMA_AA);
2282 if (err_mask) {
a9a79dfe
JP
2283 ata_dev_err(dev,
2284 "failed to enable AA (error_mask=0x%x)\n",
2285 err_mask);
388539f3
SL
2286 if (err_mask != AC_ERR_DEV) {
2287 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2288 return -EIO;
2289 }
2290 } else
2291 aa_desc = ", AA";
2292 }
2293
a6e6ce8e 2294 if (hdepth >= ddepth)
388539f3 2295 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
a6e6ce8e 2296 else
388539f3
SL
2297 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2298 ddepth, aa_desc);
ed36911c 2299
284b3b77
HR
2300 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2301 if (ata_id_has_ncq_send_and_recv(dev->id))
2302 ata_dev_config_ncq_send_recv(dev);
2303 if (ata_id_has_ncq_non_data(dev->id))
2304 ata_dev_config_ncq_non_data(dev);
8e061784
AM
2305 if (ata_id_has_ncq_prio(dev->id))
2306 ata_dev_config_ncq_prio(dev);
284b3b77 2307 }
f78dea06 2308
388539f3 2309 return 0;
a6e6ce8e 2310}
f78dea06 2311
e87fd28c
HR
2312static void ata_dev_config_sense_reporting(struct ata_device *dev)
2313{
2314 unsigned int err_mask;
2315
2316 if (!ata_id_has_sense_reporting(dev->id))
2317 return;
2318
2319 if (ata_id_sense_reporting_enabled(dev->id))
2320 return;
2321
2322 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2323 if (err_mask) {
2324 ata_dev_dbg(dev,
2325 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2326 err_mask);
2327 }
2328}
2329
6d1003ae
HR
/*
 * Configure zoned (ZAC) device parameters: set ATA_DFLAG_ZAC for
 * host-managed/host-aware devices and read the zone limits from the
 * Zoned Device Information page of the IDENTIFY DEVICE data log.
 */
static void ata_dev_config_zac(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;
	u8 *identify_buf = ap->sector_buf;

	/* Default to "no limit reported". */
	dev->zac_zones_optimal_open = U32_MAX;
	dev->zac_zones_optimal_nonseq = U32_MAX;
	dev->zac_zones_max_open = U32_MAX;

	/*
	 * Always set the 'ZAC' flag for Host-managed devices.
	 */
	if (dev->class == ATA_DEV_ZAC)
		dev->flags |= ATA_DFLAG_ZAC;
	else if (ata_id_zoned_cap(dev->id) == 0x01)
		/*
		 * Check for host-aware devices.
		 */
		dev->flags |= ATA_DFLAG_ZAC;

	if (!(dev->flags & ATA_DFLAG_ZAC))
		return;

	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
		ata_dev_warn(dev,
			     "ATA Zoned Information Log not supported\n");
		return;
	}

	/*
	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_ZONED_INFORMATION,
				     identify_buf, 1);
	if (!err_mask) {
		u64 zoned_cap, opt_open, opt_nonseq, max_open;

		/* Bit 63 of each qword is treated as a "field valid"
		 * flag (same convention as the Security log qword). */
		zoned_cap = get_unaligned_le64(&identify_buf[8]);
		if ((zoned_cap >> 63))
			dev->zac_zoned_cap = (zoned_cap & 1);
		opt_open = get_unaligned_le64(&identify_buf[24]);
		if ((opt_open >> 63))
			dev->zac_zones_optimal_open = (u32)opt_open;
		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
		if ((opt_nonseq >> 63))
			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
		max_open = get_unaligned_le64(&identify_buf[40]);
		if ((max_open >> 63))
			dev->zac_zones_max_open = (u32)max_open;
	}
}
2383
818831c8
CH
/*
 * Probe Trusted Computing (e.g. TCG/Opal) capability from the Security
 * page of the IDENTIFY DEVICE data log and set ATA_DFLAG_TRUSTED when
 * the capability qword is valid and bit 0 is set.
 */
static void ata_dev_config_trusted(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	u64 trusted_cap;
	unsigned int err;

	if (!ata_id_has_trusted(dev->id))
		return;

	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
		ata_dev_warn(dev,
			     "Security Log not supported\n");
		return;
	}

	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
				ap->sector_buf, 1);
	if (err) {
		ata_dev_dbg(dev,
			    "failed to read Security Log, Emask 0x%x\n", err);
		return;
	}

	/* Bit 63 marks the capability qword as valid. */
	trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
	if (!(trusted_cap & (1ULL << 63))) {
		ata_dev_dbg(dev,
			    "Trusted Computing capability qword not valid!\n");
		return;
	}

	if (trusted_cap & (1 << 0))
		dev->flags |= ATA_DFLAG_TRUSTED;
}
2417
49016aca 2418/**
ffeae418 2419 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
2420 * @dev: Target device to configure
2421 *
2422 * Configure @dev according to @dev->id. Generic and low-level
2423 * driver specific fixups are also applied.
49016aca
TH
2424 *
2425 * LOCKING:
ffeae418
TH
2426 * Kernel thread context (may sleep)
2427 *
2428 * RETURNS:
2429 * 0 on success, -errno otherwise
49016aca 2430 */
efdaedc4 2431int ata_dev_configure(struct ata_device *dev)
49016aca 2432{
9af5c9c9
TH
2433 struct ata_port *ap = dev->link->ap;
2434 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 2435 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 2436 const u16 *id = dev->id;
7dc951ae 2437 unsigned long xfer_mask;
65fe1f0f 2438 unsigned int err_mask;
b352e57d 2439 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
2440 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2441 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 2442 int rc;
49016aca 2443
0dd4b21f 2444 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
a9a79dfe 2445 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
ffeae418 2446 return 0;
49016aca
TH
2447 }
2448
0dd4b21f 2449 if (ata_msg_probe(ap))
a9a79dfe 2450 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1da177e4 2451
75683fe7
TH
2452 /* set horkage */
2453 dev->horkage |= ata_dev_blacklisted(dev);
33267325 2454 ata_force_horkage(dev);
75683fe7 2455
50af2fa1 2456 if (dev->horkage & ATA_HORKAGE_DISABLE) {
a9a79dfe 2457 ata_dev_info(dev, "unsupported device, disabling\n");
50af2fa1
TH
2458 ata_dev_disable(dev);
2459 return 0;
2460 }
2461
2486fa56
TH
2462 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2463 dev->class == ATA_DEV_ATAPI) {
a9a79dfe
JP
2464 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2465 atapi_enabled ? "not supported with this driver"
2466 : "disabled");
2486fa56
TH
2467 ata_dev_disable(dev);
2468 return 0;
2469 }
2470
9062712f
TH
2471 rc = ata_do_link_spd_horkage(dev);
2472 if (rc)
2473 return rc;
2474
ecd75ad5
TH
2475 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2476 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2477 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2478 dev->horkage |= ATA_HORKAGE_NOLPM;
2479
240630e6
HG
2480 if (ap->flags & ATA_FLAG_NO_LPM)
2481 dev->horkage |= ATA_HORKAGE_NOLPM;
2482
ecd75ad5
TH
2483 if (dev->horkage & ATA_HORKAGE_NOLPM) {
2484 ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2485 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2486 }
2487
6746544c
TH
2488 /* let ACPI work its magic */
2489 rc = ata_acpi_on_devcfg(dev);
2490 if (rc)
2491 return rc;
08573a86 2492
05027adc
TH
2493 /* massage HPA, do it early as it might change IDENTIFY data */
2494 rc = ata_hpa_resize(dev);
2495 if (rc)
2496 return rc;
2497
c39f5ebe 2498 /* print device capabilities */
0dd4b21f 2499 if (ata_msg_probe(ap))
a9a79dfe
JP
2500 ata_dev_dbg(dev,
2501 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2502 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2503 __func__,
2504 id[49], id[82], id[83], id[84],
2505 id[85], id[86], id[87], id[88]);
c39f5ebe 2506
208a9933 2507 /* initialize to-be-configured parameters */
ea1dd4e1 2508 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
2509 dev->max_sectors = 0;
2510 dev->cdb_len = 0;
2511 dev->n_sectors = 0;
2512 dev->cylinders = 0;
2513 dev->heads = 0;
2514 dev->sectors = 0;
e18086d6 2515 dev->multi_count = 0;
208a9933 2516
1da177e4
LT
2517 /*
2518 * common ATA, ATAPI feature tests
2519 */
2520
ff8854b2 2521 /* find max transfer mode; for printk only */
1148c3a7 2522 xfer_mask = ata_id_xfermask(id);
1da177e4 2523
0dd4b21f
BP
2524 if (ata_msg_probe(ap))
2525 ata_dump_id(id);
1da177e4 2526
ef143d57
AL
2527 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2528 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2529 sizeof(fwrevbuf));
2530
2531 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2532 sizeof(modelbuf));
2533
1da177e4 2534 /* ATA-specific feature tests */
9162c657 2535 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
b352e57d 2536 if (ata_id_is_cfa(id)) {
62afe5d7
SS
2537 /* CPRM may make this media unusable */
2538 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
a9a79dfe
JP
2539 ata_dev_warn(dev,
2540 "supports DRM functions and may not be fully accessible\n");
b352e57d 2541 snprintf(revbuf, 7, "CFA");
ae8d4ee7 2542 } else {
2dcb407e 2543 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
ae8d4ee7
AC
2544 /* Warn the user if the device has TPM extensions */
2545 if (ata_id_has_tpm(id))
a9a79dfe
JP
2546 ata_dev_warn(dev,
2547 "supports DRM functions and may not be fully accessible\n");
ae8d4ee7 2548 }
b352e57d 2549
1148c3a7 2550 dev->n_sectors = ata_id_n_sectors(id);
2940740b 2551
e18086d6
ML
2552 /* get current R/W Multiple count setting */
2553 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2554 unsigned int max = dev->id[47] & 0xff;
2555 unsigned int cnt = dev->id[59] & 0xff;
2556 /* only recognize/allow powers of two here */
2557 if (is_power_of_2(max) && is_power_of_2(cnt))
2558 if (cnt <= max)
2559 dev->multi_count = cnt;
2560 }
3f64f565 2561
1148c3a7 2562 if (ata_id_has_lba(id)) {
4c2d721a 2563 const char *lba_desc;
388539f3 2564 char ncq_desc[24];
8bf62ece 2565
4c2d721a
TH
2566 lba_desc = "LBA";
2567 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 2568 if (ata_id_has_lba48(id)) {
8bf62ece 2569 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 2570 lba_desc = "LBA48";
6fc49adb
TH
2571
2572 if (dev->n_sectors >= (1UL << 28) &&
2573 ata_id_has_flush_ext(id))
2574 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 2575 }
8bf62ece 2576
a6e6ce8e 2577 /* config NCQ */
388539f3
SL
2578 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2579 if (rc)
2580 return rc;
a6e6ce8e 2581
8bf62ece 2582 /* print device info to dmesg */
3f64f565 2583 if (ata_msg_drv(ap) && print_info) {
a9a79dfe
JP
2584 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2585 revbuf, modelbuf, fwrevbuf,
2586 ata_mode_string(xfer_mask));
2587 ata_dev_info(dev,
2588 "%llu sectors, multi %u: %s %s\n",
f15a1daf 2589 (unsigned long long)dev->n_sectors,
3f64f565
EM
2590 dev->multi_count, lba_desc, ncq_desc);
2591 }
ffeae418 2592 } else {
8bf62ece
AL
2593 /* CHS */
2594
2595 /* Default translation */
1148c3a7
TH
2596 dev->cylinders = id[1];
2597 dev->heads = id[3];
2598 dev->sectors = id[6];
8bf62ece 2599
1148c3a7 2600 if (ata_id_current_chs_valid(id)) {
8bf62ece 2601 /* Current CHS translation is valid. */
1148c3a7
TH
2602 dev->cylinders = id[54];
2603 dev->heads = id[55];
2604 dev->sectors = id[56];
8bf62ece
AL
2605 }
2606
2607 /* print device info to dmesg */
3f64f565 2608 if (ata_msg_drv(ap) && print_info) {
a9a79dfe
JP
2609 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2610 revbuf, modelbuf, fwrevbuf,
2611 ata_mode_string(xfer_mask));
2612 ata_dev_info(dev,
2613 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2614 (unsigned long long)dev->n_sectors,
2615 dev->multi_count, dev->cylinders,
2616 dev->heads, dev->sectors);
3f64f565 2617 }
07f6f7d0
AL
2618 }
2619
803739d2
SH
2620 /* Check and mark DevSlp capability. Get DevSlp timing variables
2621 * from SATA Settings page of Identify Device Data Log.
65fe1f0f 2622 */
803739d2 2623 if (ata_id_has_devslp(dev->id)) {
8e725c7f 2624 u8 *sata_setting = ap->sector_buf;
803739d2
SH
2625 int i, j;
2626
2627 dev->flags |= ATA_DFLAG_DEVSLP;
65fe1f0f 2628 err_mask = ata_read_log_page(dev,
1d51d5f3 2629 ATA_LOG_IDENTIFY_DEVICE,
65fe1f0f 2630 ATA_LOG_SATA_SETTINGS,
803739d2 2631 sata_setting,
65fe1f0f
SH
2632 1);
2633 if (err_mask)
2634 ata_dev_dbg(dev,
2635 "failed to get Identify Device Data, Emask 0x%x\n",
2636 err_mask);
803739d2
SH
2637 else
2638 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2639 j = ATA_LOG_DEVSLP_OFFSET + i;
2640 dev->devslp_timing[i] = sata_setting[j];
2641 }
65fe1f0f 2642 }
e87fd28c 2643 ata_dev_config_sense_reporting(dev);
6d1003ae 2644 ata_dev_config_zac(dev);
818831c8 2645 ata_dev_config_trusted(dev);
b1ffbf85 2646 dev->cdb_len = 32;
1da177e4
LT
2647 }
2648
2649 /* ATAPI-specific feature tests */
2c13b7ce 2650 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2651 const char *cdb_intr_string = "";
2652 const char *atapi_an_string = "";
91163006 2653 const char *dma_dir_string = "";
7d77b247 2654 u32 sntf;
08a556db 2655
1148c3a7 2656 rc = atapi_cdb_len(id);
1da177e4 2657 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2658 if (ata_msg_warn(ap))
a9a79dfe 2659 ata_dev_warn(dev, "unsupported CDB len\n");
ffeae418 2660 rc = -EINVAL;
1da177e4
LT
2661 goto err_out_nosup;
2662 }
6e7846e9 2663 dev->cdb_len = (unsigned int) rc;
1da177e4 2664
7d77b247
TH
2665 /* Enable ATAPI AN if both the host and device have
2666 * the support. If PMP is attached, SNTF is required
2667 * to enable ATAPI AN to discern between PHY status
2668 * changed notifications and ATAPI ANs.
9f45cbd3 2669 */
e7ecd435
TH
2670 if (atapi_an &&
2671 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
071f44b1 2672 (!sata_pmp_attached(ap) ||
7d77b247 2673 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
9f45cbd3 2674 /* issue SET feature command to turn this on */
218f3d30
JG
2675 err_mask = ata_dev_set_feature(dev,
2676 SETFEATURES_SATA_ENABLE, SATA_AN);
854c73a2 2677 if (err_mask)
a9a79dfe
JP
2678 ata_dev_err(dev,
2679 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2680 err_mask);
854c73a2 2681 else {
9f45cbd3 2682 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2683 atapi_an_string = ", ATAPI AN";
2684 }
9f45cbd3
KCA
2685 }
2686
08a556db 2687 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2688 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2689 cdb_intr_string = ", CDB intr";
2690 }
312f7da2 2691
966fbe19 2692 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
91163006
TH
2693 dev->flags |= ATA_DFLAG_DMADIR;
2694 dma_dir_string = ", DMADIR";
2695 }
2696
afe75951 2697 if (ata_id_has_da(dev->id)) {
b1354cbb 2698 dev->flags |= ATA_DFLAG_DA;
afe75951
AL
2699 zpodd_init(dev);
2700 }
b1354cbb 2701
1da177e4 2702 /* print device info to dmesg */
5afc8142 2703 if (ata_msg_drv(ap) && print_info)
a9a79dfe
JP
2704 ata_dev_info(dev,
2705 "ATAPI: %s, %s, max %s%s%s%s\n",
2706 modelbuf, fwrevbuf,
2707 ata_mode_string(xfer_mask),
2708 cdb_intr_string, atapi_an_string,
2709 dma_dir_string);
1da177e4
LT
2710 }
2711
914ed354
TH
2712 /* determine max_sectors */
2713 dev->max_sectors = ATA_MAX_SECTORS;
2714 if (dev->flags & ATA_DFLAG_LBA48)
2715 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2716
c5038fc0
AC
2717 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2718 200 sectors */
3373efd8 2719 if (ata_dev_knobble(dev)) {
5afc8142 2720 if (ata_msg_drv(ap) && print_info)
a9a79dfe 2721 ata_dev_info(dev, "applying bridge limits\n");
5a529139 2722 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2723 dev->max_sectors = ATA_MAX_SECTORS;
2724 }
2725
f8d8e579 2726 if ((dev->class == ATA_DEV_ATAPI) &&
f442cd86 2727 (atapi_command_packet_set(id) == TYPE_TAPE)) {
f8d8e579 2728 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
f442cd86
AL
2729 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2730 }
f8d8e579 2731
75683fe7 2732 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2733 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2734 dev->max_sectors);
18d6e9d5 2735
af34d637
DM
2736 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2737 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2738 dev->max_sectors);
2739
a32450e1
SH
2740 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2741 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2742
4b2f3ede 2743 if (ap->ops->dev_config)
cd0d3bbc 2744 ap->ops->dev_config(dev);
4b2f3ede 2745
c5038fc0
AC
2746 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2747 /* Let the user know. We don't want to disallow opens for
2748 rescue purposes, or in case the vendor is just a blithering
2749 idiot. Do this after the dev_config call as some controllers
2750 with buggy firmware may want to avoid reporting false device
2751 bugs */
2752
2753 if (print_info) {
a9a79dfe 2754 ata_dev_warn(dev,
c5038fc0 2755"Drive reports diagnostics failure. This may indicate a drive\n");
a9a79dfe 2756 ata_dev_warn(dev,
c5038fc0
AC
2757"fault or invalid emulation. Contact drive vendor for information.\n");
2758 }
2759 }
2760
ac70a964 2761 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
a9a79dfe
JP
2762 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2763 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
ac70a964
TH
2764 }
2765
ffeae418 2766 return 0;
1da177e4
LT
2767
2768err_out_nosup:
0dd4b21f 2769 if (ata_msg_probe(ap))
a9a79dfe 2770 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
ffeae418 2771 return rc;
1da177e4
LT
2772}
2773
be0d18df 2774/**
2e41e8e6 2775 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2776 * @ap: port
2777 *
2e41e8e6 2778 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2779 * detection.
2780 */
2781
2782int ata_cable_40wire(struct ata_port *ap)
2783{
2784 return ATA_CBL_PATA40;
2785}
2786
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2799
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2811
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_IGN.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2823
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2835
1da177e4
LT
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	/* Each device gets ATA_PROBE_MAX_TRIES attempts; the fail path
	 * below decrements and eventually disables the device. */
	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* Stash the classes reported by reset; dev->class is cleared so
	 * the identify loop below re-establishes it per attempt. */
	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* Success if at least one device ended up enabled. */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2973
3be680b7
TH
2974/**
2975 * sata_print_link_status - Print SATA link status
936fd732 2976 * @link: SATA link to printk link status about
3be680b7
TH
2977 *
2978 * This function prints link speed and status of a SATA link.
2979 *
2980 * LOCKING:
2981 * None.
2982 */
6bdb4fc9 2983static void sata_print_link_status(struct ata_link *link)
3be680b7 2984{
6d5f9732 2985 u32 sstatus, scontrol, tmp;
3be680b7 2986
936fd732 2987 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2988 return;
936fd732 2989 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2990
b1c72916 2991 if (ata_phys_link_online(link)) {
3be680b7 2992 tmp = (sstatus >> 4) & 0xf;
a9a79dfe
JP
2993 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2994 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2995 } else {
a9a79dfe
JP
2996 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2997 sstatus, scontrol);
3be680b7
TH
2998 }
2999}
3000
ebdfca6e
AC
3001/**
3002 * ata_dev_pair - return other device on cable
ebdfca6e
AC
3003 * @adev: device
3004 *
3005 * Obtain the other device on the same cable, or if none is
3006 * present NULL is returned
3007 */
2e9edbf8 3008
3373efd8 3009struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 3010{
9af5c9c9
TH
3011 struct ata_link *link = adev->link;
3012 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 3013 if (!ata_dev_enabled(pair))
ebdfca6e
AC
3014 return NULL;
3015 return pair;
3016}
3017
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	/* mask <= 1 means we are already at the lowest possible speed */
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/*
	 * Mask off all speeds higher than or equal to the current one.  At
	 * this point, if current SPD is not available and we previously
	 * recorded the link speed from SStatus, the driver has already
	 * masked off the highest bit so mask should already be 1 or 0.
	 * Otherwise, we should not force 1.5Gbps on a link where we have
	 * not previously recorded speed from SStatus.  Just return in this
	 * case.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		return -EINVAL;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		/* prefer clamping to @spd_limit; if no supported speed is
		 * that slow, fall back to the single lowest supported speed
		 */
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
3097
936fd732 3098static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 3099{
5270222f
TH
3100 struct ata_link *host_link = &link->ap->link;
3101 u32 limit, target, spd;
1c3fae4d 3102
5270222f
TH
3103 limit = link->sata_spd_limit;
3104
3105 /* Don't configure downstream link faster than upstream link.
3106 * It doesn't speed up anything and some PMPs choke on such
3107 * configuration.
3108 */
3109 if (!ata_is_host_link(link) && host_link->sata_spd)
3110 limit &= (1 << host_link->sata_spd) - 1;
3111
3112 if (limit == UINT_MAX)
3113 target = 0;
1c3fae4d 3114 else
5270222f 3115 target = fls(limit);
1c3fae4d
TH
3116
3117 spd = (*scontrol >> 4) & 0xf;
5270222f 3118 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 3119
5270222f 3120 return spd != target;
1c3fae4d
TH
3121}
3122
3123/**
3c567b7d 3124 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 3125 * @link: Link in question
1c3fae4d
TH
3126 *
3127 * Test whether the spd limit in SControl matches
936fd732 3128 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
3129 * whether hardreset is necessary to apply SATA spd
3130 * configuration.
3131 *
3132 * LOCKING:
3133 * Inherited from caller.
3134 *
3135 * RETURNS:
3136 * 1 if SATA spd configuration is needed, 0 otherwise.
3137 */
1dc55e87 3138static int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
3139{
3140 u32 scontrol;
3141
936fd732 3142 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 3143 return 1;
1c3fae4d 3144
936fd732 3145 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
3146}
3147
3148/**
3c567b7d 3149 * sata_set_spd - set SATA spd according to spd limit
936fd732 3150 * @link: Link to set SATA spd for
1c3fae4d 3151 *
936fd732 3152 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
3153 *
3154 * LOCKING:
3155 * Inherited from caller.
3156 *
3157 * RETURNS:
3158 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 3159 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 3160 */
936fd732 3161int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
3162{
3163 u32 scontrol;
81952c54 3164 int rc;
1c3fae4d 3165
936fd732 3166 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3167 return rc;
1c3fae4d 3168
936fd732 3169 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
3170 return 0;
3171
936fd732 3172 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
3173 return rc;
3174
1c3fae4d
TH
3175 return 1;
3176}
3177
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {
/*	mode		  setup act8b rec8b cyc8b active recover dmack cycle udma */
/*	{ XFER_PIO_SLOW,  120, 290, 240, 960, 290, 240,  0, 960,   0 }, */
	{ XFER_PIO_0,      70, 290, 240, 600, 165, 150,  0, 600,   0 },
	{ XFER_PIO_1,      50, 290,  93, 383, 125, 100,  0, 383,   0 },
	{ XFER_PIO_2,      30, 290,  40, 330, 100,  90,  0, 240,   0 },
	{ XFER_PIO_3,      30,  80,  70, 180,  80,  70,  0, 180,   0 },
	{ XFER_PIO_4,      25,  70,  25, 120,  70,  25,  0, 120,   0 },
	{ XFER_PIO_5,      15,  65,  25, 100,  65,  25,  0, 100,   0 },
	{ XFER_PIO_6,      10,  55,  20,  80,  55,  20,  0,  80,   0 },

	{ XFER_SW_DMA_0,  120,   0,   0,   0, 480, 480, 50, 960,   0 },
	{ XFER_SW_DMA_1,   90,   0,   0,   0, 240, 240, 30, 480,   0 },
	{ XFER_SW_DMA_2,   60,   0,   0,   0, 120, 120, 20, 240,   0 },

	{ XFER_MW_DMA_0,   60,   0,   0,   0, 215, 215, 20, 480,   0 },
	{ XFER_MW_DMA_1,   45,   0,   0,   0,  80,  50,  5, 150,   0 },
	{ XFER_MW_DMA_2,   25,   0,   0,   0,  70,  25,  5, 120,   0 },
	{ XFER_MW_DMA_3,   25,   0,   0,   0,  65,  25,  5, 100,   0 },
	{ XFER_MW_DMA_4,   25,   0,   0,   0,  55,  20,  5,  80,   0 },

/*	{ XFER_UDMA_SLOW,   0,   0,   0,   0,   0,   0,  0,   0, 150 }, */
	{ XFER_UDMA_0,      0,   0,   0,   0,   0,   0,  0,   0, 120 },
	{ XFER_UDMA_1,      0,   0,   0,   0,   0,   0,  0,   0,  80 },
	{ XFER_UDMA_2,      0,   0,   0,   0,   0,   0,  0,   0,  60 },
	{ XFER_UDMA_3,      0,   0,   0,   0,   0,   0,  0,   0,  45 },
	{ XFER_UDMA_4,      0,   0,   0,   0,   0,   0,  0,   0,  30 },
	{ XFER_UDMA_5,      0,   0,   0,   0,   0,   0,  0,   0,  20 },
	{ XFER_UDMA_6,      0,   0,   0,   0,   0,   0,  0,   0,  15 },

	/* sentinel: 0xFF is higher than any real XFER_* mode, so the
	 * linear scan in ata_timing_find_mode() always terminates here */
	{ 0xFF }
};
3221
/* round @v up to a whole number of @unit-sized clock periods */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
/* like ENOUGH but scales @v by 1000 first and maps 0 ("unspecified")
 * to 0; presumably ns -> ps with @unit being the clock period in ps --
 * TODO confirm against the T/UT values passed by host drivers */
#define EZ(v, unit)		((v)?ENOUGH(((v) * 1000), unit):0)

/*
 * Quantize timing @t into clock counts in @q: PIO/MWDMA fields are
 * scaled by clock period @T, the UDMA field by @UT.  @t and @q may
 * alias (callers pass the same struct for in-place conversion).
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup	= EZ(t->setup,      T);
	q->act8b	= EZ(t->act8b,      T);
	q->rec8b	= EZ(t->rec8b,      T);
	q->cyc8b	= EZ(t->cyc8b,      T);
	q->active	= EZ(t->active,     T);
	q->recover	= EZ(t->recover,    T);
	q->dmack_hold	= EZ(t->dmack_hold, T);
	q->cycle	= EZ(t->cycle,      T);
	q->udma		= EZ(t->udma,      UT);
}
3237
/*
 * Merge timings @a and @b into @m: for every field selected by the
 * ATA_TIMING_* bits in @what, take the slower (larger) of the two
 * values so the result satisfies both constraints.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma, b->udma);
}
3251
6357357c 3252const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 3253{
70cd071e
TH
3254 const struct ata_timing *t = ata_timing;
3255
3256 while (xfer_mode > t->mode)
3257 t++;
452503f9 3258
70cd071e
TH
3259 if (xfer_mode == t->mode)
3260 return t;
cd705d5a
BP
3261
3262 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3263 __func__, xfer_mode);
3264
70cd071e 3265 return NULL;
452503f9
AC
3266}
3267
/*
 * Compute the quantized bus-clock timing @t for @adev running at
 * transfer mode @speed.  @T and @UT are the command and UDMA clock
 * periods used for quantization (see ata_timing_quantize()).
 * Returns 0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse for the device's PIO mode and merge it in */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3343
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the slowest mode of the requested transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk successively faster modes while their cycle still fits */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3394
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is a flag OR'd into the selector */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest DMA mode, UDMA before MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to speeds safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* PIO0 must always remain available; also fail if nothing changed */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3483
/*
 * Program @dev with its selected transfer mode: issue SET FEATURES -
 * XFER (unless the NOSETXFER horkage applies on SATA), revalidate the
 * device, and decide whether a device error from SET_XFERMODE can be
 * ignored based on several known-broken-device workarounds.
 * Returns 0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER is only honored for genuine SATA devices */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	/* non-device errors are always fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}
	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
		ata_dev_info(dev, "configured for %s%s\n",
			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
			     dev_err_whine);

	return 0;

 fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3566
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		/* pick the DMA enable mask class for this device type */
		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
3665
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which 0xff timeout to use, read comment in libata.h */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from master.  On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	/* the -ENODEV grace period never extends past the overall deadline */
	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
		 * offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* after 5s of polling, tell the user once that we're waiting */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3752
/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	/* give the link a fixed settle delay before polling readiness */
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
3774
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is the earlier of @deadline and the
	 * params[2] timeout measured from now */
	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 only counts as stable once the deadline
			 * has passed (stuck-at-1 workaround, see above) */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3844
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * controllers (ata_piix SIDPR).  Make sure DET actually is
	 * cleared.
	 */
	do {
		/* DET = 0 (no action), IPM/SPD preserved, SPM disabled */
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;
		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing.
		 */
		if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
			ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		/* give up on this write, but don't fail the resume */
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			      scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL here only means SError isn't implemented; not fatal */
	return rc != -EINVAL ? rc : 0;
}
3910
/**
 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 *	@link: ATA link to manipulate SControl for
 *	@policy: LPM policy to configure
 *	@spm_wakeup: initiate LPM transition to active state
 *
 *	Manipulate the IPM field of the SControl register of @link
 *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
 *	@spm_wakeup is %true, the SPM field is manipulated to wake up
 *	the link.  This function also clears PHYRDY_CHG before
 *	returning.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x7 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x6 << 8);
		break;
	case ATA_LPM_MED_POWER_WITH_DIPM:
	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* no restrictions on LPM transitions */
			scontrol &= ~(0x7 << 8);
		else {
			/* empty port, power off */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		/* unknown policy is a caller bug; leave SControl unchanged */
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
3984
f5914a46 3985/**
0aa1113d 3986 * ata_std_prereset - prepare for reset
cc0680a5 3987 * @link: ATA link to be reset
d4b2bab4 3988 * @deadline: deadline jiffies for the operation
f5914a46 3989 *
cc0680a5 3990 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3991 * prereset makes libata abort whole reset sequence and give up
3992 * that port, so prereset should be best-effort. It does its
3993 * best to prepare for reset sequence but if things go wrong, it
3994 * should just whine, not fail.
f5914a46
TH
3995 *
3996 * LOCKING:
3997 * Kernel thread context (may sleep)
3998 *
3999 * RETURNS:
4000 * 0 on success, -errno otherwise.
4001 */
0aa1113d 4002int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 4003{
cc0680a5 4004 struct ata_port *ap = link->ap;
936fd732 4005 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 4006 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
4007 int rc;
4008
f5914a46
TH
4009 /* if we're about to do hardreset, nothing more to do */
4010 if (ehc->i.action & ATA_EH_HARDRESET)
4011 return 0;
4012
936fd732 4013 /* if SATA, resume link */
a16abc0b 4014 if (ap->flags & ATA_FLAG_SATA) {
936fd732 4015 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
4016 /* whine about phy resume failure but proceed */
4017 if (rc && rc != -EOPNOTSUPP)
a9a79dfe
JP
4018 ata_link_warn(link,
4019 "failed to resume link for reset (errno=%d)\n",
4020 rc);
f5914a46
TH
4021 }
4022
45db2f6c 4023 /* no point in trying softreset on offline link */
b1c72916 4024 if (ata_phys_link_offline(link))
45db2f6c
TH
4025 ehc->i.action &= ~ATA_EH_SOFTRESET;
4026
f5914a46
TH
4027 return 0;
4028}
4029
c2bd5804 4030/**
624d5c51
TH
4031 * sata_link_hardreset - reset link via SATA phy reset
4032 * @link: link to reset
c9b5560a 4033 * @timing: timing parameters { interval, duration, timeout } in msec
d4b2bab4 4034 * @deadline: deadline jiffies for the operation
9dadd45b
TH
4035 * @online: optional out parameter indicating link onlineness
4036 * @check_ready: optional callback to check link readiness
c2bd5804 4037 *
624d5c51 4038 * SATA phy-reset @link using DET bits of SControl register.
9dadd45b
TH
4039 * After hardreset, link readiness is waited upon using
4040 * ata_wait_ready() if @check_ready is specified. LLDs are
4041 * allowed to not specify @check_ready and wait itself after this
4042 * function returns. Device classification is LLD's
4043 * responsibility.
4044 *
4045 * *@online is set to one iff reset succeeded and @link is online
4046 * after reset.
c2bd5804
TH
4047 *
4048 * LOCKING:
4049 * Kernel thread context (may sleep)
4050 *
4051 * RETURNS:
4052 * 0 on success, -errno otherwise.
4053 */
624d5c51 4054int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
9dadd45b
TH
4055 unsigned long deadline,
4056 bool *online, int (*check_ready)(struct ata_link *))
c2bd5804 4057{
624d5c51 4058 u32 scontrol;
81952c54 4059 int rc;
852ee16a 4060
c2bd5804
TH
4061 DPRINTK("ENTER\n");
4062
9dadd45b
TH
4063 if (online)
4064 *online = false;
4065
936fd732 4066 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
4067 /* SATA spec says nothing about how to reconfigure
4068 * spd. To be on the safe side, turn off phy during
4069 * reconfiguration. This works for at least ICH7 AHCI
4070 * and Sil3124.
4071 */
936fd732 4072 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 4073 goto out;
81952c54 4074
a34b6fc0 4075 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 4076
936fd732 4077 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 4078 goto out;
1c3fae4d 4079
936fd732 4080 sata_set_spd(link);
1c3fae4d
TH
4081 }
4082
4083 /* issue phy wake/reset */
936fd732 4084 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 4085 goto out;
81952c54 4086
852ee16a 4087 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 4088
936fd732 4089 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 4090 goto out;
c2bd5804 4091
1c3fae4d 4092 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
4093 * 10.4.2 says at least 1 ms.
4094 */
97750ceb 4095 ata_msleep(link->ap, 1);
c2bd5804 4096
936fd732
TH
4097 /* bring link back */
4098 rc = sata_link_resume(link, timing, deadline);
9dadd45b
TH
4099 if (rc)
4100 goto out;
4101 /* if link is offline nothing more to do */
b1c72916 4102 if (ata_phys_link_offline(link))
9dadd45b
TH
4103 goto out;
4104
4105 /* Link is online. From this point, -ENODEV too is an error. */
4106 if (online)
4107 *online = true;
4108
071f44b1 4109 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
9dadd45b
TH
4110 /* If PMP is supported, we have to do follow-up SRST.
4111 * Some PMPs don't send D2H Reg FIS after hardreset if
4112 * the first port is empty. Wait only for
4113 * ATA_TMOUT_PMP_SRST_WAIT.
4114 */
4115 if (check_ready) {
4116 unsigned long pmp_deadline;
4117
341c2c95
TH
4118 pmp_deadline = ata_deadline(jiffies,
4119 ATA_TMOUT_PMP_SRST_WAIT);
9dadd45b
TH
4120 if (time_after(pmp_deadline, deadline))
4121 pmp_deadline = deadline;
4122 ata_wait_ready(link, pmp_deadline, check_ready);
4123 }
4124 rc = -EAGAIN;
4125 goto out;
4126 }
4127
4128 rc = 0;
4129 if (check_ready)
4130 rc = ata_wait_ready(link, deadline, check_ready);
b6103f6d 4131 out:
0cbf0711
TH
4132 if (rc && rc != -EAGAIN) {
4133 /* online is set iff link is online && reset succeeded */
4134 if (online)
4135 *online = false;
a9a79dfe 4136 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
0cbf0711 4137 }
b6103f6d
TH
4138 DPRINTK("EXIT, rc=%d\n", rc);
4139 return rc;
4140}
4141
57c9efdf
TH
4142/**
4143 * sata_std_hardreset - COMRESET w/o waiting or classification
4144 * @link: link to reset
4145 * @class: resulting class of attached device
4146 * @deadline: deadline jiffies for the operation
4147 *
4148 * Standard SATA COMRESET w/o waiting or classification.
4149 *
4150 * LOCKING:
4151 * Kernel thread context (may sleep)
4152 *
4153 * RETURNS:
4154 * 0 if link offline, -EAGAIN if link online, -errno on errors.
4155 */
4156int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4157 unsigned long deadline)
4158{
4159 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4160 bool online;
4161 int rc;
4162
4163 /* do hardreset */
4164 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
57c9efdf
TH
4165 return online ? -EAGAIN : rc;
4166}
4167
c2bd5804 4168/**
203c75b8 4169 * ata_std_postreset - standard postreset callback
cc0680a5 4170 * @link: the target ata_link
c2bd5804
TH
4171 * @classes: classes of attached devices
4172 *
4173 * This function is invoked after a successful reset. Note that
4174 * the device might have been reset more than once using
4175 * different reset methods before postreset is invoked.
c2bd5804 4176 *
c2bd5804
TH
4177 * LOCKING:
4178 * Kernel thread context (may sleep)
4179 */
203c75b8 4180void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 4181{
f046519f
TH
4182 u32 serror;
4183
c2bd5804
TH
4184 DPRINTK("ENTER\n");
4185
f046519f
TH
4186 /* reset complete, clear SError */
4187 if (!sata_scr_read(link, SCR_ERROR, &serror))
4188 sata_scr_write(link, SCR_ERROR, serror);
4189
c2bd5804 4190 /* print link status */
936fd732 4191 sata_print_link_status(link);
c2bd5804 4192
c2bd5804
TH
4193 DPRINTK("EXIT\n");
4194}
4195
623a3128
TH
4196/**
4197 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
4198 * @dev: device to compare against
4199 * @new_class: class of the new device
4200 * @new_id: IDENTIFY page of the new device
4201 *
4202 * Compare @new_class and @new_id against @dev and determine
4203 * whether @dev is the device indicated by @new_class and
4204 * @new_id.
4205 *
4206 * LOCKING:
4207 * None.
4208 *
4209 * RETURNS:
4210 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4211 */
3373efd8
TH
4212static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4213 const u16 *new_id)
623a3128
TH
4214{
4215 const u16 *old_id = dev->id;
a0cf733b
TH
4216 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4217 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
4218
4219 if (dev->class != new_class) {
a9a79dfe
JP
4220 ata_dev_info(dev, "class mismatch %d != %d\n",
4221 dev->class, new_class);
623a3128
TH
4222 return 0;
4223 }
4224
a0cf733b
TH
4225 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4226 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4227 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4228 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
4229
4230 if (strcmp(model[0], model[1])) {
a9a79dfe
JP
4231 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4232 model[0], model[1]);
623a3128
TH
4233 return 0;
4234 }
4235
4236 if (strcmp(serial[0], serial[1])) {
a9a79dfe
JP
4237 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4238 serial[0], serial[1]);
623a3128
TH
4239 return 0;
4240 }
4241
623a3128
TH
4242 return 1;
4243}
4244
4245/**
fe30911b 4246 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 4247 * @dev: target ATA device
bff04647 4248 * @readid_flags: read ID flags
623a3128
TH
4249 *
4250 * Re-read IDENTIFY page and make sure @dev is still attached to
4251 * the port.
4252 *
4253 * LOCKING:
4254 * Kernel thread context (may sleep)
4255 *
4256 * RETURNS:
4257 * 0 on success, negative errno otherwise
4258 */
fe30911b 4259int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 4260{
5eb45c02 4261 unsigned int class = dev->class;
9af5c9c9 4262 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
4263 int rc;
4264
fe635c7e 4265 /* read ID data */
bff04647 4266 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 4267 if (rc)
fe30911b 4268 return rc;
623a3128
TH
4269
4270 /* is the device still there? */
fe30911b
TH
4271 if (!ata_dev_same_device(dev, class, id))
4272 return -ENODEV;
623a3128 4273
fe635c7e 4274 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4275 return 0;
4276}
4277
4278/**
4279 * ata_dev_revalidate - Revalidate ATA device
4280 * @dev: device to revalidate
422c9daa 4281 * @new_class: new class code
fe30911b
TH
4282 * @readid_flags: read ID flags
4283 *
4284 * Re-read IDENTIFY page, make sure @dev is still attached to the
4285 * port and reconfigure it according to the new IDENTIFY page.
4286 *
4287 * LOCKING:
4288 * Kernel thread context (may sleep)
4289 *
4290 * RETURNS:
4291 * 0 on success, negative errno otherwise
4292 */
422c9daa
TH
4293int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4294 unsigned int readid_flags)
fe30911b 4295{
6ddcd3b0 4296 u64 n_sectors = dev->n_sectors;
5920dadf 4297 u64 n_native_sectors = dev->n_native_sectors;
fe30911b
TH
4298 int rc;
4299
4300 if (!ata_dev_enabled(dev))
4301 return -ENODEV;
4302
422c9daa
TH
4303 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4304 if (ata_class_enabled(new_class) &&
f0d0613d
BP
4305 new_class != ATA_DEV_ATA &&
4306 new_class != ATA_DEV_ATAPI &&
9162c657 4307 new_class != ATA_DEV_ZAC &&
f0d0613d 4308 new_class != ATA_DEV_SEMB) {
a9a79dfe
JP
4309 ata_dev_info(dev, "class mismatch %u != %u\n",
4310 dev->class, new_class);
422c9daa
TH
4311 rc = -ENODEV;
4312 goto fail;
4313 }
4314
fe30911b
TH
4315 /* re-read ID */
4316 rc = ata_dev_reread_id(dev, readid_flags);
4317 if (rc)
4318 goto fail;
623a3128
TH
4319
4320 /* configure device according to the new ID */
efdaedc4 4321 rc = ata_dev_configure(dev);
6ddcd3b0
TH
4322 if (rc)
4323 goto fail;
4324
4325 /* verify n_sectors hasn't changed */
445d211b
TH
4326 if (dev->class != ATA_DEV_ATA || !n_sectors ||
4327 dev->n_sectors == n_sectors)
4328 return 0;
4329
4330 /* n_sectors has changed */
a9a79dfe
JP
4331 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4332 (unsigned long long)n_sectors,
4333 (unsigned long long)dev->n_sectors);
445d211b
TH
4334
4335 /*
4336 * Something could have caused HPA to be unlocked
4337 * involuntarily. If n_native_sectors hasn't changed and the
4338 * new size matches it, keep the device.
4339 */
4340 if (dev->n_native_sectors == n_native_sectors &&
4341 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
a9a79dfe
JP
4342 ata_dev_warn(dev,
4343 "new n_sectors matches native, probably "
4344 "late HPA unlock, n_sectors updated\n");
68939ce5 4345 /* use the larger n_sectors */
445d211b 4346 return 0;
6ddcd3b0
TH
4347 }
4348
445d211b
TH
4349 /*
4350 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
4351 * unlocking HPA in those cases.
4352 *
4353 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4354 */
4355 if (dev->n_native_sectors == n_native_sectors &&
4356 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4357 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
a9a79dfe
JP
4358 ata_dev_warn(dev,
4359 "old n_sectors matches native, probably "
4360 "late HPA lock, will try to unlock HPA\n");
445d211b
TH
4361 /* try unlocking HPA */
4362 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4363 rc = -EIO;
4364 } else
4365 rc = -ENODEV;
623a3128 4366
445d211b
TH
4367 /* restore original n_[native_]sectors and fail */
4368 dev->n_native_sectors = n_native_sectors;
4369 dev->n_sectors = n_sectors;
623a3128 4370 fail:
a9a79dfe 4371 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
4372 return rc;
4373}
4374
6919a0a6
AC
4375struct ata_blacklist_entry {
4376 const char *model_num;
4377 const char *model_rev;
4378 unsigned long horkage;
4379};
4380
4381static const struct ata_blacklist_entry ata_device_blacklist [] = {
4382 /* Devices with DMA related problems under Linux */
4383 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4384 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4385 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4386 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4387 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4388 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4389 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4390 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4391 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
7da4c935 4392 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4393 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4394 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4395 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4396 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4397 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
7da4c935 4398 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4399 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4400 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4401 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4402 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4403 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4404 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4405 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4406 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4407 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4408 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 4409 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 4410 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
d17d794c 4411 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
b00622fc 4412 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
3af9a77a 4413 /* Odd clown on sil3726/4726 PMPs */
50af2fa1 4414 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
6919a0a6 4415
18d6e9d5 4416 /* Weird ATAPI devices */
40a1d531 4417 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
6a87e42e 4418 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
a32450e1 4419 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
0523f037 4420 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
18d6e9d5 4421
af34d637
DM
4422 /*
4423 * Causes silent data corruption with higher max sects.
4424 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4425 */
4426 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
1488a1e3
TH
4427
4428 /*
e0edc8c5 4429 * These devices time out with higher max sects.
1488a1e3
TH
4430 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4431 */
e0edc8c5 4432 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
db5ff909 4433 { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
af34d637 4434
6919a0a6
AC
4435 /* Devices we expect to fail diagnostics */
4436
4437 /* Devices where NCQ should be avoided */
4438 /* NCQ is slow */
2dcb407e 4439 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
459ad688 4440 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
09125ea6
TH
4441 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4442 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 4443 /* NCQ is broken */
539cc7c7 4444 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 4445 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
da6f0ec2 4446 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
e41bd3e8 4447 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
5ccfca97 4448 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
539cc7c7 4449
ac70a964 4450 /* Seagate NCQ + FLUSH CACHE firmware bug */
4d1f9082 4451 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ac70a964 4452 ATA_HORKAGE_FIRMWARE_WARN },
d10d491f 4453
4d1f9082 4454 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
d10d491f
TH
4455 ATA_HORKAGE_FIRMWARE_WARN },
4456
4d1f9082 4457 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
d10d491f
TH
4458 ATA_HORKAGE_FIRMWARE_WARN },
4459
4d1f9082 4460 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ac70a964
TH
4461 ATA_HORKAGE_FIRMWARE_WARN },
4462
31f6264e
HG
4463 /* drives which fail FPDMA_AA activation (some may freeze afterwards)
4464 the ST disks also have LPM issues */
4465 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA |
4466 ATA_HORKAGE_NOLPM, },
4467 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA |
4468 ATA_HORKAGE_NOLPM, },
08c85d2a 4469 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
87809942 4470
36e337d0
RH
4471 /* Blacklist entries taken from Silicon Image 3124/3132
4472 Windows driver .inf file - also several Linux problem reports */
4473 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4474 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4475 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6 4476
68b0ddb2
TH
4477 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4478 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4479
322579dc
TH
4480 /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
4481 SD7SN6S256G and SD8SN8U256G */
4482 { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
4483
16c55b03
TH
4484 /* devices which puke on READ_NATIVE_MAX */
4485 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4486 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4487 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4488 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 4489
7831387b
TH
4490 /* this one allows HPA unlocking but fails IOs on the area */
4491 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4492
93328e11
AC
4493 /* Devices which report 1 sector over size HPA */
4494 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4495 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
b152fcd3 4496 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
93328e11 4497
6bbfd53d
AC
4498 /* Devices which get the IVB wrong */
4499 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
a79067e5 4500 /* Maybe we should just blacklist TSSTcorp... */
7da4c935 4501 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
6bbfd53d 4502
9ce8e307
JA
4503 /* Devices that do not need bridging limits applied */
4504 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
04d0f1b8 4505 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
9ce8e307 4506
9062712f
TH
4507 /* Devices which aren't very happy with higher link speeds */
4508 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
c531077f 4509 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
9062712f 4510
d0cb43b3
TH
4511 /*
4512 * Devices which choke on SETXFER. Applies only if both the
4513 * device and controller are SATA.
4514 */
cd691876 4515 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
3a25179e
VL
4516 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
4517 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
cd691876
TH
4518 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4519 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
d0cb43b3 4520
b17e5729 4521 /* Crucial BX100 SSD 500GB has broken LPM support */
3bf7b5d6 4522 { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
b17e5729 4523
d418ff56
HG
4524 /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4525 { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
9c7be59f
HG
4526 ATA_HORKAGE_ZERO_AFTER_TRIM |
4527 ATA_HORKAGE_NOLPM, },
d418ff56
HG
4528 /* 512GB MX100 with newer firmware has only LPM issues */
4529 { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
4530 ATA_HORKAGE_NOLPM, },
9c7be59f 4531
62ac3f73
HG
4532 /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4533 { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4534 ATA_HORKAGE_ZERO_AFTER_TRIM |
4535 ATA_HORKAGE_NOLPM, },
4536 { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4537 ATA_HORKAGE_ZERO_AFTER_TRIM |
4538 ATA_HORKAGE_NOLPM, },
4539
76936e9a 4540 /* These specific Samsung models/firmware-revs do not handle LPM well */
b5b4d3a5 4541 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
76936e9a 4542 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
410b5c7b 4543 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
dd957493 4544 { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
b5b4d3a5 4545
f78dea06 4546 /* devices that don't properly handle queued TRIM commands */
136d769e
SM
4547 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4548 ATA_HORKAGE_ZERO_AFTER_TRIM, },
243918be 4549 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ff7f53fb
MP
4550 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4551 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4552 ATA_HORKAGE_ZERO_AFTER_TRIM, },
9051bd39 4553 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ff7f53fb
MP
4554 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4555 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4556 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4557 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
e61f7d1c 4558 ATA_HORKAGE_ZERO_AFTER_TRIM, },
ca6bfcb2
JHP
4559 { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4560 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4561 { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
6fc4d97a 4562 ATA_HORKAGE_ZERO_AFTER_TRIM, },
7a7184b0
GA
4563 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4564 ATA_HORKAGE_ZERO_AFTER_TRIM, },
e61f7d1c 4565
cda57b1b
AF
4566 /* devices that don't properly handle TRIM commands */
4567 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
4568
e61f7d1c
MP
4569 /*
4570 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4571 * (Return Zero After Trim) flags in the ATA Command Set are
4572 * unreliable in the sense that they only define what happens if
4573 * the device successfully executed the DSM TRIM command. TRIM
4574 * is only advisory, however, and the device is free to silently
4575 * ignore all or parts of the request.
4576 *
4577 * Whitelist drives that are known to reliably return zeroes
4578 * after TRIM.
4579 */
4580
4581 /*
4582 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4583 * that model before whitelisting all other intel SSDs.
4584 */
4585 { "INTEL*SSDSC2MH*", NULL, 0, },
4586
ff7f53fb
MP
4587 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4588 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
e61f7d1c
MP
4589 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4590 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4591 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4592 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
fd6f32f7 4593 { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
e61f7d1c 4594 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
f78dea06 4595
ecd75ad5
TH
4596 /*
4597 * Some WD SATA-I drives spin up and down erratically when the link
4598 * is put into the slumber mode. We don't have full list of the
4599 * affected devices. Disable LPM if the device matches one of the
4600 * known prefixes and is SATA-1. As a side effect LPM partial is
4601 * lost too.
4602 *
4603 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4604 */
4605 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4606 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4607 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4608 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4609 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4610 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4611 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4612
6919a0a6
AC
4613 /* End Marker */
4614 { }
1da177e4 4615};
2e9edbf8 4616
75683fe7 4617static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4618{
8bfa79fc
TH
4619 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4620 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4621 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4622
8bfa79fc
TH
4623 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4624 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4625
6919a0a6 4626 while (ad->model_num) {
1c402799 4627 if (glob_match(ad->model_num, model_num)) {
6919a0a6
AC
4628 if (ad->model_rev == NULL)
4629 return ad->horkage;
1c402799 4630 if (glob_match(ad->model_rev, model_rev))
6919a0a6 4631 return ad->horkage;
f4b15fef 4632 }
6919a0a6 4633 ad++;
f4b15fef 4634 }
1da177e4
LT
4635 return 0;
4636}
4637
6919a0a6
AC
4638static int ata_dma_blacklisted(const struct ata_device *dev)
4639{
4640 /* We don't support polling DMA.
4641 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4642 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4643 */
9af5c9c9 4644 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4645 (dev->flags & ATA_DFLAG_CDB_INTR))
4646 return 1;
75683fe7 4647 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4648}
4649
6bbfd53d
AC
4650/**
4651 * ata_is_40wire - check drive side detection
4652 * @dev: device
4653 *
4654 * Perform drive side detection decoding, allowing for device vendors
4655 * who can't follow the documentation.
4656 */
4657
4658static int ata_is_40wire(struct ata_device *dev)
4659{
4660 if (dev->horkage & ATA_HORKAGE_IVB)
4661 return ata_drive_40wire_relaxed(dev->id);
4662 return ata_drive_40wire(dev->id);
4663}
4664
15a5551c
AC
4665/**
4666 * cable_is_40wire - 40/80/SATA decider
4667 * @ap: port to consider
4668 *
4669 * This function encapsulates the policy for speed management
4670 * in one place. At the moment we don't cache the result but
4671 * there is a good case for setting ap->cbl to the result when
4672 * we are called with unknown cables (and figuring out if it
4673 * impacts hotplug at all).
4674 *
4675 * Return 1 if the cable appears to be 40 wire.
4676 */
4677
4678static int cable_is_40wire(struct ata_port *ap)
4679{
4680 struct ata_link *link;
4681 struct ata_device *dev;
4682
4a9c7b33 4683 /* If the controller thinks we are 40 wire, we are. */
15a5551c
AC
4684 if (ap->cbl == ATA_CBL_PATA40)
4685 return 1;
4a9c7b33
TH
4686
4687 /* If the controller thinks we are 80 wire, we are. */
15a5551c
AC
4688 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4689 return 0;
4a9c7b33
TH
4690
4691 /* If the system is known to be 40 wire short cable (eg
4692 * laptop), then we allow 80 wire modes even if the drive
4693 * isn't sure.
4694 */
f792068e
AC
4695 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4696 return 0;
4a9c7b33
TH
4697
4698 /* If the controller doesn't know, we scan.
4699 *
4700 * Note: We look for all 40 wire detects at this point. Any
4701 * 80 wire detect is taken to be 80 wire cable because
4702 * - in many setups only the one drive (slave if present) will
4703 * give a valid detect
4704 * - if you have a non detect capable drive you don't want it
4705 * to colour the choice
4706 */
1eca4365
TH
4707 ata_for_each_link(link, ap, EDGE) {
4708 ata_for_each_dev(dev, link, ENABLED) {
4709 if (!ata_is_40wire(dev))
15a5551c
AC
4710 return 0;
4711 }
4712 }
4713 return 1;
4714}
4715
a6d5a51c
TH
4716/**
4717 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4718 * @dev: Device to compute xfermask for
4719 *
acf356b1
TH
4720 * Compute supported xfermask of @dev and store it in
4721 * dev->*_mask. This function is responsible for applying all
4722 * known limits including host controller limits, device
4723 * blacklist, etc...
a6d5a51c
TH
4724 *
4725 * LOCKING:
4726 * None.
a6d5a51c 4727 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available: intersect with what the device's
	 * per-class masks and its IDENTIFY data say it supports */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 * CFA Advanced TrueIDE timings are not allowed on a shared
	 * cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	/* devices on the DMA blacklist get all DMA modes stripped */
	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* let the LLD apply controller-specific restrictions */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	/* write the final, fully-filtered masks back into the device */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4793
1da177e4
LT
4794/**
4795 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4796 * @dev: Device to which command will be sent
4797 *
780a87f7
JG
4798 * Issue SET FEATURES - XFER MODE command to device @dev
4799 * on port @ap.
4800 *
1da177e4 4801 * LOCKING:
0cba632b 4802 * PCI/etc. bus probe sem.
83206a29
TH
4803 *
4804 * RETURNS:
4805 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4806 */
4807
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;	/* nsect carries the mode value */
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;	/* PIO default, disable IORDY */
	else /* In the ancient relic department - skip all of this */
		return 0;

	/* On some disks, this command causes spin-up, so we need longer timeout */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
1152b261 4839
9f45cbd3 4840/**
218f3d30 4841 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4842 * @dev: Device to which command will be sent
4843 * @enable: Whether to enable or disable the feature
218f3d30 4844 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4845 *
4846 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4847 * on port @ap with sector count
9f45cbd3
KCA
4848 *
4849 * LOCKING:
4850 * PCI/etc. bus probe sem.
4851 *
4852 * RETURNS:
4853 * 0 on success, AC_ERR_* mask otherwise.
4854 */
unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;
	unsigned long timeout = 0;	/* 0 => ata_exec_internal default */

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;	/* subcommand goes in the feature field */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;	/* feature-specific value goes in nsect */

	/* spin-up may take much longer than a normal command */
	if (enable == SETFEATURES_SPINUP)
		timeout = ata_probe_timeout ?
			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
633de4cc 4879EXPORT_SYMBOL_GPL(ata_dev_set_feature);
1da177e4 4880
8bf62ece
AL
4881/**
4882 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4883 * @dev: Device to which command will be sent
e2a7f77a
RD
4884 * @heads: Number of heads (taskfile parameter)
4885 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4886 *
4887 * LOCKING:
6aff8f1f
TH
4888 * Kernel thread context (may sleep)
4889 *
4890 * RETURNS:
4891 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4892 */
3373efd8
TH
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4923
1da177e4 4924/**
5895ef9a 4925 * atapi_check_dma - Check whether ATAPI DMA can be supported
1da177e4
LT
4926 * @qc: Metadata associated with taskfile to check
4927 *
780a87f7
JG
4928 * Allow low-level driver to filter ATA PACKET commands, returning
4929 * a status indicating whether or not it is OK to use DMA for the
4930 * supplied PACKET command.
4931 *
1da177e4 4932 * LOCKING:
624d5c51
TH
4933 * spin_lock_irqsave(host lock)
4934 *
4935 * RETURNS: 0 when ATAPI DMA can be used
4936 * nonzero otherwise
4937 */
5895ef9a 4938int atapi_check_dma(struct ata_queued_cmd *qc)
624d5c51
TH
4939{
4940 struct ata_port *ap = qc->ap;
71601958 4941
624d5c51
TH
4942 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4943 * few ATAPI devices choke on such DMA requests.
4944 */
6a87e42e
TH
4945 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4946 unlikely(qc->nbytes & 15))
624d5c51 4947 return 1;
e2cec771 4948
624d5c51
TH
4949 if (ap->ops->check_atapi_dma)
4950 return ap->ops->check_atapi_dma(qc);
e2cec771 4951
624d5c51
TH
4952 return 0;
4953}
1da177e4 4954
624d5c51
TH
4955/**
4956 * ata_std_qc_defer - Check whether a qc needs to be deferred
4957 * @qc: ATA command in question
4958 *
4959 * Non-NCQ commands cannot run with any other command, NCQ or
4960 * not. As upper layer only knows the queue depth, we are
4961 * responsible for maintaining exclusion. This function checks
4962 * whether a new command @qc can be issued.
4963 *
4964 * LOCKING:
4965 * spin_lock_irqsave(host lock)
4966 *
4967 * RETURNS:
4968 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4969 */
4970int ata_std_qc_defer(struct ata_queued_cmd *qc)
4971{
4972 struct ata_link *link = qc->dev->link;
e2cec771 4973
179b310a 4974 if (ata_is_ncq(qc->tf.protocol)) {
624d5c51
TH
4975 if (!ata_tag_valid(link->active_tag))
4976 return 0;
4977 } else {
4978 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4979 return 0;
4980 }
e2cec771 4981
624d5c51
TH
4982 return ATA_DEFER_LINK;
4983}
6912ccd5 4984
/* No-op ->qc_prep for drivers that need no per-command preparation. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
}
1da177e4 4986
624d5c51
TH
4987/**
4988 * ata_sg_init - Associate command with scatter-gather table.
4989 * @qc: Command to be associated
4990 * @sg: Scatter-gather table.
4991 * @n_elem: Number of elements in s/g table.
4992 *
4993 * Initialize the data-related elements of queued_cmd @qc
4994 * to point to a scatter-gather table @sg, containing @n_elem
4995 * elements.
4996 *
4997 * LOCKING:
4998 * spin_lock_irqsave(host lock)
4999 */
5000void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
5001 unsigned int n_elem)
5002{
5003 qc->sg = sg;
5004 qc->n_elem = n_elem;
5005 qc->cursg = qc->sg;
5006}
bb5cb290 5007
2874d5ee
GU
5008#ifdef CONFIG_HAS_DMA
5009
5010/**
5011 * ata_sg_clean - Unmap DMA memory associated with command
5012 * @qc: Command containing DMA memory to be released
5013 *
5014 * Unmap all mapped DMA memory associated with this command.
5015 *
5016 * LOCKING:
5017 * spin_lock_irqsave(host lock)
5018 */
static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON_ONCE(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* unmap with the element count that was originally mapped
	 * (orig_n_elem); qc->n_elem holds the count dma_map_sg()
	 * returned, which may differ */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
5035
624d5c51
TH
5036/**
5037 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
5038 * @qc: Command with scatter-gather table to be mapped.
5039 *
5040 * DMA-map the scatter-gather table associated with queued_cmd @qc.
5041 *
5042 * LOCKING:
5043 * spin_lock_irqsave(host lock)
5044 *
5045 * RETURNS:
5046 * Zero on success, negative on error.
5047 *
5048 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	/* dma_map_sg() returns 0 on failure */
	if (n_elem < 1)
		return -1;

	VPRINTK("%d sg elements mapped\n", n_elem);
	/* remember the pre-mapping count for ata_sg_clean() */
	qc->orig_n_elem = qc->n_elem;
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}
5067
2874d5ee
GU
5068#else /* !CONFIG_HAS_DMA */
5069
5070static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
5071static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
5072
5073#endif /* !CONFIG_HAS_DMA */
5074
624d5c51
TH
5075/**
5076 * swap_buf_le16 - swap halves of 16-bit words in place
5077 * @buf: Buffer to swap
5078 * @buf_words: Number of 16-bit words in buffer.
5079 *
5080 * Swap halves of 16-bit words if needed to convert from
5081 * little-endian byte order to native cpu byte order, or
5082 * vice-versa.
5083 *
5084 * LOCKING:
5085 * Inherited from caller.
5086 */
5087void swap_buf_le16(u16 *buf, unsigned int buf_words)
8061f5f0 5088{
624d5c51
TH
5089#ifdef __BIG_ENDIAN
5090 unsigned int i;
8061f5f0 5091
624d5c51
TH
5092 for (i = 0; i < buf_words; i++)
5093 buf[i] = le16_to_cpu(buf[i]);
5094#endif /* __BIG_ENDIAN */
8061f5f0
TH
5095}
5096
8a8bc223 5097/**
98bd4be1
SL
5098 * ata_qc_new_init - Request an available ATA command, and initialize it
5099 * @dev: Device from whom we request an available command structure
38755e89 5100 * @tag: tag
1871ee13 5101 *
8a8bc223
TH
5102 * LOCKING:
5103 * None.
5104 */
5105
struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* libsas case: the caller's tag is ignored and one is
	 * allocated from the port's private tag pool instead */
	if (ap->flags & ATA_FLAG_SAS_HOST) {
		tag = ata_sas_allocate_tag(ap);
		if (tag < 0)
			return NULL;
	}

	qc = __ata_qc_from_tag(ap, tag);
	qc->tag = qc->hw_tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;

	ata_qc_reinit(qc);

	return qc;
}
5132
8a8bc223
TH
5133/**
5134 * ata_qc_free - free unused ata_queued_cmd
5135 * @qc: Command to complete
5136 *
5137 * Designed to free unused ata_queued_cmd object
5138 * in case something prevents using it.
5139 *
5140 * LOCKING:
5141 * spin_lock_irqsave(host lock)
5142 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	unsigned int tag;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	ap = qc->ap;

	qc->flags = 0;
	tag = qc->tag;
	if (ata_tag_valid(tag)) {
		/* poison the tag so reuse of a freed qc is detectable */
		qc->tag = ATA_TAG_POISON;
		/* libsas tags come from a private pool, return it there */
		if (ap->flags & ATA_FLAG_SAS_HOST)
			ata_sas_free_tag(tag, ap);
	}
}
5159
/* Low-level qc completion: release DMA mappings, drop the command from
 * the link/port accounting and invoke the completion callback.  Callers
 * hold the host lock. */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (ata_is_ncq(qc->tf.protocol)) {
		link->sactive &= ~(1 << qc->hw_tag);
		/* the link only counts as active while NCQ tags remain */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1ULL << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5198
39599a53
TH
5199static void fill_result_tf(struct ata_queued_cmd *qc)
5200{
5201 struct ata_port *ap = qc->ap;
5202
39599a53 5203 qc->result_tf.flags = qc->tf.flags;
22183bf5 5204 ap->ops->qc_fill_rtf(qc);
39599a53
TH
5205}
5206
00115e0f
TH
5207static void ata_verify_xfer(struct ata_queued_cmd *qc)
5208{
5209 struct ata_device *dev = qc->dev;
5210
eb0effdf 5211 if (!ata_is_data(qc->tf.protocol))
00115e0f
TH
5212 return;
5213
5214 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5215 return;
5216
5217 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5218}
5219
f686bcb8
TH
5220/**
5221 * ata_qc_complete - Complete an active ATA command
5222 * @qc: Command to complete
f686bcb8 5223 *
1aadf5c3
TH
5224 * Indicate to the mid and upper layers that an ATA command has
5225 * completed, with either an ok or not-ok status.
5226 *
5227 * Refrain from calling this function multiple times when
5228 * successfully completing multiple NCQ commands.
5229 * ata_qc_complete_multiple() should be used instead, which will
5230 * properly update IRQ expect state.
f686bcb8
TH
5231 *
5232 * LOCKING:
cca3974e 5233 * spin_lock_irqsave(host lock)
f686bcb8
TH
5234 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Trigger the LED (if available) */
	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/*
		 * Finish internal commands without any further processing
		 * and always with the result TF filled.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			trace_ata_qc_complete_internal(qc);
			__ata_qc_complete(qc);
			return;
		}

		/*
		 * Non-internal qc has failed.  Fill the result TF and
		 * summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			trace_ata_qc_complete_failed(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		/* successful completion must never happen on a frozen port */
		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		trace_ata_qc_complete_done(qc);
		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			/* only cache/read-ahead toggles need revalidation */
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF &&
			    qc->tf.feature != SETFEATURES_RA_ON &&
			    qc->tf.feature != SETFEATURES_RA_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5329
dedaf2b0
TH
5330/**
5331 * ata_qc_complete_multiple - Complete multiple qcs successfully
5332 * @ap: port in question
5333 * @qc_active: new qc_active mask
dedaf2b0
TH
5334 *
5335 * Complete in-flight commands. This functions is meant to be
5336 * called from low-level driver's interrupt routine to complete
5337 * requests normally. ap->qc_active and @qc_active is compared
5338 * and commands are completed accordingly.
5339 *
1aadf5c3
TH
5340 * Always use this function when completing multiple NCQ commands
5341 * from IRQ handlers instead of calling ata_qc_complete()
5342 * multiple times to keep IRQ expect status properly in sync.
5343 *
dedaf2b0 5344 * LOCKING:
cca3974e 5345 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5346 *
5347 * RETURNS:
5348 * Number of completed commands on success, -errno otherwise.
5349 */
int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
{
	u64 done_mask, ap_qc_active = ap->qc_active;
	int nr_done = 0;

	/*
	 * If the internal tag is set on ap->qc_active, then we care about
	 * bit0 on the passed in qc_active mask.  Move that bit up to match
	 * the internal tag.
	 */
	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
		qc_active ^= qc_active & 0x01;
	}

	/* bits set here but clear in qc_active are commands that finished */
	done_mask = ap_qc_active ^ qc_active;

	/* a bit set in qc_active that we never issued is a protocol bug */
	if (unlikely(done_mask & qc_active)) {
		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
			     ap->qc_active, qc_active);
		return -EINVAL;
	}

	while (done_mask) {
		struct ata_queued_cmd *qc;
		unsigned int tag = __ffs64(done_mask);

		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			ata_qc_complete(qc);
			nr_done++;
		}
		done_mask &= ~(1ULL << tag);
	}

	return nr_done;
}
5387
1da177e4
LT
5388/**
5389 * ata_qc_issue - issue taskfile to device
5390 * @qc: command to issue to device
5391 *
5392 * Prepare an ATA command to submission to device.
5393 * This includes mapping the data into a DMA-able
5394 * area, filling in the S/G table, and finally
5395 * writing the taskfile to hardware, starting the command.
5396 *
5397 * LOCKING:
cca3974e 5398 * spin_lock_irqsave(host lock)
1da177e4 5399 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* account the command in the link's NCQ/non-NCQ bookkeeping */
	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->hw_tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1ULL << qc->tag;

	/*
	 * We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
		goto sys_err;

	/* map the S/G table when the transfer will use DMA */
	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);
	trace_ata_qc_issue(qc);
	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	/* completion path unwinds the accounting done above */
	ata_qc_complete(qc);
}
5460
34bf2170
TH
5461/**
5462 * sata_scr_valid - test whether SCRs are accessible
936fd732 5463 * @link: ATA link to test SCR accessibility for
34bf2170 5464 *
936fd732 5465 * Test whether SCRs are accessible for @link.
34bf2170
TH
5466 *
5467 * LOCKING:
5468 * None.
5469 *
5470 * RETURNS:
5471 * 1 if SCRs are accessible, 0 otherwise.
5472 */
936fd732 5473int sata_scr_valid(struct ata_link *link)
34bf2170 5474{
936fd732
TH
5475 struct ata_port *ap = link->ap;
5476
a16abc0b 5477 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5478}
5479
5480/**
5481 * sata_scr_read - read SCR register of the specified port
936fd732 5482 * @link: ATA link to read SCR for
34bf2170
TH
5483 * @reg: SCR to read
5484 * @val: Place to store read value
5485 *
936fd732 5486 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
5487 * guaranteed to succeed if @link is ap->link, the cable type of
5488 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
5489 *
5490 * LOCKING:
633273a3 5491 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
5492 *
5493 * RETURNS:
5494 * 0 on success, negative errno on failure.
5495 */
936fd732 5496int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 5497{
633273a3 5498 if (ata_is_host_link(link)) {
633273a3 5499 if (sata_scr_valid(link))
82ef04fb 5500 return link->ap->ops->scr_read(link, reg, val);
633273a3
TH
5501 return -EOPNOTSUPP;
5502 }
5503
5504 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
5505}
5506
5507/**
5508 * sata_scr_write - write SCR register of the specified port
936fd732 5509 * @link: ATA link to write SCR for
34bf2170
TH
5510 * @reg: SCR to write
5511 * @val: value to write
5512 *
936fd732 5513 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
5514 * guaranteed to succeed if @link is ap->link, the cable type of
5515 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
5516 *
5517 * LOCKING:
633273a3 5518 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
5519 *
5520 * RETURNS:
5521 * 0 on success, negative errno on failure.
5522 */
936fd732 5523int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 5524{
633273a3 5525 if (ata_is_host_link(link)) {
633273a3 5526 if (sata_scr_valid(link))
82ef04fb 5527 return link->ap->ops->scr_write(link, reg, val);
633273a3
TH
5528 return -EOPNOTSUPP;
5529 }
936fd732 5530
633273a3 5531 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
5532}
5533
5534/**
5535 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 5536 * @link: ATA link to write SCR for
34bf2170
TH
5537 * @reg: SCR to write
5538 * @val: value to write
5539 *
5540 * This function is identical to sata_scr_write() except that this
5541 * function performs flush after writing to the register.
5542 *
5543 * LOCKING:
633273a3 5544 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
5545 *
5546 * RETURNS:
5547 * 0 on success, negative errno on failure.
5548 */
936fd732 5549int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 5550{
633273a3 5551 if (ata_is_host_link(link)) {
633273a3 5552 int rc;
da3dbb17 5553
633273a3 5554 if (sata_scr_valid(link)) {
82ef04fb 5555 rc = link->ap->ops->scr_write(link, reg, val);
633273a3 5556 if (rc == 0)
82ef04fb 5557 rc = link->ap->ops->scr_read(link, reg, &val);
633273a3
TH
5558 return rc;
5559 }
5560 return -EOPNOTSUPP;
34bf2170 5561 }
633273a3
TH
5562
5563 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
5564}
5565
5566/**
b1c72916 5567 * ata_phys_link_online - test whether the given link is online
936fd732 5568 * @link: ATA link to test
34bf2170 5569 *
936fd732
TH
5570 * Test whether @link is online. Note that this function returns
5571 * 0 if online status of @link cannot be obtained, so
5572 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5573 *
5574 * LOCKING:
5575 * None.
5576 *
5577 * RETURNS:
b5b3fa38 5578 * True if the port online status is available and online.
34bf2170 5579 */
b1c72916 5580bool ata_phys_link_online(struct ata_link *link)
34bf2170
TH
5581{
5582 u32 sstatus;
5583
936fd732 5584 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 5585 ata_sstatus_online(sstatus))
b5b3fa38
TH
5586 return true;
5587 return false;
34bf2170
TH
5588}
5589
5590/**
b1c72916 5591 * ata_phys_link_offline - test whether the given link is offline
936fd732 5592 * @link: ATA link to test
34bf2170 5593 *
936fd732
TH
5594 * Test whether @link is offline. Note that this function
5595 * returns 0 if offline status of @link cannot be obtained, so
5596 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5597 *
5598 * LOCKING:
5599 * None.
5600 *
5601 * RETURNS:
b5b3fa38 5602 * True if the port offline status is available and offline.
34bf2170 5603 */
b1c72916 5604bool ata_phys_link_offline(struct ata_link *link)
34bf2170
TH
5605{
5606 u32 sstatus;
5607
936fd732 5608 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 5609 !ata_sstatus_online(sstatus))
b5b3fa38
TH
5610 return true;
5611 return false;
34bf2170 5612}
0baab86b 5613
b1c72916
TH
5614/**
5615 * ata_link_online - test whether the given link is online
5616 * @link: ATA link to test
5617 *
5618 * Test whether @link is online. This is identical to
5619 * ata_phys_link_online() when there's no slave link. When
5620 * there's a slave link, this function should only be called on
5621 * the master link and will return true if any of M/S links is
5622 * online.
5623 *
5624 * LOCKING:
5625 * None.
5626 *
5627 * RETURNS:
5628 * True if the port online status is available and online.
5629 */
5630bool ata_link_online(struct ata_link *link)
5631{
5632 struct ata_link *slave = link->ap->slave_link;
5633
5634 WARN_ON(link == slave); /* shouldn't be called on slave link */
5635
5636 return ata_phys_link_online(link) ||
5637 (slave && ata_phys_link_online(slave));
5638}
5639
5640/**
5641 * ata_link_offline - test whether the given link is offline
5642 * @link: ATA link to test
5643 *
5644 * Test whether @link is offline. This is identical to
5645 * ata_phys_link_offline() when there's no slave link. When
5646 * there's a slave link, this function should only be called on
5647 * the master link and will return true if both M/S links are
5648 * offline.
5649 *
5650 * LOCKING:
5651 * None.
5652 *
5653 * RETURNS:
5654 * True if the port offline status is available and offline.
5655 */
5656bool ata_link_offline(struct ata_link *link)
5657{
5658 struct ata_link *slave = link->ap->slave_link;
5659
5660 WARN_ON(link == slave); /* shouldn't be called on slave link */
5661
5662 return ata_phys_link_offline(link) &&
5663 (!slave || ata_phys_link_offline(slave));
5664}
5665
6ffa01d8 5666#ifdef CONFIG_PM
bc6e7c4b
DW
/* Hand a suspend/resume request to EH.  @action/@ehi_flags are applied
 * to every link; when @async is false the caller blocks until EH has
 * processed the request. */
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
				unsigned int action, unsigned int ehi_flags,
				bool async)
{
	struct ata_link *link;
	unsigned long flags;

	/* Previous resume operation might still be in
	 * progress.  Wait for PM_PENDING to clear.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* request PM ops to EH */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	/* synchronous callers wait until EH has finished the PM op */
	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}
}
5701
bc6e7c4b
DW
5702/*
5703 * On some hardware, device fails to respond after spun down for suspend. As
5704 * the device won't be used before being resumed, we don't need to touch the
5705 * device. Ask EH to skip the usual stuff and proceed directly to suspend.
5706 *
5707 * http://thread.gmane.org/gmane.linux.ide/46764
5708 */
5709static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5710 | ATA_EHI_NO_AUTOPSY
5711 | ATA_EHI_NO_RECOVERY;
5712
/* Synchronously suspend @ap; EH skips recovery per ata_port_suspend_ehi. */
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}
5717
/* Asynchronous variant of ata_port_suspend(): returns without waiting
 * for EH to finish the suspend. */
static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
5722
bc6e7c4b 5723static int ata_port_pm_suspend(struct device *dev)
5ef41082 5724{
bc6e7c4b
DW
5725 struct ata_port *ap = to_ata_port(dev);
5726
5ef41082
LM
5727 if (pm_runtime_suspended(dev))
5728 return 0;
5729
bc6e7c4b
DW
5730 ata_port_suspend(ap, PMSG_SUSPEND);
5731 return 0;
33574d68
LM
5732}
5733
bc6e7c4b 5734static int ata_port_pm_freeze(struct device *dev)
33574d68 5735{
bc6e7c4b
DW
5736 struct ata_port *ap = to_ata_port(dev);
5737
33574d68 5738 if (pm_runtime_suspended(dev))
f5e6d0d0 5739 return 0;
33574d68 5740
bc6e7c4b
DW
5741 ata_port_suspend(ap, PMSG_FREEZE);
5742 return 0;
33574d68
LM
5743}
5744
/* Hibernation poweroff callback: suspend the port with PMSG_HIBERNATE. */
static int ata_port_pm_poweroff(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
	return 0;
}
5750
/* EH flags for resume: reset the link but stay quiet and skip autopsy. */
static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
						| ATA_EHI_QUIET;

/* Resume @ap through EH (with a reset) and wait for it to complete. */
static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}
5758
/* As ata_port_resume(), but return without waiting for EH to finish. */
static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}
5763
/* System resume callback: kick off an async EH resume, then re-arm runtime PM. */
static int ata_port_pm_resume(struct device *dev)
{
	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
	/* disable/set_active/enable sequence resets runtime PM state to active */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
5772
7e15e9be
AL
5773/*
5774 * For ODDs, the upper layer will poll for media change every few seconds,
5775 * which will make it enter and leave suspend state every few seconds. And
5776 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
5777 * is very little and the ODD may malfunction after constantly being reset.
5778 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5779 * ODD is attached to the port.
5780 */
9ee4f393
LM
5781static int ata_port_runtime_idle(struct device *dev)
5782{
7e15e9be
AL
5783 struct ata_port *ap = to_ata_port(dev);
5784 struct ata_link *link;
5785 struct ata_device *adev;
5786
5787 ata_for_each_link(link, ap, HOST_FIRST) {
5788 ata_for_each_dev(adev, link, ENABLED)
5789 if (adev->class == ATA_DEV_ATAPI &&
5790 !zpodd_dev_enabled(adev))
5791 return -EBUSY;
5792 }
5793
45f0a85c 5794 return 0;
9ee4f393
LM
5795}
5796
/* Runtime PM suspend callback: suspend the port with PMSG_AUTO_SUSPEND. */
static int ata_port_runtime_suspend(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
	return 0;
}
5802
/* Runtime PM resume callback: resume the port with PMSG_AUTO_RESUME. */
static int ata_port_runtime_resume(struct device *dev)
{
	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
	return 0;
}
5808
/* PM callbacks hooked into the driver core via ata_port_type below. */
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_pm_suspend,
	.resume = ata_port_pm_resume,
	.freeze = ata_port_pm_freeze,
	.thaw = ata_port_pm_resume,
	.poweroff = ata_port_pm_poweroff,
	.restore = ata_port_pm_resume,

	.runtime_suspend = ata_port_runtime_suspend,
	.runtime_resume = ata_port_runtime_resume,
	.runtime_idle = ata_port_runtime_idle,
};
5821
/* sas ports don't participate in pm runtime management of ata_ports,
 * and need to resume ata devices at the domain level, not the per-port
 * level. sas suspend/resume is async to allow parallel port recovery
 * since sas has multiple ata_port instances per Scsi_Host.
 */
void ata_sas_port_suspend(struct ata_port *ap)
{
	ata_port_suspend_async(ap, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
2fcbdcb4 5832
/* Asynchronous resume counterpart of ata_sas_port_suspend(). */
void ata_sas_port_resume(struct ata_port *ap)
{
	ata_port_resume_async(ap, PMSG_RESUME);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
2fcbdcb4 5838
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by port suspend;
 *	this only records the PM state on the host's device.
 *
 *	RETURNS:
 *	Always 0.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
	return 0;
}
5851
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by port resume;
 *	this only marks the host's device as powered on again.
 */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
#endif /* CONFIG_PM */
500530f6 5863
/* device_type for ATA port devices; wires in the PM ops when CONFIG_PM. */
const struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
5870
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe the region of ata_device that must be re-discovered on probe */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	/* start with all transfer modes permitted; probing narrows these */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5905
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5942
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	/* SControl is also saved for later restoration */
	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* SPD field of SControl (bits 7:4) holds the configured speed limit */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	/* apply any "libata.force" kernel-parameter limits */
	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
5975
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* port starts frozen; it is thawed when probing begins */
	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	/* real IDs are assigned later in ata_host_register() */
	ap->print_id = -1;
	ap->local_port_no = -1;
	ap->host = host;
	ap->dev = host->dev;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
		    TIMER_DEFERRABLE);

	ap->cbl = ATA_CBL_NONE;

	/* initialize the built-in host link (PMP number 0) */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
6035
2623c7a5 6036static void ata_devres_release(struct device *gendev, void *res)
f0d36efd
TH
6037{
6038 struct ata_host *host = dev_get_drvdata(gendev);
6039 int i;
6040
1aa506e4
TH
6041 for (i = 0; i < host->n_ports; i++) {
6042 struct ata_port *ap = host->ports[i];
6043
4911487a
TH
6044 if (!ap)
6045 continue;
6046
6047 if (ap->scsi_host)
1aa506e4
TH
6048 scsi_host_put(ap->scsi_host);
6049
2623c7a5
TK
6050 }
6051
6052 dev_set_drvdata(gendev, NULL);
6053 ata_host_put(host);
6054}
6055
6056static void ata_host_release(struct kref *kref)
6057{
6058 struct ata_host *host = container_of(kref, struct ata_host, kref);
6059 int i;
6060
6061 for (i = 0; i < host->n_ports; i++) {
6062 struct ata_port *ap = host->ports[i];
6063
633273a3 6064 kfree(ap->pmp_link);
b1c72916 6065 kfree(ap->slave_link);
4911487a 6066 kfree(ap);
1aa506e4
TH
6067 host->ports[i] = NULL;
6068 }
2623c7a5
TK
6069 kfree(host);
6070}
1aa506e4 6071
/* Take an additional reference on @host. */
void ata_host_get(struct ata_host *host)
{
	kref_get(&host->kref);
}
6076
/* Drop a reference on @host; frees it via ata_host_release() when last. */
void ata_host_put(struct ata_host *host)
{
	kref_put(&host->kref, ata_host_release);
}
6081
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;
	void *dr;

	DPRINTK("ENTER\n");

	/* alloc a container for our list of ATA ports (buses) */
	/* +1 keeps a NULL sentinel after the last port pointer */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = kzalloc(sz, GFP_KERNEL);
	if (!host)
		return NULL;

	/* group the devres entries so a partial init can be unwound */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		goto err_free;

	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
	if (!dr)
		goto err_out;

	devres_add(dev, dr);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;
	kref_init(&host->kref);

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	/* releasing the group triggers ata_devres_release() for cleanup */
	devres_release_group(dev, NULL);
 err_free:
	kfree(host);
	return NULL;
}
6154
f5cda257
TH
6155/**
6156 * ata_host_alloc_pinfo - alloc host and init with port_info array
6157 * @dev: generic device this host is associated with
6158 * @ppi: array of ATA port_info to initialize host with
6159 * @n_ports: number of ATA ports attached to this host
6160 *
6161 * Allocate ATA host and initialize with info from @ppi. If NULL
6162 * terminated, @ppi may contain fewer entries than @n_ports. The
6163 * last entry will be used for the remaining ports.
6164 *
6165 * RETURNS:
6166 * Allocate ATA host on success, NULL on failure.
6167 *
6168 * LOCKING:
6169 * Inherited from calling layer (may sleep).
6170 */
6171struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6172 const struct ata_port_info * const * ppi,
6173 int n_ports)
6174{
6175 const struct ata_port_info *pi;
6176 struct ata_host *host;
6177 int i, j;
6178
6179 host = ata_host_alloc(dev, n_ports);
6180 if (!host)
6181 return NULL;
6182
6183 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6184 struct ata_port *ap = host->ports[i];
6185
6186 if (ppi[j])
6187 pi = ppi[j++];
6188
6189 ap->pio_mask = pi->pio_mask;
6190 ap->mwdma_mask = pi->mwdma_mask;
6191 ap->udma_mask = pi->udma_mask;
6192 ap->flags |= pi->flags;
0c88758b 6193 ap->link.flags |= pi->link_flags;
f5cda257
TH
6194 ap->ops = pi->port_ops;
6195
6196 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6197 host->ops = pi->port_ops;
f5cda257
TH
6198 }
6199
6200 return host;
6201}
6202
/**
 *	ata_slave_link_init - initialize slave link
 *	@ap: port to initialize slave link for
 *
 *	Create and initialize slave link for @ap.  This enables slave
 *	link handling on the port.
 *
 *	In libata, a port contains links and a link contains devices.
 *	There is single host link but if a PMP is attached to it,
 *	there can be multiple fan-out links.  On SATA, there's usually
 *	a single device connected to a link but PATA and SATA
 *	controllers emulating TF based interface can have two - master
 *	and slave.
 *
 *	However, there are a few controllers which don't fit into this
 *	abstraction too well - SATA controllers which emulate TF
 *	interface with both master and slave devices but also have
 *	separate SCR register sets for each device.  These controllers
 *	need separate links for physical link handling
 *	(e.g. onlineness, link speed) but should be treated like a
 *	traditional M/S controller for everything else (e.g. command
 *	issue, softreset).
 *
 *	slave_link is libata's way of handling this class of
 *	controllers without impacting core layer too much.  For
 *	anything other than physical link handling, the default host
 *	link is used for both master and slave.  For physical link
 *	handling, separate @ap->slave_link is used.  All dirty details
 *	are implemented inside libata core layer.  From LLD's POV, the
 *	only difference is that prereset, hardreset and postreset are
 *	called once more for the slave link, so the reset sequence
 *	looks like the following.
 *
 *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
 *	softreset(M) -> postreset(M) -> postreset(S)
 *
 *	Note that softreset is called only for the master.  Softreset
 *	resets both M/S by definition, so SRST on master should handle
 *	both (the standard method will work just fine).
 *
 *	LOCKING:
 *	Should be called before host is registered.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_slave_link_init(struct ata_port *ap)
{
	struct ata_link *link;

	/* a slave link must not already exist and is mutually
	 * exclusive with port multiplier support */
	WARN_ON(ap->slave_link);
	WARN_ON(ap->flags & ATA_FLAG_PMP);

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	ata_link_init(ap, link, 1);
	ap->slave_link = link;
	return 0;
}
6264
32ebbc0c
TH
6265static void ata_host_stop(struct device *gendev, void *res)
6266{
6267 struct ata_host *host = dev_get_drvdata(gendev);
6268 int i;
6269
6270 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6271
6272 for (i = 0; i < host->n_ports; i++) {
6273 struct ata_port *ap = host->ports[i];
6274
6275 if (ap->ops->port_stop)
6276 ap->ops->port_stop(ap);
6277 }
6278
6279 if (host->ops->host_stop)
6280 host->ops->host_stop(host);
6281}
6282
/**
 *	ata_finalize_port_ops - finalize ata_port_operations
 *	@ops: ata_port_operations to finalize
 *
 *	An ata_port_operations can inherit from another ops and that
 *	ops can again inherit from another.  This can go on as many
 *	times as necessary as long as there is no loop in the
 *	inheritance chain.
 *
 *	Ops tables are finalized when the host is started.  NULL or
 *	unspecified entries are inherited from the closet ancestor
 *	which has the method and the entry is populated with it.
 *	After finalization, the ops table directly points to all the
 *	methods and ->inherits is no longer necessary and cleared.
 *
 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
 *
 *	LOCKING:
 *	None.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	/* treat the ops struct as an array of function pointers up to,
	 * but not including, the ->inherits member */
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* walk up the inheritance chain, filling in unset slots */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ATA_OP_NULL entries are IS_ERR markers; turn them into NULL */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
6332
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* register a devres action only if there is anything to stop */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				/* -ENODEV means "port absent", not a failure
				 * worth logging */
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* roll back the ports that were already started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
6412
/**
 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@ops: port_ops
 *
 *	Minimal host initialization for callers (SAS/ipr) that manage
 *	their own port allocation instead of using ata_host_alloc().
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->n_tags = ATA_MAX_QUEUE;
	host->dev = dev;
	host->ops = ops;
	kref_init(&host->kref);
}
6430
/* Schedule boot probing for @ap via EH; returns without waiting. */
void __ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* kick EH for boot probing */
	spin_lock_irqsave(ap->lock, flags);

	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
79318057 6449
/*
 * Probe @ap and wait for completion.  New-style (EH-capable) ports are
 * probed through EH; legacy ports fall back to synchronous ata_bus_probe().
 * Returns 0 on success or the ata_bus_probe() error for legacy ports.
 */
int ata_port_probe(struct ata_port *ap)
{
	int rc = 0;

	if (ap->ops->error_handler) {
		__ata_port_probe(ap);
		ata_port_wait_eh(ap);
	} else {
		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->print_id);
	}
	return rc;
}
6464
6465
/* async_schedule() callback: probe one port, then scan its SCSI host. */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.
	 * Jeff Garzik says this is only within a controller, so we
	 * don't need to wait for port 0, only for later ports.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* in order to keep device order, we need to synchronize at this point */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
238c9cf9 6487
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* Create associated sysfs transport objects  */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	/* unwind the transport objects added so far */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
6585
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessary
 *	arguments and performs the three steps in one go.
 *
 *	An invalid IRQ skips the IRQ registration and expects the host to
 *	have set polling mode on the port.  In this case, @irq_handler
 *	should be NULL.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;
	char *irq_desc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	/* name the IRQ "driver[device]" for /proc/interrupts */
	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
				  dev_driver_string(host->dev),
				  dev_name(host->dev));
	if (!irq_desc)
		return -ENOMEM;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      irq_desc, host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6647
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* legacy (non-EH) ports have nothing to tear down in EH */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* clean up zpodd on port removal */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (zpodd_dev_enabled(dev))
				zpodd_exit(dev);
		}
	}
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}
6699
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
6719
374b1873
JG
6720#ifdef CONFIG_PCI
6721
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	ata_host_detach(host);
}
6739
6740/* move to PCI subsystem */
057ace5e 6741int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6742{
6743 unsigned long tmp = 0;
6744
6745 switch (bits->width) {
6746 case 1: {
6747 u8 tmp8 = 0;
6748 pci_read_config_byte(pdev, bits->reg, &tmp8);
6749 tmp = tmp8;
6750 break;
6751 }
6752 case 2: {
6753 u16 tmp16 = 0;
6754 pci_read_config_word(pdev, bits->reg, &tmp16);
6755 tmp = tmp16;
6756 break;
6757 }
6758 case 4: {
6759 u32 tmp32 = 0;
6760 pci_read_config_dword(pdev, bits->reg, &tmp32);
6761 tmp = tmp32;
6762 break;
6763 }
6764
6765 default:
6766 return -EINVAL;
6767 }
6768
6769 tmp &= bits->mask;
6770
6771 return (tmp == bits->val) ? 1 : 0;
6772}
9b847548 6773
6ffa01d8 6774#ifdef CONFIG_PM
/* Save PCI config space and disable the device for suspend.  The
 * device is only dropped to D3hot for real sleep transitions
 * (PM_EVENT_SLEEP), not for e.g. hibernation freeze.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
6783
553c4aa6 6784int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6785{
553c4aa6
TH
6786 int rc;
6787
9b847548
JA
6788 pci_set_power_state(pdev, PCI_D0);
6789 pci_restore_state(pdev);
553c4aa6 6790
b878ca5d 6791 rc = pcim_enable_device(pdev);
553c4aa6 6792 if (rc) {
a44fec1f
JP
6793 dev_err(&pdev->dev,
6794 "failed to enable device after resume (%d)\n", rc);
553c4aa6
TH
6795 return rc;
6796 }
6797
9b847548 6798 pci_set_master(pdev);
553c4aa6 6799 return 0;
500530f6
TH
6800}
6801
3c5100c1 6802int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6803{
04a3f5b7 6804 struct ata_host *host = pci_get_drvdata(pdev);
500530f6
TH
6805 int rc = 0;
6806
cca3974e 6807 rc = ata_host_suspend(host, mesg);
500530f6
TH
6808 if (rc)
6809 return rc;
6810
3c5100c1 6811 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6812
6813 return 0;
6814}
6815
/* Resume the PCI device itself, then resume all ports of the host. */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int err = ata_pci_device_do_resume(pdev);

	if (err)
		return err;

	ata_host_resume(host);
	return 0;
}
6ffa01d8
TH
6826#endif /* CONFIG_PM */
6827
1da177e4
LT
6828#endif /* CONFIG_PCI */
6829
/**
 *	ata_platform_remove_one - Platform layer callback for device removal
 *	@pdev: Platform device that was removed
 *
 *	Platform layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from platform layer (may sleep).
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	/* devres releases the resources; only the ports need detaching */
	ata_host_detach(platform_get_drvdata(pdev));

	return 0;
}
6849
/*
 * Parse one libata.force parameter out of the comma-separated list at
 * *cur.  Format: [port[.device]:]value, e.g. "1.00:noncq".  On success
 * the parsed entry is stored in @force_ent and *cur is advanced past
 * the parameter; on failure -EINVAL is returned and *reason points to
 * a human-readable explanation.  Note: the buffer is modified in place
 * (separators are overwritten with NULs).
 */
static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	/* table of recognized force values; matched by prefix below */
	static const struct ata_force_param force_tbl[] __initconst = {
		{ "40c",	.cbl		= ATA_CBL_PATA40 },
		{ "80c",	.cbl		= ATA_CBL_PATA80 },
		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
		{ "sata",	.cbl		= ATA_CBL_SATA },
		{ "1.5Gbps",	.spd_limit	= 1 },
		{ "3.0Gbps",	.spd_limit	= 2 },
		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
	};
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;

	/* find where this param ends and update *cur */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == '\0')
		*cur = p;
	else
		*cur = p + 1;

	/* terminate this parameter in place */
	*p = '\0';

	/* parse: no ':' means the whole token is the value */
	p = strchr(start, ':');
	if (!p) {
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* parse id: an optional ".device" suffix follows the port number */
	p = strchr(id, '.');
	if (p) {
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		/* reject empty or trailing-garbage device numbers */
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		/* an exact match always wins over prefix matches */
		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}
6983
6984static void __init ata_parse_force_param(void)
6985{
6986 int idx = 0, size = 1;
6987 int last_port = -1, last_device = -1;
6988 char *p, *cur, *next;
6989
6990 /* calculate maximum number of params and allocate force_tbl */
6991 for (p = ata_force_param_buf; *p; p++)
6992 if (*p == ',')
6993 size++;
6994
6396bb22 6995 ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
33267325
TH
6996 if (!ata_force_tbl) {
6997 printk(KERN_WARNING "ata: failed to extend force table, "
6998 "libata.force ignored\n");
6999 return;
7000 }
7001
7002 /* parse and populate the table */
7003 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7004 const char *reason = "";
7005 struct ata_force_ent te = { .port = -1, .device = -1 };
7006
7007 next = cur;
7008 if (ata_parse_force_one(&next, &te, &reason)) {
7009 printk(KERN_WARNING "ata: failed to parse force "
7010 "parameter \"%s\" (%s)\n",
7011 cur, reason);
7012 continue;
7013 }
7014
7015 if (te.port == -1) {
7016 te.port = last_port;
7017 te.device = last_device;
7018 }
7019
7020 ata_force_tbl[idx++] = te;
7021
7022 last_port = te.port;
7023 last_device = te.device;
7024 }
7025
7026 ata_force_tbl_size = idx;
7027}
1da177e4 7028
1da177e4
LT
7029static int __init ata_init(void)
7030{
d9027470 7031 int rc;
270390e1 7032
33267325
TH
7033 ata_parse_force_param();
7034
270390e1 7035 rc = ata_sff_init();
ad72cf98
TH
7036 if (rc) {
7037 kfree(ata_force_tbl);
7038 return rc;
7039 }
453b07ac 7040
d9027470
GG
7041 libata_transport_init();
7042 ata_scsi_transport_template = ata_attach_transport();
7043 if (!ata_scsi_transport_template) {
7044 ata_sff_exit();
7045 rc = -ENOMEM;
7046 goto err_out;
4fca377f 7047 }
d9027470 7048
1da177e4
LT
7049 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7050 return 0;
d9027470
GG
7051
7052err_out:
7053 return rc;
1da177e4
LT
7054}
7055
static void __exit ata_exit(void)
{
	/* teardown in reverse order of ata_init() */
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	kfree(ata_force_tbl);
}
7063
a4625085 7064subsys_initcall(ata_init);
1da177e4
LT
7065module_exit(ata_exit);
7066
/* global ratelimit state: at most 1 message per HZ/5 jiffies (200ms) */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/* returns nonzero when the caller may emit a rate-limited message */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
7073
c0c362b6
TH
7074/**
7075 * ata_msleep - ATA EH owner aware msleep
7076 * @ap: ATA port to attribute the sleep to
7077 * @msecs: duration to sleep in milliseconds
7078 *
7079 * Sleeps @msecs. If the current task is owner of @ap's EH, the
7080 * ownership is released before going to sleep and reacquired
7081 * after the sleep is complete. IOW, other ports sharing the
7082 * @ap->host will be allowed to own the EH while this task is
7083 * sleeping.
7084 *
7085 * LOCKING:
7086 * Might sleep.
7087 */
97750ceb
TH
7088void ata_msleep(struct ata_port *ap, unsigned int msecs)
7089{
c0c362b6
TH
7090 bool owns_eh = ap && ap->host->eh_owner == current;
7091
7092 if (owns_eh)
7093 ata_eh_release(ap);
7094
848c3920
AVM
7095 if (msecs < 20) {
7096 unsigned long usecs = msecs * USEC_PER_MSEC;
7097 usleep_range(usecs, usecs + 50);
7098 } else {
7099 msleep(msecs);
7100 }
c0c362b6
TH
7101
7102 if (owns_eh)
7103 ata_eh_acquire(ap);
97750ceb
TH
7104}
7105
c22daff4
TH
7106/**
7107 * ata_wait_register - wait until register value changes
97750ceb 7108 * @ap: ATA port to wait register for, can be NULL
c22daff4
TH
7109 * @reg: IO-mapped register
7110 * @mask: Mask to apply to read register value
7111 * @val: Wait condition
341c2c95
TH
7112 * @interval: polling interval in milliseconds
7113 * @timeout: timeout in milliseconds
c22daff4
TH
7114 *
7115 * Waiting for some bits of register to change is a common
7116 * operation for ATA controllers. This function reads 32bit LE
7117 * IO-mapped register @reg and tests for the following condition.
7118 *
7119 * (*@reg & mask) != val
7120 *
7121 * If the condition is met, it returns; otherwise, the process is
7122 * repeated after @interval_msec until timeout.
7123 *
7124 * LOCKING:
7125 * Kernel thread context (may sleep)
7126 *
7127 * RETURNS:
7128 * The final register value.
7129 */
97750ceb 7130u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
341c2c95 7131 unsigned long interval, unsigned long timeout)
c22daff4 7132{
341c2c95 7133 unsigned long deadline;
c22daff4
TH
7134 u32 tmp;
7135
7136 tmp = ioread32(reg);
7137
7138 /* Calculate timeout _after_ the first read to make sure
7139 * preceding writes reach the controller before starting to
7140 * eat away the timeout.
7141 */
341c2c95 7142 deadline = ata_deadline(jiffies, timeout);
c22daff4 7143
341c2c95 7144 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
97750ceb 7145 ata_msleep(ap, interval);
c22daff4
TH
7146 tmp = ioread32(reg);
7147 }
7148
7149 return tmp;
7150}
7151
8393b811
GM
7152/**
7153 * sata_lpm_ignore_phy_events - test if PHY event should be ignored
7154 * @link: Link receiving the event
7155 *
7156 * Test whether the received PHY event has to be ignored or not.
7157 *
7158 * LOCKING:
7159 * None:
7160 *
7161 * RETURNS:
7162 * True if the event has to be ignored.
7163 */
7164bool sata_lpm_ignore_phy_events(struct ata_link *link)
7165{
09c5b480
GM
7166 unsigned long lpm_timeout = link->last_lpm_change +
7167 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
7168
8393b811 7169 /* if LPM is enabled, PHYRDY doesn't mean anything */
09c5b480
GM
7170 if (link->lpm_policy > ATA_LPM_MAX_POWER)
7171 return true;
7172
7173 /* ignore the first PHY event after the LPM policy changed
7174 * as it is might be spurious
7175 */
7176 if ((link->flags & ATA_LFLAG_CHANGED) &&
7177 time_before(jiffies, lpm_timeout))
7178 return true;
7179
7180 return false;
8393b811
GM
7181}
7182EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
7183
/*
 * Dummy port_ops: used for ports with no usable hardware behind them.
 * Every queued command fails immediately and error handling is a no-op.
 */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	/* nothing can ever be issued on a dummy port */
	return AC_ERR_SYSTEM;
}

static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}

struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.error_handler		= ata_dummy_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
7208
a9a79dfe
JP
7209/*
7210 * Utility print functions
7211 */
d7bead1b
JP
7212void ata_port_printk(const struct ata_port *ap, const char *level,
7213 const char *fmt, ...)
a9a79dfe
JP
7214{
7215 struct va_format vaf;
7216 va_list args;
a9a79dfe
JP
7217
7218 va_start(args, fmt);
7219
7220 vaf.fmt = fmt;
7221 vaf.va = &args;
7222
d7bead1b 7223 printk("%sata%u: %pV", level, ap->print_id, &vaf);
a9a79dfe
JP
7224
7225 va_end(args);
a9a79dfe
JP
7226}
7227EXPORT_SYMBOL(ata_port_printk);
7228
/* Log helper for a link: the prefix is "ataN.MM:" when the port has a
 * PMP or slave link attached (so the link number matters), otherwise
 * plain "ataN:".
 */
void ata_link_printk(const struct ata_link *link, const char *level,
		     const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
		printk("%sata%u.%02u: %pV",
		       level, link->ap->print_id, link->pmp, &vaf);
	else
		printk("%sata%u: %pV",
		       level, link->ap->print_id, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_link_printk);
7250
/* Log helper for a device: prefix is "ataN.MM:" where MM is the PMP
 * link number plus the device number on that link.
 */
void ata_dev_printk(const struct ata_device *dev, const char *level,
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk("%sata%u.%02u: %pV",
	       level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
	       &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ata_dev_printk);
7269
/* Emit a driver's version string for @dev at KERN_DEBUG level. */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
7275
1da177e4
LT
7276/*
7277 * libata is essentially a library of internal helper functions for
7278 * low-level ATA host controller drivers. As such, the API/ABI is
7279 * likely to change as new drivers are added and updated.
7280 * Do not depend on ABI/API stability.
7281 */
e9c83914
TH
7282EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7283EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7284EXPORT_SYMBOL_GPL(sata_deb_timing_long);
029cfd6b
TH
7285EXPORT_SYMBOL_GPL(ata_base_port_ops);
7286EXPORT_SYMBOL_GPL(sata_port_ops);
dd5b06c4 7287EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7288EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1eca4365
TH
7289EXPORT_SYMBOL_GPL(ata_link_next);
7290EXPORT_SYMBOL_GPL(ata_dev_next);
1da177e4 7291EXPORT_SYMBOL_GPL(ata_std_bios_param);
d8d9129e 7292EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
cca3974e 7293EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7294EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7295EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
b1c72916 7296EXPORT_SYMBOL_GPL(ata_slave_link_init);
ecef7253 7297EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7298EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7299EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7300EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 7301EXPORT_SYMBOL_GPL(ata_sg_init);
f686bcb8 7302EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7303EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
436d34b3 7304EXPORT_SYMBOL_GPL(atapi_cmd_type);
1da177e4
LT
7305EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7306EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
7307EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7308EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7309EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7310EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7311EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7312EXPORT_SYMBOL_GPL(ata_mode_string);
7313EXPORT_SYMBOL_GPL(ata_id_xfermask);
04351821 7314EXPORT_SYMBOL_GPL(ata_do_set_mode);
31cc23b3 7315EXPORT_SYMBOL_GPL(ata_std_qc_defer);
e46834cd 7316EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
10305f0f 7317EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7318EXPORT_SYMBOL_GPL(sata_set_spd);
aa2731ad 7319EXPORT_SYMBOL_GPL(ata_wait_after_reset);
936fd732
TH
7320EXPORT_SYMBOL_GPL(sata_link_debounce);
7321EXPORT_SYMBOL_GPL(sata_link_resume);
1152b261 7322EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
0aa1113d 7323EXPORT_SYMBOL_GPL(ata_std_prereset);
cc0680a5 7324EXPORT_SYMBOL_GPL(sata_link_hardreset);
57c9efdf 7325EXPORT_SYMBOL_GPL(sata_std_hardreset);
203c75b8 7326EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7327EXPORT_SYMBOL_GPL(ata_dev_classify);
7328EXPORT_SYMBOL_GPL(ata_dev_pair);
67846b30 7329EXPORT_SYMBOL_GPL(ata_ratelimit);
97750ceb 7330EXPORT_SYMBOL_GPL(ata_msleep);
c22daff4 7331EXPORT_SYMBOL_GPL(ata_wait_register);
1da177e4 7332EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7333EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7334EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7335EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
f6e67035 7336EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
34bf2170
TH
7337EXPORT_SYMBOL_GPL(sata_scr_valid);
7338EXPORT_SYMBOL_GPL(sata_scr_read);
7339EXPORT_SYMBOL_GPL(sata_scr_write);
7340EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7341EXPORT_SYMBOL_GPL(ata_link_online);
7342EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7343#ifdef CONFIG_PM
cca3974e
JG
7344EXPORT_SYMBOL_GPL(ata_host_suspend);
7345EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7346#endif /* CONFIG_PM */
6a62a04d
TH
7347EXPORT_SYMBOL_GPL(ata_id_string);
7348EXPORT_SYMBOL_GPL(ata_id_c_string);
963e4975 7349EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1da177e4
LT
7350EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7351
1bc4ccff 7352EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 7353EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
7354EXPORT_SYMBOL_GPL(ata_timing_compute);
7355EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 7356EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 7357
1da177e4
LT
7358#ifdef CONFIG_PCI
7359EXPORT_SYMBOL_GPL(pci_test_config_bits);
1da177e4 7360EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7361#ifdef CONFIG_PM
500530f6
TH
7362EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7363EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7364EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7365EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7366#endif /* CONFIG_PM */
1da177e4 7367#endif /* CONFIG_PCI */
9b847548 7368
b7db04d9
BN
7369EXPORT_SYMBOL_GPL(ata_platform_remove_one);
7370
b64bbc39
TH
7371EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7372EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7373EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7374EXPORT_SYMBOL_GPL(ata_port_desc);
7375#ifdef CONFIG_PCI
7376EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7377#endif /* CONFIG_PCI */
7b70fc03 7378EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7379EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7380EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 7381EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 7382EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
7383EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7384EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7385EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7386EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
10acf3b0 7387EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
022bdb07 7388EXPORT_SYMBOL_GPL(ata_do_eh);
a1efdaba 7389EXPORT_SYMBOL_GPL(ata_std_error_handler);
be0d18df
AC
7390
7391EXPORT_SYMBOL_GPL(ata_cable_40wire);
7392EXPORT_SYMBOL_GPL(ata_cable_80wire);
7393EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 7394EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 7395EXPORT_SYMBOL_GPL(ata_cable_sata);
2fa4a326 7396EXPORT_SYMBOL_GPL(ata_host_get);
4e8065aa 7397EXPORT_SYMBOL_GPL(ata_host_put);