libnvdimm/altmap: Track namespace boundaries in altmap
[linux-2.6-block.git] / drivers / ata / libata-core.c
CommitLineData
c82ee6d3 1// SPDX-License-Identifier: GPL-2.0-or-later
1da177e4 2/*
af36d7f0
JG
3 * libata-core.c - helper library for ATA
4 *
8c3d3d4b 5 * Maintained by: Tejun Heo <tj@kernel.org>
af36d7f0
JG
6 * Please ALWAYS copy linux-ide@vger.kernel.org
7 * on emails.
8 *
9 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
10 * Copyright 2003-2004 Jeff Garzik
11 *
af36d7f0 12 * libata documentation is available via 'make {ps|pdf}docs',
19285f3c 13 * as Documentation/driver-api/libata.rst
af36d7f0
JG
14 *
15 * Hardware documentation available from http://www.t13.org/ and
16 * http://www.sata-io.org/
17 *
92c52c52
AC
18 * Standards documents from:
19 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
20 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
21 * http://www.sata-io.org (SATA)
22 * http://www.compactflash.org (CF)
23 * http://www.qic.org (QIC157 - Tape and DSC)
24 * http://www.ce-ata.org (CE-ATA: not supported)
1da177e4
LT
25 */
26
1da177e4
LT
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/list.h>
32#include <linux/mm.h>
1da177e4
LT
33#include <linux/spinlock.h>
34#include <linux/blkdev.h>
35#include <linux/delay.h>
36#include <linux/timer.h>
848c3920 37#include <linux/time.h>
1da177e4
LT
38#include <linux/interrupt.h>
39#include <linux/completion.h>
40#include <linux/suspend.h>
41#include <linux/workqueue.h>
378f058c 42#include <linux/scatterlist.h>
2dcb407e 43#include <linux/io.h>
79318057 44#include <linux/async.h>
e18086d6 45#include <linux/log2.h>
5a0e3ad6 46#include <linux/slab.h>
428ac5fc 47#include <linux/glob.h>
1da177e4 48#include <scsi/scsi.h>
193515d5 49#include <scsi/scsi_cmnd.h>
1da177e4
LT
50#include <scsi/scsi_host.h>
51#include <linux/libata.h>
1da177e4 52#include <asm/byteorder.h>
fe5af0cc 53#include <asm/unaligned.h>
140b5e59 54#include <linux/cdrom.h>
9990b6f3 55#include <linux/ratelimit.h>
eb25cb99 56#include <linux/leds.h>
9ee4f393 57#include <linux/pm_runtime.h>
b7db04d9 58#include <linux/platform_device.h>
1da177e4 59
255c03d1
HR
60#define CREATE_TRACE_POINTS
61#include <trace/events/libata.h>
62
1da177e4 63#include "libata.h"
d9027470 64#include "libata-transport.h"
fda0efc5 65
d7bb4cc7 66/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
67const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
68const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
69const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 70
029cfd6b 71const struct ata_port_operations ata_base_port_ops = {
0aa1113d 72 .prereset = ata_std_prereset,
203c75b8 73 .postreset = ata_std_postreset,
a1efdaba 74 .error_handler = ata_std_error_handler,
e4a9c373
DW
75 .sched_eh = ata_std_sched_eh,
76 .end_eh = ata_std_end_eh,
029cfd6b
TH
77};
78
79const struct ata_port_operations sata_port_ops = {
80 .inherits = &ata_base_port_ops,
81
82 .qc_defer = ata_std_qc_defer,
57c9efdf 83 .hardreset = sata_std_hardreset,
029cfd6b
TH
84};
85
3373efd8
TH
86static unsigned int ata_dev_init_params(struct ata_device *dev,
87 u16 heads, u16 sectors);
88static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
89static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 90static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 91
a78f57af 92atomic_t ata_print_id = ATOMIC_INIT(0);
1da177e4 93
33267325
TH
94struct ata_force_param {
95 const char *name;
96 unsigned int cbl;
97 int spd_limit;
98 unsigned long xfer_mask;
99 unsigned int horkage_on;
100 unsigned int horkage_off;
05944bdf 101 unsigned int lflags;
33267325
TH
102};
103
104struct ata_force_ent {
105 int port;
106 int device;
107 struct ata_force_param param;
108};
109
110static struct ata_force_ent *ata_force_tbl;
111static int ata_force_tbl_size;
112
113static char ata_force_param_buf[PAGE_SIZE] __initdata;
7afb4222
TH
114/* param_buf is thrown away after initialization, disallow read */
115module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
8c27ceff 116MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
33267325 117
2486fa56 118static int atapi_enabled = 1;
1623c81e 119module_param(atapi_enabled, int, 0444);
ad5d8eac 120MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
1623c81e 121
c5c61bda 122static int atapi_dmadir = 0;
95de719a 123module_param(atapi_dmadir, int, 0444);
ad5d8eac 124MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
95de719a 125
baf4fdfa
ML
126int atapi_passthru16 = 1;
127module_param(atapi_passthru16, int, 0444);
ad5d8eac 128MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
baf4fdfa 129
c3c013a2
JG
130int libata_fua = 0;
131module_param_named(fua, libata_fua, int, 0444);
ad5d8eac 132MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
c3c013a2 133
2dcb407e 134static int ata_ignore_hpa;
1e999736
AC
135module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
136MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
137
b3a70601
AC
138static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
139module_param_named(dma, libata_dma_mask, int, 0444);
140MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
141
87fbc5a0 142static int ata_probe_timeout;
a8601e5f
AM
143module_param(ata_probe_timeout, int, 0444);
144MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
145
6ebe9d86 146int libata_noacpi = 0;
d7d0dad6 147module_param_named(noacpi, libata_noacpi, int, 0444);
ad5d8eac 148MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
11ef697b 149
ae8d4ee7
AC
150int libata_allow_tpm = 0;
151module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
ad5d8eac 152MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
ae8d4ee7 153
e7ecd435
TH
154static int atapi_an;
155module_param(atapi_an, int, 0444);
156MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
157
1da177e4
LT
158MODULE_AUTHOR("Jeff Garzik");
159MODULE_DESCRIPTION("Library module for ATA devices");
160MODULE_LICENSE("GPL");
161MODULE_VERSION(DRV_VERSION);
162
0baab86b 163
9913ff8a
TH
164static bool ata_sstatus_online(u32 sstatus)
165{
166 return (sstatus & 0xf) == 0x3;
167}
168
1eca4365
TH
169/**
170 * ata_link_next - link iteration helper
171 * @link: the previous link, NULL to start
172 * @ap: ATA port containing links to iterate
173 * @mode: iteration mode, one of ATA_LITER_*
174 *
175 * LOCKING:
176 * Host lock or EH context.
aadffb68 177 *
1eca4365
TH
178 * RETURNS:
179 * Pointer to the next link.
aadffb68 180 */
1eca4365
TH
181struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
182 enum ata_link_iter_mode mode)
aadffb68 183{
1eca4365
TH
184 BUG_ON(mode != ATA_LITER_EDGE &&
185 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
186
aadffb68 187 /* NULL link indicates start of iteration */
1eca4365
TH
188 if (!link)
189 switch (mode) {
190 case ATA_LITER_EDGE:
191 case ATA_LITER_PMP_FIRST:
192 if (sata_pmp_attached(ap))
193 return ap->pmp_link;
194 /* fall through */
195 case ATA_LITER_HOST_FIRST:
196 return &ap->link;
197 }
aadffb68 198
1eca4365
TH
199 /* we just iterated over the host link, what's next? */
200 if (link == &ap->link)
201 switch (mode) {
202 case ATA_LITER_HOST_FIRST:
203 if (sata_pmp_attached(ap))
204 return ap->pmp_link;
205 /* fall through */
206 case ATA_LITER_PMP_FIRST:
207 if (unlikely(ap->slave_link))
b1c72916 208 return ap->slave_link;
1eca4365
TH
209 /* fall through */
210 case ATA_LITER_EDGE:
aadffb68 211 return NULL;
b1c72916 212 }
aadffb68 213
b1c72916
TH
214 /* slave_link excludes PMP */
215 if (unlikely(link == ap->slave_link))
216 return NULL;
217
1eca4365 218 /* we were over a PMP link */
aadffb68
TH
219 if (++link < ap->pmp_link + ap->nr_pmp_links)
220 return link;
1eca4365
TH
221
222 if (mode == ATA_LITER_PMP_FIRST)
223 return &ap->link;
224
aadffb68
TH
225 return NULL;
226}
227
1eca4365
TH
228/**
229 * ata_dev_next - device iteration helper
230 * @dev: the previous device, NULL to start
231 * @link: ATA link containing devices to iterate
232 * @mode: iteration mode, one of ATA_DITER_*
233 *
234 * LOCKING:
235 * Host lock or EH context.
236 *
237 * RETURNS:
238 * Pointer to the next device.
239 */
240struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
241 enum ata_dev_iter_mode mode)
242{
243 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
244 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
245
246 /* NULL dev indicates start of iteration */
247 if (!dev)
248 switch (mode) {
249 case ATA_DITER_ENABLED:
250 case ATA_DITER_ALL:
251 dev = link->device;
252 goto check;
253 case ATA_DITER_ENABLED_REVERSE:
254 case ATA_DITER_ALL_REVERSE:
255 dev = link->device + ata_link_max_devices(link) - 1;
256 goto check;
257 }
258
259 next:
260 /* move to the next one */
261 switch (mode) {
262 case ATA_DITER_ENABLED:
263 case ATA_DITER_ALL:
264 if (++dev < link->device + ata_link_max_devices(link))
265 goto check;
266 return NULL;
267 case ATA_DITER_ENABLED_REVERSE:
268 case ATA_DITER_ALL_REVERSE:
269 if (--dev >= link->device)
270 goto check;
271 return NULL;
272 }
273
274 check:
275 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
276 !ata_dev_enabled(dev))
277 goto next;
278 return dev;
279}
280
b1c72916
TH
281/**
282 * ata_dev_phys_link - find physical link for a device
283 * @dev: ATA device to look up physical link for
284 *
285 * Look up physical link which @dev is attached to. Note that
286 * this is different from @dev->link only when @dev is on slave
287 * link. For all other cases, it's the same as @dev->link.
288 *
289 * LOCKING:
290 * Don't care.
291 *
292 * RETURNS:
293 * Pointer to the found physical link.
294 */
295struct ata_link *ata_dev_phys_link(struct ata_device *dev)
296{
297 struct ata_port *ap = dev->link->ap;
298
299 if (!ap->slave_link)
300 return dev->link;
301 if (!dev->devno)
302 return &ap->link;
303 return ap->slave_link;
304}
305
33267325
TH
306/**
307 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 308 * @ap: ATA port of interest
33267325
TH
309 *
310 * Force cable type according to libata.force and whine about it.
311 * The last entry which has matching port number is used, so it
312 * can be specified as part of device force parameters. For
313 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
314 * same effect.
315 *
316 * LOCKING:
317 * EH context.
318 */
319void ata_force_cbl(struct ata_port *ap)
320{
321 int i;
322
323 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
324 const struct ata_force_ent *fe = &ata_force_tbl[i];
325
326 if (fe->port != -1 && fe->port != ap->print_id)
327 continue;
328
329 if (fe->param.cbl == ATA_CBL_NONE)
330 continue;
331
332 ap->cbl = fe->param.cbl;
a9a79dfe 333 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
33267325
TH
334 return;
335 }
336}
337
338/**
05944bdf 339 * ata_force_link_limits - force link limits according to libata.force
33267325
TH
340 * @link: ATA link of interest
341 *
05944bdf
TH
342 * Force link flags and SATA spd limit according to libata.force
343 * and whine about it. When only the port part is specified
344 * (e.g. 1:), the limit applies to all links connected to both
345 * the host link and all fan-out ports connected via PMP. If the
346 * device part is specified as 0 (e.g. 1.00:), it specifies the
347 * first fan-out link not the host link. Device number 15 always
b1c72916
TH
348 * points to the host link whether PMP is attached or not. If the
349 * controller has slave link, device number 16 points to it.
33267325
TH
350 *
351 * LOCKING:
352 * EH context.
353 */
05944bdf 354static void ata_force_link_limits(struct ata_link *link)
33267325 355{
05944bdf 356 bool did_spd = false;
b1c72916
TH
357 int linkno = link->pmp;
358 int i;
33267325
TH
359
360 if (ata_is_host_link(link))
b1c72916 361 linkno += 15;
33267325
TH
362
363 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
364 const struct ata_force_ent *fe = &ata_force_tbl[i];
365
366 if (fe->port != -1 && fe->port != link->ap->print_id)
367 continue;
368
369 if (fe->device != -1 && fe->device != linkno)
370 continue;
371
05944bdf
TH
372 /* only honor the first spd limit */
373 if (!did_spd && fe->param.spd_limit) {
374 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
a9a79dfe 375 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
05944bdf
TH
376 fe->param.name);
377 did_spd = true;
378 }
33267325 379
05944bdf
TH
380 /* let lflags stack */
381 if (fe->param.lflags) {
382 link->flags |= fe->param.lflags;
a9a79dfe 383 ata_link_notice(link,
05944bdf
TH
384 "FORCE: link flag 0x%x forced -> 0x%x\n",
385 fe->param.lflags, link->flags);
386 }
33267325
TH
387 }
388}
389
390/**
391 * ata_force_xfermask - force xfermask according to libata.force
392 * @dev: ATA device of interest
393 *
394 * Force xfer_mask according to libata.force and whine about it.
395 * For consistency with link selection, device number 15 selects
396 * the first device connected to the host link.
397 *
398 * LOCKING:
399 * EH context.
400 */
401static void ata_force_xfermask(struct ata_device *dev)
402{
403 int devno = dev->link->pmp + dev->devno;
404 int alt_devno = devno;
405 int i;
406
b1c72916
TH
407 /* allow n.15/16 for devices attached to host port */
408 if (ata_is_host_link(dev->link))
409 alt_devno += 15;
33267325
TH
410
411 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
412 const struct ata_force_ent *fe = &ata_force_tbl[i];
413 unsigned long pio_mask, mwdma_mask, udma_mask;
414
415 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
416 continue;
417
418 if (fe->device != -1 && fe->device != devno &&
419 fe->device != alt_devno)
420 continue;
421
422 if (!fe->param.xfer_mask)
423 continue;
424
425 ata_unpack_xfermask(fe->param.xfer_mask,
426 &pio_mask, &mwdma_mask, &udma_mask);
427 if (udma_mask)
428 dev->udma_mask = udma_mask;
429 else if (mwdma_mask) {
430 dev->udma_mask = 0;
431 dev->mwdma_mask = mwdma_mask;
432 } else {
433 dev->udma_mask = 0;
434 dev->mwdma_mask = 0;
435 dev->pio_mask = pio_mask;
436 }
437
a9a79dfe
JP
438 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
439 fe->param.name);
33267325
TH
440 return;
441 }
442}
443
444/**
445 * ata_force_horkage - force horkage according to libata.force
446 * @dev: ATA device of interest
447 *
448 * Force horkage according to libata.force and whine about it.
449 * For consistency with link selection, device number 15 selects
450 * the first device connected to the host link.
451 *
452 * LOCKING:
453 * EH context.
454 */
455static void ata_force_horkage(struct ata_device *dev)
456{
457 int devno = dev->link->pmp + dev->devno;
458 int alt_devno = devno;
459 int i;
460
b1c72916
TH
461 /* allow n.15/16 for devices attached to host port */
462 if (ata_is_host_link(dev->link))
463 alt_devno += 15;
33267325
TH
464
465 for (i = 0; i < ata_force_tbl_size; i++) {
466 const struct ata_force_ent *fe = &ata_force_tbl[i];
467
468 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
469 continue;
470
471 if (fe->device != -1 && fe->device != devno &&
472 fe->device != alt_devno)
473 continue;
474
475 if (!(~dev->horkage & fe->param.horkage_on) &&
476 !(dev->horkage & fe->param.horkage_off))
477 continue;
478
479 dev->horkage |= fe->param.horkage_on;
480 dev->horkage &= ~fe->param.horkage_off;
481
a9a79dfe
JP
482 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
483 fe->param.name);
33267325
TH
484 }
485}
486
436d34b3
TH
487/**
488 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
489 * @opcode: SCSI opcode
490 *
491 * Determine ATAPI command type from @opcode.
492 *
493 * LOCKING:
494 * None.
495 *
496 * RETURNS:
497 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
498 */
499int atapi_cmd_type(u8 opcode)
500{
501 switch (opcode) {
502 case GPCMD_READ_10:
503 case GPCMD_READ_12:
504 return ATAPI_READ;
505
506 case GPCMD_WRITE_10:
507 case GPCMD_WRITE_12:
508 case GPCMD_WRITE_AND_VERIFY_10:
509 return ATAPI_WRITE;
510
511 case GPCMD_READ_CD:
512 case GPCMD_READ_CD_MSF:
513 return ATAPI_READ_CD;
514
e52dcc48
TH
515 case ATA_16:
516 case ATA_12:
517 if (atapi_passthru16)
518 return ATAPI_PASS_THRU;
519 /* fall thru */
436d34b3
TH
520 default:
521 return ATAPI_MISC;
522 }
523}
524
1da177e4
LT
525/**
526 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
527 * @tf: Taskfile to convert
1da177e4 528 * @pmp: Port multiplier port
9977126c
TH
529 * @is_cmd: This FIS is for command
530 * @fis: Buffer into which data will output
1da177e4
LT
531 *
532 * Converts a standard ATA taskfile to a Serial ATA
533 * FIS structure (Register - Host to Device).
534 *
535 * LOCKING:
536 * Inherited from caller.
537 */
9977126c 538void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 539{
9977126c
TH
540 fis[0] = 0x27; /* Register - Host to Device FIS */
541 fis[1] = pmp & 0xf; /* Port multiplier number*/
542 if (is_cmd)
543 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
544
1da177e4
LT
545 fis[2] = tf->command;
546 fis[3] = tf->feature;
547
548 fis[4] = tf->lbal;
549 fis[5] = tf->lbam;
550 fis[6] = tf->lbah;
551 fis[7] = tf->device;
552
553 fis[8] = tf->hob_lbal;
554 fis[9] = tf->hob_lbam;
555 fis[10] = tf->hob_lbah;
556 fis[11] = tf->hob_feature;
557
558 fis[12] = tf->nsect;
559 fis[13] = tf->hob_nsect;
560 fis[14] = 0;
561 fis[15] = tf->ctl;
562
86a565e6
MC
563 fis[16] = tf->auxiliary & 0xff;
564 fis[17] = (tf->auxiliary >> 8) & 0xff;
565 fis[18] = (tf->auxiliary >> 16) & 0xff;
566 fis[19] = (tf->auxiliary >> 24) & 0xff;
1da177e4
LT
567}
568
569/**
570 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
571 * @fis: Buffer from which data will be input
572 * @tf: Taskfile to output
573 *
e12a1be6 574 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
575 *
576 * LOCKING:
577 * Inherited from caller.
578 */
579
057ace5e 580void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
581{
582 tf->command = fis[2]; /* status */
583 tf->feature = fis[3]; /* error */
584
585 tf->lbal = fis[4];
586 tf->lbam = fis[5];
587 tf->lbah = fis[6];
588 tf->device = fis[7];
589
590 tf->hob_lbal = fis[8];
591 tf->hob_lbam = fis[9];
592 tf->hob_lbah = fis[10];
593
594 tf->nsect = fis[12];
595 tf->hob_nsect = fis[13];
596}
597
8cbd6df1
AL
598static const u8 ata_rw_cmds[] = {
599 /* pio multi */
600 ATA_CMD_READ_MULTI,
601 ATA_CMD_WRITE_MULTI,
602 ATA_CMD_READ_MULTI_EXT,
603 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
604 0,
605 0,
606 0,
607 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
608 /* pio */
609 ATA_CMD_PIO_READ,
610 ATA_CMD_PIO_WRITE,
611 ATA_CMD_PIO_READ_EXT,
612 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
613 0,
614 0,
615 0,
616 0,
8cbd6df1
AL
617 /* dma */
618 ATA_CMD_READ,
619 ATA_CMD_WRITE,
620 ATA_CMD_READ_EXT,
9a3dccc4
TH
621 ATA_CMD_WRITE_EXT,
622 0,
623 0,
624 0,
625 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 626};
1da177e4
LT
627
628/**
8cbd6df1 629 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
630 * @tf: command to examine and configure
631 * @dev: device tf belongs to
1da177e4 632 *
2e9edbf8 633 * Examine the device configuration and tf->flags to calculate
8cbd6df1 634 * the proper read/write commands and protocol to use.
1da177e4
LT
635 *
636 * LOCKING:
637 * caller.
638 */
bd056d7e 639static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 640{
9a3dccc4 641 u8 cmd;
1da177e4 642
9a3dccc4 643 int index, fua, lba48, write;
2e9edbf8 644
9a3dccc4 645 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
646 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
647 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 648
8cbd6df1
AL
649 if (dev->flags & ATA_DFLAG_PIO) {
650 tf->protocol = ATA_PROT_PIO;
9a3dccc4 651 index = dev->multi_count ? 0 : 8;
9af5c9c9 652 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
653 /* Unable to use DMA due to host limitation */
654 tf->protocol = ATA_PROT_PIO;
0565c26d 655 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
656 } else {
657 tf->protocol = ATA_PROT_DMA;
9a3dccc4 658 index = 16;
8cbd6df1 659 }
1da177e4 660
9a3dccc4
TH
661 cmd = ata_rw_cmds[index + fua + lba48 + write];
662 if (cmd) {
663 tf->command = cmd;
664 return 0;
665 }
666 return -1;
1da177e4
LT
667}
668
35b649fe
TH
669/**
670 * ata_tf_read_block - Read block address from ATA taskfile
671 * @tf: ATA taskfile of interest
672 * @dev: ATA device @tf belongs to
673 *
674 * LOCKING:
675 * None.
676 *
677 * Read block address from @tf. This function can handle all
678 * three address formats - LBA, LBA48 and CHS. tf->protocol and
679 * flags select the address format to use.
680 *
681 * RETURNS:
682 * Block address read from @tf.
683 */
cffd1ee9 684u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
35b649fe
TH
685{
686 u64 block = 0;
687
fe16d4f2 688 if (tf->flags & ATA_TFLAG_LBA) {
35b649fe
TH
689 if (tf->flags & ATA_TFLAG_LBA48) {
690 block |= (u64)tf->hob_lbah << 40;
691 block |= (u64)tf->hob_lbam << 32;
44901a96 692 block |= (u64)tf->hob_lbal << 24;
35b649fe
TH
693 } else
694 block |= (tf->device & 0xf) << 24;
695
696 block |= tf->lbah << 16;
697 block |= tf->lbam << 8;
698 block |= tf->lbal;
699 } else {
700 u32 cyl, head, sect;
701
702 cyl = tf->lbam | (tf->lbah << 8);
703 head = tf->device & 0xf;
704 sect = tf->lbal;
705
ac8672ea 706 if (!sect) {
a9a79dfe
JP
707 ata_dev_warn(dev,
708 "device reported invalid CHS sector 0\n");
cffd1ee9 709 return U64_MAX;
ac8672ea
TH
710 }
711
712 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
35b649fe
TH
713 }
714
715 return block;
716}
717
bd056d7e
TH
718/**
719 * ata_build_rw_tf - Build ATA taskfile for given read/write request
720 * @tf: Target ATA taskfile
721 * @dev: ATA device @tf belongs to
722 * @block: Block address
723 * @n_block: Number of blocks
724 * @tf_flags: RW/FUA etc...
725 * @tag: tag
8e061784 726 * @class: IO priority class
bd056d7e
TH
727 *
728 * LOCKING:
729 * None.
730 *
731 * Build ATA taskfile @tf for read/write request described by
732 * @block, @n_block, @tf_flags and @tag on @dev.
733 *
734 * RETURNS:
735 *
736 * 0 on success, -ERANGE if the request is too large for @dev,
737 * -EINVAL if the request is invalid.
738 */
739int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
740 u64 block, u32 n_block, unsigned int tf_flags,
8e061784 741 unsigned int tag, int class)
bd056d7e
TH
742{
743 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
744 tf->flags |= tf_flags;
745
2e2cc676 746 if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
bd056d7e
TH
747 /* yay, NCQ */
748 if (!lba_48_ok(block, n_block))
749 return -ERANGE;
750
751 tf->protocol = ATA_PROT_NCQ;
752 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
753
754 if (tf->flags & ATA_TFLAG_WRITE)
755 tf->command = ATA_CMD_FPDMA_WRITE;
756 else
757 tf->command = ATA_CMD_FPDMA_READ;
758
759 tf->nsect = tag << 3;
760 tf->hob_feature = (n_block >> 8) & 0xff;
761 tf->feature = n_block & 0xff;
762
763 tf->hob_lbah = (block >> 40) & 0xff;
764 tf->hob_lbam = (block >> 32) & 0xff;
765 tf->hob_lbal = (block >> 24) & 0xff;
766 tf->lbah = (block >> 16) & 0xff;
767 tf->lbam = (block >> 8) & 0xff;
768 tf->lbal = block & 0xff;
769
9ca7cfa4 770 tf->device = ATA_LBA;
bd056d7e
TH
771 if (tf->flags & ATA_TFLAG_FUA)
772 tf->device |= 1 << 7;
8e061784 773
9f56eca3 774 if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
8e061784
AM
775 if (class == IOPRIO_CLASS_RT)
776 tf->hob_nsect |= ATA_PRIO_HIGH <<
777 ATA_SHIFT_PRIO;
778 }
bd056d7e
TH
779 } else if (dev->flags & ATA_DFLAG_LBA) {
780 tf->flags |= ATA_TFLAG_LBA;
781
782 if (lba_28_ok(block, n_block)) {
783 /* use LBA28 */
784 tf->device |= (block >> 24) & 0xf;
785 } else if (lba_48_ok(block, n_block)) {
786 if (!(dev->flags & ATA_DFLAG_LBA48))
787 return -ERANGE;
788
789 /* use LBA48 */
790 tf->flags |= ATA_TFLAG_LBA48;
791
792 tf->hob_nsect = (n_block >> 8) & 0xff;
793
794 tf->hob_lbah = (block >> 40) & 0xff;
795 tf->hob_lbam = (block >> 32) & 0xff;
796 tf->hob_lbal = (block >> 24) & 0xff;
797 } else
798 /* request too large even for LBA48 */
799 return -ERANGE;
800
801 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
802 return -EINVAL;
803
804 tf->nsect = n_block & 0xff;
805
806 tf->lbah = (block >> 16) & 0xff;
807 tf->lbam = (block >> 8) & 0xff;
808 tf->lbal = block & 0xff;
809
810 tf->device |= ATA_LBA;
811 } else {
812 /* CHS */
813 u32 sect, head, cyl, track;
814
815 /* The request -may- be too large for CHS addressing. */
816 if (!lba_28_ok(block, n_block))
817 return -ERANGE;
818
819 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
820 return -EINVAL;
821
822 /* Convert LBA to CHS */
823 track = (u32)block / dev->sectors;
824 cyl = track / dev->heads;
825 head = track % dev->heads;
826 sect = (u32)block % dev->sectors + 1;
827
828 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
829 (u32)block, track, cyl, head, sect);
830
831 /* Check whether the converted CHS can fit.
832 Cylinder: 0-65535
833 Head: 0-15
834 Sector: 1-255*/
835 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
836 return -ERANGE;
837
838 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
839 tf->lbal = sect;
840 tf->lbam = cyl;
841 tf->lbah = cyl >> 8;
842 tf->device |= head;
843 }
844
845 return 0;
846}
847
cb95d562
TH
848/**
849 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
850 * @pio_mask: pio_mask
851 * @mwdma_mask: mwdma_mask
852 * @udma_mask: udma_mask
853 *
854 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
855 * unsigned int xfer_mask.
856 *
857 * LOCKING:
858 * None.
859 *
860 * RETURNS:
861 * Packed xfer_mask.
862 */
7dc951ae
TH
863unsigned long ata_pack_xfermask(unsigned long pio_mask,
864 unsigned long mwdma_mask,
865 unsigned long udma_mask)
cb95d562
TH
866{
867 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
868 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
869 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
870}
871
c0489e4e
TH
872/**
873 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
874 * @xfer_mask: xfer_mask to unpack
875 * @pio_mask: resulting pio_mask
876 * @mwdma_mask: resulting mwdma_mask
877 * @udma_mask: resulting udma_mask
878 *
879 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
c9b5560a 880 * Any NULL destination masks will be ignored.
c0489e4e 881 */
7dc951ae
TH
882void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
883 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
884{
885 if (pio_mask)
886 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
887 if (mwdma_mask)
888 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
889 if (udma_mask)
890 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
891}
892
cb95d562 893static const struct ata_xfer_ent {
be9a50c8 894 int shift, bits;
cb95d562
TH
895 u8 base;
896} ata_xfer_tbl[] = {
70cd071e
TH
897 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
898 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
899 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
cb95d562
TH
900 { -1, },
901};
902
903/**
904 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
905 * @xfer_mask: xfer_mask of interest
906 *
907 * Return matching XFER_* value for @xfer_mask. Only the highest
908 * bit of @xfer_mask is considered.
909 *
910 * LOCKING:
911 * None.
912 *
913 * RETURNS:
70cd071e 914 * Matching XFER_* value, 0xff if no match found.
cb95d562 915 */
7dc951ae 916u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
917{
918 int highbit = fls(xfer_mask) - 1;
919 const struct ata_xfer_ent *ent;
920
921 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
922 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
923 return ent->base + highbit - ent->shift;
70cd071e 924 return 0xff;
cb95d562
TH
925}
926
927/**
928 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
929 * @xfer_mode: XFER_* of interest
930 *
931 * Return matching xfer_mask for @xfer_mode.
932 *
933 * LOCKING:
934 * None.
935 *
936 * RETURNS:
937 * Matching xfer_mask, 0 if no match found.
938 */
7dc951ae 939unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
940{
941 const struct ata_xfer_ent *ent;
942
943 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
944 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
945 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
946 & ~((1 << ent->shift) - 1);
cb95d562
TH
947 return 0;
948}
949
950/**
951 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
952 * @xfer_mode: XFER_* of interest
953 *
954 * Return matching xfer_shift for @xfer_mode.
955 *
956 * LOCKING:
957 * None.
958 *
959 * RETURNS:
960 * Matching xfer_shift, -1 if no match found.
961 */
7dc951ae 962int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
963{
964 const struct ata_xfer_ent *ent;
965
966 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
967 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
968 return ent->shift;
969 return -1;
970}
971
1da177e4 972/**
1da7b0d0
TH
973 * ata_mode_string - convert xfer_mask to string
974 * @xfer_mask: mask of bits supported; only highest bit counts.
1da177e4
LT
975 *
976 * Determine string which represents the highest speed
1da7b0d0 977 * (highest bit in @modemask).
1da177e4
LT
978 *
979 * LOCKING:
980 * None.
981 *
982 * RETURNS:
983 * Constant C string representing highest speed listed in
1da7b0d0 984 * @mode_mask, or the constant C string "<n/a>".
1da177e4 985 */
7dc951ae 986const char *ata_mode_string(unsigned long xfer_mask)
1da177e4 987{
75f554bc
TH
988 static const char * const xfer_mode_str[] = {
989 "PIO0",
990 "PIO1",
991 "PIO2",
992 "PIO3",
993 "PIO4",
b352e57d
AC
994 "PIO5",
995 "PIO6",
75f554bc
TH
996 "MWDMA0",
997 "MWDMA1",
998 "MWDMA2",
b352e57d
AC
999 "MWDMA3",
1000 "MWDMA4",
75f554bc
TH
1001 "UDMA/16",
1002 "UDMA/25",
1003 "UDMA/33",
1004 "UDMA/44",
1005 "UDMA/66",
1006 "UDMA/100",
1007 "UDMA/133",
1008 "UDMA7",
1009 };
1da7b0d0 1010 int highbit;
1da177e4 1011
1da7b0d0
TH
1012 highbit = fls(xfer_mask) - 1;
1013 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1014 return xfer_mode_str[highbit];
1da177e4 1015 return "<n/a>";
1da177e4
LT
1016}
1017
/*
 * sata_spd_string - human-readable name for a SATA link speed.
 * @spd is 1-based (1 = Gen1); 0 or out-of-range yields "<unknown>".
 */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
1030
1da177e4
LT
1031/**
1032 * ata_dev_classify - determine device type based on ATA-spec signature
1033 * @tf: ATA taskfile register set for device to be identified
1034 *
1035 * Determine from taskfile register contents whether a device is
1036 * ATA or ATAPI, as per "Signature and persistence" section
1037 * of ATA/PI spec (volume 1, sect 5.14).
1038 *
1039 * LOCKING:
1040 * None.
1041 *
1042 * RETURNS:
9162c657
HR
1043 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1044 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN the event of failure.
1da177e4 1045 */
057ace5e 1046unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
1047{
1048 /* Apple's open source Darwin code hints that some devices only
1049 * put a proper signature into the LBA mid/high registers,
1050 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
1051 *
1052 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1053 * signatures for ATA and ATAPI devices attached on SerialATA,
1054 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1055 * spec has never mentioned about using different signatures
1056 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1057 * Multiplier specification began to use 0x69/0x96 to identify
1058 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1059 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1060 * 0x69/0x96 shortly and described them as reserved for
1061 * SerialATA.
1062 *
1063 * We follow the current spec and consider that 0x69/0x96
1064 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
79b42bab
TH
1065 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1066 * SEMB signature. This is worked around in
1067 * ata_dev_read_id().
1da177e4 1068 */
633273a3 1069 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
1070 DPRINTK("found ATA device by sig\n");
1071 return ATA_DEV_ATA;
1072 }
1073
633273a3 1074 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
1075 DPRINTK("found ATAPI device by sig\n");
1076 return ATA_DEV_ATAPI;
1077 }
1078
633273a3
TH
1079 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1080 DPRINTK("found PMP device by sig\n");
1081 return ATA_DEV_PMP;
1082 }
1083
1084 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
79b42bab
TH
1085 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1086 return ATA_DEV_SEMB;
633273a3
TH
1087 }
1088
9162c657
HR
1089 if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1090 DPRINTK("found ZAC device by sig\n");
1091 return ATA_DEV_ZAC;
1092 }
1093
1da177e4
LT
1094 DPRINTK("unknown device\n");
1095 return ATA_DEV_UNKNOWN;
1096}
1097
1da177e4 1098/**
6a62a04d 1099 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1100 * @id: IDENTIFY DEVICE results we will examine
1101 * @s: string into which data is output
1102 * @ofs: offset into identify device page
1103 * @len: length of string to return. must be an even number.
1104 *
1105 * The strings in the IDENTIFY DEVICE page are broken up into
1106 * 16-bit chunks. Run through the string, and output each
1107 * 8-bit chunk linearly, regardless of platform.
1108 *
1109 * LOCKING:
1110 * caller.
1111 */
1112
6a62a04d
TH
1113void ata_id_string(const u16 *id, unsigned char *s,
1114 unsigned int ofs, unsigned int len)
1da177e4
LT
1115{
1116 unsigned int c;
1117
963e4975
AC
1118 BUG_ON(len & 1);
1119
1da177e4
LT
1120 while (len > 0) {
1121 c = id[ofs] >> 8;
1122 *s = c;
1123 s++;
1124
1125 c = id[ofs] & 0xff;
1126 *s = c;
1127 s++;
1128
1129 ofs++;
1130 len -= 2;
1131 }
1132}
1133
0e949ff3 1134/**
6a62a04d 1135 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1136 * @id: IDENTIFY DEVICE results we will examine
1137 * @s: string into which data is output
1138 * @ofs: offset into identify device page
1139 * @len: length of string to return. must be an odd number.
1140 *
6a62a04d 1141 * This function is identical to ata_id_string except that it
0e949ff3
TH
1142 * trims trailing spaces and terminates the resulting string with
1143 * null. @len must be actual maximum length (even number) + 1.
1144 *
1145 * LOCKING:
1146 * caller.
1147 */
6a62a04d
TH
1148void ata_id_c_string(const u16 *id, unsigned char *s,
1149 unsigned int ofs, unsigned int len)
0e949ff3
TH
1150{
1151 unsigned char *p;
1152
6a62a04d 1153 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1154
1155 p = s + strnlen(s, len - 1);
1156 while (p > s && p[-1] == ' ')
1157 p--;
1158 *p = '\0';
1159}
0baab86b 1160
db6f8759
TH
1161static u64 ata_id_n_sectors(const u16 *id)
1162{
1163 if (ata_id_has_lba(id)) {
1164 if (ata_id_has_lba48(id))
968e594a 1165 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
db6f8759 1166 else
968e594a 1167 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
db6f8759
TH
1168 } else {
1169 if (ata_id_current_chs_valid(id))
968e594a
RH
1170 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1171 id[ATA_ID_CUR_SECTORS];
db6f8759 1172 else
968e594a
RH
1173 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1174 id[ATA_ID_SECTORS];
db6f8759
TH
1175 }
1176}
1177
a5987e0a 1178u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1e999736
AC
1179{
1180 u64 sectors = 0;
1181
1182 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1183 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
ba14a9c2 1184 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1e999736
AC
1185 sectors |= (tf->lbah & 0xff) << 16;
1186 sectors |= (tf->lbam & 0xff) << 8;
1187 sectors |= (tf->lbal & 0xff);
1188
a5987e0a 1189 return sectors;
1e999736
AC
1190}
1191
a5987e0a 1192u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1e999736
AC
1193{
1194 u64 sectors = 0;
1195
1196 sectors |= (tf->device & 0x0f) << 24;
1197 sectors |= (tf->lbah & 0xff) << 16;
1198 sectors |= (tf->lbam & 0xff) << 8;
1199 sectors |= (tf->lbal & 0xff);
1200
a5987e0a 1201 return sectors;
1e999736
AC
1202}
1203
/**
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result native max address
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	/* pick the 48-bit flavor of the command when the drive supports it */
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	/* non-data command; the answer comes back in the result taskfile */
	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		/* drive-aborted command maps to -EACCES so callers can
		 * distinguish "refused" from a transport/other failure */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* returned value is the highest addressable LBA; +1 -> sector count */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	/* quirk: some drives report one sector too many */
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1254
/**
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive. -EIO on other
 * errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the highest addressable LBA, i.e. count - 1 */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		/* NOTE(review): hob registers carry LBA bits 47:24 here;
		 * hob_lbal = bits 31:24, hob_lbam = 39:32, hob_lbah = 47:40 */
		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 27:24 live in the low nibble of device */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	/* LBA bits 23:0 are common to both command flavors */
	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		/* abort or ID-not-found both mean the drive refused */
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1312
/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	/* unlock if globally requested or per-device flagged */
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			/* remember the breakage so we don't retry */
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		/* informational only: report the HPA without touching it */
		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data: capacity words changed after the resize */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1409
1da177e4
LT
/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* words 49 (capabilities), 53 (field validity), 63 (MWDMA),
	 * 64 (advanced PIO), 75 (queue depth) -- per ATA IDENTIFY layout */
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	/* words 80/81 (major/minor version), 82-84 (command sets) */
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	/* words 88 (UDMA modes), 93 (hardware reset result) */
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1448
cb95d562
TH
/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 bits 0-1 advertise PIO3/PIO4 on top of PIO0-2 */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		/* CFA advanced modes map onto PIO5/6 and MWDMA3/4 bits */
		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* word 53 bit 2 validates the UDMA modes word */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1517
7102d230 1518static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1519{
77853bf2 1520 struct completion *waiting = qc->private_data;
a2a7a662 1521
a2a7a662 1522 complete(waiting);
a2a7a662
TH
1523}
1524
/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	/* snapshot of the port/link command state, restored on exit so the
	 * internal command is transparent to whatever was in flight */
	unsigned int preempted_tag;
	u32 preempted_sactive;
	u64 preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */
	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);

	qc->tag = ATA_TAG_INTERNAL;
	qc->hw_tag = 0;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the active-command bookkeeping so the internal
	 * command runs with an otherwise quiescent-looking port */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total byte count of the sg list becomes qc->nbytes */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* pick a timeout: explicit > module parameter > per-command default */
	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	/* drop EH exclusion while sleeping so error handling can run */
	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit once a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.command |= ATA_SENSE;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the command state we preempted on entry */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1703
2432697b 1704/**
33480a0e 1705 * ata_exec_internal - execute libata internal command
2432697b
TH
1706 * @dev: Device to which the command is sent
1707 * @tf: Taskfile registers for the command and the result
1708 * @cdb: CDB for packet command
e227867f 1709 * @dma_dir: Data transfer direction of the command
2432697b
TH
1710 * @buf: Data buffer of the command
1711 * @buflen: Length of data buffer
2b789108 1712 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1713 *
1714 * Wrapper around ata_exec_internal_sg() which takes simple
1715 * buffer instead of sg list.
1716 *
1717 * LOCKING:
1718 * None. Should be called with kernel context, might sleep.
1719 *
1720 * RETURNS:
1721 * Zero on success, AC_ERR_* mask on failure
1722 */
1723unsigned ata_exec_internal(struct ata_device *dev,
1724 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1725 int dma_dir, void *buf, unsigned int buflen,
1726 unsigned long timeout)
2432697b 1727{
33480a0e
TH
1728 struct scatterlist *psg = NULL, sg;
1729 unsigned int n_elem = 0;
2432697b 1730
33480a0e
TH
1731 if (dma_dir != DMA_NONE) {
1732 WARN_ON(!buf);
1733 sg_init_one(&sg, buf, buflen);
1734 psg = &sg;
1735 n_elem++;
1736 }
2432697b 1737
2b789108
TH
1738 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1739 timeout);
2432697b
TH
1740}
1741
1bc4ccff
AC
1742/**
1743 * ata_pio_need_iordy - check if iordy needed
1744 * @adev: ATA device
1745 *
1746 * Check if the current speed of the device requires IORDY. Used
1747 * by various controllers for chip configuration.
1748 */
1bc4ccff
AC
1749unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1750{
0d9e6659
TH
1751 /* Don't set IORDY if we're preparing for reset. IORDY may
1752 * lead to controller lock up on certain controllers if the
1753 * port is not occupied. See bko#11703 for details.
1754 */
1755 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1756 return 0;
1757 /* Controller doesn't support IORDY. Probably a pointless
1758 * check as the caller should know this.
1759 */
9af5c9c9 1760 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1761 return 0;
5c18c4d2
DD
1762 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1763 if (ata_id_is_cfa(adev->id)
1764 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1765 return 0;
432729f0
AC
1766 /* PIO3 and higher it is mandatory */
1767 if (adev->pio_mode > XFER_PIO_2)
1768 return 1;
1769 /* We turn it on when possible */
1770 if (ata_id_has_iordy(adev->id))
1bc4ccff 1771 return 1;
432729f0
AC
1772 return 0;
1773}
2e9edbf8 1774
432729f0
AC
1775/**
1776 * ata_pio_mask_no_iordy - Return the non IORDY mask
1777 * @adev: ATA device
1778 *
1779 * Compute the highest mode possible if we are not using iordy. Return
1780 * -1 if no iordy mode is available.
1781 */
432729f0
AC
1782static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1783{
1bc4ccff 1784 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1785 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1786 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1787 /* Is the speed faster than the drive allows non IORDY ? */
1788 if (pio) {
1789 /* This is cycle times not frequency - watch the logic! */
1790 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1791 return 3 << ATA_SHIFT_PIO;
1792 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1793 }
1794 }
432729f0 1795 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1796}
1797
963e4975
AC
1798/**
1799 * ata_do_dev_read_id - default ID read method
1800 * @dev: device
1801 * @tf: proposed taskfile
1802 * @id: data buffer
1803 *
1804 * Issue the identify taskfile and hand back the buffer containing
1805 * identify data. For some RAID controllers and for pre ATA devices
1806 * this function is wrapped or replaced by the driver
1807 */
1808unsigned int ata_do_dev_read_id(struct ata_device *dev,
1809 struct ata_taskfile *tf, u16 *id)
1810{
1811 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1812 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1813}
1814
/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 * now we abort if we hit that case.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	/* one ATA<->ATAPI fallback and one spin-up retry are allowed */
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		/* fall through */
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	/* let the driver override the default IDENTIFY issue path */
	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
							ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* id[2] == 0x37c8/0x738c: drive came up in standby; see below.
	 * NOTE(review): 0x738c appears to mean "standby but ID data already
	 * complete", hence the relaxed error check -- confirm against spec. */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
			     reason, err_mask);
	return rc;
}
2025
f01f62c2
CH
2026/**
2027 * ata_read_log_page - read a specific log page
2028 * @dev: target device
2029 * @log: log to read
2030 * @page: page to read
2031 * @buf: buffer to store read page
2032 * @sectors: number of sectors to read
2033 *
2034 * Read log page using READ_LOG_EXT command.
2035 *
2036 * LOCKING:
2037 * Kernel thread context (may sleep).
2038 *
2039 * RETURNS:
2040 * 0 on success, AC_ERR_* mask otherwise.
2041 */
2042unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
2043 u8 page, void *buf, unsigned int sectors)
2044{
2045 unsigned long ap_flags = dev->link->ap->flags;
2046 struct ata_taskfile tf;
2047 unsigned int err_mask;
2048 bool dma = false;
2049
2050 DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
2051
2052 /*
2053 * Return error without actually issuing the command on controllers
2054 * which e.g. lockup on a read log page.
2055 */
2056 if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
2057 return AC_ERR_DEV;
2058
2059retry:
2060 ata_tf_init(dev, &tf);
2061 if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
7cfdfdc8 2062 !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
f01f62c2
CH
2063 tf.command = ATA_CMD_READ_LOG_DMA_EXT;
2064 tf.protocol = ATA_PROT_DMA;
2065 dma = true;
2066 } else {
2067 tf.command = ATA_CMD_READ_LOG_EXT;
2068 tf.protocol = ATA_PROT_PIO;
2069 dma = false;
2070 }
2071 tf.lbal = log;
2072 tf.lbam = page;
2073 tf.nsect = sectors;
2074 tf.hob_nsect = sectors >> 8;
2075 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
2076
2077 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2078 buf, sectors * ATA_SECT_SIZE, 0);
2079
2080 if (err_mask && dma) {
7cfdfdc8
DLM
2081 dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
2082 ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
f01f62c2
CH
2083 goto retry;
2084 }
2085
2086 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2087 return err_mask;
2088}
2089
efe205a3
CH
2090static bool ata_log_supported(struct ata_device *dev, u8 log)
2091{
2092 struct ata_port *ap = dev->link->ap;
2093
2094 if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2095 return false;
2096 return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
2097}
2098
a0fd2454
CH
2099static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2100{
2101 struct ata_port *ap = dev->link->ap;
2102 unsigned int err, i;
2103
2104 if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2105 ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
2106 return false;
2107 }
2108
2109 /*
2110 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2111 * supported.
2112 */
2113 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2114 1);
2115 if (err) {
2116 ata_dev_info(dev,
2117 "failed to get Device Identify Log Emask 0x%x\n",
2118 err);
2119 return false;
2120 }
2121
2122 for (i = 0; i < ap->sector_buf[8]; i++) {
2123 if (ap->sector_buf[9 + i] == page)
2124 return true;
2125 }
2126
2127 return false;
2128}
2129
9062712f
TH
2130static int ata_do_link_spd_horkage(struct ata_device *dev)
2131{
2132 struct ata_link *plink = ata_dev_phys_link(dev);
2133 u32 target, target_limit;
2134
2135 if (!sata_scr_valid(plink))
2136 return 0;
2137
2138 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2139 target = 1;
2140 else
2141 return 0;
2142
2143 target_limit = (1 << target) - 1;
2144
2145 /* if already on stricter limit, no need to push further */
2146 if (plink->sata_spd_limit <= target_limit)
2147 return 0;
2148
2149 plink->sata_spd_limit = target_limit;
2150
2151 /* Request another EH round by returning -EAGAIN if link is
2152 * going faster than the target speed. Forward progress is
2153 * guaranteed by setting sata_spd_limit to target_limit above.
2154 */
2155 if (plink->sata_spd > target) {
a9a79dfe
JP
2156 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2157 sata_spd_string(target));
9062712f
TH
2158 return -EAGAIN;
2159 }
2160 return 0;
2161}
2162
3373efd8 2163static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2164{
9af5c9c9 2165 struct ata_port *ap = dev->link->ap;
9ce8e307
JA
2166
2167 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2168 return 0;
2169
9af5c9c9 2170 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2171}
2172
5a233551
HR
2173static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2174{
2175 struct ata_port *ap = dev->link->ap;
2176 unsigned int err_mask;
2177
efe205a3
CH
2178 if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2179 ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
fe5af0cc
HR
2180 return;
2181 }
5a233551
HR
2182 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2183 0, ap->sector_buf, 1);
2184 if (err_mask) {
2185 ata_dev_dbg(dev,
2186 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2187 err_mask);
2188 } else {
2189 u8 *cmds = dev->ncq_send_recv_cmds;
2190
2191 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2192 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2193
2194 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2195 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2196 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2197 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2198 }
2199 }
2200}
2201
284b3b77
HR
2202static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2203{
2204 struct ata_port *ap = dev->link->ap;
2205 unsigned int err_mask;
284b3b77 2206
efe205a3 2207 if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
284b3b77
HR
2208 ata_dev_warn(dev,
2209 "NCQ Send/Recv Log not supported\n");
2210 return;
2211 }
2212 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2213 0, ap->sector_buf, 1);
2214 if (err_mask) {
2215 ata_dev_dbg(dev,
2216 "failed to get NCQ Non-Data Log Emask 0x%x\n",
2217 err_mask);
2218 } else {
2219 u8 *cmds = dev->ncq_non_data_cmds;
2220
2221 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2222 }
2223}
2224
8e061784
AM
2225static void ata_dev_config_ncq_prio(struct ata_device *dev)
2226{
2227 struct ata_port *ap = dev->link->ap;
2228 unsigned int err_mask;
2229
9f56eca3
AM
2230 if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
2231 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2232 return;
2233 }
2234
8e061784 2235 err_mask = ata_read_log_page(dev,
1d51d5f3 2236 ATA_LOG_IDENTIFY_DEVICE,
8e061784
AM
2237 ATA_LOG_SATA_SETTINGS,
2238 ap->sector_buf,
2239 1);
2240 if (err_mask) {
2241 ata_dev_dbg(dev,
2242 "failed to get Identify Device data, Emask 0x%x\n",
2243 err_mask);
2244 return;
2245 }
2246
9f56eca3 2247 if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
8e061784 2248 dev->flags |= ATA_DFLAG_NCQ_PRIO;
9f56eca3
AM
2249 } else {
2250 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
8e061784 2251 ata_dev_dbg(dev, "SATA page does not support priority\n");
9f56eca3 2252 }
8e061784
AM
2253
2254}
2255
388539f3 2256static int ata_dev_config_ncq(struct ata_device *dev,
a6e6ce8e
TH
2257 char *desc, size_t desc_sz)
2258{
9af5c9c9 2259 struct ata_port *ap = dev->link->ap;
a6e6ce8e 2260 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
388539f3
SL
2261 unsigned int err_mask;
2262 char *aa_desc = "";
a6e6ce8e
TH
2263
2264 if (!ata_id_has_ncq(dev->id)) {
2265 desc[0] = '\0';
388539f3 2266 return 0;
a6e6ce8e 2267 }
75683fe7 2268 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6 2269 snprintf(desc, desc_sz, "NCQ (not used)");
388539f3 2270 return 0;
6919a0a6 2271 }
a6e6ce8e 2272 if (ap->flags & ATA_FLAG_NCQ) {
69278f79 2273 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
a6e6ce8e
TH
2274 dev->flags |= ATA_DFLAG_NCQ;
2275 }
2276
388539f3
SL
2277 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2278 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2279 ata_id_has_fpdma_aa(dev->id)) {
2280 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2281 SATA_FPDMA_AA);
2282 if (err_mask) {
a9a79dfe
JP
2283 ata_dev_err(dev,
2284 "failed to enable AA (error_mask=0x%x)\n",
2285 err_mask);
388539f3
SL
2286 if (err_mask != AC_ERR_DEV) {
2287 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2288 return -EIO;
2289 }
2290 } else
2291 aa_desc = ", AA";
2292 }
2293
a6e6ce8e 2294 if (hdepth >= ddepth)
388539f3 2295 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
a6e6ce8e 2296 else
388539f3
SL
2297 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2298 ddepth, aa_desc);
ed36911c 2299
284b3b77
HR
2300 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2301 if (ata_id_has_ncq_send_and_recv(dev->id))
2302 ata_dev_config_ncq_send_recv(dev);
2303 if (ata_id_has_ncq_non_data(dev->id))
2304 ata_dev_config_ncq_non_data(dev);
8e061784
AM
2305 if (ata_id_has_ncq_prio(dev->id))
2306 ata_dev_config_ncq_prio(dev);
284b3b77 2307 }
f78dea06 2308
388539f3 2309 return 0;
a6e6ce8e 2310}
f78dea06 2311
e87fd28c
HR
2312static void ata_dev_config_sense_reporting(struct ata_device *dev)
2313{
2314 unsigned int err_mask;
2315
2316 if (!ata_id_has_sense_reporting(dev->id))
2317 return;
2318
2319 if (ata_id_sense_reporting_enabled(dev->id))
2320 return;
2321
2322 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2323 if (err_mask) {
2324 ata_dev_dbg(dev,
2325 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2326 err_mask);
2327 }
2328}
2329
6d1003ae
HR
2330static void ata_dev_config_zac(struct ata_device *dev)
2331{
2332 struct ata_port *ap = dev->link->ap;
2333 unsigned int err_mask;
2334 u8 *identify_buf = ap->sector_buf;
6d1003ae
HR
2335
2336 dev->zac_zones_optimal_open = U32_MAX;
2337 dev->zac_zones_optimal_nonseq = U32_MAX;
2338 dev->zac_zones_max_open = U32_MAX;
2339
2340 /*
2341 * Always set the 'ZAC' flag for Host-managed devices.
2342 */
2343 if (dev->class == ATA_DEV_ZAC)
2344 dev->flags |= ATA_DFLAG_ZAC;
2345 else if (ata_id_zoned_cap(dev->id) == 0x01)
2346 /*
2347 * Check for host-aware devices.
2348 */
2349 dev->flags |= ATA_DFLAG_ZAC;
2350
2351 if (!(dev->flags & ATA_DFLAG_ZAC))
2352 return;
2353
a0fd2454 2354 if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
6d1003ae
HR
2355 ata_dev_warn(dev,
2356 "ATA Zoned Information Log not supported\n");
2357 return;
2358 }
ed36911c 2359
6d1003ae
HR
2360 /*
2361 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2362 */
1d51d5f3 2363 err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
6d1003ae
HR
2364 ATA_LOG_ZONED_INFORMATION,
2365 identify_buf, 1);
2366 if (!err_mask) {
2367 u64 zoned_cap, opt_open, opt_nonseq, max_open;
2368
2369 zoned_cap = get_unaligned_le64(&identify_buf[8]);
2370 if ((zoned_cap >> 63))
2371 dev->zac_zoned_cap = (zoned_cap & 1);
2372 opt_open = get_unaligned_le64(&identify_buf[24]);
2373 if ((opt_open >> 63))
2374 dev->zac_zones_optimal_open = (u32)opt_open;
2375 opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2376 if ((opt_nonseq >> 63))
2377 dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2378 max_open = get_unaligned_le64(&identify_buf[40]);
2379 if ((max_open >> 63))
2380 dev->zac_zones_max_open = (u32)max_open;
2381 }
a6e6ce8e
TH
2382}
2383
818831c8
CH
2384static void ata_dev_config_trusted(struct ata_device *dev)
2385{
2386 struct ata_port *ap = dev->link->ap;
2387 u64 trusted_cap;
2388 unsigned int err;
2389
e8f11db9
CH
2390 if (!ata_id_has_trusted(dev->id))
2391 return;
2392
818831c8
CH
2393 if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2394 ata_dev_warn(dev,
2395 "Security Log not supported\n");
2396 return;
2397 }
2398
2399 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2400 ap->sector_buf, 1);
2401 if (err) {
2402 ata_dev_dbg(dev,
2403 "failed to read Security Log, Emask 0x%x\n", err);
2404 return;
2405 }
2406
2407 trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2408 if (!(trusted_cap & (1ULL << 63))) {
2409 ata_dev_dbg(dev,
2410 "Trusted Computing capability qword not valid!\n");
2411 return;
2412 }
2413
2414 if (trusted_cap & (1 << 0))
2415 dev->flags |= ATA_DFLAG_TRUSTED;
2416}
2417
49016aca 2418/**
ffeae418 2419 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
2420 * @dev: Target device to configure
2421 *
2422 * Configure @dev according to @dev->id. Generic and low-level
2423 * driver specific fixups are also applied.
49016aca
TH
2424 *
2425 * LOCKING:
ffeae418
TH
2426 * Kernel thread context (may sleep)
2427 *
2428 * RETURNS:
2429 * 0 on success, -errno otherwise
49016aca 2430 */
efdaedc4 2431int ata_dev_configure(struct ata_device *dev)
49016aca 2432{
9af5c9c9
TH
2433 struct ata_port *ap = dev->link->ap;
2434 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 2435 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 2436 const u16 *id = dev->id;
7dc951ae 2437 unsigned long xfer_mask;
65fe1f0f 2438 unsigned int err_mask;
b352e57d 2439 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
2440 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2441 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 2442 int rc;
49016aca 2443
0dd4b21f 2444 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
a9a79dfe 2445 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
ffeae418 2446 return 0;
49016aca
TH
2447 }
2448
0dd4b21f 2449 if (ata_msg_probe(ap))
a9a79dfe 2450 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1da177e4 2451
75683fe7
TH
2452 /* set horkage */
2453 dev->horkage |= ata_dev_blacklisted(dev);
33267325 2454 ata_force_horkage(dev);
75683fe7 2455
50af2fa1 2456 if (dev->horkage & ATA_HORKAGE_DISABLE) {
a9a79dfe 2457 ata_dev_info(dev, "unsupported device, disabling\n");
50af2fa1
TH
2458 ata_dev_disable(dev);
2459 return 0;
2460 }
2461
2486fa56
TH
2462 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2463 dev->class == ATA_DEV_ATAPI) {
a9a79dfe
JP
2464 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2465 atapi_enabled ? "not supported with this driver"
2466 : "disabled");
2486fa56
TH
2467 ata_dev_disable(dev);
2468 return 0;
2469 }
2470
9062712f
TH
2471 rc = ata_do_link_spd_horkage(dev);
2472 if (rc)
2473 return rc;
2474
ecd75ad5
TH
2475 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2476 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2477 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2478 dev->horkage |= ATA_HORKAGE_NOLPM;
2479
240630e6
HG
2480 if (ap->flags & ATA_FLAG_NO_LPM)
2481 dev->horkage |= ATA_HORKAGE_NOLPM;
2482
ecd75ad5
TH
2483 if (dev->horkage & ATA_HORKAGE_NOLPM) {
2484 ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2485 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2486 }
2487
6746544c
TH
2488 /* let ACPI work its magic */
2489 rc = ata_acpi_on_devcfg(dev);
2490 if (rc)
2491 return rc;
08573a86 2492
05027adc
TH
2493 /* massage HPA, do it early as it might change IDENTIFY data */
2494 rc = ata_hpa_resize(dev);
2495 if (rc)
2496 return rc;
2497
c39f5ebe 2498 /* print device capabilities */
0dd4b21f 2499 if (ata_msg_probe(ap))
a9a79dfe
JP
2500 ata_dev_dbg(dev,
2501 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2502 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2503 __func__,
2504 id[49], id[82], id[83], id[84],
2505 id[85], id[86], id[87], id[88]);
c39f5ebe 2506
208a9933 2507 /* initialize to-be-configured parameters */
ea1dd4e1 2508 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
2509 dev->max_sectors = 0;
2510 dev->cdb_len = 0;
2511 dev->n_sectors = 0;
2512 dev->cylinders = 0;
2513 dev->heads = 0;
2514 dev->sectors = 0;
e18086d6 2515 dev->multi_count = 0;
208a9933 2516
1da177e4
LT
2517 /*
2518 * common ATA, ATAPI feature tests
2519 */
2520
ff8854b2 2521 /* find max transfer mode; for printk only */
1148c3a7 2522 xfer_mask = ata_id_xfermask(id);
1da177e4 2523
0dd4b21f
BP
2524 if (ata_msg_probe(ap))
2525 ata_dump_id(id);
1da177e4 2526
ef143d57
AL
2527 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2528 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2529 sizeof(fwrevbuf));
2530
2531 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2532 sizeof(modelbuf));
2533
1da177e4 2534 /* ATA-specific feature tests */
9162c657 2535 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
b352e57d 2536 if (ata_id_is_cfa(id)) {
62afe5d7
SS
2537 /* CPRM may make this media unusable */
2538 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
a9a79dfe
JP
2539 ata_dev_warn(dev,
2540 "supports DRM functions and may not be fully accessible\n");
b352e57d 2541 snprintf(revbuf, 7, "CFA");
ae8d4ee7 2542 } else {
2dcb407e 2543 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
ae8d4ee7
AC
2544 /* Warn the user if the device has TPM extensions */
2545 if (ata_id_has_tpm(id))
a9a79dfe
JP
2546 ata_dev_warn(dev,
2547 "supports DRM functions and may not be fully accessible\n");
ae8d4ee7 2548 }
b352e57d 2549
1148c3a7 2550 dev->n_sectors = ata_id_n_sectors(id);
2940740b 2551
e18086d6
ML
2552 /* get current R/W Multiple count setting */
2553 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2554 unsigned int max = dev->id[47] & 0xff;
2555 unsigned int cnt = dev->id[59] & 0xff;
2556 /* only recognize/allow powers of two here */
2557 if (is_power_of_2(max) && is_power_of_2(cnt))
2558 if (cnt <= max)
2559 dev->multi_count = cnt;
2560 }
3f64f565 2561
1148c3a7 2562 if (ata_id_has_lba(id)) {
4c2d721a 2563 const char *lba_desc;
388539f3 2564 char ncq_desc[24];
8bf62ece 2565
4c2d721a
TH
2566 lba_desc = "LBA";
2567 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 2568 if (ata_id_has_lba48(id)) {
8bf62ece 2569 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 2570 lba_desc = "LBA48";
6fc49adb
TH
2571
2572 if (dev->n_sectors >= (1UL << 28) &&
2573 ata_id_has_flush_ext(id))
2574 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 2575 }
8bf62ece 2576
a6e6ce8e 2577 /* config NCQ */
388539f3
SL
2578 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2579 if (rc)
2580 return rc;
a6e6ce8e 2581
8bf62ece 2582 /* print device info to dmesg */
3f64f565 2583 if (ata_msg_drv(ap) && print_info) {
a9a79dfe
JP
2584 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2585 revbuf, modelbuf, fwrevbuf,
2586 ata_mode_string(xfer_mask));
2587 ata_dev_info(dev,
2588 "%llu sectors, multi %u: %s %s\n",
f15a1daf 2589 (unsigned long long)dev->n_sectors,
3f64f565
EM
2590 dev->multi_count, lba_desc, ncq_desc);
2591 }
ffeae418 2592 } else {
8bf62ece
AL
2593 /* CHS */
2594
2595 /* Default translation */
1148c3a7
TH
2596 dev->cylinders = id[1];
2597 dev->heads = id[3];
2598 dev->sectors = id[6];
8bf62ece 2599
1148c3a7 2600 if (ata_id_current_chs_valid(id)) {
8bf62ece 2601 /* Current CHS translation is valid. */
1148c3a7
TH
2602 dev->cylinders = id[54];
2603 dev->heads = id[55];
2604 dev->sectors = id[56];
8bf62ece
AL
2605 }
2606
2607 /* print device info to dmesg */
3f64f565 2608 if (ata_msg_drv(ap) && print_info) {
a9a79dfe
JP
2609 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2610 revbuf, modelbuf, fwrevbuf,
2611 ata_mode_string(xfer_mask));
2612 ata_dev_info(dev,
2613 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2614 (unsigned long long)dev->n_sectors,
2615 dev->multi_count, dev->cylinders,
2616 dev->heads, dev->sectors);
3f64f565 2617 }
07f6f7d0
AL
2618 }
2619
803739d2
SH
2620 /* Check and mark DevSlp capability. Get DevSlp timing variables
2621 * from SATA Settings page of Identify Device Data Log.
65fe1f0f 2622 */
803739d2 2623 if (ata_id_has_devslp(dev->id)) {
8e725c7f 2624 u8 *sata_setting = ap->sector_buf;
803739d2
SH
2625 int i, j;
2626
2627 dev->flags |= ATA_DFLAG_DEVSLP;
65fe1f0f 2628 err_mask = ata_read_log_page(dev,
1d51d5f3 2629 ATA_LOG_IDENTIFY_DEVICE,
65fe1f0f 2630 ATA_LOG_SATA_SETTINGS,
803739d2 2631 sata_setting,
65fe1f0f
SH
2632 1);
2633 if (err_mask)
2634 ata_dev_dbg(dev,
2635 "failed to get Identify Device Data, Emask 0x%x\n",
2636 err_mask);
803739d2
SH
2637 else
2638 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2639 j = ATA_LOG_DEVSLP_OFFSET + i;
2640 dev->devslp_timing[i] = sata_setting[j];
2641 }
65fe1f0f 2642 }
e87fd28c 2643 ata_dev_config_sense_reporting(dev);
6d1003ae 2644 ata_dev_config_zac(dev);
818831c8 2645 ata_dev_config_trusted(dev);
b1ffbf85 2646 dev->cdb_len = 32;
1da177e4
LT
2647 }
2648
2649 /* ATAPI-specific feature tests */
2c13b7ce 2650 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2651 const char *cdb_intr_string = "";
2652 const char *atapi_an_string = "";
91163006 2653 const char *dma_dir_string = "";
7d77b247 2654 u32 sntf;
08a556db 2655
1148c3a7 2656 rc = atapi_cdb_len(id);
1da177e4 2657 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2658 if (ata_msg_warn(ap))
a9a79dfe 2659 ata_dev_warn(dev, "unsupported CDB len\n");
ffeae418 2660 rc = -EINVAL;
1da177e4
LT
2661 goto err_out_nosup;
2662 }
6e7846e9 2663 dev->cdb_len = (unsigned int) rc;
1da177e4 2664
7d77b247
TH
2665 /* Enable ATAPI AN if both the host and device have
2666 * the support. If PMP is attached, SNTF is required
2667 * to enable ATAPI AN to discern between PHY status
2668 * changed notifications and ATAPI ANs.
9f45cbd3 2669 */
e7ecd435
TH
2670 if (atapi_an &&
2671 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
071f44b1 2672 (!sata_pmp_attached(ap) ||
7d77b247 2673 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
9f45cbd3 2674 /* issue SET feature command to turn this on */
218f3d30
JG
2675 err_mask = ata_dev_set_feature(dev,
2676 SETFEATURES_SATA_ENABLE, SATA_AN);
854c73a2 2677 if (err_mask)
a9a79dfe
JP
2678 ata_dev_err(dev,
2679 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2680 err_mask);
854c73a2 2681 else {
9f45cbd3 2682 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2683 atapi_an_string = ", ATAPI AN";
2684 }
9f45cbd3
KCA
2685 }
2686
08a556db 2687 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2688 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2689 cdb_intr_string = ", CDB intr";
2690 }
312f7da2 2691
966fbe19 2692 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
91163006
TH
2693 dev->flags |= ATA_DFLAG_DMADIR;
2694 dma_dir_string = ", DMADIR";
2695 }
2696
afe75951 2697 if (ata_id_has_da(dev->id)) {
b1354cbb 2698 dev->flags |= ATA_DFLAG_DA;
afe75951
AL
2699 zpodd_init(dev);
2700 }
b1354cbb 2701
1da177e4 2702 /* print device info to dmesg */
5afc8142 2703 if (ata_msg_drv(ap) && print_info)
a9a79dfe
JP
2704 ata_dev_info(dev,
2705 "ATAPI: %s, %s, max %s%s%s%s\n",
2706 modelbuf, fwrevbuf,
2707 ata_mode_string(xfer_mask),
2708 cdb_intr_string, atapi_an_string,
2709 dma_dir_string);
1da177e4
LT
2710 }
2711
914ed354
TH
2712 /* determine max_sectors */
2713 dev->max_sectors = ATA_MAX_SECTORS;
2714 if (dev->flags & ATA_DFLAG_LBA48)
2715 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2716
c5038fc0
AC
2717 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2718 200 sectors */
3373efd8 2719 if (ata_dev_knobble(dev)) {
5afc8142 2720 if (ata_msg_drv(ap) && print_info)
a9a79dfe 2721 ata_dev_info(dev, "applying bridge limits\n");
5a529139 2722 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2723 dev->max_sectors = ATA_MAX_SECTORS;
2724 }
2725
f8d8e579 2726 if ((dev->class == ATA_DEV_ATAPI) &&
f442cd86 2727 (atapi_command_packet_set(id) == TYPE_TAPE)) {
f8d8e579 2728 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
f442cd86
AL
2729 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2730 }
f8d8e579 2731
75683fe7 2732 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2733 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2734 dev->max_sectors);
18d6e9d5 2735
af34d637
DM
2736 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2737 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2738 dev->max_sectors);
2739
a32450e1
SH
2740 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2741 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2742
4b2f3ede 2743 if (ap->ops->dev_config)
cd0d3bbc 2744 ap->ops->dev_config(dev);
4b2f3ede 2745
c5038fc0
AC
2746 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2747 /* Let the user know. We don't want to disallow opens for
2748 rescue purposes, or in case the vendor is just a blithering
2749 idiot. Do this after the dev_config call as some controllers
2750 with buggy firmware may want to avoid reporting false device
2751 bugs */
2752
2753 if (print_info) {
a9a79dfe 2754 ata_dev_warn(dev,
c5038fc0 2755"Drive reports diagnostics failure. This may indicate a drive\n");
a9a79dfe 2756 ata_dev_warn(dev,
c5038fc0
AC
2757"fault or invalid emulation. Contact drive vendor for information.\n");
2758 }
2759 }
2760
ac70a964 2761 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
a9a79dfe
JP
2762 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2763 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
ac70a964
TH
2764 }
2765
ffeae418 2766 return 0;
1da177e4
LT
2767
2768err_out_nosup:
0dd4b21f 2769 if (ata_msg_probe(ap))
a9a79dfe 2770 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
ffeae418 2771 return rc;
1da177e4
LT
2772}
2773
be0d18df 2774/**
2e41e8e6 2775 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2776 * @ap: port
2777 *
2e41e8e6 2778 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2779 * detection.
2780 */
2781
2782int ata_cable_40wire(struct ata_port *ap)
2783{
2784 return ATA_CBL_PATA40;
2785}
2786
2787/**
2e41e8e6 2788 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2789 * @ap: port
2790 *
2e41e8e6 2791 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2792 * detection.
2793 */
2794
2795int ata_cable_80wire(struct ata_port *ap)
2796{
2797 return ATA_CBL_PATA80;
2798}
2799
2800/**
2801 * ata_cable_unknown - return unknown PATA cable.
2802 * @ap: port
2803 *
2804 * Helper method for drivers which have no PATA cable detection.
2805 */
2806
2807int ata_cable_unknown(struct ata_port *ap)
2808{
2809 return ATA_CBL_PATA_UNK;
2810}
2811
c88f90c3
TH
2812/**
2813 * ata_cable_ignore - return ignored PATA cable.
2814 * @ap: port
2815 *
2816 * Helper method for drivers which don't use cable type to limit
2817 * transfer mode.
2818 */
2819int ata_cable_ignore(struct ata_port *ap)
2820{
2821 return ATA_CBL_PATA_IGN;
2822}
2823
be0d18df
AC
2824/**
2825 * ata_cable_sata - return SATA cable type
2826 * @ap: port
2827 *
2828 * Helper method for drivers which have SATA cables
2829 */
2830
2831int ata_cable_sata(struct ata_port *ap)
2832{
2833 return ATA_CBL_SATA;
2834}
2835
1da177e4
LT
2836/**
2837 * ata_bus_probe - Reset and probe ATA bus
2838 * @ap: Bus to probe
2839 *
0cba632b
JG
2840 * Master ATA bus probing function. Initiates a hardware-dependent
2841 * bus reset, then attempts to identify any devices found on
2842 * the bus.
2843 *
1da177e4 2844 * LOCKING:
0cba632b 2845 * PCI/etc. bus probe sem.
1da177e4
LT
2846 *
2847 * RETURNS:
96072e69 2848 * Zero on success, negative errno otherwise.
1da177e4
LT
2849 */
2850
80289167 2851int ata_bus_probe(struct ata_port *ap)
1da177e4 2852{
28ca5c57 2853 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2854 int tries[ATA_MAX_DEVICES];
f58229f8 2855 int rc;
e82cbdb9 2856 struct ata_device *dev;
1da177e4 2857
1eca4365 2858 ata_for_each_dev(dev, &ap->link, ALL)
f58229f8 2859 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2860
2861 retry:
1eca4365 2862 ata_for_each_dev(dev, &ap->link, ALL) {
cdeab114
TH
2863 /* If we issue an SRST then an ATA drive (not ATAPI)
2864 * may change configuration and be in PIO0 timing. If
2865 * we do a hard reset (or are coming from power on)
2866 * this is true for ATA or ATAPI. Until we've set a
2867 * suitable controller mode we should not touch the
2868 * bus as we may be talking too fast.
2869 */
2870 dev->pio_mode = XFER_PIO_0;
5416912a 2871 dev->dma_mode = 0xff;
cdeab114
TH
2872
2873 /* If the controller has a pio mode setup function
2874 * then use it to set the chipset to rights. Don't
2875 * touch the DMA setup as that will be dealt with when
2876 * configuring devices.
2877 */
2878 if (ap->ops->set_piomode)
2879 ap->ops->set_piomode(ap, dev);
2880 }
2881
2044470c 2882 /* reset and determine device classes */
52783c5d 2883 ap->ops->phy_reset(ap);
2061a47a 2884
1eca4365 2885 ata_for_each_dev(dev, &ap->link, ALL) {
3e4ec344 2886 if (dev->class != ATA_DEV_UNKNOWN)
52783c5d
TH
2887 classes[dev->devno] = dev->class;
2888 else
2889 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2890
52783c5d 2891 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2892 }
1da177e4 2893
f31f0cc2
JG
2894 /* read IDENTIFY page and configure devices. We have to do the identify
2895 specific sequence bass-ackwards so that PDIAG- is released by
2896 the slave device */
2897
1eca4365 2898 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
f58229f8
TH
2899 if (tries[dev->devno])
2900 dev->class = classes[dev->devno];
ffeae418 2901
14d2bac1 2902 if (!ata_dev_enabled(dev))
ffeae418 2903 continue;
ffeae418 2904
bff04647
TH
2905 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2906 dev->id);
14d2bac1
TH
2907 if (rc)
2908 goto fail;
f31f0cc2
JG
2909 }
2910
be0d18df
AC
2911 /* Now ask for the cable type as PDIAG- should have been released */
2912 if (ap->ops->cable_detect)
2913 ap->cbl = ap->ops->cable_detect(ap);
2914
1eca4365
TH
2915 /* We may have SATA bridge glue hiding here irrespective of
2916 * the reported cable types and sensed types. When SATA
2917 * drives indicate we have a bridge, we don't know which end
2918 * of the link the bridge is which is a problem.
2919 */
2920 ata_for_each_dev(dev, &ap->link, ENABLED)
614fe29b
AC
2921 if (ata_id_is_sata(dev->id))
2922 ap->cbl = ATA_CBL_SATA;
614fe29b 2923
f31f0cc2
JG
2924 /* After the identify sequence we can now set up the devices. We do
2925 this in the normal order so that the user doesn't get confused */
2926
1eca4365 2927 ata_for_each_dev(dev, &ap->link, ENABLED) {
9af5c9c9 2928 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2929 rc = ata_dev_configure(dev);
9af5c9c9 2930 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2931 if (rc)
2932 goto fail;
1da177e4
LT
2933 }
2934
e82cbdb9 2935 /* configure transfer mode */
0260731f 2936 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2937 if (rc)
51713d35 2938 goto fail;
1da177e4 2939
1eca4365
TH
2940 ata_for_each_dev(dev, &ap->link, ENABLED)
2941 return 0;
1da177e4 2942
96072e69 2943 return -ENODEV;
14d2bac1
TH
2944
2945 fail:
4ae72a1e
TH
2946 tries[dev->devno]--;
2947
14d2bac1
TH
2948 switch (rc) {
2949 case -EINVAL:
4ae72a1e 2950 /* eeek, something went very wrong, give up */
14d2bac1
TH
2951 tries[dev->devno] = 0;
2952 break;
4ae72a1e
TH
2953
2954 case -ENODEV:
2955 /* give it just one more chance */
2956 tries[dev->devno] = min(tries[dev->devno], 1);
05b83605 2957 /* fall through */
14d2bac1 2958 case -EIO:
4ae72a1e
TH
2959 if (tries[dev->devno] == 1) {
2960 /* This is the last chance, better to slow
2961 * down than lose it.
2962 */
a07d499b 2963 sata_down_spd_limit(&ap->link, 0);
4ae72a1e
TH
2964 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2965 }
14d2bac1
TH
2966 }
2967
4ae72a1e 2968 if (!tries[dev->devno])
3373efd8 2969 ata_dev_disable(dev);
ec573755 2970
14d2bac1 2971 goto retry;
1da177e4
LT
2972}
2973
3be680b7
TH
2974/**
2975 * sata_print_link_status - Print SATA link status
936fd732 2976 * @link: SATA link to printk link status about
3be680b7
TH
2977 *
2978 * This function prints link speed and status of a SATA link.
2979 *
2980 * LOCKING:
2981 * None.
2982 */
6bdb4fc9 2983static void sata_print_link_status(struct ata_link *link)
3be680b7 2984{
6d5f9732 2985 u32 sstatus, scontrol, tmp;
3be680b7 2986
936fd732 2987 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2988 return;
936fd732 2989 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2990
b1c72916 2991 if (ata_phys_link_online(link)) {
3be680b7 2992 tmp = (sstatus >> 4) & 0xf;
a9a79dfe
JP
2993 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2994 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2995 } else {
a9a79dfe
JP
2996 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2997 sstatus, scontrol);
3be680b7
TH
2998 }
2999}
3000
ebdfca6e
AC
3001/**
3002 * ata_dev_pair - return other device on cable
ebdfca6e
AC
3003 * @adev: device
3004 *
3005 * Obtain the other device on the same cable, or if none is
3006 * present NULL is returned
3007 */
2e9edbf8 3008
3373efd8 3009struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 3010{
9af5c9c9
TH
3011 struct ata_link *link = adev->link;
3012 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 3013 if (!ata_dev_enabled(pair))
ebdfca6e
AC
3014 return NULL;
3015 return pair;
3016}
3017
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/*
	 * Mask off all speeds higher than or equal to the current one.  At
	 * this point, if current SPD is not available and we previously
	 * recorded the link speed from SStatus, the driver has already
	 * masked off the highest bit so mask should already be 1 or 0.
	 * Otherwise, we should not force 1.5Gbps on a link where we have
	 * not previously recorded speed from SStatus.  Just return in this
	 * case.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		return -EINVAL;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			/* @spd_limit below all supported speeds: keep only
			 * the lowest supported one.
			 */
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
3097
/*
 * __sata_set_spd_needed - helper for sata_set_spd{,_needed}()
 * @link: link to check
 * @scontrol: in/out - current SControl value; SPD field is updated in place
 *
 * Computes the target SPD field from @link->sata_spd_limit, writes it
 * into *@scontrol and returns non-zero iff the field actually changed
 * (i.e. a hardreset would be needed to apply it).
 */
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	/* UINT_MAX means "no limit": program SPD = 0 (no speed cap). */
	if (limit == UINT_MAX)
		target = 0;
	else
		target = fls(limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return spd != target;
}
3122
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
static int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	/* If SControl can't be read, conservatively report "needed". */
	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}
3147
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@link: Link to set SATA spd for
 *
 *	Set SATA spd of @link according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* __sata_set_spd_needed() updates scontrol's SPD field in place */
	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
3177
452503f9
AC
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

/* Columns: mode, setup, act8b, rec8b, cyc8b, active, recover,
 * dmack_hold, cycle, udma -- all in nanoseconds.  Table is sorted by
 * mode so ata_timing_find_mode() can scan it linearly.
 */
static const struct ata_timing ata_timing[] = {
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0,  960,   0 }, */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 0,  600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 0,  383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 0,  240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 0,  180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 0,  120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 0,  100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20, 0,   80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 50,  960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 30,  480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 20,  240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 20,  480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50,  5,  150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25,  5,  120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25,  5,  100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  5,   80,   0 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,  0,    0, 150 }, */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,  0,    0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,  0,    0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,  0,    0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,  0,    0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,  0,    0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,  0,    0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,  0,    0,  15 },

	{ 0xFF }	/* sentinel terminating the linear scan */
};
3221
/* ENOUGH: round v up to a whole number of 'unit' periods (ceiling division).
 * EZ: same, after converting v from ns to ps; 0 stays 0 ("not specified").
 */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
#define EZ(v, unit)		((v)?ENOUGH(((v) * 1000), unit):0)
452503f9
AC
3224
/*
 * ata_timing_quantize - convert nanosecond timings to bus clock counts
 * @t: input timing (nanoseconds)
 * @q: output timing (clock counts); may alias @t
 * @T: bus clock period for PIO/DMA fields (picoseconds per EZ scaling)
 * @UT: bus clock period for the UDMA field
 *
 * Each field is rounded up so the programmed timing is never faster
 * than requested; zero fields stay zero.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup	= EZ(t->setup,      T);
	q->act8b	= EZ(t->act8b,      T);
	q->rec8b	= EZ(t->rec8b,      T);
	q->cyc8b	= EZ(t->cyc8b,      T);
	q->active	= EZ(t->active,     T);
	q->recover	= EZ(t->recover,    T);
	q->dmack_hold	= EZ(t->dmack_hold, T);
	q->cycle	= EZ(t->cycle,      T);
	q->udma		= EZ(t->udma,      UT);
}
3237
/*
 * ata_timing_merge - combine two timings field-by-field
 * @a, @b: input timings
 * @m: result; may alias @a or @b
 * @what: ATA_TIMING_* bitmask selecting which fields to merge
 *
 * Each selected field of @m becomes the max of the corresponding
 * fields of @a and @b, i.e. the slower (safer) of the two timings.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
3251
6357357c 3252const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 3253{
70cd071e
TH
3254 const struct ata_timing *t = ata_timing;
3255
3256 while (xfer_mode > t->mode)
3257 t++;
452503f9 3258
70cd071e
TH
3259 if (xfer_mode == t->mode)
3260 return t;
cd705d5a
BP
3261
3262 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3263 __func__, xfer_mode);
3264
70cd071e 3265 return NULL;
452503f9
AC
3266}
3267
/*
 * ata_timing_compute - compute final bus timings for @adev at @speed
 * @adev: target device (its IDENTIFY data and pio_mode are consulted)
 * @speed: XFER_* mode to compute timings for
 * @t: output timing, in bus clock counts
 * @T: bus clock period for PIO/DMA fields
 * @UT: bus clock period for the UDMA field
 *
 * Returns 0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse once for the device's PIO mode and merge */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3343
a0f79b92
TH
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the slowest mode of the requested transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk faster modes of the same type while they still fit @cycle */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3394
cf176e1a
TH
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is a flag OR'd into the selector */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest remaining DMA mode, UDMA first */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to speeds safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* refuse to lose PIO entirely or to make no change at all */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3483
/*
 * ata_dev_set_mode - issue SET FEATURES - XFER MODE and revalidate @dev
 *
 * Programs the device side of the transfer mode previously chosen in
 * dev->xfer_mode/xfer_shift, then revalidates the device.  A long
 * list of quirky devices legitimately error the SET_XFERMODE command,
 * so device errors are ignored (ign_dev_err) in the known-benign
 * cases below.  Returns 0 on success, negative errno on failure.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER horkage only applies to genuine SATA devices */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	/* non-device errors are always fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}
	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
		ata_dev_info(dev, "configured for %s%s\n",
			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
			     dev_err_whine);

	return 0;

 fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3566
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		/* pick the DMA blacklist class for this device type */
		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status.  If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
3665
aa2731ad
TH
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which 0xff timeout to use, read comment in libata.h */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from master.  On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
		 * offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* whine once if the wait drags on and time remains */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3752
/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	/* give the link a grace period before polling readiness */
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
3774
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is the sooner of @deadline and the timeout */
	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* keep only the DET field */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3844
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * controllers (ata_piix SIDPR).  Make sure DET actually is
	 * cleared.
	 */
	do {
		/* DET = 0 (no action), IPM = 3 (no LPM transitions) */
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;
		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing.
		 */
		if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
			ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			      scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL here just means SError isn't implemented; not fatal */
	return rc != -EINVAL ? rc : 0;
}
3910
1152b261
TH
/**
 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 *	@link: ATA link to manipulate SControl for
 *	@policy: LPM policy to configure
 *	@spm_wakeup: initiate LPM transition to active state
 *
 *	Manipulate the IPM field of the SControl register of @link
 *	according to @policy.  If @policy is ATA_LPM_MAX_POWER and
 *	@spm_wakeup is %true, the SPM field is manipulated to wake up
 *	the link.  This function also clears PHYRDY_CHG before
 *	returning.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x7 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x6 << 8);
		break;
	case ATA_LPM_MED_POWER_WITH_DIPM:
	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* no restrictions on LPM transitions */
			scontrol &= ~(0x7 << 8);
		else {
			/* empty port, power off */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
3984
f5914a46 3985/**
0aa1113d 3986 * ata_std_prereset - prepare for reset
cc0680a5 3987 * @link: ATA link to be reset
d4b2bab4 3988 * @deadline: deadline jiffies for the operation
f5914a46 3989 *
cc0680a5 3990 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3991 * prereset makes libata abort whole reset sequence and give up
3992 * that port, so prereset should be best-effort. It does its
3993 * best to prepare for reset sequence but if things go wrong, it
3994 * should just whine, not fail.
f5914a46
TH
3995 *
3996 * LOCKING:
3997 * Kernel thread context (may sleep)
3998 *
3999 * RETURNS:
4000 * 0 on success, -errno otherwise.
4001 */
0aa1113d 4002int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 4003{
cc0680a5 4004 struct ata_port *ap = link->ap;
936fd732 4005 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 4006 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
4007 int rc;
4008
f5914a46
TH
4009 /* if we're about to do hardreset, nothing more to do */
4010 if (ehc->i.action & ATA_EH_HARDRESET)
4011 return 0;
4012
936fd732 4013 /* if SATA, resume link */
a16abc0b 4014 if (ap->flags & ATA_FLAG_SATA) {
936fd732 4015 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
4016 /* whine about phy resume failure but proceed */
4017 if (rc && rc != -EOPNOTSUPP)
a9a79dfe
JP
4018 ata_link_warn(link,
4019 "failed to resume link for reset (errno=%d)\n",
4020 rc);
f5914a46
TH
4021 }
4022
45db2f6c 4023 /* no point in trying softreset on offline link */
b1c72916 4024 if (ata_phys_link_offline(link))
45db2f6c
TH
4025 ehc->i.action &= ~ATA_EH_SOFTRESET;
4026
f5914a46
TH
4027 return 0;
4028}
4029
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to check link readiness
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *	After hardreset, link readiness is waited upon using
 *	ata_wait_ready() if @check_ready is specified.  LLDs are
 *	allowed to not specify @check_ready and wait itself after this
 *	function returns.  Device classification is LLD's
 *	responsibility.
 *
 *	*@online is set to one iff reset succeeded and @link is online
 *	after reset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	/* pessimistic default; set true only once the link proves online */
	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* keep SPD (bits 7:4), set DET (bits 3:0) = 4 to take
		 * the phy offline while the speed limit is changed */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* keep SPD, set DET = 1 to start interface initialization
	 * (COMRESET) */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for
		 * ATA_TMOUT_PMP_SRST_WAIT.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			/* never wait past the caller's own deadline */
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		/* signal the caller that follow-up SRST is required */
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
4141
57c9efdf
TH
4142/**
4143 * sata_std_hardreset - COMRESET w/o waiting or classification
4144 * @link: link to reset
4145 * @class: resulting class of attached device
4146 * @deadline: deadline jiffies for the operation
4147 *
4148 * Standard SATA COMRESET w/o waiting or classification.
4149 *
4150 * LOCKING:
4151 * Kernel thread context (may sleep)
4152 *
4153 * RETURNS:
4154 * 0 if link offline, -EAGAIN if link online, -errno on errors.
4155 */
4156int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4157 unsigned long deadline)
4158{
4159 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4160 bool online;
4161 int rc;
4162
4163 /* do hardreset */
4164 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
57c9efdf
TH
4165 return online ? -EAGAIN : rc;
4166}
4167
c2bd5804 4168/**
203c75b8 4169 * ata_std_postreset - standard postreset callback
cc0680a5 4170 * @link: the target ata_link
c2bd5804
TH
4171 * @classes: classes of attached devices
4172 *
4173 * This function is invoked after a successful reset. Note that
4174 * the device might have been reset more than once using
4175 * different reset methods before postreset is invoked.
c2bd5804 4176 *
c2bd5804
TH
4177 * LOCKING:
4178 * Kernel thread context (may sleep)
4179 */
203c75b8 4180void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 4181{
f046519f
TH
4182 u32 serror;
4183
c2bd5804
TH
4184 DPRINTK("ENTER\n");
4185
f046519f
TH
4186 /* reset complete, clear SError */
4187 if (!sata_scr_read(link, SCR_ERROR, &serror))
4188 sata_scr_write(link, SCR_ERROR, serror);
4189
c2bd5804 4190 /* print link status */
936fd732 4191 sata_print_link_status(link);
c2bd5804 4192
c2bd5804
TH
4193 DPRINTK("EXIT\n");
4194}
4195
623a3128
TH
4196/**
4197 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
4198 * @dev: device to compare against
4199 * @new_class: class of the new device
4200 * @new_id: IDENTIFY page of the new device
4201 *
4202 * Compare @new_class and @new_id against @dev and determine
4203 * whether @dev is the device indicated by @new_class and
4204 * @new_id.
4205 *
4206 * LOCKING:
4207 * None.
4208 *
4209 * RETURNS:
4210 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4211 */
3373efd8
TH
4212static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4213 const u16 *new_id)
623a3128
TH
4214{
4215 const u16 *old_id = dev->id;
a0cf733b
TH
4216 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4217 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
4218
4219 if (dev->class != new_class) {
a9a79dfe
JP
4220 ata_dev_info(dev, "class mismatch %d != %d\n",
4221 dev->class, new_class);
623a3128
TH
4222 return 0;
4223 }
4224
a0cf733b
TH
4225 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4226 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4227 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4228 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
4229
4230 if (strcmp(model[0], model[1])) {
a9a79dfe
JP
4231 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4232 model[0], model[1]);
623a3128
TH
4233 return 0;
4234 }
4235
4236 if (strcmp(serial[0], serial[1])) {
a9a79dfe
JP
4237 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4238 serial[0], serial[1]);
623a3128
TH
4239 return 0;
4240 }
4241
623a3128
TH
4242 return 1;
4243}
4244
4245/**
fe30911b 4246 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 4247 * @dev: target ATA device
bff04647 4248 * @readid_flags: read ID flags
623a3128
TH
4249 *
4250 * Re-read IDENTIFY page and make sure @dev is still attached to
4251 * the port.
4252 *
4253 * LOCKING:
4254 * Kernel thread context (may sleep)
4255 *
4256 * RETURNS:
4257 * 0 on success, negative errno otherwise
4258 */
fe30911b 4259int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 4260{
5eb45c02 4261 unsigned int class = dev->class;
9af5c9c9 4262 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
4263 int rc;
4264
fe635c7e 4265 /* read ID data */
bff04647 4266 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 4267 if (rc)
fe30911b 4268 return rc;
623a3128
TH
4269
4270 /* is the device still there? */
fe30911b
TH
4271 if (!ata_dev_same_device(dev, class, id))
4272 return -ENODEV;
623a3128 4273
fe635c7e 4274 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4275 return 0;
4276}
4277
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	/* remember the pre-revalidation sizes so changes can be detected */
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_ZAC &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed; only meaningful for ATA disks
	 * that had a valid non-zero size before */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * Something could have caused HPA to be unlocked
	 * involuntarily.  If n_native_sectors hasn't changed and the
	 * new size matches it, keep the device.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/*
	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
	 * unlocking HPA in those cases.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA on the next EH pass */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4374
struct ata_blacklist_entry {
	/* glob pattern matched against the IDENTIFY model string */
	const char *model_num;
	/* optional glob for the firmware revision; NULL matches any */
	const char *model_rev;
	/* ATA_HORKAGE_* flags applied when the entry matches */
	unsigned long horkage;
};
4380
/*
 * Per-device quirk table.  Scanned by ata_dev_blacklisted(); the first
 * entry whose model (and, when non-NULL, firmware revision) glob matches
 * the device's IDENTIFY strings supplies the horkage flags.
 */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },

	/*
	 * Causes silent data corruption with higher max sects.
	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
	 */
	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },

	/*
	 * These devices time out with higher max sects.
	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
	 */
	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* drives which fail FPDMA_AA activation (some may freeze afterwards)
	   the ST disks also have LPM issues */
	{ "ST1000LM024 HN-M101MBB", NULL,	ATA_HORKAGE_BROKEN_FPDMA_AA |
						ATA_HORKAGE_NOLPM, },
	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },

	/* Some Sandisk SSDs lock up hard with NCQ enabled.  Reported on
	   SD7SN6S256G and SD8SN8U256G */
	{ "SanDisk SD[78]SN*G",	NULL,		ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IOs on the area */
	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

	/* Crucial BX100 SSD 500GB has broken LPM support */
	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },

	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },
	/* 512GB MX100 with newer firmware has only LPM issues */
	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },

	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },
	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM, },

	/* These specific Samsung models/firmware-revs do not handle LPM well */
	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },

	/* devices that don't properly handle queued TRIM commands */
	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },

	/* devices that don't properly handle TRIM commands */
	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },

	/*
	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
	 * (Return Zero After Trim) flags in the ATA Command Set are
	 * unreliable in the sense that they only define what happens if
	 * the device successfully executed the DSM TRIM command. TRIM
	 * is only advisory, however, and the device is free to silently
	 * ignore all or parts of the request.
	 *
	 * Whitelist drives that are known to reliably return zeroes
	 * after TRIM.
	 */

	/*
	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
	 * that model before whitelisting all other intel SSDs.
	 */
	{ "INTEL*SSDSC2MH*",		NULL,	0, },

	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "INTEL*SSD*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },

	/*
	 * Some WD SATA-I drives spin up and down erratically when the link
	 * is put into the slumber mode.  We don't have full list of the
	 * affected devices.  Disable LPM if the device matches one of the
	 * known prefixes and is SATA-1.  As a side effect LPM partial is
	 * lost too.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
	 */
	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },

	/* End Marker */
	{ }
};
2e9edbf8 4614
75683fe7 4615static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4616{
8bfa79fc
TH
4617 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4618 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4619 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4620
8bfa79fc
TH
4621 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4622 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4623
6919a0a6 4624 while (ad->model_num) {
1c402799 4625 if (glob_match(ad->model_num, model_num)) {
6919a0a6
AC
4626 if (ad->model_rev == NULL)
4627 return ad->horkage;
1c402799 4628 if (glob_match(ad->model_rev, model_rev))
6919a0a6 4629 return ad->horkage;
f4b15fef 4630 }
6919a0a6 4631 ad++;
f4b15fef 4632 }
1da177e4
LT
4633 return 0;
4634}
4635
6919a0a6
AC
4636static int ata_dma_blacklisted(const struct ata_device *dev)
4637{
4638 /* We don't support polling DMA.
4639 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4640 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4641 */
9af5c9c9 4642 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4643 (dev->flags & ATA_DFLAG_CDB_INTR))
4644 return 1;
75683fe7 4645 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4646}
4647
6bbfd53d
AC
4648/**
4649 * ata_is_40wire - check drive side detection
4650 * @dev: device
4651 *
4652 * Perform drive side detection decoding, allowing for device vendors
4653 * who can't follow the documentation.
4654 */
4655
4656static int ata_is_40wire(struct ata_device *dev)
4657{
4658 if (dev->horkage & ATA_HORKAGE_IVB)
4659 return ata_drive_40wire_relaxed(dev->id);
4660 return ata_drive_40wire(dev->id);
4661}
4662
15a5551c
AC
4663/**
4664 * cable_is_40wire - 40/80/SATA decider
4665 * @ap: port to consider
4666 *
4667 * This function encapsulates the policy for speed management
4668 * in one place. At the moment we don't cache the result but
4669 * there is a good case for setting ap->cbl to the result when
4670 * we are called with unknown cables (and figuring out if it
4671 * impacts hotplug at all).
4672 *
4673 * Return 1 if the cable appears to be 40 wire.
4674 */
4675
4676static int cable_is_40wire(struct ata_port *ap)
4677{
4678 struct ata_link *link;
4679 struct ata_device *dev;
4680
4a9c7b33 4681 /* If the controller thinks we are 40 wire, we are. */
15a5551c
AC
4682 if (ap->cbl == ATA_CBL_PATA40)
4683 return 1;
4a9c7b33
TH
4684
4685 /* If the controller thinks we are 80 wire, we are. */
15a5551c
AC
4686 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4687 return 0;
4a9c7b33
TH
4688
4689 /* If the system is known to be 40 wire short cable (eg
4690 * laptop), then we allow 80 wire modes even if the drive
4691 * isn't sure.
4692 */
f792068e
AC
4693 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4694 return 0;
4a9c7b33
TH
4695
4696 /* If the controller doesn't know, we scan.
4697 *
4698 * Note: We look for all 40 wire detects at this point. Any
4699 * 80 wire detect is taken to be 80 wire cable because
4700 * - in many setups only the one drive (slave if present) will
4701 * give a valid detect
4702 * - if you have a non detect capable drive you don't want it
4703 * to colour the choice
4704 */
1eca4365
TH
4705 ata_for_each_link(link, ap, EDGE) {
4706 ata_for_each_dev(dev, link, ENABLED) {
4707 if (!ata_is_40wire(dev))
15a5551c
AC
4708 return 0;
4709 }
4710 }
4711 return 1;
4712}
4713
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available: intersect with the device's own masks
	 * and with what its IDENTIFY data claims to support */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	/* simplex hosts can run DMA for only one port at a time; drop
	 * DMA here if another port has already claimed it */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	/* ports without IORDY are limited to the non-IORDY PIO modes */
	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLD a final say via its optional mode filter */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	/* write the final masks back to the device */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4791
1da177e4
LT
4792/**
4793 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4794 * @dev: Device to which command will be sent
4795 *
780a87f7
JG
4796 * Issue SET FEATURES - XFER MODE command to device @dev
4797 * on port @ap.
4798 *
1da177e4 4799 * LOCKING:
0cba632b 4800 * PCI/etc. bus probe sem.
83206a29
TH
4801 *
4802 * RETURNS:
4803 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4804 */
4805
3373efd8 4806static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4807{
a0123703 4808 struct ata_taskfile tf;
83206a29 4809 unsigned int err_mask;
1da177e4
LT
4810
4811 /* set up set-features taskfile */
4812 DPRINTK("set features - xfer mode\n");
4813
464cf177
TH
4814 /* Some controllers and ATAPI devices show flaky interrupt
4815 * behavior after setting xfer mode. Use polling instead.
4816 */
3373efd8 4817 ata_tf_init(dev, &tf);
a0123703
TH
4818 tf.command = ATA_CMD_SET_FEATURES;
4819 tf.feature = SETFEATURES_XFER;
464cf177 4820 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703 4821 tf.protocol = ATA_PROT_NODATA;
b9f8ab2d 4822 /* If we are using IORDY we must send the mode setting command */
11b7becc
JG
4823 if (ata_pio_need_iordy(dev))
4824 tf.nsect = dev->xfer_mode;
b9f8ab2d
AC
4825 /* If the device has IORDY and the controller does not - turn it off */
4826 else if (ata_id_has_iordy(dev->id))
11b7becc 4827 tf.nsect = 0x01;
b9f8ab2d
AC
4828 else /* In the ancient relic department - skip all of this */
4829 return 0;
1da177e4 4830
d531be2c
MP
4831 /* On some disks, this command causes spin-up, so we need longer timeout */
4832 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
9f45cbd3
KCA
4833
4834 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4835 return err_mask;
4836}
1152b261 4837
9f45cbd3 4838/**
218f3d30 4839 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4840 * @dev: Device to which command will be sent
4841 * @enable: Whether to enable or disable the feature
218f3d30 4842 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4843 *
4844 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4845 * on port @ap with sector count
9f45cbd3
KCA
4846 *
4847 * LOCKING:
4848 * PCI/etc. bus probe sem.
4849 *
4850 * RETURNS:
4851 * 0 on success, AC_ERR_* mask otherwise.
4852 */
1152b261 4853unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
9f45cbd3
KCA
4854{
4855 struct ata_taskfile tf;
4856 unsigned int err_mask;
974e0a45 4857 unsigned long timeout = 0;
9f45cbd3
KCA
4858
4859 /* set up set-features taskfile */
4860 DPRINTK("set features - SATA features\n");
4861
4862 ata_tf_init(dev, &tf);
4863 tf.command = ATA_CMD_SET_FEATURES;
4864 tf.feature = enable;
4865 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4866 tf.protocol = ATA_PROT_NODATA;
218f3d30 4867 tf.nsect = feature;
9f45cbd3 4868
974e0a45
DLM
4869 if (enable == SETFEATURES_SPINUP)
4870 timeout = ata_probe_timeout ?
4871 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4872 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
1da177e4 4873
83206a29
TH
4874 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4875 return err_mask;
1da177e4 4876}
633de4cc 4877EXPORT_SYMBOL_GPL(ata_dev_set_feature);
1da177e4 4878
8bf62ece
AL
4879/**
4880 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4881 * @dev: Device to which command will be sent
e2a7f77a
RD
4882 * @heads: Number of heads (taskfile parameter)
4883 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4884 *
4885 * LOCKING:
6aff8f1f
TH
4886 * Kernel thread context (may sleep)
4887 *
4888 * RETURNS:
4889 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4890 */
3373efd8
TH
4891static unsigned int ata_dev_init_params(struct ata_device *dev,
4892 u16 heads, u16 sectors)
8bf62ece 4893{
a0123703 4894 struct ata_taskfile tf;
6aff8f1f 4895 unsigned int err_mask;
8bf62ece
AL
4896
4897 /* Number of sectors per track 1-255. Number of heads 1-16 */
4898 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4899 return AC_ERR_INVALID;
8bf62ece
AL
4900
4901 /* set up init dev params taskfile */
4902 DPRINTK("init dev params \n");
4903
3373efd8 4904 ata_tf_init(dev, &tf);
a0123703
TH
4905 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4906 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4907 tf.protocol = ATA_PROT_NODATA;
4908 tf.nsect = sectors;
4909 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4910
2b789108 4911 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4912 /* A clean abort indicates an original or just out of spec drive
4913 and we should continue as we issue the setup based on the
4914 drive reported working geometry */
4915 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4916 err_mask = 0;
8bf62ece 4917
6aff8f1f
TH
4918 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4919 return err_mask;
8bf62ece
AL
4920}
4921
1da177e4 4922/**
5895ef9a 4923 * atapi_check_dma - Check whether ATAPI DMA can be supported
1da177e4
LT
4924 * @qc: Metadata associated with taskfile to check
4925 *
780a87f7
JG
4926 * Allow low-level driver to filter ATA PACKET commands, returning
4927 * a status indicating whether or not it is OK to use DMA for the
4928 * supplied PACKET command.
4929 *
1da177e4 4930 * LOCKING:
624d5c51
TH
4931 * spin_lock_irqsave(host lock)
4932 *
4933 * RETURNS: 0 when ATAPI DMA can be used
4934 * nonzero otherwise
4935 */
5895ef9a 4936int atapi_check_dma(struct ata_queued_cmd *qc)
624d5c51
TH
4937{
4938 struct ata_port *ap = qc->ap;
71601958 4939
624d5c51
TH
4940 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4941 * few ATAPI devices choke on such DMA requests.
4942 */
6a87e42e
TH
4943 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4944 unlikely(qc->nbytes & 15))
624d5c51 4945 return 1;
e2cec771 4946
624d5c51
TH
4947 if (ap->ops->check_atapi_dma)
4948 return ap->ops->check_atapi_dma(qc);
e2cec771 4949
624d5c51
TH
4950 return 0;
4951}
1da177e4 4952
624d5c51
TH
4953/**
4954 * ata_std_qc_defer - Check whether a qc needs to be deferred
4955 * @qc: ATA command in question
4956 *
4957 * Non-NCQ commands cannot run with any other command, NCQ or
4958 * not. As upper layer only knows the queue depth, we are
4959 * responsible for maintaining exclusion. This function checks
4960 * whether a new command @qc can be issued.
4961 *
4962 * LOCKING:
4963 * spin_lock_irqsave(host lock)
4964 *
4965 * RETURNS:
4966 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4967 */
4968int ata_std_qc_defer(struct ata_queued_cmd *qc)
4969{
4970 struct ata_link *link = qc->dev->link;
e2cec771 4971
179b310a 4972 if (ata_is_ncq(qc->tf.protocol)) {
624d5c51
TH
4973 if (!ata_tag_valid(link->active_tag))
4974 return 0;
4975 } else {
4976 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4977 return 0;
4978 }
e2cec771 4979
624d5c51
TH
4980 return ATA_DEFER_LINK;
4981}

/* No-op ->qc_prep for drivers that need no per-command preparation. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
1da177e4 4984
624d5c51
TH
4985/**
4986 * ata_sg_init - Associate command with scatter-gather table.
4987 * @qc: Command to be associated
4988 * @sg: Scatter-gather table.
4989 * @n_elem: Number of elements in s/g table.
4990 *
4991 * Initialize the data-related elements of queued_cmd @qc
4992 * to point to a scatter-gather table @sg, containing @n_elem
4993 * elements.
4994 *
4995 * LOCKING:
4996 * spin_lock_irqsave(host lock)
4997 */
4998void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4999 unsigned int n_elem)
5000{
5001 qc->sg = sg;
5002 qc->n_elem = n_elem;
5003 qc->cursg = qc->sg;
5004}
bb5cb290 5005
2874d5ee
GU
5006#ifdef CONFIG_HAS_DMA
5007
5008/**
5009 * ata_sg_clean - Unmap DMA memory associated with command
5010 * @qc: Command containing DMA memory to be released
5011 *
5012 * Unmap all mapped DMA memory associated with this command.
5013 *
5014 * LOCKING:
5015 * spin_lock_irqsave(host lock)
5016 */
af27e01c 5017static void ata_sg_clean(struct ata_queued_cmd *qc)
2874d5ee
GU
5018{
5019 struct ata_port *ap = qc->ap;
5020 struct scatterlist *sg = qc->sg;
5021 int dir = qc->dma_dir;
5022
5023 WARN_ON_ONCE(sg == NULL);
5024
5025 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
5026
5027 if (qc->n_elem)
5028 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
5029
5030 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5031 qc->sg = NULL;
5032}
5033
624d5c51
TH
5034/**
5035 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
5036 * @qc: Command with scatter-gather table to be mapped.
5037 *
5038 * DMA-map the scatter-gather table associated with queued_cmd @qc.
5039 *
5040 * LOCKING:
5041 * spin_lock_irqsave(host lock)
5042 *
5043 * RETURNS:
5044 * Zero on success, negative on error.
5045 *
5046 */
5047static int ata_sg_setup(struct ata_queued_cmd *qc)
5048{
5049 struct ata_port *ap = qc->ap;
5050 unsigned int n_elem;
1da177e4 5051
624d5c51 5052 VPRINTK("ENTER, ata%u\n", ap->print_id);
e2cec771 5053
624d5c51
TH
5054 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
5055 if (n_elem < 1)
5056 return -1;
bb5cb290 5057
bb376673 5058 VPRINTK("%d sg elements mapped\n", n_elem);
5825627c 5059 qc->orig_n_elem = qc->n_elem;
624d5c51
TH
5060 qc->n_elem = n_elem;
5061 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4 5062
624d5c51 5063 return 0;
1da177e4
LT
5064}
5065
2874d5ee
GU
5066#else /* !CONFIG_HAS_DMA */
5067
5068static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
5069static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
5070
5071#endif /* !CONFIG_HAS_DMA */
5072
624d5c51
TH
5073/**
5074 * swap_buf_le16 - swap halves of 16-bit words in place
5075 * @buf: Buffer to swap
5076 * @buf_words: Number of 16-bit words in buffer.
5077 *
5078 * Swap halves of 16-bit words if needed to convert from
5079 * little-endian byte order to native cpu byte order, or
5080 * vice-versa.
5081 *
5082 * LOCKING:
5083 * Inherited from caller.
5084 */
5085void swap_buf_le16(u16 *buf, unsigned int buf_words)
8061f5f0 5086{
624d5c51
TH
5087#ifdef __BIG_ENDIAN
5088 unsigned int i;
8061f5f0 5089
624d5c51
TH
5090 for (i = 0; i < buf_words; i++)
5091 buf[i] = le16_to_cpu(buf[i]);
5092#endif /* __BIG_ENDIAN */
8061f5f0
TH
5093}
5094
8a8bc223 5095/**
98bd4be1
SL
5096 * ata_qc_new_init - Request an available ATA command, and initialize it
5097 * @dev: Device from whom we request an available command structure
38755e89 5098 * @tag: tag
1871ee13 5099 *
8a8bc223
TH
5100 * LOCKING:
5101 * None.
5102 */
5103
98bd4be1 5104struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
8a8bc223 5105{
98bd4be1 5106 struct ata_port *ap = dev->link->ap;
12cb5ce1 5107 struct ata_queued_cmd *qc;
8a8bc223
TH
5108
5109 /* no command while frozen */
5110 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5111 return NULL;
5112
98bd4be1 5113 /* libsas case */
5067c046 5114 if (ap->flags & ATA_FLAG_SAS_HOST) {
98bd4be1
SL
5115 tag = ata_sas_allocate_tag(ap);
5116 if (tag < 0)
5117 return NULL;
8a4aeec8 5118 }
8a8bc223 5119
98bd4be1 5120 qc = __ata_qc_from_tag(ap, tag);
5ac40790 5121 qc->tag = qc->hw_tag = tag;
98bd4be1
SL
5122 qc->scsicmd = NULL;
5123 qc->ap = ap;
5124 qc->dev = dev;
1da177e4 5125
98bd4be1 5126 ata_qc_reinit(qc);
1da177e4
LT
5127
5128 return qc;
5129}
5130
8a8bc223
TH
5131/**
5132 * ata_qc_free - free unused ata_queued_cmd
5133 * @qc: Command to complete
5134 *
5135 * Designed to free unused ata_queued_cmd object
5136 * in case something prevents using it.
5137 *
5138 * LOCKING:
5139 * spin_lock_irqsave(host lock)
5140 */
5141void ata_qc_free(struct ata_queued_cmd *qc)
5142{
a1104016 5143 struct ata_port *ap;
8a8bc223
TH
5144 unsigned int tag;
5145
efcb3cf7 5146 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
a1104016 5147 ap = qc->ap;
8a8bc223
TH
5148
5149 qc->flags = 0;
5150 tag = qc->tag;
28361c40 5151 if (ata_tag_valid(tag)) {
8a8bc223 5152 qc->tag = ATA_TAG_POISON;
5067c046 5153 if (ap->flags & ATA_FLAG_SAS_HOST)
98bd4be1 5154 ata_sas_free_tag(tag, ap);
8a8bc223
TH
5155 }
5156}
5157
76014427 5158void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5159{
a1104016
JL
5160 struct ata_port *ap;
5161 struct ata_link *link;
dedaf2b0 5162
efcb3cf7
TH
5163 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5164 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
a1104016
JL
5165 ap = qc->ap;
5166 link = qc->dev->link;
1da177e4
LT
5167
5168 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5169 ata_sg_clean(qc);
5170
7401abf2 5171 /* command should be marked inactive atomically with qc completion */
179b310a 5172 if (ata_is_ncq(qc->tf.protocol)) {
4e5b6260 5173 link->sactive &= ~(1 << qc->hw_tag);
da917d69
TH
5174 if (!link->sactive)
5175 ap->nr_active_links--;
5176 } else {
9af5c9c9 5177 link->active_tag = ATA_TAG_POISON;
da917d69
TH
5178 ap->nr_active_links--;
5179 }
5180
5181 /* clear exclusive status */
5182 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5183 ap->excl_link == link))
5184 ap->excl_link = NULL;
7401abf2 5185
3f3791d3
AL
5186 /* atapi: mark qc as inactive to prevent the interrupt handler
5187 * from completing the command twice later, before the error handler
5188 * is called. (when rc != 0 and atapi request sense is needed)
5189 */
5190 qc->flags &= ~ATA_QCFLAG_ACTIVE;
e3ed8939 5191 ap->qc_active &= ~(1ULL << qc->tag);
3f3791d3 5192
1da177e4 5193 /* call completion callback */
77853bf2 5194 qc->complete_fn(qc);
1da177e4
LT
5195}
5196
39599a53
TH
5197static void fill_result_tf(struct ata_queued_cmd *qc)
5198{
5199 struct ata_port *ap = qc->ap;
5200
39599a53 5201 qc->result_tf.flags = qc->tf.flags;
22183bf5 5202 ap->ops->qc_fill_rtf(qc);
39599a53
TH
5203}
5204
00115e0f
TH
5205static void ata_verify_xfer(struct ata_queued_cmd *qc)
5206{
5207 struct ata_device *dev = qc->dev;
5208
eb0effdf 5209 if (!ata_is_data(qc->tf.protocol))
00115e0f
TH
5210 return;
5211
5212 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5213 return;
5214
5215 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5216}
5217
f686bcb8
TH
5218/**
5219 * ata_qc_complete - Complete an active ATA command
5220 * @qc: Command to complete
f686bcb8 5221 *
1aadf5c3
TH
5222 * Indicate to the mid and upper layers that an ATA command has
5223 * completed, with either an ok or not-ok status.
5224 *
5225 * Refrain from calling this function multiple times when
5226 * successfully completing multiple NCQ commands.
5227 * ata_qc_complete_multiple() should be used instead, which will
5228 * properly update IRQ expect state.
f686bcb8
TH
5229 *
5230 * LOCKING:
cca3974e 5231 * spin_lock_irqsave(host lock)
f686bcb8
TH
5232 */
5233void ata_qc_complete(struct ata_queued_cmd *qc)
5234{
5235 struct ata_port *ap = qc->ap;
5236
eb25cb99 5237 /* Trigger the LED (if available) */
d1ed7c55 5238 ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
eb25cb99 5239
f686bcb8
TH
5240 /* XXX: New EH and old EH use different mechanisms to
5241 * synchronize EH with regular execution path.
5242 *
5243 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5244 * Normal execution path is responsible for not accessing a
5245 * failed qc. libata core enforces the rule by returning NULL
5246 * from ata_qc_from_tag() for failed qcs.
5247 *
5248 * Old EH depends on ata_qc_complete() nullifying completion
5249 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5250 * not synchronize with interrupt handler. Only PIO task is
5251 * taken care of.
5252 */
5253 if (ap->ops->error_handler) {
4dbfa39b
TH
5254 struct ata_device *dev = qc->dev;
5255 struct ata_eh_info *ehi = &dev->link->eh_info;
5256
f686bcb8
TH
5257 if (unlikely(qc->err_mask))
5258 qc->flags |= ATA_QCFLAG_FAILED;
5259
f08dc1ac
TH
5260 /*
5261 * Finish internal commands without any further processing
5262 * and always with the result TF filled.
5263 */
5264 if (unlikely(ata_tag_internal(qc->tag))) {
f4b31db9 5265 fill_result_tf(qc);
255c03d1 5266 trace_ata_qc_complete_internal(qc);
f08dc1ac
TH
5267 __ata_qc_complete(qc);
5268 return;
5269 }
f4b31db9 5270
f08dc1ac
TH
5271 /*
5272 * Non-internal qc has failed. Fill the result TF and
5273 * summon EH.
5274 */
5275 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5276 fill_result_tf(qc);
255c03d1 5277 trace_ata_qc_complete_failed(qc);
f08dc1ac 5278 ata_qc_schedule_eh(qc);
f4b31db9 5279 return;
f686bcb8
TH
5280 }
5281
4dc738ed
TH
5282 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5283
f686bcb8
TH
5284 /* read result TF if requested */
5285 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5286 fill_result_tf(qc);
f686bcb8 5287
255c03d1 5288 trace_ata_qc_complete_done(qc);
4dbfa39b
TH
5289 /* Some commands need post-processing after successful
5290 * completion.
5291 */
5292 switch (qc->tf.command) {
5293 case ATA_CMD_SET_FEATURES:
5294 if (qc->tf.feature != SETFEATURES_WC_ON &&
0c12735e
TY
5295 qc->tf.feature != SETFEATURES_WC_OFF &&
5296 qc->tf.feature != SETFEATURES_RA_ON &&
5297 qc->tf.feature != SETFEATURES_RA_OFF)
4dbfa39b
TH
5298 break;
5299 /* fall through */
5300 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5301 case ATA_CMD_SET_MULTI: /* multi_count changed */
5302 /* revalidate device */
5303 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5304 ata_port_schedule_eh(ap);
5305 break;
054a5fba
TH
5306
5307 case ATA_CMD_SLEEP:
5308 dev->flags |= ATA_DFLAG_SLEEPING;
5309 break;
4dbfa39b
TH
5310 }
5311
00115e0f
TH
5312 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5313 ata_verify_xfer(qc);
5314
f686bcb8
TH
5315 __ata_qc_complete(qc);
5316 } else {
5317 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5318 return;
5319
5320 /* read result TF if failed or requested */
5321 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5322 fill_result_tf(qc);
f686bcb8
TH
5323
5324 __ata_qc_complete(qc);
5325 }
5326}
5327
dedaf2b0
TH
5328/**
5329 * ata_qc_complete_multiple - Complete multiple qcs successfully
5330 * @ap: port in question
5331 * @qc_active: new qc_active mask
dedaf2b0
TH
5332 *
5333 * Complete in-flight commands. This functions is meant to be
5334 * called from low-level driver's interrupt routine to complete
5335 * requests normally. ap->qc_active and @qc_active is compared
5336 * and commands are completed accordingly.
5337 *
1aadf5c3
TH
5338 * Always use this function when completing multiple NCQ commands
5339 * from IRQ handlers instead of calling ata_qc_complete()
5340 * multiple times to keep IRQ expect status properly in sync.
5341 *
dedaf2b0 5342 * LOCKING:
cca3974e 5343 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5344 *
5345 * RETURNS:
5346 * Number of completed commands on success, -errno otherwise.
5347 */
e3ed8939 5348int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
dedaf2b0 5349{
7ce5c8cd 5350 u64 done_mask, ap_qc_active = ap->qc_active;
dedaf2b0 5351 int nr_done = 0;
dedaf2b0 5352
7ce5c8cd
JA
5353 /*
5354 * If the internal tag is set on ap->qc_active, then we care about
5355 * bit0 on the passed in qc_active mask. Move that bit up to match
5356 * the internal tag.
5357 */
5358 if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
5359 qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
5360 qc_active ^= qc_active & 0x01;
5361 }
5362
5363 done_mask = ap_qc_active ^ qc_active;
dedaf2b0
TH
5364
5365 if (unlikely(done_mask & qc_active)) {
e3ed8939 5366 ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
a9a79dfe 5367 ap->qc_active, qc_active);
dedaf2b0
TH
5368 return -EINVAL;
5369 }
5370
43768180 5371 while (done_mask) {
dedaf2b0 5372 struct ata_queued_cmd *qc;
e3ed8939 5373 unsigned int tag = __ffs64(done_mask);
dedaf2b0 5374
43768180
JA
5375 qc = ata_qc_from_tag(ap, tag);
5376 if (qc) {
dedaf2b0
TH
5377 ata_qc_complete(qc);
5378 nr_done++;
5379 }
e3ed8939 5380 done_mask &= ~(1ULL << tag);
dedaf2b0
TH
5381 }
5382
5383 return nr_done;
5384}
5385
1da177e4
LT
5386/**
5387 * ata_qc_issue - issue taskfile to device
5388 * @qc: command to issue to device
5389 *
5390 * Prepare an ATA command to submission to device.
5391 * This includes mapping the data into a DMA-able
5392 * area, filling in the S/G table, and finally
5393 * writing the taskfile to hardware, starting the command.
5394 *
5395 * LOCKING:
cca3974e 5396 * spin_lock_irqsave(host lock)
1da177e4 5397 */
8e0e694a 5398void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5399{
5400 struct ata_port *ap = qc->ap;
9af5c9c9 5401 struct ata_link *link = qc->dev->link;
405e66b3 5402 u8 prot = qc->tf.protocol;
1da177e4 5403
dedaf2b0
TH
5404 /* Make sure only one non-NCQ command is outstanding. The
5405 * check is skipped for old EH because it reuses active qc to
5406 * request ATAPI sense.
5407 */
efcb3cf7 5408 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0 5409
1973a023 5410 if (ata_is_ncq(prot)) {
4e5b6260 5411 WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
da917d69
TH
5412
5413 if (!link->sactive)
5414 ap->nr_active_links++;
4e5b6260 5415 link->sactive |= 1 << qc->hw_tag;
dedaf2b0 5416 } else {
efcb3cf7 5417 WARN_ON_ONCE(link->sactive);
da917d69
TH
5418
5419 ap->nr_active_links++;
9af5c9c9 5420 link->active_tag = qc->tag;
dedaf2b0
TH
5421 }
5422
e4a70e76 5423 qc->flags |= ATA_QCFLAG_ACTIVE;
e3ed8939 5424 ap->qc_active |= 1ULL << qc->tag;
e4a70e76 5425
60f5d6ef
TH
5426 /*
5427 * We guarantee to LLDs that they will have at least one
f92a2636
TH
5428 * non-zero sg if the command is a data command.
5429 */
9173e5e8 5430 if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
60f5d6ef 5431 goto sys_err;
f92a2636 5432
405e66b3 5433 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
f92a2636 5434 (ap->flags & ATA_FLAG_PIO_DMA)))
001102d7 5435 if (ata_sg_setup(qc))
60f5d6ef 5436 goto sys_err;
1da177e4 5437
cf480626 5438 /* if device is sleeping, schedule reset and abort the link */
054a5fba 5439 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
cf480626 5440 link->eh_info.action |= ATA_EH_RESET;
054a5fba
TH
5441 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5442 ata_link_abort(link);
5443 return;
5444 }
5445
1da177e4 5446 ap->ops->qc_prep(qc);
255c03d1 5447 trace_ata_qc_issue(qc);
8e0e694a
TH
5448 qc->err_mask |= ap->ops->qc_issue(qc);
5449 if (unlikely(qc->err_mask))
5450 goto err;
5451 return;
1da177e4 5452
60f5d6ef 5453sys_err:
8e0e694a
TH
5454 qc->err_mask |= AC_ERR_SYSTEM;
5455err:
5456 ata_qc_complete(qc);
1da177e4
LT
5457}
5458
34bf2170
TH
5459/**
5460 * sata_scr_valid - test whether SCRs are accessible
936fd732 5461 * @link: ATA link to test SCR accessibility for
34bf2170 5462 *
936fd732 5463 * Test whether SCRs are accessible for @link.
34bf2170
TH
5464 *
5465 * LOCKING:
5466 * None.
5467 *
5468 * RETURNS:
5469 * 1 if SCRs are accessible, 0 otherwise.
5470 */
936fd732 5471int sata_scr_valid(struct ata_link *link)
34bf2170 5472{
936fd732
TH
5473 struct ata_port *ap = link->ap;
5474
a16abc0b 5475 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5476}
5477
5478/**
5479 * sata_scr_read - read SCR register of the specified port
936fd732 5480 * @link: ATA link to read SCR for
34bf2170
TH
5481 * @reg: SCR to read
5482 * @val: Place to store read value
5483 *
936fd732 5484 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
5485 * guaranteed to succeed if @link is ap->link, the cable type of
5486 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
5487 *
5488 * LOCKING:
633273a3 5489 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
5490 *
5491 * RETURNS:
5492 * 0 on success, negative errno on failure.
5493 */
936fd732 5494int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 5495{
633273a3 5496 if (ata_is_host_link(link)) {
633273a3 5497 if (sata_scr_valid(link))
82ef04fb 5498 return link->ap->ops->scr_read(link, reg, val);
633273a3
TH
5499 return -EOPNOTSUPP;
5500 }
5501
5502 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
5503}
5504
5505/**
5506 * sata_scr_write - write SCR register of the specified port
936fd732 5507 * @link: ATA link to write SCR for
34bf2170
TH
5508 * @reg: SCR to write
5509 * @val: value to write
5510 *
936fd732 5511 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
5512 * guaranteed to succeed if @link is ap->link, the cable type of
5513 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
5514 *
5515 * LOCKING:
633273a3 5516 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
5517 *
5518 * RETURNS:
5519 * 0 on success, negative errno on failure.
5520 */
936fd732 5521int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 5522{
633273a3 5523 if (ata_is_host_link(link)) {
633273a3 5524 if (sata_scr_valid(link))
82ef04fb 5525 return link->ap->ops->scr_write(link, reg, val);
633273a3
TH
5526 return -EOPNOTSUPP;
5527 }
936fd732 5528
633273a3 5529 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
5530}
5531
5532/**
5533 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 5534 * @link: ATA link to write SCR for
34bf2170
TH
5535 * @reg: SCR to write
5536 * @val: value to write
5537 *
5538 * This function is identical to sata_scr_write() except that this
5539 * function performs flush after writing to the register.
5540 *
5541 * LOCKING:
633273a3 5542 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
5543 *
5544 * RETURNS:
5545 * 0 on success, negative errno on failure.
5546 */
936fd732 5547int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 5548{
633273a3 5549 if (ata_is_host_link(link)) {
633273a3 5550 int rc;
da3dbb17 5551
633273a3 5552 if (sata_scr_valid(link)) {
82ef04fb 5553 rc = link->ap->ops->scr_write(link, reg, val);
633273a3 5554 if (rc == 0)
82ef04fb 5555 rc = link->ap->ops->scr_read(link, reg, &val);
633273a3
TH
5556 return rc;
5557 }
5558 return -EOPNOTSUPP;
34bf2170 5559 }
633273a3
TH
5560
5561 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
5562}
5563
5564/**
b1c72916 5565 * ata_phys_link_online - test whether the given link is online
936fd732 5566 * @link: ATA link to test
34bf2170 5567 *
936fd732
TH
5568 * Test whether @link is online. Note that this function returns
5569 * 0 if online status of @link cannot be obtained, so
5570 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5571 *
5572 * LOCKING:
5573 * None.
5574 *
5575 * RETURNS:
b5b3fa38 5576 * True if the port online status is available and online.
34bf2170 5577 */
b1c72916 5578bool ata_phys_link_online(struct ata_link *link)
34bf2170
TH
5579{
5580 u32 sstatus;
5581
936fd732 5582 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 5583 ata_sstatus_online(sstatus))
b5b3fa38
TH
5584 return true;
5585 return false;
34bf2170
TH
5586}
5587
5588/**
b1c72916 5589 * ata_phys_link_offline - test whether the given link is offline
936fd732 5590 * @link: ATA link to test
34bf2170 5591 *
936fd732
TH
5592 * Test whether @link is offline. Note that this function
5593 * returns 0 if offline status of @link cannot be obtained, so
5594 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5595 *
5596 * LOCKING:
5597 * None.
5598 *
5599 * RETURNS:
b5b3fa38 5600 * True if the port offline status is available and offline.
34bf2170 5601 */
b1c72916 5602bool ata_phys_link_offline(struct ata_link *link)
34bf2170
TH
5603{
5604 u32 sstatus;
5605
936fd732 5606 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 5607 !ata_sstatus_online(sstatus))
b5b3fa38
TH
5608 return true;
5609 return false;
34bf2170 5610}
0baab86b 5611
b1c72916
TH
5612/**
5613 * ata_link_online - test whether the given link is online
5614 * @link: ATA link to test
5615 *
5616 * Test whether @link is online. This is identical to
5617 * ata_phys_link_online() when there's no slave link. When
5618 * there's a slave link, this function should only be called on
5619 * the master link and will return true if any of M/S links is
5620 * online.
5621 *
5622 * LOCKING:
5623 * None.
5624 *
5625 * RETURNS:
5626 * True if the port online status is available and online.
5627 */
5628bool ata_link_online(struct ata_link *link)
5629{
5630 struct ata_link *slave = link->ap->slave_link;
5631
5632 WARN_ON(link == slave); /* shouldn't be called on slave link */
5633
5634 return ata_phys_link_online(link) ||
5635 (slave && ata_phys_link_online(slave));
5636}
5637
5638/**
5639 * ata_link_offline - test whether the given link is offline
5640 * @link: ATA link to test
5641 *
5642 * Test whether @link is offline. This is identical to
5643 * ata_phys_link_offline() when there's no slave link. When
5644 * there's a slave link, this function should only be called on
5645 * the master link and will return true if both M/S links are
5646 * offline.
5647 *
5648 * LOCKING:
5649 * None.
5650 *
5651 * RETURNS:
5652 * True if the port offline status is available and offline.
5653 */
5654bool ata_link_offline(struct ata_link *link)
5655{
5656 struct ata_link *slave = link->ap->slave_link;
5657
5658 WARN_ON(link == slave); /* shouldn't be called on slave link */
5659
5660 return ata_phys_link_offline(link) &&
5661 (!slave || ata_phys_link_offline(slave));
5662}
5663
6ffa01d8 5664#ifdef CONFIG_PM
bc6e7c4b
DW
5665static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5666 unsigned int action, unsigned int ehi_flags,
5667 bool async)
500530f6 5668{
5ef41082 5669 struct ata_link *link;
500530f6 5670 unsigned long flags;
500530f6 5671
5ef41082
LM
5672 /* Previous resume operation might still be in
5673 * progress. Wait for PM_PENDING to clear.
5674 */
5675 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5676 ata_port_wait_eh(ap);
5677 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5678 }
500530f6 5679
5ef41082
LM
5680 /* request PM ops to EH */
5681 spin_lock_irqsave(ap->lock, flags);
500530f6 5682
5ef41082 5683 ap->pm_mesg = mesg;
5ef41082
LM
5684 ap->pflags |= ATA_PFLAG_PM_PENDING;
5685 ata_for_each_link(link, ap, HOST_FIRST) {
5686 link->eh_info.action |= action;
5687 link->eh_info.flags |= ehi_flags;
5688 }
500530f6 5689
5ef41082 5690 ata_port_schedule_eh(ap);
500530f6 5691
5ef41082 5692 spin_unlock_irqrestore(ap->lock, flags);
500530f6 5693
2fcbdcb4 5694 if (!async) {
5ef41082
LM
5695 ata_port_wait_eh(ap);
5696 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
500530f6 5697 }
500530f6
TH
5698}
5699
bc6e7c4b
DW
5700/*
5701 * On some hardware, device fails to respond after spun down for suspend. As
5702 * the device won't be used before being resumed, we don't need to touch the
5703 * device. Ask EH to skip the usual stuff and proceed directly to suspend.
5704 *
5705 * http://thread.gmane.org/gmane.linux.ide/46764
5706 */
5707static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5708 | ATA_EHI_NO_AUTOPSY
5709 | ATA_EHI_NO_RECOVERY;
5710
5711static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5ef41082 5712{
bc6e7c4b 5713 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5ef41082
LM
5714}
5715
bc6e7c4b 5716static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
2fcbdcb4 5717{
bc6e7c4b 5718 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
2fcbdcb4
DW
5719}
5720
bc6e7c4b 5721static int ata_port_pm_suspend(struct device *dev)
5ef41082 5722{
bc6e7c4b
DW
5723 struct ata_port *ap = to_ata_port(dev);
5724
5ef41082
LM
5725 if (pm_runtime_suspended(dev))
5726 return 0;
5727
bc6e7c4b
DW
5728 ata_port_suspend(ap, PMSG_SUSPEND);
5729 return 0;
33574d68
LM
5730}
5731
bc6e7c4b 5732static int ata_port_pm_freeze(struct device *dev)
33574d68 5733{
bc6e7c4b
DW
5734 struct ata_port *ap = to_ata_port(dev);
5735
33574d68 5736 if (pm_runtime_suspended(dev))
f5e6d0d0 5737 return 0;
33574d68 5738
bc6e7c4b
DW
5739 ata_port_suspend(ap, PMSG_FREEZE);
5740 return 0;
33574d68
LM
5741}
5742
/*
 * dev_pm_ops ->poweroff callback (hibernation power-down) for ATA ports.
 * Unconditionally suspends the port; no runtime-PM short-circuit here.
 */
static int ata_port_pm_poweroff(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
	return 0;
}
5748
bc6e7c4b
DW
/* Resume skips autopsy and stays quiet; a reset is requested separately. */
static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
						| ATA_EHI_QUIET;

/* Synchronous port resume: requests an EH reset and waits for completion. */
static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}
5756
/* Asynchronous port resume: requests an EH reset and returns immediately. */
static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}
5761
/*
 * dev_pm_ops ->resume/->thaw/->restore callback for ATA ports.
 *
 * Kicks off the port resume asynchronously, then re-arms runtime PM
 * (disable -> set_active -> enable) so the runtime-PM state matches the
 * now-active device.
 */
static int ata_port_pm_resume(struct device *dev)
{
	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
5770
7e15e9be
AL
/*
 * For ODDs, the upper layer will poll for media change every few seconds,
 * which will make it enter and leave suspend state every few seconds. And
 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
 * is very little and the ODD may malfunction after constantly being reset.
 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
 * ODD is attached to the port.
 */
static int ata_port_runtime_idle(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);
	struct ata_link *link;
	struct ata_device *adev;

	/* Veto runtime suspend if any enabled ATAPI device lacks ZPODD. */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(adev, link, ENABLED)
			if (adev->class == ATA_DEV_ATAPI &&
			    !zpodd_dev_enabled(adev))
				return -EBUSY;
	}

	return 0;
}
5794
a7ff60db
AL
/* Runtime-PM suspend callback: suspend the port with an auto-suspend msg. */
static int ata_port_runtime_suspend(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
	return 0;
}
5800
/* Runtime-PM resume callback: resume the port with an auto-resume msg. */
static int ata_port_runtime_resume(struct device *dev)
{
	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
	return 0;
}
5806
/* System and runtime PM operations for ATA port devices (ata_port_type). */
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_pm_suspend,
	.resume = ata_port_pm_resume,
	.freeze = ata_port_pm_freeze,
	.thaw = ata_port_pm_resume,
	.poweroff = ata_port_pm_poweroff,
	.restore = ata_port_pm_resume,

	.runtime_suspend = ata_port_runtime_suspend,
	.runtime_resume = ata_port_runtime_resume,
	.runtime_idle = ata_port_runtime_idle,
};
5819
2fcbdcb4
DW
/* sas ports don't participate in pm runtime management of ata_ports,
 * and need to resume ata devices at the domain level, not the per-port
 * level. sas suspend/resume is async to allow parallel port recovery
 * since sas has multiple ata_port instances per Scsi_Host.
 */
void ata_sas_port_suspend(struct ata_port *ap)
{
	ata_port_suspend_async(ap, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
2fcbdcb4 5830
/* Asynchronous resume entry point for SAS-attached ATA ports (see above). */
void ata_sas_port_resume(struct ata_port *ap)
{
	ata_port_resume_async(ap, PMSG_RESUME);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
2fcbdcb4 5836
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by port suspend;
 *	this only records the PM state on the host's device.
 *
 *	RETURNS:
 *	Always 0.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
	return 0;
}
5849
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by port resume;
 *	this only restores the PM state on the host's device.
 */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
6ffa01d8 5860#endif
500530f6 5861
/* Device type for ATA port devices; wires in the PM ops when CONFIG_PM. */
const struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
5868
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Wipe only the probe-volatile region; persistent fields survive. */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5903
4fb37a25
TH
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5940
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* SPD field of SControl: 0 means no limit configured. */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	/* apply any "libata.force" link speed limits from the command line */
	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
5973
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* start frozen; EH thaws the port once probing begins */
	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	ap->print_id = -1;
	ap->local_port_no = -1;
	ap->host = host;
	ap->dev = host->dev;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
		    TIMER_DEFERRABLE);

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
6033
/*
 * devres release callback for an ATA host: drop the SCSI host references
 * held by each port, detach the host from the device, and drop the host
 * reference taken at allocation time (which may free it).
 */
static void ata_devres_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

	}

	dev_set_drvdata(gendev, NULL);
	ata_host_put(host);
}
6053
/* kref release callback: frees all ports (and their PMP/slave links),
 * then the host itself.  Runs when the last ata_host_put() drops the ref.
 */
static void ata_host_release(struct kref *kref)
{
	struct ata_host *host = container_of(kref, struct ata_host, kref);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		kfree(ap->pmp_link);
		kfree(ap->slave_link);
		kfree(ap);
		host->ports[i] = NULL;
	}
	kfree(host);
}
1aa506e4 6069
2623c7a5
TK
/* Take a reference on @host; paired with ata_host_put(). */
void ata_host_get(struct ata_host *host)
{
	kref_get(&host->kref);
}
6074
/* Drop a reference on @host; frees it via ata_host_release() at zero. */
void ata_host_put(struct ata_host *host)
{
	kref_put(&host->kref, ata_host_release);
}
6079
f3187195
TH
6080/**
6081 * ata_host_alloc - allocate and init basic ATA host resources
6082 * @dev: generic device this host is associated with
6083 * @max_ports: maximum number of ATA ports associated with this host
6084 *
6085 * Allocate and initialize basic ATA host resources. LLD calls
6086 * this function to allocate a host, initializes it fully and
6087 * attaches it using ata_host_register().
6088 *
6089 * @max_ports ports are allocated and host->n_ports is
6090 * initialized to @max_ports. The caller is allowed to decrease
6091 * host->n_ports before calling ata_host_register(). The unused
6092 * ports will be automatically freed on registration.
6093 *
6094 * RETURNS:
6095 * Allocate ATA host on success, NULL on failure.
6096 *
6097 * LOCKING:
6098 * Inherited from calling layer (may sleep).
6099 */
6100struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6101{
6102 struct ata_host *host;
6103 size_t sz;
6104 int i;
2623c7a5 6105 void *dr;
f3187195
TH
6106
6107 DPRINTK("ENTER\n");
6108
f3187195
TH
6109 /* alloc a container for our list of ATA ports (buses) */
6110 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
2623c7a5 6111 host = kzalloc(sz, GFP_KERNEL);
f3187195 6112 if (!host)
2623c7a5
TK
6113 return NULL;
6114
6115 if (!devres_open_group(dev, NULL, GFP_KERNEL))
dafd6c49 6116 goto err_free;
2623c7a5
TK
6117
6118 dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
6119 if (!dr)
f3187195
TH
6120 goto err_out;
6121
2623c7a5 6122 devres_add(dev, dr);
f3187195
TH
6123 dev_set_drvdata(dev, host);
6124
6125 spin_lock_init(&host->lock);
c0c362b6 6126 mutex_init(&host->eh_mutex);
f3187195
TH
6127 host->dev = dev;
6128 host->n_ports = max_ports;
2623c7a5 6129 kref_init(&host->kref);
f3187195
TH
6130
6131 /* allocate ports bound to this host */
6132 for (i = 0; i < max_ports; i++) {
6133 struct ata_port *ap;
6134
6135 ap = ata_port_alloc(host);
6136 if (!ap)
6137 goto err_out;
6138
6139 ap->port_no = i;
6140 host->ports[i] = ap;
6141 }
6142
6143 devres_remove_group(dev, NULL);
6144 return host;
6145
6146 err_out:
6147 devres_release_group(dev, NULL);
dafd6c49
CIK
6148 err_free:
6149 kfree(host);
f3187195
TH
6150 return NULL;
6151}
6152
f5cda257
TH
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	NOTE(review): ppi[0] appears to be assumed non-NULL - @pi would
 *	stay NULL and be dereferenced otherwise; verify against callers.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* reuse the last non-NULL entry for remaining ports */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
6200
b1c72916
TH
/**
 *	ata_slave_link_init - initialize slave link
 *	@ap: port to initialize slave link for
 *
 *	Create and initialize slave link for @ap.  This enables slave
 *	link handling on the port.
 *
 *	In libata, a port contains links and a link contains devices.
 *	There is single host link but if a PMP is attached to it,
 *	there can be multiple fan-out links.  On SATA, there's usually
 *	a single device connected to a link but PATA and SATA
 *	controllers emulating TF based interface can have two - master
 *	and slave.
 *
 *	However, there are a few controllers which don't fit into this
 *	abstraction too well - SATA controllers which emulate TF
 *	interface with both master and slave devices but also have
 *	separate SCR register sets for each device.  These controllers
 *	need separate links for physical link handling
 *	(e.g. onlineness, link speed) but should be treated like a
 *	traditional M/S controller for everything else (e.g. command
 *	issue, softreset).
 *
 *	slave_link is libata's way of handling this class of
 *	controllers without impacting core layer too much.  For
 *	anything other than physical link handling, the default host
 *	link is used for both master and slave.  For physical link
 *	handling, separate @ap->slave_link is used.  All dirty details
 *	are implemented inside libata core layer.  From LLD's POV, the
 *	only difference is that prereset, hardreset and postreset are
 *	called once more for the slave link, so the reset sequence
 *	looks like the following.
 *
 *	prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
 *	softreset(M) -> postreset(M) -> postreset(S)
 *
 *	Note that softreset is called only for the master.  Softreset
 *	resets both M/S by definition, so SRST on master should handle
 *	both (the standard method will work just fine).
 *
 *	LOCKING:
 *	Should be called before host is registered.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_slave_link_init(struct ata_port *ap)
{
	struct ata_link *link;

	/* slave_link is incompatible with an existing slave link or PMP */
	WARN_ON(ap->slave_link);
	WARN_ON(ap->flags & ATA_FLAG_PMP);

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	ata_link_init(ap, link, 1);
	ap->slave_link = link;
	return 0;
}
6262
32ebbc0c
TH
/*
 * devres release callback installed by ata_host_start(): stops every
 * port and then the host via the LLD's port_stop/host_stop hooks.
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
6280
029cfd6b
TH
/**
 *	ata_finalize_port_ops - finalize ata_port_operations
 *	@ops: ata_port_operations to finalize
 *
 *	An ata_port_operations can inherit from another ops and that
 *	ops can again inherit from another.  This can go on as many
 *	times as necessary as long as there is no loop in the
 *	inheritance chain.
 *
 *	Ops tables are finalized when the host is started.  NULL or
 *	unspecified entries are inherited from the closet ancestor
 *	which has the method and the entry is populated with it.
 *	After finalization, the ops table directly points to all the
 *	methods and ->inherits is no longer necessary and cleared.
 *
 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
 *
 *	LOCKING:
 *	None.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	/* treat the ops table as an array of function-pointer slots,
	 * covering everything up to (but excluding) ->inherits */
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* fill each empty slot from the closest ancestor that has it */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* slots forced off with ATA_OP_NULL (an ERR_PTR) become NULL */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
6330
ecef7253
TH
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	/* first pass: finalize ops and detect whether a stop hook exists */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* allocate the devres that will run the stop hooks on teardown */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	/* second pass: actually start and freeze each port */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* roll back the ports that were already started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
6410
/**
 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@ops: port_ops
 *
 *	Minimal host initialization for LLDs (SAS/ipr) that manage
 *	their own host allocation instead of using ata_host_alloc().
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->n_tags = ATA_MAX_QUEUE;
	host->dev = dev;
	host->ops = ops;
	kref_init(&host->kref);
}
6428
/*
 * Kick EH to perform initial (boot-time) probing of all devices on @ap.
 * Does not wait for EH to finish; callers that need synchronous probing
 * use ata_port_probe().
 */
void __ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* kick EH for boot probing */
	spin_lock_irqsave(ap->lock, flags);

	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
79318057 6447
9508a66f
DW
/*
 * Synchronously probe @ap: EH-based probing when the port has an error
 * handler, legacy ata_bus_probe() otherwise.  Returns 0 or -errno from
 * the legacy path (the EH path always returns 0).
 */
int ata_port_probe(struct ata_port *ap)
{
	int rc = 0;

	if (ap->ops->error_handler) {
		__ata_port_probe(ap);
		ata_port_wait_eh(ap);
	} else {
		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->print_id);
	}
	return rc;
}
6462
6463
/* async_schedule() worker: probe one port, then scan its SCSI host. */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.
	 * Jeff Garzik says this is only within a controller, so we
	 * don't need to wait for port 0, only for later ports.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	(void)ata_port_probe(ap);

	/* in order to keep device order, we need to synchronize at this point */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
238c9cf9 6485
f3187195
TH
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* Create associated sysfs transport objects  */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	/* roll back the transport objects added so far */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
6583
f5cda257
TH
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessary
 *	arguments and performs the three steps in one go.
 *
 *	An invalid IRQ skips the IRQ registration and expects the host to
 *	have set polling mode on the port.  In this case, @irq_handler
 *	should be NULL.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;
	char *irq_desc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	/* "driver[device]" name shown in /proc/interrupts */
	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
				  dev_driver_string(host->dev),
				  dev_name(host->dev));
	if (!irq_desc)
		return -ENOMEM;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      irq_desc, host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6645
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* ports without an error handler never ran EH; skip teardown */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* clean up zpodd on port removal */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (zpodd_dev_enabled(dev))
				zpodd_exit(dev);
		}
	}
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}
6697
0529c159
TH
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}
6717
374b1873
JG
6718#ifdef CONFIG_PCI
6719
1da177e4
LT
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	ata_host_detach(host);
}
6737
6738/* move to PCI subsystem */
057ace5e 6739int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6740{
6741 unsigned long tmp = 0;
6742
6743 switch (bits->width) {
6744 case 1: {
6745 u8 tmp8 = 0;
6746 pci_read_config_byte(pdev, bits->reg, &tmp8);
6747 tmp = tmp8;
6748 break;
6749 }
6750 case 2: {
6751 u16 tmp16 = 0;
6752 pci_read_config_word(pdev, bits->reg, &tmp16);
6753 tmp = tmp16;
6754 break;
6755 }
6756 case 4: {
6757 u32 tmp32 = 0;
6758 pci_read_config_dword(pdev, bits->reg, &tmp32);
6759 tmp = tmp32;
6760 break;
6761 }
6762
6763 default:
6764 return -EINVAL;
6765 }
6766
6767 tmp &= bits->mask;
6768
6769 return (tmp == bits->val) ? 1 : 0;
6770}
9b847548 6771
6ffa01d8 6772#ifdef CONFIG_PM
3c5100c1 6773void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6774{
6775 pci_save_state(pdev);
4c90d971 6776 pci_disable_device(pdev);
500530f6 6777
3a2d5b70 6778 if (mesg.event & PM_EVENT_SLEEP)
500530f6 6779 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6780}
6781
553c4aa6 6782int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6783{
553c4aa6
TH
6784 int rc;
6785
9b847548
JA
6786 pci_set_power_state(pdev, PCI_D0);
6787 pci_restore_state(pdev);
553c4aa6 6788
b878ca5d 6789 rc = pcim_enable_device(pdev);
553c4aa6 6790 if (rc) {
a44fec1f
JP
6791 dev_err(&pdev->dev,
6792 "failed to enable device after resume (%d)\n", rc);
553c4aa6
TH
6793 return rc;
6794 }
6795
9b847548 6796 pci_set_master(pdev);
553c4aa6 6797 return 0;
500530f6
TH
6798}
6799
3c5100c1 6800int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6801{
04a3f5b7 6802 struct ata_host *host = pci_get_drvdata(pdev);
500530f6
TH
6803 int rc = 0;
6804
cca3974e 6805 rc = ata_host_suspend(host, mesg);
500530f6
TH
6806 if (rc)
6807 return rc;
6808
3c5100c1 6809 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6810
6811 return 0;
6812}
6813
/*
 * Bring the PCI function back up; resume the attached host only when
 * the device itself resumed successfully.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc = ata_pci_device_do_resume(pdev);

	if (!rc)
		ata_host_resume(host);

	return rc;
}
6ffa01d8
TH
6824#endif /* CONFIG_PM */
6825
1da177e4
LT
6826#endif /* CONFIG_PCI */
6827
b7db04d9
BN
/**
 *	ata_platform_remove_one - Platform layer callback for device removal
 *	@pdev: Platform device that was removed
 *
 *	Platform layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from platform layer (may sleep).
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	ata_host_detach(platform_get_drvdata(pdev));

	return 0;
}
6847
33267325
TH
6848static int __init ata_parse_force_one(char **cur,
6849 struct ata_force_ent *force_ent,
6850 const char **reason)
6851{
0f5f264b 6852 static const struct ata_force_param force_tbl[] __initconst = {
33267325
TH
6853 { "40c", .cbl = ATA_CBL_PATA40 },
6854 { "80c", .cbl = ATA_CBL_PATA80 },
6855 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6856 { "unk", .cbl = ATA_CBL_PATA_UNK },
6857 { "ign", .cbl = ATA_CBL_PATA_IGN },
6858 { "sata", .cbl = ATA_CBL_SATA },
6859 { "1.5Gbps", .spd_limit = 1 },
6860 { "3.0Gbps", .spd_limit = 2 },
6861 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6862 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
d7b16e4f
MP
6863 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM },
6864 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM },
43c9c591 6865 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
33267325
TH
6866 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6867 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6868 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6869 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6870 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6871 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6872 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6873 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6874 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6875 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6876 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6877 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6878 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6879 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6880 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6881 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6882 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6883 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6884 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6885 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6886 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6887 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6888 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6889 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6890 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6891 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6892 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6893 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6894 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6895 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6896 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6897 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6898 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6899 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
05944bdf
TH
6900 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6901 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6902 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
ca6d43b0 6903 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
966fbe19 6904 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
b8bd6dc3 6905 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
33267325
TH
6906 };
6907 char *start = *cur, *p = *cur;
6908 char *id, *val, *endp;
6909 const struct ata_force_param *match_fp = NULL;
6910 int nr_matches = 0, i;
6911
6912 /* find where this param ends and update *cur */
6913 while (*p != '\0' && *p != ',')
6914 p++;
6915
6916 if (*p == '\0')
6917 *cur = p;
6918 else
6919 *cur = p + 1;
6920
6921 *p = '\0';
6922
6923 /* parse */
6924 p = strchr(start, ':');
6925 if (!p) {
6926 val = strstrip(start);
6927 goto parse_val;
6928 }
6929 *p = '\0';
6930
6931 id = strstrip(start);
6932 val = strstrip(p + 1);
6933
6934 /* parse id */
6935 p = strchr(id, '.');
6936 if (p) {
6937 *p++ = '\0';
6938 force_ent->device = simple_strtoul(p, &endp, 10);
6939 if (p == endp || *endp != '\0') {
6940 *reason = "invalid device";
6941 return -EINVAL;
6942 }
6943 }
6944
6945 force_ent->port = simple_strtoul(id, &endp, 10);
f7cf69ae 6946 if (id == endp || *endp != '\0') {
33267325
TH
6947 *reason = "invalid port/link";
6948 return -EINVAL;
6949 }
6950
6951 parse_val:
6952 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6953 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6954 const struct ata_force_param *fp = &force_tbl[i];
6955
6956 if (strncasecmp(val, fp->name, strlen(val)))
6957 continue;
6958
6959 nr_matches++;
6960 match_fp = fp;
6961
6962 if (strcasecmp(val, fp->name) == 0) {
6963 nr_matches = 1;
6964 break;
6965 }
6966 }
6967
6968 if (!nr_matches) {
6969 *reason = "unknown value";
6970 return -EINVAL;
6971 }
6972 if (nr_matches > 1) {
9de55351 6973 *reason = "ambiguous value";
33267325
TH
6974 return -EINVAL;
6975 }
6976
6977 force_ent->param = *match_fp;
6978
6979 return 0;
6980}
6981
6982static void __init ata_parse_force_param(void)
6983{
6984 int idx = 0, size = 1;
6985 int last_port = -1, last_device = -1;
6986 char *p, *cur, *next;
6987
6988 /* calculate maximum number of params and allocate force_tbl */
6989 for (p = ata_force_param_buf; *p; p++)
6990 if (*p == ',')
6991 size++;
6992
6396bb22 6993 ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
33267325
TH
6994 if (!ata_force_tbl) {
6995 printk(KERN_WARNING "ata: failed to extend force table, "
6996 "libata.force ignored\n");
6997 return;
6998 }
6999
7000 /* parse and populate the table */
7001 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7002 const char *reason = "";
7003 struct ata_force_ent te = { .port = -1, .device = -1 };
7004
7005 next = cur;
7006 if (ata_parse_force_one(&next, &te, &reason)) {
7007 printk(KERN_WARNING "ata: failed to parse force "
7008 "parameter \"%s\" (%s)\n",
7009 cur, reason);
7010 continue;
7011 }
7012
7013 if (te.port == -1) {
7014 te.port = last_port;
7015 te.device = last_device;
7016 }
7017
7018 ata_force_tbl[idx++] = te;
7019
7020 last_port = te.port;
7021 last_device = te.device;
7022 }
7023
7024 ata_force_tbl_size = idx;
7025}
1da177e4 7026
1da177e4
LT
7027static int __init ata_init(void)
7028{
d9027470 7029 int rc;
270390e1 7030
33267325
TH
7031 ata_parse_force_param();
7032
270390e1 7033 rc = ata_sff_init();
ad72cf98
TH
7034 if (rc) {
7035 kfree(ata_force_tbl);
7036 return rc;
7037 }
453b07ac 7038
d9027470
GG
7039 libata_transport_init();
7040 ata_scsi_transport_template = ata_attach_transport();
7041 if (!ata_scsi_transport_template) {
7042 ata_sff_exit();
7043 rc = -ENOMEM;
7044 goto err_out;
4fca377f 7045 }
d9027470 7046
1da177e4
LT
7047 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7048 return 0;
d9027470
GG
7049
7050err_out:
7051 return rc;
1da177e4
LT
7052}
7053
7054static void __exit ata_exit(void)
7055{
d9027470
GG
7056 ata_release_transport(ata_scsi_transport_template);
7057 libata_transport_exit();
270390e1 7058 ata_sff_exit();
33267325 7059 kfree(ata_force_tbl);
1da177e4
LT
7060}
7061
a4625085 7062subsys_initcall(ata_init);
1da177e4
LT
7063module_exit(ata_exit);
7064
9990b6f3 7065static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
67846b30
JG
7066
7067int ata_ratelimit(void)
7068{
9990b6f3 7069 return __ratelimit(&ratelimit);
67846b30
JG
7070}
7071
c0c362b6
TH
7072/**
7073 * ata_msleep - ATA EH owner aware msleep
7074 * @ap: ATA port to attribute the sleep to
7075 * @msecs: duration to sleep in milliseconds
7076 *
7077 * Sleeps @msecs. If the current task is owner of @ap's EH, the
7078 * ownership is released before going to sleep and reacquired
7079 * after the sleep is complete. IOW, other ports sharing the
7080 * @ap->host will be allowed to own the EH while this task is
7081 * sleeping.
7082 *
7083 * LOCKING:
7084 * Might sleep.
7085 */
97750ceb
TH
7086void ata_msleep(struct ata_port *ap, unsigned int msecs)
7087{
c0c362b6
TH
7088 bool owns_eh = ap && ap->host->eh_owner == current;
7089
7090 if (owns_eh)
7091 ata_eh_release(ap);
7092
848c3920
AVM
7093 if (msecs < 20) {
7094 unsigned long usecs = msecs * USEC_PER_MSEC;
7095 usleep_range(usecs, usecs + 50);
7096 } else {
7097 msleep(msecs);
7098 }
c0c362b6
TH
7099
7100 if (owns_eh)
7101 ata_eh_acquire(ap);
97750ceb
TH
7102}
7103
c22daff4
TH
7104/**
7105 * ata_wait_register - wait until register value changes
97750ceb 7106 * @ap: ATA port to wait register for, can be NULL
c22daff4
TH
7107 * @reg: IO-mapped register
7108 * @mask: Mask to apply to read register value
7109 * @val: Wait condition
341c2c95
TH
7110 * @interval: polling interval in milliseconds
7111 * @timeout: timeout in milliseconds
c22daff4
TH
7112 *
7113 * Waiting for some bits of register to change is a common
7114 * operation for ATA controllers. This function reads 32bit LE
7115 * IO-mapped register @reg and tests for the following condition.
7116 *
7117 * (*@reg & mask) != val
7118 *
7119 * If the condition is met, it returns; otherwise, the process is
7120 * repeated after @interval_msec until timeout.
7121 *
7122 * LOCKING:
7123 * Kernel thread context (may sleep)
7124 *
7125 * RETURNS:
7126 * The final register value.
7127 */
97750ceb 7128u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
341c2c95 7129 unsigned long interval, unsigned long timeout)
c22daff4 7130{
341c2c95 7131 unsigned long deadline;
c22daff4
TH
7132 u32 tmp;
7133
7134 tmp = ioread32(reg);
7135
7136 /* Calculate timeout _after_ the first read to make sure
7137 * preceding writes reach the controller before starting to
7138 * eat away the timeout.
7139 */
341c2c95 7140 deadline = ata_deadline(jiffies, timeout);
c22daff4 7141
341c2c95 7142 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
97750ceb 7143 ata_msleep(ap, interval);
c22daff4
TH
7144 tmp = ioread32(reg);
7145 }
7146
7147 return tmp;
7148}
7149
8393b811
GM
7150/**
7151 * sata_lpm_ignore_phy_events - test if PHY event should be ignored
7152 * @link: Link receiving the event
7153 *
7154 * Test whether the received PHY event has to be ignored or not.
7155 *
7156 * LOCKING:
7157 * None:
7158 *
7159 * RETURNS:
7160 * True if the event has to be ignored.
7161 */
7162bool sata_lpm_ignore_phy_events(struct ata_link *link)
7163{
09c5b480
GM
7164 unsigned long lpm_timeout = link->last_lpm_change +
7165 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
7166
8393b811 7167 /* if LPM is enabled, PHYRDY doesn't mean anything */
09c5b480
GM
7168 if (link->lpm_policy > ATA_LPM_MAX_POWER)
7169 return true;
7170
7171 /* ignore the first PHY event after the LPM policy changed
7172 * as it is might be spurious
7173 */
7174 if ((link->flags & ATA_LFLAG_CHANGED) &&
7175 time_before(jiffies, lpm_timeout))
7176 return true;
7177
7178 return false;
8393b811
GM
7179}
7180EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
7181
dd5b06c4
TH
7182/*
7183 * Dummy port_ops
7184 */
182d7bba 7185static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
dd5b06c4 7186{
182d7bba 7187 return AC_ERR_SYSTEM;
dd5b06c4
TH
7188}
7189
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}
7194
029cfd6b 7195struct ata_port_operations ata_dummy_port_ops = {
dd5b06c4
TH
7196 .qc_prep = ata_noop_qc_prep,
7197 .qc_issue = ata_dummy_qc_issue,
182d7bba 7198 .error_handler = ata_dummy_error_handler,
e4a9c373
DW
7199 .sched_eh = ata_std_sched_eh,
7200 .end_eh = ata_std_end_eh,
dd5b06c4
TH
7201};
7202
21b0ad4f
TH
7203const struct ata_port_info ata_dummy_port_info = {
7204 .port_ops = &ata_dummy_port_ops,
7205};
7206
a9a79dfe
JP
7207/*
7208 * Utility print functions
7209 */
d7bead1b
JP
7210void ata_port_printk(const struct ata_port *ap, const char *level,
7211 const char *fmt, ...)
a9a79dfe
JP
7212{
7213 struct va_format vaf;
7214 va_list args;
a9a79dfe
JP
7215
7216 va_start(args, fmt);
7217
7218 vaf.fmt = fmt;
7219 vaf.va = &args;
7220
d7bead1b 7221 printk("%sata%u: %pV", level, ap->print_id, &vaf);
a9a79dfe
JP
7222
7223 va_end(args);
a9a79dfe
JP
7224}
7225EXPORT_SYMBOL(ata_port_printk);
7226
d7bead1b
JP
7227void ata_link_printk(const struct ata_link *link, const char *level,
7228 const char *fmt, ...)
a9a79dfe
JP
7229{
7230 struct va_format vaf;
7231 va_list args;
a9a79dfe
JP
7232
7233 va_start(args, fmt);
7234
7235 vaf.fmt = fmt;
7236 vaf.va = &args;
7237
7238 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
d7bead1b
JP
7239 printk("%sata%u.%02u: %pV",
7240 level, link->ap->print_id, link->pmp, &vaf);
a9a79dfe 7241 else
d7bead1b
JP
7242 printk("%sata%u: %pV",
7243 level, link->ap->print_id, &vaf);
a9a79dfe
JP
7244
7245 va_end(args);
a9a79dfe
JP
7246}
7247EXPORT_SYMBOL(ata_link_printk);
7248
d7bead1b 7249void ata_dev_printk(const struct ata_device *dev, const char *level,
a9a79dfe
JP
7250 const char *fmt, ...)
7251{
7252 struct va_format vaf;
7253 va_list args;
a9a79dfe
JP
7254
7255 va_start(args, fmt);
7256
7257 vaf.fmt = fmt;
7258 vaf.va = &args;
7259
d7bead1b
JP
7260 printk("%sata%u.%02u: %pV",
7261 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
7262 &vaf);
a9a79dfe
JP
7263
7264 va_end(args);
a9a79dfe
JP
7265}
7266EXPORT_SYMBOL(ata_dev_printk);
7267
06296a1e
JP
7268void ata_print_version(const struct device *dev, const char *version)
7269{
7270 dev_printk(KERN_DEBUG, dev, "version %s\n", version);
7271}
7272EXPORT_SYMBOL(ata_print_version);
7273
1da177e4
LT
7274/*
7275 * libata is essentially a library of internal helper functions for
7276 * low-level ATA host controller drivers. As such, the API/ABI is
7277 * likely to change as new drivers are added and updated.
7278 * Do not depend on ABI/API stability.
7279 */
e9c83914
TH
7280EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7281EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7282EXPORT_SYMBOL_GPL(sata_deb_timing_long);
029cfd6b
TH
7283EXPORT_SYMBOL_GPL(ata_base_port_ops);
7284EXPORT_SYMBOL_GPL(sata_port_ops);
dd5b06c4 7285EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7286EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1eca4365
TH
7287EXPORT_SYMBOL_GPL(ata_link_next);
7288EXPORT_SYMBOL_GPL(ata_dev_next);
1da177e4 7289EXPORT_SYMBOL_GPL(ata_std_bios_param);
d8d9129e 7290EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
cca3974e 7291EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7292EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7293EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
b1c72916 7294EXPORT_SYMBOL_GPL(ata_slave_link_init);
ecef7253 7295EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7296EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7297EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7298EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 7299EXPORT_SYMBOL_GPL(ata_sg_init);
f686bcb8 7300EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7301EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
436d34b3 7302EXPORT_SYMBOL_GPL(atapi_cmd_type);
1da177e4
LT
7303EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7304EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
7305EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7306EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7307EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7308EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7309EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7310EXPORT_SYMBOL_GPL(ata_mode_string);
7311EXPORT_SYMBOL_GPL(ata_id_xfermask);
04351821 7312EXPORT_SYMBOL_GPL(ata_do_set_mode);
31cc23b3 7313EXPORT_SYMBOL_GPL(ata_std_qc_defer);
e46834cd 7314EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
10305f0f 7315EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7316EXPORT_SYMBOL_GPL(sata_set_spd);
aa2731ad 7317EXPORT_SYMBOL_GPL(ata_wait_after_reset);
936fd732
TH
7318EXPORT_SYMBOL_GPL(sata_link_debounce);
7319EXPORT_SYMBOL_GPL(sata_link_resume);
1152b261 7320EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
0aa1113d 7321EXPORT_SYMBOL_GPL(ata_std_prereset);
cc0680a5 7322EXPORT_SYMBOL_GPL(sata_link_hardreset);
57c9efdf 7323EXPORT_SYMBOL_GPL(sata_std_hardreset);
203c75b8 7324EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7325EXPORT_SYMBOL_GPL(ata_dev_classify);
7326EXPORT_SYMBOL_GPL(ata_dev_pair);
67846b30 7327EXPORT_SYMBOL_GPL(ata_ratelimit);
97750ceb 7328EXPORT_SYMBOL_GPL(ata_msleep);
c22daff4 7329EXPORT_SYMBOL_GPL(ata_wait_register);
1da177e4 7330EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7331EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7332EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7333EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
f6e67035 7334EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
34bf2170
TH
7335EXPORT_SYMBOL_GPL(sata_scr_valid);
7336EXPORT_SYMBOL_GPL(sata_scr_read);
7337EXPORT_SYMBOL_GPL(sata_scr_write);
7338EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7339EXPORT_SYMBOL_GPL(ata_link_online);
7340EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7341#ifdef CONFIG_PM
cca3974e
JG
7342EXPORT_SYMBOL_GPL(ata_host_suspend);
7343EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7344#endif /* CONFIG_PM */
6a62a04d
TH
7345EXPORT_SYMBOL_GPL(ata_id_string);
7346EXPORT_SYMBOL_GPL(ata_id_c_string);
963e4975 7347EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1da177e4
LT
7348EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7349
1bc4ccff 7350EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 7351EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
7352EXPORT_SYMBOL_GPL(ata_timing_compute);
7353EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 7354EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 7355
1da177e4
LT
7356#ifdef CONFIG_PCI
7357EXPORT_SYMBOL_GPL(pci_test_config_bits);
1da177e4 7358EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7359#ifdef CONFIG_PM
500530f6
TH
7360EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7361EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7362EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7363EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7364#endif /* CONFIG_PM */
1da177e4 7365#endif /* CONFIG_PCI */
9b847548 7366
b7db04d9
BN
7367EXPORT_SYMBOL_GPL(ata_platform_remove_one);
7368
b64bbc39
TH
7369EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7370EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7371EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7372EXPORT_SYMBOL_GPL(ata_port_desc);
7373#ifdef CONFIG_PCI
7374EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7375#endif /* CONFIG_PCI */
7b70fc03 7376EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7377EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7378EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 7379EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 7380EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
7381EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7382EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7383EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7384EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
10acf3b0 7385EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
022bdb07 7386EXPORT_SYMBOL_GPL(ata_do_eh);
a1efdaba 7387EXPORT_SYMBOL_GPL(ata_std_error_handler);
be0d18df
AC
7388
7389EXPORT_SYMBOL_GPL(ata_cable_40wire);
7390EXPORT_SYMBOL_GPL(ata_cable_80wire);
7391EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 7392EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 7393EXPORT_SYMBOL_GPL(ata_cable_sata);
2fa4a326 7394EXPORT_SYMBOL_GPL(ata_host_get);
4e8065aa 7395EXPORT_SYMBOL_GPL(ata_host_put);