scsi: ata: libata-scsi: Add support for CDL pages mode sense
[linux-2.6-block.git] / drivers / ata / libata-core.c
CommitLineData
c82ee6d3 1// SPDX-License-Identifier: GPL-2.0-or-later
1da177e4 2/*
af36d7f0
JG
3 * libata-core.c - helper library for ATA
4 *
af36d7f0
JG
5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6 * Copyright 2003-2004 Jeff Garzik
7 *
af36d7f0 8 * libata documentation is available via 'make {ps|pdf}docs',
19285f3c 9 * as Documentation/driver-api/libata.rst
af36d7f0
JG
10 *
11 * Hardware documentation available from http://www.t13.org/ and
12 * http://www.sata-io.org/
13 *
92c52c52
AC
14 * Standards documents from:
15 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
16 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
17 * http://www.sata-io.org (SATA)
18 * http://www.compactflash.org (CF)
19 * http://www.qic.org (QIC157 - Tape and DSC)
20 * http://www.ce-ata.org (CE-ATA: not supported)
a52fbcfc
BZ
21 *
22 * libata is essentially a library of internal helper functions for
23 * low-level ATA host controller drivers. As such, the API/ABI is
24 * likely to change as new drivers are added and updated.
25 * Do not depend on ABI/API stability.
1da177e4
LT
26 */
27
1da177e4
LT
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/list.h>
33#include <linux/mm.h>
1da177e4
LT
34#include <linux/spinlock.h>
35#include <linux/blkdev.h>
36#include <linux/delay.h>
37#include <linux/timer.h>
848c3920 38#include <linux/time.h>
1da177e4
LT
39#include <linux/interrupt.h>
40#include <linux/completion.h>
41#include <linux/suspend.h>
42#include <linux/workqueue.h>
378f058c 43#include <linux/scatterlist.h>
2dcb407e 44#include <linux/io.h>
e18086d6 45#include <linux/log2.h>
5a0e3ad6 46#include <linux/slab.h>
428ac5fc 47#include <linux/glob.h>
1da177e4 48#include <scsi/scsi.h>
193515d5 49#include <scsi/scsi_cmnd.h>
1da177e4
LT
50#include <scsi/scsi_host.h>
51#include <linux/libata.h>
1da177e4 52#include <asm/byteorder.h>
fe5af0cc 53#include <asm/unaligned.h>
140b5e59 54#include <linux/cdrom.h>
9990b6f3 55#include <linux/ratelimit.h>
eb25cb99 56#include <linux/leds.h>
9ee4f393 57#include <linux/pm_runtime.h>
b7db04d9 58#include <linux/platform_device.h>
bbf5a097 59#include <asm/setup.h>
1da177e4 60
255c03d1
HR
61#define CREATE_TRACE_POINTS
62#include <trace/events/libata.h>
63
1da177e4 64#include "libata.h"
d9027470 65#include "libata-transport.h"
fda0efc5 66
/*
 * Base port operations shared by every libata port: the standard
 * prereset/postreset hooks, the standard error handler and the standard
 * EH scheduling/completion callbacks.
 */
const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

/*
 * Default operations for SATA ports.  Inherits the base operations and
 * adds the standard qc deferral policy and SATA hard reset.
 */
const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};
EXPORT_SYMBOL_GPL(sata_port_ops);
029cfd6b 82
3373efd8
TH
83static unsigned int ata_dev_init_params(struct ata_device *dev,
84 u16 heads, u16 sectors);
85static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
86static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 87static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 88
a78f57af 89atomic_t ata_print_id = ATOMIC_INIT(0);
1da177e4 90
bf89b0bf 91#ifdef CONFIG_ATA_FORCE
/*
 * One override parsed from the "libata.force" kernel parameter: which
 * settings to force on a matching port/device.  A zero field means
 * "no override" for that setting.
 */
struct ata_force_param {
	const char	*name;		/* human readable name for logging */
	u8		cbl;		/* cable type, ATA_CBL_NONE if unset */
	u8		spd_limit;	/* SATA PHY speed limit, 0 if unset */
	unsigned int	xfer_mask;	/* packed pio/mwdma/udma mask */
	unsigned int	horkage_on;	/* horkage bits to set */
	unsigned int	horkage_off;	/* horkage bits to clear */
	u16		lflags_on;	/* link flags to set */
	u16		lflags_off;	/* link flags to clear */
};

/*
 * A libata.force table entry: the port/device the override applies to
 * (-1 acts as a wildcard) plus the override itself.
 */
struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};
108
109static struct ata_force_ent *ata_force_tbl;
110static int ata_force_tbl_size;
111
bbf5a097 112static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
7afb4222
TH
113/* param_buf is thrown away after initialization, disallow read */
114module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
8c27ceff 115MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
bf89b0bf 116#endif
33267325 117
2486fa56 118static int atapi_enabled = 1;
1623c81e 119module_param(atapi_enabled, int, 0444);
ad5d8eac 120MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
1623c81e 121
c5c61bda 122static int atapi_dmadir = 0;
95de719a 123module_param(atapi_dmadir, int, 0444);
ad5d8eac 124MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
95de719a 125
baf4fdfa
ML
126int atapi_passthru16 = 1;
127module_param(atapi_passthru16, int, 0444);
ad5d8eac 128MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
baf4fdfa 129
c3c013a2
JG
130int libata_fua = 0;
131module_param_named(fua, libata_fua, int, 0444);
ad5d8eac 132MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
c3c013a2 133
2dcb407e 134static int ata_ignore_hpa;
1e999736
AC
135module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
136MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
137
b3a70601
AC
138static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
139module_param_named(dma, libata_dma_mask, int, 0444);
140MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
141
87fbc5a0 142static int ata_probe_timeout;
a8601e5f
AM
143module_param(ata_probe_timeout, int, 0444);
144MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
145
6ebe9d86 146int libata_noacpi = 0;
d7d0dad6 147module_param_named(noacpi, libata_noacpi, int, 0444);
ad5d8eac 148MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
11ef697b 149
ae8d4ee7
AC
150int libata_allow_tpm = 0;
151module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
ad5d8eac 152MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
ae8d4ee7 153
e7ecd435
TH
154static int atapi_an;
155module_param(atapi_an, int, 0444);
156MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
157
1da177e4
LT
158MODULE_AUTHOR("Jeff Garzik");
159MODULE_DESCRIPTION("Library module for ATA devices");
160MODULE_LICENSE("GPL");
161MODULE_VERSION(DRV_VERSION);
162
891fd7c6
DLM
163static inline bool ata_dev_print_info(struct ata_device *dev)
164{
165 struct ata_eh_context *ehc = &dev->link->eh_context;
166
167 return ehc->i.flags & ATA_EHI_PRINTINFO;
168}
0baab86b 169
9913ff8a
TH
170static bool ata_sstatus_online(u32 sstatus)
171{
172 return (sstatus & 0xf) == 0x3;
173}
174
/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode, one of ATA_LITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			/* with a PMP attached, fan-out links come first */
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			fallthrough;
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			fallthrough;
		case ATA_LITER_PMP_FIRST:
			/* slave link is visited after the host link */
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			fallthrough;
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* PMP links exhausted; in PMP_FIRST mode the host link is last */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
EXPORT_SYMBOL_GPL(ata_link_next);
aadffb68 234
/**
 * ata_dev_next - device iteration helper
 * @dev: the previous device, NULL to start
 * @link: ATA link containing devices to iterate
 * @mode: iteration mode, one of ATA_DITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			/* forward modes start at the first device */
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			/* reverse modes start at the last device */
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* ENABLED modes skip devices that are not enabled */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
EXPORT_SYMBOL_GPL(ata_dev_next);
1eca4365 288
b1c72916
TH
289/**
290 * ata_dev_phys_link - find physical link for a device
291 * @dev: ATA device to look up physical link for
292 *
293 * Look up physical link which @dev is attached to. Note that
294 * this is different from @dev->link only when @dev is on slave
295 * link. For all other cases, it's the same as @dev->link.
296 *
297 * LOCKING:
298 * Don't care.
299 *
300 * RETURNS:
301 * Pointer to the found physical link.
302 */
303struct ata_link *ata_dev_phys_link(struct ata_device *dev)
304{
305 struct ata_port *ap = dev->link->ap;
306
307 if (!ap->slave_link)
308 return dev->link;
309 if (!dev->devno)
310 return &ap->link;
311 return ap->slave_link;
312}
313
bf89b0bf 314#ifdef CONFIG_ATA_FORCE
33267325
TH
315/**
316 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 317 * @ap: ATA port of interest
33267325
TH
318 *
319 * Force cable type according to libata.force and whine about it.
320 * The last entry which has matching port number is used, so it
321 * can be specified as part of device force parameters. For
322 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
323 * same effect.
324 *
325 * LOCKING:
326 * EH context.
327 */
328void ata_force_cbl(struct ata_port *ap)
329{
330 int i;
331
332 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
333 const struct ata_force_ent *fe = &ata_force_tbl[i];
334
335 if (fe->port != -1 && fe->port != ap->print_id)
336 continue;
337
338 if (fe->param.cbl == ATA_CBL_NONE)
339 continue;
340
341 ap->cbl = fe->param.cbl;
a9a79dfe 342 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
33267325
TH
343 return;
344 }
345}
/**
 * ata_force_link_limits - force link limits according to libata.force
 * @link: ATA link of interest
 *
 * Force link flags and SATA spd limit according to libata.force
 * and whine about it.  When only the port part is specified
 * (e.g. 1:), the limit applies to all links connected to both
 * the host link and all fan-out ports connected via PMP.  If the
 * device part is specified as 0 (e.g. 1.00:), it specifies the
 * first fan-out link not the host link.  Device number 15 always
 * points to the host link whether PMP is attached or not.  If the
 * controller has slave link, device number 16 points to it.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	/* the host link is addressed as device 15 (see above) */
	if (ata_is_host_link(link))
		linkno += 15;

	/* iterate from the end so the last matching spd limit wins */
	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags_on) {
			link->flags |= fe->param.lflags_on;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags_on, link->flags);
		}
		if (fe->param.lflags_off) {
			link->flags &= ~fe->param.lflags_off;
			ata_link_notice(link,
					"FORCE: link flag 0x%x cleared -> 0x%x\n",
					fe->param.lflags_off, link->flags);
		}
	}
}
404
405/**
406 * ata_force_xfermask - force xfermask according to libata.force
407 * @dev: ATA device of interest
408 *
409 * Force xfer_mask according to libata.force and whine about it.
410 * For consistency with link selection, device number 15 selects
411 * the first device connected to the host link.
412 *
413 * LOCKING:
414 * EH context.
415 */
416static void ata_force_xfermask(struct ata_device *dev)
417{
418 int devno = dev->link->pmp + dev->devno;
419 int alt_devno = devno;
420 int i;
421
b1c72916
TH
422 /* allow n.15/16 for devices attached to host port */
423 if (ata_is_host_link(dev->link))
424 alt_devno += 15;
33267325
TH
425
426 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
427 const struct ata_force_ent *fe = &ata_force_tbl[i];
f0a6d77b 428 unsigned int pio_mask, mwdma_mask, udma_mask;
33267325
TH
429
430 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
431 continue;
432
433 if (fe->device != -1 && fe->device != devno &&
434 fe->device != alt_devno)
435 continue;
436
437 if (!fe->param.xfer_mask)
438 continue;
439
440 ata_unpack_xfermask(fe->param.xfer_mask,
441 &pio_mask, &mwdma_mask, &udma_mask);
442 if (udma_mask)
443 dev->udma_mask = udma_mask;
444 else if (mwdma_mask) {
445 dev->udma_mask = 0;
446 dev->mwdma_mask = mwdma_mask;
447 } else {
448 dev->udma_mask = 0;
449 dev->mwdma_mask = 0;
450 dev->pio_mask = pio_mask;
451 }
452
a9a79dfe
JP
453 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
454 fe->param.name);
33267325
TH
455 return;
456 }
457}
458
459/**
460 * ata_force_horkage - force horkage according to libata.force
461 * @dev: ATA device of interest
462 *
463 * Force horkage according to libata.force and whine about it.
464 * For consistency with link selection, device number 15 selects
465 * the first device connected to the host link.
466 *
467 * LOCKING:
468 * EH context.
469 */
470static void ata_force_horkage(struct ata_device *dev)
471{
472 int devno = dev->link->pmp + dev->devno;
473 int alt_devno = devno;
474 int i;
475
b1c72916
TH
476 /* allow n.15/16 for devices attached to host port */
477 if (ata_is_host_link(dev->link))
478 alt_devno += 15;
33267325
TH
479
480 for (i = 0; i < ata_force_tbl_size; i++) {
481 const struct ata_force_ent *fe = &ata_force_tbl[i];
482
483 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
484 continue;
485
486 if (fe->device != -1 && fe->device != devno &&
487 fe->device != alt_devno)
488 continue;
489
490 if (!(~dev->horkage & fe->param.horkage_on) &&
491 !(dev->horkage & fe->param.horkage_off))
492 continue;
493
494 dev->horkage |= fe->param.horkage_on;
495 dev->horkage &= ~fe->param.horkage_off;
496
a9a79dfe
JP
497 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
498 fe->param.name);
33267325
TH
499 }
500}
#else
/* CONFIG_ATA_FORCE disabled: libata.force handling compiles to no-ops. */
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
static inline void ata_force_horkage(struct ata_device *dev) { }
#endif
33267325 506
436d34b3
TH
507/**
508 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
509 * @opcode: SCSI opcode
510 *
511 * Determine ATAPI command type from @opcode.
512 *
513 * LOCKING:
514 * None.
515 *
516 * RETURNS:
517 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
518 */
519int atapi_cmd_type(u8 opcode)
520{
521 switch (opcode) {
522 case GPCMD_READ_10:
523 case GPCMD_READ_12:
524 return ATAPI_READ;
525
526 case GPCMD_WRITE_10:
527 case GPCMD_WRITE_12:
528 case GPCMD_WRITE_AND_VERIFY_10:
529 return ATAPI_WRITE;
530
531 case GPCMD_READ_CD:
532 case GPCMD_READ_CD_MSF:
533 return ATAPI_READ_CD;
534
e52dcc48
TH
535 case ATA_16:
536 case ATA_12:
537 if (atapi_passthru16)
538 return ATAPI_PASS_THRU;
df561f66 539 fallthrough;
436d34b3
TH
540 default:
541 return ATAPI_MISC;
542 }
543}
a52fbcfc 544EXPORT_SYMBOL_GPL(atapi_cmd_type);
436d34b3 545
/*
 * Read/write command opcodes, indexed by ata_set_rwcmd_protocol() as
 * base + fua*4 + lba48*2 + write, where base is 0 for PIO multi-sector,
 * 8 for plain PIO and 16 for DMA.  A 0 entry marks an invalid
 * combination (e.g. FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	0,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
575
576/**
77839deb
DLM
577 * ata_set_rwcmd_protocol - set taskfile r/w command and protocol
578 * @dev: target device for the taskfile
579 * @tf: taskfile to examine and configure
1da177e4 580 *
77839deb
DLM
581 * Examine the device configuration and tf->flags to determine
582 * the proper read/write command and protocol to use for @tf.
1da177e4
LT
583 *
584 * LOCKING:
585 * caller.
586 */
77839deb
DLM
587static bool ata_set_rwcmd_protocol(struct ata_device *dev,
588 struct ata_taskfile *tf)
1da177e4 589{
9a3dccc4 590 u8 cmd;
1da177e4 591
9a3dccc4 592 int index, fua, lba48, write;
2e9edbf8 593
9a3dccc4 594 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
595 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
596 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 597
8cbd6df1
AL
598 if (dev->flags & ATA_DFLAG_PIO) {
599 tf->protocol = ATA_PROT_PIO;
9a3dccc4 600 index = dev->multi_count ? 0 : 8;
9af5c9c9 601 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
602 /* Unable to use DMA due to host limitation */
603 tf->protocol = ATA_PROT_PIO;
0565c26d 604 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
605 } else {
606 tf->protocol = ATA_PROT_DMA;
9a3dccc4 607 index = 16;
8cbd6df1 608 }
1da177e4 609
9a3dccc4 610 cmd = ata_rw_cmds[index + fua + lba48 + write];
77839deb
DLM
611 if (!cmd)
612 return false;
613
614 tf->command = cmd;
615
616 return true;
1da177e4
LT
617}
618
35b649fe
TH
619/**
620 * ata_tf_read_block - Read block address from ATA taskfile
621 * @tf: ATA taskfile of interest
622 * @dev: ATA device @tf belongs to
623 *
624 * LOCKING:
625 * None.
626 *
627 * Read block address from @tf. This function can handle all
628 * three address formats - LBA, LBA48 and CHS. tf->protocol and
629 * flags select the address format to use.
630 *
631 * RETURNS:
632 * Block address read from @tf.
633 */
cffd1ee9 634u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
35b649fe
TH
635{
636 u64 block = 0;
637
fe16d4f2 638 if (tf->flags & ATA_TFLAG_LBA) {
35b649fe
TH
639 if (tf->flags & ATA_TFLAG_LBA48) {
640 block |= (u64)tf->hob_lbah << 40;
641 block |= (u64)tf->hob_lbam << 32;
44901a96 642 block |= (u64)tf->hob_lbal << 24;
35b649fe
TH
643 } else
644 block |= (tf->device & 0xf) << 24;
645
646 block |= tf->lbah << 16;
647 block |= tf->lbam << 8;
648 block |= tf->lbal;
649 } else {
650 u32 cyl, head, sect;
651
652 cyl = tf->lbam | (tf->lbah << 8);
653 head = tf->device & 0xf;
654 sect = tf->lbal;
655
ac8672ea 656 if (!sect) {
a9a79dfe
JP
657 ata_dev_warn(dev,
658 "device reported invalid CHS sector 0\n");
cffd1ee9 659 return U64_MAX;
ac8672ea
TH
660 }
661
662 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
35b649fe
TH
663 }
664
665 return block;
666}
667
/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @qc: Metadata associated with the taskfile to build
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @class: IO priority class
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile for the command @qc for read/write request described
 * by @block, @n_block, @tf_flags and @class.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
		    unsigned int tf_flags, int class)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* for NCQ the tag goes in nsect bits 7:3 */
		tf->nsect = qc->hw_tag << 3;
		/* sector count is carried in the feature fields */
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED &&
		    class == IOPRIO_CLASS_RT)
			tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		/* We need LBA48 for FUA writes */
		if (!(tf->flags & ATA_TFLAG_FUA) && lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else {
			/* request too large even for LBA48 */
			return -ERANGE;
		}

		if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
794
cb95d562
TH
795/**
796 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
797 * @pio_mask: pio_mask
798 * @mwdma_mask: mwdma_mask
799 * @udma_mask: udma_mask
800 *
801 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
802 * unsigned int xfer_mask.
803 *
804 * LOCKING:
805 * None.
806 *
807 * RETURNS:
808 * Packed xfer_mask.
809 */
f0a6d77b
SS
810unsigned int ata_pack_xfermask(unsigned int pio_mask,
811 unsigned int mwdma_mask,
812 unsigned int udma_mask)
cb95d562 813{
f0a6d77b 814 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
cb95d562
TH
815 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
816 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
817}
a52fbcfc 818EXPORT_SYMBOL_GPL(ata_pack_xfermask);
cb95d562 819
c0489e4e
TH
820/**
821 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
822 * @xfer_mask: xfer_mask to unpack
823 * @pio_mask: resulting pio_mask
824 * @mwdma_mask: resulting mwdma_mask
825 * @udma_mask: resulting udma_mask
826 *
827 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
c9b5560a 828 * Any NULL destination masks will be ignored.
c0489e4e 829 */
f0a6d77b
SS
830void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
831 unsigned int *mwdma_mask, unsigned int *udma_mask)
c0489e4e
TH
832{
833 if (pio_mask)
834 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
835 if (mwdma_mask)
836 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
837 if (udma_mask)
838 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
839}
840
/*
 * Maps each transfer-mode class (PIO, MWDMA, UDMA) to its bit range in a
 * packed xfer_mask (@shift, @bits) and the corresponding base XFER_* mode
 * value (@base).  The table is terminated by a negative shift.
 */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and count in xfer_mask */
	u8 base;		/* XFER_* value of the class's mode 0 */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
850
851/**
852 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
853 * @xfer_mask: xfer_mask of interest
854 *
855 * Return matching XFER_* value for @xfer_mask. Only the highest
856 * bit of @xfer_mask is considered.
857 *
858 * LOCKING:
859 * None.
860 *
861 * RETURNS:
70cd071e 862 * Matching XFER_* value, 0xff if no match found.
cb95d562 863 */
f0a6d77b 864u8 ata_xfer_mask2mode(unsigned int xfer_mask)
cb95d562
TH
865{
866 int highbit = fls(xfer_mask) - 1;
867 const struct ata_xfer_ent *ent;
868
869 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
870 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
871 return ent->base + highbit - ent->shift;
70cd071e 872 return 0xff;
cb95d562 873}
a52fbcfc 874EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
cb95d562
TH
875
876/**
877 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
878 * @xfer_mode: XFER_* of interest
879 *
880 * Return matching xfer_mask for @xfer_mode.
881 *
882 * LOCKING:
883 * None.
884 *
885 * RETURNS:
886 * Matching xfer_mask, 0 if no match found.
887 */
f0a6d77b 888unsigned int ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
889{
890 const struct ata_xfer_ent *ent;
891
892 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
893 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
894 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
895 & ~((1 << ent->shift) - 1);
cb95d562
TH
896 return 0;
897}
a52fbcfc 898EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
cb95d562
TH
899
900/**
901 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
902 * @xfer_mode: XFER_* of interest
903 *
904 * Return matching xfer_shift for @xfer_mode.
905 *
906 * LOCKING:
907 * None.
908 *
909 * RETURNS:
910 * Matching xfer_shift, -1 if no match found.
911 */
a28c1ab3 912int ata_xfer_mode2shift(u8 xfer_mode)
cb95d562
TH
913{
914 const struct ata_xfer_ent *ent;
915
916 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
917 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
918 return ent->shift;
919 return -1;
920}
a52fbcfc 921EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
cb95d562 922
/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}
EXPORT_SYMBOL_GPL(ata_mode_string);
1da177e4 969
/*
 * sata_spd_string - convert a SATA link speed number to a string
 * @spd: 1-based speed generation (1 = 1.5 Gbps, 2 = 3.0 Gbps, ...)
 *
 * Returns "<unknown>" for 0 or out-of-range values.
 */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};
	unsigned int idx = spd - 1;

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[idx];
}
982
1da177e4
LT
983/**
984 * ata_dev_classify - determine device type based on ATA-spec signature
985 * @tf: ATA taskfile register set for device to be identified
986 *
987 * Determine from taskfile register contents whether a device is
988 * ATA or ATAPI, as per "Signature and persistence" section
989 * of ATA/PI spec (volume 1, sect 5.14).
990 *
991 * LOCKING:
992 * None.
993 *
994 * RETURNS:
9162c657
HR
995 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
996 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN the event of failure.
1da177e4 997 */
057ace5e 998unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
999{
1000 /* Apple's open source Darwin code hints that some devices only
1001 * put a proper signature into the LBA mid/high registers,
1002 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
1003 *
1004 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1005 * signatures for ATA and ATAPI devices attached on SerialATA,
1006 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1007 * spec has never mentioned about using different signatures
1008 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1009 * Multiplier specification began to use 0x69/0x96 to identify
1010 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1011 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1012 * 0x69/0x96 shortly and described them as reserved for
1013 * SerialATA.
1014 *
1015 * We follow the current spec and consider that 0x69/0x96
1016 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
79b42bab
TH
1017 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1018 * SEMB signature. This is worked around in
1019 * ata_dev_read_id().
1da177e4 1020 */
6c952a0d 1021 if (tf->lbam == 0 && tf->lbah == 0)
1da177e4 1022 return ATA_DEV_ATA;
1da177e4 1023
6c952a0d 1024 if (tf->lbam == 0x14 && tf->lbah == 0xeb)
1da177e4 1025 return ATA_DEV_ATAPI;
1da177e4 1026
6c952a0d 1027 if (tf->lbam == 0x69 && tf->lbah == 0x96)
633273a3 1028 return ATA_DEV_PMP;
633273a3 1029
6c952a0d 1030 if (tf->lbam == 0x3c && tf->lbah == 0xc3)
79b42bab 1031 return ATA_DEV_SEMB;
633273a3 1032
6c952a0d 1033 if (tf->lbam == 0xcd && tf->lbah == 0xab)
9162c657 1034 return ATA_DEV_ZAC;
9162c657 1035
1da177e4
LT
1036 return ATA_DEV_UNKNOWN;
1037}
a52fbcfc 1038EXPORT_SYMBOL_GPL(ata_dev_classify);
1da177e4 1039
1da177e4 1040/**
6a62a04d 1041 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1042 * @id: IDENTIFY DEVICE results we will examine
1043 * @s: string into which data is output
1044 * @ofs: offset into identify device page
1045 * @len: length of string to return. must be an even number.
1046 *
1047 * The strings in the IDENTIFY DEVICE page are broken up into
1048 * 16-bit chunks. Run through the string, and output each
1049 * 8-bit chunk linearly, regardless of platform.
1050 *
1051 * LOCKING:
1052 * caller.
1053 */
1054
6a62a04d
TH
1055void ata_id_string(const u16 *id, unsigned char *s,
1056 unsigned int ofs, unsigned int len)
1da177e4
LT
1057{
1058 unsigned int c;
1059
963e4975
AC
1060 BUG_ON(len & 1);
1061
1da177e4
LT
1062 while (len > 0) {
1063 c = id[ofs] >> 8;
1064 *s = c;
1065 s++;
1066
1067 c = id[ofs] & 0xff;
1068 *s = c;
1069 s++;
1070
1071 ofs++;
1072 len -= 2;
1073 }
1074}
a52fbcfc 1075EXPORT_SYMBOL_GPL(ata_id_string);
1da177e4 1076
0e949ff3 1077/**
6a62a04d 1078 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1079 * @id: IDENTIFY DEVICE results we will examine
1080 * @s: string into which data is output
1081 * @ofs: offset into identify device page
1082 * @len: length of string to return. must be an odd number.
1083 *
6a62a04d 1084 * This function is identical to ata_id_string except that it
0e949ff3
TH
1085 * trims trailing spaces and terminates the resulting string with
1086 * null. @len must be actual maximum length (even number) + 1.
1087 *
1088 * LOCKING:
1089 * caller.
1090 */
6a62a04d
TH
1091void ata_id_c_string(const u16 *id, unsigned char *s,
1092 unsigned int ofs, unsigned int len)
0e949ff3
TH
1093{
1094 unsigned char *p;
1095
6a62a04d 1096 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1097
1098 p = s + strnlen(s, len - 1);
1099 while (p > s && p[-1] == ' ')
1100 p--;
1101 *p = '\0';
1102}
a52fbcfc 1103EXPORT_SYMBOL_GPL(ata_id_c_string);
0baab86b 1104
/*
 * Return the device capacity in sectors from IDENTIFY data @id:
 * LBA48 capacity when supported, else LBA28 capacity, else the
 * CHS geometry product (current translation if valid, otherwise
 * the default geometry words).
 */
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);

		return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	}

	/* CHS-only device */
	if (ata_id_current_chs_valid(id))
		return (u32)id[ATA_ID_CUR_CYLS] * (u32)id[ATA_ID_CUR_HEADS] *
		       (u32)id[ATA_ID_CUR_SECTORS];

	return (u32)id[ATA_ID_CYLS] * (u32)id[ATA_ID_HEADS] *
	       (u32)id[ATA_ID_SECTORS];
}
1121
a5987e0a 1122u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1e999736
AC
1123{
1124 u64 sectors = 0;
1125
1126 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1127 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
ba14a9c2 1128 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1e999736
AC
1129 sectors |= (tf->lbah & 0xff) << 16;
1130 sectors |= (tf->lbam & 0xff) << 8;
1131 sectors |= (tf->lbal & 0xff);
1132
a5987e0a 1133 return sectors;
1e999736
AC
1134}
1135
a5987e0a 1136u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1e999736
AC
1137{
1138 u64 sectors = 0;
1139
1140 sectors |= (tf->device & 0x0f) << 24;
1141 sectors |= (tf->lbah & 0xff) << 16;
1142 sectors |= (tf->lbam & 0xff) << 8;
1143 sectors |= (tf->lbal & 0xff);
1144
a5987e0a 1145 return sectors;
1e999736
AC
1146}
1147
1148/**
c728a914
TH
1149 * ata_read_native_max_address - Read native max address
1150 * @dev: target device
1151 * @max_sectors: out parameter for the result native max address
1e999736 1152 *
c728a914
TH
1153 * Perform an LBA48 or LBA28 native size query upon the device in
1154 * question.
1e999736 1155 *
c728a914
TH
1156 * RETURNS:
1157 * 0 on success, -EACCES if command is aborted by the drive.
1158 * -EIO on other errors.
1e999736 1159 */
c728a914 1160static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1161{
c728a914 1162 unsigned int err_mask;
1e999736 1163 struct ata_taskfile tf;
c728a914 1164 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1165
1166 ata_tf_init(dev, &tf);
1167
c728a914 1168 /* always clear all address registers */
1e999736 1169 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1170
c728a914
TH
1171 if (lba48) {
1172 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1173 tf.flags |= ATA_TFLAG_LBA48;
1174 } else
1175 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1176
bd18bc04 1177 tf.protocol = ATA_PROT_NODATA;
c728a914
TH
1178 tf.device |= ATA_LBA;
1179
2b789108 1180 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914 1181 if (err_mask) {
a9a79dfe
JP
1182 ata_dev_warn(dev,
1183 "failed to read native max address (err_mask=0x%x)\n",
1184 err_mask);
efcef265 1185 if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
c728a914
TH
1186 return -EACCES;
1187 return -EIO;
1188 }
1e999736 1189
c728a914 1190 if (lba48)
a5987e0a 1191 *max_sectors = ata_tf_to_lba48(&tf) + 1;
c728a914 1192 else
a5987e0a 1193 *max_sectors = ata_tf_to_lba(&tf) + 1;
2dcb407e 1194 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1195 (*max_sectors)--;
c728a914 1196 return 0;
1e999736
AC
1197}
1198
1199/**
c728a914
TH
1200 * ata_set_max_sectors - Set max sectors
1201 * @dev: target device
6b38d1d1 1202 * @new_sectors: new max sectors value to set for the device
1e999736 1203 *
c728a914
TH
1204 * Set max sectors of @dev to @new_sectors.
1205 *
1206 * RETURNS:
1207 * 0 on success, -EACCES if command is aborted or denied (due to
1208 * previous non-volatile SET_MAX) by the drive. -EIO on other
1209 * errors.
1e999736 1210 */
05027adc 1211static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 1212{
c728a914 1213 unsigned int err_mask;
1e999736 1214 struct ata_taskfile tf;
c728a914 1215 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1216
1217 new_sectors--;
1218
1219 ata_tf_init(dev, &tf);
1220
1e999736 1221 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
1222
1223 if (lba48) {
1224 tf.command = ATA_CMD_SET_MAX_EXT;
1225 tf.flags |= ATA_TFLAG_LBA48;
1226
1227 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1228 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1229 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 1230 } else {
c728a914
TH
1231 tf.command = ATA_CMD_SET_MAX;
1232
1e582ba4
TH
1233 tf.device |= (new_sectors >> 24) & 0xf;
1234 }
1235
bd18bc04 1236 tf.protocol = ATA_PROT_NODATA;
c728a914 1237 tf.device |= ATA_LBA;
1e999736
AC
1238
1239 tf.lbal = (new_sectors >> 0) & 0xff;
1240 tf.lbam = (new_sectors >> 8) & 0xff;
1241 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 1242
2b789108 1243 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914 1244 if (err_mask) {
a9a79dfe
JP
1245 ata_dev_warn(dev,
1246 "failed to set max address (err_mask=0x%x)\n",
1247 err_mask);
c728a914 1248 if (err_mask == AC_ERR_DEV &&
efcef265 1249 (tf.error & (ATA_ABORTED | ATA_IDNF)))
c728a914
TH
1250 return -EACCES;
1251 return -EIO;
1252 }
1253
c728a914 1254 return 0;
1e999736
AC
1255}
1256
1257/**
1258 * ata_hpa_resize - Resize a device with an HPA set
1259 * @dev: Device to resize
1260 *
1261 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1262 * it if required to the full size of the media. The caller must check
1263 * the drive has the HPA feature set enabled.
05027adc
TH
1264 *
1265 * RETURNS:
1266 * 0 on success, -errno on failure.
1e999736 1267 */
05027adc 1268static int ata_hpa_resize(struct ata_device *dev)
1e999736 1269{
891fd7c6 1270 bool print_info = ata_dev_print_info(dev);
445d211b 1271 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
05027adc
TH
1272 u64 sectors = ata_id_n_sectors(dev->id);
1273 u64 native_sectors;
c728a914 1274 int rc;
a617c09f 1275
05027adc 1276 /* do we need to do it? */
9162c657 1277 if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
05027adc
TH
1278 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1279 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 1280 return 0;
1e999736 1281
05027adc
TH
1282 /* read native max address */
1283 rc = ata_read_native_max_address(dev, &native_sectors);
1284 if (rc) {
dda7aba1
TH
1285 /* If device aborted the command or HPA isn't going to
1286 * be unlocked, skip HPA resizing.
05027adc 1287 */
445d211b 1288 if (rc == -EACCES || !unlock_hpa) {
a9a79dfe
JP
1289 ata_dev_warn(dev,
1290 "HPA support seems broken, skipping HPA handling\n");
05027adc
TH
1291 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1292
1293 /* we can continue if device aborted the command */
1294 if (rc == -EACCES)
1295 rc = 0;
1e999736 1296 }
37301a55 1297
05027adc
TH
1298 return rc;
1299 }
5920dadf 1300 dev->n_native_sectors = native_sectors;
05027adc
TH
1301
1302 /* nothing to do? */
445d211b 1303 if (native_sectors <= sectors || !unlock_hpa) {
05027adc
TH
1304 if (!print_info || native_sectors == sectors)
1305 return 0;
1306
1307 if (native_sectors > sectors)
a9a79dfe 1308 ata_dev_info(dev,
05027adc
TH
1309 "HPA detected: current %llu, native %llu\n",
1310 (unsigned long long)sectors,
1311 (unsigned long long)native_sectors);
1312 else if (native_sectors < sectors)
a9a79dfe
JP
1313 ata_dev_warn(dev,
1314 "native sectors (%llu) is smaller than sectors (%llu)\n",
05027adc
TH
1315 (unsigned long long)native_sectors,
1316 (unsigned long long)sectors);
1317 return 0;
1318 }
1319
1320 /* let's unlock HPA */
1321 rc = ata_set_max_sectors(dev, native_sectors);
1322 if (rc == -EACCES) {
1323 /* if device aborted the command, skip HPA resizing */
a9a79dfe
JP
1324 ata_dev_warn(dev,
1325 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1326 (unsigned long long)sectors,
1327 (unsigned long long)native_sectors);
05027adc
TH
1328 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1329 return 0;
1330 } else if (rc)
1331 return rc;
1332
1333 /* re-read IDENTIFY data */
1334 rc = ata_dev_reread_id(dev, 0);
1335 if (rc) {
a9a79dfe
JP
1336 ata_dev_err(dev,
1337 "failed to re-read IDENTIFY data after HPA resizing\n");
05027adc
TH
1338 return rc;
1339 }
1340
1341 if (print_info) {
1342 u64 new_sectors = ata_id_n_sectors(dev->id);
a9a79dfe 1343 ata_dev_info(dev,
05027adc
TH
1344 "HPA unlocked: %llu -> %llu, native %llu\n",
1345 (unsigned long long)sectors,
1346 (unsigned long long)new_sectors,
1347 (unsigned long long)native_sectors);
1348 }
1349
1350 return 0;
1e999736
AC
1351}
1352
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@dev: device from which the information is fetched
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
{
	/* dump the capability/feature/mode words most useful for debugging */
	ata_dev_dbg(dev,
		"49==0x%04x 53==0x%04x 63==0x%04x 64==0x%04x 75==0x%04x\n"
		"80==0x%04x 81==0x%04x 82==0x%04x 83==0x%04x 84==0x%04x\n"
		"88==0x%04x 93==0x%04x\n",
		id[49], id[53], id[63], id[64], id[75], id[80],
		id[81], id[82], id[83], id[84], id[88], id[93]);
}
1374
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 advertises PIO3/4 support in its low bits ... */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		/* ... and PIO0-2 are always supported */
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it is the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		/* CFA advanced modes map onto PIO5/6 and MWDMA3/4 bits */
		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* UDMA modes (word 88) are only valid when word 53 bit 2 is set */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
EXPORT_SYMBOL_GPL(ata_id_xfermask);
cb95d562 1444
7102d230 1445static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1446{
77853bf2 1447 struct completion *waiting = qc->private_data;
a2a7a662 1448
a2a7a662 1449 complete(waiting);
a2a7a662
TH
1450}
1451
1452/**
2432697b 1453 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1454 * @dev: Device to which the command is sent
1455 * @tf: Taskfile registers for the command and the result
d69cf37d 1456 * @cdb: CDB for packet command
e227867f 1457 * @dma_dir: Data transfer direction of the command
5c1ad8b3 1458 * @sgl: sg list for the data buffer of the command
2432697b 1459 * @n_elem: Number of sg entries
2b789108 1460 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1461 *
1462 * Executes libata internal command with timeout. @tf contains
1463 * command on entry and result on return. Timeout and error
1464 * conditions are reported via return value. No recovery action
1465 * is taken after a command times out. It's caller's duty to
1466 * clean up after timeout.
1467 *
1468 * LOCKING:
1469 * None. Should be called with kernel context, might sleep.
551e8889
TH
1470 *
1471 * RETURNS:
1472 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1473 */
4d6119f0
SS
1474static unsigned ata_exec_internal_sg(struct ata_device *dev,
1475 struct ata_taskfile *tf, const u8 *cdb,
1476 int dma_dir, struct scatterlist *sgl,
61176eed 1477 unsigned int n_elem, unsigned int timeout)
a2a7a662 1478{
9af5c9c9
TH
1479 struct ata_link *link = dev->link;
1480 struct ata_port *ap = link->ap;
a2a7a662 1481 u8 command = tf->command;
87fbc5a0 1482 int auto_timeout = 0;
a2a7a662 1483 struct ata_queued_cmd *qc;
28361c40 1484 unsigned int preempted_tag;
e3ed8939
JA
1485 u32 preempted_sactive;
1486 u64 preempted_qc_active;
da917d69 1487 int preempted_nr_active_links;
60be6b9a 1488 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1489 unsigned long flags;
77853bf2 1490 unsigned int err_mask;
d95a717f 1491 int rc;
a2a7a662 1492
ba6a1308 1493 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1494
e3180499 1495 /* no internal command while frozen */
4cb7c6f1 1496 if (ata_port_is_frozen(ap)) {
ba6a1308 1497 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1498 return AC_ERR_SYSTEM;
1499 }
1500
2ab7db1f 1501 /* initialize internal qc */
28361c40 1502 qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
a2a7a662 1503
28361c40
JA
1504 qc->tag = ATA_TAG_INTERNAL;
1505 qc->hw_tag = 0;
2ab7db1f
TH
1506 qc->scsicmd = NULL;
1507 qc->ap = ap;
1508 qc->dev = dev;
1509 ata_qc_reinit(qc);
1510
9af5c9c9
TH
1511 preempted_tag = link->active_tag;
1512 preempted_sactive = link->sactive;
dedaf2b0 1513 preempted_qc_active = ap->qc_active;
da917d69 1514 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1515 link->active_tag = ATA_TAG_POISON;
1516 link->sactive = 0;
dedaf2b0 1517 ap->qc_active = 0;
da917d69 1518 ap->nr_active_links = 0;
2ab7db1f
TH
1519
1520 /* prepare & issue qc */
a2a7a662 1521 qc->tf = *tf;
d69cf37d
TH
1522 if (cdb)
1523 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e771451c
VP
1524
1525 /* some SATA bridges need us to indicate data xfer direction */
1526 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1527 dma_dir == DMA_FROM_DEVICE)
1528 qc->tf.feature |= ATAPI_DMADIR;
1529
e61e0672 1530 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1531 qc->dma_dir = dma_dir;
1532 if (dma_dir != DMA_NONE) {
2432697b 1533 unsigned int i, buflen = 0;
87260216 1534 struct scatterlist *sg;
2432697b 1535
87260216
JA
1536 for_each_sg(sgl, sg, n_elem, i)
1537 buflen += sg->length;
2432697b 1538
87260216 1539 ata_sg_init(qc, sgl, n_elem);
49c80429 1540 qc->nbytes = buflen;
a2a7a662
TH
1541 }
1542
77853bf2 1543 qc->private_data = &wait;
a2a7a662
TH
1544 qc->complete_fn = ata_qc_complete_internal;
1545
8e0e694a 1546 ata_qc_issue(qc);
a2a7a662 1547
ba6a1308 1548 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1549
87fbc5a0
TH
1550 if (!timeout) {
1551 if (ata_probe_timeout)
1552 timeout = ata_probe_timeout * 1000;
1553 else {
1554 timeout = ata_internal_cmd_timeout(dev, command);
1555 auto_timeout = 1;
1556 }
1557 }
2b789108 1558
c0c362b6
TH
1559 if (ap->ops->error_handler)
1560 ata_eh_release(ap);
1561
2b789108 1562 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
d95a717f 1563
c0c362b6
TH
1564 if (ap->ops->error_handler)
1565 ata_eh_acquire(ap);
1566
c429137a 1567 ata_sff_flush_pio_task(ap);
41ade50c 1568
d95a717f 1569 if (!rc) {
ba6a1308 1570 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1571
1572 /* We're racing with irq here. If we lose, the
1573 * following test prevents us from completing the qc
d95a717f
TH
1574 * twice. If we win, the port is frozen and will be
1575 * cleaned up by ->post_internal_cmd().
a2a7a662 1576 */
77853bf2 1577 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1578 qc->err_mask |= AC_ERR_TIMEOUT;
1579
1580 if (ap->ops->error_handler)
1581 ata_port_freeze(ap);
1582 else
1583 ata_qc_complete(qc);
f15a1daf 1584
16169fb7
TH
1585 ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
1586 timeout, command);
a2a7a662
TH
1587 }
1588
ba6a1308 1589 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1590 }
1591
d95a717f
TH
1592 /* do post_internal_cmd */
1593 if (ap->ops->post_internal_cmd)
1594 ap->ops->post_internal_cmd(qc);
1595
a51d644a 1596 /* perform minimal error analysis */
87629312 1597 if (qc->flags & ATA_QCFLAG_EH) {
efcef265 1598 if (qc->result_tf.status & (ATA_ERR | ATA_DF))
a51d644a
TH
1599 qc->err_mask |= AC_ERR_DEV;
1600
1601 if (!qc->err_mask)
1602 qc->err_mask |= AC_ERR_OTHER;
1603
1604 if (qc->err_mask & ~AC_ERR_OTHER)
1605 qc->err_mask &= ~AC_ERR_OTHER;
2dae9955 1606 } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
efcef265 1607 qc->result_tf.status |= ATA_SENSE;
d95a717f
TH
1608 }
1609
15869303 1610 /* finish up */
ba6a1308 1611 spin_lock_irqsave(ap->lock, flags);
15869303 1612
e61e0672 1613 *tf = qc->result_tf;
77853bf2
TH
1614 err_mask = qc->err_mask;
1615
1616 ata_qc_free(qc);
9af5c9c9
TH
1617 link->active_tag = preempted_tag;
1618 link->sactive = preempted_sactive;
dedaf2b0 1619 ap->qc_active = preempted_qc_active;
da917d69 1620 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1621
ba6a1308 1622 spin_unlock_irqrestore(ap->lock, flags);
15869303 1623
87fbc5a0
TH
1624 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1625 ata_internal_cmd_timed_out(dev, command);
1626
77853bf2 1627 return err_mask;
a2a7a662
TH
1628}
1629
2432697b 1630/**
33480a0e 1631 * ata_exec_internal - execute libata internal command
2432697b
TH
1632 * @dev: Device to which the command is sent
1633 * @tf: Taskfile registers for the command and the result
1634 * @cdb: CDB for packet command
e227867f 1635 * @dma_dir: Data transfer direction of the command
2432697b
TH
1636 * @buf: Data buffer of the command
1637 * @buflen: Length of data buffer
2b789108 1638 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1639 *
1640 * Wrapper around ata_exec_internal_sg() which takes simple
1641 * buffer instead of sg list.
1642 *
1643 * LOCKING:
1644 * None. Should be called with kernel context, might sleep.
1645 *
1646 * RETURNS:
1647 * Zero on success, AC_ERR_* mask on failure
1648 */
1649unsigned ata_exec_internal(struct ata_device *dev,
1650 struct ata_taskfile *tf, const u8 *cdb,
2b789108 1651 int dma_dir, void *buf, unsigned int buflen,
61176eed 1652 unsigned int timeout)
2432697b 1653{
33480a0e
TH
1654 struct scatterlist *psg = NULL, sg;
1655 unsigned int n_elem = 0;
2432697b 1656
33480a0e
TH
1657 if (dma_dir != DMA_NONE) {
1658 WARN_ON(!buf);
1659 sg_init_one(&sg, buf, buflen);
1660 psg = &sg;
1661 n_elem++;
1662 }
2432697b 1663
2b789108
TH
1664 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1665 timeout);
2432697b
TH
1666}
1667
/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 *
 *	RETURNS:
 *	1 if IORDY should be used for @adev's current PIO mode, 0 otherwise.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset.  IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied.  See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY.  Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
2e9edbf8 1701
432729f0
AC
1702/**
1703 * ata_pio_mask_no_iordy - Return the non IORDY mask
1704 * @adev: ATA device
1705 *
1706 * Compute the highest mode possible if we are not using iordy. Return
1707 * -1 if no iordy mode is available.
1708 */
432729f0
AC
1709static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1710{
1bc4ccff 1711 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1712 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1713 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1714 /* Is the speed faster than the drive allows non IORDY ? */
1715 if (pio) {
1716 /* This is cycle times not frequency - watch the logic! */
1717 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1718 return 3 << ATA_SHIFT_PIO;
1719 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1720 }
1721 }
432729f0 1722 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1723}
1724
/**
 *	ata_do_dev_read_id - default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data. For some RAID controllers and for pre ATA devices
 *	this function is wrapped or replaced by the driver
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, __le16 *id)
{
	/* transfer ATA_ID_WORDS 16-bit words of IDENTIFY data, device->host */
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
963e4975 1742
1da177e4 1743/**
49016aca 1744 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1745 * @dev: target device
1746 * @p_class: pointer to class of the target device (may be changed)
bff04647 1747 * @flags: ATA_READID_* flags
fe635c7e 1748 * @id: buffer to read IDENTIFY data into
1da177e4 1749 *
49016aca
TH
1750 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1751 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1752 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1753 * for pre-ATA4 drives.
1da177e4 1754 *
50a99018 1755 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1756 * now we abort if we hit that case.
50a99018 1757 *
1da177e4 1758 * LOCKING:
49016aca
TH
1759 * Kernel thread context (may sleep)
1760 *
1761 * RETURNS:
1762 * 0 on success, -errno otherwise.
1da177e4 1763 */
a9beec95 1764int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1765 unsigned int flags, u16 *id)
1da177e4 1766{
9af5c9c9 1767 struct ata_port *ap = dev->link->ap;
49016aca 1768 unsigned int class = *p_class;
a0123703 1769 struct ata_taskfile tf;
49016aca
TH
1770 unsigned int err_mask = 0;
1771 const char *reason;
79b42bab 1772 bool is_semb = class == ATA_DEV_SEMB;
54936f8b 1773 int may_fallback = 1, tried_spinup = 0;
49016aca 1774 int rc;
1da177e4 1775
963e4975 1776retry:
3373efd8 1777 ata_tf_init(dev, &tf);
a0123703 1778
49016aca 1779 switch (class) {
79b42bab
TH
1780 case ATA_DEV_SEMB:
1781 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
df561f66 1782 fallthrough;
49016aca 1783 case ATA_DEV_ATA:
9162c657 1784 case ATA_DEV_ZAC:
a0123703 1785 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1786 break;
1787 case ATA_DEV_ATAPI:
a0123703 1788 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1789 break;
1790 default:
1791 rc = -ENODEV;
1792 reason = "unsupported class";
1793 goto err_out;
1da177e4
LT
1794 }
1795
a0123703 1796 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1797
1798 /* Some devices choke if TF registers contain garbage. Make
1799 * sure those are properly initialized.
1800 */
1801 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1802
1803 /* Device presence detection is unreliable on some
1804 * controllers. Always poll IDENTIFY if available.
1805 */
1806 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1807
963e4975 1808 if (ap->ops->read_id)
0561e514 1809 err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
963e4975 1810 else
0561e514 1811 err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);
963e4975 1812
a0123703 1813 if (err_mask) {
800b3996 1814 if (err_mask & AC_ERR_NODEV_HINT) {
a9a79dfe 1815 ata_dev_dbg(dev, "NODEV after polling detection\n");
55a8e2c8
TH
1816 return -ENOENT;
1817 }
1818
79b42bab 1819 if (is_semb) {
a9a79dfe
JP
1820 ata_dev_info(dev,
1821 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
79b42bab
TH
1822 /* SEMB is not supported yet */
1823 *p_class = ATA_DEV_SEMB_UNSUP;
1824 return 0;
1825 }
1826
efcef265 1827 if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
1ffc151f
TH
1828 /* Device or controller might have reported
1829 * the wrong device class. Give a shot at the
1830 * other IDENTIFY if the current one is
1831 * aborted by the device.
1832 */
1833 if (may_fallback) {
1834 may_fallback = 0;
1835
1836 if (class == ATA_DEV_ATA)
1837 class = ATA_DEV_ATAPI;
1838 else
1839 class = ATA_DEV_ATA;
1840 goto retry;
1841 }
1842
1843 /* Control reaches here iff the device aborted
1844 * both flavors of IDENTIFYs which happens
1845 * sometimes with phantom devices.
1846 */
a9a79dfe
JP
1847 ata_dev_dbg(dev,
1848 "both IDENTIFYs aborted, assuming NODEV\n");
1ffc151f 1849 return -ENOENT;
54936f8b
TH
1850 }
1851
49016aca
TH
1852 rc = -EIO;
1853 reason = "I/O error";
1da177e4
LT
1854 goto err_out;
1855 }
1856
43c9c591 1857 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
4baa5745 1858 ata_dev_info(dev, "dumping IDENTIFY data, "
a9a79dfe
JP
1859 "class=%d may_fallback=%d tried_spinup=%d\n",
1860 class, may_fallback, tried_spinup);
4baa5745 1861 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
43c9c591
TH
1862 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1863 }
1864
54936f8b
TH
1865 /* Falling back doesn't make sense if ID data was read
1866 * successfully at least once.
1867 */
1868 may_fallback = 0;
1869
49016aca 1870 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1871
49016aca 1872 /* sanity check */
a4f5749b 1873 rc = -EINVAL;
6070068b 1874 reason = "device reports invalid type";
a4f5749b 1875
9162c657 1876 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
a4f5749b
TH
1877 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1878 goto err_out;
db63a4c8
AW
1879 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1880 ata_id_is_ata(id)) {
1881 ata_dev_dbg(dev,
1882 "host indicates ignore ATA devices, ignored\n");
1883 return -ENOENT;
1884 }
a4f5749b
TH
1885 } else {
1886 if (ata_id_is_ata(id))
1887 goto err_out;
49016aca
TH
1888 }
1889
169439c2
ML
1890 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1891 tried_spinup = 1;
1892 /*
1893 * Drive powered-up in standby mode, and requires a specific
1894 * SET_FEATURES spin-up subcommand before it will accept
1895 * anything other than the original IDENTIFY command.
1896 */
218f3d30 1897 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 1898 if (err_mask && id[2] != 0x738c) {
169439c2
ML
1899 rc = -EIO;
1900 reason = "SPINUP failed";
1901 goto err_out;
1902 }
1903 /*
1904 * If the drive initially returned incomplete IDENTIFY info,
1905 * we now must reissue the IDENTIFY command.
1906 */
1907 if (id[2] == 0x37c8)
1908 goto retry;
1909 }
1910
9162c657
HR
1911 if ((flags & ATA_READID_POSTRESET) &&
1912 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
49016aca
TH
1913 /*
1914 * The exact sequence expected by certain pre-ATA4 drives is:
1915 * SRST RESET
50a99018
AC
1916 * IDENTIFY (optional in early ATA)
1917 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
1918 * anything else..
1919 * Some drives were very specific about that exact sequence.
50a99018
AC
1920 *
1921 * Note that ATA4 says lba is mandatory so the second check
c9404c9c 1922 * should never trigger.
49016aca
TH
1923 */
1924 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 1925 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
1926 if (err_mask) {
1927 rc = -EIO;
1928 reason = "INIT_DEV_PARAMS failed";
1929 goto err_out;
1930 }
1931
1932 /* current CHS translation info (id[53-58]) might be
1933 * changed. reread the identify device info.
1934 */
bff04647 1935 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
1936 goto retry;
1937 }
1938 }
1939
1940 *p_class = class;
fe635c7e 1941
49016aca
TH
1942 return 0;
1943
1944 err_out:
16d42467
HR
1945 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
1946 reason, err_mask);
49016aca
TH
1947 return rc;
1948}
1949
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@log: log to read
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
			       u8 page, void *buf, unsigned int sectors)
{
	unsigned long ap_flags = dev->link->ap->flags;
	struct ata_taskfile tf;
	unsigned int err_mask;
	bool dma = false;

	ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);

	/*
	 * Return error without actually issuing the command on controllers
	 * which e.g. lockup on a read log page.
	 */
	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
		return AC_ERR_DEV;

retry:
	ata_tf_init(dev, &tf);
	/* prefer the DMA variant unless disabled or quirked off */
	if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
		tf.protocol = ATA_PROT_DMA;
		dma = true;
	} else {
		tf.command = ATA_CMD_READ_LOG_EXT;
		tf.protocol = ATA_PROT_PIO;
		dma = false;
	}
	tf.lbal = log;
	tf.lbam = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	if (err_mask) {
		if (dma) {
			/* DMA read failed: blacklist DMA log reads for this
			 * device and retry once in PIO, unless the port is
			 * frozen (then issuing another command is pointless).
			 */
			dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
			if (!ata_port_is_frozen(dev->link->ap))
				goto retry;
		}
		ata_dev_err(dev,
			    "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
			    (unsigned int)log, (unsigned int)page, err_mask);
	}

	return err_mask;
}
2017
/*
 * Check the general purpose log directory for @log.
 *
 * Returns the length of the log in sectors as reported by the log
 * directory, or 0 when the log is not supported or the directory
 * cannot be read.
 */
static int ata_log_supported(struct ata_device *dev, u8 log)
{
	struct ata_port *ap = dev->link->ap;

	/* Some devices misbehave when the log directory is read at all. */
	if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
		return 0;

	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
		return 0;
	/* The directory holds one little-endian 16-bit count per log. */
	return get_unaligned_le16(&ap->sector_buf[log * 2]);
}
2029
a0fd2454
CH
/*
 * Check whether page @page of the IDENTIFY DEVICE data log is supported,
 * using the log's page 0 (list of supported pages).
 */
static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err, i;

	if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
		return false;

	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
		/*
		 * IDENTIFY DEVICE data log is defined as mandatory starting
		 * with ACS-3 (ATA version 10). Warn about the missing log
		 * for drives which implement this ATA level or above.
		 */
		if (ata_id_major_version(dev->id) >= 10)
			ata_dev_warn(dev,
				"ATA Identify Device Log not supported\n");
		/* Remember the result so we only probe (and warn) once. */
		dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
		return false;
	}

	/*
	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
	 * supported.
	 */
	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
				1);
	if (err)
		return false;

	/* Byte 8 is the page count; the page list starts at byte 9. */
	for (i = 0; i < ap->sector_buf[8]; i++) {
		if (ap->sector_buf[9 + i] == page)
			return true;
	}

	return false;
}
2067
9062712f
TH
/*
 * Apply a quirk-driven link speed limit (currently only 1.5 Gbps) to the
 * physical link of @dev.
 *
 * Returns 0 when nothing (more) needs to be done, or -EAGAIN to request
 * another EH round so the link can be renegotiated at the lower speed.
 */
static int ata_do_link_spd_horkage(struct ata_device *dev)
{
	struct ata_link *plink = ata_dev_phys_link(dev);
	u32 target, target_limit;

	if (!sata_scr_valid(plink))
		return 0;

	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
		target = 1;
	else
		return 0;

	target_limit = (1 << target) - 1;

	/* if already on stricter limit, no need to push further */
	if (plink->sata_spd_limit <= target_limit)
		return 0;

	plink->sata_spd_limit = target_limit;

	/* Request another EH round by returning -EAGAIN if link is
	 * going faster than the target speed.  Forward progress is
	 * guaranteed by setting sata_spd_limit to target_limit above.
	 */
	if (plink->sata_spd > target) {
		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
			     sata_spd_string(target));
		return -EAGAIN;
	}
	return 0;
}
2100
3373efd8 2101static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2102{
9af5c9c9 2103 struct ata_port *ap = dev->link->ap;
9ce8e307
JA
2104
2105 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2106 return 0;
2107
9af5c9c9 2108 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2109}
2110
5a233551
HR
/*
 * Probe and cache the NCQ Send/Recv log, which lists the NCQ commands the
 * device supports. Sets ATA_DFLAG_NCQ_SEND_RECV on success and masks out
 * queued TRIM for devices quirked with ATA_HORKAGE_NO_NCQ_TRIM.
 */
static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;

	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
		return;
	}
	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
				     0, ap->sector_buf, 1);
	if (!err_mask) {
		u8 *cmds = dev->ncq_send_recv_cmds;

		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);

		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
			/* Hide DSM TRIM support from the cached command set. */
			ata_dev_dbg(dev, "disabling queued TRIM support\n");
			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
		}
	}
}
2135
284b3b77
HR
2136static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2137{
2138 struct ata_port *ap = dev->link->ap;
2139 unsigned int err_mask;
284b3b77 2140
efe205a3 2141 if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
284b3b77
HR
2142 ata_dev_warn(dev,
2143 "NCQ Send/Recv Log not supported\n");
2144 return;
2145 }
2146 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2147 0, ap->sector_buf, 1);
fc5c8aa7 2148 if (!err_mask) {
284b3b77
HR
2149 u8 *cmds = dev->ncq_non_data_cmds;
2150
2151 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2152 }
2153}
2154
8e061784
AM
/*
 * Detect NCQ priority support from the SATA Settings page of the
 * IDENTIFY DEVICE data log. Sets or clears ATA_DFLAG_NCQ_PRIO (and clears
 * the user-controlled ATA_DFLAG_NCQ_PRIO_ENABLED when unsupported).
 */
static void ata_dev_config_ncq_prio(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;

	if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
		return;

	err_mask = ata_read_log_page(dev,
				     ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_SATA_SETTINGS,
				     ap->sector_buf,
				     1);
	if (err_mask)
		goto not_supported;

	/* NCQ priority support is advertised by bit 3 of the NCQ prio byte. */
	if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
		goto not_supported;

	dev->flags |= ATA_DFLAG_NCQ_PRIO;

	return;

not_supported:
	dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
	dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
}
2182
7a8526a5
KH
2183static bool ata_dev_check_adapter(struct ata_device *dev,
2184 unsigned short vendor_id)
2185{
2186 struct pci_dev *pcidev = NULL;
2187 struct device *parent_dev = NULL;
2188
2189 for (parent_dev = dev->tdev.parent; parent_dev != NULL;
2190 parent_dev = parent_dev->parent) {
2191 if (dev_is_pci(parent_dev)) {
2192 pcidev = to_pci_dev(parent_dev);
2193 if (pcidev->vendor == vendor_id)
2194 return true;
2195 break;
2196 }
2197 }
2198
2199 return false;
2200}
2201
/*
 * Configure NCQ for @dev and format a human-readable summary into
 * @desc (of size @desc_sz) for the probe message.
 *
 * Returns 0 on success (including "NCQ not used"), or -EIO when enabling
 * FPDMA auto-activation failed with a non-device error.
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (!IS_ENABLED(CONFIG_SATA_HOST))
		return 0;
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}

	/* Some device/adapter (ATI) combinations are quirked to avoid NCQ. */
	if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}

	if (ap->flags & ATA_FLAG_NCQ) {
		/* Host queue depth is bounded by the SCSI host and libata. */
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	/* Try to enable FPDMA auto-activation when both sides support it. */
	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	/* Report device depth alone, or host/device when the host is lower. */
	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);

	/* Auxiliary NCQ features require the FPDMA AUX field support. */
	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
		if (ata_id_has_ncq_send_and_recv(dev->id))
			ata_dev_config_ncq_send_recv(dev);
		if (ata_id_has_ncq_non_data(dev->id))
			ata_dev_config_ncq_non_data(dev);
		if (ata_id_has_ncq_prio(dev->id))
			ata_dev_config_ncq_prio(dev);
	}

	return 0;
}
f78dea06 2266
e87fd28c
HR
2267static void ata_dev_config_sense_reporting(struct ata_device *dev)
2268{
2269 unsigned int err_mask;
2270
2271 if (!ata_id_has_sense_reporting(dev->id))
2272 return;
2273
2274 if (ata_id_sense_reporting_enabled(dev->id))
2275 return;
2276
2277 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2278 if (err_mask) {
2279 ata_dev_dbg(dev,
2280 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2281 err_mask);
2282 }
2283}
2284
6d1003ae
HR
/*
 * Configure zoned (ZAC) device parameters: flag host-managed and
 * host-aware devices, then read the Zoned Device Information page of the
 * IDENTIFY DEVICE data log to get the zone limits. Fields whose validity
 * bit (bit 63) is clear keep their U32_MAX "unknown" default.
 */
static void ata_dev_config_zac(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;
	u8 *identify_buf = ap->sector_buf;

	dev->zac_zones_optimal_open = U32_MAX;
	dev->zac_zones_optimal_nonseq = U32_MAX;
	dev->zac_zones_max_open = U32_MAX;

	/*
	 * Always set the 'ZAC' flag for Host-managed devices.
	 */
	if (dev->class == ATA_DEV_ZAC)
		dev->flags |= ATA_DFLAG_ZAC;
	else if (ata_id_zoned_cap(dev->id) == 0x01)
		/*
		 * Check for host-aware devices.
		 */
		dev->flags |= ATA_DFLAG_ZAC;

	if (!(dev->flags & ATA_DFLAG_ZAC))
		return;

	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
		ata_dev_warn(dev,
			     "ATA Zoned Information Log not supported\n");
		return;
	}

	/*
	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_ZONED_INFORMATION,
				     identify_buf, 1);
	if (!err_mask) {
		u64 zoned_cap, opt_open, opt_nonseq, max_open;

		/* Each qword's bit 63 marks whether its value is valid. */
		zoned_cap = get_unaligned_le64(&identify_buf[8]);
		if ((zoned_cap >> 63))
			dev->zac_zoned_cap = (zoned_cap & 1);
		opt_open = get_unaligned_le64(&identify_buf[24]);
		if ((opt_open >> 63))
			dev->zac_zones_optimal_open = (u32)opt_open;
		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
		if ((opt_nonseq >> 63))
			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
		max_open = get_unaligned_le64(&identify_buf[40]);
		if ((max_open >> 63))
			dev->zac_zones_max_open = (u32)max_open;
	}
}
2338
818831c8
CH
/*
 * Detect Trusted Computing (e.g. TCG/OPAL) support from the Security page
 * of the IDENTIFY DEVICE data log and set ATA_DFLAG_TRUSTED accordingly.
 */
static void ata_dev_config_trusted(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	u64 trusted_cap;
	unsigned int err;

	if (!ata_id_has_trusted(dev->id))
		return;

	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
		ata_dev_warn(dev,
			     "Security Log not supported\n");
		return;
	}

	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
				ap->sector_buf, 1);
	if (err)
		return;

	trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
	/* Bit 63 marks the qword as valid; without it, ignore the field. */
	if (!(trusted_cap & (1ULL << 63))) {
		ata_dev_dbg(dev,
			    "Trusted Computing capability qword not valid!\n");
		return;
	}

	if (trusted_cap & (1 << 0))
		dev->flags |= ATA_DFLAG_TRUSTED;
}
2369
62e4a60e
DLM
/*
 * Detect Command Duration Limits (CDL, introduced in ACS-5 / major
 * version 12) support and, when present, cache the command duration
 * descriptors log so the SCSI layer can report them. Sets or clears
 * ATA_DFLAG_CDL.
 */
static void ata_dev_config_cdl(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;
	u64 val;

	if (ata_id_major_version(dev->id) < 12)
		goto not_supported;

	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
	    !ata_identify_page_supported(dev, ATA_LOG_SUPPORTED_CAPABILITIES))
		goto not_supported;

	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_SUPPORTED_CAPABILITIES,
				     ap->sector_buf, 1);
	if (err_mask)
		goto not_supported;

	/* Check Command Duration Limit Supported bits */
	val = get_unaligned_le64(&ap->sector_buf[168]);
	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(0)))
		goto not_supported;

	/* Warn the user if command duration guideline is not supported */
	if (!(val & BIT_ULL(1)))
		ata_dev_warn(dev,
			"Command duration guideline is not supported\n");

	/*
	 * Command duration limits is supported: cache the CDL log page 18h
	 * (command duration descriptors).
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, ap->sector_buf, 1);
	if (err_mask) {
		ata_dev_warn(dev, "Read Command Duration Limits log failed\n");
		goto not_supported;
	}

	memcpy(dev->cdl, ap->sector_buf, ATA_LOG_CDL_SIZE);
	dev->flags |= ATA_DFLAG_CDL;

	return;

not_supported:
	dev->flags &= ~ATA_DFLAG_CDL;
}
2417
891fd7c6
DLM
/*
 * Configure LBA addressing for @dev: set LBA/LBA48/FLUSH_EXT flags from
 * IDENTIFY data, configure NCQ, and print the capacity summary.
 *
 * Returns the result of ata_dev_config_ncq() (0 or -EIO).
 */
static int ata_dev_config_lba(struct ata_device *dev)
{
	const u16 *id = dev->id;
	const char *lba_desc;
	char ncq_desc[24];
	int ret;

	dev->flags |= ATA_DFLAG_LBA;

	if (ata_id_has_lba48(id)) {
		lba_desc = "LBA48";
		dev->flags |= ATA_DFLAG_LBA48;
		/* FLUSH CACHE EXT only matters beyond the 28-bit boundary. */
		if (dev->n_sectors >= (1UL << 28) &&
		    ata_id_has_flush_ext(id))
			dev->flags |= ATA_DFLAG_FLUSH_EXT;
	} else {
		lba_desc = "LBA";
	}

	/* config NCQ */
	ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

	/* print device info to dmesg */
	if (ata_dev_print_info(dev))
		ata_dev_info(dev,
			     "%llu sectors, multi %u: %s %s\n",
			     (unsigned long long)dev->n_sectors,
			     dev->multi_count, lba_desc, ncq_desc);

	return ret;
}
2449
2450static void ata_dev_config_chs(struct ata_device *dev)
2451{
891fd7c6
DLM
2452 const u16 *id = dev->id;
2453
2454 if (ata_id_current_chs_valid(id)) {
2455 /* Current CHS translation is valid. */
2456 dev->cylinders = id[54];
2457 dev->heads = id[55];
2458 dev->sectors = id[56];
2459 } else {
2460 /* Default translation */
2461 dev->cylinders = id[1];
2462 dev->heads = id[3];
2463 dev->sectors = id[6];
2464 }
2465
2466 /* print device info to dmesg */
1c95a27c 2467 if (ata_dev_print_info(dev))
891fd7c6
DLM
2468 ata_dev_info(dev,
2469 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2470 (unsigned long long)dev->n_sectors,
2471 dev->multi_count, dev->cylinders,
2472 dev->heads, dev->sectors);
2473}
2474
4d2e4980
DLM
2475static void ata_dev_config_fua(struct ata_device *dev)
2476{
2477 /* Ignore FUA support if its use is disabled globally */
2478 if (!libata_fua)
2479 goto nofua;
2480
2481 /* Ignore devices without support for WRITE DMA FUA EXT */
2482 if (!(dev->flags & ATA_DFLAG_LBA48) || !ata_id_has_fua(dev->id))
2483 goto nofua;
2484
2485 /* Ignore known bad devices and devices that lack NCQ support */
2486 if (!ata_ncq_supported(dev) || (dev->horkage & ATA_HORKAGE_NO_FUA))
2487 goto nofua;
2488
2489 dev->flags |= ATA_DFLAG_FUA;
2490
2491 return;
2492
2493nofua:
2494 dev->flags &= ~ATA_DFLAG_FUA;
2495}
2496
d8d8778c
DLM
/*
 * Detect DevSlp support and cache the DevSlp timing variables from the
 * SATA Settings page of the IDENTIFY DEVICE data log. Sets
 * ATA_DFLAG_DEVSLP and fills dev->devslp_timing on success.
 */
static void ata_dev_config_devslp(struct ata_device *dev)
{
	u8 *sata_setting = dev->link->ap->sector_buf;
	unsigned int err_mask;
	int i, j;

	/*
	 * Check device sleep capability. Get DevSlp timing variables
	 * from SATA Settings page of Identify Device Data Log.
	 */
	if (!ata_id_has_devslp(dev->id) ||
	    !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
		return;

	err_mask = ata_read_log_page(dev,
				     ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_SATA_SETTINGS,
				     sata_setting, 1);
	if (err_mask)
		return;

	dev->flags |= ATA_DFLAG_DEVSLP;
	/* Copy the timing bytes starting at ATA_LOG_DEVSLP_OFFSET. */
	for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
		j = ATA_LOG_DEVSLP_OFFSET + i;
		dev->devslp_timing[i] = sata_setting[j];
	}
}
2524
fe22e1c2
DLM
/*
 * Read the Concurrent Positioning Ranges log (introduced with ACS-5 /
 * major version 11) and cache its descriptors in dev->cpr_log. On any
 * failure the swap with the NULL local leaves dev->cpr_log cleared.
 */
static void ata_dev_config_cpr(struct ata_device *dev)
{
	unsigned int err_mask;
	size_t buf_len;
	int i, nr_cpr = 0;
	struct ata_cpr_log *cpr_log = NULL;
	u8 *desc, *buf = NULL;

	if (ata_id_major_version(dev->id) < 11)
		goto out;

	buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
	if (buf_len == 0)
		goto out;

	/*
	 * Read the concurrent positioning ranges log (0x47). We can have at
	 * most 255 32B range descriptors plus a 64B header. This log varies in
	 * size, so use the size reported in the GPL directory. Reading beyond
	 * the supported length will result in an error.
	 */
	buf_len <<= 9;	/* sectors -> bytes */
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		goto out;

	err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
				     0, buf, buf_len >> 9);
	if (err_mask)
		goto out;

	nr_cpr = buf[0];
	if (!nr_cpr)
		goto out;

	cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
	if (!cpr_log)
		goto out;

	cpr_log->nr_cpr = nr_cpr;
	/* Descriptors follow the 64-byte header, 32 bytes each. */
	desc = &buf[64];
	for (i = 0; i < nr_cpr; i++, desc += 32) {
		cpr_log->cpr[i].num = desc[0];
		cpr_log->cpr[i].num_storage_elements = desc[1];
		cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
		cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
	}

out:
	/* Install the new log (or NULL on failure) and free the old one. */
	swap(dev->cpr_log, cpr_log);
	kfree(cpr_log);
	kfree(buf);
}
2578
d633b8a7
DLM
/*
 * Print a one-line summary of the optional features detected on @dev,
 * but only when at least one feature flag is set.
 */
static void ata_dev_print_features(struct ata_device *dev)
{
	if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
		return;

	ata_dev_info(dev,
		     "Features:%s%s%s%s%s%s%s%s\n",
		     dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
		     dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
		     dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
		     dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
		     dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
		     dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
		     dev->flags & ATA_DFLAG_CDL ? " CDL" : "",
		     dev->cpr_log ? " CPR" : "");
}
2595
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	bool print_info = ata_dev_print_info(dev);
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	unsigned int err_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev)) {
		ata_dev_dbg(dev, "no device\n");
		return 0;
	}

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (ap->flags & ATA_FLAG_NO_LPM)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (dev->horkage & ATA_HORKAGE_NOLPM) {
		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
	}

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	ata_dev_dbg(dev,
		    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
		    "85:%04x 86:%04x 87:%04x 88:%04x\n",
		    __func__,
		    id[49], id[82], id[83], id[84],
		    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(dev, id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		/* print device info to dmesg */
		if (print_info)
			ata_dev_info(dev, "%s: %s, %s, max %s\n",
				     revbuf, modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask));

		if (ata_id_has_lba(id)) {
			rc = ata_dev_config_lba(dev);
			if (rc)
				return rc;
		} else {
			ata_dev_config_chs(dev);
		}

		/* Probe the optional feature set; order is significant as
		 * some helpers (e.g. FUA) depend on flags set earlier. */
		ata_dev_config_fua(dev);
		ata_dev_config_devslp(dev);
		ata_dev_config_sense_reporting(dev);
		ata_dev_config_zac(dev);
		ata_dev_config_trusted(dev);
		ata_dev_config_cpr(dev);
		ata_dev_config_cdl(dev);
		dev->cdb_len = 32;

		if (print_info)
			ata_dev_print_features(dev);
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		if (ata_id_has_da(dev->id)) {
			dev->flags |= ATA_DFLAG_DA;
			zpodd_init(dev);
		}

		/* print device info to dmesg */
		if (print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Give the LLD a final say on the configuration. */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in  case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	return rc;
}
2882
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40 unconditionally.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
EXPORT_SYMBOL_GPL(ata_cable_40wire);
be0d18df
AC
2896
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80 unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
EXPORT_SYMBOL_GPL(ata_cable_80wire);
be0d18df
AC
2910
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
EXPORT_SYMBOL_GPL(ata_cable_unknown);
be0d18df 2923
c88f90c3
TH
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_IGN unconditionally.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
EXPORT_SYMBOL_GPL(ata_cable_ignore);
c88f90c3 2936
be0d18df
AC
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
EXPORT_SYMBOL_GPL(ata_cable_sata);
be0d18df 2949
1da177e4
LT
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.  Failed devices are retried (see the fail: path below)
 *	with per-device try counters; a device is disabled once its
 *	counter reaches zero.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	/* every device starts with the full retry budget */
	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* snapshot the classes reported by reset, then mark each device
	 * unknown until IDENTIFY confirms it
	 */
	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* succeed if at least one device ended up enabled */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		fallthrough;
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
3087
3be680b7
TH
3088/**
3089 * sata_print_link_status - Print SATA link status
936fd732 3090 * @link: SATA link to printk link status about
3be680b7
TH
3091 *
3092 * This function prints link speed and status of a SATA link.
3093 *
3094 * LOCKING:
3095 * None.
3096 */
6bdb4fc9 3097static void sata_print_link_status(struct ata_link *link)
3be680b7 3098{
6d5f9732 3099 u32 sstatus, scontrol, tmp;
3be680b7 3100
936fd732 3101 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 3102 return;
55d5ba55
LZ
3103 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3104 return;
3be680b7 3105
b1c72916 3106 if (ata_phys_link_online(link)) {
3be680b7 3107 tmp = (sstatus >> 4) & 0xf;
a9a79dfe
JP
3108 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3109 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 3110 } else {
a9a79dfe
JP
3111 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3112 sstatus, scontrol);
3be680b7
TH
3113 }
3114}
3115
ebdfca6e
AC
3116/**
3117 * ata_dev_pair - return other device on cable
ebdfca6e
AC
3118 * @adev: device
3119 *
3120 * Obtain the other device on the same cable, or if none is
3121 * present NULL is returned
3122 */
2e9edbf8 3123
3373efd8 3124struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 3125{
9af5c9c9
TH
3126 struct ata_link *link = adev->link;
3127 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 3128 if (!ata_dev_enabled(pair))
ebdfca6e
AC
3129 return NULL;
3130 return pair;
3131}
a52fbcfc 3132EXPORT_SYMBOL_GPL(ata_dev_pair);
ebdfca6e 3133
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to equal to or
 *	lower than @spd_limit if such speed is supported.  If
 *	@spd_limit is slower than any supported speed, only the lowest
 *	supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/*
	 * Mask off all speeds higher than or equal to the current one.  At
	 * this point, if current SPD is not available and we previously
	 * recorded the link speed from SStatus, the driver has already
	 * masked off the highest bit so mask should already be 1 or 0.
	 * Otherwise, we should not force 1.5Gbps on a link where we have
	 * not previously recorded speed from SStatus.  Just return in this
	 * case.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else if (link->sata_spd)
		return -EINVAL;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		/* keep only speeds at or below @spd_limit, or fall back
		 * to the lowest supported speed if none qualify
		 */
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}
3213
#ifdef CONFIG_ATA_ACPI
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base transfer mode for @xfer_shift */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk the timing table upward while the mode's cycle still
	 * fits within @cycle; the last fitting mode wins
	 */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
#endif
a0f79b92 3266
cf176e1a
TH
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector (may be OR'd with ATA_DNXFER_QUIET
 *	      to suppress the warning message)
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest DMA mode, preferring UDMA over MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to 40-wire cable safe speeds */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		fallthrough;
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* refuse to drop below PIO entirely or to make no change at all */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3355
/*
 * ata_dev_set_mode - program @dev's transfer mode and revalidate
 *
 * Issues SET FEATURES - XFER MODE (unless the NOSETXFER quirk applies on
 * SATA), revalidates the device, and decides whether a device-reported
 * error can be ignored based on a series of known-quirk checks.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER is only honoured on SATA; on PATA skipping SETXFER
	 * is not safe, so warn and issue it anyway
	 */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	/* any error other than a device error is fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}
	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
		    dev->xfer_shift, (int)dev->xfer_mode);

	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
		ata_dev_info(dev, "configured for %s%s\n",
			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
			     dev_err_whine);

	return 0;

 fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3438
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned int pio_mask, dma_mask;
		unsigned int mode_mask;

		/* pick the DMA enable mask matching the device class */
		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
EXPORT_SYMBOL_GPL(ata_do_set_mode);
1da177e4 3538
aa2731ad
TH
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which 0xff timeout to use, read comment in libata.h */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from master.  On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	/* the -ENODEV grace period never extends past the caller's deadline */
	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
		 * offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* whine once if the wait drags on and time remains */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3625
/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	/* wait ATA_WAIT_AFTER_RESET ms before the first readiness poll */
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
aa2731ad 3648
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	Always 0.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link,
				      "failed to resume link for reset (errno=%d)\n",
				      rc);
	}

	/* no point in trying softreset on offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
EXPORT_SYMBOL_GPL(ata_std_prereset);
f5914a46 3694
57c9efdf
TH
3695/**
3696 * sata_std_hardreset - COMRESET w/o waiting or classification
3697 * @link: link to reset
3698 * @class: resulting class of attached device
3699 * @deadline: deadline jiffies for the operation
3700 *
3701 * Standard SATA COMRESET w/o waiting or classification.
3702 *
3703 * LOCKING:
3704 * Kernel thread context (may sleep)
3705 *
3706 * RETURNS:
3707 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3708 */
3709int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3710 unsigned long deadline)
3711{
3712 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3713 bool online;
3714 int rc;
3715
3716 /* do hardreset */
3717 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
57c9efdf
TH
3718 return online ? -EAGAIN : rc;
3719}
a52fbcfc 3720EXPORT_SYMBOL_GPL(sata_std_hardreset);
57c9efdf 3721
c2bd5804 3722/**
203c75b8 3723 * ata_std_postreset - standard postreset callback
cc0680a5 3724 * @link: the target ata_link
c2bd5804
TH
3725 * @classes: classes of attached devices
3726 *
3727 * This function is invoked after a successful reset. Note that
3728 * the device might have been reset more than once using
3729 * different reset methods before postreset is invoked.
c2bd5804 3730 *
c2bd5804
TH
3731 * LOCKING:
3732 * Kernel thread context (may sleep)
3733 */
203c75b8 3734void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3735{
f046519f
TH
3736 u32 serror;
3737
f046519f
TH
3738 /* reset complete, clear SError */
3739 if (!sata_scr_read(link, SCR_ERROR, &serror))
3740 sata_scr_write(link, SCR_ERROR, serror);
3741
c2bd5804 3742 /* print link status */
936fd732 3743 sata_print_link_status(link);
c2bd5804 3744}
a52fbcfc 3745EXPORT_SYMBOL_GPL(ata_std_postreset);
c2bd5804 3746
623a3128
TH
3747/**
3748 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3749 * @dev: device to compare against
3750 * @new_class: class of the new device
3751 * @new_id: IDENTIFY page of the new device
3752 *
3753 * Compare @new_class and @new_id against @dev and determine
3754 * whether @dev is the device indicated by @new_class and
3755 * @new_id.
3756 *
3757 * LOCKING:
3758 * None.
3759 *
3760 * RETURNS:
3761 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3762 */
3373efd8
TH
3763static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3764 const u16 *new_id)
623a3128
TH
3765{
3766 const u16 *old_id = dev->id;
a0cf733b
TH
3767 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3768 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3769
3770 if (dev->class != new_class) {
a9a79dfe
JP
3771 ata_dev_info(dev, "class mismatch %d != %d\n",
3772 dev->class, new_class);
623a3128
TH
3773 return 0;
3774 }
3775
a0cf733b
TH
3776 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3777 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3778 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3779 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3780
3781 if (strcmp(model[0], model[1])) {
a9a79dfe
JP
3782 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3783 model[0], model[1]);
623a3128
TH
3784 return 0;
3785 }
3786
3787 if (strcmp(serial[0], serial[1])) {
a9a79dfe
JP
3788 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3789 serial[0], serial[1]);
623a3128
TH
3790 return 0;
3791 }
3792
623a3128
TH
3793 return 1;
3794}
3795
3796/**
fe30911b 3797 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3798 * @dev: target ATA device
bff04647 3799 * @readid_flags: read ID flags
623a3128
TH
3800 *
3801 * Re-read IDENTIFY page and make sure @dev is still attached to
3802 * the port.
3803 *
3804 * LOCKING:
3805 * Kernel thread context (may sleep)
3806 *
3807 * RETURNS:
3808 * 0 on success, negative errno otherwise
3809 */
fe30911b 3810int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3811{
5eb45c02 3812 unsigned int class = dev->class;
9af5c9c9 3813 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3814 int rc;
3815
fe635c7e 3816 /* read ID data */
bff04647 3817 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3818 if (rc)
fe30911b 3819 return rc;
623a3128
TH
3820
3821 /* is the device still there? */
fe30911b
TH
3822 if (!ata_dev_same_device(dev, class, id))
3823 return -ENODEV;
623a3128 3824
fe635c7e 3825 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3826 return 0;
3827}
3828
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *	Also detects capacity changes caused by late HPA (Host
 *	Protected Area) lock/unlock and reacts accordingly.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_ZAC &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * Something could have caused HPA to be unlocked
	 * involuntarily.  If n_native_sectors hasn't changed and the
	 * new size matches it, keep the device.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/*
	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
	 * unlocking HPA in those cases.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3925
6919a0a6
AC
/* One quirk-table entry: devices whose model (and optionally firmware
 * revision) match get the listed horkage flags applied.  Patterns appear
 * to be glob-style (e.g. "CRD-848[02]B" below) — matching is done by the
 * table's consumer, not here.
 */
struct ata_blacklist_entry {
	const char *model_num;	/* model-number pattern to match */
	const char *model_rev;	/* firmware-revision pattern; NULL = any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply on match */
};
3931
/*
 * Table of devices with known quirks.  Scanned linearly by
 * ata_dev_blacklisted(); the first matching entry wins, so
 * more specific patterns must precede broader ones (e.g. the
 * Samsung "840 EVO" entry before the generic "840*" entry).
 * NOTE(review): embedded runs of spaces inside model strings
 * (e.g. "Config  Disk") are significant — they must match the
 * IDENTIFY data exactly; verify against upstream before editing.
 */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
	/* Similar story with ASMedia 1092 */
	{ "ASMT109x- Config",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },

	/*
	 * Causes silent data corruption with higher max sects.
	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
	 */
	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },

	/*
	 * These devices time out with higher max sects.
	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
	 */
	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* drives which fail FPDMA_AA activation (some may freeze afterwards)
	   the ST disks also have LPM issues */
	{ "ST1000LM024 HN-M101MBB", NULL,	ATA_HORKAGE_BROKEN_FPDMA_AA |
						ATA_HORKAGE_NOLPM },
	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",	"MB3OC60D",	ATA_HORKAGE_NONCQ },
	{ "HTS541080G9SA00",	"MB4OC60D",	ATA_HORKAGE_NONCQ },
	{ "HTS541010G9SA00",	"MBZOC60D",	ATA_HORKAGE_NONCQ },

	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ },

	/* Sandisk SD7/8/9s lock up hard on large trims */
	{ "SanDisk SD[789]*",	NULL,		ATA_HORKAGE_MAX_TRIM_128M },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IOs on the area */
	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK },
	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS },
	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

	/* These specific Pioneer models have LPM issues */
	{ "PIONEER BD-RW   BDR-207M",	NULL,	ATA_HORKAGE_NOLPM },
	{ "PIONEER BD-RW   BDR-205",	NULL,	ATA_HORKAGE_NOLPM },

	/* Crucial BX100 SSD 500GB has broken LPM support */
	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },

	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM },
	/* 512GB MX100 with newer firmware has only LPM issues */
	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM },

	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM },
	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NOLPM },

	/* These specific Samsung models/firmware-revs do not handle LPM well */
	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM },
	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM },
	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },

	/* devices that don't properly handle queued TRIM commands */
	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Samsung SSD 840 EVO*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_NO_DMA_LOG |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Samsung SSD 860*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NO_NCQ_ON_ATI },
	{ "Samsung SSD 870*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NO_NCQ_ON_ATI },
	{ "SAMSUNG*MZ7LH*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM |
						ATA_HORKAGE_NO_NCQ_ON_ATI, },
	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM },

	/* devices that don't properly handle TRIM commands */
	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM },
	{ "M88V29*",			NULL,	ATA_HORKAGE_NOTRIM },

	/*
	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
	 * (Return Zero After Trim) flags in the ATA Command Set are
	 * unreliable in the sense that they only define what happens if
	 * the device successfully executed the DSM TRIM command.  TRIM
	 * is only advisory, however, and the device is free to silently
	 * ignore all or parts of the request.
	 *
	 * Whitelist drives that are known to reliably return zeroes
	 * after TRIM.
	 */

	/*
	 * The intel 510 drive has buggy DRAT/RZAT.  Explicitly exclude
	 * that model before whitelisting all other intel SSDs.
	 */
	{ "INTEL*SSDSC2MH*",		NULL,	0 },

	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "INTEL*SSD*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },

	/*
	 * Some WD SATA-I drives spin up and down erratically when the link
	 * is put into the slumber mode.  We don't have full list of the
	 * affected devices.  Disable LPM if the device matches one of the
	 * known prefixes and is SATA-1.  As a side effect LPM partial is
	 * lost too.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
	 */
	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },

	/*
	 * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
	 * log page is accessed.  Ensure we never ask for this log page with
	 * these devices.
	 */
	{ "SATADOM-ML 3ME",		NULL,	ATA_HORKAGE_NO_LOG_DIR },

	/* Buggy FUA */
	{ "Maxtor",		"BANC1G10",	ATA_HORKAGE_NO_FUA },
	{ "WDC*WD2500J*",	NULL,		ATA_HORKAGE_NO_FUA },
	{ "OCZ-VERTEX*",	NULL,		ATA_HORKAGE_NO_FUA },
	{ "INTEL*SSDSC2CT*",	NULL,		ATA_HORKAGE_NO_FUA },

	/* End Marker */
	{ }
};
2e9edbf8 4196
75683fe7 4197static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4198{
8bfa79fc
TH
4199 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4200 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4201 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4202
8bfa79fc
TH
4203 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4204 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4205
6919a0a6 4206 while (ad->model_num) {
1c402799 4207 if (glob_match(ad->model_num, model_num)) {
6919a0a6
AC
4208 if (ad->model_rev == NULL)
4209 return ad->horkage;
1c402799 4210 if (glob_match(ad->model_rev, model_rev))
6919a0a6 4211 return ad->horkage;
f4b15fef 4212 }
6919a0a6 4213 ad++;
f4b15fef 4214 }
1da177e4
LT
4215 return 0;
4216}
4217
6919a0a6
AC
4218static int ata_dma_blacklisted(const struct ata_device *dev)
4219{
4220 /* We don't support polling DMA.
4221 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4222 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4223 */
9af5c9c9 4224 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4225 (dev->flags & ATA_DFLAG_CDB_INTR))
4226 return 1;
75683fe7 4227 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4228}
4229
6bbfd53d
AC
4230/**
4231 * ata_is_40wire - check drive side detection
4232 * @dev: device
4233 *
4234 * Perform drive side detection decoding, allowing for device vendors
4235 * who can't follow the documentation.
4236 */
4237
4238static int ata_is_40wire(struct ata_device *dev)
4239{
4240 if (dev->horkage & ATA_HORKAGE_IVB)
4241 return ata_drive_40wire_relaxed(dev->id);
4242 return ata_drive_40wire(dev->id);
4243}
4244
/**
 *	cable_is_40wire		-	40/80/SATA decider
 *	@ap: port to consider
 *
 *	This function encapsulates the policy for speed management
 *	in one place.  At the moment we don't cache the result but
 *	there is a good case for setting ap->cbl to the result when
 *	we are called with unknown cables (and figuring out if it
 *	impacts hotplug at all).
 *
 *	Return 1 if the cable appears to be 40 wire.
 */

static int cable_is_40wire(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* If the controller thinks we are 40 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA40)
		return 1;

	/* If the controller thinks we are 80 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
		return 0;

	/* If the system is known to be 40 wire short cable (eg
	 * laptop), then we allow 80 wire modes even if the drive
	 * isn't sure.
	 */
	if (ap->cbl == ATA_CBL_PATA40_SHORT)
		return 0;

	/* If the controller doesn't know, we scan.
	 *
	 * Note: We look for all 40 wire detects at this point.  Any
	 * 80 wire detect is taken to be 80 wire cable because
	 * - in many setups only the one drive (slave if present) will
	 *   give a valid detect
	 * - if you have a non detect capable drive you don't want it
	 *   to colour the choice
	 */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ENABLED) {
			if (!ata_is_40wire(dev))
				return 0;
		}
	}
	return 1;
}
4295
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...  The limits are applied in a fixed order;
 *	the cable rule is deliberately applied last (see below).
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned int xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	/* only one device may use DMA at a time on a simplex host */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well.  Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4373
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;

	/* set up set-features taskfile */
	ata_dev_dbg(dev, "set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	/*
	 * On some disks, this command causes spin-up, so we need longer
	 * timeout.
	 */
	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
}
1152b261 4418
/**
 *	ata_dev_set_feature - Issue SET FEATURES
 *	@dev: Device to which command will be sent
 *	@subcmd: The SET FEATURES subcommand to be sent
 *	@action: The sector count represents a subcommand specific action
 *
 *	Issue SET FEATURES command to device @dev on port @ap with sector count
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_dev_set_feature(struct ata_device *dev, u8 subcmd, u8 action)
{
	struct ata_taskfile tf;
	unsigned int timeout = 0;

	/* set up set-features taskfile */
	ata_dev_dbg(dev, "set features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = subcmd;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = action;

	/* PUIS spin-up may take much longer than a normal command;
	 * honour a user-specified probe timeout if one was given.
	 */
	if (subcmd == SETFEATURES_SPINUP)
		timeout = ata_probe_timeout ?
			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
}
EXPORT_SYMBOL_GPL(ata_dev_set_feature);
1da177e4 4455
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	ata_dev_dbg(dev, "init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
		err_mask = 0;

	return err_mask;
}
4497
1da177e4 4498/**
5895ef9a 4499 * atapi_check_dma - Check whether ATAPI DMA can be supported
1da177e4
LT
4500 * @qc: Metadata associated with taskfile to check
4501 *
780a87f7
JG
4502 * Allow low-level driver to filter ATA PACKET commands, returning
4503 * a status indicating whether or not it is OK to use DMA for the
4504 * supplied PACKET command.
4505 *
1da177e4 4506 * LOCKING:
624d5c51
TH
4507 * spin_lock_irqsave(host lock)
4508 *
4509 * RETURNS: 0 when ATAPI DMA can be used
4510 * nonzero otherwise
4511 */
5895ef9a 4512int atapi_check_dma(struct ata_queued_cmd *qc)
624d5c51
TH
4513{
4514 struct ata_port *ap = qc->ap;
71601958 4515
624d5c51
TH
4516 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4517 * few ATAPI devices choke on such DMA requests.
4518 */
6a87e42e
TH
4519 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4520 unlikely(qc->nbytes & 15))
624d5c51 4521 return 1;
e2cec771 4522
624d5c51
TH
4523 if (ap->ops->check_atapi_dma)
4524 return ap->ops->check_atapi_dma(qc);
e2cec771 4525
624d5c51
TH
4526 return 0;
4527}
1da177e4 4528
624d5c51
TH
4529/**
4530 * ata_std_qc_defer - Check whether a qc needs to be deferred
4531 * @qc: ATA command in question
4532 *
4533 * Non-NCQ commands cannot run with any other command, NCQ or
4534 * not. As upper layer only knows the queue depth, we are
4535 * responsible for maintaining exclusion. This function checks
4536 * whether a new command @qc can be issued.
4537 *
4538 * LOCKING:
4539 * spin_lock_irqsave(host lock)
4540 *
4541 * RETURNS:
4542 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4543 */
4544int ata_std_qc_defer(struct ata_queued_cmd *qc)
4545{
4546 struct ata_link *link = qc->dev->link;
e2cec771 4547
179b310a 4548 if (ata_is_ncq(qc->tf.protocol)) {
624d5c51
TH
4549 if (!ata_tag_valid(link->active_tag))
4550 return 0;
4551 } else {
4552 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4553 return 0;
4554 }
e2cec771 4555
624d5c51
TH
4556 return ATA_DEFER_LINK;
4557}
a52fbcfc 4558EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6912ccd5 4559
/* No-op ->qc_prep for drivers that need no per-command preparation. */
enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
	return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4 4565
624d5c51
TH
4566/**
4567 * ata_sg_init - Associate command with scatter-gather table.
4568 * @qc: Command to be associated
4569 * @sg: Scatter-gather table.
4570 * @n_elem: Number of elements in s/g table.
4571 *
4572 * Initialize the data-related elements of queued_cmd @qc
4573 * to point to a scatter-gather table @sg, containing @n_elem
4574 * elements.
4575 *
4576 * LOCKING:
4577 * spin_lock_irqsave(host lock)
4578 */
4579void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4580 unsigned int n_elem)
4581{
4582 qc->sg = sg;
4583 qc->n_elem = n_elem;
4584 qc->cursg = qc->sg;
4585}
bb5cb290 4586
2874d5ee
GU
4587#ifdef CONFIG_HAS_DMA
4588
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *	Note: unmapping uses orig_n_elem, the pre-mapping element
 *	count recorded by ata_sg_setup().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON_ONCE(sg == NULL);

	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
4612
624d5c51
TH
4613/**
4614 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4615 * @qc: Command with scatter-gather table to be mapped.
4616 *
4617 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4618 *
4619 * LOCKING:
4620 * spin_lock_irqsave(host lock)
4621 *
4622 * RETURNS:
4623 * Zero on success, negative on error.
4624 *
4625 */
4626static int ata_sg_setup(struct ata_queued_cmd *qc)
4627{
4628 struct ata_port *ap = qc->ap;
4629 unsigned int n_elem;
1da177e4 4630
624d5c51
TH
4631 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4632 if (n_elem < 1)
4633 return -1;
bb5cb290 4634
5825627c 4635 qc->orig_n_elem = qc->n_elem;
624d5c51
TH
4636 qc->n_elem = n_elem;
4637 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4 4638
624d5c51 4639 return 0;
1da177e4
LT
4640}
4641
#else /* !CONFIG_HAS_DMA */

/* Without DMA there is nothing to (un)map; setup always fails. */
static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }

#endif /* !CONFIG_HAS_DMA */
4648
624d5c51
TH
4649/**
4650 * swap_buf_le16 - swap halves of 16-bit words in place
4651 * @buf: Buffer to swap
4652 * @buf_words: Number of 16-bit words in buffer.
4653 *
4654 * Swap halves of 16-bit words if needed to convert from
4655 * little-endian byte order to native cpu byte order, or
4656 * vice-versa.
4657 *
4658 * LOCKING:
4659 * Inherited from caller.
4660 */
4661void swap_buf_le16(u16 *buf, unsigned int buf_words)
8061f5f0 4662{
624d5c51
TH
4663#ifdef __BIG_ENDIAN
4664 unsigned int i;
8061f5f0 4665
624d5c51
TH
4666 for (i = 0; i < buf_words; i++)
4667 buf[i] = le16_to_cpu(buf[i]);
4668#endif /* __BIG_ENDIAN */
8061f5f0
TH
4669}
4670
8a8bc223
TH
4671/**
4672 * ata_qc_free - free unused ata_queued_cmd
4673 * @qc: Command to complete
4674 *
4675 * Designed to free unused ata_queued_cmd object
4676 * in case something prevents using it.
4677 *
4678 * LOCKING:
4679 * spin_lock_irqsave(host lock)
4680 */
4681void ata_qc_free(struct ata_queued_cmd *qc)
4682{
8a8bc223 4683 qc->flags = 0;
4f1a22ee 4684 if (ata_tag_valid(qc->tag))
8a8bc223 4685 qc->tag = ATA_TAG_POISON;
8a8bc223
TH
4686}
4687
/*
 * Core completion path shared by successful and failed commands:
 * unmaps DMA, drops the command from the link/port active-tag
 * bookkeeping, clears exclusive-link status and finally invokes the
 * qc's completion callback.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (ata_is_ncq(qc->tf.protocol)) {
		link->sactive &= ~(1 << qc->hw_tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called.  (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1ULL << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4726
39599a53
TH
4727static void fill_result_tf(struct ata_queued_cmd *qc)
4728{
4729 struct ata_port *ap = qc->ap;
4730
39599a53 4731 qc->result_tf.flags = qc->tf.flags;
22183bf5 4732 ap->ops->qc_fill_rtf(qc);
39599a53
TH
4733}
4734
00115e0f
TH
4735static void ata_verify_xfer(struct ata_queued_cmd *qc)
4736{
4737 struct ata_device *dev = qc->dev;
4738
eb0effdf 4739 if (!ata_is_data(qc->tf.protocol))
00115e0f
TH
4740 return;
4741
4742 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4743 return;
4744
4745 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4746}
4747
f686bcb8
TH
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed, with either an ok or not-ok status.
 *
 *	Refrain from calling this function multiple times when
 *	successfully completing multiple NCQ commands.
 *	ata_qc_complete_multiple() should be used instead, which will
 *	properly update IRQ expect state.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Trigger the LED (if available) */
	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a qc owned by EH is marked with ATA_QCFLAG_EH.
	 * Normal execution path is responsible for not accessing a
	 * qc owned by EH.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for qcs owned by EH.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		/* Any error hands ownership of the qc to EH. */
		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_EH;

		/*
		 * Finish internal commands without any further processing
		 * and always with the result TF filled.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			trace_ata_qc_complete_internal(qc);
			__ata_qc_complete(qc);
			return;
		}

		/*
		 * Non-internal qc has failed.  Fill the result TF and
		 * summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
			fill_result_tf(qc);
			trace_ata_qc_complete_failed(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		/* Successful completion must not happen on a frozen port. */
		WARN_ON_ONCE(ata_port_is_frozen(ap));

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		trace_ata_qc_complete_done(qc);
		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			/* only cache/read-ahead toggles need revalidation */
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF &&
			    qc->tf.feature != SETFEATURES_RA_ON &&
			    qc->tf.feature != SETFEATURES_RA_OFF)
				break;
			fallthrough;
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		/* A clean data transfer confirms a dubious transfer mode. */
		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		/* Old EH: a qc scheduled for EH must not complete here. */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
EXPORT_SYMBOL_GPL(ata_qc_complete);
f686bcb8 4858
8385d756
SH
4859/**
4860 * ata_qc_get_active - get bitmask of active qcs
4861 * @ap: port in question
4862 *
4863 * LOCKING:
4864 * spin_lock_irqsave(host lock)
4865 *
4866 * RETURNS:
4867 * Bitmask of active qcs
4868 */
4869u64 ata_qc_get_active(struct ata_port *ap)
4870{
4871 u64 qc_active = ap->qc_active;
4872
4873 /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
4874 if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
4875 qc_active |= (1 << 0);
4876 qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
4877 }
4878
4879 return qc_active;
4880}
4881EXPORT_SYMBOL_GPL(ata_qc_get_active);
4882
1da177e4
LT
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		/* NCQ: this hw tag must not already be in flight */
		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->hw_tag;
	} else {
		/* non-NCQ must not be mixed with outstanding NCQ commands */
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1ULL << qc->tag;

	/*
	 * We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
		goto sys_err;

	/* DMA always needs mapping; PIO only when the LLD uses DMA for PIO */
	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	trace_ata_qc_prep(qc);
	qc->err_mask |= ap->ops->qc_prep(qc);
	if (unlikely(qc->err_mask))
		goto err;
	trace_ata_qc_issue(qc);
	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	/* completion path undoes the active accounting done above */
	ata_qc_complete(qc);
}
4958
34bf2170 4959/**
b1c72916 4960 * ata_phys_link_online - test whether the given link is online
936fd732 4961 * @link: ATA link to test
34bf2170 4962 *
936fd732
TH
4963 * Test whether @link is online. Note that this function returns
4964 * 0 if online status of @link cannot be obtained, so
4965 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4966 *
4967 * LOCKING:
4968 * None.
4969 *
4970 * RETURNS:
b5b3fa38 4971 * True if the port online status is available and online.
34bf2170 4972 */
b1c72916 4973bool ata_phys_link_online(struct ata_link *link)
34bf2170
TH
4974{
4975 u32 sstatus;
4976
936fd732 4977 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 4978 ata_sstatus_online(sstatus))
b5b3fa38
TH
4979 return true;
4980 return false;
34bf2170
TH
4981}
4982
4983/**
b1c72916 4984 * ata_phys_link_offline - test whether the given link is offline
936fd732 4985 * @link: ATA link to test
34bf2170 4986 *
936fd732
TH
4987 * Test whether @link is offline. Note that this function
4988 * returns 0 if offline status of @link cannot be obtained, so
4989 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4990 *
4991 * LOCKING:
4992 * None.
4993 *
4994 * RETURNS:
b5b3fa38 4995 * True if the port offline status is available and offline.
34bf2170 4996 */
b1c72916 4997bool ata_phys_link_offline(struct ata_link *link)
34bf2170
TH
4998{
4999 u32 sstatus;
5000
936fd732 5001 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 5002 !ata_sstatus_online(sstatus))
b5b3fa38
TH
5003 return true;
5004 return false;
34bf2170 5005}
0baab86b 5006
b1c72916
TH
5007/**
5008 * ata_link_online - test whether the given link is online
5009 * @link: ATA link to test
5010 *
5011 * Test whether @link is online. This is identical to
5012 * ata_phys_link_online() when there's no slave link. When
5013 * there's a slave link, this function should only be called on
5014 * the master link and will return true if any of M/S links is
5015 * online.
5016 *
5017 * LOCKING:
5018 * None.
5019 *
5020 * RETURNS:
5021 * True if the port online status is available and online.
5022 */
5023bool ata_link_online(struct ata_link *link)
5024{
5025 struct ata_link *slave = link->ap->slave_link;
5026
5027 WARN_ON(link == slave); /* shouldn't be called on slave link */
5028
5029 return ata_phys_link_online(link) ||
5030 (slave && ata_phys_link_online(slave));
5031}
a52fbcfc 5032EXPORT_SYMBOL_GPL(ata_link_online);
b1c72916
TH
5033
5034/**
5035 * ata_link_offline - test whether the given link is offline
5036 * @link: ATA link to test
5037 *
5038 * Test whether @link is offline. This is identical to
5039 * ata_phys_link_offline() when there's no slave link. When
5040 * there's a slave link, this function should only be called on
5041 * the master link and will return true if both M/S links are
5042 * offline.
5043 *
5044 * LOCKING:
5045 * None.
5046 *
5047 * RETURNS:
5048 * True if the port offline status is available and offline.
5049 */
5050bool ata_link_offline(struct ata_link *link)
5051{
5052 struct ata_link *slave = link->ap->slave_link;
5053
5054 WARN_ON(link == slave); /* shouldn't be called on slave link */
5055
5056 return ata_phys_link_offline(link) &&
5057 (!slave || ata_phys_link_offline(slave));
5058}
a52fbcfc 5059EXPORT_SYMBOL_GPL(ata_link_offline);
b1c72916 5060
6ffa01d8 5061#ifdef CONFIG_PM
bc6e7c4b
DW
/*
 * ata_port_request_pm - hand a suspend/resume request to EH
 * @ap: port to operate on
 * @mesg: PM message to record in ap->pm_mesg
 * @action: EH action to request on every link (e.g. ATA_EH_RESET)
 * @ehi_flags: EH info flags to set on every link
 * @async: if false, wait until EH has processed the request
 *
 * All PM operations are executed by the error handler; this merely
 * records the request under the port lock and kicks EH.
 */
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
				unsigned int action, unsigned int ehi_flags,
				bool async)
{
	struct ata_link *link;
	unsigned long flags;

	/* Previous resume operation might still be in
	 * progress.  Wait for PM_PENDING to clear.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* request PM ops to EH */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Synchronous callers wait for EH to finish the PM operation. */
	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}
}
5096
bc6e7c4b
DW
/*
 * On some hardware, device fails to respond after spun down for suspend.  As
 * the device won't be used before being resumed, we don't need to touch the
 * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
 *
 * http://thread.gmane.org/gmane.linux.ide/46764
 */
static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
						 | ATA_EHI_NO_AUTOPSY
						 | ATA_EHI_NO_RECOVERY;

/* Synchronous port suspend: returns after EH has processed the request. */
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}

/* Asynchronous port suspend: queues the request to EH and returns. */
static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
5117
bc6e7c4b 5118static int ata_port_pm_suspend(struct device *dev)
5ef41082 5119{
bc6e7c4b
DW
5120 struct ata_port *ap = to_ata_port(dev);
5121
5ef41082
LM
5122 if (pm_runtime_suspended(dev))
5123 return 0;
5124
bc6e7c4b
DW
5125 ata_port_suspend(ap, PMSG_SUSPEND);
5126 return 0;
33574d68
LM
5127}
5128
bc6e7c4b 5129static int ata_port_pm_freeze(struct device *dev)
33574d68 5130{
bc6e7c4b
DW
5131 struct ata_port *ap = to_ata_port(dev);
5132
33574d68 5133 if (pm_runtime_suspended(dev))
f5e6d0d0 5134 return 0;
33574d68 5135
bc6e7c4b
DW
5136 ata_port_suspend(ap, PMSG_FREEZE);
5137 return 0;
33574d68
LM
5138}
5139
bc6e7c4b 5140static int ata_port_pm_poweroff(struct device *dev)
33574d68 5141{
bc6e7c4b
DW
5142 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5143 return 0;
5ef41082
LM
5144}
5145
bc6e7c4b
DW
/* Resume goes through EH quietly, with a reset but no autopsy. */
static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
						| ATA_EHI_QUIET;

/* Synchronous port resume: returns after EH has processed the request. */
static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}

/* Asynchronous port resume: queues the request to EH and returns. */
static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}

/*
 * dev_pm_ops ->resume callback.  Resumes asynchronously, then
 * re-arms runtime PM by marking the device active again.
 */
static int ata_port_pm_resume(struct device *dev)
{
	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	return 0;
}
5167
7e15e9be
AL
/*
 * For ODDs, the upper layer will poll for media change every few seconds,
 * which will make it enter and leave suspend state every few seconds. And
 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
 * is very little and the ODD may malfunction after constantly being reset.
 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
 * ODD is attached to the port.
 */
static int ata_port_runtime_idle(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);
	struct ata_link *link;
	struct ata_device *adev;

	/* Veto runtime suspend if any enabled non-ZPODD ATAPI device exists. */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(adev, link, ENABLED)
			if (adev->class == ATA_DEV_ATAPI &&
			    !zpodd_dev_enabled(adev))
				return -EBUSY;
	}

	return 0;
}
5191
a7ff60db
AL
/* Runtime-PM suspend callback: synchronous suspend with AUTO message. */
static int ata_port_runtime_suspend(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
	return 0;
}

/* Runtime-PM resume callback: synchronous resume with AUTO message. */
static int ata_port_runtime_resume(struct device *dev)
{
	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
	return 0;
}
5203
5ef41082 5204static const struct dev_pm_ops ata_port_pm_ops = {
bc6e7c4b
DW
5205 .suspend = ata_port_pm_suspend,
5206 .resume = ata_port_pm_resume,
5207 .freeze = ata_port_pm_freeze,
5208 .thaw = ata_port_pm_resume,
5209 .poweroff = ata_port_pm_poweroff,
5210 .restore = ata_port_pm_resume,
9ee4f393 5211
a7ff60db
AL
5212 .runtime_suspend = ata_port_runtime_suspend,
5213 .runtime_resume = ata_port_runtime_resume,
9ee4f393 5214 .runtime_idle = ata_port_runtime_idle,
5ef41082
LM
5215};
5216
2fcbdcb4
DW
/* sas ports don't participate in pm runtime management of ata_ports,
 * and need to resume ata devices at the domain level, not the per-port
 * level. sas suspend/resume is async to allow parallel port recovery
 * since sas has multiple ata_port instances per Scsi_Host.
 */
void ata_sas_port_suspend(struct ata_port *ap)
{
	ata_port_suspend_async(ap, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);

void ata_sas_port_resume(struct ata_port *ap)
{
	ata_port_resume_async(ap, PMSG_RESUME);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
2fcbdcb4 5233
500530f6 5234/**
cca3974e
JG
5235 * ata_host_suspend - suspend host
5236 * @host: host to suspend
500530f6
TH
5237 * @mesg: PM message
5238 *
5ef41082 5239 * Suspend @host. Actual operation is performed by port suspend.
500530f6 5240 */
ec87cf37 5241void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 5242{
5ef41082 5243 host->dev->power.power_state = mesg;
500530f6 5244}
a52fbcfc 5245EXPORT_SYMBOL_GPL(ata_host_suspend);
500530f6
TH
5246
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by port resume.
 */
void ata_host_resume(struct ata_host *host)
{
	/* Only records the PM state; the real work happens per-port. */
	host->dev->power.power_state = PMSG_ON;
}
EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 5258#endif
500530f6 5259
/* Device type for ata_port devices; wires in the PM ops when enabled. */
const struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
5266
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Wipe the probe-scratch region of the structure. */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	/* Start with no transfer-mode restrictions; probing narrows these. */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5301
4fb37a25
TH
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5338
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize ``link->[hw_]sata_spd_limit`` to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	/* Save SControl so it can be restored later (e.g. after reset). */
	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* SControl bits 7:4 hold the configured speed limit (SPD). */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	/* Apply any libata.force= command-line overrides. */
	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
5371
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* Start frozen; EH thaws the port once probing begins. */
	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	ap->print_id = -1;
	ap->local_port_no = -1;
	ap->host = host;
	ap->dev = host->dev;

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
		    TIMER_DEFERRABLE);

	/* Cable type is unknown until the LLD/EH determines it. */
	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
5420
/*
 * devres release callback: drop the SCSI host references of all ports
 * and release the devres-held reference on the ata_host itself.
 */
static void ata_devres_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

	}

	dev_set_drvdata(gendev, NULL);
	/* Final host memory is freed by ata_host_release() via the kref. */
	ata_host_put(host);
}
5440
/* kref release: free all ports and the host container itself. */
static void ata_host_release(struct kref *kref)
{
	struct ata_host *host = container_of(kref, struct ata_host, kref);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/*
		 * NOTE(review): ap is dereferenced without a NULL check here,
		 * unlike ata_devres_release() — presumably ports[] entries are
		 * always populated by this point; confirm against callers.
		 */
		kfree(ap->pmp_link);
		kfree(ap->slave_link);
		kfree(ap);
		host->ports[i] = NULL;
	}
	kfree(host);
}
1aa506e4 5456
2623c7a5
TK
/* Take a reference on @host. */
void ata_host_get(struct ata_host *host)
{
	kref_get(&host->kref);
}

/* Drop a reference on @host; frees it via ata_host_release() at zero. */
void ata_host_put(struct ata_host *host)
{
	kref_put(&host->kref, ata_host_release);
}
EXPORT_SYMBOL_GPL(ata_host_put);
f0d36efd 5467
f3187195
TH
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;
	void *dr;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = kzalloc(sz, GFP_KERNEL);
	if (!host)
		return NULL;

	/* Group the devres entries so a failed init can be rolled back. */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		goto err_free;

	/* Zero-sized devres whose release callback tears the host down. */
	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
	if (!dr)
		goto err_out;

	devres_add(dev, dr);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;
	kref_init(&host->kref);

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
 err_free:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
f3187195 5539
f5cda257
TH
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi = &ata_dummy_port_info;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Past a NULL terminator, keep reusing the last port_info. */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* host->ops defaults to the first non-dummy port ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
f5cda257 5588
32ebbc0c
TH
/*
 * devres release callback installed by ata_host_start(): invokes the
 * per-port and per-host stop methods when the owning device goes away.
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
5606
029cfd6b
TH
/**
 *	ata_finalize_port_ops - finalize ata_port_operations
 *	@ops: ata_port_operations to finalize
 *
 *	An ata_port_operations can inherit from another ops and that
 *	ops can again inherit from another.  This can go on as many
 *	times as necessary as long as there is no loop in the
 *	inheritance chain.
 *
 *	Ops tables are finalized when the host is started.  NULL or
 *	unspecified entries are inherited from the closet ancestor
 *	which has the method and the entry is populated with it.
 *	After finalization, the ops table directly points to all the
 *	methods and ->inherits is no longer necessary and cleared.
 *
 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
 *
 *	LOCKING:
 *	None.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	/* Treat the ops struct as an array of method pointers, spanning
	 * from its first member up to (but excluding) ->inherits.
	 */
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* Walk the ancestor chain, filling each still-NULL slot. */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ATA_OP_NULL entries (ERR_PTR-encoded) become real NULLs. */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
5656
ecef7253
TH
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops is not initialized yet, it is set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	/* Idempotent: a second call is a no-op. */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops && host->ops->host_stop)
		have_stop = 1;

	/* Register a devres teardown only if any stop method exists. */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				/* -ENODEV just means the port is absent */
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		/* Ports stay frozen until EH thaws them during probe. */
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* Unwind: stop the ports already started, newest first. */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_start);
ecef7253 5737
/**
 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@ops: port_ops
 *
 *	Minimal initialization for hosts whose memory is managed by the
 *	SAS/ipr layers rather than allocated via ata_host_alloc().
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->n_tags = ATA_MAX_QUEUE;
	host->dev = dev;
	host->ops = ops;
	kref_init(&host->kref);
}
EXPORT_SYMBOL_GPL(ata_host_init);
b03732f0 5756
/*
 * __ata_port_probe - kick EH-based probing of all devices on a port
 * @ap: ATA port to probe
 *
 * Marks every device on @ap for reset/probe and schedules the error
 * handler, which performs the actual work.  Returns without waiting;
 * callers use ata_port_wait_eh() to wait for completion.
 */
void __ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* kick EH for boot probing */
	spin_lock_irqsave(ap->lock, flags);

	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	/* boot probe: no failure autopsy, keep the log quiet */
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	/* transition the port from "initializing" to "loading" */
	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
79318057 5775
9508a66f
DW
5776int ata_port_probe(struct ata_port *ap)
5777{
5778 int rc = 0;
79318057 5779
9508a66f
DW
5780 if (ap->ops->error_handler) {
5781 __ata_port_probe(ap);
79318057
AV
5782 ata_port_wait_eh(ap);
5783 } else {
79318057 5784 rc = ata_bus_probe(ap);
79318057 5785 }
238c9cf9
JB
5786 return rc;
5787}
5788
5789
5790static void async_port_probe(void *data, async_cookie_t cookie)
5791{
5792 struct ata_port *ap = data;
4fca377f 5793
238c9cf9
JB
5794 /*
5795 * If we're not allowed to scan this host in parallel,
5796 * we need to wait until all previous scans have completed
5797 * before going further.
5798 * Jeff Garzik says this is only within a controller, so we
5799 * don't need to wait for port 0, only for later ports.
5800 */
5801 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5802 async_synchronize_cookie(cookie);
5803
5804 (void)ata_port_probe(ap);
f29d3b23
AV
5805
5806 /* in order to keep device order, we need to synchronize at this point */
5807 async_synchronize_cookie(cookie);
5808
5809 ata_scsi_scan_host(ap, 1);
79318057 5810}
238c9cf9 5811
f3187195
TH
5812/**
5813 * ata_host_register - register initialized ATA host
5814 * @host: ATA host to register
5815 * @sht: template for SCSI host
5816 *
5817 * Register initialized ATA host. @host is allocated using
5818 * ata_host_alloc() and fully initialized by LLD. This function
5819 * starts ports, registers @host with ATA and SCSI layers and
5820 * probe registered devices.
5821 *
5822 * LOCKING:
5823 * Inherited from calling layer (may sleep).
5824 *
5825 * RETURNS:
5826 * 0 on success, -errno otherwise.
5827 */
int ata_host_register(struct ata_host *host, const struct scsi_host_template *sht)
{
	int i, rc;

	/* clamp the tag count to what the SCSI template can queue */
	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports. This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* Create associated sysfs transport objects */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned int xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		/* cookie is saved so ata_host_detach() can wait on the probe */
		ap->cookie = async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	/* undo the transport objects added so far, newest first */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
EXPORT_SYMBOL_GPL(ata_host_register);
f3187195 5910
f5cda257
TH
5911/**
5912 * ata_host_activate - start host, request IRQ and register it
5913 * @host: target ATA host
5914 * @irq: IRQ to request
5915 * @irq_handler: irq_handler used when requesting IRQ
5916 * @irq_flags: irq_flags used when requesting IRQ
5917 * @sht: scsi_host_template to use when registering the host
5918 *
5919 * After allocating an ATA host and initializing it, most libata
5920 * LLDs perform three steps to activate the host - start host,
c9b5560a 5921 * request IRQ and register it. This helper takes necessary
f5cda257
TH
5922 * arguments and performs the three steps in one go.
5923 *
3d46b2e2
PM
5924 * An invalid IRQ skips the IRQ registration and expects the host to
5925 * have set polling mode on the port. In this case, @irq_handler
5926 * should be NULL.
5927 *
f5cda257
TH
5928 * LOCKING:
5929 * Inherited from calling layer (may sleep).
5930 *
5931 * RETURNS:
5932 * 0 on success, -errno otherwise.
5933 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      const struct scsi_host_template *sht)
{
	int i, rc;
	char *irq_desc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	/* devm-allocated "driver[device]" string used as the IRQ name */
	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
				  dev_driver_string(host->dev),
				  dev_name(host->dev));
	if (!irq_desc)
		return -ENOMEM;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      irq_desc, host);
	if (rc)
		return rc;

	/* record the IRQ in each port's description for dmesg */
	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_activate);
f5cda257 5973
720ba126 5974/**
c9b5560a 5975 * ata_port_detach - Detach ATA port in preparation of device removal
720ba126
TH
5976 * @ap: ATA port to be detached
5977 *
5978 * Detach all ATA devices and the associated SCSI devices of @ap;
5979 * then, remove the associated SCSI host. @ap is guaranteed to
5980 * be quiescent on return from this function.
5981 *
5982 * LOCKING:
5983 * Kernel thread context (may sleep).
5984 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* ports without EH cannot be quiesced through the EH machinery */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	/* no more hotplug events after this point */
	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* clean up zpodd on port removal */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (zpodd_dev_enabled(dev))
				zpodd_exit(dev);
		}
	}
	/* drop the transport links of any port-multiplier ports */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}
6025
0529c159
TH
6026/**
6027 * ata_host_detach - Detach all ports of an ATA host
6028 * @host: Host to detach
6029 *
6030 * Detach all ports of @host.
6031 *
6032 * LOCKING:
6033 * Kernel thread context (may sleep).
6034 */
6035void ata_host_detach(struct ata_host *host)
6036{
6037 int i;
6038
b5292111
KHF
6039 for (i = 0; i < host->n_ports; i++) {
6040 /* Ensure ata_port probe has completed */
6041 async_synchronize_cookie(host->ports[i]->cookie + 1);
0529c159 6042 ata_port_detach(host->ports[i]);
b5292111 6043 }
562f0c2d
TH
6044
6045 /* the host is dead now, dissociate ACPI */
6046 ata_acpi_dissociate(host);
0529c159 6047}
a52fbcfc 6048EXPORT_SYMBOL_GPL(ata_host_detach);
0529c159 6049
374b1873
JG
6050#ifdef CONFIG_PCI
6051
1da177e4
LT
6052/**
6053 * ata_pci_remove_one - PCI layer callback for device removal
6054 * @pdev: PCI device that was removed
6055 *
b878ca5d
TH
6056 * PCI layer indicates to libata via this hook that hot-unplug or
6057 * module unload event has occurred. Detach all ports. Resource
6058 * release is handled via devres.
1da177e4
LT
6059 *
6060 * LOCKING:
6061 * Inherited from PCI layer (may sleep).
6062 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	/* Resource release is handled by devres; only detach the ports. */
	ata_host_detach(pci_get_drvdata(pdev));
}
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
1da177e4 6070
10a663a1
PK
/*
 * ata_pci_shutdown_one - PCI layer shutdown callback
 * @pdev: PCI device being shut down
 *
 * Freeze every port and stop its DMA engines so the controller is
 * quiescent before reboot/power-off.
 */
void ata_pci_shutdown_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* mark frozen so EH won't thaw the port again */
		ap->pflags |= ATA_PFLAG_FROZEN;

		/* Disable port interrupts */
		if (ap->ops->freeze)
			ap->ops->freeze(ap);

		/* Stop the port DMA engines */
		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
}
EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
10a663a1 6091
1da177e4 6092/* move to PCI subsystem */
057ace5e 6093int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6094{
6095 unsigned long tmp = 0;
6096
6097 switch (bits->width) {
6098 case 1: {
6099 u8 tmp8 = 0;
6100 pci_read_config_byte(pdev, bits->reg, &tmp8);
6101 tmp = tmp8;
6102 break;
6103 }
6104 case 2: {
6105 u16 tmp16 = 0;
6106 pci_read_config_word(pdev, bits->reg, &tmp16);
6107 tmp = tmp16;
6108 break;
6109 }
6110 case 4: {
6111 u32 tmp32 = 0;
6112 pci_read_config_dword(pdev, bits->reg, &tmp32);
6113 tmp = tmp32;
6114 break;
6115 }
6116
6117 default:
6118 return -EINVAL;
6119 }
6120
6121 tmp &= bits->mask;
6122
6123 return (tmp == bits->val) ? 1 : 0;
6124}
a52fbcfc 6125EXPORT_SYMBOL_GPL(pci_test_config_bits);
9b847548 6126
6ffa01d8 6127#ifdef CONFIG_PM
/*
 * ata_pci_device_do_suspend - PCI-side suspend helper
 * @pdev: PCI device to suspend
 * @mesg: PM event message
 *
 * Save config space and disable the device; drop to D3hot only for
 * events that actually put the system to sleep.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
9b847548 6137
553c4aa6 6138int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6139{
553c4aa6
TH
6140 int rc;
6141
9b847548
JA
6142 pci_set_power_state(pdev, PCI_D0);
6143 pci_restore_state(pdev);
553c4aa6 6144
b878ca5d 6145 rc = pcim_enable_device(pdev);
553c4aa6 6146 if (rc) {
a44fec1f
JP
6147 dev_err(&pdev->dev,
6148 "failed to enable device after resume (%d)\n", rc);
553c4aa6
TH
6149 return rc;
6150 }
6151
9b847548 6152 pci_set_master(pdev);
553c4aa6 6153 return 0;
500530f6 6154}
a52fbcfc 6155EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
500530f6 6156
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	/* quiesce the ATA host before powering down the PCI device */
	ata_host_suspend(host, mesg);

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
500530f6
TH
6168
int ata_pci_device_resume(struct pci_dev *pdev)
{
	int rc = ata_pci_device_do_resume(pdev);

	/* Resume the ATA host only once the PCI device is back up. */
	if (!rc)
		ata_host_resume(pci_get_drvdata(pdev));

	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6180#endif /* CONFIG_PM */
1da177e4
LT
6181#endif /* CONFIG_PCI */
6182
b7db04d9
BN
6183/**
6184 * ata_platform_remove_one - Platform layer callback for device removal
6185 * @pdev: Platform device that was removed
6186 *
6187 * Platform layer indicates to libata via this hook that hot-unplug or
6188 * module unload event has occurred. Detach all ports. Resource
6189 * release is handled via devres.
6190 *
6191 * LOCKING:
6192 * Inherited from platform layer (may sleep).
6193 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	/* Devres releases the resources; we only detach the ports. */
	ata_host_detach(platform_get_drvdata(pdev));

	return 0;
}
EXPORT_SYMBOL_GPL(ata_platform_remove_one);
b7db04d9 6203
bf89b0bf 6204#ifdef CONFIG_ATA_FORCE
168af4af
DLM
6205
/*
 * Helpers for building force_tbl below.  Each macro expands to one
 * struct ata_force_param initializer keyed by the libata.force=
 * parameter name; the "onoff" variants also emit a "no"-prefixed
 * entry that applies the opposite setting.
 */
#define force_cbl(name, flag) \
	{ #name, .cbl = (flag) }

#define force_spd_limit(spd, val) \
	{ #spd, .spd_limit = (val) }

#define force_xfer(mode, shift) \
	{ #mode, .xfer_mask = (1UL << (shift)) }

#define force_lflag_on(name, flags) \
	{ #name, .lflags_on = (flags) }

#define force_lflag_onoff(name, flags) \
	{ "no" #name, .lflags_on = (flags) }, \
	{ #name, .lflags_off = (flags) }

#define force_horkage_on(name, flag) \
	{ #name, .horkage_on = (flag) }

#define force_horkage_onoff(name, flag) \
	{ "no" #name, .horkage_on = (flag) }, \
	{ #name, .horkage_off = (flag) }

/* Everything the libata.force= kernel parameter can set. */
static const struct ata_force_param force_tbl[] __initconst = {
	/* cable types */
	force_cbl(40c, ATA_CBL_PATA40),
	force_cbl(80c, ATA_CBL_PATA80),
	force_cbl(short40c, ATA_CBL_PATA40_SHORT),
	force_cbl(unk, ATA_CBL_PATA_UNK),
	force_cbl(ign, ATA_CBL_PATA_IGN),
	force_cbl(sata, ATA_CBL_SATA),

	/* SATA link speed limits */
	force_spd_limit(1.5Gbps, 1),
	force_spd_limit(3.0Gbps, 2),

	/* transfer mode limits; several aliases map to the same mode */
	force_xfer(pio0, ATA_SHIFT_PIO + 0),
	force_xfer(pio1, ATA_SHIFT_PIO + 1),
	force_xfer(pio2, ATA_SHIFT_PIO + 2),
	force_xfer(pio3, ATA_SHIFT_PIO + 3),
	force_xfer(pio4, ATA_SHIFT_PIO + 4),
	force_xfer(pio5, ATA_SHIFT_PIO + 5),
	force_xfer(pio6, ATA_SHIFT_PIO + 6),
	force_xfer(mwdma0, ATA_SHIFT_MWDMA + 0),
	force_xfer(mwdma1, ATA_SHIFT_MWDMA + 1),
	force_xfer(mwdma2, ATA_SHIFT_MWDMA + 2),
	force_xfer(mwdma3, ATA_SHIFT_MWDMA + 3),
	force_xfer(mwdma4, ATA_SHIFT_MWDMA + 4),
	force_xfer(udma0, ATA_SHIFT_UDMA + 0),
	force_xfer(udma16, ATA_SHIFT_UDMA + 0),
	force_xfer(udma/16, ATA_SHIFT_UDMA + 0),
	force_xfer(udma1, ATA_SHIFT_UDMA + 1),
	force_xfer(udma25, ATA_SHIFT_UDMA + 1),
	force_xfer(udma/25, ATA_SHIFT_UDMA + 1),
	force_xfer(udma2, ATA_SHIFT_UDMA + 2),
	force_xfer(udma33, ATA_SHIFT_UDMA + 2),
	force_xfer(udma/33, ATA_SHIFT_UDMA + 2),
	force_xfer(udma3, ATA_SHIFT_UDMA + 3),
	force_xfer(udma44, ATA_SHIFT_UDMA + 3),
	force_xfer(udma/44, ATA_SHIFT_UDMA + 3),
	force_xfer(udma4, ATA_SHIFT_UDMA + 4),
	force_xfer(udma66, ATA_SHIFT_UDMA + 4),
	force_xfer(udma/66, ATA_SHIFT_UDMA + 4),
	force_xfer(udma5, ATA_SHIFT_UDMA + 5),
	force_xfer(udma100, ATA_SHIFT_UDMA + 5),
	force_xfer(udma/100, ATA_SHIFT_UDMA + 5),
	force_xfer(udma6, ATA_SHIFT_UDMA + 6),
	force_xfer(udma133, ATA_SHIFT_UDMA + 6),
	force_xfer(udma/133, ATA_SHIFT_UDMA + 6),
	force_xfer(udma7, ATA_SHIFT_UDMA + 7),

	/* link flags */
	force_lflag_on(nohrst, ATA_LFLAG_NO_HRST),
	force_lflag_on(nosrst, ATA_LFLAG_NO_SRST),
	force_lflag_on(norst, ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST),
	force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
	force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),

	/* device horkage (quirk) flags */
	force_horkage_onoff(ncq, ATA_HORKAGE_NONCQ),
	force_horkage_onoff(ncqtrim, ATA_HORKAGE_NO_NCQ_TRIM),
	force_horkage_onoff(ncqati, ATA_HORKAGE_NO_NCQ_ON_ATI),

	force_horkage_onoff(trim, ATA_HORKAGE_NOTRIM),
	force_horkage_on(trim_zero, ATA_HORKAGE_ZERO_AFTER_TRIM),
	force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),

	force_horkage_onoff(dma, ATA_HORKAGE_NODMA),
	force_horkage_on(atapi_dmadir, ATA_HORKAGE_ATAPI_DMADIR),
	force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),

	force_horkage_onoff(dmalog, ATA_HORKAGE_NO_DMA_LOG),
	force_horkage_onoff(iddevlog, ATA_HORKAGE_NO_ID_DEV_LOG),
	force_horkage_onoff(logdir, ATA_HORKAGE_NO_LOG_DIR),

	force_horkage_on(max_sec_128, ATA_HORKAGE_MAX_SEC_128),
	force_horkage_on(max_sec_1024, ATA_HORKAGE_MAX_SEC_1024),
	force_horkage_on(max_sec_lba48, ATA_HORKAGE_MAX_SEC_LBA48),

	force_horkage_onoff(lpm, ATA_HORKAGE_NOLPM),
	force_horkage_onoff(setxfer, ATA_HORKAGE_NOSETXFER),
	force_horkage_on(dump_id, ATA_HORKAGE_DUMP_ID),
	force_horkage_onoff(fua, ATA_HORKAGE_NO_FUA),

	force_horkage_on(disable, ATA_HORKAGE_DISABLE),
};
6308
33267325
TH
/*
 * ata_parse_force_one - parse one element of the libata.force= parameter
 * @cur: in/out; points at the parameter to parse, advanced past it
 * @force_ent: output force entry (port, device, param)
 * @reason: out; human-readable failure reason on error
 *
 * Parses "[ID[.DEV]:]VAL" where ID is a port/link number, DEV a device
 * number and VAL a (possibly abbreviated) name from force_tbl.
 * Modifies the buffer in place by NUL-terminating fields.  Returns 0
 * on success, -EINVAL on parse failure.
 */
static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;

	/* find where this param ends and update *cur */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == '\0')
		*cur = p;
	else
		*cur = p + 1;

	/* terminate this parameter in place */
	*p = '\0';

	/* parse */
	p = strchr(start, ':');
	if (!p) {
		/* no "ID:" prefix - the whole parameter is the value */
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* parse id */
	p = strchr(id, '.');
	if (p) {
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		/* an exact match always wins over prefix matches */
		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}
6387
/*
 * ata_parse_force_param - parse the whole libata.force= parameter buffer
 *
 * Splits the comma-separated buffer into individual force entries and
 * populates ata_force_tbl / ata_force_tbl_size.  Invalid entries are
 * reported and skipped.
 */
static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* Calculate maximum number of params and allocate ata_force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		/* entries without an explicit ID inherit the previous one */
		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}
1da177e4 6432
bf89b0bf
BZ
/* Release the table built by ata_parse_force_param(). */
static void ata_free_force_param(void)
{
	kfree(ata_force_tbl);
}
#else
/* CONFIG_ATA_FORCE disabled: no force table exists, stubs suffice. */
static inline void ata_parse_force_param(void) { }
static inline void ata_free_force_param(void) { }
#endif
6441
1da177e4
LT
static int __init ata_init(void)
{
	int rc;

	ata_parse_force_param();

	rc = ata_sff_init();
	if (rc) {
		ata_free_force_param();
		return rc;
	}

	libata_transport_init();
	ata_scsi_transport_template = ata_attach_transport();
	if (!ata_scsi_transport_template) {
		/*
		 * NOTE(review): libata_transport_init() is not unwound on
		 * this failure path — confirm whether a matching
		 * libata_transport_exit() call is needed here.
		 */
		ata_sff_exit();
		rc = -ENOMEM;
		goto err_out;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;

err_out:
	return rc;
}
6468
static void __exit ata_exit(void)
{
	/* tear down in the reverse order of ata_init() */
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	ata_free_force_param();
}
6476
a4625085 6477subsys_initcall(ata_init);
1da177e4
LT
6478module_exit(ata_exit);
6479
/* shared ratelimit state: interval HZ/5, burst of 1 message */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/*
 * ata_ratelimit - rate-limiting gate for libata printouts
 *
 * RETURNS: non-zero when a message may be emitted now, 0 when it
 * should be suppressed.
 */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
EXPORT_SYMBOL_GPL(ata_ratelimit);
67846b30 6487
c0c362b6
TH
6488/**
6489 * ata_msleep - ATA EH owner aware msleep
6490 * @ap: ATA port to attribute the sleep to
6491 * @msecs: duration to sleep in milliseconds
6492 *
6493 * Sleeps @msecs. If the current task is owner of @ap's EH, the
6494 * ownership is released before going to sleep and reacquired
6495 * after the sleep is complete. IOW, other ports sharing the
6496 * @ap->host will be allowed to own the EH while this task is
6497 * sleeping.
6498 *
6499 * LOCKING:
6500 * Might sleep.
6501 */
97750ceb
TH
6502void ata_msleep(struct ata_port *ap, unsigned int msecs)
6503{
c0c362b6
TH
6504 bool owns_eh = ap && ap->host->eh_owner == current;
6505
6506 if (owns_eh)
6507 ata_eh_release(ap);
6508
848c3920
AVM
6509 if (msecs < 20) {
6510 unsigned long usecs = msecs * USEC_PER_MSEC;
6511 usleep_range(usecs, usecs + 50);
6512 } else {
6513 msleep(msecs);
6514 }
c0c362b6
TH
6515
6516 if (owns_eh)
6517 ata_eh_acquire(ap);
97750ceb 6518}
a52fbcfc 6519EXPORT_SYMBOL_GPL(ata_msleep);
97750ceb 6520
c22daff4
TH
6521/**
6522 * ata_wait_register - wait until register value changes
97750ceb 6523 * @ap: ATA port to wait register for, can be NULL
c22daff4
TH
6524 * @reg: IO-mapped register
6525 * @mask: Mask to apply to read register value
6526 * @val: Wait condition
341c2c95
TH
6527 * @interval: polling interval in milliseconds
6528 * @timeout: timeout in milliseconds
c22daff4
TH
6529 *
6530 * Waiting for some bits of register to change is a common
6531 * operation for ATA controllers. This function reads 32bit LE
6532 * IO-mapped register @reg and tests for the following condition.
6533 *
6534 * (*@reg & mask) != val
6535 *
6536 * If the condition is met, it returns; otherwise, the process is
6537 * repeated after @interval_msec until timeout.
6538 *
6539 * LOCKING:
6540 * Kernel thread context (may sleep)
6541 *
6542 * RETURNS:
6543 * The final register value.
6544 */
97750ceb 6545u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
341c2c95 6546 unsigned long interval, unsigned long timeout)
c22daff4 6547{
341c2c95 6548 unsigned long deadline;
c22daff4
TH
6549 u32 tmp;
6550
6551 tmp = ioread32(reg);
6552
6553 /* Calculate timeout _after_ the first read to make sure
6554 * preceding writes reach the controller before starting to
6555 * eat away the timeout.
6556 */
341c2c95 6557 deadline = ata_deadline(jiffies, timeout);
c22daff4 6558
341c2c95 6559 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
97750ceb 6560 ata_msleep(ap, interval);
c22daff4
TH
6561 tmp = ioread32(reg);
6562 }
6563
6564 return tmp;
6565}
a52fbcfc 6566EXPORT_SYMBOL_GPL(ata_wait_register);
c22daff4 6567
dd5b06c4
TH
6568/*
6569 * Dummy port_ops
6570 */
/* qc_issue for dummy ports: every command fails immediately */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}

/*
 * Minimal port_ops for placeholder ports: commands are rejected and
 * EH does nothing beyond the standard scheduling hooks.
 */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.error_handler = ata_dummy_error_handler,
	.sched_eh = ata_std_sched_eh,
	.end_eh = ata_std_end_eh,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);

/* port_info wrapper for LLDs that describe ports via ata_port_info */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
21b0ad4f 6594
06296a1e
JP
/*
 * ata_print_version - print a driver version string via the device
 * @dev: device the message is attributed to
 * @version: version string to print
 */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
c206a389
HR
6600
6601EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
6602EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
6603EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
6604EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
6605EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);