ata: libata-core: cleanup ata_device_blacklist
[linux-2.6-block.git] / drivers / ata / libata-core.c
CommitLineData
c82ee6d3 1// SPDX-License-Identifier: GPL-2.0-or-later
1da177e4 2/*
af36d7f0
JG
3 * libata-core.c - helper library for ATA
4 *
af36d7f0
JG
5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6 * Copyright 2003-2004 Jeff Garzik
7 *
af36d7f0 8 * libata documentation is available via 'make {ps|pdf}docs',
19285f3c 9 * as Documentation/driver-api/libata.rst
af36d7f0
JG
10 *
11 * Hardware documentation available from http://www.t13.org/ and
12 * http://www.sata-io.org/
13 *
92c52c52
AC
14 * Standards documents from:
15 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
16 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
17 * http://www.sata-io.org (SATA)
18 * http://www.compactflash.org (CF)
19 * http://www.qic.org (QIC157 - Tape and DSC)
20 * http://www.ce-ata.org (CE-ATA: not supported)
a52fbcfc
BZ
21 *
22 * libata is essentially a library of internal helper functions for
23 * low-level ATA host controller drivers. As such, the API/ABI is
24 * likely to change as new drivers are added and updated.
25 * Do not depend on ABI/API stability.
1da177e4
LT
26 */
27
1da177e4
LT
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/list.h>
33#include <linux/mm.h>
1da177e4
LT
34#include <linux/spinlock.h>
35#include <linux/blkdev.h>
36#include <linux/delay.h>
37#include <linux/timer.h>
848c3920 38#include <linux/time.h>
1da177e4
LT
39#include <linux/interrupt.h>
40#include <linux/completion.h>
41#include <linux/suspend.h>
42#include <linux/workqueue.h>
378f058c 43#include <linux/scatterlist.h>
2dcb407e 44#include <linux/io.h>
e18086d6 45#include <linux/log2.h>
5a0e3ad6 46#include <linux/slab.h>
428ac5fc 47#include <linux/glob.h>
1da177e4 48#include <scsi/scsi.h>
193515d5 49#include <scsi/scsi_cmnd.h>
1da177e4
LT
50#include <scsi/scsi_host.h>
51#include <linux/libata.h>
1da177e4 52#include <asm/byteorder.h>
fe5af0cc 53#include <asm/unaligned.h>
140b5e59 54#include <linux/cdrom.h>
9990b6f3 55#include <linux/ratelimit.h>
eb25cb99 56#include <linux/leds.h>
9ee4f393 57#include <linux/pm_runtime.h>
b7db04d9 58#include <linux/platform_device.h>
bbf5a097 59#include <asm/setup.h>
1da177e4 60
255c03d1
HR
61#define CREATE_TRACE_POINTS
62#include <trace/events/libata.h>
63
1da177e4 64#include "libata.h"
d9027470 65#include "libata-transport.h"
fda0efc5 66
/*
 * Base port operations every libata driver inherits: the standard
 * prereset/postreset pair, the standard error handler and the default
 * EH scheduling/completion hooks.
 */
const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

/*
 * Default operations for SATA ports: the base ops plus NCQ-aware
 * command deferral and the standard SCR-based SATA hardreset.
 */
const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};
EXPORT_SYMBOL_GPL(sata_port_ops);
029cfd6b 82
3373efd8
TH
83static unsigned int ata_dev_init_params(struct ata_device *dev,
84 u16 heads, u16 sectors);
85static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
86static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 87static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 88
a78f57af 89atomic_t ata_print_id = ATOMIC_INIT(0);
1da177e4 90
bf89b0bf 91#ifdef CONFIG_ATA_FORCE
33267325
TH
92struct ata_force_param {
93 const char *name;
8ba5a45c
BZ
94 u8 cbl;
95 u8 spd_limit;
33267325
TH
96 unsigned long xfer_mask;
97 unsigned int horkage_on;
98 unsigned int horkage_off;
8ba5a45c 99 u16 lflags;
33267325
TH
100};
101
102struct ata_force_ent {
103 int port;
104 int device;
105 struct ata_force_param param;
106};
107
108static struct ata_force_ent *ata_force_tbl;
109static int ata_force_tbl_size;
110
bbf5a097 111static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
7afb4222
TH
112/* param_buf is thrown away after initialization, disallow read */
113module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
8c27ceff 114MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
bf89b0bf 115#endif
33267325 116
2486fa56 117static int atapi_enabled = 1;
1623c81e 118module_param(atapi_enabled, int, 0444);
ad5d8eac 119MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
1623c81e 120
c5c61bda 121static int atapi_dmadir = 0;
95de719a 122module_param(atapi_dmadir, int, 0444);
ad5d8eac 123MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
95de719a 124
baf4fdfa
ML
125int atapi_passthru16 = 1;
126module_param(atapi_passthru16, int, 0444);
ad5d8eac 127MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
baf4fdfa 128
c3c013a2
JG
129int libata_fua = 0;
130module_param_named(fua, libata_fua, int, 0444);
ad5d8eac 131MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
c3c013a2 132
2dcb407e 133static int ata_ignore_hpa;
1e999736
AC
134module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
135MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
136
b3a70601
AC
137static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
138module_param_named(dma, libata_dma_mask, int, 0444);
139MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
140
87fbc5a0 141static int ata_probe_timeout;
a8601e5f
AM
142module_param(ata_probe_timeout, int, 0444);
143MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
144
6ebe9d86 145int libata_noacpi = 0;
d7d0dad6 146module_param_named(noacpi, libata_noacpi, int, 0444);
ad5d8eac 147MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
11ef697b 148
ae8d4ee7
AC
149int libata_allow_tpm = 0;
150module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
ad5d8eac 151MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
ae8d4ee7 152
e7ecd435
TH
153static int atapi_an;
154module_param(atapi_an, int, 0444);
155MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
156
1da177e4
LT
157MODULE_AUTHOR("Jeff Garzik");
158MODULE_DESCRIPTION("Library module for ATA devices");
159MODULE_LICENSE("GPL");
160MODULE_VERSION(DRV_VERSION);
161
891fd7c6
DLM
162static inline bool ata_dev_print_info(struct ata_device *dev)
163{
164 struct ata_eh_context *ehc = &dev->link->eh_context;
165
166 return ehc->i.flags & ATA_EHI_PRINTINFO;
167}
0baab86b 168
9913ff8a
TH
169static bool ata_sstatus_online(u32 sstatus)
170{
171 return (sstatus & 0xf) == 0x3;
172}
173
1eca4365
TH
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	ATA_LITER_EDGE visits only "edge" links (fan-out links when a
 *	PMP is attached, otherwise just the host link).
 *	ATA_LITER_HOST_FIRST and ATA_LITER_PMP_FIRST additionally visit
 *	the host link (and slave link, if any), ordered as named.
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link, or NULL when iteration is complete.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			fallthrough;
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			fallthrough;
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			fallthrough;
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link; advance through the fan-out array */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* past the last fan-out link; PMP_FIRST still owes the host link */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
EXPORT_SYMBOL_GPL(ata_link_next);
aadffb68 233
1eca4365
TH
/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	ATA_DITER_ENABLED[_REVERSE] skips slots without an enabled
 *	device; ATA_DITER_ALL[_REVERSE] visits every slot. The
 *	*_REVERSE modes walk from the highest device number down.
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device, or NULL when iteration is complete.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* in ENABLED modes, skip slots without an enabled device */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
EXPORT_SYMBOL_GPL(ata_dev_next);
1eca4365 287
b1c72916
TH
288/**
289 * ata_dev_phys_link - find physical link for a device
290 * @dev: ATA device to look up physical link for
291 *
292 * Look up physical link which @dev is attached to. Note that
293 * this is different from @dev->link only when @dev is on slave
294 * link. For all other cases, it's the same as @dev->link.
295 *
296 * LOCKING:
297 * Don't care.
298 *
299 * RETURNS:
300 * Pointer to the found physical link.
301 */
302struct ata_link *ata_dev_phys_link(struct ata_device *dev)
303{
304 struct ata_port *ap = dev->link->ap;
305
306 if (!ap->slave_link)
307 return dev->link;
308 if (!dev->devno)
309 return &ap->link;
310 return ap->slave_link;
311}
312
bf89b0bf 313#ifdef CONFIG_ATA_FORCE
33267325
TH
/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has matching port number is used, so it
 *	can be specified as part of device force parameters. For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	/* scan backwards so the last matching entry wins */
	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		/* this entry does not force a cable type */
		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}
345
/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it. When only the port part is specified
 *	(e.g. 1:), the limit applies to all links connected to both
 *	the host link and all fan-out ports connected via PMP. If the
 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link not the host link. Device number 15 always
 *	points to the host link whether PMP is attached or not. If the
 *	controller has slave link, device number 16 points to it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	/* host link is addressed as link 15 (see comment above) */
	if (ata_is_host_link(link))
		linkno += 15;

	/* scan backwards so later entries take precedence */
	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
397
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	/* scan backwards so the last matching entry wins */
	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		/*
		 * Apply the highest forced mode class and clear the
		 * faster classes so the forced mode is actually used.
		 */
		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}
451
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	/* scan forwards (unlike the other force hooks) so that later
	 * entries can override horkage set by earlier ones */
	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		/* skip if the entry would not change the horkage bits */
		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}
bf89b0bf
BZ
#else
/* CONFIG_ATA_FORCE=n: libata.force is unavailable, stub out the hooks */
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
static inline void ata_force_horkage(struct ata_device *dev) { }
#endif
33267325 499
436d34b3
TH
/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		/* SAT passthru only when enabled via the module parameter */
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		fallthrough;
	default:
		return ATAPI_MISC;
	}
}
EXPORT_SYMBOL_GPL(atapi_cmd_type);
436d34b3 538
8cbd6df1
AL
/*
 * Read/write command table for ata_rwcmd_protocol(). Indexed by
 * [base + fua + lba48 + write]: base selects the row (0 = PIO multi,
 * 8 = PIO, 16 = DMA), FUA adds 4, LBA48 adds 2 and write adds 1.
 * A zero entry marks an invalid combination.
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
568
569/**
8cbd6df1 570 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
571 * @tf: command to examine and configure
572 * @dev: device tf belongs to
1da177e4 573 *
2e9edbf8 574 * Examine the device configuration and tf->flags to calculate
8cbd6df1 575 * the proper read/write commands and protocol to use.
1da177e4
LT
576 *
577 * LOCKING:
578 * caller.
579 */
bd056d7e 580static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 581{
9a3dccc4 582 u8 cmd;
1da177e4 583
9a3dccc4 584 int index, fua, lba48, write;
2e9edbf8 585
9a3dccc4 586 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
587 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
588 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 589
8cbd6df1
AL
590 if (dev->flags & ATA_DFLAG_PIO) {
591 tf->protocol = ATA_PROT_PIO;
9a3dccc4 592 index = dev->multi_count ? 0 : 8;
9af5c9c9 593 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
594 /* Unable to use DMA due to host limitation */
595 tf->protocol = ATA_PROT_PIO;
0565c26d 596 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
597 } else {
598 tf->protocol = ATA_PROT_DMA;
9a3dccc4 599 index = 16;
8cbd6df1 600 }
1da177e4 601
9a3dccc4
TH
602 cmd = ata_rw_cmds[index + fua + lba48 + write];
603 if (cmd) {
604 tf->command = cmd;
605 return 0;
606 }
607 return -1;
1da177e4
LT
608}
609
35b649fe
TH
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf. This function can handle all
 *	three address formats - LBA, LBA48 and CHS. tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf, or U64_MAX if the taskfile holds
 *	an invalid CHS address (sector 0).
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			/* LBA28: address bits 27:24 live in the device reg */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		/* CHS sector numbers are 1-based; 0 is invalid */
		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
658
bd056d7e
TH
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *	@class: IO priority class
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev. Chooses NCQ,
 *	LBA48, LBA28 or CHS addressing depending on device/request.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag, int class)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag in nsect bits 7:3, sector count in features */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE &&
		    class == IOPRIO_CLASS_RT)
			tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
783
cb95d562
TH
784/**
785 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
786 * @pio_mask: pio_mask
787 * @mwdma_mask: mwdma_mask
788 * @udma_mask: udma_mask
789 *
790 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
791 * unsigned int xfer_mask.
792 *
793 * LOCKING:
794 * None.
795 *
796 * RETURNS:
797 * Packed xfer_mask.
798 */
7dc951ae
TH
799unsigned long ata_pack_xfermask(unsigned long pio_mask,
800 unsigned long mwdma_mask,
801 unsigned long udma_mask)
cb95d562
TH
802{
803 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
804 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
805 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
806}
a52fbcfc 807EXPORT_SYMBOL_GPL(ata_pack_xfermask);
cb95d562 808
c0489e4e
TH
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 *
 *	LOCKING:
 *	None.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
829
/*
 * Map between xfer_mask bit ranges and XFER_* mode values. Each entry
 * covers @bits mask bits starting at bit @shift; the corresponding
 * mode values start at @base. Terminated by an entry with shift == -1.
 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
839
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask. Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;	/* -1 if mask is empty */
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
cb95d562
TH
864
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode. The returned mask
 *	includes @xfer_mode and all slower modes of the same class.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			/* all bits up to and including the mode's bit,
			 * limited to this entry's class */
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
cb95d562
TH
888
/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
cb95d562 911
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* indexed by xfer_mask bit position; order must match the
	 * ATA_SHIFT_PIO/MWDMA/UDMA bit layout */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	/* fls() returns 0 for an empty mask, making highbit -1 which
	 * falls through to "<n/a>" */
	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
EXPORT_SYMBOL_GPL(ata_mode_string);
1da177e4 958
/*
 * sata_spd_string - convert a SATA link speed number (1-based, as read
 * from SStatus/SControl SPD fields) to a human readable string.
 * Returns "<unknown>" for 0 or out-of-range values.
 */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd >= 1 && spd <= ARRAY_SIZE(spd_str))
		return spd_str[spd - 1];
	return "<unknown>";
}
971
1da177e4
LT
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_SEMB, %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event
 *	of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those. It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
	 * spec has never mentioned about using different signatures
	 * for ATA/ATAPI devices. Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature. This is worked around in
	 * ata_dev_read_id().
	 */
	if (tf->lbam == 0 && tf->lbah == 0)
		return ATA_DEV_ATA;

	if (tf->lbam == 0x14 && tf->lbah == 0xeb)
		return ATA_DEV_ATAPI;

	if (tf->lbam == 0x69 && tf->lbah == 0x96)
		return ATA_DEV_PMP;

	if (tf->lbam == 0x3c && tf->lbah == 0xc3)
		return ATA_DEV_SEMB;

	if (tf->lbam == 0xcd && tf->lbah == 0xab)
		return ATA_DEV_ZAC;

	return ATA_DEV_UNKNOWN;
}
EXPORT_SYMBOL_GPL(ata_dev_classify);
1da177e4 1028
1da177e4 1029/**
6a62a04d 1030 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1031 * @id: IDENTIFY DEVICE results we will examine
1032 * @s: string into which data is output
1033 * @ofs: offset into identify device page
1034 * @len: length of string to return. must be an even number.
1035 *
1036 * The strings in the IDENTIFY DEVICE page are broken up into
1037 * 16-bit chunks. Run through the string, and output each
1038 * 8-bit chunk linearly, regardless of platform.
1039 *
1040 * LOCKING:
1041 * caller.
1042 */
1043
6a62a04d
TH
1044void ata_id_string(const u16 *id, unsigned char *s,
1045 unsigned int ofs, unsigned int len)
1da177e4
LT
1046{
1047 unsigned int c;
1048
963e4975
AC
1049 BUG_ON(len & 1);
1050
1da177e4
LT
1051 while (len > 0) {
1052 c = id[ofs] >> 8;
1053 *s = c;
1054 s++;
1055
1056 c = id[ofs] & 0xff;
1057 *s = c;
1058 s++;
1059
1060 ofs++;
1061 len -= 2;
1062 }
1063}
a52fbcfc 1064EXPORT_SYMBOL_GPL(ata_id_string);
1da177e4 1065
0e949ff3 1066/**
6a62a04d 1067 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1068 * @id: IDENTIFY DEVICE results we will examine
1069 * @s: string into which data is output
1070 * @ofs: offset into identify device page
1071 * @len: length of string to return. must be an odd number.
1072 *
6a62a04d 1073 * This function is identical to ata_id_string except that it
0e949ff3
TH
1074 * trims trailing spaces and terminates the resulting string with
1075 * null. @len must be actual maximum length (even number) + 1.
1076 *
1077 * LOCKING:
1078 * caller.
1079 */
6a62a04d
TH
1080void ata_id_c_string(const u16 *id, unsigned char *s,
1081 unsigned int ofs, unsigned int len)
0e949ff3
TH
1082{
1083 unsigned char *p;
1084
6a62a04d 1085 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1086
1087 p = s + strnlen(s, len - 1);
1088 while (p > s && p[-1] == ' ')
1089 p--;
1090 *p = '\0';
1091}
a52fbcfc 1092EXPORT_SYMBOL_GPL(ata_id_c_string);
0baab86b 1093
db6f8759
TH
1094static u64 ata_id_n_sectors(const u16 *id)
1095{
1096 if (ata_id_has_lba(id)) {
1097 if (ata_id_has_lba48(id))
968e594a 1098 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
db6f8759 1099 else
968e594a 1100 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
db6f8759
TH
1101 } else {
1102 if (ata_id_current_chs_valid(id))
968e594a
RH
1103 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1104 id[ATA_ID_CUR_SECTORS];
db6f8759 1105 else
968e594a
RH
1106 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1107 id[ATA_ID_SECTORS];
db6f8759
TH
1108 }
1109}
1110
a5987e0a 1111u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1e999736
AC
1112{
1113 u64 sectors = 0;
1114
1115 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1116 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
ba14a9c2 1117 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1e999736
AC
1118 sectors |= (tf->lbah & 0xff) << 16;
1119 sectors |= (tf->lbam & 0xff) << 8;
1120 sectors |= (tf->lbal & 0xff);
1121
a5987e0a 1122 return sectors;
1e999736
AC
1123}
1124
a5987e0a 1125u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1e999736
AC
1126{
1127 u64 sectors = 0;
1128
1129 sectors |= (tf->device & 0x0f) << 24;
1130 sectors |= (tf->lbah & 0xff) << 16;
1131 sectors |= (tf->lbam & 0xff) << 8;
1132 sectors |= (tf->lbal & 0xff);
1133
a5987e0a 1134 return sectors;
1e999736
AC
1135}
1136
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	/* pick the EXT (LBA48) flavor when the device supports LBA48 */
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		/* command aborted by the device is reported separately so
		 * the caller can treat it as "HPA query unsupported" */
		if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result taskfile holds the max address; +1 turns it into a count */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	/* quirk: drives flagged ATA_HORKAGE_HPA_SIZE need the result
	 * reduced by one */
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1187
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* the taskfile takes the address of the last sector: count - 1
	 * (mirrors the +1 done in ata_read_native_max_address()) */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		/* bits 47:24 go into the HOB registers */
		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 27:24 go into the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	/* low 24 address bits are common to both variants */
	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		/* aborted or ID-not-found means the drive refused the
		 * resize; signal it distinctly so HPA handling can be
		 * skipped rather than failed */
		if (err_mask == AC_ERR_DEV &&
		    (tf.error & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1245
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	bool print_info = ata_dev_print_info(dev);
	/* unlocking is requested either globally (module parameter) or
	 * per-device via ATA_DFLAG_UNLOCK_HPA */
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		/* silent unless this is the initial probe and there is
		 * actually an HPA-induced size discrepancy to report */
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data, as the reported capacity has changed */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1341
1da177e4
LT
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@dev: device from which the information is fetched
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
{
	/* raw hex dump of a fixed subset of IDENTIFY words at debug level;
	 * see the ATA IDENTIFY DEVICE layout for the word meanings */
	ata_dev_dbg(dev,
		"49==0x%04x 53==0x%04x 63==0x%04x 64==0x%04x 75==0x%04x\n"
		"80==0x%04x 81==0x%04x 82==0x%04x 83==0x%04x 84==0x%04x\n"
		"88==0x%04x 93==0x%04x\n",
		id[49], id[53], id[63], id[64], id[75], id[80],
		id[81], id[82], id[83], id[84], id[88], id[93]);
}
1363
cb95d562
TH
1364/**
1365 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1366 * @id: IDENTIFY data to compute xfer mask from
1367 *
1368 * Compute the xfermask for this device. This is not as trivial
1369 * as it seems if we must consider early devices correctly.
1370 *
1371 * FIXME: pre IDE drive timing (do we care ?).
1372 *
1373 * LOCKING:
1374 * None.
1375 *
1376 * RETURNS:
1377 * Computed xfermask
1378 */
7dc951ae 1379unsigned long ata_id_xfermask(const u16 *id)
cb95d562 1380{
7dc951ae 1381 unsigned long pio_mask, mwdma_mask, udma_mask;
cb95d562
TH
1382
1383 /* Usual case. Word 53 indicates word 64 is valid */
1384 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1385 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1386 pio_mask <<= 3;
1387 pio_mask |= 0x7;
1388 } else {
1389 /* If word 64 isn't valid then Word 51 high byte holds
1390 * the PIO timing number for the maximum. Turn it into
1391 * a mask.
1392 */
7a0f1c8a 1393 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1394 if (mode < 5) /* Valid PIO range */
2dcb407e 1395 pio_mask = (2 << mode) - 1;
46767aeb
AC
1396 else
1397 pio_mask = 1;
cb95d562
TH
1398
1399 /* But wait.. there's more. Design your standards by
1400 * committee and you too can get a free iordy field to
e0af10ac 1401 * process. However it is the speeds not the modes that
cb95d562
TH
1402 * are supported... Note drivers using the timing API
1403 * will get this right anyway
1404 */
1405 }
1406
1407 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1408
b352e57d
AC
1409 if (ata_id_is_cfa(id)) {
1410 /*
1411 * Process compact flash extended modes
1412 */
62afe5d7
SS
1413 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1414 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
b352e57d
AC
1415
1416 if (pio)
1417 pio_mask |= (1 << 5);
1418 if (pio > 1)
1419 pio_mask |= (1 << 6);
1420 if (dma)
1421 mwdma_mask |= (1 << 3);
1422 if (dma > 1)
1423 mwdma_mask |= (1 << 4);
1424 }
1425
fb21f0d0
TH
1426 udma_mask = 0;
1427 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1428 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1429
1430 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1431}
a52fbcfc 1432EXPORT_SYMBOL_GPL(ata_id_xfermask);
cb95d562 1433
7102d230 1434static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1435{
77853bf2 1436 struct completion *waiting = qc->private_data;
a2a7a662 1437
a2a7a662 1438 complete(waiting);
a2a7a662
TH
1439}
1440
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int preempted_tag;
	u32 preempted_sactive;
	u64 preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc using the reserved internal tag */
	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);

	qc->tag = ATA_TAG_INTERNAL;
	qc->hw_tag = 0;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* Save and neutralize the link/port active-command bookkeeping so
	 * the internal command has the port to itself; restored below
	 * before returning. */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		/* total transfer length is the sum of the sg lengths */
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* 0 means "pick a timeout": either the module-wide probe timeout
	 * or the per-command default, the latter flagged so a timeout can
	 * be reported back to the timeout bookkeeping below */
	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	/* EH exclusion is dropped across the sleep and re-taken after */
	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
				     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.status & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a more specific cause is set */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.status |= ATA_SENSE;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the command state saved before issue */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1618
2432697b 1619/**
33480a0e 1620 * ata_exec_internal - execute libata internal command
2432697b
TH
1621 * @dev: Device to which the command is sent
1622 * @tf: Taskfile registers for the command and the result
1623 * @cdb: CDB for packet command
e227867f 1624 * @dma_dir: Data transfer direction of the command
2432697b
TH
1625 * @buf: Data buffer of the command
1626 * @buflen: Length of data buffer
2b789108 1627 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1628 *
1629 * Wrapper around ata_exec_internal_sg() which takes simple
1630 * buffer instead of sg list.
1631 *
1632 * LOCKING:
1633 * None. Should be called with kernel context, might sleep.
1634 *
1635 * RETURNS:
1636 * Zero on success, AC_ERR_* mask on failure
1637 */
1638unsigned ata_exec_internal(struct ata_device *dev,
1639 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1640 int dma_dir, void *buf, unsigned int buflen,
1641 unsigned long timeout)
2432697b 1642{
33480a0e
TH
1643 struct scatterlist *psg = NULL, sg;
1644 unsigned int n_elem = 0;
2432697b 1645
33480a0e
TH
1646 if (dma_dir != DMA_NONE) {
1647 WARN_ON(!buf);
1648 sg_init_one(&sg, buf, buflen);
1649 psg = &sg;
1650 n_elem++;
1651 }
2432697b 1652
2b789108
TH
1653 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1654 timeout);
2432697b
TH
1655}
1656
1bc4ccff
AC
1657/**
1658 * ata_pio_need_iordy - check if iordy needed
1659 * @adev: ATA device
1660 *
1661 * Check if the current speed of the device requires IORDY. Used
1662 * by various controllers for chip configuration.
1663 */
1bc4ccff
AC
1664unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1665{
0d9e6659
TH
1666 /* Don't set IORDY if we're preparing for reset. IORDY may
1667 * lead to controller lock up on certain controllers if the
1668 * port is not occupied. See bko#11703 for details.
1669 */
1670 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1671 return 0;
1672 /* Controller doesn't support IORDY. Probably a pointless
1673 * check as the caller should know this.
1674 */
9af5c9c9 1675 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1676 return 0;
5c18c4d2
DD
1677 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1678 if (ata_id_is_cfa(adev->id)
1679 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1680 return 0;
432729f0
AC
1681 /* PIO3 and higher it is mandatory */
1682 if (adev->pio_mode > XFER_PIO_2)
1683 return 1;
1684 /* We turn it on when possible */
1685 if (ata_id_has_iordy(adev->id))
1bc4ccff 1686 return 1;
432729f0
AC
1687 return 0;
1688}
a52fbcfc 1689EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
2e9edbf8 1690
432729f0
AC
1691/**
1692 * ata_pio_mask_no_iordy - Return the non IORDY mask
1693 * @adev: ATA device
1694 *
1695 * Compute the highest mode possible if we are not using iordy. Return
1696 * -1 if no iordy mode is available.
1697 */
432729f0
AC
1698static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1699{
1bc4ccff 1700 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1701 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1702 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1703 /* Is the speed faster than the drive allows non IORDY ? */
1704 if (pio) {
1705 /* This is cycle times not frequency - watch the logic! */
1706 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1707 return 3 << ATA_SHIFT_PIO;
1708 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1709 }
1710 }
432729f0 1711 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1712}
1713
963e4975
AC
/**
 *	ata_do_dev_read_id - default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data. For some RAID controllers and for pre ATA devices
 *	this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, __le16 *id)
{
	/* read the whole ATA_ID_WORDS-word IDENTIFY page into @id */
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
963e4975 1731
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	/* retry entry point — the taskfile is rebuilt on every attempt */
retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		fallthrough;
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	/* a driver may override the ID read method wholesale */
	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* debugging aid for quirky devices: dump the raw ID data */
	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_info(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the data must match the class we asked for */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
							ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* word 2 values 0x37c8/0x738c flag a drive that powered up in
	 * standby — see the spin-up handling below */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
		     reason, err_mask);
	return rc;
}
1938
f01f62c2
CH
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@log: log to read
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
			       u8 page, void *buf, unsigned int sectors)
{
	unsigned long ap_flags = dev->link->ap->flags;
	struct ata_taskfile tf;
	unsigned int err_mask;
	bool dma = false;

	ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);

	/*
	 * Return error without actually issuing the command on controllers
	 * which e.g. lockup on a read log page.
	 */
	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
		return AC_ERR_DEV;

retry:
	ata_tf_init(dev, &tf);
	/* prefer the DMA variant unless it is unsupported or has been
	 * blacklisted for this device (ATA_HORKAGE_NO_DMA_LOG) */
	if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
		tf.protocol = ATA_PROT_DMA;
		dma = true;
	} else {
		tf.command = ATA_CMD_READ_LOG_EXT;
		tf.protocol = ATA_PROT_PIO;
		dma = false;
	}
	tf.lbal = log;
	tf.lbam = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	if (err_mask) {
		if (dma) {
			/* DMA flavor failed: disable it for this device
			 * and retry once using PIO */
			dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
			goto retry;
		}
		ata_dev_err(dev,
			    "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
			    (unsigned int)log, (unsigned int)page, err_mask);
	}

	return err_mask;
}
2005
efe205a3
CH
2006static bool ata_log_supported(struct ata_device *dev, u8 log)
2007{
2008 struct ata_port *ap = dev->link->ap;
2009
ac9f0c81
AL
2010 if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
2011 return false;
2012
efe205a3
CH
2013 if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2014 return false;
2015 return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
2016}
2017
a0fd2454
CH
2018static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2019{
2020 struct ata_port *ap = dev->link->ap;
2021 unsigned int err, i;
2022
636f6e2a
DLM
2023 if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
2024 return false;
2025
a0fd2454 2026 if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
636f6e2a
DLM
2027 /*
2028 * IDENTIFY DEVICE data log is defined as mandatory starting
2029 * with ACS-3 (ATA version 10). Warn about the missing log
2030 * for drives which implement this ATA level or above.
2031 */
2032 if (ata_id_major_version(dev->id) >= 10)
2033 ata_dev_warn(dev,
2034 "ATA Identify Device Log not supported\n");
2035 dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
a0fd2454
CH
2036 return false;
2037 }
2038
2039 /*
2040 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2041 * supported.
2042 */
2043 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2044 1);
fc5c8aa7 2045 if (err)
a0fd2454 2046 return false;
a0fd2454
CH
2047
2048 for (i = 0; i < ap->sector_buf[8]; i++) {
2049 if (ap->sector_buf[9 + i] == page)
2050 return true;
2051 }
2052
2053 return false;
2054}
2055
9062712f
TH
2056static int ata_do_link_spd_horkage(struct ata_device *dev)
2057{
2058 struct ata_link *plink = ata_dev_phys_link(dev);
2059 u32 target, target_limit;
2060
2061 if (!sata_scr_valid(plink))
2062 return 0;
2063
2064 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2065 target = 1;
2066 else
2067 return 0;
2068
2069 target_limit = (1 << target) - 1;
2070
2071 /* if already on stricter limit, no need to push further */
2072 if (plink->sata_spd_limit <= target_limit)
2073 return 0;
2074
2075 plink->sata_spd_limit = target_limit;
2076
2077 /* Request another EH round by returning -EAGAIN if link is
2078 * going faster than the target speed. Forward progress is
2079 * guaranteed by setting sata_spd_limit to target_limit above.
2080 */
2081 if (plink->sata_spd > target) {
a9a79dfe
JP
2082 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2083 sata_spd_string(target));
9062712f
TH
2084 return -EAGAIN;
2085 }
2086 return 0;
2087}
2088
3373efd8 2089static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2090{
9af5c9c9 2091 struct ata_port *ap = dev->link->ap;
9ce8e307
JA
2092
2093 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2094 return 0;
2095
9af5c9c9 2096 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2097}
2098
5a233551
HR
2099static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2100{
2101 struct ata_port *ap = dev->link->ap;
2102 unsigned int err_mask;
2103
efe205a3
CH
2104 if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2105 ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
fe5af0cc
HR
2106 return;
2107 }
5a233551
HR
2108 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2109 0, ap->sector_buf, 1);
fc5c8aa7 2110 if (!err_mask) {
5a233551
HR
2111 u8 *cmds = dev->ncq_send_recv_cmds;
2112
2113 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2114 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2115
2116 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2117 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2118 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2119 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2120 }
2121 }
2122}
2123
284b3b77
HR
2124static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2125{
2126 struct ata_port *ap = dev->link->ap;
2127 unsigned int err_mask;
284b3b77 2128
efe205a3 2129 if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
284b3b77
HR
2130 ata_dev_warn(dev,
2131 "NCQ Send/Recv Log not supported\n");
2132 return;
2133 }
2134 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2135 0, ap->sector_buf, 1);
fc5c8aa7 2136 if (!err_mask) {
284b3b77
HR
2137 u8 *cmds = dev->ncq_non_data_cmds;
2138
2139 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2140 }
2141}
2142
8e061784
AM
2143static void ata_dev_config_ncq_prio(struct ata_device *dev)
2144{
2145 struct ata_port *ap = dev->link->ap;
2146 unsigned int err_mask;
2147
06f6c4c6
DLM
2148 if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2149 return;
2150
8e061784 2151 err_mask = ata_read_log_page(dev,
1d51d5f3 2152 ATA_LOG_IDENTIFY_DEVICE,
8e061784
AM
2153 ATA_LOG_SATA_SETTINGS,
2154 ap->sector_buf,
2155 1);
fc5c8aa7 2156 if (err_mask)
2360fa18 2157 goto not_supported;
8e061784 2158
2360fa18
DLM
2159 if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
2160 goto not_supported;
2161
2162 dev->flags |= ATA_DFLAG_NCQ_PRIO;
2163
2164 return;
8e061784 2165
2360fa18
DLM
2166not_supported:
2167 dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
2168 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
8e061784
AM
2169}
2170
7a8526a5
KH
2171static bool ata_dev_check_adapter(struct ata_device *dev,
2172 unsigned short vendor_id)
2173{
2174 struct pci_dev *pcidev = NULL;
2175 struct device *parent_dev = NULL;
2176
2177 for (parent_dev = dev->tdev.parent; parent_dev != NULL;
2178 parent_dev = parent_dev->parent) {
2179 if (dev_is_pci(parent_dev)) {
2180 pcidev = to_pci_dev(parent_dev);
2181 if (pcidev->vendor == vendor_id)
2182 return true;
2183 break;
2184 }
2185 }
2186
2187 return false;
2188}
2189
/*
 * ata_dev_config_ncq - configure Native Command Queueing
 * @dev: target device
 * @desc: buffer receiving a human-readable NCQ summary for dmesg
 * @desc_sz: size of @desc
 *
 * Enable NCQ when both device and host support it and no horkage forbids
 * it, optionally enable FPDMA auto-activation, and probe the auxiliary
 * NCQ logs (Send/Recv, Non-Data, priority) on FPDMA_AUX capable hosts.
 *
 * RETURNS:
 * 0 on success, -EIO if enabling AA failed with a non-device error.
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (!IS_ENABLED(CONFIG_SATA_HOST))
		return 0;
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}

	/* some ATI SATA controllers are known to misbehave with NCQ */
	if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}

	if (ap->flags & ATA_FLAG_NCQ) {
		/* host queue depth caps the usable device queue depth */
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
	    (ap->flags & ATA_FLAG_FPDMA_AA) &&
	    ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
					       SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);

	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
		if (ata_id_has_ncq_send_and_recv(dev->id))
			ata_dev_config_ncq_send_recv(dev);
		if (ata_id_has_ncq_non_data(dev->id))
			ata_dev_config_ncq_non_data(dev);
		if (ata_id_has_ncq_prio(dev->id))
			ata_dev_config_ncq_prio(dev);
	}

	return 0;
}
f78dea06 2254
e87fd28c
HR
2255static void ata_dev_config_sense_reporting(struct ata_device *dev)
2256{
2257 unsigned int err_mask;
2258
2259 if (!ata_id_has_sense_reporting(dev->id))
2260 return;
2261
2262 if (ata_id_sense_reporting_enabled(dev->id))
2263 return;
2264
2265 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2266 if (err_mask) {
2267 ata_dev_dbg(dev,
2268 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2269 err_mask);
2270 }
2271}
2272
6d1003ae
HR
/*
 * ata_dev_config_zac - configure Zoned ATA Command set (ZAC) support
 * @dev: target device
 *
 * Flag host-managed (ATA_DEV_ZAC class) and host-aware (zoned cap value
 * 0x01 in IDENTIFY data) devices as ZAC, then read the Zoned Device
 * Information page of the IDENTIFY DEVICE data log to obtain the zone
 * resource limits.  Limits default to U32_MAX ("not reported").
 */
static void ata_dev_config_zac(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;
	u8 *identify_buf = ap->sector_buf;

	dev->zac_zones_optimal_open = U32_MAX;
	dev->zac_zones_optimal_nonseq = U32_MAX;
	dev->zac_zones_max_open = U32_MAX;

	/*
	 * Always set the 'ZAC' flag for Host-managed devices.
	 */
	if (dev->class == ATA_DEV_ZAC)
		dev->flags |= ATA_DFLAG_ZAC;
	else if (ata_id_zoned_cap(dev->id) == 0x01)
		/*
		 * Check for host-aware devices.
		 */
		dev->flags |= ATA_DFLAG_ZAC;

	if (!(dev->flags & ATA_DFLAG_ZAC))
		return;

	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
		ata_dev_warn(dev,
			     "ATA Zoned Information Log not supported\n");
		return;
	}

	/*
	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_ZONED_INFORMATION,
				     identify_buf, 1);
	if (!err_mask) {
		u64 zoned_cap, opt_open, opt_nonseq, max_open;

		/* bit 63 of each qword marks the field as valid */
		zoned_cap = get_unaligned_le64(&identify_buf[8]);
		if ((zoned_cap >> 63))
			dev->zac_zoned_cap = (zoned_cap & 1);
		opt_open = get_unaligned_le64(&identify_buf[24]);
		if ((opt_open >> 63))
			dev->zac_zones_optimal_open = (u32)opt_open;
		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
		if ((opt_nonseq >> 63))
			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
		max_open = get_unaligned_le64(&identify_buf[40]);
		if ((max_open >> 63))
			dev->zac_zones_max_open = (u32)max_open;
	}
}
2326
818831c8
CH
2327static void ata_dev_config_trusted(struct ata_device *dev)
2328{
2329 struct ata_port *ap = dev->link->ap;
2330 u64 trusted_cap;
2331 unsigned int err;
2332
e8f11db9
CH
2333 if (!ata_id_has_trusted(dev->id))
2334 return;
2335
818831c8
CH
2336 if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2337 ata_dev_warn(dev,
2338 "Security Log not supported\n");
2339 return;
2340 }
2341
2342 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2343 ap->sector_buf, 1);
fc5c8aa7 2344 if (err)
818831c8 2345 return;
818831c8
CH
2346
2347 trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2348 if (!(trusted_cap & (1ULL << 63))) {
2349 ata_dev_dbg(dev,
2350 "Trusted Computing capability qword not valid!\n");
2351 return;
2352 }
2353
2354 if (trusted_cap & (1 << 0))
2355 dev->flags |= ATA_DFLAG_TRUSTED;
2356}
2357
891fd7c6
DLM
2358static int ata_dev_config_lba(struct ata_device *dev)
2359{
891fd7c6
DLM
2360 const u16 *id = dev->id;
2361 const char *lba_desc;
2362 char ncq_desc[24];
2363 int ret;
2364
2365 dev->flags |= ATA_DFLAG_LBA;
2366
2367 if (ata_id_has_lba48(id)) {
2368 lba_desc = "LBA48";
2369 dev->flags |= ATA_DFLAG_LBA48;
2370 if (dev->n_sectors >= (1UL << 28) &&
2371 ata_id_has_flush_ext(id))
2372 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2373 } else {
2374 lba_desc = "LBA";
2375 }
2376
2377 /* config NCQ */
2378 ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2379
2380 /* print device info to dmesg */
1c95a27c 2381 if (ata_dev_print_info(dev))
891fd7c6
DLM
2382 ata_dev_info(dev,
2383 "%llu sectors, multi %u: %s %s\n",
2384 (unsigned long long)dev->n_sectors,
2385 dev->multi_count, lba_desc, ncq_desc);
2386
2387 return ret;
2388}
2389
2390static void ata_dev_config_chs(struct ata_device *dev)
2391{
891fd7c6
DLM
2392 const u16 *id = dev->id;
2393
2394 if (ata_id_current_chs_valid(id)) {
2395 /* Current CHS translation is valid. */
2396 dev->cylinders = id[54];
2397 dev->heads = id[55];
2398 dev->sectors = id[56];
2399 } else {
2400 /* Default translation */
2401 dev->cylinders = id[1];
2402 dev->heads = id[3];
2403 dev->sectors = id[6];
2404 }
2405
2406 /* print device info to dmesg */
1c95a27c 2407 if (ata_dev_print_info(dev))
891fd7c6
DLM
2408 ata_dev_info(dev,
2409 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2410 (unsigned long long)dev->n_sectors,
2411 dev->multi_count, dev->cylinders,
2412 dev->heads, dev->sectors);
2413}
2414
d8d8778c
DLM
2415static void ata_dev_config_devslp(struct ata_device *dev)
2416{
2417 u8 *sata_setting = dev->link->ap->sector_buf;
2418 unsigned int err_mask;
2419 int i, j;
2420
2421 /*
2422 * Check device sleep capability. Get DevSlp timing variables
2423 * from SATA Settings page of Identify Device Data Log.
2424 */
06f6c4c6
DLM
2425 if (!ata_id_has_devslp(dev->id) ||
2426 !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
d8d8778c
DLM
2427 return;
2428
2429 err_mask = ata_read_log_page(dev,
2430 ATA_LOG_IDENTIFY_DEVICE,
2431 ATA_LOG_SATA_SETTINGS,
2432 sata_setting, 1);
fc5c8aa7 2433 if (err_mask)
d8d8778c 2434 return;
d8d8778c
DLM
2435
2436 dev->flags |= ATA_DFLAG_DEVSLP;
2437 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2438 j = ATA_LOG_DEVSLP_OFFSET + i;
2439 dev->devslp_timing[i] = sata_setting[j];
2440 }
2441}
2442
fe22e1c2
DLM
/*
 * ata_dev_config_cpr - read the Concurrent Positioning Ranges log
 * @dev: target device
 *
 * Read log page 0x47 (Concurrent Positioning Ranges, introduced with
 * ATA major version 11) and cache its range descriptors in
 * dev->cpr_log.  On any failure the previous cpr_log, if any, is
 * dropped (swap() installs NULL and the old log is freed).
 */
static void ata_dev_config_cpr(struct ata_device *dev)
{
	unsigned int err_mask;
	size_t buf_len;
	int i, nr_cpr = 0;
	struct ata_cpr_log *cpr_log = NULL;
	u8 *desc, *buf = NULL;

	if (ata_id_major_version(dev->id) < 11 ||
	    !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
		goto out;

	/*
	 * Read the concurrent positioning ranges log (0x47). We can have at
	 * most 255 32B range descriptors plus a 64B header.
	 */
	buf_len = (64 + 255 * 32 + 511) & ~511;	/* round up to 512B sectors */
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		goto out;

	err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
				     0, buf, buf_len >> 9);
	if (err_mask)
		goto out;

	/* byte 0 of the header holds the number of valid descriptors */
	nr_cpr = buf[0];
	if (!nr_cpr)
		goto out;

	cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
	if (!cpr_log)
		goto out;

	cpr_log->nr_cpr = nr_cpr;
	desc = &buf[64];
	for (i = 0; i < nr_cpr; i++, desc += 32) {
		cpr_log->cpr[i].num = desc[0];
		cpr_log->cpr[i].num_storage_elements = desc[1];
		cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
		cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
	}

out:
	/* install the new log (or NULL on failure) and free the old one */
	swap(dev->cpr_log, cpr_log);
	kfree(cpr_log);
	kfree(buf);
}
2491
d633b8a7
DLM
2492static void ata_dev_print_features(struct ata_device *dev)
2493{
2494 if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
2495 return;
2496
2497 ata_dev_info(dev,
fe22e1c2 2498 "Features:%s%s%s%s%s%s\n",
d633b8a7
DLM
2499 dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
2500 dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
2501 dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
2502 dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
fe22e1c2
DLM
2503 dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
2504 dev->cpr_log ? " CPR" : "");
d633b8a7
DLM
2505}
2506
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	bool print_info = ata_dev_print_info(dev);
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	unsigned int err_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev)) {
		ata_dev_dbg(dev, "no device\n");
		return 0;
	}

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (ap->flags & ATA_FLAG_NO_LPM)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (dev->horkage & ATA_HORKAGE_NOLPM) {
		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
	}

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	ata_dev_dbg(dev,
		    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
		    "85:%04x 86:%04x 87:%04x 88:%04x\n",
		    __func__,
		    id[49], id[82], id[83], id[84],
		    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(dev, id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		/* print device info to dmesg */
		if (print_info)
			ata_dev_info(dev, "%s: %s, %s, max %s\n",
				     revbuf, modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask));

		/* NCQ is configured as part of the LBA setup */
		if (ata_id_has_lba(id)) {
			rc = ata_dev_config_lba(dev);
			if (rc)
				return rc;
		} else {
			ata_dev_config_chs(dev);
		}

		ata_dev_config_devslp(dev);
		ata_dev_config_sense_reporting(dev);
		ata_dev_config_zac(dev);
		ata_dev_config_trusted(dev);
		ata_dev_config_cpr(dev);
		dev->cdb_len = 32;

		if (print_info)
			ata_dev_print_features(dev);
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		if (ata_id_has_da(dev->id)) {
			dev->flags |= ATA_DFLAG_DA;
			zpodd_init(dev);
		}

		/* print device info to dmesg */
		if (print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* give the LLDD a chance to apply controller specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	return rc;
}
2791
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40, always.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
EXPORT_SYMBOL_GPL(ata_cable_40wire);
be0d18df
AC
2805
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80, always.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
EXPORT_SYMBOL_GPL(ata_cable_80wire);
be0d18df
AC
2819
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK, always.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
EXPORT_SYMBOL_GPL(ata_cable_unknown);
be0d18df 2832
c88f90c3
TH
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_IGN, always.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
EXPORT_SYMBOL_GPL(ata_cable_ignore);
c88f90c3 2845
be0d18df
AC
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA, always.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
EXPORT_SYMBOL_GPL(ata_cable_sata);
be0d18df 2858
1da177e4
LT
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.  On failure, per-device retry counters drive the retry
 *	loop: -EINVAL gives up immediately, -ENODEV allows one more try,
 *	and the last retry lowers link speed and transfer mode first.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success as long as at least one device is enabled */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		fallthrough;
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2996
3be680b7
TH
2997/**
2998 * sata_print_link_status - Print SATA link status
936fd732 2999 * @link: SATA link to printk link status about
3be680b7
TH
3000 *
3001 * This function prints link speed and status of a SATA link.
3002 *
3003 * LOCKING:
3004 * None.
3005 */
6bdb4fc9 3006static void sata_print_link_status(struct ata_link *link)
3be680b7 3007{
6d5f9732 3008 u32 sstatus, scontrol, tmp;
3be680b7 3009
936fd732 3010 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 3011 return;
936fd732 3012 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 3013
b1c72916 3014 if (ata_phys_link_online(link)) {
3be680b7 3015 tmp = (sstatus >> 4) & 0xf;
a9a79dfe
JP
3016 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3017 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 3018 } else {
a9a79dfe
JP
3019 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3020 sstatus, scontrol);
3be680b7
TH
3021 }
3022}
3023
ebdfca6e
AC
3024/**
3025 * ata_dev_pair - return other device on cable
ebdfca6e
AC
3026 * @adev: device
3027 *
3028 * Obtain the other device on the same cable, or if none is
3029 * present NULL is returned
3030 */
2e9edbf8 3031
3373efd8 3032struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 3033{
9af5c9c9
TH
3034 struct ata_link *link = adev->link;
3035 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 3036 if (!ata_dev_enabled(pair))
ebdfca6e
AC
3037 return NULL;
3038 return pair;
3039}
a52fbcfc 3040EXPORT_SYMBOL_GPL(ata_dev_pair);
ebdfca6e 3041
1c3fae4d 3042/**
3c567b7d 3043 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 3044 * @link: Link to adjust SATA spd limit for
a07d499b 3045 * @spd_limit: Additional limit
1c3fae4d 3046 *
936fd732 3047 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 3048 * function only adjusts the limit. The change must be applied
3c567b7d 3049 * using sata_set_spd().
1c3fae4d 3050 *
a07d499b
TH
3051 * If @spd_limit is non-zero, the speed is limited to equal to or
3052 * lower than @spd_limit if such speed is supported. If
3053 * @spd_limit is slower than any supported speed, only the lowest
3054 * supported speed is allowed.
3055 *
1c3fae4d
TH
3056 * LOCKING:
3057 * Inherited from caller.
3058 *
3059 * RETURNS:
3060 * 0 on success, negative errno on failure
3061 */
a07d499b 3062int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
1c3fae4d 3063{
81952c54 3064 u32 sstatus, spd, mask;
a07d499b 3065 int rc, bit;
1c3fae4d 3066
936fd732 3067 if (!sata_scr_valid(link))
008a7896
TH
3068 return -EOPNOTSUPP;
3069
3070 /* If SCR can be read, use it to determine the current SPD.
936fd732 3071 * If not, use cached value in link->sata_spd.
008a7896 3072 */
936fd732 3073 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
9913ff8a 3074 if (rc == 0 && ata_sstatus_online(sstatus))
008a7896
TH
3075 spd = (sstatus >> 4) & 0xf;
3076 else
936fd732 3077 spd = link->sata_spd;
1c3fae4d 3078
936fd732 3079 mask = link->sata_spd_limit;
1c3fae4d
TH
3080 if (mask <= 1)
3081 return -EINVAL;
008a7896
TH
3082
3083 /* unconditionally mask off the highest bit */
a07d499b
TH
3084 bit = fls(mask) - 1;
3085 mask &= ~(1 << bit);
1c3fae4d 3086
2dc0b46b
DM
3087 /*
3088 * Mask off all speeds higher than or equal to the current one. At
3089 * this point, if current SPD is not available and we previously
3090 * recorded the link speed from SStatus, the driver has already
3091 * masked off the highest bit so mask should already be 1 or 0.
3092 * Otherwise, we should not force 1.5Gbps on a link where we have
3093 * not previously recorded speed from SStatus. Just return in this
3094 * case.
008a7896
TH
3095 */
3096 if (spd > 1)
3097 mask &= (1 << (spd - 1)) - 1;
3098 else
2dc0b46b 3099 return -EINVAL;
008a7896
TH
3100
3101 /* were we already at the bottom? */
1c3fae4d
TH
3102 if (!mask)
3103 return -EINVAL;
3104
a07d499b
TH
3105 if (spd_limit) {
3106 if (mask & ((1 << spd_limit) - 1))
3107 mask &= (1 << spd_limit) - 1;
3108 else {
3109 bit = ffs(mask) - 1;
3110 mask = 1 << bit;
3111 }
3112 }
3113
936fd732 3114 link->sata_spd_limit = mask;
1c3fae4d 3115
a9a79dfe
JP
3116 ata_link_warn(link, "limiting SATA link speed to %s\n",
3117 sata_spd_string(fls(mask)));
1c3fae4d
TH
3118
3119 return 0;
3120}
3121
a9b2c120 3122#ifdef CONFIG_ATA_ACPI
a0f79b92
TH
3123/**
3124 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3125 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3126 * @cycle: cycle duration in ns
3127 *
3128 * Return matching xfer mode for @cycle. The returned mode is of
3129 * the transfer type specified by @xfer_shift. If @cycle is too
3130 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3131 * than the fastest known mode, the fasted mode is returned.
3132 *
3133 * LOCKING:
3134 * None.
3135 *
3136 * RETURNS:
3137 * Matching xfer_mode, 0xff if no match found.
3138 */
3139u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3140{
3141 u8 base_mode = 0xff, last_mode = 0xff;
3142 const struct ata_xfer_ent *ent;
3143 const struct ata_timing *t;
3144
3145 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3146 if (ent->shift == xfer_shift)
3147 base_mode = ent->base;
3148
3149 for (t = ata_timing_find_mode(base_mode);
3150 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3151 unsigned short this_cycle;
3152
3153 switch (xfer_shift) {
3154 case ATA_SHIFT_PIO:
3155 case ATA_SHIFT_MWDMA:
3156 this_cycle = t->cycle;
3157 break;
3158 case ATA_SHIFT_UDMA:
3159 this_cycle = t->udma;
3160 break;
3161 default:
3162 return 0xff;
3163 }
3164
3165 if (cycle > this_cycle)
3166 break;
3167
3168 last_mode = t->mode;
3169 }
3170
3171 return last_mode;
3172}
a9b2c120 3173#endif
a0f79b92 3174
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is a modifier flag, strip it from the selector */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest DMA mode, preferring UDMA over MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		fallthrough;
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* fail if no PIO mode is left or nothing actually changed */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3263
3373efd8 3264static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 3265{
d0cb43b3 3266 struct ata_port *ap = dev->link->ap;
9af5c9c9 3267 struct ata_eh_context *ehc = &dev->link->eh_context;
d0cb43b3 3268 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
4055dee7
TH
3269 const char *dev_err_whine = "";
3270 int ign_dev_err = 0;
d0cb43b3 3271 unsigned int err_mask = 0;
83206a29 3272 int rc;
1da177e4 3273
e8384607 3274 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
3275 if (dev->xfer_shift == ATA_SHIFT_PIO)
3276 dev->flags |= ATA_DFLAG_PIO;
3277
d0cb43b3
TH
3278 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3279 dev_err_whine = " (SET_XFERMODE skipped)";
3280 else {
3281 if (nosetxfer)
a9a79dfe
JP
3282 ata_dev_warn(dev,
3283 "NOSETXFER but PATA detected - can't "
3284 "skip SETXFER, might malfunction\n");
d0cb43b3
TH
3285 err_mask = ata_dev_set_xfermode(dev);
3286 }
2dcb407e 3287
4055dee7
TH
3288 if (err_mask & ~AC_ERR_DEV)
3289 goto fail;
3290
3291 /* revalidate */
3292 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3293 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3294 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3295 if (rc)
3296 return rc;
3297
b93fda12
AC
3298 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3299 /* Old CFA may refuse this command, which is just fine */
3300 if (ata_id_is_cfa(dev->id))
3301 ign_dev_err = 1;
3302 /* Catch several broken garbage emulations plus some pre
3303 ATA devices */
3304 if (ata_id_major_version(dev->id) == 0 &&
3305 dev->pio_mode <= XFER_PIO_2)
3306 ign_dev_err = 1;
3307 /* Some very old devices and some bad newer ones fail
3308 any kind of SET_XFERMODE request but support PIO0-2
3309 timings and no IORDY */
3310 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3311 ign_dev_err = 1;
3312 }
3acaf94b
AC
3313 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3314 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
c5038fc0 3315 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3acaf94b
AC
3316 dev->dma_mode == XFER_MW_DMA_0 &&
3317 (dev->id[63] >> 8) & 1)
4055dee7 3318 ign_dev_err = 1;
3acaf94b 3319
4055dee7
TH
3320 /* if the device is actually configured correctly, ignore dev err */
3321 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3322 ign_dev_err = 1;
1da177e4 3323
4055dee7
TH
3324 if (err_mask & AC_ERR_DEV) {
3325 if (!ign_dev_err)
3326 goto fail;
3327 else
3328 dev_err_whine = " (device error ignored)";
3329 }
48a8a14f 3330
4633778b
HR
3331 ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
3332 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 3333
07b9b6d6
DLM
3334 if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3335 ehc->i.flags & ATA_EHI_DID_HARDRESET)
3336 ata_dev_info(dev, "configured for %s%s\n",
3337 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3338 dev_err_whine);
4055dee7 3339
83206a29 3340 return 0;
4055dee7
TH
3341
3342 fail:
a9a79dfe 3343 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
4055dee7 3344 return -EIO;
1da177e4
LT
3345}
3346
1da177e4 3347/**
04351821 3348 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3349 * @link: link on which timings will be programmed
1967b7ff 3350 * @r_failed_dev: out parameter for failed device
1da177e4 3351 *
04351821
A
3352 * Standard implementation of the function used to tune and set
3353 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3354 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3355 * returned in @r_failed_dev.
780a87f7 3356 *
1da177e4 3357 * LOCKING:
0cba632b 3358 * PCI/etc. bus probe sem.
e82cbdb9
TH
3359 *
3360 * RETURNS:
3361 * 0 on success, negative errno otherwise
1da177e4 3362 */
04351821 3363
0260731f 3364int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3365{
0260731f 3366 struct ata_port *ap = link->ap;
e8e0619f 3367 struct ata_device *dev;
f58229f8 3368 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3369
a6d5a51c 3370 /* step 1: calculate xfer_mask */
1eca4365 3371 ata_for_each_dev(dev, link, ENABLED) {
7dc951ae 3372 unsigned long pio_mask, dma_mask;
b3a70601 3373 unsigned int mode_mask;
a6d5a51c 3374
b3a70601
AC
3375 mode_mask = ATA_DMA_MASK_ATA;
3376 if (dev->class == ATA_DEV_ATAPI)
3377 mode_mask = ATA_DMA_MASK_ATAPI;
3378 else if (ata_id_is_cfa(dev->id))
3379 mode_mask = ATA_DMA_MASK_CFA;
3380
3373efd8 3381 ata_dev_xfermask(dev);
33267325 3382 ata_force_xfermask(dev);
1da177e4 3383
acf356b1 3384 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
b3a70601
AC
3385
3386 if (libata_dma_mask & mode_mask)
80a9c430
SS
3387 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3388 dev->udma_mask);
b3a70601
AC
3389 else
3390 dma_mask = 0;
3391
acf356b1
TH
3392 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3393 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3394
4f65977d 3395 found = 1;
b15b3eba 3396 if (ata_dma_enabled(dev))
5444a6f4 3397 used_dma = 1;
a6d5a51c 3398 }
4f65977d 3399 if (!found)
e82cbdb9 3400 goto out;
a6d5a51c
TH
3401
3402 /* step 2: always set host PIO timings */
1eca4365 3403 ata_for_each_dev(dev, link, ENABLED) {
70cd071e 3404 if (dev->pio_mode == 0xff) {
a9a79dfe 3405 ata_dev_warn(dev, "no PIO support\n");
e8e0619f 3406 rc = -EINVAL;
e82cbdb9 3407 goto out;
e8e0619f
TH
3408 }
3409
3410 dev->xfer_mode = dev->pio_mode;
3411 dev->xfer_shift = ATA_SHIFT_PIO;
3412 if (ap->ops->set_piomode)
3413 ap->ops->set_piomode(ap, dev);
3414 }
1da177e4 3415
a6d5a51c 3416 /* step 3: set host DMA timings */
1eca4365
TH
3417 ata_for_each_dev(dev, link, ENABLED) {
3418 if (!ata_dma_enabled(dev))
e8e0619f
TH
3419 continue;
3420
3421 dev->xfer_mode = dev->dma_mode;
3422 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3423 if (ap->ops->set_dmamode)
3424 ap->ops->set_dmamode(ap, dev);
3425 }
1da177e4
LT
3426
3427 /* step 4: update devices' xfer mode */
1eca4365 3428 ata_for_each_dev(dev, link, ENABLED) {
3373efd8 3429 rc = ata_dev_set_mode(dev);
5bbc53f4 3430 if (rc)
e82cbdb9 3431 goto out;
83206a29 3432 }
1da177e4 3433
e8e0619f
TH
3434 /* Record simplex status. If we selected DMA then the other
3435 * host channels are not permitted to do so.
5444a6f4 3436 */
cca3974e 3437 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3438 ap->host->simplex_claimed = ap;
5444a6f4 3439
e82cbdb9
TH
3440 out:
3441 if (rc)
3442 *r_failed_dev = dev;
3443 return rc;
1da177e4 3444}
a52fbcfc 3445EXPORT_SYMBOL_GPL(ata_do_set_mode);
1da177e4 3446
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which 0xff timeout to use, read comment in libata.h */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from master.  On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	/* the -ENODEV grace period never extends past the overall deadline */
	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		/* tmp keeps the raw callback result for the warning below */
		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
		 * offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		/* any remaining non-zero value is a hard error - bail out */
		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* after 5s with at least 3s to go, tell the user once */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
3533
/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	/* fixed post-reset settle delay before the first readiness poll */
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
aa2731ad 3556
f5914a46 3557/**
0aa1113d 3558 * ata_std_prereset - prepare for reset
cc0680a5 3559 * @link: ATA link to be reset
d4b2bab4 3560 * @deadline: deadline jiffies for the operation
f5914a46 3561 *
cc0680a5 3562 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3563 * prereset makes libata abort whole reset sequence and give up
3564 * that port, so prereset should be best-effort. It does its
3565 * best to prepare for reset sequence but if things go wrong, it
3566 * should just whine, not fail.
f5914a46
TH
3567 *
3568 * LOCKING:
3569 * Kernel thread context (may sleep)
3570 *
3571 * RETURNS:
ac1eb665 3572 * Always 0.
f5914a46 3573 */
0aa1113d 3574int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3575{
cc0680a5 3576 struct ata_port *ap = link->ap;
936fd732 3577 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3578 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3579 int rc;
3580
f5914a46
TH
3581 /* if we're about to do hardreset, nothing more to do */
3582 if (ehc->i.action & ATA_EH_HARDRESET)
3583 return 0;
3584
936fd732 3585 /* if SATA, resume link */
a16abc0b 3586 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3587 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3588 /* whine about phy resume failure but proceed */
3589 if (rc && rc != -EOPNOTSUPP)
a9a79dfe
JP
3590 ata_link_warn(link,
3591 "failed to resume link for reset (errno=%d)\n",
3592 rc);
f5914a46
TH
3593 }
3594
45db2f6c 3595 /* no point in trying softreset on offline link */
b1c72916 3596 if (ata_phys_link_offline(link))
45db2f6c
TH
3597 ehc->i.action &= ~ATA_EH_SOFTRESET;
3598
f5914a46
TH
3599 return 0;
3600}
a52fbcfc 3601EXPORT_SYMBOL_GPL(ata_std_prereset);
f5914a46 3602
57c9efdf
TH
3603/**
3604 * sata_std_hardreset - COMRESET w/o waiting or classification
3605 * @link: link to reset
3606 * @class: resulting class of attached device
3607 * @deadline: deadline jiffies for the operation
3608 *
3609 * Standard SATA COMRESET w/o waiting or classification.
3610 *
3611 * LOCKING:
3612 * Kernel thread context (may sleep)
3613 *
3614 * RETURNS:
3615 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3616 */
3617int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3618 unsigned long deadline)
3619{
3620 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3621 bool online;
3622 int rc;
3623
3624 /* do hardreset */
3625 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
57c9efdf
TH
3626 return online ? -EAGAIN : rc;
3627}
a52fbcfc 3628EXPORT_SYMBOL_GPL(sata_std_hardreset);
57c9efdf 3629
c2bd5804 3630/**
203c75b8 3631 * ata_std_postreset - standard postreset callback
cc0680a5 3632 * @link: the target ata_link
c2bd5804
TH
3633 * @classes: classes of attached devices
3634 *
3635 * This function is invoked after a successful reset. Note that
3636 * the device might have been reset more than once using
3637 * different reset methods before postreset is invoked.
c2bd5804 3638 *
c2bd5804
TH
3639 * LOCKING:
3640 * Kernel thread context (may sleep)
3641 */
203c75b8 3642void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3643{
f046519f
TH
3644 u32 serror;
3645
f046519f
TH
3646 /* reset complete, clear SError */
3647 if (!sata_scr_read(link, SCR_ERROR, &serror))
3648 sata_scr_write(link, SCR_ERROR, serror);
3649
c2bd5804 3650 /* print link status */
936fd732 3651 sata_print_link_status(link);
c2bd5804 3652}
a52fbcfc 3653EXPORT_SYMBOL_GPL(ata_std_postreset);
c2bd5804 3654
623a3128
TH
3655/**
3656 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3657 * @dev: device to compare against
3658 * @new_class: class of the new device
3659 * @new_id: IDENTIFY page of the new device
3660 *
3661 * Compare @new_class and @new_id against @dev and determine
3662 * whether @dev is the device indicated by @new_class and
3663 * @new_id.
3664 *
3665 * LOCKING:
3666 * None.
3667 *
3668 * RETURNS:
3669 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3670 */
3373efd8
TH
3671static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3672 const u16 *new_id)
623a3128
TH
3673{
3674 const u16 *old_id = dev->id;
a0cf733b
TH
3675 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3676 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3677
3678 if (dev->class != new_class) {
a9a79dfe
JP
3679 ata_dev_info(dev, "class mismatch %d != %d\n",
3680 dev->class, new_class);
623a3128
TH
3681 return 0;
3682 }
3683
a0cf733b
TH
3684 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3685 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3686 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3687 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3688
3689 if (strcmp(model[0], model[1])) {
a9a79dfe
JP
3690 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3691 model[0], model[1]);
623a3128
TH
3692 return 0;
3693 }
3694
3695 if (strcmp(serial[0], serial[1])) {
a9a79dfe
JP
3696 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3697 serial[0], serial[1]);
623a3128
TH
3698 return 0;
3699 }
3700
623a3128
TH
3701 return 1;
3702}
3703
3704/**
fe30911b 3705 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3706 * @dev: target ATA device
bff04647 3707 * @readid_flags: read ID flags
623a3128
TH
3708 *
3709 * Re-read IDENTIFY page and make sure @dev is still attached to
3710 * the port.
3711 *
3712 * LOCKING:
3713 * Kernel thread context (may sleep)
3714 *
3715 * RETURNS:
3716 * 0 on success, negative errno otherwise
3717 */
fe30911b 3718int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3719{
5eb45c02 3720 unsigned int class = dev->class;
9af5c9c9 3721 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3722 int rc;
3723
fe635c7e 3724 /* read ID data */
bff04647 3725 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3726 if (rc)
fe30911b 3727 return rc;
623a3128
TH
3728
3729 /* is the device still there? */
fe30911b
TH
3730 if (!ata_dev_same_device(dev, class, id))
3731 return -ENODEV;
623a3128 3732
fe635c7e 3733 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3734 return 0;
3735}
3736
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	/* remember the pre-revalidation capacities for the checks below */
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA &&
	    new_class != ATA_DEV_ATAPI &&
	    new_class != ATA_DEV_ZAC &&
	    new_class != ATA_DEV_SEMB) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * Something could have caused HPA to be unlocked
	 * involuntarily.  If n_native_sectors hasn't changed and the
	 * new size matches it, keep the device.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/*
	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
	 * unlocking HPA in those cases.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;	/* -EIO triggers retry in the caller's EH path */
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
 fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3833
/*
 * One entry in the device quirk table below: a device whose IDENTIFY
 * strings match both patterns gets the listed horkage flags applied.
 */
struct ata_blacklist_entry {
	const char *model_num;	/* model number pattern (entries below use
				 * glob-style wildcards - see <linux/glob.h>) */
	const char *model_rev;	/* firmware revision pattern; NULL matches any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply on match */
};
3839
3840static const struct ata_blacklist_entry ata_device_blacklist [] = {
3841 /* Devices with DMA related problems under Linux */
3842 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3843 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3844 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3845 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3846 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3847 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3848 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3849 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3850 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
7da4c935 3851 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3852 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3853 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3854 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3855 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3856 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
7da4c935 3857 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3858 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3859 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3860 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3861 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3862 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3863 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3864 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3865 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3866 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3867 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 3868 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 3869 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
d17d794c 3870 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
b00622fc 3871 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
3af9a77a 3872 /* Odd clown on sil3726/4726 PMPs */
50af2fa1 3873 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
a66307d4
HR
3874 /* Similar story with ASMedia 1092 */
3875 { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
6919a0a6 3876
18d6e9d5 3877 /* Weird ATAPI devices */
40a1d531 3878 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
6a87e42e 3879 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
a32450e1 3880 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
0523f037 3881 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
18d6e9d5 3882
af34d637
DM
3883 /*
3884 * Causes silent data corruption with higher max sects.
3885 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
3886 */
3887 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
1488a1e3
TH
3888
3889 /*
e0edc8c5 3890 * These devices time out with higher max sects.
1488a1e3
TH
3891 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
3892 */
e0edc8c5 3893 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
db5ff909 3894 { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
af34d637 3895
6919a0a6
AC
3896 /* Devices we expect to fail diagnostics */
3897
3898 /* Devices where NCQ should be avoided */
3899 /* NCQ is slow */
2dcb407e 3900 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
ef1429c0 3901 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3902 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3903 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 3904 /* NCQ is broken */
539cc7c7 3905 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 3906 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
da6f0ec2 3907 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
e41bd3e8 3908 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
5ccfca97 3909 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
539cc7c7 3910
ac70a964 3911 /* Seagate NCQ + FLUSH CACHE firmware bug */
4d1f9082 3912 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ac70a964 3913 ATA_HORKAGE_FIRMWARE_WARN },
d10d491f 3914
4d1f9082 3915 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
d10d491f
TH
3916 ATA_HORKAGE_FIRMWARE_WARN },
3917
4d1f9082 3918 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
d10d491f
TH
3919 ATA_HORKAGE_FIRMWARE_WARN },
3920
4d1f9082 3921 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ac70a964
TH
3922 ATA_HORKAGE_FIRMWARE_WARN },
3923
31f6264e
HG
3924 /* drives which fail FPDMA_AA activation (some may freeze afterwards)
3925 the ST disks also have LPM issues */
8756a25b 3926 { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
ef1429c0 3927 ATA_HORKAGE_NOLPM },
08c85d2a 3928 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
87809942 3929
36e337d0
RH
3930 /* Blacklist entries taken from Silicon Image 3124/3132
3931 Windows driver .inf file - also several Linux problem reports */
ef1429c0
DLM
3932 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ },
3933 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ },
3934 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ },
6919a0a6 3935
68b0ddb2 3936 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
ef1429c0 3937 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ },
68b0ddb2 3938
3b545563 3939 /* Sandisk SD7/8/9s lock up hard on large trims */
ef1429c0 3940 { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M },
322579dc 3941
16c55b03 3942 /* devices which puke on READ_NATIVE_MAX */
ef1429c0 3943 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA },
16c55b03
TH
3944 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3945 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3946 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 3947
7831387b
TH
3948 /* this one allows HPA unlocking but fails IOs on the area */
3949 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
3950
93328e11 3951 /* Devices which report 1 sector over size HPA */
ef1429c0
DLM
3952 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE },
3953 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE },
3954 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE },
93328e11 3955
6bbfd53d 3956 /* Devices which get the IVB wrong */
ef1429c0 3957 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
a79067e5 3958 /* Maybe we should just blacklist TSSTcorp... */
ef1429c0 3959 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB },
6bbfd53d 3960
9ce8e307 3961 /* Devices that do not need bridging limits applied */
ef1429c0
DLM
3962 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK },
3963 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK },
9ce8e307 3964
9062712f 3965 /* Devices which aren't very happy with higher link speeds */
ef1429c0
DLM
3966 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS },
3967 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS },
9062712f 3968
d0cb43b3
TH
3969 /*
3970 * Devices which choke on SETXFER. Applies only if both the
3971 * device and controller are SATA.
3972 */
cd691876 3973 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
3a25179e
VL
3974 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
3975 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
cd691876
TH
3976 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
3977 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
d0cb43b3 3978
b17e5729 3979 /* Crucial BX100 SSD 500GB has broken LPM support */
3bf7b5d6 3980 { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
b17e5729 3981
d418ff56
HG
3982 /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
3983 { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
9c7be59f 3984 ATA_HORKAGE_ZERO_AFTER_TRIM |
ef1429c0 3985 ATA_HORKAGE_NOLPM },
d418ff56
HG
3986 /* 512GB MX100 with newer firmware has only LPM issues */
3987 { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
ef1429c0 3988 ATA_HORKAGE_NOLPM },
9c7be59f 3989
62ac3f73
HG
3990 /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
3991 { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
3992 ATA_HORKAGE_ZERO_AFTER_TRIM |
ef1429c0 3993 ATA_HORKAGE_NOLPM },
62ac3f73
HG
3994 { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
3995 ATA_HORKAGE_ZERO_AFTER_TRIM |
ef1429c0 3996 ATA_HORKAGE_NOLPM },
62ac3f73 3997
76936e9a 3998 /* These specific Samsung models/firmware-revs do not handle LPM well */
ef1429c0
DLM
3999 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
4000 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
4001 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM },
4002 { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },
b5b4d3a5 4003
f78dea06 4004 /* devices that don't properly handle queued TRIM commands */
136d769e 4005 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ef1429c0 4006 ATA_HORKAGE_ZERO_AFTER_TRIM },
243918be 4007 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ef1429c0 4008 ATA_HORKAGE_ZERO_AFTER_TRIM },
ff7f53fb 4009 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ef1429c0 4010 ATA_HORKAGE_ZERO_AFTER_TRIM },
9051bd39 4011 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ef1429c0 4012 ATA_HORKAGE_ZERO_AFTER_TRIM },
ff7f53fb 4013 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ef1429c0 4014 ATA_HORKAGE_ZERO_AFTER_TRIM },
ff7f53fb 4015 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ef1429c0 4016 ATA_HORKAGE_ZERO_AFTER_TRIM },
53997522
CL
4017 { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4018 ATA_HORKAGE_NO_DMA_LOG |
ef1429c0 4019 ATA_HORKAGE_ZERO_AFTER_TRIM },
ca6bfcb2 4020 { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ef1429c0 4021 ATA_HORKAGE_ZERO_AFTER_TRIM },
ca6bfcb2 4022 { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ef1429c0 4023 ATA_HORKAGE_ZERO_AFTER_TRIM },
8a6430ab 4024 { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
7a8526a5 4025 ATA_HORKAGE_ZERO_AFTER_TRIM |
ef1429c0 4026 ATA_HORKAGE_NO_NCQ_ON_ATI },
8a6430ab 4027 { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
7a8526a5 4028 ATA_HORKAGE_ZERO_AFTER_TRIM |
ef1429c0 4029 ATA_HORKAGE_NO_NCQ_ON_ATI },
7a7184b0 4030 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ef1429c0 4031 ATA_HORKAGE_ZERO_AFTER_TRIM },
e61f7d1c 4032
cda57b1b 4033 /* devices that don't properly handle TRIM commands */
ef1429c0
DLM
4034 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM },
4035 { "M88V29*", NULL, ATA_HORKAGE_NOTRIM },
cda57b1b 4036
e61f7d1c
MP
4037 /*
4038 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4039 * (Return Zero After Trim) flags in the ATA Command Set are
4040 * unreliable in the sense that they only define what happens if
4041 * the device successfully executed the DSM TRIM command. TRIM
4042 * is only advisory, however, and the device is free to silently
4043 * ignore all or parts of the request.
4044 *
4045 * Whitelist drives that are known to reliably return zeroes
4046 * after TRIM.
4047 */
4048
4049 /*
4050 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4051 * that model before whitelisting all other intel SSDs.
4052 */
ef1429c0
DLM
4053 { "INTEL*SSDSC2MH*", NULL, 0 },
4054
4055 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4056 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4057 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4058 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4059 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4060 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4061 { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
4062 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM },
f78dea06 4063
ecd75ad5
TH
4064 /*
4065 * Some WD SATA-I drives spin up and down erratically when the link
4066 * is put into the slumber mode. We don't have full list of the
4067 * affected devices. Disable LPM if the device matches one of the
4068 * known prefixes and is SATA-1. As a side effect LPM partial is
4069 * lost too.
4070 *
4071 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4072 */
4073 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4074 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4075 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4076 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4077 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4078 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4079 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4080
ac9f0c81
AL
4081 /*
4082 * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
4083 * log page is accessed. Ensure we never ask for this log page with
4084 * these devices.
4085 */
4086 { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
4087
6919a0a6
AC
4088 /* End Marker */
4089 { }
1da177e4 4090};
2e9edbf8 4091
75683fe7 4092static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4093{
8bfa79fc
TH
4094 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4095 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4096 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4097
8bfa79fc
TH
4098 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4099 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4100
6919a0a6 4101 while (ad->model_num) {
1c402799 4102 if (glob_match(ad->model_num, model_num)) {
6919a0a6
AC
4103 if (ad->model_rev == NULL)
4104 return ad->horkage;
1c402799 4105 if (glob_match(ad->model_rev, model_rev))
6919a0a6 4106 return ad->horkage;
f4b15fef 4107 }
6919a0a6 4108 ad++;
f4b15fef 4109 }
1da177e4
LT
4110 return 0;
4111}
4112
6919a0a6
AC
4113static int ata_dma_blacklisted(const struct ata_device *dev)
4114{
4115 /* We don't support polling DMA.
4116 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4117 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4118 */
9af5c9c9 4119 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4120 (dev->flags & ATA_DFLAG_CDB_INTR))
4121 return 1;
75683fe7 4122 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4123}
4124
6bbfd53d
AC
4125/**
4126 * ata_is_40wire - check drive side detection
4127 * @dev: device
4128 *
4129 * Perform drive side detection decoding, allowing for device vendors
4130 * who can't follow the documentation.
4131 */
4132
4133static int ata_is_40wire(struct ata_device *dev)
4134{
4135 if (dev->horkage & ATA_HORKAGE_IVB)
4136 return ata_drive_40wire_relaxed(dev->id);
4137 return ata_drive_40wire(dev->id);
4138}
4139
15a5551c
AC
4140/**
4141 * cable_is_40wire - 40/80/SATA decider
4142 * @ap: port to consider
4143 *
4144 * This function encapsulates the policy for speed management
4145 * in one place. At the moment we don't cache the result but
4146 * there is a good case for setting ap->cbl to the result when
4147 * we are called with unknown cables (and figuring out if it
4148 * impacts hotplug at all).
4149 *
4150 * Return 1 if the cable appears to be 40 wire.
4151 */
4152
4153static int cable_is_40wire(struct ata_port *ap)
4154{
4155 struct ata_link *link;
4156 struct ata_device *dev;
4157
4a9c7b33 4158 /* If the controller thinks we are 40 wire, we are. */
15a5551c
AC
4159 if (ap->cbl == ATA_CBL_PATA40)
4160 return 1;
4a9c7b33
TH
4161
4162 /* If the controller thinks we are 80 wire, we are. */
15a5551c
AC
4163 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4164 return 0;
4a9c7b33
TH
4165
4166 /* If the system is known to be 40 wire short cable (eg
4167 * laptop), then we allow 80 wire modes even if the drive
4168 * isn't sure.
4169 */
f792068e
AC
4170 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4171 return 0;
4a9c7b33
TH
4172
4173 /* If the controller doesn't know, we scan.
4174 *
4175 * Note: We look for all 40 wire detects at this point. Any
4176 * 80 wire detect is taken to be 80 wire cable because
4177 * - in many setups only the one drive (slave if present) will
4178 * give a valid detect
4179 * - if you have a non detect capable drive you don't want it
4180 * to colour the choice
4181 */
1eca4365
TH
4182 ata_for_each_link(link, ap, EDGE) {
4183 ata_for_each_dev(dev, link, ENABLED) {
4184 if (!ata_is_40wire(dev))
15a5551c
AC
4185 return 0;
4186 }
4187 }
4188 return 1;
4189}
4190
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...  The limits are applied in a fixed order:
 *	controller modes, drive modes, CFA pairing, DMA blacklist,
 *	simplex claim, IORDY, the LLDD's mode_filter and finally the
 *	cable rule.
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 * CFA Advanced TrueIDE timings are not allowed on a shared
	 * cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	/* drop all DMA modes for blacklisted devices */
	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to veto individual modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4268
1da177e4
LT
4269/**
4270 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4271 * @dev: Device to which command will be sent
4272 *
780a87f7
JG
4273 * Issue SET FEATURES - XFER MODE command to device @dev
4274 * on port @ap.
4275 *
1da177e4 4276 * LOCKING:
0cba632b 4277 * PCI/etc. bus probe sem.
83206a29
TH
4278 *
4279 * RETURNS:
4280 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4281 */
4282
3373efd8 4283static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4284{
a0123703 4285 struct ata_taskfile tf;
83206a29 4286 unsigned int err_mask;
1da177e4
LT
4287
4288 /* set up set-features taskfile */
4633778b 4289 ata_dev_dbg(dev, "set features - xfer mode\n");
1da177e4 4290
464cf177
TH
4291 /* Some controllers and ATAPI devices show flaky interrupt
4292 * behavior after setting xfer mode. Use polling instead.
4293 */
3373efd8 4294 ata_tf_init(dev, &tf);
a0123703
TH
4295 tf.command = ATA_CMD_SET_FEATURES;
4296 tf.feature = SETFEATURES_XFER;
464cf177 4297 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703 4298 tf.protocol = ATA_PROT_NODATA;
b9f8ab2d 4299 /* If we are using IORDY we must send the mode setting command */
11b7becc
JG
4300 if (ata_pio_need_iordy(dev))
4301 tf.nsect = dev->xfer_mode;
b9f8ab2d
AC
4302 /* If the device has IORDY and the controller does not - turn it off */
4303 else if (ata_id_has_iordy(dev->id))
11b7becc 4304 tf.nsect = 0x01;
b9f8ab2d
AC
4305 else /* In the ancient relic department - skip all of this */
4306 return 0;
1da177e4 4307
d531be2c
MP
4308 /* On some disks, this command causes spin-up, so we need longer timeout */
4309 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
9f45cbd3 4310
9f45cbd3
KCA
4311 return err_mask;
4312}
1152b261 4313
9f45cbd3 4314/**
218f3d30 4315 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4316 * @dev: Device to which command will be sent
4317 * @enable: Whether to enable or disable the feature
218f3d30 4318 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4319 *
4320 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4321 * on port @ap with sector count
9f45cbd3
KCA
4322 *
4323 * LOCKING:
4324 * PCI/etc. bus probe sem.
4325 *
4326 * RETURNS:
4327 * 0 on success, AC_ERR_* mask otherwise.
4328 */
1152b261 4329unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
9f45cbd3
KCA
4330{
4331 struct ata_taskfile tf;
4332 unsigned int err_mask;
974e0a45 4333 unsigned long timeout = 0;
9f45cbd3
KCA
4334
4335 /* set up set-features taskfile */
4633778b 4336 ata_dev_dbg(dev, "set features - SATA features\n");
9f45cbd3
KCA
4337
4338 ata_tf_init(dev, &tf);
4339 tf.command = ATA_CMD_SET_FEATURES;
4340 tf.feature = enable;
4341 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4342 tf.protocol = ATA_PROT_NODATA;
218f3d30 4343 tf.nsect = feature;
9f45cbd3 4344
974e0a45
DLM
4345 if (enable == SETFEATURES_SPINUP)
4346 timeout = ata_probe_timeout ?
4347 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4348 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
1da177e4 4349
83206a29 4350 return err_mask;
1da177e4 4351}
633de4cc 4352EXPORT_SYMBOL_GPL(ata_dev_set_feature);
1da177e4 4353
8bf62ece
AL
4354/**
4355 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4356 * @dev: Device to which command will be sent
e2a7f77a
RD
4357 * @heads: Number of heads (taskfile parameter)
4358 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4359 *
4360 * LOCKING:
6aff8f1f
TH
4361 * Kernel thread context (may sleep)
4362 *
4363 * RETURNS:
4364 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4365 */
3373efd8
TH
4366static unsigned int ata_dev_init_params(struct ata_device *dev,
4367 u16 heads, u16 sectors)
8bf62ece 4368{
a0123703 4369 struct ata_taskfile tf;
6aff8f1f 4370 unsigned int err_mask;
8bf62ece
AL
4371
4372 /* Number of sectors per track 1-255. Number of heads 1-16 */
4373 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4374 return AC_ERR_INVALID;
8bf62ece
AL
4375
4376 /* set up init dev params taskfile */
4633778b 4377 ata_dev_dbg(dev, "init dev params \n");
8bf62ece 4378
3373efd8 4379 ata_tf_init(dev, &tf);
a0123703
TH
4380 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4381 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4382 tf.protocol = ATA_PROT_NODATA;
4383 tf.nsect = sectors;
4384 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4385
2b789108 4386 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4387 /* A clean abort indicates an original or just out of spec drive
4388 and we should continue as we issue the setup based on the
4389 drive reported working geometry */
efcef265 4390 if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
18b2466c 4391 err_mask = 0;
8bf62ece 4392
6aff8f1f 4393 return err_mask;
8bf62ece
AL
4394}
4395
1da177e4 4396/**
5895ef9a 4397 * atapi_check_dma - Check whether ATAPI DMA can be supported
1da177e4
LT
4398 * @qc: Metadata associated with taskfile to check
4399 *
780a87f7
JG
4400 * Allow low-level driver to filter ATA PACKET commands, returning
4401 * a status indicating whether or not it is OK to use DMA for the
4402 * supplied PACKET command.
4403 *
1da177e4 4404 * LOCKING:
624d5c51
TH
4405 * spin_lock_irqsave(host lock)
4406 *
4407 * RETURNS: 0 when ATAPI DMA can be used
4408 * nonzero otherwise
4409 */
5895ef9a 4410int atapi_check_dma(struct ata_queued_cmd *qc)
624d5c51
TH
4411{
4412 struct ata_port *ap = qc->ap;
71601958 4413
624d5c51
TH
4414 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4415 * few ATAPI devices choke on such DMA requests.
4416 */
6a87e42e
TH
4417 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4418 unlikely(qc->nbytes & 15))
624d5c51 4419 return 1;
e2cec771 4420
624d5c51
TH
4421 if (ap->ops->check_atapi_dma)
4422 return ap->ops->check_atapi_dma(qc);
e2cec771 4423
624d5c51
TH
4424 return 0;
4425}
1da177e4 4426
624d5c51
TH
4427/**
4428 * ata_std_qc_defer - Check whether a qc needs to be deferred
4429 * @qc: ATA command in question
4430 *
4431 * Non-NCQ commands cannot run with any other command, NCQ or
4432 * not. As upper layer only knows the queue depth, we are
4433 * responsible for maintaining exclusion. This function checks
4434 * whether a new command @qc can be issued.
4435 *
4436 * LOCKING:
4437 * spin_lock_irqsave(host lock)
4438 *
4439 * RETURNS:
4440 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4441 */
4442int ata_std_qc_defer(struct ata_queued_cmd *qc)
4443{
4444 struct ata_link *link = qc->dev->link;
e2cec771 4445
179b310a 4446 if (ata_is_ncq(qc->tf.protocol)) {
624d5c51
TH
4447 if (!ata_tag_valid(link->active_tag))
4448 return 0;
4449 } else {
4450 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4451 return 0;
4452 }
e2cec771 4453
624d5c51
TH
4454 return ATA_DEFER_LINK;
4455}
a52fbcfc 4456EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6912ccd5 4457
/**
 * ata_noop_qc_prep - qc_prep callback for drivers that need no preparation
 * @qc: command to prepare (unused)
 *
 * RETURNS:
 * Always AC_ERR_OK.
 */
enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
	return AC_ERR_OK;
}
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4 4463
624d5c51
TH
4464/**
4465 * ata_sg_init - Associate command with scatter-gather table.
4466 * @qc: Command to be associated
4467 * @sg: Scatter-gather table.
4468 * @n_elem: Number of elements in s/g table.
4469 *
4470 * Initialize the data-related elements of queued_cmd @qc
4471 * to point to a scatter-gather table @sg, containing @n_elem
4472 * elements.
4473 *
4474 * LOCKING:
4475 * spin_lock_irqsave(host lock)
4476 */
4477void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4478 unsigned int n_elem)
4479{
4480 qc->sg = sg;
4481 qc->n_elem = n_elem;
4482 qc->cursg = qc->sg;
4483}
bb5cb290 4484
2874d5ee
GU
4485#ifdef CONFIG_HAS_DMA
4486
4487/**
4488 * ata_sg_clean - Unmap DMA memory associated with command
4489 * @qc: Command containing DMA memory to be released
4490 *
4491 * Unmap all mapped DMA memory associated with this command.
4492 *
4493 * LOCKING:
4494 * spin_lock_irqsave(host lock)
4495 */
af27e01c 4496static void ata_sg_clean(struct ata_queued_cmd *qc)
2874d5ee
GU
4497{
4498 struct ata_port *ap = qc->ap;
4499 struct scatterlist *sg = qc->sg;
4500 int dir = qc->dma_dir;
4501
4502 WARN_ON_ONCE(sg == NULL);
4503
2874d5ee
GU
4504 if (qc->n_elem)
4505 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4506
4507 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4508 qc->sg = NULL;
4509}
4510
624d5c51
TH
4511/**
4512 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4513 * @qc: Command with scatter-gather table to be mapped.
4514 *
4515 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4516 *
4517 * LOCKING:
4518 * spin_lock_irqsave(host lock)
4519 *
4520 * RETURNS:
4521 * Zero on success, negative on error.
4522 *
4523 */
4524static int ata_sg_setup(struct ata_queued_cmd *qc)
4525{
4526 struct ata_port *ap = qc->ap;
4527 unsigned int n_elem;
1da177e4 4528
624d5c51
TH
4529 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4530 if (n_elem < 1)
4531 return -1;
bb5cb290 4532
5825627c 4533 qc->orig_n_elem = qc->n_elem;
624d5c51
TH
4534 qc->n_elem = n_elem;
4535 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4 4536
624d5c51 4537 return 0;
1da177e4
LT
4538}
4539
2874d5ee
GU
4540#else /* !CONFIG_HAS_DMA */
4541
4542static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
4543static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
4544
4545#endif /* !CONFIG_HAS_DMA */
4546
624d5c51
TH
4547/**
4548 * swap_buf_le16 - swap halves of 16-bit words in place
4549 * @buf: Buffer to swap
4550 * @buf_words: Number of 16-bit words in buffer.
4551 *
4552 * Swap halves of 16-bit words if needed to convert from
4553 * little-endian byte order to native cpu byte order, or
4554 * vice-versa.
4555 *
4556 * LOCKING:
4557 * Inherited from caller.
4558 */
4559void swap_buf_le16(u16 *buf, unsigned int buf_words)
8061f5f0 4560{
624d5c51
TH
4561#ifdef __BIG_ENDIAN
4562 unsigned int i;
8061f5f0 4563
624d5c51
TH
4564 for (i = 0; i < buf_words; i++)
4565 buf[i] = le16_to_cpu(buf[i]);
4566#endif /* __BIG_ENDIAN */
8061f5f0
TH
4567}
4568
8a8bc223
TH
4569/**
4570 * ata_qc_free - free unused ata_queued_cmd
4571 * @qc: Command to complete
4572 *
4573 * Designed to free unused ata_queued_cmd object
4574 * in case something prevents using it.
4575 *
4576 * LOCKING:
4577 * spin_lock_irqsave(host lock)
4578 */
4579void ata_qc_free(struct ata_queued_cmd *qc)
4580{
8a8bc223 4581 qc->flags = 0;
4f1a22ee 4582 if (ata_tag_valid(qc->tag))
8a8bc223 4583 qc->tag = ATA_TAG_POISON;
8a8bc223
TH
4584}
4585
/**
 * __ata_qc_complete - complete a qc without result-TF handling
 * @qc: command to complete
 *
 * Unmap DMA memory, clear the per-link and per-port active-command
 * bookkeeping for @qc, and invoke its completion callback.  Callers
 * are responsible for any result taskfile handling.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (ata_is_ncq(qc->tf.protocol)) {
		/* NCQ: drop this command's bit from the link's sactive mask */
		link->sactive &= ~(1 << qc->hw_tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		/* non-NCQ: the link's single active slot is now free */
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1ULL << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4624
39599a53
TH
4625static void fill_result_tf(struct ata_queued_cmd *qc)
4626{
4627 struct ata_port *ap = qc->ap;
4628
39599a53 4629 qc->result_tf.flags = qc->tf.flags;
22183bf5 4630 ap->ops->qc_fill_rtf(qc);
39599a53
TH
4631}
4632
00115e0f
TH
4633static void ata_verify_xfer(struct ata_queued_cmd *qc)
4634{
4635 struct ata_device *dev = qc->dev;
4636
eb0effdf 4637 if (!ata_is_data(qc->tf.protocol))
00115e0f
TH
4638 return;
4639
4640 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4641 return;
4642
4643 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4644}
4645
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed, with either an ok or not-ok status.
 *
 *	Refrain from calling this function multiple times when
 *	successfully completing multiple NCQ commands.
 *	ata_qc_complete_multiple() should be used instead, which will
 *	properly update IRQ expect state.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Trigger the LED (if available) */
	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* new EH path */
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		/*
		 * Finish internal commands without any further processing
		 * and always with the result TF filled.
		 */
		if (unlikely(ata_tag_internal(qc->tag))) {
			fill_result_tf(qc);
			trace_ata_qc_complete_internal(qc);
			__ata_qc_complete(qc);
			return;
		}

		/*
		 * Non-internal qc has failed.  Fill the result TF and
		 * summon EH.
		 */
		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			fill_result_tf(qc);
			trace_ata_qc_complete_failed(qc);
			ata_qc_schedule_eh(qc);
			return;
		}

		/* successful completion must not happen on a frozen port */
		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		trace_ata_qc_complete_done(qc);
		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			/* only cache-control subcommands need revalidation */
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF &&
			    qc->tf.feature != SETFEATURES_RA_ON &&
			    qc->tf.feature != SETFEATURES_RA_OFF)
				break;
			fallthrough;
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH path */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
EXPORT_SYMBOL_GPL(ata_qc_complete);
f686bcb8 4756
8385d756
SH
4757/**
4758 * ata_qc_get_active - get bitmask of active qcs
4759 * @ap: port in question
4760 *
4761 * LOCKING:
4762 * spin_lock_irqsave(host lock)
4763 *
4764 * RETURNS:
4765 * Bitmask of active qcs
4766 */
4767u64 ata_qc_get_active(struct ata_port *ap)
4768{
4769 u64 qc_active = ap->qc_active;
4770
4771 /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
4772 if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
4773 qc_active |= (1 << 0);
4774 qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
4775 }
4776
4777 return qc_active;
4778}
4779EXPORT_SYMBOL_GPL(ata_qc_get_active);
4780
1da177e4
LT
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	On any failure (bad sg, qc_prep or qc_issue error) the qc is
 *	completed with an error mask set, so the caller never has to
 *	clean up a partially-issued command itself.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		/* NCQ: track outstanding commands in link->sactive; the
		 * link counts as active while any NCQ tag is in flight. */
		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->hw_tag;
	} else {
		/* non-NCQ: exactly one command may be active per link */
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1ULL << qc->tag;

	/*
	 * We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
		goto sys_err;

	/* DMA-map the buffer when the transfer itself is DMA, or when
	 * the LLD wants PIO data moved by DMA (ATA_FLAG_PIO_DMA). */
	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	trace_ata_qc_prep(qc);
	qc->err_mask |= ap->ops->qc_prep(qc);
	if (unlikely(qc->err_mask))
		goto err;
	trace_ata_qc_issue(qc);
	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	/* complete with error; EH will be summoned from completion */
	ata_qc_complete(qc);
}
4856
34bf2170 4857/**
b1c72916 4858 * ata_phys_link_online - test whether the given link is online
936fd732 4859 * @link: ATA link to test
34bf2170 4860 *
936fd732
TH
4861 * Test whether @link is online. Note that this function returns
4862 * 0 if online status of @link cannot be obtained, so
4863 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4864 *
4865 * LOCKING:
4866 * None.
4867 *
4868 * RETURNS:
b5b3fa38 4869 * True if the port online status is available and online.
34bf2170 4870 */
b1c72916 4871bool ata_phys_link_online(struct ata_link *link)
34bf2170
TH
4872{
4873 u32 sstatus;
4874
936fd732 4875 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 4876 ata_sstatus_online(sstatus))
b5b3fa38
TH
4877 return true;
4878 return false;
34bf2170
TH
4879}
4880
4881/**
b1c72916 4882 * ata_phys_link_offline - test whether the given link is offline
936fd732 4883 * @link: ATA link to test
34bf2170 4884 *
936fd732
TH
4885 * Test whether @link is offline. Note that this function
4886 * returns 0 if offline status of @link cannot be obtained, so
4887 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4888 *
4889 * LOCKING:
4890 * None.
4891 *
4892 * RETURNS:
b5b3fa38 4893 * True if the port offline status is available and offline.
34bf2170 4894 */
b1c72916 4895bool ata_phys_link_offline(struct ata_link *link)
34bf2170
TH
4896{
4897 u32 sstatus;
4898
936fd732 4899 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 4900 !ata_sstatus_online(sstatus))
b5b3fa38
TH
4901 return true;
4902 return false;
34bf2170 4903}
0baab86b 4904
b1c72916
TH
4905/**
4906 * ata_link_online - test whether the given link is online
4907 * @link: ATA link to test
4908 *
4909 * Test whether @link is online. This is identical to
4910 * ata_phys_link_online() when there's no slave link. When
4911 * there's a slave link, this function should only be called on
4912 * the master link and will return true if any of M/S links is
4913 * online.
4914 *
4915 * LOCKING:
4916 * None.
4917 *
4918 * RETURNS:
4919 * True if the port online status is available and online.
4920 */
4921bool ata_link_online(struct ata_link *link)
4922{
4923 struct ata_link *slave = link->ap->slave_link;
4924
4925 WARN_ON(link == slave); /* shouldn't be called on slave link */
4926
4927 return ata_phys_link_online(link) ||
4928 (slave && ata_phys_link_online(slave));
4929}
a52fbcfc 4930EXPORT_SYMBOL_GPL(ata_link_online);
b1c72916
TH
4931
4932/**
4933 * ata_link_offline - test whether the given link is offline
4934 * @link: ATA link to test
4935 *
4936 * Test whether @link is offline. This is identical to
4937 * ata_phys_link_offline() when there's no slave link. When
4938 * there's a slave link, this function should only be called on
4939 * the master link and will return true if both M/S links are
4940 * offline.
4941 *
4942 * LOCKING:
4943 * None.
4944 *
4945 * RETURNS:
4946 * True if the port offline status is available and offline.
4947 */
4948bool ata_link_offline(struct ata_link *link)
4949{
4950 struct ata_link *slave = link->ap->slave_link;
4951
4952 WARN_ON(link == slave); /* shouldn't be called on slave link */
4953
4954 return ata_phys_link_offline(link) &&
4955 (!slave || ata_phys_link_offline(slave));
4956}
a52fbcfc 4957EXPORT_SYMBOL_GPL(ata_link_offline);
b1c72916 4958
6ffa01d8 4959#ifdef CONFIG_PM
bc6e7c4b
DW
/*
 * ata_port_request_pm - hand a PM operation (suspend/resume) to EH
 * @ap: port to operate on
 * @mesg: PM message describing the target power state
 * @action: EH actions to request on every link (e.g. ATA_EH_RESET)
 * @ehi_flags: EH info flags to set on every link
 * @async: if false, wait for EH to finish the PM operation
 *
 * The actual suspend/resume work is done by the EH thread; this
 * function only records the request under the port lock and kicks EH.
 */
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
				unsigned int action, unsigned int ehi_flags,
				bool async)
{
	struct ata_link *link;
	unsigned long flags;

	/* Previous resume operation might still be in
	 * progress.  Wait for PM_PENDING to clear.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* request PM ops to EH */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	/* synchronous callers wait until EH has consumed the request */
	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}
}
4994
bc6e7c4b
DW
/*
 * On some hardware, device fails to respond after spun down for suspend.  As
 * the device won't be used before being resumed, we don't need to touch the
 * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
 *
 * http://thread.gmane.org/gmane.linux.ide/46764
 */
static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
						| ATA_EHI_NO_AUTOPSY
						| ATA_EHI_NO_RECOVERY;

/* Synchronous port suspend: returns after EH has finished suspending. */
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}

/* Asynchronous port suspend: request is queued to EH and we return. */
static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
5015
bc6e7c4b 5016static int ata_port_pm_suspend(struct device *dev)
5ef41082 5017{
bc6e7c4b
DW
5018 struct ata_port *ap = to_ata_port(dev);
5019
5ef41082
LM
5020 if (pm_runtime_suspended(dev))
5021 return 0;
5022
bc6e7c4b
DW
5023 ata_port_suspend(ap, PMSG_SUSPEND);
5024 return 0;
33574d68
LM
5025}
5026
bc6e7c4b 5027static int ata_port_pm_freeze(struct device *dev)
33574d68 5028{
bc6e7c4b
DW
5029 struct ata_port *ap = to_ata_port(dev);
5030
33574d68 5031 if (pm_runtime_suspended(dev))
f5e6d0d0 5032 return 0;
33574d68 5033
bc6e7c4b
DW
5034 ata_port_suspend(ap, PMSG_FREEZE);
5035 return 0;
33574d68
LM
5036}
5037
bc6e7c4b 5038static int ata_port_pm_poweroff(struct device *dev)
33574d68 5039{
bc6e7c4b
DW
5040 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5041 return 0;
5ef41082
LM
5042}
5043
bc6e7c4b
DW
/* Resume goes through a reset; skip autopsy and keep EH quiet. */
static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
						| ATA_EHI_QUIET;

/* Synchronous port resume: returns after EH has finished resuming. */
static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}

/* Asynchronous port resume: request is queued to EH and we return. */
static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}
5056
bc6e7c4b 5057static int ata_port_pm_resume(struct device *dev)
e90b1e5a 5058{
200421a8 5059 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
bc6e7c4b
DW
5060 pm_runtime_disable(dev);
5061 pm_runtime_set_active(dev);
5062 pm_runtime_enable(dev);
5063 return 0;
e90b1e5a
LM
5064}
5065
7e15e9be
AL
/*
 * For ODDs, the upper layer will poll for media change every few seconds,
 * which will make it enter and leave suspend state every few seconds. And
 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
 * is very little and the ODD may malfunction after constantly being reset.
 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
 * ODD is attached to the port.
 */
static int ata_port_runtime_idle(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);
	struct ata_link *link;
	struct ata_device *adev;

	/* veto runtime suspend if any enabled ATAPI device lacks ZPODD */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(adev, link, ENABLED)
			if (adev->class == ATA_DEV_ATAPI &&
			    !zpodd_dev_enabled(adev))
				return -EBUSY;
	}

	return 0;
}
5089
a7ff60db
AL
/* Runtime PM suspend callback for a port. */
static int ata_port_runtime_suspend(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
	return 0;
}

/* Runtime PM resume callback for a port. */
static int ata_port_runtime_resume(struct device *dev)
{
	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
	return 0;
}
5101
5ef41082 5102static const struct dev_pm_ops ata_port_pm_ops = {
bc6e7c4b
DW
5103 .suspend = ata_port_pm_suspend,
5104 .resume = ata_port_pm_resume,
5105 .freeze = ata_port_pm_freeze,
5106 .thaw = ata_port_pm_resume,
5107 .poweroff = ata_port_pm_poweroff,
5108 .restore = ata_port_pm_resume,
9ee4f393 5109
a7ff60db
AL
5110 .runtime_suspend = ata_port_runtime_suspend,
5111 .runtime_resume = ata_port_runtime_resume,
9ee4f393 5112 .runtime_idle = ata_port_runtime_idle,
5ef41082
LM
5113};
5114
2fcbdcb4
DW
/* sas ports don't participate in pm runtime management of ata_ports,
 * and need to resume ata devices at the domain level, not the per-port
 * level. sas suspend/resume is async to allow parallel port recovery
 * since sas has multiple ata_port instances per Scsi_Host.
 */
void ata_sas_port_suspend(struct ata_port *ap)
{
	ata_port_suspend_async(ap, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);

void ata_sas_port_resume(struct ata_port *ap)
{
	ata_port_resume_async(ap, PMSG_RESUME);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
2fcbdcb4 5131
500530f6 5132/**
cca3974e
JG
5133 * ata_host_suspend - suspend host
5134 * @host: host to suspend
500530f6
TH
5135 * @mesg: PM message
5136 *
5ef41082 5137 * Suspend @host. Actual operation is performed by port suspend.
500530f6 5138 */
ec87cf37 5139void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 5140{
5ef41082 5141 host->dev->power.power_state = mesg;
500530f6 5142}
a52fbcfc 5143EXPORT_SYMBOL_GPL(ata_host_suspend);
500530f6
TH
5144
5145/**
cca3974e
JG
5146 * ata_host_resume - resume host
5147 * @host: host to resume
500530f6 5148 *
5ef41082 5149 * Resume @host. Actual operation is performed by port resume.
500530f6 5150 */
cca3974e 5151void ata_host_resume(struct ata_host *host)
500530f6 5152{
72ad6ec4 5153 host->dev->power.power_state = PMSG_ON;
500530f6 5154}
a52fbcfc 5155EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 5156#endif
500530f6 5157
8df82c13 5158const struct device_type ata_port_type = {
5ef41082
LM
5159 .name = "ata_port",
5160#ifdef CONFIG_PM
5161 .pm = &ata_port_pm_ops,
5162#endif
5163};
5164
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe only the per-probe region of the struct; fields outside
	 * [CLEAR_BEGIN, CLEAR_END) survive re-probes */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);

	/* start with unrestricted transfer masks; probing narrows them */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5199
4fb37a25
TH
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5236
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize ``link->[hw_]sata_spd_limit`` to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	/* snapshot SControl; the saved value is restored by EH later */
	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* bits 7:4 of SControl hold the configured speed limit */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	/* apply any libata.force= command-line overrides */
	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
5269
1da177e4 5270/**
f3187195
TH
5271 * ata_port_alloc - allocate and initialize basic ATA port resources
5272 * @host: ATA host this allocated port belongs to
1da177e4 5273 *
f3187195
TH
5274 * Allocate and initialize basic ATA port resources.
5275 *
5276 * RETURNS:
5277 * Allocate ATA port on success, NULL on failure.
0cba632b 5278 *
1da177e4 5279 * LOCKING:
f3187195 5280 * Inherited from calling layer (may sleep).
1da177e4 5281 */
f3187195 5282struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 5283{
f3187195 5284 struct ata_port *ap;
1da177e4 5285
f3187195
TH
5286 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5287 if (!ap)
5288 return NULL;
4fca377f 5289
7b3a24c5 5290 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
cca3974e 5291 ap->lock = &host->lock;
f3187195 5292 ap->print_id = -1;
e628dc99 5293 ap->local_port_no = -1;
cca3974e 5294 ap->host = host;
f3187195 5295 ap->dev = host->dev;
bd5d825c 5296
ad72cf98 5297 mutex_init(&ap->scsi_scan_mutex);
65f27f38
DH
5298 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5299 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5300 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5301 init_waitqueue_head(&ap->eh_wait_q);
45fabbb7 5302 init_completion(&ap->park_req_pending);
b93ab338
KC
5303 timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
5304 TIMER_DEFERRABLE);
1da177e4 5305
838df628 5306 ap->cbl = ATA_CBL_NONE;
838df628 5307
8989805d 5308 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
5309
5310#ifdef ATA_IRQ_TRAP
5311 ap->stats.unhandled_irq = 1;
5312 ap->stats.idle_irq = 1;
5313#endif
270390e1
TH
5314 ata_sff_port_init(ap);
5315
1da177e4 5316 return ap;
1da177e4
LT
5317}
5318
2623c7a5 5319static void ata_devres_release(struct device *gendev, void *res)
f0d36efd
TH
5320{
5321 struct ata_host *host = dev_get_drvdata(gendev);
5322 int i;
5323
1aa506e4
TH
5324 for (i = 0; i < host->n_ports; i++) {
5325 struct ata_port *ap = host->ports[i];
5326
4911487a
TH
5327 if (!ap)
5328 continue;
5329
5330 if (ap->scsi_host)
1aa506e4
TH
5331 scsi_host_put(ap->scsi_host);
5332
2623c7a5
TK
5333 }
5334
5335 dev_set_drvdata(gendev, NULL);
5336 ata_host_put(host);
5337}
5338
5339static void ata_host_release(struct kref *kref)
5340{
5341 struct ata_host *host = container_of(kref, struct ata_host, kref);
5342 int i;
5343
5344 for (i = 0; i < host->n_ports; i++) {
5345 struct ata_port *ap = host->ports[i];
5346
633273a3 5347 kfree(ap->pmp_link);
b1c72916 5348 kfree(ap->slave_link);
4911487a 5349 kfree(ap);
1aa506e4
TH
5350 host->ports[i] = NULL;
5351 }
2623c7a5
TK
5352 kfree(host);
5353}
1aa506e4 5354
2623c7a5
TK
/* Take a reference on @host. */
void ata_host_get(struct ata_host *host)
{
	kref_get(&host->kref);
}

/* Drop a reference on @host; frees it via ata_host_release() on last put. */
void ata_host_put(struct ata_host *host)
{
	kref_put(&host->kref, ata_host_release);
}
EXPORT_SYMBOL_GPL(ata_host_put);
f0d36efd 5365
f3187195
TH
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;
	void *dr;

	/* alloc a container for our list of ATA ports (buses);
	 * the +1 keeps a NULL sentinel after the last port slot */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = kzalloc(sz, GFP_KERNEL);
	if (!host)
		return NULL;

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		goto err_free;

	/* devres hook that releases the host when @dev goes away */
	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
	if (!dr)
		goto err_out;

	devres_add(dev, dr);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;
	kref_init(&host->kref);

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

	/* releasing the devres group also runs ata_devres_release() */
 err_out:
	devres_release_group(dev, NULL);
 err_free:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
f3187195 5437
f5cda257
TH
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	/* pi carries over from the last non-NULL ppi entry, so a short
	 * NULL-terminated array covers the remaining ports */
	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port ops double as the host ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
f5cda257 5486
32ebbc0c
TH
5487static void ata_host_stop(struct device *gendev, void *res)
5488{
5489 struct ata_host *host = dev_get_drvdata(gendev);
5490 int i;
5491
5492 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5493
5494 for (i = 0; i < host->n_ports; i++) {
5495 struct ata_port *ap = host->ports[i];
5496
5497 if (ap->ops->port_stop)
5498 ap->ops->port_stop(ap);
5499 }
5500
5501 if (host->ops->host_stop)
5502 host->ops->host_stop(host);
5503}
5504
029cfd6b
TH
/**
 *	ata_finalize_port_ops - finalize ata_port_operations
 *	@ops: ata_port_operations to finalize
 *
 *	An ata_port_operations can inherit from another ops and that
 *	ops can again inherit from another.  This can go on as many
 *	times as necessary as long as there is no loop in the
 *	inheritance chain.
 *
 *	Ops tables are finalized when the host is started.  NULL or
 *	unspecified entries are inherited from the closet ancestor
 *	which has the method and the entry is populated with it.
 *	After finalization, the ops table directly points to all the
 *	methods and ->inherits is no longer necessary and cleared.
 *
 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
 *
 *	LOCKING:
 *	None.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	/* treat the ops struct as a flat array of pointers, up to (but
	 * not including) the ->inherits member */
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	/* serialize: ops tables are shared and finalized in place */
	spin_lock(&lock);

	/* walk the inheritance chain, filling each still-NULL slot from
	 * the closest ancestor that defines it */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ATA_OP_NULL entries are ERR_PTR sentinels meaning "force NULL" */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
5554
ecef7253
TH
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops is not initialized yet, it is set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	/* idempotent: a started host needs no further work */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops && host->ops->host_stop)
		have_stop = 1;

	/* only install the devres stop hook when something needs stopping */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				/* -ENODEV means "port absent"; not worth a log */
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

	/* unwind: stop only the ports started so far (indices < i) */
 err_out:
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_start);
ecef7253 5635
b03732f0 5636/**
94bd5719 5637 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
cca3974e
JG
5638 * @host: host to initialize
5639 * @dev: device host is attached to
cca3974e 5640 * @ops: port_ops
b03732f0 5641 *
b03732f0 5642 */
cca3974e 5643void ata_host_init(struct ata_host *host, struct device *dev,
8d8e7d13 5644 struct ata_port_operations *ops)
b03732f0 5645{
cca3974e 5646 spin_lock_init(&host->lock);
c0c362b6 5647 mutex_init(&host->eh_mutex);
69278f79 5648 host->n_tags = ATA_MAX_QUEUE;
cca3974e 5649 host->dev = dev;
cca3974e 5650 host->ops = ops;
2fa4a326 5651 kref_init(&host->kref);
b03732f0 5652}
a52fbcfc 5653EXPORT_SYMBOL_GPL(ata_host_init);
b03732f0 5654
9508a66f 5655void __ata_port_probe(struct ata_port *ap)
79318057 5656{
9508a66f
DW
5657 struct ata_eh_info *ehi = &ap->link.eh_info;
5658 unsigned long flags;
886ad09f 5659
9508a66f
DW
5660 /* kick EH for boot probing */
5661 spin_lock_irqsave(ap->lock, flags);
79318057 5662
9508a66f
DW
5663 ehi->probe_mask |= ATA_ALL_DEVICES;
5664 ehi->action |= ATA_EH_RESET;
5665 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
79318057 5666
9508a66f
DW
5667 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5668 ap->pflags |= ATA_PFLAG_LOADING;
5669 ata_port_schedule_eh(ap);
79318057 5670
9508a66f
DW
5671 spin_unlock_irqrestore(ap->lock, flags);
5672}
79318057 5673
9508a66f
DW
5674int ata_port_probe(struct ata_port *ap)
5675{
5676 int rc = 0;
79318057 5677
9508a66f
DW
5678 if (ap->ops->error_handler) {
5679 __ata_port_probe(ap);
79318057
AV
5680 ata_port_wait_eh(ap);
5681 } else {
79318057 5682 rc = ata_bus_probe(ap);
79318057 5683 }
238c9cf9
JB
5684 return rc;
5685}
5686
5687
5688static void async_port_probe(void *data, async_cookie_t cookie)
5689{
5690 struct ata_port *ap = data;
4fca377f 5691
238c9cf9
JB
5692 /*
5693 * If we're not allowed to scan this host in parallel,
5694 * we need to wait until all previous scans have completed
5695 * before going further.
5696 * Jeff Garzik says this is only within a controller, so we
5697 * don't need to wait for port 0, only for later ports.
5698 */
5699 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5700 async_synchronize_cookie(cookie);
5701
5702 (void)ata_port_probe(ap);
f29d3b23
AV
5703
5704 /* in order to keep device order, we need to synchronize at this point */
5705 async_synchronize_cookie(cookie);
5706
5707 ata_scsi_scan_host(ap, 1);
79318057 5708}
238c9cf9 5709
f3187195
TH
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* Create associated sysfs transport objects  */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ap->cookie = async_schedule(async_port_probe, ap);
	}

	return 0;

	/* undo ata_tport_add() for the transport objects already created */
 err_tadd:
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
EXPORT_SYMBOL_GPL(ata_host_register);
f3187195 5808
f5cda257
TH
5809/**
5810 * ata_host_activate - start host, request IRQ and register it
5811 * @host: target ATA host
5812 * @irq: IRQ to request
5813 * @irq_handler: irq_handler used when requesting IRQ
5814 * @irq_flags: irq_flags used when requesting IRQ
5815 * @sht: scsi_host_template to use when registering the host
5816 *
5817 * After allocating an ATA host and initializing it, most libata
5818 * LLDs perform three steps to activate the host - start host,
c9b5560a 5819 * request IRQ and register it. This helper takes necessary
f5cda257
TH
5820 * arguments and performs the three steps in one go.
5821 *
3d46b2e2
PM
5822 * An invalid IRQ skips the IRQ registration and expects the host to
5823 * have set polling mode on the port. In this case, @irq_handler
5824 * should be NULL.
5825 *
f5cda257
TH
5826 * LOCKING:
5827 * Inherited from calling layer (may sleep).
5828 *
5829 * RETURNS:
5830 * 0 on success, -errno otherwise.
5831 */
5832int ata_host_activate(struct ata_host *host, int irq,
5833 irq_handler_t irq_handler, unsigned long irq_flags,
5834 struct scsi_host_template *sht)
5835{
cbcdd875 5836 int i, rc;
7e22c002 5837 char *irq_desc;
f5cda257
TH
5838
5839 rc = ata_host_start(host);
5840 if (rc)
5841 return rc;
5842
3d46b2e2
PM
5843 /* Special case for polling mode */
5844 if (!irq) {
5845 WARN_ON(irq_handler);
5846 return ata_host_register(host, sht);
5847 }
5848
7e22c002
HK
5849 irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
5850 dev_driver_string(host->dev),
5851 dev_name(host->dev));
5852 if (!irq_desc)
5853 return -ENOMEM;
5854
f5cda257 5855 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7e22c002 5856 irq_desc, host);
f5cda257
TH
5857 if (rc)
5858 return rc;
5859
cbcdd875
TH
5860 for (i = 0; i < host->n_ports; i++)
5861 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 5862
f5cda257
TH
5863 rc = ata_host_register(host, sht);
5864 /* if failed, just free the IRQ and leave ports alone */
5865 if (rc)
5866 devm_free_irq(host->dev, irq, host);
5867
5868 return rc;
5869}
a52fbcfc 5870EXPORT_SYMBOL_GPL(ata_host_activate);
f5cda257 5871
/**
 * ata_port_detach - Detach ATA port in preparation of device removal
 * @ap: ATA port to be detached
 *
 * Detach all ATA devices and the associated SCSI devices of @ap;
 * then, remove the associated SCSI host.  @ap is guaranteed to
 * be quiescent on return from this function.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* ports without new-style EH cannot use the EH based teardown;
	 * go straight to the common cleanup below
	 */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	/* no more hotplug processing once EH is gone */
	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* clean up zpodd on port removal */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (zpodd_dev_enabled(dev))
				zpodd_exit(dev);
		}
	}
	/* delete the transport objects of any port multiplier links */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}
5923
0529c159
TH
5924/**
5925 * ata_host_detach - Detach all ports of an ATA host
5926 * @host: Host to detach
5927 *
5928 * Detach all ports of @host.
5929 *
5930 * LOCKING:
5931 * Kernel thread context (may sleep).
5932 */
5933void ata_host_detach(struct ata_host *host)
5934{
5935 int i;
5936
b5292111
KHF
5937 for (i = 0; i < host->n_ports; i++) {
5938 /* Ensure ata_port probe has completed */
5939 async_synchronize_cookie(host->ports[i]->cookie + 1);
0529c159 5940 ata_port_detach(host->ports[i]);
b5292111 5941 }
562f0c2d
TH
5942
5943 /* the host is dead now, dissociate ACPI */
5944 ata_acpi_dissociate(host);
0529c159 5945}
a52fbcfc 5946EXPORT_SYMBOL_GPL(ata_host_detach);
0529c159 5947
374b1873
JG
5948#ifdef CONFIG_PCI
5949
1da177e4
LT
/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that hot-unplug or
 * module unload event has occurred.  Detach all ports.  Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	/* the host was stashed as driver data at probe time */
	ata_host_detach(pci_get_drvdata(pdev));
}
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
1da177e4 5968
10a663a1
PK
5969void ata_pci_shutdown_one(struct pci_dev *pdev)
5970{
5971 struct ata_host *host = pci_get_drvdata(pdev);
5972 int i;
5973
5974 for (i = 0; i < host->n_ports; i++) {
5975 struct ata_port *ap = host->ports[i];
5976
5977 ap->pflags |= ATA_PFLAG_FROZEN;
5978
5979 /* Disable port interrupts */
5980 if (ap->ops->freeze)
5981 ap->ops->freeze(ap);
5982
5983 /* Stop the port DMA engines */
5984 if (ap->ops->port_stop)
5985 ap->ops->port_stop(ap);
5986 }
5987}
a52fbcfc 5988EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
10a663a1 5989
1da177e4 5990/* move to PCI subsystem */
057ace5e 5991int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
5992{
5993 unsigned long tmp = 0;
5994
5995 switch (bits->width) {
5996 case 1: {
5997 u8 tmp8 = 0;
5998 pci_read_config_byte(pdev, bits->reg, &tmp8);
5999 tmp = tmp8;
6000 break;
6001 }
6002 case 2: {
6003 u16 tmp16 = 0;
6004 pci_read_config_word(pdev, bits->reg, &tmp16);
6005 tmp = tmp16;
6006 break;
6007 }
6008 case 4: {
6009 u32 tmp32 = 0;
6010 pci_read_config_dword(pdev, bits->reg, &tmp32);
6011 tmp = tmp32;
6012 break;
6013 }
6014
6015 default:
6016 return -EINVAL;
6017 }
6018
6019 tmp &= bits->mask;
6020
6021 return (tmp == bits->val) ? 1 : 0;
6022}
a52fbcfc 6023EXPORT_SYMBOL_GPL(pci_test_config_bits);
9b847548 6024
6ffa01d8 6025#ifdef CONFIG_PM
/* Suspend helper for PCI ATA hosts: save config space, disable the
 * function and, for a true sleep transition, drop it to D3hot.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* only power down for a real sleep event (e.g. not freeze) */
	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
9b847548 6035
553c4aa6 6036int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6037{
553c4aa6
TH
6038 int rc;
6039
9b847548
JA
6040 pci_set_power_state(pdev, PCI_D0);
6041 pci_restore_state(pdev);
553c4aa6 6042
b878ca5d 6043 rc = pcim_enable_device(pdev);
553c4aa6 6044 if (rc) {
a44fec1f
JP
6045 dev_err(&pdev->dev,
6046 "failed to enable device after resume (%d)\n", rc);
553c4aa6
TH
6047 return rc;
6048 }
6049
9b847548 6050 pci_set_master(pdev);
553c4aa6 6051 return 0;
500530f6 6052}
a52fbcfc 6053EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
500530f6 6054
/* Default PCI suspend callback: quiesce the ATA host first, then power
 * down the PCI function.
 */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	ata_host_suspend(host, mesg);

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
500530f6
TH
6066
/* Default PCI resume callback: re-power the PCI function and, if that
 * succeeded, resume the ATA host.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc = ata_pci_device_do_resume(pdev);

	/* only resume the ports once the PCI function is back */
	if (!rc)
		ata_host_resume(host);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6078#endif /* CONFIG_PM */
1da177e4
LT
6079#endif /* CONFIG_PCI */
6080
b7db04d9
BN
/**
 * ata_platform_remove_one - Platform layer callback for device removal
 * @pdev: Platform device that was removed
 *
 * Platform layer indicates to libata via this hook that hot-unplug or
 * module unload event has occurred.  Detach all ports.  Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from platform layer (may sleep).
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
	/* host pointer was stored as driver data at probe time */
	ata_host_detach(platform_get_drvdata(pdev));

	return 0;
}
EXPORT_SYMBOL_GPL(ata_platform_remove_one);
b7db04d9 6101
bf89b0bf 6102#ifdef CONFIG_ATA_FORCE
33267325
TH
/* Parse a single comma-separated libata.force parameter of the form
 * [ID:]VAL into @force_ent.  ID is "port" or "port.device"; VAL is
 * looked up (case-insensitively, with unambiguous-prefix matching) in
 * force_tbl below.  *cur is advanced past the parsed parameter; on
 * failure *reason points to a static explanation string.
 * Returns 0 on success, -EINVAL on parse error.  Note: parsing is
 * destructive, separators in the buffer are overwritten with '\0'.
 */
static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	/* Recognized force values; __initconst since the table is only
	 * needed while the boot parameter is parsed.
	 */
	static const struct ata_force_param force_tbl[] __initconst = {
		{ "40c",	.cbl		= ATA_CBL_PATA40 },
		{ "80c",	.cbl		= ATA_CBL_PATA80 },
		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
		{ "sata",	.cbl		= ATA_CBL_SATA },
		{ "1.5Gbps",	.spd_limit	= 1 },
		{ "3.0Gbps",	.spd_limit	= 2 },
		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
		{ "noncqati",	.horkage_on	= ATA_HORKAGE_NO_NCQ_ON_ATI },
		{ "ncqati",	.horkage_off	= ATA_HORKAGE_NO_NCQ_ON_ATI },
		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
	};
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;

	/* find where this param ends and update *cur */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == '\0')
		*cur = p;
	else
		*cur = p + 1;

	/* terminate this parameter in place */
	*p = '\0';

	/* parse: no ':' means the whole token is a value with no ID */
	p = strchr(start, ':');
	if (!p) {
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* parse id: optional ".device" suffix after the port number */
	p = strchr(id, '.');
	if (p) {
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		/* an exact match always wins over prefix matches */
		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}
6238
/* Parse the whole libata.force boot parameter (ata_force_param_buf)
 * into the global ata_force_tbl/ata_force_tbl_size.  Bad entries are
 * logged and skipped rather than aborting the whole parse.
 */
static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* calculate maximum number of params and allocate force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		/* a bare value inherits the previously named port/device */
		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}
1da177e4 6283
bf89b0bf
BZ
/* Release the parsed libata.force table allocated above. */
static void ata_free_force_param(void)
{
	kfree(ata_force_tbl);
}
#else
/* CONFIG_ATA_FORCE=n: no force-parameter support, stubs only */
static inline void ata_parse_force_param(void) { }
static inline void ata_free_force_param(void) { }
#endif
6292
1da177e4
LT
/* Module init: parse libata.force, bring up the SFF helper library and
 * register the SCSI transport template.
 */
static int __init ata_init(void)
{
	int rc;

	ata_parse_force_param();

	rc = ata_sff_init();
	if (rc) {
		/* undo the force-table allocation on failure */
		ata_free_force_param();
		return rc;
	}

	libata_transport_init();
	ata_scsi_transport_template = ata_attach_transport();
	if (!ata_scsi_transport_template) {
		ata_sff_exit();
		rc = -ENOMEM;
		goto err_out;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;

err_out:
	return rc;
}
6319
/* Module exit: tear everything down in reverse order of ata_init(). */
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	ata_free_force_param();
}
6327
a4625085 6328subsys_initcall(ata_init);
1da177e4
LT
6329module_exit(ata_exit);
6330
/* allow at most 1 message per HZ/5 interval */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/* Returns nonzero when the caller may emit a rate-limited message. */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
EXPORT_SYMBOL_GPL(ata_ratelimit);
67846b30 6338
c0c362b6
TH
/**
 * ata_msleep - ATA EH owner aware msleep
 * @ap: ATA port to attribute the sleep to
 * @msecs: duration to sleep in milliseconds
 *
 * Sleeps @msecs.  If the current task is owner of @ap's EH, the
 * ownership is released before going to sleep and reacquired
 * after the sleep is complete.  IOW, other ports sharing the
 * @ap->host will be allowed to own the EH while this task is
 * sleeping.
 *
 * LOCKING:
 * Might sleep.
 */
void ata_msleep(struct ata_port *ap, unsigned int msecs)
{
	bool owns_eh = ap && ap->host->eh_owner == current;

	if (owns_eh)
		ata_eh_release(ap);

	if (msecs < 20) {
		/* msleep() granularity is too coarse for short waits;
		 * use usleep_range() instead
		 */
		unsigned long usecs = msecs * USEC_PER_MSEC;
		usleep_range(usecs, usecs + 50);
	} else {
		msleep(msecs);
	}

	if (owns_eh)
		ata_eh_acquire(ap);
}
EXPORT_SYMBOL_GPL(ata_msleep);
97750ceb 6371
c22daff4
TH
/**
 * ata_wait_register - wait until register value changes
 * @ap: ATA port to wait register for, can be NULL
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
 * @interval: polling interval in milliseconds
 * @timeout: timeout in milliseconds
 *
 * Waiting for some bits of register to change is a common
 * operation for ATA controllers.  This function reads 32bit LE
 * IO-mapped register @reg and tests for the following condition.
 *
 * (*@reg & mask) != val
 *
 * If the condition is met, it returns; otherwise, the process is
 * repeated after @interval_msec until timeout.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * The final register value.
 */
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval, unsigned long timeout)
{
	unsigned long deadline;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	deadline = ata_deadline(jiffies, timeout);

	/* poll while the condition is NOT met and time remains */
	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

	return tmp;
}
EXPORT_SYMBOL_GPL(ata_wait_register);
c22daff4 6418
dd5b06c4
TH
/*
 * Dummy port_ops - used for ports that are not wired to real hardware.
 */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	/* a dummy port can never execute a command */
	return AC_ERR_SYSTEM;
}

static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}

struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.error_handler		= ata_dummy_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);

/* port_info wrapper LLDs can point unused ports at */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
21b0ad4f 6445
06296a1e
JP
/* Helper for LLDs to print their driver version at KERN_DEBUG. */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
c206a389
HR
6451
6452EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
6453EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
6454EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
6455EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
6456EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);