ata: libata: drop ata_msg_warn()
[linux-2.6-block.git] / drivers / ata / libata-core.c
CommitLineData
c82ee6d3 1// SPDX-License-Identifier: GPL-2.0-or-later
1da177e4 2/*
af36d7f0
JG
3 * libata-core.c - helper library for ATA
4 *
af36d7f0
JG
5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6 * Copyright 2003-2004 Jeff Garzik
7 *
af36d7f0 8 * libata documentation is available via 'make {ps|pdf}docs',
19285f3c 9 * as Documentation/driver-api/libata.rst
af36d7f0
JG
10 *
11 * Hardware documentation available from http://www.t13.org/ and
12 * http://www.sata-io.org/
13 *
92c52c52
AC
14 * Standards documents from:
15 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
16 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
17 * http://www.sata-io.org (SATA)
18 * http://www.compactflash.org (CF)
19 * http://www.qic.org (QIC157 - Tape and DSC)
20 * http://www.ce-ata.org (CE-ATA: not supported)
a52fbcfc
BZ
21 *
22 * libata is essentially a library of internal helper functions for
23 * low-level ATA host controller drivers. As such, the API/ABI is
24 * likely to change as new drivers are added and updated.
25 * Do not depend on ABI/API stability.
1da177e4
LT
26 */
27
1da177e4
LT
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/list.h>
33#include <linux/mm.h>
1da177e4
LT
34#include <linux/spinlock.h>
35#include <linux/blkdev.h>
36#include <linux/delay.h>
37#include <linux/timer.h>
848c3920 38#include <linux/time.h>
1da177e4
LT
39#include <linux/interrupt.h>
40#include <linux/completion.h>
41#include <linux/suspend.h>
42#include <linux/workqueue.h>
378f058c 43#include <linux/scatterlist.h>
2dcb407e 44#include <linux/io.h>
e18086d6 45#include <linux/log2.h>
5a0e3ad6 46#include <linux/slab.h>
428ac5fc 47#include <linux/glob.h>
1da177e4 48#include <scsi/scsi.h>
193515d5 49#include <scsi/scsi_cmnd.h>
1da177e4
LT
50#include <scsi/scsi_host.h>
51#include <linux/libata.h>
1da177e4 52#include <asm/byteorder.h>
fe5af0cc 53#include <asm/unaligned.h>
140b5e59 54#include <linux/cdrom.h>
9990b6f3 55#include <linux/ratelimit.h>
eb25cb99 56#include <linux/leds.h>
9ee4f393 57#include <linux/pm_runtime.h>
b7db04d9 58#include <linux/platform_device.h>
bbf5a097 59#include <asm/setup.h>
1da177e4 60
255c03d1
HR
61#define CREATE_TRACE_POINTS
62#include <trace/events/libata.h>
63
1da177e4 64#include "libata.h"
d9027470 65#include "libata-transport.h"
fda0efc5 66
029cfd6b 67const struct ata_port_operations ata_base_port_ops = {
0aa1113d 68 .prereset = ata_std_prereset,
203c75b8 69 .postreset = ata_std_postreset,
a1efdaba 70 .error_handler = ata_std_error_handler,
e4a9c373
DW
71 .sched_eh = ata_std_sched_eh,
72 .end_eh = ata_std_end_eh,
029cfd6b
TH
73};
74
75const struct ata_port_operations sata_port_ops = {
76 .inherits = &ata_base_port_ops,
77
78 .qc_defer = ata_std_qc_defer,
57c9efdf 79 .hardreset = sata_std_hardreset,
029cfd6b 80};
a52fbcfc 81EXPORT_SYMBOL_GPL(sata_port_ops);
029cfd6b 82
3373efd8
TH
83static unsigned int ata_dev_init_params(struct ata_device *dev,
84 u16 heads, u16 sectors);
85static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
86static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 87static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 88
a78f57af 89atomic_t ata_print_id = ATOMIC_INIT(0);
1da177e4 90
#ifdef CONFIG_ATA_FORCE
/*
 * libata.force parameter handling.  Each table entry describes one
 * forced setting (cable type, SATA speed limit, transfer mask,
 * horkage bits or link flags), optionally restricted to a specific
 * port and/or device.
 */
struct ata_force_param {
	const char	*name;		/* human-readable name for logging */
	u8		cbl;		/* forced cable type, ATA_CBL_NONE if unset */
	u8		spd_limit;	/* forced PHY speed limit, 0 if unset */
	unsigned long	xfer_mask;	/* forced transfer mask, 0 if unset */
	unsigned int	horkage_on;	/* horkage bits to set */
	unsigned int	horkage_off;	/* horkage bits to clear */
	u16		lflags;		/* link flags to OR in */
};

struct ata_force_ent {
	int			port;	/* -1 matches any port */
	int			device;	/* -1 matches any device */
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
#endif
33267325 116
2486fa56 117static int atapi_enabled = 1;
1623c81e 118module_param(atapi_enabled, int, 0444);
ad5d8eac 119MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
1623c81e 120
c5c61bda 121static int atapi_dmadir = 0;
95de719a 122module_param(atapi_dmadir, int, 0444);
ad5d8eac 123MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
95de719a 124
baf4fdfa
ML
125int atapi_passthru16 = 1;
126module_param(atapi_passthru16, int, 0444);
ad5d8eac 127MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
baf4fdfa 128
c3c013a2
JG
129int libata_fua = 0;
130module_param_named(fua, libata_fua, int, 0444);
ad5d8eac 131MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
c3c013a2 132
2dcb407e 133static int ata_ignore_hpa;
1e999736
AC
134module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
135MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
136
b3a70601
AC
137static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
138module_param_named(dma, libata_dma_mask, int, 0444);
139MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
140
87fbc5a0 141static int ata_probe_timeout;
a8601e5f
AM
142module_param(ata_probe_timeout, int, 0444);
143MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
144
6ebe9d86 145int libata_noacpi = 0;
d7d0dad6 146module_param_named(noacpi, libata_noacpi, int, 0444);
ad5d8eac 147MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
11ef697b 148
ae8d4ee7
AC
149int libata_allow_tpm = 0;
150module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
ad5d8eac 151MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
ae8d4ee7 152
e7ecd435
TH
153static int atapi_an;
154module_param(atapi_an, int, 0444);
155MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
156
1da177e4
LT
157MODULE_AUTHOR("Jeff Garzik");
158MODULE_DESCRIPTION("Library module for ATA devices");
159MODULE_LICENSE("GPL");
160MODULE_VERSION(DRV_VERSION);
161
891fd7c6
DLM
162static inline bool ata_dev_print_info(struct ata_device *dev)
163{
164 struct ata_eh_context *ehc = &dev->link->eh_context;
165
166 return ehc->i.flags & ATA_EHI_PRINTINFO;
167}
0baab86b 168
9913ff8a
TH
169static bool ata_sstatus_online(u32 sstatus)
170{
171 return (sstatus & 0xf) == 0x3;
172}
173
1eca4365
TH
174/**
175 * ata_link_next - link iteration helper
176 * @link: the previous link, NULL to start
177 * @ap: ATA port containing links to iterate
178 * @mode: iteration mode, one of ATA_LITER_*
179 *
180 * LOCKING:
181 * Host lock or EH context.
aadffb68 182 *
1eca4365
TH
183 * RETURNS:
184 * Pointer to the next link.
aadffb68 185 */
1eca4365
TH
186struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
187 enum ata_link_iter_mode mode)
aadffb68 188{
1eca4365
TH
189 BUG_ON(mode != ATA_LITER_EDGE &&
190 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
191
aadffb68 192 /* NULL link indicates start of iteration */
1eca4365
TH
193 if (!link)
194 switch (mode) {
195 case ATA_LITER_EDGE:
196 case ATA_LITER_PMP_FIRST:
197 if (sata_pmp_attached(ap))
198 return ap->pmp_link;
df561f66 199 fallthrough;
1eca4365
TH
200 case ATA_LITER_HOST_FIRST:
201 return &ap->link;
202 }
aadffb68 203
1eca4365
TH
204 /* we just iterated over the host link, what's next? */
205 if (link == &ap->link)
206 switch (mode) {
207 case ATA_LITER_HOST_FIRST:
208 if (sata_pmp_attached(ap))
209 return ap->pmp_link;
df561f66 210 fallthrough;
1eca4365
TH
211 case ATA_LITER_PMP_FIRST:
212 if (unlikely(ap->slave_link))
b1c72916 213 return ap->slave_link;
df561f66 214 fallthrough;
1eca4365 215 case ATA_LITER_EDGE:
aadffb68 216 return NULL;
b1c72916 217 }
aadffb68 218
b1c72916
TH
219 /* slave_link excludes PMP */
220 if (unlikely(link == ap->slave_link))
221 return NULL;
222
1eca4365 223 /* we were over a PMP link */
aadffb68
TH
224 if (++link < ap->pmp_link + ap->nr_pmp_links)
225 return link;
1eca4365
TH
226
227 if (mode == ATA_LITER_PMP_FIRST)
228 return &ap->link;
229
aadffb68
TH
230 return NULL;
231}
a52fbcfc 232EXPORT_SYMBOL_GPL(ata_link_next);
aadffb68 233
1eca4365
TH
234/**
235 * ata_dev_next - device iteration helper
236 * @dev: the previous device, NULL to start
237 * @link: ATA link containing devices to iterate
238 * @mode: iteration mode, one of ATA_DITER_*
239 *
240 * LOCKING:
241 * Host lock or EH context.
242 *
243 * RETURNS:
244 * Pointer to the next device.
245 */
246struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
247 enum ata_dev_iter_mode mode)
248{
249 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
250 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
251
252 /* NULL dev indicates start of iteration */
253 if (!dev)
254 switch (mode) {
255 case ATA_DITER_ENABLED:
256 case ATA_DITER_ALL:
257 dev = link->device;
258 goto check;
259 case ATA_DITER_ENABLED_REVERSE:
260 case ATA_DITER_ALL_REVERSE:
261 dev = link->device + ata_link_max_devices(link) - 1;
262 goto check;
263 }
264
265 next:
266 /* move to the next one */
267 switch (mode) {
268 case ATA_DITER_ENABLED:
269 case ATA_DITER_ALL:
270 if (++dev < link->device + ata_link_max_devices(link))
271 goto check;
272 return NULL;
273 case ATA_DITER_ENABLED_REVERSE:
274 case ATA_DITER_ALL_REVERSE:
275 if (--dev >= link->device)
276 goto check;
277 return NULL;
278 }
279
280 check:
281 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
282 !ata_dev_enabled(dev))
283 goto next;
284 return dev;
285}
a52fbcfc 286EXPORT_SYMBOL_GPL(ata_dev_next);
1eca4365 287
b1c72916
TH
288/**
289 * ata_dev_phys_link - find physical link for a device
290 * @dev: ATA device to look up physical link for
291 *
292 * Look up physical link which @dev is attached to. Note that
293 * this is different from @dev->link only when @dev is on slave
294 * link. For all other cases, it's the same as @dev->link.
295 *
296 * LOCKING:
297 * Don't care.
298 *
299 * RETURNS:
300 * Pointer to the found physical link.
301 */
302struct ata_link *ata_dev_phys_link(struct ata_device *dev)
303{
304 struct ata_port *ap = dev->link->ap;
305
306 if (!ap->slave_link)
307 return dev->link;
308 if (!dev->devno)
309 return &ap->link;
310 return ap->slave_link;
311}
312
bf89b0bf 313#ifdef CONFIG_ATA_FORCE
33267325
TH
314/**
315 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 316 * @ap: ATA port of interest
33267325
TH
317 *
318 * Force cable type according to libata.force and whine about it.
319 * The last entry which has matching port number is used, so it
320 * can be specified as part of device force parameters. For
321 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
322 * same effect.
323 *
324 * LOCKING:
325 * EH context.
326 */
327void ata_force_cbl(struct ata_port *ap)
328{
329 int i;
330
331 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
332 const struct ata_force_ent *fe = &ata_force_tbl[i];
333
334 if (fe->port != -1 && fe->port != ap->print_id)
335 continue;
336
337 if (fe->param.cbl == ATA_CBL_NONE)
338 continue;
339
340 ap->cbl = fe->param.cbl;
a9a79dfe 341 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
33267325
TH
342 return;
343 }
344}
345
346/**
05944bdf 347 * ata_force_link_limits - force link limits according to libata.force
33267325
TH
348 * @link: ATA link of interest
349 *
05944bdf
TH
350 * Force link flags and SATA spd limit according to libata.force
351 * and whine about it. When only the port part is specified
352 * (e.g. 1:), the limit applies to all links connected to both
353 * the host link and all fan-out ports connected via PMP. If the
354 * device part is specified as 0 (e.g. 1.00:), it specifies the
355 * first fan-out link not the host link. Device number 15 always
b1c72916
TH
356 * points to the host link whether PMP is attached or not. If the
357 * controller has slave link, device number 16 points to it.
33267325
TH
358 *
359 * LOCKING:
360 * EH context.
361 */
05944bdf 362static void ata_force_link_limits(struct ata_link *link)
33267325 363{
05944bdf 364 bool did_spd = false;
b1c72916
TH
365 int linkno = link->pmp;
366 int i;
33267325
TH
367
368 if (ata_is_host_link(link))
b1c72916 369 linkno += 15;
33267325
TH
370
371 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
372 const struct ata_force_ent *fe = &ata_force_tbl[i];
373
374 if (fe->port != -1 && fe->port != link->ap->print_id)
375 continue;
376
377 if (fe->device != -1 && fe->device != linkno)
378 continue;
379
05944bdf
TH
380 /* only honor the first spd limit */
381 if (!did_spd && fe->param.spd_limit) {
382 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
a9a79dfe 383 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
05944bdf
TH
384 fe->param.name);
385 did_spd = true;
386 }
33267325 387
05944bdf
TH
388 /* let lflags stack */
389 if (fe->param.lflags) {
390 link->flags |= fe->param.lflags;
a9a79dfe 391 ata_link_notice(link,
05944bdf
TH
392 "FORCE: link flag 0x%x forced -> 0x%x\n",
393 fe->param.lflags, link->flags);
394 }
33267325
TH
395 }
396}
397
398/**
399 * ata_force_xfermask - force xfermask according to libata.force
400 * @dev: ATA device of interest
401 *
402 * Force xfer_mask according to libata.force and whine about it.
403 * For consistency with link selection, device number 15 selects
404 * the first device connected to the host link.
405 *
406 * LOCKING:
407 * EH context.
408 */
409static void ata_force_xfermask(struct ata_device *dev)
410{
411 int devno = dev->link->pmp + dev->devno;
412 int alt_devno = devno;
413 int i;
414
b1c72916
TH
415 /* allow n.15/16 for devices attached to host port */
416 if (ata_is_host_link(dev->link))
417 alt_devno += 15;
33267325
TH
418
419 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
420 const struct ata_force_ent *fe = &ata_force_tbl[i];
421 unsigned long pio_mask, mwdma_mask, udma_mask;
422
423 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
424 continue;
425
426 if (fe->device != -1 && fe->device != devno &&
427 fe->device != alt_devno)
428 continue;
429
430 if (!fe->param.xfer_mask)
431 continue;
432
433 ata_unpack_xfermask(fe->param.xfer_mask,
434 &pio_mask, &mwdma_mask, &udma_mask);
435 if (udma_mask)
436 dev->udma_mask = udma_mask;
437 else if (mwdma_mask) {
438 dev->udma_mask = 0;
439 dev->mwdma_mask = mwdma_mask;
440 } else {
441 dev->udma_mask = 0;
442 dev->mwdma_mask = 0;
443 dev->pio_mask = pio_mask;
444 }
445
a9a79dfe
JP
446 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
447 fe->param.name);
33267325
TH
448 return;
449 }
450}
451
452/**
453 * ata_force_horkage - force horkage according to libata.force
454 * @dev: ATA device of interest
455 *
456 * Force horkage according to libata.force and whine about it.
457 * For consistency with link selection, device number 15 selects
458 * the first device connected to the host link.
459 *
460 * LOCKING:
461 * EH context.
462 */
463static void ata_force_horkage(struct ata_device *dev)
464{
465 int devno = dev->link->pmp + dev->devno;
466 int alt_devno = devno;
467 int i;
468
b1c72916
TH
469 /* allow n.15/16 for devices attached to host port */
470 if (ata_is_host_link(dev->link))
471 alt_devno += 15;
33267325
TH
472
473 for (i = 0; i < ata_force_tbl_size; i++) {
474 const struct ata_force_ent *fe = &ata_force_tbl[i];
475
476 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
477 continue;
478
479 if (fe->device != -1 && fe->device != devno &&
480 fe->device != alt_devno)
481 continue;
482
483 if (!(~dev->horkage & fe->param.horkage_on) &&
484 !(dev->horkage & fe->param.horkage_off))
485 continue;
486
487 dev->horkage |= fe->param.horkage_on;
488 dev->horkage &= ~fe->param.horkage_off;
489
a9a79dfe
JP
490 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
491 fe->param.name);
33267325
TH
492 }
493}
bf89b0bf
BZ
494#else
495static inline void ata_force_link_limits(struct ata_link *link) { }
496static inline void ata_force_xfermask(struct ata_device *dev) { }
497static inline void ata_force_horkage(struct ata_device *dev) { }
498#endif
33267325 499
436d34b3
TH
500/**
501 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
502 * @opcode: SCSI opcode
503 *
504 * Determine ATAPI command type from @opcode.
505 *
506 * LOCKING:
507 * None.
508 *
509 * RETURNS:
510 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
511 */
512int atapi_cmd_type(u8 opcode)
513{
514 switch (opcode) {
515 case GPCMD_READ_10:
516 case GPCMD_READ_12:
517 return ATAPI_READ;
518
519 case GPCMD_WRITE_10:
520 case GPCMD_WRITE_12:
521 case GPCMD_WRITE_AND_VERIFY_10:
522 return ATAPI_WRITE;
523
524 case GPCMD_READ_CD:
525 case GPCMD_READ_CD_MSF:
526 return ATAPI_READ_CD;
527
e52dcc48
TH
528 case ATA_16:
529 case ATA_12:
530 if (atapi_passthru16)
531 return ATAPI_PASS_THRU;
df561f66 532 fallthrough;
436d34b3
TH
533 default:
534 return ATAPI_MISC;
535 }
536}
a52fbcfc 537EXPORT_SYMBOL_GPL(atapi_cmd_type);
436d34b3 538
8cbd6df1
AL
539static const u8 ata_rw_cmds[] = {
540 /* pio multi */
541 ATA_CMD_READ_MULTI,
542 ATA_CMD_WRITE_MULTI,
543 ATA_CMD_READ_MULTI_EXT,
544 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
545 0,
546 0,
547 0,
548 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
549 /* pio */
550 ATA_CMD_PIO_READ,
551 ATA_CMD_PIO_WRITE,
552 ATA_CMD_PIO_READ_EXT,
553 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
554 0,
555 0,
556 0,
557 0,
8cbd6df1
AL
558 /* dma */
559 ATA_CMD_READ,
560 ATA_CMD_WRITE,
561 ATA_CMD_READ_EXT,
9a3dccc4
TH
562 ATA_CMD_WRITE_EXT,
563 0,
564 0,
565 0,
566 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 567};
1da177e4
LT
568
569/**
8cbd6df1 570 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
571 * @tf: command to examine and configure
572 * @dev: device tf belongs to
1da177e4 573 *
2e9edbf8 574 * Examine the device configuration and tf->flags to calculate
8cbd6df1 575 * the proper read/write commands and protocol to use.
1da177e4
LT
576 *
577 * LOCKING:
578 * caller.
579 */
bd056d7e 580static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 581{
9a3dccc4 582 u8 cmd;
1da177e4 583
9a3dccc4 584 int index, fua, lba48, write;
2e9edbf8 585
9a3dccc4 586 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
587 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
588 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 589
8cbd6df1
AL
590 if (dev->flags & ATA_DFLAG_PIO) {
591 tf->protocol = ATA_PROT_PIO;
9a3dccc4 592 index = dev->multi_count ? 0 : 8;
9af5c9c9 593 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
594 /* Unable to use DMA due to host limitation */
595 tf->protocol = ATA_PROT_PIO;
0565c26d 596 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
597 } else {
598 tf->protocol = ATA_PROT_DMA;
9a3dccc4 599 index = 16;
8cbd6df1 600 }
1da177e4 601
9a3dccc4
TH
602 cmd = ata_rw_cmds[index + fua + lba48 + write];
603 if (cmd) {
604 tf->command = cmd;
605 return 0;
606 }
607 return -1;
1da177e4
LT
608}
609
35b649fe
TH
610/**
611 * ata_tf_read_block - Read block address from ATA taskfile
612 * @tf: ATA taskfile of interest
613 * @dev: ATA device @tf belongs to
614 *
615 * LOCKING:
616 * None.
617 *
618 * Read block address from @tf. This function can handle all
619 * three address formats - LBA, LBA48 and CHS. tf->protocol and
620 * flags select the address format to use.
621 *
622 * RETURNS:
623 * Block address read from @tf.
624 */
cffd1ee9 625u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
35b649fe
TH
626{
627 u64 block = 0;
628
fe16d4f2 629 if (tf->flags & ATA_TFLAG_LBA) {
35b649fe
TH
630 if (tf->flags & ATA_TFLAG_LBA48) {
631 block |= (u64)tf->hob_lbah << 40;
632 block |= (u64)tf->hob_lbam << 32;
44901a96 633 block |= (u64)tf->hob_lbal << 24;
35b649fe
TH
634 } else
635 block |= (tf->device & 0xf) << 24;
636
637 block |= tf->lbah << 16;
638 block |= tf->lbam << 8;
639 block |= tf->lbal;
640 } else {
641 u32 cyl, head, sect;
642
643 cyl = tf->lbam | (tf->lbah << 8);
644 head = tf->device & 0xf;
645 sect = tf->lbal;
646
ac8672ea 647 if (!sect) {
a9a79dfe
JP
648 ata_dev_warn(dev,
649 "device reported invalid CHS sector 0\n");
cffd1ee9 650 return U64_MAX;
ac8672ea
TH
651 }
652
653 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
35b649fe
TH
654 }
655
656 return block;
657}
658
bd056d7e
TH
659/**
660 * ata_build_rw_tf - Build ATA taskfile for given read/write request
661 * @tf: Target ATA taskfile
662 * @dev: ATA device @tf belongs to
663 * @block: Block address
664 * @n_block: Number of blocks
665 * @tf_flags: RW/FUA etc...
666 * @tag: tag
8e061784 667 * @class: IO priority class
bd056d7e
TH
668 *
669 * LOCKING:
670 * None.
671 *
672 * Build ATA taskfile @tf for read/write request described by
673 * @block, @n_block, @tf_flags and @tag on @dev.
674 *
675 * RETURNS:
676 *
677 * 0 on success, -ERANGE if the request is too large for @dev,
678 * -EINVAL if the request is invalid.
679 */
680int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
681 u64 block, u32 n_block, unsigned int tf_flags,
8e061784 682 unsigned int tag, int class)
bd056d7e
TH
683{
684 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
685 tf->flags |= tf_flags;
686
2e2cc676 687 if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
bd056d7e
TH
688 /* yay, NCQ */
689 if (!lba_48_ok(block, n_block))
690 return -ERANGE;
691
692 tf->protocol = ATA_PROT_NCQ;
693 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
694
695 if (tf->flags & ATA_TFLAG_WRITE)
696 tf->command = ATA_CMD_FPDMA_WRITE;
697 else
698 tf->command = ATA_CMD_FPDMA_READ;
699
700 tf->nsect = tag << 3;
701 tf->hob_feature = (n_block >> 8) & 0xff;
702 tf->feature = n_block & 0xff;
703
704 tf->hob_lbah = (block >> 40) & 0xff;
705 tf->hob_lbam = (block >> 32) & 0xff;
706 tf->hob_lbal = (block >> 24) & 0xff;
707 tf->lbah = (block >> 16) & 0xff;
708 tf->lbam = (block >> 8) & 0xff;
709 tf->lbal = block & 0xff;
710
9ca7cfa4 711 tf->device = ATA_LBA;
bd056d7e
TH
712 if (tf->flags & ATA_TFLAG_FUA)
713 tf->device |= 1 << 7;
8e061784 714
2360fa18
DLM
715 if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE &&
716 class == IOPRIO_CLASS_RT)
717 tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
bd056d7e
TH
718 } else if (dev->flags & ATA_DFLAG_LBA) {
719 tf->flags |= ATA_TFLAG_LBA;
720
721 if (lba_28_ok(block, n_block)) {
722 /* use LBA28 */
723 tf->device |= (block >> 24) & 0xf;
724 } else if (lba_48_ok(block, n_block)) {
725 if (!(dev->flags & ATA_DFLAG_LBA48))
726 return -ERANGE;
727
728 /* use LBA48 */
729 tf->flags |= ATA_TFLAG_LBA48;
730
731 tf->hob_nsect = (n_block >> 8) & 0xff;
732
733 tf->hob_lbah = (block >> 40) & 0xff;
734 tf->hob_lbam = (block >> 32) & 0xff;
735 tf->hob_lbal = (block >> 24) & 0xff;
736 } else
737 /* request too large even for LBA48 */
738 return -ERANGE;
739
740 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
741 return -EINVAL;
742
743 tf->nsect = n_block & 0xff;
744
745 tf->lbah = (block >> 16) & 0xff;
746 tf->lbam = (block >> 8) & 0xff;
747 tf->lbal = block & 0xff;
748
749 tf->device |= ATA_LBA;
750 } else {
751 /* CHS */
752 u32 sect, head, cyl, track;
753
754 /* The request -may- be too large for CHS addressing. */
755 if (!lba_28_ok(block, n_block))
756 return -ERANGE;
757
758 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
759 return -EINVAL;
760
761 /* Convert LBA to CHS */
762 track = (u32)block / dev->sectors;
763 cyl = track / dev->heads;
764 head = track % dev->heads;
765 sect = (u32)block % dev->sectors + 1;
766
bd056d7e
TH
767 /* Check whether the converted CHS can fit.
768 Cylinder: 0-65535
769 Head: 0-15
770 Sector: 1-255*/
771 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
772 return -ERANGE;
773
774 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
775 tf->lbal = sect;
776 tf->lbam = cyl;
777 tf->lbah = cyl >> 8;
778 tf->device |= head;
779 }
780
781 return 0;
782}
783
cb95d562
TH
784/**
785 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
786 * @pio_mask: pio_mask
787 * @mwdma_mask: mwdma_mask
788 * @udma_mask: udma_mask
789 *
790 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
791 * unsigned int xfer_mask.
792 *
793 * LOCKING:
794 * None.
795 *
796 * RETURNS:
797 * Packed xfer_mask.
798 */
7dc951ae
TH
799unsigned long ata_pack_xfermask(unsigned long pio_mask,
800 unsigned long mwdma_mask,
801 unsigned long udma_mask)
cb95d562
TH
802{
803 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
804 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
805 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
806}
a52fbcfc 807EXPORT_SYMBOL_GPL(ata_pack_xfermask);
cb95d562 808
c0489e4e
TH
809/**
810 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
811 * @xfer_mask: xfer_mask to unpack
812 * @pio_mask: resulting pio_mask
813 * @mwdma_mask: resulting mwdma_mask
814 * @udma_mask: resulting udma_mask
815 *
816 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
c9b5560a 817 * Any NULL destination masks will be ignored.
c0489e4e 818 */
7dc951ae
TH
819void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
820 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
821{
822 if (pio_mask)
823 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
824 if (mwdma_mask)
825 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
826 if (udma_mask)
827 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
828}
829
cb95d562 830static const struct ata_xfer_ent {
be9a50c8 831 int shift, bits;
cb95d562
TH
832 u8 base;
833} ata_xfer_tbl[] = {
70cd071e
TH
834 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
835 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
836 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
cb95d562
TH
837 { -1, },
838};
839
840/**
841 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
842 * @xfer_mask: xfer_mask of interest
843 *
844 * Return matching XFER_* value for @xfer_mask. Only the highest
845 * bit of @xfer_mask is considered.
846 *
847 * LOCKING:
848 * None.
849 *
850 * RETURNS:
70cd071e 851 * Matching XFER_* value, 0xff if no match found.
cb95d562 852 */
7dc951ae 853u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
854{
855 int highbit = fls(xfer_mask) - 1;
856 const struct ata_xfer_ent *ent;
857
858 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
859 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
860 return ent->base + highbit - ent->shift;
70cd071e 861 return 0xff;
cb95d562 862}
a52fbcfc 863EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
cb95d562
TH
864
865/**
866 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
867 * @xfer_mode: XFER_* of interest
868 *
869 * Return matching xfer_mask for @xfer_mode.
870 *
871 * LOCKING:
872 * None.
873 *
874 * RETURNS:
875 * Matching xfer_mask, 0 if no match found.
876 */
7dc951ae 877unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
878{
879 const struct ata_xfer_ent *ent;
880
881 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
882 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
883 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
884 & ~((1 << ent->shift) - 1);
cb95d562
TH
885 return 0;
886}
a52fbcfc 887EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
cb95d562
TH
888
889/**
890 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
891 * @xfer_mode: XFER_* of interest
892 *
893 * Return matching xfer_shift for @xfer_mode.
894 *
895 * LOCKING:
896 * None.
897 *
898 * RETURNS:
899 * Matching xfer_shift, -1 if no match found.
900 */
7dc951ae 901int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
902{
903 const struct ata_xfer_ent *ent;
904
905 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
906 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
907 return ent->shift;
908 return -1;
909}
a52fbcfc 910EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
cb95d562 911
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* indexed by xfer_mask bit position */
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6",
		"MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", "UDMA/66",
		"UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
EXPORT_SYMBOL_GPL(ata_mode_string);
1da177e4 958
/*
 * sata_spd_string - convert SATA signalling speed number to string
 * @spd: 1-based speed generation (1 = 1.5 Gbps, 2 = 3.0 Gbps, 3 = 6.0 Gbps)
 *
 * Returns a constant string for @spd, "<unknown>" for 0 or
 * out-of-range values.
 */
const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || spd > sizeof(spd_str) / sizeof(spd_str[0]))
		return "<unknown>";
	return spd_str[spd - 1];
}
971
1da177e4
LT
972/**
973 * ata_dev_classify - determine device type based on ATA-spec signature
974 * @tf: ATA taskfile register set for device to be identified
975 *
976 * Determine from taskfile register contents whether a device is
977 * ATA or ATAPI, as per "Signature and persistence" section
978 * of ATA/PI spec (volume 1, sect 5.14).
979 *
980 * LOCKING:
981 * None.
982 *
983 * RETURNS:
9162c657
HR
984 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
985 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN the event of failure.
1da177e4 986 */
057ace5e 987unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
988{
989 /* Apple's open source Darwin code hints that some devices only
990 * put a proper signature into the LBA mid/high registers,
991 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
992 *
993 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
994 * signatures for ATA and ATAPI devices attached on SerialATA,
995 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
996 * spec has never mentioned about using different signatures
997 * for ATA/ATAPI devices. Then, Serial ATA II: Port
998 * Multiplier specification began to use 0x69/0x96 to identify
999 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1000 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1001 * 0x69/0x96 shortly and described them as reserved for
1002 * SerialATA.
1003 *
1004 * We follow the current spec and consider that 0x69/0x96
1005 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
79b42bab
TH
1006 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1007 * SEMB signature. This is worked around in
1008 * ata_dev_read_id().
1da177e4 1009 */
6c952a0d 1010 if (tf->lbam == 0 && tf->lbah == 0)
1da177e4 1011 return ATA_DEV_ATA;
1da177e4 1012
6c952a0d 1013 if (tf->lbam == 0x14 && tf->lbah == 0xeb)
1da177e4 1014 return ATA_DEV_ATAPI;
1da177e4 1015
6c952a0d 1016 if (tf->lbam == 0x69 && tf->lbah == 0x96)
633273a3 1017 return ATA_DEV_PMP;
633273a3 1018
6c952a0d 1019 if (tf->lbam == 0x3c && tf->lbah == 0xc3)
79b42bab 1020 return ATA_DEV_SEMB;
633273a3 1021
6c952a0d 1022 if (tf->lbam == 0xcd && tf->lbah == 0xab)
9162c657 1023 return ATA_DEV_ZAC;
9162c657 1024
1da177e4
LT
1025 return ATA_DEV_UNKNOWN;
1026}
a52fbcfc 1027EXPORT_SYMBOL_GPL(ata_dev_classify);
1da177e4 1028
1da177e4 1029/**
6a62a04d 1030 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1031 * @id: IDENTIFY DEVICE results we will examine
1032 * @s: string into which data is output
1033 * @ofs: offset into identify device page
1034 * @len: length of string to return. must be an even number.
1035 *
1036 * The strings in the IDENTIFY DEVICE page are broken up into
1037 * 16-bit chunks. Run through the string, and output each
1038 * 8-bit chunk linearly, regardless of platform.
1039 *
1040 * LOCKING:
1041 * caller.
1042 */
1043
6a62a04d
TH
1044void ata_id_string(const u16 *id, unsigned char *s,
1045 unsigned int ofs, unsigned int len)
1da177e4
LT
1046{
1047 unsigned int c;
1048
963e4975
AC
1049 BUG_ON(len & 1);
1050
1da177e4
LT
1051 while (len > 0) {
1052 c = id[ofs] >> 8;
1053 *s = c;
1054 s++;
1055
1056 c = id[ofs] & 0xff;
1057 *s = c;
1058 s++;
1059
1060 ofs++;
1061 len -= 2;
1062 }
1063}
a52fbcfc 1064EXPORT_SYMBOL_GPL(ata_id_string);
1da177e4 1065
0e949ff3 1066/**
6a62a04d 1067 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1068 * @id: IDENTIFY DEVICE results we will examine
1069 * @s: string into which data is output
1070 * @ofs: offset into identify device page
1071 * @len: length of string to return. must be an odd number.
1072 *
6a62a04d 1073 * This function is identical to ata_id_string except that it
0e949ff3
TH
1074 * trims trailing spaces and terminates the resulting string with
1075 * null. @len must be actual maximum length (even number) + 1.
1076 *
1077 * LOCKING:
1078 * caller.
1079 */
6a62a04d
TH
1080void ata_id_c_string(const u16 *id, unsigned char *s,
1081 unsigned int ofs, unsigned int len)
0e949ff3
TH
1082{
1083 unsigned char *p;
1084
6a62a04d 1085 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1086
1087 p = s + strnlen(s, len - 1);
1088 while (p > s && p[-1] == ' ')
1089 p--;
1090 *p = '\0';
1091}
a52fbcfc 1092EXPORT_SYMBOL_GPL(ata_id_c_string);
0baab86b 1093
db6f8759
TH
1094static u64 ata_id_n_sectors(const u16 *id)
1095{
1096 if (ata_id_has_lba(id)) {
1097 if (ata_id_has_lba48(id))
968e594a 1098 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
db6f8759 1099 else
968e594a 1100 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
db6f8759
TH
1101 } else {
1102 if (ata_id_current_chs_valid(id))
968e594a
RH
1103 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1104 id[ATA_ID_CUR_SECTORS];
db6f8759 1105 else
968e594a
RH
1106 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1107 id[ATA_ID_SECTORS];
db6f8759
TH
1108 }
1109}
1110
a5987e0a 1111u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1e999736
AC
1112{
1113 u64 sectors = 0;
1114
1115 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1116 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
ba14a9c2 1117 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1e999736
AC
1118 sectors |= (tf->lbah & 0xff) << 16;
1119 sectors |= (tf->lbam & 0xff) << 8;
1120 sectors |= (tf->lbal & 0xff);
1121
a5987e0a 1122 return sectors;
1e999736
AC
1123}
1124
a5987e0a 1125u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1e999736
AC
1126{
1127 u64 sectors = 0;
1128
1129 sectors |= (tf->device & 0x0f) << 24;
1130 sectors |= (tf->lbah & 0xff) << 16;
1131 sectors |= (tf->lbam & 0xff) << 8;
1132 sectors |= (tf->lbal & 0xff);
1133
a5987e0a 1134 return sectors;
1e999736
AC
1135}
1136
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	/* Pick the EXT variant of the command iff the device speaks LBA48. */
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	/* READ NATIVE MAX (EXT) is a non-data command in LBA mode. */
	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		/* Distinguish a device abort (-EACCES) from other failures. */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* The result taskfile holds the last accessible LBA; +1 makes it
	 * a sector count.
	 */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	/* Quirk: ATA_HORKAGE_HPA_SIZE devices need the reported size
	 * adjusted down by one (presumably an off-by-one in the device's
	 * report -- see the horkage definition).
	 */
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1187
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* The taskfile carries the last accessible LBA, not a count --
	 * mirror of the +1 done in ata_read_native_max_address().
	 */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		/* Bits 47:24 go into the HOB registers. */
		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 27:24 live in the device register nibble. */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	/* SET MAX (ADDRESS) is a non-data command in LBA mode. */
	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		/* Aborted or ID-not-found means the device refused it. */
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1245
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	bool print_info = ata_dev_print_info(dev);
	/* Unlock either globally (module param) or per-device (EH flag). */
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			/* Remember the breakage so we never retry this dev. */
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		/* Only informational output from here on. */
		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data -- the capacity fields just changed */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1341
1da177e4
LT
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@dev: device from which the information is fetched
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
{
	/* Fixed set of identify words printed as "word==value" pairs. */
	ata_dev_dbg(dev,
		"49==0x%04x 53==0x%04x 63==0x%04x 64==0x%04x 75==0x%04x\n"
		"80==0x%04x 81==0x%04x 82==0x%04x 83==0x%04x 84==0x%04x\n"
		"88==0x%04x 93==0x%04x\n",
		id[49], id[53], id[63], id[64], id[75], id[80],
		id[81], id[82], id[83], id[84], id[88], id[93]);
}
1363
cb95d562
TH
1364/**
1365 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1366 * @id: IDENTIFY data to compute xfer mask from
1367 *
1368 * Compute the xfermask for this device. This is not as trivial
1369 * as it seems if we must consider early devices correctly.
1370 *
1371 * FIXME: pre IDE drive timing (do we care ?).
1372 *
1373 * LOCKING:
1374 * None.
1375 *
1376 * RETURNS:
1377 * Computed xfermask
1378 */
7dc951ae 1379unsigned long ata_id_xfermask(const u16 *id)
cb95d562 1380{
7dc951ae 1381 unsigned long pio_mask, mwdma_mask, udma_mask;
cb95d562
TH
1382
1383 /* Usual case. Word 53 indicates word 64 is valid */
1384 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1385 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1386 pio_mask <<= 3;
1387 pio_mask |= 0x7;
1388 } else {
1389 /* If word 64 isn't valid then Word 51 high byte holds
1390 * the PIO timing number for the maximum. Turn it into
1391 * a mask.
1392 */
7a0f1c8a 1393 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1394 if (mode < 5) /* Valid PIO range */
2dcb407e 1395 pio_mask = (2 << mode) - 1;
46767aeb
AC
1396 else
1397 pio_mask = 1;
cb95d562
TH
1398
1399 /* But wait.. there's more. Design your standards by
1400 * committee and you too can get a free iordy field to
1401 * process. However its the speeds not the modes that
1402 * are supported... Note drivers using the timing API
1403 * will get this right anyway
1404 */
1405 }
1406
1407 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1408
b352e57d
AC
1409 if (ata_id_is_cfa(id)) {
1410 /*
1411 * Process compact flash extended modes
1412 */
62afe5d7
SS
1413 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1414 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
b352e57d
AC
1415
1416 if (pio)
1417 pio_mask |= (1 << 5);
1418 if (pio > 1)
1419 pio_mask |= (1 << 6);
1420 if (dma)
1421 mwdma_mask |= (1 << 3);
1422 if (dma > 1)
1423 mwdma_mask |= (1 << 4);
1424 }
1425
fb21f0d0
TH
1426 udma_mask = 0;
1427 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1428 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1429
1430 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1431}
a52fbcfc 1432EXPORT_SYMBOL_GPL(ata_id_xfermask);
cb95d562 1433
7102d230 1434static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1435{
77853bf2 1436 struct completion *waiting = qc->private_data;
a2a7a662 1437
a2a7a662 1438 complete(waiting);
a2a7a662
TH
1439}
1440
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	/* Saved port/link command state, restored after completion. */
	unsigned int preempted_tag;
	u32 preempted_sactive;
	u64 preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc -- the reserved internal tag is used */
	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);

	qc->tag = ATA_TAG_INTERNAL;
	qc->hw_tag = 0;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* Save and clear the active-command bookkeeping so the internal
	 * command is the only one in flight from libata's point of view;
	 * the saved state is restored below under the lock.
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* Total byte count is the sum of all sg entry lengths. */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* No explicit timeout given: use the probe timeout override if
	 * set, otherwise a per-command default (and note that the
	 * default was used, for ata_internal_cmd_timed_out() below).
	 */
	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	/* Drop EH exclusion across the sleep so EH can run meanwhile. */
	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
				     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* AC_ERR_OTHER is only a fallback; drop it when a more
		 * specific cause is known.
		 */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.command |= ATA_SENSE;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* Restore the command state saved before issuing. */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1618
2432697b 1619/**
33480a0e 1620 * ata_exec_internal - execute libata internal command
2432697b
TH
1621 * @dev: Device to which the command is sent
1622 * @tf: Taskfile registers for the command and the result
1623 * @cdb: CDB for packet command
e227867f 1624 * @dma_dir: Data transfer direction of the command
2432697b
TH
1625 * @buf: Data buffer of the command
1626 * @buflen: Length of data buffer
2b789108 1627 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1628 *
1629 * Wrapper around ata_exec_internal_sg() which takes simple
1630 * buffer instead of sg list.
1631 *
1632 * LOCKING:
1633 * None. Should be called with kernel context, might sleep.
1634 *
1635 * RETURNS:
1636 * Zero on success, AC_ERR_* mask on failure
1637 */
1638unsigned ata_exec_internal(struct ata_device *dev,
1639 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1640 int dma_dir, void *buf, unsigned int buflen,
1641 unsigned long timeout)
2432697b 1642{
33480a0e
TH
1643 struct scatterlist *psg = NULL, sg;
1644 unsigned int n_elem = 0;
2432697b 1645
33480a0e
TH
1646 if (dma_dir != DMA_NONE) {
1647 WARN_ON(!buf);
1648 sg_init_one(&sg, buf, buflen);
1649 psg = &sg;
1650 n_elem++;
1651 }
2432697b 1652
2b789108
TH
1653 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1654 timeout);
2432697b
TH
1655}
1656
1bc4ccff
AC
1657/**
1658 * ata_pio_need_iordy - check if iordy needed
1659 * @adev: ATA device
1660 *
1661 * Check if the current speed of the device requires IORDY. Used
1662 * by various controllers for chip configuration.
1663 */
1bc4ccff
AC
1664unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1665{
0d9e6659
TH
1666 /* Don't set IORDY if we're preparing for reset. IORDY may
1667 * lead to controller lock up on certain controllers if the
1668 * port is not occupied. See bko#11703 for details.
1669 */
1670 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1671 return 0;
1672 /* Controller doesn't support IORDY. Probably a pointless
1673 * check as the caller should know this.
1674 */
9af5c9c9 1675 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1676 return 0;
5c18c4d2
DD
1677 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1678 if (ata_id_is_cfa(adev->id)
1679 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1680 return 0;
432729f0
AC
1681 /* PIO3 and higher it is mandatory */
1682 if (adev->pio_mode > XFER_PIO_2)
1683 return 1;
1684 /* We turn it on when possible */
1685 if (ata_id_has_iordy(adev->id))
1bc4ccff 1686 return 1;
432729f0
AC
1687 return 0;
1688}
a52fbcfc 1689EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
2e9edbf8 1690
432729f0
AC
1691/**
1692 * ata_pio_mask_no_iordy - Return the non IORDY mask
1693 * @adev: ATA device
1694 *
1695 * Compute the highest mode possible if we are not using iordy. Return
1696 * -1 if no iordy mode is available.
1697 */
432729f0
AC
1698static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1699{
1bc4ccff 1700 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1701 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1702 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1703 /* Is the speed faster than the drive allows non IORDY ? */
1704 if (pio) {
1705 /* This is cycle times not frequency - watch the logic! */
1706 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1707 return 3 << ATA_SHIFT_PIO;
1708 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1709 }
1710 }
432729f0 1711 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1712}
1713
963e4975
AC
1714/**
1715 * ata_do_dev_read_id - default ID read method
1716 * @dev: device
1717 * @tf: proposed taskfile
1718 * @id: data buffer
1719 *
1720 * Issue the identify taskfile and hand back the buffer containing
1721 * identify data. For some RAID controllers and for pre ATA devices
1722 * this function is wrapped or replaced by the driver
1723 */
1724unsigned int ata_do_dev_read_id(struct ata_device *dev,
1725 struct ata_taskfile *tf, u16 *id)
1726{
1727 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1728 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1729}
a52fbcfc 1730EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
963e4975 1731
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	/* One fallback to the other IDENTIFY flavor and one spin-up retry
	 * are allowed; both loop back to the retry label below.
	 */
	int may_fallback = 1, tried_spinup = 0;
	int rc;

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		fallthrough;
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	/* Let the LLD override the ID read method if it provides one. */
	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Quirk: dump the raw IDENTIFY data for devices marked with
	 * ATA_HORKAGE_DUMP_ID.
	 */
	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_info(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
							ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* Word 2 values 0x37c8/0x738c indicate incomplete/complete
	 * IDENTIFY data from a drive that powered up in standby.
	 */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
		     reason, err_mask);
	return rc;
}
1938
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@log: log to read
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
			       u8 page, void *buf, unsigned int sectors)
{
	unsigned long ap_flags = dev->link->ap->flags;
	struct ata_taskfile tf;
	unsigned int err_mask;
	bool dma = false;

	ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);

	/*
	 * Return error without actually issuing the command on controllers
	 * which e.g. lockup on a read log page.
	 */
	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
		return AC_ERR_DEV;

retry:
	ata_tf_init(dev, &tf);
	/* Prefer the DMA variant when the device supports it and isn't
	 * marked with the NO_DMA_LOG quirk; otherwise fall back to PIO.
	 */
	if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
		tf.protocol = ATA_PROT_DMA;
		dma = true;
	} else {
		tf.command = ATA_CMD_READ_LOG_EXT;
		tf.protocol = ATA_PROT_PIO;
		dma = false;
	}
	/* LBA low/mid select the log and page; nsect is the 16-bit
	 * sector count split across nsect/hob_nsect.
	 */
	tf.lbal = log;
	tf.lbam = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	if (err_mask) {
		/* A DMA failure marks the device NO_DMA_LOG and retries
		 * once via the PIO path above.
		 */
		if (dma) {
			dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
			goto retry;
		}
		ata_dev_err(dev,
			    "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
			    (unsigned int)log, (unsigned int)page, err_mask);
	}

	return err_mask;
}
2005
efe205a3
CH
2006static bool ata_log_supported(struct ata_device *dev, u8 log)
2007{
2008 struct ata_port *ap = dev->link->ap;
2009
2010 if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2011 return false;
2012 return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
2013}
2014
a0fd2454
CH
2015static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2016{
2017 struct ata_port *ap = dev->link->ap;
2018 unsigned int err, i;
2019
636f6e2a
DLM
2020 if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
2021 return false;
2022
a0fd2454 2023 if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
636f6e2a
DLM
2024 /*
2025 * IDENTIFY DEVICE data log is defined as mandatory starting
2026 * with ACS-3 (ATA version 10). Warn about the missing log
2027 * for drives which implement this ATA level or above.
2028 */
2029 if (ata_id_major_version(dev->id) >= 10)
2030 ata_dev_warn(dev,
2031 "ATA Identify Device Log not supported\n");
2032 dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
a0fd2454
CH
2033 return false;
2034 }
2035
2036 /*
2037 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2038 * supported.
2039 */
2040 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2041 1);
fc5c8aa7 2042 if (err)
a0fd2454 2043 return false;
a0fd2454
CH
2044
2045 for (i = 0; i < ap->sector_buf[8]; i++) {
2046 if (ap->sector_buf[9 + i] == page)
2047 return true;
2048 }
2049
2050 return false;
2051}
2052
9062712f
TH
2053static int ata_do_link_spd_horkage(struct ata_device *dev)
2054{
2055 struct ata_link *plink = ata_dev_phys_link(dev);
2056 u32 target, target_limit;
2057
2058 if (!sata_scr_valid(plink))
2059 return 0;
2060
2061 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2062 target = 1;
2063 else
2064 return 0;
2065
2066 target_limit = (1 << target) - 1;
2067
2068 /* if already on stricter limit, no need to push further */
2069 if (plink->sata_spd_limit <= target_limit)
2070 return 0;
2071
2072 plink->sata_spd_limit = target_limit;
2073
2074 /* Request another EH round by returning -EAGAIN if link is
2075 * going faster than the target speed. Forward progress is
2076 * guaranteed by setting sata_spd_limit to target_limit above.
2077 */
2078 if (plink->sata_spd > target) {
a9a79dfe
JP
2079 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2080 sata_spd_string(target));
9062712f
TH
2081 return -EAGAIN;
2082 }
2083 return 0;
2084}
2085
3373efd8 2086static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2087{
9af5c9c9 2088 struct ata_port *ap = dev->link->ap;
9ce8e307
JA
2089
2090 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2091 return 0;
2092
9af5c9c9 2093 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2094}
2095
5a233551
HR
2096static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2097{
2098 struct ata_port *ap = dev->link->ap;
2099 unsigned int err_mask;
2100
efe205a3
CH
2101 if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2102 ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
fe5af0cc
HR
2103 return;
2104 }
5a233551
HR
2105 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2106 0, ap->sector_buf, 1);
fc5c8aa7 2107 if (!err_mask) {
5a233551
HR
2108 u8 *cmds = dev->ncq_send_recv_cmds;
2109
2110 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2111 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2112
2113 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2114 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2115 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2116 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2117 }
2118 }
2119}
2120
284b3b77
HR
2121static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2122{
2123 struct ata_port *ap = dev->link->ap;
2124 unsigned int err_mask;
284b3b77 2125
efe205a3 2126 if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
284b3b77
HR
2127 ata_dev_warn(dev,
2128 "NCQ Send/Recv Log not supported\n");
2129 return;
2130 }
2131 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2132 0, ap->sector_buf, 1);
fc5c8aa7 2133 if (!err_mask) {
284b3b77
HR
2134 u8 *cmds = dev->ncq_non_data_cmds;
2135
2136 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2137 }
2138}
2139
8e061784
AM
2140static void ata_dev_config_ncq_prio(struct ata_device *dev)
2141{
2142 struct ata_port *ap = dev->link->ap;
2143 unsigned int err_mask;
2144
06f6c4c6
DLM
2145 if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2146 return;
2147
8e061784 2148 err_mask = ata_read_log_page(dev,
1d51d5f3 2149 ATA_LOG_IDENTIFY_DEVICE,
8e061784
AM
2150 ATA_LOG_SATA_SETTINGS,
2151 ap->sector_buf,
2152 1);
fc5c8aa7 2153 if (err_mask)
2360fa18 2154 goto not_supported;
8e061784 2155
2360fa18
DLM
2156 if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
2157 goto not_supported;
2158
2159 dev->flags |= ATA_DFLAG_NCQ_PRIO;
2160
2161 return;
8e061784 2162
2360fa18
DLM
2163not_supported:
2164 dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
2165 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
8e061784
AM
2166}
2167
7a8526a5
KH
2168static bool ata_dev_check_adapter(struct ata_device *dev,
2169 unsigned short vendor_id)
2170{
2171 struct pci_dev *pcidev = NULL;
2172 struct device *parent_dev = NULL;
2173
2174 for (parent_dev = dev->tdev.parent; parent_dev != NULL;
2175 parent_dev = parent_dev->parent) {
2176 if (dev_is_pci(parent_dev)) {
2177 pcidev = to_pci_dev(parent_dev);
2178 if (pcidev->vendor == vendor_id)
2179 return true;
2180 break;
2181 }
2182 }
2183
2184 return false;
2185}
2186
/*
 * Configure NCQ for @dev and write a human-readable summary into
 * @desc (at most @desc_sz bytes) for the probe-time log line.
 *
 * Returns 0 on success (including "NCQ not used") and -EIO when
 * enabling FPDMA auto-activation fails with a non-device error.
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (!IS_ENABLED(CONFIG_SATA_HOST))
		return 0;
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}

	/* Some device/adapter combinations (ATI) are quirked off NCQ. */
	if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}

	if (ap->flags & ATA_FLAG_NCQ) {
		/* host queue depth caps the usable device queue depth */
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	/* Enable FPDMA auto-activation if both sides support it. */
	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
		(ap->flags & ATA_FLAG_FPDMA_AA) &&
		ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
			SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			/* device rejection is tolerated; host errors are not */
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			ddepth, aa_desc);

	/* Auxiliary FIS field required for the optional NCQ variants. */
	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
		if (ata_id_has_ncq_send_and_recv(dev->id))
			ata_dev_config_ncq_send_recv(dev);
		if (ata_id_has_ncq_non_data(dev->id))
			ata_dev_config_ncq_non_data(dev);
		if (ata_id_has_ncq_prio(dev->id))
			ata_dev_config_ncq_prio(dev);
	}

	return 0;
}
f78dea06 2251
e87fd28c
HR
2252static void ata_dev_config_sense_reporting(struct ata_device *dev)
2253{
2254 unsigned int err_mask;
2255
2256 if (!ata_id_has_sense_reporting(dev->id))
2257 return;
2258
2259 if (ata_id_sense_reporting_enabled(dev->id))
2260 return;
2261
2262 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2263 if (err_mask) {
2264 ata_dev_dbg(dev,
2265 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2266 err_mask);
2267 }
2268}
2269
/*
 * Detect zoned (ZAC) devices and cache the zone limits reported in the
 * Zoned Device Information page of the IDENTIFY DEVICE data log.
 */
static void ata_dev_config_zac(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int err_mask;
	u8 *identify_buf = ap->sector_buf;

	/* Zone limits default to "not reported" until the log says otherwise. */
	dev->zac_zones_optimal_open = U32_MAX;
	dev->zac_zones_optimal_nonseq = U32_MAX;
	dev->zac_zones_max_open = U32_MAX;

	/*
	 * Always set the 'ZAC' flag for Host-managed devices.
	 */
	if (dev->class == ATA_DEV_ZAC)
		dev->flags |= ATA_DFLAG_ZAC;
	else if (ata_id_zoned_cap(dev->id) == 0x01)
		/*
		 * Check for host-aware devices.
		 */
		dev->flags |= ATA_DFLAG_ZAC;

	if (!(dev->flags & ATA_DFLAG_ZAC))
		return;

	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
		ata_dev_warn(dev,
			     "ATA Zoned Information Log not supported\n");
		return;
	}

	/*
	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_ZONED_INFORMATION,
				     identify_buf, 1);
	if (!err_mask) {
		u64 zoned_cap, opt_open, opt_nonseq, max_open;

		/* Bit 63 of each qword marks the field as valid. */
		zoned_cap = get_unaligned_le64(&identify_buf[8]);
		if ((zoned_cap >> 63))
			dev->zac_zoned_cap = (zoned_cap & 1);
		opt_open = get_unaligned_le64(&identify_buf[24]);
		if ((opt_open >> 63))
			dev->zac_zones_optimal_open = (u32)opt_open;
		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
		if ((opt_nonseq >> 63))
			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
		max_open = get_unaligned_le64(&identify_buf[40]);
		if ((max_open >> 63))
			dev->zac_zones_max_open = (u32)max_open;
	}
}
2323
818831c8
CH
2324static void ata_dev_config_trusted(struct ata_device *dev)
2325{
2326 struct ata_port *ap = dev->link->ap;
2327 u64 trusted_cap;
2328 unsigned int err;
2329
e8f11db9
CH
2330 if (!ata_id_has_trusted(dev->id))
2331 return;
2332
818831c8
CH
2333 if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2334 ata_dev_warn(dev,
2335 "Security Log not supported\n");
2336 return;
2337 }
2338
2339 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2340 ap->sector_buf, 1);
fc5c8aa7 2341 if (err)
818831c8 2342 return;
818831c8
CH
2343
2344 trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2345 if (!(trusted_cap & (1ULL << 63))) {
2346 ata_dev_dbg(dev,
2347 "Trusted Computing capability qword not valid!\n");
2348 return;
2349 }
2350
2351 if (trusted_cap & (1 << 0))
2352 dev->flags |= ATA_DFLAG_TRUSTED;
2353}
2354
/*
 * Configure LBA addressing: set the LBA/LBA48 flags, configure NCQ and
 * print the resulting capacity/mode summary.  Returns the
 * ata_dev_config_ncq() result (0 or -errno).
 */
static int ata_dev_config_lba(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	const u16 *id = dev->id;
	const char *lba_desc;
	char ncq_desc[24];
	int ret;

	dev->flags |= ATA_DFLAG_LBA;

	if (ata_id_has_lba48(id)) {
		lba_desc = "LBA48";
		dev->flags |= ATA_DFLAG_LBA48;
		/* FLUSH CACHE EXT only matters beyond the 28-bit boundary */
		if (dev->n_sectors >= (1UL << 28) &&
		    ata_id_has_flush_ext(id))
			dev->flags |= ATA_DFLAG_FLUSH_EXT;
	} else {
		lba_desc = "LBA";
	}

	/* config NCQ */
	ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

	/* print device info to dmesg */
	if (ata_msg_drv(ap) && ata_dev_print_info(dev))
		ata_dev_info(dev,
			     "%llu sectors, multi %u: %s %s\n",
			     (unsigned long long)dev->n_sectors,
			     dev->multi_count, lba_desc, ncq_desc);

	return ret;
}
2387
2388static void ata_dev_config_chs(struct ata_device *dev)
2389{
2390 struct ata_port *ap = dev->link->ap;
2391 const u16 *id = dev->id;
2392
2393 if (ata_id_current_chs_valid(id)) {
2394 /* Current CHS translation is valid. */
2395 dev->cylinders = id[54];
2396 dev->heads = id[55];
2397 dev->sectors = id[56];
2398 } else {
2399 /* Default translation */
2400 dev->cylinders = id[1];
2401 dev->heads = id[3];
2402 dev->sectors = id[6];
2403 }
2404
2405 /* print device info to dmesg */
2406 if (ata_msg_drv(ap) && ata_dev_print_info(dev))
2407 ata_dev_info(dev,
2408 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2409 (unsigned long long)dev->n_sectors,
2410 dev->multi_count, dev->cylinders,
2411 dev->heads, dev->sectors);
2412}
2413
d8d8778c
DLM
2414static void ata_dev_config_devslp(struct ata_device *dev)
2415{
2416 u8 *sata_setting = dev->link->ap->sector_buf;
2417 unsigned int err_mask;
2418 int i, j;
2419
2420 /*
2421 * Check device sleep capability. Get DevSlp timing variables
2422 * from SATA Settings page of Identify Device Data Log.
2423 */
06f6c4c6
DLM
2424 if (!ata_id_has_devslp(dev->id) ||
2425 !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
d8d8778c
DLM
2426 return;
2427
2428 err_mask = ata_read_log_page(dev,
2429 ATA_LOG_IDENTIFY_DEVICE,
2430 ATA_LOG_SATA_SETTINGS,
2431 sata_setting, 1);
fc5c8aa7 2432 if (err_mask)
d8d8778c 2433 return;
d8d8778c
DLM
2434
2435 dev->flags |= ATA_DFLAG_DEVSLP;
2436 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2437 j = ATA_LOG_DEVSLP_OFFSET + i;
2438 dev->devslp_timing[i] = sata_setting[j];
2439 }
2440}
2441
/*
 * Read the Concurrent Positioning Ranges page (0x47) of the IDENTIFY
 * DEVICE data log and cache the range descriptors in dev->cpr_log.
 * On any failure the previous cpr_log (if any) is dropped.
 */
static void ata_dev_config_cpr(struct ata_device *dev)
{
	unsigned int err_mask;
	size_t buf_len;
	int i, nr_cpr = 0;
	struct ata_cpr_log *cpr_log = NULL;
	u8 *desc, *buf = NULL;

	if (!ata_identify_page_supported(dev,
					 ATA_LOG_CONCURRENT_POSITIONING_RANGES))
		goto out;

	/*
	 * Read IDENTIFY DEVICE data log, page 0x47
	 * (concurrent positioning ranges). We can have at most 255 32B range
	 * descriptors plus a 64B header.
	 */
	buf_len = (64 + 255 * 32 + 511) & ~511;
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		goto out;

	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_CONCURRENT_POSITIONING_RANGES,
				     buf, buf_len >> 9);
	if (err_mask)
		goto out;

	/* Byte 0 of the header holds the number of valid descriptors. */
	nr_cpr = buf[0];
	if (!nr_cpr)
		goto out;

	cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
	if (!cpr_log)
		goto out;

	cpr_log->nr_cpr = nr_cpr;
	desc = &buf[64];
	for (i = 0; i < nr_cpr; i++, desc += 32) {
		cpr_log->cpr[i].num = desc[0];
		cpr_log->cpr[i].num_storage_elements = desc[1];
		cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
		cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
	}

out:
	/* Publish the new log (or NULL on failure) and free the old one. */
	swap(dev->cpr_log, cpr_log);
	kfree(cpr_log);
	kfree(buf);
}
2492
/* Log the optional features discovered during device configuration. */
static void ata_dev_print_features(struct ata_device *dev)
{
	/* Stay silent unless at least one optional feature is present. */
	if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
		return;

	ata_dev_info(dev,
		     "Features:%s%s%s%s%s%s\n",
		     dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
		     dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
		     dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
		     dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
		     dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
		     dev->cpr_log ? " CPR" : "");
}
2507
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	bool print_info = ata_dev_print_info(dev);
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	unsigned int err_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (ap->flags & ATA_FLAG_NO_LPM)
		dev->horkage |= ATA_HORKAGE_NOLPM;

	if (dev->horkage & ATA_HORKAGE_NOLPM) {
		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
	}

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev,
			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
			    __func__,
			    id[49], id[82], id[83], id[84],
			    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(dev, id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "%s: %s, %s, max %s\n",
				     revbuf, modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask));

		if (ata_id_has_lba(id)) {
			rc = ata_dev_config_lba(dev);
			if (rc)
				return rc;
		} else {
			ata_dev_config_chs(dev);
		}

		ata_dev_config_devslp(dev);
		ata_dev_config_sense_reporting(dev);
		ata_dev_config_zac(dev);
		ata_dev_config_trusted(dev);
		ata_dev_config_cpr(dev);
		/* 32-byte CDBs allowed for ATA devices — presumably for
		 * variable-length CDB pass-through; confirm against
		 * libata-scsi usage.
		 */
		dev->cdb_len = 32;

		if (ata_msg_drv(ap) && print_info)
			ata_dev_print_features(dev);
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		if (ata_id_has_da(dev->id)) {
			dev->flags |= ATA_DFLAG_DA;
			zpodd_init(dev);
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
	return rc;
}
2798
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40, unconditionally.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
EXPORT_SYMBOL_GPL(ata_cable_40wire);
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80, unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
EXPORT_SYMBOL_GPL(ata_cable_80wire);
be0d18df
AC
2826
2827/**
2828 * ata_cable_unknown - return unknown PATA cable.
2829 * @ap: port
2830 *
2831 * Helper method for drivers which have no PATA cable detection.
2832 */
2833
2834int ata_cable_unknown(struct ata_port *ap)
2835{
2836 return ATA_CBL_PATA_UNK;
2837}
a52fbcfc 2838EXPORT_SYMBOL_GPL(ata_cable_unknown);
be0d18df 2839
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_IGN, unconditionally.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
EXPORT_SYMBOL_GPL(ata_cable_ignore);
c88f90c3 2852
be0d18df
AC
2853/**
2854 * ata_cable_sata - return SATA cable type
2855 * @ap: port
2856 *
2857 * Helper method for drivers which have SATA cables
2858 */
2859
2860int ata_cable_sata(struct ata_port *ap)
2861{
2862 return ATA_CBL_SATA;
2863}
a52fbcfc 2864EXPORT_SYMBOL_GPL(ata_cable_sata);
be0d18df 2865
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	/* Each device gets a fixed retry budget for the whole probe. */
	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types.  When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* Success if at least one device survived configuration. */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

 fail:
	/* dev points at the device whose step failed above */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		fallthrough;
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
3003
3be680b7
TH
3004/**
3005 * sata_print_link_status - Print SATA link status
936fd732 3006 * @link: SATA link to printk link status about
3be680b7
TH
3007 *
3008 * This function prints link speed and status of a SATA link.
3009 *
3010 * LOCKING:
3011 * None.
3012 */
6bdb4fc9 3013static void sata_print_link_status(struct ata_link *link)
3be680b7 3014{
6d5f9732 3015 u32 sstatus, scontrol, tmp;
3be680b7 3016
936fd732 3017 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 3018 return;
936fd732 3019 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 3020
b1c72916 3021 if (ata_phys_link_online(link)) {
3be680b7 3022 tmp = (sstatus >> 4) & 0xf;
a9a79dfe
JP
3023 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3024 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 3025 } else {
a9a79dfe
JP
3026 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3027 sstatus, scontrol);
3be680b7
TH
3028 }
3029}
3030
ebdfca6e
AC
3031/**
3032 * ata_dev_pair - return other device on cable
ebdfca6e
AC
3033 * @adev: device
3034 *
3035 * Obtain the other device on the same cable, or if none is
3036 * present NULL is returned
3037 */
2e9edbf8 3038
3373efd8 3039struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 3040{
9af5c9c9
TH
3041 struct ata_link *link = adev->link;
3042 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 3043 if (!ata_dev_enabled(pair))
ebdfca6e
AC
3044 return NULL;
3045 return pair;
3046}
a52fbcfc 3047EXPORT_SYMBOL_GPL(ata_dev_pair);
ebdfca6e 3048
1c3fae4d 3049/**
3c567b7d 3050 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 3051 * @link: Link to adjust SATA spd limit for
a07d499b 3052 * @spd_limit: Additional limit
1c3fae4d 3053 *
936fd732 3054 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 3055 * function only adjusts the limit. The change must be applied
3c567b7d 3056 * using sata_set_spd().
1c3fae4d 3057 *
a07d499b
TH
3058 * If @spd_limit is non-zero, the speed is limited to equal to or
3059 * lower than @spd_limit if such speed is supported. If
3060 * @spd_limit is slower than any supported speed, only the lowest
3061 * supported speed is allowed.
3062 *
1c3fae4d
TH
3063 * LOCKING:
3064 * Inherited from caller.
3065 *
3066 * RETURNS:
3067 * 0 on success, negative errno on failure
3068 */
a07d499b 3069int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
1c3fae4d 3070{
81952c54 3071 u32 sstatus, spd, mask;
a07d499b 3072 int rc, bit;
1c3fae4d 3073
936fd732 3074 if (!sata_scr_valid(link))
008a7896
TH
3075 return -EOPNOTSUPP;
3076
3077 /* If SCR can be read, use it to determine the current SPD.
936fd732 3078 * If not, use cached value in link->sata_spd.
008a7896 3079 */
936fd732 3080 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
9913ff8a 3081 if (rc == 0 && ata_sstatus_online(sstatus))
008a7896
TH
3082 spd = (sstatus >> 4) & 0xf;
3083 else
936fd732 3084 spd = link->sata_spd;
1c3fae4d 3085
936fd732 3086 mask = link->sata_spd_limit;
1c3fae4d
TH
3087 if (mask <= 1)
3088 return -EINVAL;
008a7896
TH
3089
3090 /* unconditionally mask off the highest bit */
a07d499b
TH
3091 bit = fls(mask) - 1;
3092 mask &= ~(1 << bit);
1c3fae4d 3093
2dc0b46b
DM
3094 /*
3095 * Mask off all speeds higher than or equal to the current one. At
3096 * this point, if current SPD is not available and we previously
3097 * recorded the link speed from SStatus, the driver has already
3098 * masked off the highest bit so mask should already be 1 or 0.
3099 * Otherwise, we should not force 1.5Gbps on a link where we have
3100 * not previously recorded speed from SStatus. Just return in this
3101 * case.
008a7896
TH
3102 */
3103 if (spd > 1)
3104 mask &= (1 << (spd - 1)) - 1;
3105 else
2dc0b46b 3106 return -EINVAL;
008a7896
TH
3107
3108 /* were we already at the bottom? */
1c3fae4d
TH
3109 if (!mask)
3110 return -EINVAL;
3111
a07d499b
TH
3112 if (spd_limit) {
3113 if (mask & ((1 << spd_limit) - 1))
3114 mask &= (1 << spd_limit) - 1;
3115 else {
3116 bit = ffs(mask) - 1;
3117 mask = 1 << bit;
3118 }
3119 }
3120
936fd732 3121 link->sata_spd_limit = mask;
1c3fae4d 3122
a9a79dfe
JP
3123 ata_link_warn(link, "limiting SATA link speed to %s\n",
3124 sata_spd_string(fls(mask)));
1c3fae4d
TH
3125
3126 return 0;
3127}
3128
#ifdef CONFIG_ATA_ACPI
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* map the transfer shift to the base mode of that transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* scan the timing entries of the requested type, keeping the
	 * last mode whose cycle duration is no shorter than @cycle */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short mode_cycle;

		if (xfer_shift == ATA_SHIFT_UDMA)
			mode_cycle = t->udma;
		else if (xfer_shift == ATA_SHIFT_PIO ||
			 xfer_shift == ATA_SHIFT_MWDMA)
			mode_cycle = t->cycle;
		else
			return 0xff;

		if (cycle > mode_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
#endif
cf176e1a
TH
3182/**
3183 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 3184 * @dev: Device to adjust xfer masks
458337db 3185 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
3186 *
3187 * Adjust xfer masks of @dev downward. Note that this function
3188 * does not apply the change. Invoking ata_set_mode() afterwards
3189 * will apply the limit.
3190 *
3191 * LOCKING:
3192 * Inherited from caller.
3193 *
3194 * RETURNS:
3195 * 0 on success, negative errno on failure
3196 */
458337db 3197int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 3198{
458337db 3199 char buf[32];
7dc951ae
TH
3200 unsigned long orig_mask, xfer_mask;
3201 unsigned long pio_mask, mwdma_mask, udma_mask;
458337db 3202 int quiet, highbit;
cf176e1a 3203
458337db
TH
3204 quiet = !!(sel & ATA_DNXFER_QUIET);
3205 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 3206
458337db
TH
3207 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3208 dev->mwdma_mask,
3209 dev->udma_mask);
3210 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 3211
458337db
TH
3212 switch (sel) {
3213 case ATA_DNXFER_PIO:
3214 highbit = fls(pio_mask) - 1;
3215 pio_mask &= ~(1 << highbit);
3216 break;
3217
3218 case ATA_DNXFER_DMA:
3219 if (udma_mask) {
3220 highbit = fls(udma_mask) - 1;
3221 udma_mask &= ~(1 << highbit);
3222 if (!udma_mask)
3223 return -ENOENT;
3224 } else if (mwdma_mask) {
3225 highbit = fls(mwdma_mask) - 1;
3226 mwdma_mask &= ~(1 << highbit);
3227 if (!mwdma_mask)
3228 return -ENOENT;
3229 }
3230 break;
3231
3232 case ATA_DNXFER_40C:
3233 udma_mask &= ATA_UDMA_MASK_40C;
3234 break;
3235
3236 case ATA_DNXFER_FORCE_PIO0:
3237 pio_mask &= 1;
df561f66 3238 fallthrough;
458337db
TH
3239 case ATA_DNXFER_FORCE_PIO:
3240 mwdma_mask = 0;
3241 udma_mask = 0;
3242 break;
3243
458337db
TH
3244 default:
3245 BUG();
3246 }
3247
3248 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3249
3250 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3251 return -ENOENT;
3252
3253 if (!quiet) {
3254 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3255 snprintf(buf, sizeof(buf), "%s:%s",
3256 ata_mode_string(xfer_mask),
3257 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3258 else
3259 snprintf(buf, sizeof(buf), "%s",
3260 ata_mode_string(xfer_mask));
3261
a9a79dfe 3262 ata_dev_warn(dev, "limiting speed to %s\n", buf);
458337db 3263 }
cf176e1a
TH
3264
3265 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3266 &dev->udma_mask);
3267
cf176e1a 3268 return 0;
cf176e1a
TH
3269}
3270
3373efd8 3271static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 3272{
d0cb43b3 3273 struct ata_port *ap = dev->link->ap;
9af5c9c9 3274 struct ata_eh_context *ehc = &dev->link->eh_context;
d0cb43b3 3275 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
4055dee7
TH
3276 const char *dev_err_whine = "";
3277 int ign_dev_err = 0;
d0cb43b3 3278 unsigned int err_mask = 0;
83206a29 3279 int rc;
1da177e4 3280
e8384607 3281 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
3282 if (dev->xfer_shift == ATA_SHIFT_PIO)
3283 dev->flags |= ATA_DFLAG_PIO;
3284
d0cb43b3
TH
3285 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3286 dev_err_whine = " (SET_XFERMODE skipped)";
3287 else {
3288 if (nosetxfer)
a9a79dfe
JP
3289 ata_dev_warn(dev,
3290 "NOSETXFER but PATA detected - can't "
3291 "skip SETXFER, might malfunction\n");
d0cb43b3
TH
3292 err_mask = ata_dev_set_xfermode(dev);
3293 }
2dcb407e 3294
4055dee7
TH
3295 if (err_mask & ~AC_ERR_DEV)
3296 goto fail;
3297
3298 /* revalidate */
3299 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3300 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3301 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3302 if (rc)
3303 return rc;
3304
b93fda12
AC
3305 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3306 /* Old CFA may refuse this command, which is just fine */
3307 if (ata_id_is_cfa(dev->id))
3308 ign_dev_err = 1;
3309 /* Catch several broken garbage emulations plus some pre
3310 ATA devices */
3311 if (ata_id_major_version(dev->id) == 0 &&
3312 dev->pio_mode <= XFER_PIO_2)
3313 ign_dev_err = 1;
3314 /* Some very old devices and some bad newer ones fail
3315 any kind of SET_XFERMODE request but support PIO0-2
3316 timings and no IORDY */
3317 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3318 ign_dev_err = 1;
3319 }
3acaf94b
AC
3320 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3321 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
c5038fc0 3322 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3acaf94b
AC
3323 dev->dma_mode == XFER_MW_DMA_0 &&
3324 (dev->id[63] >> 8) & 1)
4055dee7 3325 ign_dev_err = 1;
3acaf94b 3326
4055dee7
TH
3327 /* if the device is actually configured correctly, ignore dev err */
3328 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3329 ign_dev_err = 1;
1da177e4 3330
4055dee7
TH
3331 if (err_mask & AC_ERR_DEV) {
3332 if (!ign_dev_err)
3333 goto fail;
3334 else
3335 dev_err_whine = " (device error ignored)";
3336 }
48a8a14f 3337
4633778b
HR
3338 ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
3339 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 3340
07b9b6d6
DLM
3341 if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3342 ehc->i.flags & ATA_EHI_DID_HARDRESET)
3343 ata_dev_info(dev, "configured for %s%s\n",
3344 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3345 dev_err_whine);
4055dee7 3346
83206a29 3347 return 0;
4055dee7
TH
3348
3349 fail:
a9a79dfe 3350 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
4055dee7 3351 return -EIO;
1da177e4
LT
3352}
3353
1da177e4 3354/**
04351821 3355 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3356 * @link: link on which timings will be programmed
1967b7ff 3357 * @r_failed_dev: out parameter for failed device
1da177e4 3358 *
04351821
A
3359 * Standard implementation of the function used to tune and set
3360 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3361 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3362 * returned in @r_failed_dev.
780a87f7 3363 *
1da177e4 3364 * LOCKING:
0cba632b 3365 * PCI/etc. bus probe sem.
e82cbdb9
TH
3366 *
3367 * RETURNS:
3368 * 0 on success, negative errno otherwise
1da177e4 3369 */
04351821 3370
0260731f 3371int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3372{
0260731f 3373 struct ata_port *ap = link->ap;
e8e0619f 3374 struct ata_device *dev;
f58229f8 3375 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3376
a6d5a51c 3377 /* step 1: calculate xfer_mask */
1eca4365 3378 ata_for_each_dev(dev, link, ENABLED) {
7dc951ae 3379 unsigned long pio_mask, dma_mask;
b3a70601 3380 unsigned int mode_mask;
a6d5a51c 3381
b3a70601
AC
3382 mode_mask = ATA_DMA_MASK_ATA;
3383 if (dev->class == ATA_DEV_ATAPI)
3384 mode_mask = ATA_DMA_MASK_ATAPI;
3385 else if (ata_id_is_cfa(dev->id))
3386 mode_mask = ATA_DMA_MASK_CFA;
3387
3373efd8 3388 ata_dev_xfermask(dev);
33267325 3389 ata_force_xfermask(dev);
1da177e4 3390
acf356b1 3391 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
b3a70601
AC
3392
3393 if (libata_dma_mask & mode_mask)
80a9c430
SS
3394 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3395 dev->udma_mask);
b3a70601
AC
3396 else
3397 dma_mask = 0;
3398
acf356b1
TH
3399 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3400 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3401
4f65977d 3402 found = 1;
b15b3eba 3403 if (ata_dma_enabled(dev))
5444a6f4 3404 used_dma = 1;
a6d5a51c 3405 }
4f65977d 3406 if (!found)
e82cbdb9 3407 goto out;
a6d5a51c
TH
3408
3409 /* step 2: always set host PIO timings */
1eca4365 3410 ata_for_each_dev(dev, link, ENABLED) {
70cd071e 3411 if (dev->pio_mode == 0xff) {
a9a79dfe 3412 ata_dev_warn(dev, "no PIO support\n");
e8e0619f 3413 rc = -EINVAL;
e82cbdb9 3414 goto out;
e8e0619f
TH
3415 }
3416
3417 dev->xfer_mode = dev->pio_mode;
3418 dev->xfer_shift = ATA_SHIFT_PIO;
3419 if (ap->ops->set_piomode)
3420 ap->ops->set_piomode(ap, dev);
3421 }
1da177e4 3422
a6d5a51c 3423 /* step 3: set host DMA timings */
1eca4365
TH
3424 ata_for_each_dev(dev, link, ENABLED) {
3425 if (!ata_dma_enabled(dev))
e8e0619f
TH
3426 continue;
3427
3428 dev->xfer_mode = dev->dma_mode;
3429 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3430 if (ap->ops->set_dmamode)
3431 ap->ops->set_dmamode(ap, dev);
3432 }
1da177e4
LT
3433
3434 /* step 4: update devices' xfer mode */
1eca4365 3435 ata_for_each_dev(dev, link, ENABLED) {
3373efd8 3436 rc = ata_dev_set_mode(dev);
5bbc53f4 3437 if (rc)
e82cbdb9 3438 goto out;
83206a29 3439 }
1da177e4 3440
e8e0619f
TH
3441 /* Record simplex status. If we selected DMA then the other
3442 * host channels are not permitted to do so.
5444a6f4 3443 */
cca3974e 3444 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3445 ap->host->simplex_claimed = ap;
5444a6f4 3446
e82cbdb9
TH
3447 out:
3448 if (rc)
3449 *r_failed_dev = dev;
3450 return rc;
1da177e4 3451}
a52fbcfc 3452EXPORT_SYMBOL_GPL(ata_do_set_mode);
1da177e4 3453
aa2731ad
TH
3454/**
3455 * ata_wait_ready - wait for link to become ready
3456 * @link: link to be waited on
3457 * @deadline: deadline jiffies for the operation
3458 * @check_ready: callback to check link readiness
3459 *
3460 * Wait for @link to become ready. @check_ready should return
3461 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3462 * link doesn't seem to be occupied, other errno for other error
3463 * conditions.
3464 *
3465 * Transient -ENODEV conditions are allowed for
3466 * ATA_TMOUT_FF_WAIT.
3467 *
3468 * LOCKING:
3469 * EH context.
3470 *
3471 * RETURNS:
c9b5560a 3472 * 0 if @link is ready before @deadline; otherwise, -errno.
aa2731ad
TH
3473 */
3474int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3475 int (*check_ready)(struct ata_link *link))
3476{
3477 unsigned long start = jiffies;
b48d58f5 3478 unsigned long nodev_deadline;
aa2731ad
TH
3479 int warned = 0;
3480
b48d58f5
TH
3481 /* choose which 0xff timeout to use, read comment in libata.h */
3482 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3483 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3484 else
3485 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3486
b1c72916
TH
3487 /* Slave readiness can't be tested separately from master. On
3488 * M/S emulation configuration, this function should be called
3489 * only on the master and it will handle both master and slave.
3490 */
3491 WARN_ON(link == link->ap->slave_link);
3492
aa2731ad
TH
3493 if (time_after(nodev_deadline, deadline))
3494 nodev_deadline = deadline;
3495
3496 while (1) {
3497 unsigned long now = jiffies;
3498 int ready, tmp;
3499
3500 ready = tmp = check_ready(link);
3501 if (ready > 0)
3502 return 0;
3503
b48d58f5
TH
3504 /*
3505 * -ENODEV could be transient. Ignore -ENODEV if link
aa2731ad 3506 * is online. Also, some SATA devices take a long
b48d58f5
TH
3507 * time to clear 0xff after reset. Wait for
3508 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3509 * offline.
aa2731ad
TH
3510 *
3511 * Note that some PATA controllers (pata_ali) explode
3512 * if status register is read more than once when
3513 * there's no device attached.
3514 */
3515 if (ready == -ENODEV) {
3516 if (ata_link_online(link))
3517 ready = 0;
3518 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3519 !ata_link_offline(link) &&
3520 time_before(now, nodev_deadline))
3521 ready = 0;
3522 }
3523
3524 if (ready)
3525 return ready;
3526 if (time_after(now, deadline))
3527 return -EBUSY;
3528
3529 if (!warned && time_after(now, start + 5 * HZ) &&
3530 (deadline - now > 3 * HZ)) {
a9a79dfe 3531 ata_link_warn(link,
aa2731ad
TH
3532 "link is slow to respond, please be patient "
3533 "(ready=%d)\n", tmp);
3534 warned = 1;
3535 }
3536
97750ceb 3537 ata_msleep(link->ap, 50);
aa2731ad
TH
3538 }
3539}
3540
3541/**
3542 * ata_wait_after_reset - wait for link to become ready after reset
3543 * @link: link to be waited on
3544 * @deadline: deadline jiffies for the operation
3545 * @check_ready: callback to check link readiness
3546 *
3547 * Wait for @link to become ready after reset.
3548 *
3549 * LOCKING:
3550 * EH context.
3551 *
3552 * RETURNS:
c9b5560a 3553 * 0 if @link is ready before @deadline; otherwise, -errno.
aa2731ad 3554 */
2b4221bb 3555int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
aa2731ad
TH
3556 int (*check_ready)(struct ata_link *link))
3557{
97750ceb 3558 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
aa2731ad
TH
3559
3560 return ata_wait_ready(link, deadline, check_ready);
3561}
a52fbcfc 3562EXPORT_SYMBOL_GPL(ata_wait_after_reset);
aa2731ad 3563
f5914a46 3564/**
0aa1113d 3565 * ata_std_prereset - prepare for reset
cc0680a5 3566 * @link: ATA link to be reset
d4b2bab4 3567 * @deadline: deadline jiffies for the operation
f5914a46 3568 *
cc0680a5 3569 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3570 * prereset makes libata abort whole reset sequence and give up
3571 * that port, so prereset should be best-effort. It does its
3572 * best to prepare for reset sequence but if things go wrong, it
3573 * should just whine, not fail.
f5914a46
TH
3574 *
3575 * LOCKING:
3576 * Kernel thread context (may sleep)
3577 *
3578 * RETURNS:
3579 * 0 on success, -errno otherwise.
3580 */
0aa1113d 3581int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3582{
cc0680a5 3583 struct ata_port *ap = link->ap;
936fd732 3584 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3585 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3586 int rc;
3587
f5914a46
TH
3588 /* if we're about to do hardreset, nothing more to do */
3589 if (ehc->i.action & ATA_EH_HARDRESET)
3590 return 0;
3591
936fd732 3592 /* if SATA, resume link */
a16abc0b 3593 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3594 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3595 /* whine about phy resume failure but proceed */
3596 if (rc && rc != -EOPNOTSUPP)
a9a79dfe
JP
3597 ata_link_warn(link,
3598 "failed to resume link for reset (errno=%d)\n",
3599 rc);
f5914a46
TH
3600 }
3601
45db2f6c 3602 /* no point in trying softreset on offline link */
b1c72916 3603 if (ata_phys_link_offline(link))
45db2f6c
TH
3604 ehc->i.action &= ~ATA_EH_SOFTRESET;
3605
f5914a46
TH
3606 return 0;
3607}
a52fbcfc 3608EXPORT_SYMBOL_GPL(ata_std_prereset);
f5914a46 3609
57c9efdf
TH
3610/**
3611 * sata_std_hardreset - COMRESET w/o waiting or classification
3612 * @link: link to reset
3613 * @class: resulting class of attached device
3614 * @deadline: deadline jiffies for the operation
3615 *
3616 * Standard SATA COMRESET w/o waiting or classification.
3617 *
3618 * LOCKING:
3619 * Kernel thread context (may sleep)
3620 *
3621 * RETURNS:
3622 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3623 */
3624int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3625 unsigned long deadline)
3626{
3627 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3628 bool online;
3629 int rc;
3630
3631 /* do hardreset */
3632 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
57c9efdf
TH
3633 return online ? -EAGAIN : rc;
3634}
a52fbcfc 3635EXPORT_SYMBOL_GPL(sata_std_hardreset);
57c9efdf 3636
c2bd5804 3637/**
203c75b8 3638 * ata_std_postreset - standard postreset callback
cc0680a5 3639 * @link: the target ata_link
c2bd5804
TH
3640 * @classes: classes of attached devices
3641 *
3642 * This function is invoked after a successful reset. Note that
3643 * the device might have been reset more than once using
3644 * different reset methods before postreset is invoked.
c2bd5804 3645 *
c2bd5804
TH
3646 * LOCKING:
3647 * Kernel thread context (may sleep)
3648 */
203c75b8 3649void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3650{
f046519f
TH
3651 u32 serror;
3652
f046519f
TH
3653 /* reset complete, clear SError */
3654 if (!sata_scr_read(link, SCR_ERROR, &serror))
3655 sata_scr_write(link, SCR_ERROR, serror);
3656
c2bd5804 3657 /* print link status */
936fd732 3658 sata_print_link_status(link);
c2bd5804 3659}
a52fbcfc 3660EXPORT_SYMBOL_GPL(ata_std_postreset);
c2bd5804 3661
623a3128
TH
3662/**
3663 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3664 * @dev: device to compare against
3665 * @new_class: class of the new device
3666 * @new_id: IDENTIFY page of the new device
3667 *
3668 * Compare @new_class and @new_id against @dev and determine
3669 * whether @dev is the device indicated by @new_class and
3670 * @new_id.
3671 *
3672 * LOCKING:
3673 * None.
3674 *
3675 * RETURNS:
3676 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3677 */
3373efd8
TH
3678static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3679 const u16 *new_id)
623a3128
TH
3680{
3681 const u16 *old_id = dev->id;
a0cf733b
TH
3682 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3683 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3684
3685 if (dev->class != new_class) {
a9a79dfe
JP
3686 ata_dev_info(dev, "class mismatch %d != %d\n",
3687 dev->class, new_class);
623a3128
TH
3688 return 0;
3689 }
3690
a0cf733b
TH
3691 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3692 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3693 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3694 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3695
3696 if (strcmp(model[0], model[1])) {
a9a79dfe
JP
3697 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3698 model[0], model[1]);
623a3128
TH
3699 return 0;
3700 }
3701
3702 if (strcmp(serial[0], serial[1])) {
a9a79dfe
JP
3703 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3704 serial[0], serial[1]);
623a3128
TH
3705 return 0;
3706 }
3707
623a3128
TH
3708 return 1;
3709}
3710
3711/**
fe30911b 3712 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3713 * @dev: target ATA device
bff04647 3714 * @readid_flags: read ID flags
623a3128
TH
3715 *
3716 * Re-read IDENTIFY page and make sure @dev is still attached to
3717 * the port.
3718 *
3719 * LOCKING:
3720 * Kernel thread context (may sleep)
3721 *
3722 * RETURNS:
3723 * 0 on success, negative errno otherwise
3724 */
fe30911b 3725int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3726{
5eb45c02 3727 unsigned int class = dev->class;
9af5c9c9 3728 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3729 int rc;
3730
fe635c7e 3731 /* read ID data */
bff04647 3732 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3733 if (rc)
fe30911b 3734 return rc;
623a3128
TH
3735
3736 /* is the device still there? */
fe30911b
TH
3737 if (!ata_dev_same_device(dev, class, id))
3738 return -ENODEV;
623a3128 3739
fe635c7e 3740 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3741 return 0;
3742}
3743
3744/**
3745 * ata_dev_revalidate - Revalidate ATA device
3746 * @dev: device to revalidate
422c9daa 3747 * @new_class: new class code
fe30911b
TH
3748 * @readid_flags: read ID flags
3749 *
3750 * Re-read IDENTIFY page, make sure @dev is still attached to the
3751 * port and reconfigure it according to the new IDENTIFY page.
3752 *
3753 * LOCKING:
3754 * Kernel thread context (may sleep)
3755 *
3756 * RETURNS:
3757 * 0 on success, negative errno otherwise
3758 */
422c9daa
TH
3759int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3760 unsigned int readid_flags)
fe30911b 3761{
6ddcd3b0 3762 u64 n_sectors = dev->n_sectors;
5920dadf 3763 u64 n_native_sectors = dev->n_native_sectors;
fe30911b
TH
3764 int rc;
3765
3766 if (!ata_dev_enabled(dev))
3767 return -ENODEV;
3768
422c9daa
TH
3769 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3770 if (ata_class_enabled(new_class) &&
f0d0613d
BP
3771 new_class != ATA_DEV_ATA &&
3772 new_class != ATA_DEV_ATAPI &&
9162c657 3773 new_class != ATA_DEV_ZAC &&
f0d0613d 3774 new_class != ATA_DEV_SEMB) {
a9a79dfe
JP
3775 ata_dev_info(dev, "class mismatch %u != %u\n",
3776 dev->class, new_class);
422c9daa
TH
3777 rc = -ENODEV;
3778 goto fail;
3779 }
3780
fe30911b
TH
3781 /* re-read ID */
3782 rc = ata_dev_reread_id(dev, readid_flags);
3783 if (rc)
3784 goto fail;
623a3128
TH
3785
3786 /* configure device according to the new ID */
efdaedc4 3787 rc = ata_dev_configure(dev);
6ddcd3b0
TH
3788 if (rc)
3789 goto fail;
3790
3791 /* verify n_sectors hasn't changed */
445d211b
TH
3792 if (dev->class != ATA_DEV_ATA || !n_sectors ||
3793 dev->n_sectors == n_sectors)
3794 return 0;
3795
3796 /* n_sectors has changed */
a9a79dfe
JP
3797 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3798 (unsigned long long)n_sectors,
3799 (unsigned long long)dev->n_sectors);
445d211b
TH
3800
3801 /*
3802 * Something could have caused HPA to be unlocked
3803 * involuntarily. If n_native_sectors hasn't changed and the
3804 * new size matches it, keep the device.
3805 */
3806 if (dev->n_native_sectors == n_native_sectors &&
3807 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
a9a79dfe
JP
3808 ata_dev_warn(dev,
3809 "new n_sectors matches native, probably "
3810 "late HPA unlock, n_sectors updated\n");
68939ce5 3811 /* use the larger n_sectors */
445d211b 3812 return 0;
6ddcd3b0
TH
3813 }
3814
445d211b
TH
3815 /*
3816 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
3817 * unlocking HPA in those cases.
3818 *
3819 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
3820 */
3821 if (dev->n_native_sectors == n_native_sectors &&
3822 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
3823 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
a9a79dfe
JP
3824 ata_dev_warn(dev,
3825 "old n_sectors matches native, probably "
3826 "late HPA lock, will try to unlock HPA\n");
445d211b
TH
3827 /* try unlocking HPA */
3828 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
3829 rc = -EIO;
3830 } else
3831 rc = -ENODEV;
623a3128 3832
445d211b
TH
3833 /* restore original n_[native_]sectors and fail */
3834 dev->n_native_sectors = n_native_sectors;
3835 dev->n_sectors = n_sectors;
623a3128 3836 fail:
a9a79dfe 3837 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3838 return rc;
3839}
3840
/*
 * One entry in the device blacklist table below.  model_num/model_rev
 * are matched against the device's IDENTIFY strings (entries such as
 * "CRD-848[02]B" suggest glob-style patterns; NULL rev matches any),
 * and the listed ATA_HORKAGE_* flags are applied on a match.
 */
struct ata_blacklist_entry {
	const char *model_num;		/* model string to match */
	const char *model_rev;		/* firmware revision, NULL = any */
	unsigned long horkage;		/* ATA_HORKAGE_* flags to apply */
};
3847static const struct ata_blacklist_entry ata_device_blacklist [] = {
3848 /* Devices with DMA related problems under Linux */
3849 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3850 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3851 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3852 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3853 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3854 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3855 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3856 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3857 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
7da4c935 3858 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3859 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3860 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3861 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3862 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3863 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
7da4c935 3864 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3865 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3866 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3867 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3868 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3869 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3870 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3871 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3872 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3873 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3874 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 3875 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 3876 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
d17d794c 3877 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
b00622fc 3878 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
3af9a77a 3879 /* Odd clown on sil3726/4726 PMPs */
50af2fa1 3880 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
a66307d4
HR
3881 /* Similar story with ASMedia 1092 */
3882 { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
6919a0a6 3883
18d6e9d5 3884 /* Weird ATAPI devices */
40a1d531 3885 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
6a87e42e 3886 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
a32450e1 3887 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
0523f037 3888 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
18d6e9d5 3889
af34d637
DM
3890 /*
3891 * Causes silent data corruption with higher max sects.
3892 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
3893 */
3894 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
1488a1e3
TH
3895
3896 /*
e0edc8c5 3897 * These devices time out with higher max sects.
1488a1e3
TH
3898 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
3899 */
e0edc8c5 3900 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
db5ff909 3901 { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
af34d637 3902
6919a0a6
AC
3903 /* Devices we expect to fail diagnostics */
3904
3905 /* Devices where NCQ should be avoided */
3906 /* NCQ is slow */
2dcb407e 3907 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
459ad688 3908 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
09125ea6
TH
3909 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3910 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 3911 /* NCQ is broken */
539cc7c7 3912 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 3913 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
da6f0ec2 3914 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
e41bd3e8 3915 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
5ccfca97 3916 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
539cc7c7 3917
ac70a964 3918 /* Seagate NCQ + FLUSH CACHE firmware bug */
4d1f9082 3919 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ac70a964 3920 ATA_HORKAGE_FIRMWARE_WARN },
d10d491f 3921
4d1f9082 3922 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
d10d491f
TH
3923 ATA_HORKAGE_FIRMWARE_WARN },
3924
4d1f9082 3925 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
d10d491f
TH
3926 ATA_HORKAGE_FIRMWARE_WARN },
3927
4d1f9082 3928 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ac70a964
TH
3929 ATA_HORKAGE_FIRMWARE_WARN },
3930
31f6264e
HG
3931 /* drives which fail FPDMA_AA activation (some may freeze afterwards)
3932 the ST disks also have LPM issues */
8756a25b 3933 { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
31f6264e 3934 ATA_HORKAGE_NOLPM, },
08c85d2a 3935 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
87809942 3936
36e337d0
RH
3937 /* Blacklist entries taken from Silicon Image 3124/3132
3938 Windows driver .inf file - also several Linux problem reports */
3939 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3940 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3941 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6 3942
68b0ddb2
TH
3943 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
3944 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
3945
3b545563
TH
3946 /* Sandisk SD7/8/9s lock up hard on large trims */
3947 { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, },
322579dc 3948
16c55b03
TH
3949 /* devices which puke on READ_NATIVE_MAX */
3950 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3951 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3952 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3953 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 3954
7831387b
TH
3955 /* this one allows HPA unlocking but fails IOs on the area */
3956 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
3957
93328e11
AC
3958 /* Devices which report 1 sector over size HPA */
3959 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
3960 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
b152fcd3 3961 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
93328e11 3962
6bbfd53d
AC
3963 /* Devices which get the IVB wrong */
3964 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
a79067e5 3965 /* Maybe we should just blacklist TSSTcorp... */
7da4c935 3966 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
6bbfd53d 3967
9ce8e307
JA
3968 /* Devices that do not need bridging limits applied */
3969 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
04d0f1b8 3970 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
9ce8e307 3971
9062712f
TH
3972 /* Devices which aren't very happy with higher link speeds */
3973 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
c531077f 3974 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
9062712f 3975
d0cb43b3
TH
3976 /*
3977 * Devices which choke on SETXFER. Applies only if both the
3978 * device and controller are SATA.
3979 */
cd691876 3980 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
3a25179e
VL
3981 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
3982 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
cd691876
TH
3983 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
3984 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
d0cb43b3 3985
b17e5729 3986 /* Crucial BX100 SSD 500GB has broken LPM support */
3bf7b5d6 3987 { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
b17e5729 3988
d418ff56
HG
3989 /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
3990 { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
9c7be59f
HG
3991 ATA_HORKAGE_ZERO_AFTER_TRIM |
3992 ATA_HORKAGE_NOLPM, },
d418ff56
HG
3993 /* 512GB MX100 with newer firmware has only LPM issues */
3994 { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
3995 ATA_HORKAGE_NOLPM, },
9c7be59f 3996
62ac3f73
HG
3997 /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
3998 { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
3999 ATA_HORKAGE_ZERO_AFTER_TRIM |
4000 ATA_HORKAGE_NOLPM, },
4001 { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4002 ATA_HORKAGE_ZERO_AFTER_TRIM |
4003 ATA_HORKAGE_NOLPM, },
4004
76936e9a 4005 /* These specific Samsung models/firmware-revs do not handle LPM well */
b5b4d3a5 4006 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
76936e9a 4007 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
410b5c7b 4008 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
dd957493 4009 { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
b5b4d3a5 4010
f78dea06 4011 /* devices that don't properly handle queued TRIM commands */
136d769e
SM
4012 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4013 ATA_HORKAGE_ZERO_AFTER_TRIM, },
243918be 4014 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ff7f53fb
MP
4015 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4016 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4017 ATA_HORKAGE_ZERO_AFTER_TRIM, },
9051bd39 4018 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ff7f53fb
MP
4019 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4020 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4021 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4022 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
e61f7d1c 4023 ATA_HORKAGE_ZERO_AFTER_TRIM, },
ca6bfcb2
JHP
4024 { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4025 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4026 { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
6fc4d97a 4027 ATA_HORKAGE_ZERO_AFTER_TRIM, },
8a6430ab 4028 { "Samsung SSD 860*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
7a8526a5
KH
4029 ATA_HORKAGE_ZERO_AFTER_TRIM |
4030 ATA_HORKAGE_NO_NCQ_ON_ATI, },
8a6430ab 4031 { "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
7a8526a5
KH
4032 ATA_HORKAGE_ZERO_AFTER_TRIM |
4033 ATA_HORKAGE_NO_NCQ_ON_ATI, },
7a7184b0
GA
4034 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4035 ATA_HORKAGE_ZERO_AFTER_TRIM, },
e61f7d1c 4036
cda57b1b
AF
4037 /* devices that don't properly handle TRIM commands */
4038 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
4039
e61f7d1c
MP
4040 /*
4041 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4042 * (Return Zero After Trim) flags in the ATA Command Set are
4043 * unreliable in the sense that they only define what happens if
4044 * the device successfully executed the DSM TRIM command. TRIM
4045 * is only advisory, however, and the device is free to silently
4046 * ignore all or parts of the request.
4047 *
4048 * Whitelist drives that are known to reliably return zeroes
4049 * after TRIM.
4050 */
4051
4052 /*
4053 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4054 * that model before whitelisting all other intel SSDs.
4055 */
4056 { "INTEL*SSDSC2MH*", NULL, 0, },
4057
ff7f53fb
MP
4058 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4059 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
e61f7d1c
MP
4060 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4061 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4062 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4063 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
fd6f32f7 4064 { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
e61f7d1c 4065 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
f78dea06 4066
ecd75ad5
TH
4067 /*
4068 * Some WD SATA-I drives spin up and down erratically when the link
4069 * is put into the slumber mode. We don't have full list of the
4070 * affected devices. Disable LPM if the device matches one of the
4071 * known prefixes and is SATA-1. As a side effect LPM partial is
4072 * lost too.
4073 *
4074 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4075 */
4076 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4077 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4078 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4079 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4080 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4081 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4082 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4083
6919a0a6
AC
4084 /* End Marker */
4085 { }
1da177e4 4086};
2e9edbf8 4087
75683fe7 4088static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4089{
8bfa79fc
TH
4090 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4091 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4092 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4093
8bfa79fc
TH
4094 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4095 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4096
6919a0a6 4097 while (ad->model_num) {
1c402799 4098 if (glob_match(ad->model_num, model_num)) {
6919a0a6
AC
4099 if (ad->model_rev == NULL)
4100 return ad->horkage;
1c402799 4101 if (glob_match(ad->model_rev, model_rev))
6919a0a6 4102 return ad->horkage;
f4b15fef 4103 }
6919a0a6 4104 ad++;
f4b15fef 4105 }
1da177e4
LT
4106 return 0;
4107}
4108
6919a0a6
AC
4109static int ata_dma_blacklisted(const struct ata_device *dev)
4110{
4111 /* We don't support polling DMA.
4112 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4113 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4114 */
9af5c9c9 4115 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4116 (dev->flags & ATA_DFLAG_CDB_INTR))
4117 return 1;
75683fe7 4118 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4119}
4120
6bbfd53d
AC
4121/**
4122 * ata_is_40wire - check drive side detection
4123 * @dev: device
4124 *
4125 * Perform drive side detection decoding, allowing for device vendors
4126 * who can't follow the documentation.
4127 */
4128
4129static int ata_is_40wire(struct ata_device *dev)
4130{
4131 if (dev->horkage & ATA_HORKAGE_IVB)
4132 return ata_drive_40wire_relaxed(dev->id);
4133 return ata_drive_40wire(dev->id);
4134}
4135
15a5551c
AC
4136/**
4137 * cable_is_40wire - 40/80/SATA decider
4138 * @ap: port to consider
4139 *
4140 * This function encapsulates the policy for speed management
4141 * in one place. At the moment we don't cache the result but
4142 * there is a good case for setting ap->cbl to the result when
4143 * we are called with unknown cables (and figuring out if it
4144 * impacts hotplug at all).
4145 *
4146 * Return 1 if the cable appears to be 40 wire.
4147 */
4148
4149static int cable_is_40wire(struct ata_port *ap)
4150{
4151 struct ata_link *link;
4152 struct ata_device *dev;
4153
4a9c7b33 4154 /* If the controller thinks we are 40 wire, we are. */
15a5551c
AC
4155 if (ap->cbl == ATA_CBL_PATA40)
4156 return 1;
4a9c7b33
TH
4157
4158 /* If the controller thinks we are 80 wire, we are. */
15a5551c
AC
4159 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4160 return 0;
4a9c7b33
TH
4161
4162 /* If the system is known to be 40 wire short cable (eg
4163 * laptop), then we allow 80 wire modes even if the drive
4164 * isn't sure.
4165 */
f792068e
AC
4166 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4167 return 0;
4a9c7b33
TH
4168
4169 /* If the controller doesn't know, we scan.
4170 *
4171 * Note: We look for all 40 wire detects at this point. Any
4172 * 80 wire detect is taken to be 80 wire cable because
4173 * - in many setups only the one drive (slave if present) will
4174 * give a valid detect
4175 * - if you have a non detect capable drive you don't want it
4176 * to colour the choice
4177 */
1eca4365
TH
4178 ata_for_each_link(link, ap, EDGE) {
4179 ata_for_each_dev(dev, link, ENABLED) {
4180 if (!ata_is_40wire(dev))
15a5551c
AC
4181 return 0;
4182 }
4183 }
4184 return 1;
4185}
4186
a6d5a51c
TH
4187/**
4188 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4189 * @dev: Device to compute xfermask for
4190 *
acf356b1
TH
4191 * Compute supported xfermask of @dev and store it in
4192 * dev->*_mask. This function is responsible for applying all
4193 * known limits including host controller limits, device
4194 * blacklist, etc...
a6d5a51c
TH
4195 *
4196 * LOCKING:
4197 * None.
a6d5a51c 4198 */
3373efd8 4199static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4200{
9af5c9c9
TH
4201 struct ata_link *link = dev->link;
4202 struct ata_port *ap = link->ap;
cca3974e 4203 struct ata_host *host = ap->host;
a6d5a51c 4204 unsigned long xfer_mask;
1da177e4 4205
37deecb5 4206 /* controller modes available */
565083e1
TH
4207 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4208 ap->mwdma_mask, ap->udma_mask);
4209
8343f889 4210 /* drive modes available */
37deecb5
TH
4211 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4212 dev->mwdma_mask, dev->udma_mask);
4213 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4214
b352e57d
AC
4215 /*
4216 * CFA Advanced TrueIDE timings are not allowed on a shared
4217 * cable
4218 */
4219 if (ata_dev_pair(dev)) {
4220 /* No PIO5 or PIO6 */
4221 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4222 /* No MWDMA3 or MWDMA 4 */
4223 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4224 }
4225
37deecb5
TH
4226 if (ata_dma_blacklisted(dev)) {
4227 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
a9a79dfe
JP
4228 ata_dev_warn(dev,
4229 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4230 }
a6d5a51c 4231
14d66ab7 4232 if ((host->flags & ATA_HOST_SIMPLEX) &&
2dcb407e 4233 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5 4234 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
a9a79dfe
JP
4235 ata_dev_warn(dev,
4236 "simplex DMA is claimed by other device, disabling DMA\n");
5444a6f4 4237 }
565083e1 4238
e424675f
JG
4239 if (ap->flags & ATA_FLAG_NO_IORDY)
4240 xfer_mask &= ata_pio_mask_no_iordy(dev);
4241
5444a6f4 4242 if (ap->ops->mode_filter)
a76b62ca 4243 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4244
8343f889
RH
4245 /* Apply cable rule here. Don't apply it early because when
4246 * we handle hot plug the cable type can itself change.
4247 * Check this last so that we know if the transfer rate was
4248 * solely limited by the cable.
4249 * Unknown or 80 wire cables reported host side are checked
4250 * drive side as well. Cases where we know a 40wire cable
4251 * is used safely for 80 are not checked here.
4252 */
4253 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4254 /* UDMA/44 or higher would be available */
15a5551c 4255 if (cable_is_40wire(ap)) {
a9a79dfe
JP
4256 ata_dev_warn(dev,
4257 "limited to UDMA/33 due to 40-wire cable\n");
8343f889
RH
4258 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4259 }
4260
565083e1
TH
4261 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4262 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4263}
4264
1da177e4
LT
4265/**
4266 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4267 * @dev: Device to which command will be sent
4268 *
780a87f7
JG
4269 * Issue SET FEATURES - XFER MODE command to device @dev
4270 * on port @ap.
4271 *
1da177e4 4272 * LOCKING:
0cba632b 4273 * PCI/etc. bus probe sem.
83206a29
TH
4274 *
4275 * RETURNS:
4276 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4277 */
4278
3373efd8 4279static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4280{
a0123703 4281 struct ata_taskfile tf;
83206a29 4282 unsigned int err_mask;
1da177e4
LT
4283
4284 /* set up set-features taskfile */
4633778b 4285 ata_dev_dbg(dev, "set features - xfer mode\n");
1da177e4 4286
464cf177
TH
4287 /* Some controllers and ATAPI devices show flaky interrupt
4288 * behavior after setting xfer mode. Use polling instead.
4289 */
3373efd8 4290 ata_tf_init(dev, &tf);
a0123703
TH
4291 tf.command = ATA_CMD_SET_FEATURES;
4292 tf.feature = SETFEATURES_XFER;
464cf177 4293 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703 4294 tf.protocol = ATA_PROT_NODATA;
b9f8ab2d 4295 /* If we are using IORDY we must send the mode setting command */
11b7becc
JG
4296 if (ata_pio_need_iordy(dev))
4297 tf.nsect = dev->xfer_mode;
b9f8ab2d
AC
4298 /* If the device has IORDY and the controller does not - turn it off */
4299 else if (ata_id_has_iordy(dev->id))
11b7becc 4300 tf.nsect = 0x01;
b9f8ab2d
AC
4301 else /* In the ancient relic department - skip all of this */
4302 return 0;
1da177e4 4303
d531be2c
MP
4304 /* On some disks, this command causes spin-up, so we need longer timeout */
4305 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
9f45cbd3 4306
9f45cbd3
KCA
4307 return err_mask;
4308}
1152b261 4309
9f45cbd3 4310/**
218f3d30 4311 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4312 * @dev: Device to which command will be sent
4313 * @enable: Whether to enable or disable the feature
218f3d30 4314 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4315 *
4316 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4317 * on port @ap with sector count
9f45cbd3
KCA
4318 *
4319 * LOCKING:
4320 * PCI/etc. bus probe sem.
4321 *
4322 * RETURNS:
4323 * 0 on success, AC_ERR_* mask otherwise.
4324 */
1152b261 4325unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
9f45cbd3
KCA
4326{
4327 struct ata_taskfile tf;
4328 unsigned int err_mask;
974e0a45 4329 unsigned long timeout = 0;
9f45cbd3
KCA
4330
4331 /* set up set-features taskfile */
4633778b 4332 ata_dev_dbg(dev, "set features - SATA features\n");
9f45cbd3
KCA
4333
4334 ata_tf_init(dev, &tf);
4335 tf.command = ATA_CMD_SET_FEATURES;
4336 tf.feature = enable;
4337 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4338 tf.protocol = ATA_PROT_NODATA;
218f3d30 4339 tf.nsect = feature;
9f45cbd3 4340
974e0a45
DLM
4341 if (enable == SETFEATURES_SPINUP)
4342 timeout = ata_probe_timeout ?
4343 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4344 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
1da177e4 4345
83206a29 4346 return err_mask;
1da177e4 4347}
633de4cc 4348EXPORT_SYMBOL_GPL(ata_dev_set_feature);
1da177e4 4349
8bf62ece
AL
4350/**
4351 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4352 * @dev: Device to which command will be sent
e2a7f77a
RD
4353 * @heads: Number of heads (taskfile parameter)
4354 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4355 *
4356 * LOCKING:
6aff8f1f
TH
4357 * Kernel thread context (may sleep)
4358 *
4359 * RETURNS:
4360 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4361 */
3373efd8
TH
4362static unsigned int ata_dev_init_params(struct ata_device *dev,
4363 u16 heads, u16 sectors)
8bf62ece 4364{
a0123703 4365 struct ata_taskfile tf;
6aff8f1f 4366 unsigned int err_mask;
8bf62ece
AL
4367
4368 /* Number of sectors per track 1-255. Number of heads 1-16 */
4369 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4370 return AC_ERR_INVALID;
8bf62ece
AL
4371
4372 /* set up init dev params taskfile */
4633778b 4373 ata_dev_dbg(dev, "init dev params \n");
8bf62ece 4374
3373efd8 4375 ata_tf_init(dev, &tf);
a0123703
TH
4376 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4377 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4378 tf.protocol = ATA_PROT_NODATA;
4379 tf.nsect = sectors;
4380 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4381
2b789108 4382 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4383 /* A clean abort indicates an original or just out of spec drive
4384 and we should continue as we issue the setup based on the
4385 drive reported working geometry */
4386 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4387 err_mask = 0;
8bf62ece 4388
6aff8f1f 4389 return err_mask;
8bf62ece
AL
4390}
4391
1da177e4 4392/**
5895ef9a 4393 * atapi_check_dma - Check whether ATAPI DMA can be supported
1da177e4
LT
4394 * @qc: Metadata associated with taskfile to check
4395 *
780a87f7
JG
4396 * Allow low-level driver to filter ATA PACKET commands, returning
4397 * a status indicating whether or not it is OK to use DMA for the
4398 * supplied PACKET command.
4399 *
1da177e4 4400 * LOCKING:
624d5c51
TH
4401 * spin_lock_irqsave(host lock)
4402 *
4403 * RETURNS: 0 when ATAPI DMA can be used
4404 * nonzero otherwise
4405 */
5895ef9a 4406int atapi_check_dma(struct ata_queued_cmd *qc)
624d5c51
TH
4407{
4408 struct ata_port *ap = qc->ap;
71601958 4409
624d5c51
TH
4410 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4411 * few ATAPI devices choke on such DMA requests.
4412 */
6a87e42e
TH
4413 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4414 unlikely(qc->nbytes & 15))
624d5c51 4415 return 1;
e2cec771 4416
624d5c51
TH
4417 if (ap->ops->check_atapi_dma)
4418 return ap->ops->check_atapi_dma(qc);
e2cec771 4419
624d5c51
TH
4420 return 0;
4421}
1da177e4 4422
624d5c51
TH
4423/**
4424 * ata_std_qc_defer - Check whether a qc needs to be deferred
4425 * @qc: ATA command in question
4426 *
4427 * Non-NCQ commands cannot run with any other command, NCQ or
4428 * not. As upper layer only knows the queue depth, we are
4429 * responsible for maintaining exclusion. This function checks
4430 * whether a new command @qc can be issued.
4431 *
4432 * LOCKING:
4433 * spin_lock_irqsave(host lock)
4434 *
4435 * RETURNS:
4436 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4437 */
4438int ata_std_qc_defer(struct ata_queued_cmd *qc)
4439{
4440 struct ata_link *link = qc->dev->link;
e2cec771 4441
179b310a 4442 if (ata_is_ncq(qc->tf.protocol)) {
624d5c51
TH
4443 if (!ata_tag_valid(link->active_tag))
4444 return 0;
4445 } else {
4446 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4447 return 0;
4448 }
e2cec771 4449
624d5c51
TH
4450 return ATA_DEFER_LINK;
4451}
a52fbcfc 4452EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6912ccd5 4453
95364f36
JS
4454enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
4455{
4456 return AC_ERR_OK;
4457}
a52fbcfc 4458EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4 4459
624d5c51
TH
4460/**
4461 * ata_sg_init - Associate command with scatter-gather table.
4462 * @qc: Command to be associated
4463 * @sg: Scatter-gather table.
4464 * @n_elem: Number of elements in s/g table.
4465 *
4466 * Initialize the data-related elements of queued_cmd @qc
4467 * to point to a scatter-gather table @sg, containing @n_elem
4468 * elements.
4469 *
4470 * LOCKING:
4471 * spin_lock_irqsave(host lock)
4472 */
4473void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4474 unsigned int n_elem)
4475{
4476 qc->sg = sg;
4477 qc->n_elem = n_elem;
4478 qc->cursg = qc->sg;
4479}
bb5cb290 4480
2874d5ee
GU
4481#ifdef CONFIG_HAS_DMA
4482
4483/**
4484 * ata_sg_clean - Unmap DMA memory associated with command
4485 * @qc: Command containing DMA memory to be released
4486 *
4487 * Unmap all mapped DMA memory associated with this command.
4488 *
4489 * LOCKING:
4490 * spin_lock_irqsave(host lock)
4491 */
af27e01c 4492static void ata_sg_clean(struct ata_queued_cmd *qc)
2874d5ee
GU
4493{
4494 struct ata_port *ap = qc->ap;
4495 struct scatterlist *sg = qc->sg;
4496 int dir = qc->dma_dir;
4497
4498 WARN_ON_ONCE(sg == NULL);
4499
2874d5ee
GU
4500 if (qc->n_elem)
4501 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4502
4503 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4504 qc->sg = NULL;
4505}
4506
624d5c51
TH
4507/**
4508 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4509 * @qc: Command with scatter-gather table to be mapped.
4510 *
4511 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4512 *
4513 * LOCKING:
4514 * spin_lock_irqsave(host lock)
4515 *
4516 * RETURNS:
4517 * Zero on success, negative on error.
4518 *
4519 */
4520static int ata_sg_setup(struct ata_queued_cmd *qc)
4521{
4522 struct ata_port *ap = qc->ap;
4523 unsigned int n_elem;
1da177e4 4524
624d5c51
TH
4525 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4526 if (n_elem < 1)
4527 return -1;
bb5cb290 4528
5825627c 4529 qc->orig_n_elem = qc->n_elem;
624d5c51
TH
4530 qc->n_elem = n_elem;
4531 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4 4532
624d5c51 4533 return 0;
1da177e4
LT
4534}
4535
2874d5ee
GU
4536#else /* !CONFIG_HAS_DMA */
4537
4538static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
4539static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
4540
4541#endif /* !CONFIG_HAS_DMA */
4542
624d5c51
TH
4543/**
4544 * swap_buf_le16 - swap halves of 16-bit words in place
4545 * @buf: Buffer to swap
4546 * @buf_words: Number of 16-bit words in buffer.
4547 *
4548 * Swap halves of 16-bit words if needed to convert from
4549 * little-endian byte order to native cpu byte order, or
4550 * vice-versa.
4551 *
4552 * LOCKING:
4553 * Inherited from caller.
4554 */
4555void swap_buf_le16(u16 *buf, unsigned int buf_words)
8061f5f0 4556{
624d5c51
TH
4557#ifdef __BIG_ENDIAN
4558 unsigned int i;
8061f5f0 4559
624d5c51
TH
4560 for (i = 0; i < buf_words; i++)
4561 buf[i] = le16_to_cpu(buf[i]);
4562#endif /* __BIG_ENDIAN */
8061f5f0
TH
4563}
4564
8a8bc223 4565/**
98bd4be1
SL
4566 * ata_qc_new_init - Request an available ATA command, and initialize it
4567 * @dev: Device from whom we request an available command structure
38755e89 4568 * @tag: tag
1871ee13 4569 *
8a8bc223
TH
4570 * LOCKING:
4571 * None.
4572 */
4573
98bd4be1 4574struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
8a8bc223 4575{
98bd4be1 4576 struct ata_port *ap = dev->link->ap;
12cb5ce1 4577 struct ata_queued_cmd *qc;
8a8bc223
TH
4578
4579 /* no command while frozen */
4580 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4581 return NULL;
4582
98bd4be1 4583 /* libsas case */
5067c046 4584 if (ap->flags & ATA_FLAG_SAS_HOST) {
98bd4be1
SL
4585 tag = ata_sas_allocate_tag(ap);
4586 if (tag < 0)
4587 return NULL;
8a4aeec8 4588 }
8a8bc223 4589
98bd4be1 4590 qc = __ata_qc_from_tag(ap, tag);
5ac40790 4591 qc->tag = qc->hw_tag = tag;
98bd4be1
SL
4592 qc->scsicmd = NULL;
4593 qc->ap = ap;
4594 qc->dev = dev;
1da177e4 4595
98bd4be1 4596 ata_qc_reinit(qc);
1da177e4
LT
4597
4598 return qc;
4599}
4600
8a8bc223
TH
4601/**
4602 * ata_qc_free - free unused ata_queued_cmd
4603 * @qc: Command to complete
4604 *
4605 * Designed to free unused ata_queued_cmd object
4606 * in case something prevents using it.
4607 *
4608 * LOCKING:
4609 * spin_lock_irqsave(host lock)
4610 */
4611void ata_qc_free(struct ata_queued_cmd *qc)
4612{
a1104016 4613 struct ata_port *ap;
8a8bc223
TH
4614 unsigned int tag;
4615
efcb3cf7 4616 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
a1104016 4617 ap = qc->ap;
8a8bc223
TH
4618
4619 qc->flags = 0;
4620 tag = qc->tag;
28361c40 4621 if (ata_tag_valid(tag)) {
8a8bc223 4622 qc->tag = ATA_TAG_POISON;
5067c046 4623 if (ap->flags & ATA_FLAG_SAS_HOST)
98bd4be1 4624 ata_sas_free_tag(tag, ap);
8a8bc223
TH
4625 }
4626}
4627
76014427 4628void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4629{
a1104016
JL
4630 struct ata_port *ap;
4631 struct ata_link *link;
dedaf2b0 4632
efcb3cf7
TH
4633 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4634 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
a1104016
JL
4635 ap = qc->ap;
4636 link = qc->dev->link;
1da177e4
LT
4637
4638 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4639 ata_sg_clean(qc);
4640
7401abf2 4641 /* command should be marked inactive atomically with qc completion */
179b310a 4642 if (ata_is_ncq(qc->tf.protocol)) {
4e5b6260 4643 link->sactive &= ~(1 << qc->hw_tag);
da917d69
TH
4644 if (!link->sactive)
4645 ap->nr_active_links--;
4646 } else {
9af5c9c9 4647 link->active_tag = ATA_TAG_POISON;
da917d69
TH
4648 ap->nr_active_links--;
4649 }
4650
4651 /* clear exclusive status */
4652 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4653 ap->excl_link == link))
4654 ap->excl_link = NULL;
7401abf2 4655
3f3791d3
AL
4656 /* atapi: mark qc as inactive to prevent the interrupt handler
4657 * from completing the command twice later, before the error handler
4658 * is called. (when rc != 0 and atapi request sense is needed)
4659 */
4660 qc->flags &= ~ATA_QCFLAG_ACTIVE;
e3ed8939 4661 ap->qc_active &= ~(1ULL << qc->tag);
3f3791d3 4662
1da177e4 4663 /* call completion callback */
77853bf2 4664 qc->complete_fn(qc);
1da177e4
LT
4665}
4666
39599a53
TH
4667static void fill_result_tf(struct ata_queued_cmd *qc)
4668{
4669 struct ata_port *ap = qc->ap;
4670
39599a53 4671 qc->result_tf.flags = qc->tf.flags;
22183bf5 4672 ap->ops->qc_fill_rtf(qc);
39599a53
TH
4673}
4674
00115e0f
TH
4675static void ata_verify_xfer(struct ata_queued_cmd *qc)
4676{
4677 struct ata_device *dev = qc->dev;
4678
eb0effdf 4679 if (!ata_is_data(qc->tf.protocol))
00115e0f
TH
4680 return;
4681
4682 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4683 return;
4684
4685 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4686}
4687
f686bcb8
TH
4688/**
4689 * ata_qc_complete - Complete an active ATA command
4690 * @qc: Command to complete
f686bcb8 4691 *
1aadf5c3
TH
4692 * Indicate to the mid and upper layers that an ATA command has
4693 * completed, with either an ok or not-ok status.
4694 *
4695 * Refrain from calling this function multiple times when
4696 * successfully completing multiple NCQ commands.
4697 * ata_qc_complete_multiple() should be used instead, which will
4698 * properly update IRQ expect state.
f686bcb8
TH
4699 *
4700 * LOCKING:
cca3974e 4701 * spin_lock_irqsave(host lock)
f686bcb8
TH
4702 */
4703void ata_qc_complete(struct ata_queued_cmd *qc)
4704{
4705 struct ata_port *ap = qc->ap;
4706
eb25cb99 4707 /* Trigger the LED (if available) */
d1ed7c55 4708 ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
eb25cb99 4709
f686bcb8
TH
4710 /* XXX: New EH and old EH use different mechanisms to
4711 * synchronize EH with regular execution path.
4712 *
4713 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4714 * Normal execution path is responsible for not accessing a
4715 * failed qc. libata core enforces the rule by returning NULL
4716 * from ata_qc_from_tag() for failed qcs.
4717 *
4718 * Old EH depends on ata_qc_complete() nullifying completion
4719 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4720 * not synchronize with interrupt handler. Only PIO task is
4721 * taken care of.
4722 */
4723 if (ap->ops->error_handler) {
4dbfa39b
TH
4724 struct ata_device *dev = qc->dev;
4725 struct ata_eh_info *ehi = &dev->link->eh_info;
4726
f686bcb8
TH
4727 if (unlikely(qc->err_mask))
4728 qc->flags |= ATA_QCFLAG_FAILED;
4729
f08dc1ac
TH
4730 /*
4731 * Finish internal commands without any further processing
4732 * and always with the result TF filled.
4733 */
4734 if (unlikely(ata_tag_internal(qc->tag))) {
f4b31db9 4735 fill_result_tf(qc);
255c03d1 4736 trace_ata_qc_complete_internal(qc);
f08dc1ac
TH
4737 __ata_qc_complete(qc);
4738 return;
4739 }
f4b31db9 4740
f08dc1ac
TH
4741 /*
4742 * Non-internal qc has failed. Fill the result TF and
4743 * summon EH.
4744 */
4745 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4746 fill_result_tf(qc);
255c03d1 4747 trace_ata_qc_complete_failed(qc);
f08dc1ac 4748 ata_qc_schedule_eh(qc);
f4b31db9 4749 return;
f686bcb8
TH
4750 }
4751
4dc738ed
TH
4752 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4753
f686bcb8
TH
4754 /* read result TF if requested */
4755 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4756 fill_result_tf(qc);
f686bcb8 4757
255c03d1 4758 trace_ata_qc_complete_done(qc);
4dbfa39b
TH
4759 /* Some commands need post-processing after successful
4760 * completion.
4761 */
4762 switch (qc->tf.command) {
4763 case ATA_CMD_SET_FEATURES:
4764 if (qc->tf.feature != SETFEATURES_WC_ON &&
0c12735e
TY
4765 qc->tf.feature != SETFEATURES_WC_OFF &&
4766 qc->tf.feature != SETFEATURES_RA_ON &&
4767 qc->tf.feature != SETFEATURES_RA_OFF)
4dbfa39b 4768 break;
df561f66 4769 fallthrough;
4dbfa39b
TH
4770 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4771 case ATA_CMD_SET_MULTI: /* multi_count changed */
4772 /* revalidate device */
4773 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4774 ata_port_schedule_eh(ap);
4775 break;
054a5fba
TH
4776
4777 case ATA_CMD_SLEEP:
4778 dev->flags |= ATA_DFLAG_SLEEPING;
4779 break;
4dbfa39b
TH
4780 }
4781
00115e0f
TH
4782 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4783 ata_verify_xfer(qc);
4784
f686bcb8
TH
4785 __ata_qc_complete(qc);
4786 } else {
4787 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4788 return;
4789
4790 /* read result TF if failed or requested */
4791 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4792 fill_result_tf(qc);
f686bcb8
TH
4793
4794 __ata_qc_complete(qc);
4795 }
4796}
a52fbcfc 4797EXPORT_SYMBOL_GPL(ata_qc_complete);
f686bcb8 4798
8385d756
SH
4799/**
4800 * ata_qc_get_active - get bitmask of active qcs
4801 * @ap: port in question
4802 *
4803 * LOCKING:
4804 * spin_lock_irqsave(host lock)
4805 *
4806 * RETURNS:
4807 * Bitmask of active qcs
4808 */
4809u64 ata_qc_get_active(struct ata_port *ap)
4810{
4811 u64 qc_active = ap->qc_active;
4812
4813 /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
4814 if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
4815 qc_active |= (1 << 0);
4816 qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
4817 }
4818
4819 return qc_active;
4820}
4821EXPORT_SYMBOL_GPL(ata_qc_get_active);
4822
1da177e4
LT
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	On any failure (bad data command, qc_prep/qc_issue error, or a
 *	sleeping device) the qc is completed with an error mask set, so
 *	the caller never has to clean up a partially issued command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* Track the command in the link's tag accounting: NCQ commands
	 * go into sactive (by hw_tag), everything else becomes the
	 * single active_tag.  nr_active_links counts links with any
	 * outstanding command.
	 */
	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->hw_tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1ULL << qc->tag;

	/*
	 * We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
		goto sys_err;

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	/* Hand the command to the LLD: first prep (S/G, descriptors),
	 * then issue.  Either step may report failure via err_mask.
	 */
	trace_ata_qc_prep(qc);
	qc->err_mask |= ap->ops->qc_prep(qc);
	if (unlikely(qc->err_mask))
		goto err;
	trace_ata_qc_issue(qc);
	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
4898
34bf2170 4899/**
b1c72916 4900 * ata_phys_link_online - test whether the given link is online
936fd732 4901 * @link: ATA link to test
34bf2170 4902 *
936fd732
TH
4903 * Test whether @link is online. Note that this function returns
4904 * 0 if online status of @link cannot be obtained, so
4905 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4906 *
4907 * LOCKING:
4908 * None.
4909 *
4910 * RETURNS:
b5b3fa38 4911 * True if the port online status is available and online.
34bf2170 4912 */
b1c72916 4913bool ata_phys_link_online(struct ata_link *link)
34bf2170
TH
4914{
4915 u32 sstatus;
4916
936fd732 4917 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 4918 ata_sstatus_online(sstatus))
b5b3fa38
TH
4919 return true;
4920 return false;
34bf2170
TH
4921}
4922
4923/**
b1c72916 4924 * ata_phys_link_offline - test whether the given link is offline
936fd732 4925 * @link: ATA link to test
34bf2170 4926 *
936fd732
TH
4927 * Test whether @link is offline. Note that this function
4928 * returns 0 if offline status of @link cannot be obtained, so
4929 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4930 *
4931 * LOCKING:
4932 * None.
4933 *
4934 * RETURNS:
b5b3fa38 4935 * True if the port offline status is available and offline.
34bf2170 4936 */
b1c72916 4937bool ata_phys_link_offline(struct ata_link *link)
34bf2170
TH
4938{
4939 u32 sstatus;
4940
936fd732 4941 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
9913ff8a 4942 !ata_sstatus_online(sstatus))
b5b3fa38
TH
4943 return true;
4944 return false;
34bf2170 4945}
0baab86b 4946
b1c72916
TH
4947/**
4948 * ata_link_online - test whether the given link is online
4949 * @link: ATA link to test
4950 *
4951 * Test whether @link is online. This is identical to
4952 * ata_phys_link_online() when there's no slave link. When
4953 * there's a slave link, this function should only be called on
4954 * the master link and will return true if any of M/S links is
4955 * online.
4956 *
4957 * LOCKING:
4958 * None.
4959 *
4960 * RETURNS:
4961 * True if the port online status is available and online.
4962 */
4963bool ata_link_online(struct ata_link *link)
4964{
4965 struct ata_link *slave = link->ap->slave_link;
4966
4967 WARN_ON(link == slave); /* shouldn't be called on slave link */
4968
4969 return ata_phys_link_online(link) ||
4970 (slave && ata_phys_link_online(slave));
4971}
a52fbcfc 4972EXPORT_SYMBOL_GPL(ata_link_online);
b1c72916
TH
4973
4974/**
4975 * ata_link_offline - test whether the given link is offline
4976 * @link: ATA link to test
4977 *
4978 * Test whether @link is offline. This is identical to
4979 * ata_phys_link_offline() when there's no slave link. When
4980 * there's a slave link, this function should only be called on
4981 * the master link and will return true if both M/S links are
4982 * offline.
4983 *
4984 * LOCKING:
4985 * None.
4986 *
4987 * RETURNS:
4988 * True if the port offline status is available and offline.
4989 */
4990bool ata_link_offline(struct ata_link *link)
4991{
4992 struct ata_link *slave = link->ap->slave_link;
4993
4994 WARN_ON(link == slave); /* shouldn't be called on slave link */
4995
4996 return ata_phys_link_offline(link) &&
4997 (!slave || ata_phys_link_offline(slave));
4998}
a52fbcfc 4999EXPORT_SYMBOL_GPL(ata_link_offline);
b1c72916 5000
6ffa01d8 5001#ifdef CONFIG_PM
bc6e7c4b
DW
/*
 * ata_port_request_pm - request a PM operation from the port's EH
 * @ap: port to operate on
 * @mesg: PM message to deliver
 * @action: EH actions to request on each link (e.g. ATA_EH_RESET)
 * @ehi_flags: EH info flags to set on each link
 * @async: if false, wait for the EH operation to complete
 *
 * PM operations (suspend/resume) are funneled through EH.  This
 * records the request under the port lock and kicks EH; with
 * @async false it then waits until EH has consumed the request.
 */
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
				unsigned int action, unsigned int ehi_flags,
				bool async)
{
	struct ata_link *link;
	unsigned long flags;

	/* Previous resume operation might still be in
	 * progress.  Wait for PM_PENDING to clear.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* request PM ops to EH */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	/* synchronous callers wait for EH to finish the PM op */
	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}
}
5036
bc6e7c4b
DW
/*
 * On some hardware, device fails to respond after spun down for suspend.  As
 * the device won't be used before being resumed, we don't need to touch the
 * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
 *
 * http://thread.gmane.org/gmane.linux.ide/46764
 */
static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
						 | ATA_EHI_NO_AUTOPSY
						 | ATA_EHI_NO_RECOVERY;

/* Suspend @ap synchronously: returns after EH has processed the request. */
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}

/* Suspend @ap asynchronously: only queues the request with EH. */
static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
5057
bc6e7c4b 5058static int ata_port_pm_suspend(struct device *dev)
5ef41082 5059{
bc6e7c4b
DW
5060 struct ata_port *ap = to_ata_port(dev);
5061
5ef41082
LM
5062 if (pm_runtime_suspended(dev))
5063 return 0;
5064
bc6e7c4b
DW
5065 ata_port_suspend(ap, PMSG_SUSPEND);
5066 return 0;
33574d68
LM
5067}
5068
bc6e7c4b 5069static int ata_port_pm_freeze(struct device *dev)
33574d68 5070{
bc6e7c4b
DW
5071 struct ata_port *ap = to_ata_port(dev);
5072
33574d68 5073 if (pm_runtime_suspended(dev))
f5e6d0d0 5074 return 0;
33574d68 5075
bc6e7c4b
DW
5076 ata_port_suspend(ap, PMSG_FREEZE);
5077 return 0;
33574d68
LM
5078}
5079
bc6e7c4b 5080static int ata_port_pm_poweroff(struct device *dev)
33574d68 5081{
bc6e7c4b
DW
5082 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5083 return 0;
5ef41082
LM
5084}
5085
bc6e7c4b
DW
/* EH flags for resume: reset quietly without autopsy of prior state. */
static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
						| ATA_EHI_QUIET;

/* Resume @ap synchronously: returns after EH has reset/recovered the port. */
static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
}

/* Resume @ap asynchronously: only queues the reset request with EH. */
static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
}
5098
bc6e7c4b 5099static int ata_port_pm_resume(struct device *dev)
e90b1e5a 5100{
200421a8 5101 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
bc6e7c4b
DW
5102 pm_runtime_disable(dev);
5103 pm_runtime_set_active(dev);
5104 pm_runtime_enable(dev);
5105 return 0;
e90b1e5a
LM
5106}
5107
7e15e9be
AL
/*
 * For ODDs, the upper layer will poll for media change every few seconds,
 * which will make it enter and leave suspend state every few seconds.  And
 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
 * is very little and the ODD may malfunction after constantly being reset.
 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
 * ODD is attached to the port.
 */
static int ata_port_runtime_idle(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);
	struct ata_link *link;
	struct ata_device *adev;

	/* veto runtime suspend if any enabled ATAPI device lacks ZPODD */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(adev, link, ENABLED)
			if (adev->class == ATA_DEV_ATAPI &&
			    !zpodd_dev_enabled(adev))
				return -EBUSY;
	}

	return 0;
}
5131
a7ff60db
AL
/* Runtime PM suspend callback: synchronous auto-suspend via EH. */
static int ata_port_runtime_suspend(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
	return 0;
}

/* Runtime PM resume callback: synchronous auto-resume via EH. */
static int ata_port_runtime_resume(struct device *dev)
{
	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
	return 0;
}
5143
5ef41082 5144static const struct dev_pm_ops ata_port_pm_ops = {
bc6e7c4b
DW
5145 .suspend = ata_port_pm_suspend,
5146 .resume = ata_port_pm_resume,
5147 .freeze = ata_port_pm_freeze,
5148 .thaw = ata_port_pm_resume,
5149 .poweroff = ata_port_pm_poweroff,
5150 .restore = ata_port_pm_resume,
9ee4f393 5151
a7ff60db
AL
5152 .runtime_suspend = ata_port_runtime_suspend,
5153 .runtime_resume = ata_port_runtime_resume,
9ee4f393 5154 .runtime_idle = ata_port_runtime_idle,
5ef41082
LM
5155};
5156
2fcbdcb4
DW
/* sas ports don't participate in pm runtime management of ata_ports,
 * and need to resume ata devices at the domain level, not the per-port
 * level.  sas suspend/resume is async to allow parallel port recovery
 * since sas has multiple ata_port instances per Scsi_Host.
 */
void ata_sas_port_suspend(struct ata_port *ap)
{
	ata_port_suspend_async(ap, PMSG_SUSPEND);
}
EXPORT_SYMBOL_GPL(ata_sas_port_suspend);

void ata_sas_port_resume(struct ata_port *ap)
{
	ata_port_resume_async(ap, PMSG_RESUME);
}
EXPORT_SYMBOL_GPL(ata_sas_port_resume);
2fcbdcb4 5173
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by port suspend.
 *	This only records the PM state on the host's device; per-port
 *	suspend is driven by the dev_pm_ops callbacks above.
 *
 *	RETURNS:
 *	Always 0.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_host_suspend);
500530f6
TH
5187
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by port resume;
 *	this only marks the host's device as powered on.
 */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 5199#endif
500530f6 5200
8df82c13 5201const struct device_type ata_port_type = {
5ef41082
LM
5202 .name = "ata_port",
5203#ifdef CONFIG_PM
5204 .pm = &ata_port_pm_ops,
5205#endif
5206};
5207
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.  Resets the link's
 *	SATA speed limit, clears the probe-relevant portion of @dev and
 *	opens up all transfer mode masks.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe only the region between the CLEAR markers, preserving
	 * the fields before/after that must survive re-probing
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5242
4fb37a25
TH
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		/* default ACPI _GTF taskfile filter */
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
5279
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize ``link->[hw_]sata_spd_limit`` to the currently
 *	configured value, read back from SControl.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* SControl bits 7:4 hold the configured speed limit (SPD) */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	/* apply any libata.force= kernel parameter overrides */
	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
5312
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.  The port is
 *	created frozen and in the INITIALIZING state; probing unfreezes
 *	it later.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	ap->lock = &host->lock;
	/* real IDs are assigned at registration time */
	ap->print_id = -1;
	ap->local_port_no = -1;
	ap->host = host;
	ap->dev = host->dev;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x0007;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO;
#else
	ap->msg_enable = ATA_MSG_DRV;
#endif

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
		    TIMER_DEFERRABLE);

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);

	return ap;
}
5370
2623c7a5 5371static void ata_devres_release(struct device *gendev, void *res)
f0d36efd
TH
5372{
5373 struct ata_host *host = dev_get_drvdata(gendev);
5374 int i;
5375
1aa506e4
TH
5376 for (i = 0; i < host->n_ports; i++) {
5377 struct ata_port *ap = host->ports[i];
5378
4911487a
TH
5379 if (!ap)
5380 continue;
5381
5382 if (ap->scsi_host)
1aa506e4
TH
5383 scsi_host_put(ap->scsi_host);
5384
2623c7a5
TK
5385 }
5386
5387 dev_set_drvdata(gendev, NULL);
5388 ata_host_put(host);
5389}
5390
/*
 * Final kref release for an ATA host: frees every port (including any
 * PMP and slave links hanging off it) and then the host itself.
 */
static void ata_host_release(struct kref *kref)
{
	struct ata_host *host = container_of(kref, struct ata_host, kref);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		kfree(ap->pmp_link);
		kfree(ap->slave_link);
		kfree(ap);
		host->ports[i] = NULL;
	}
	kfree(host);
}
1aa506e4 5406
2623c7a5
TK
/* Take a reference on @host. */
void ata_host_get(struct ata_host *host)
{
	kref_get(&host->kref);
}

/* Drop a reference on @host; frees it via ata_host_release() on last put. */
void ata_host_put(struct ata_host *host)
{
	kref_put(&host->kref, ata_host_release);
}
EXPORT_SYMBOL_GPL(ata_host_put);
f0d36efd 5417
f3187195
TH
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;
	void *dr;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = kzalloc(sz, GFP_KERNEL);
	if (!host)
		return NULL;

	/* group the devres entries so a partial init can be rolled back */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		goto err_free;

	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
	if (!dr)
		goto err_out;

	devres_add(dev, dr);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = max_ports;
	kref_init(&host->kref);

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
 err_free:
	kfree(host);
	return NULL;
}
EXPORT_SYMBOL_GPL(ata_host_alloc);
f3187195 5489
f5cda257
TH
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* once @ppi runs out (NULL entry), keep reusing the last pi */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port ops become the host ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
f5cda257 5538
32ebbc0c
TH
/*
 * Devres action registered by ata_host_start(): stops each port and
 * then the host itself when the owning device goes away.
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
5556
029cfd6b
TH
/**
 *	ata_finalize_port_ops - finalize ata_port_operations
 *	@ops: ata_port_operations to finalize
 *
 *	An ata_port_operations can inherit from another ops and that
 *	ops can again inherit from another.  This can go on as many
 *	times as necessary as long as there is no loop in the
 *	inheritance chain.
 *
 *	Ops tables are finalized when the host is started.  NULL or
 *	unspecified entries are inherited from the closet ancestor
 *	which has the method and the entry is populated with it.
 *	After finalization, the ops table directly points to all the
 *	methods and ->inherits is no longer necessary and cleared.
 *
 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
 *
 *	LOCKING:
 *	None.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	/* serializes concurrent finalization of a shared ops table */
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	/* treat the ops struct as an array of function-pointer slots,
	 * up to (but not including) the ->inherits member
	 */
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* walk up the chain, filling each still-empty slot from the
	 * nearest ancestor that defines it
	 */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ATA_OP_NULL entries are error-pointer sentinels; turn them
	 * into real NULLs now that inheritance is resolved
	 */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
5606
ecef7253
TH
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	/* first pass: finalize ops and find out whether any stop
	 * callback exists, so the devres action is only registered
	 * when needed
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops && host->ops->host_stop)
		have_stop = 1;

	/* allocate the stop action up front so a late allocation
	 * failure can't leave started ports without cleanup
	 */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	/* second pass: actually start and freeze each port */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* roll back: stop only the ports that were already started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_start);
ecef7253 5687
/**
 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@ops: port_ops
 *
 *	Minimal host initialization for transports that manage their own
 *	ports (SAS); unlike ata_host_alloc() no ports are allocated here.
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->n_tags = ATA_MAX_QUEUE;
	host->dev = dev;
	host->ops = ops;
	kref_init(&host->kref);
}
EXPORT_SYMBOL_GPL(ata_host_init);
b03732f0 5706
9508a66f 5707void __ata_port_probe(struct ata_port *ap)
79318057 5708{
9508a66f
DW
5709 struct ata_eh_info *ehi = &ap->link.eh_info;
5710 unsigned long flags;
886ad09f 5711
9508a66f
DW
5712 /* kick EH for boot probing */
5713 spin_lock_irqsave(ap->lock, flags);
79318057 5714
9508a66f
DW
5715 ehi->probe_mask |= ATA_ALL_DEVICES;
5716 ehi->action |= ATA_EH_RESET;
5717 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
79318057 5718
9508a66f
DW
5719 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5720 ap->pflags |= ATA_PFLAG_LOADING;
5721 ata_port_schedule_eh(ap);
79318057 5722
9508a66f
DW
5723 spin_unlock_irqrestore(ap->lock, flags);
5724}
79318057 5725
9508a66f
DW
/*
 * Probe devices on @ap and wait for completion.  New-EH ports probe
 * via EH; old-EH ports fall back to the synchronous ata_bus_probe().
 * Returns 0 on success, -errno otherwise (always 0 on the EH path).
 */
int ata_port_probe(struct ata_port *ap)
{
	int rc = 0;

	if (ap->ops->error_handler) {
		__ata_port_probe(ap);
		ata_port_wait_eh(ap);
	} else {
		rc = ata_bus_probe(ap);
	}
	return rc;
}
5738
5739
/*
 * Async probe entry point (one per port): probes the port, then scans
 * its SCSI host.  @data is the struct ata_port being probed.
 */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.
	 * Jeff Garzik says this is only within a controller, so we
	 * don't need to wait for port 0, only for later ports.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	/* probe failures are handled by EH; scanning proceeds regardless */
	(void)ata_port_probe(ap);

	/* in order to keep device order, we need to synchronize at this point */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
238c9cf9 5761
f3187195
TH
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* cap the queue depth to what the SCSI host template allows */
	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++) {
		/* print_id is globally unique, local_port_no is per-host */
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
		host->ports[i]->local_port_no = i + 1;
	}

	/* Create associated sysfs transport objects  */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			/* the probe-time description has been reported; reset it */
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		/* the cookie lets ata_host_detach() wait for this probe */
		ap->cookie = async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	/* unwind the transport objects added so far */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
EXPORT_SYMBOL_GPL(ata_host_register);
f3187195 5860
f5cda257
TH
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessary
 *	arguments and performs the three steps in one go.
 *
 *	An invalid IRQ skips the IRQ registration and expects the host to
 *	have set polling mode on the port.  In this case, @irq_handler
 *	should be NULL.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;
	char *irq_desc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	/* "driver[device]" name shown in /proc/interrupts; devm-managed */
	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
				  dev_driver_string(host->dev),
				  dev_name(host->dev));
	if (!irq_desc)
		return -ENOMEM;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      irq_desc, host);
	if (rc)
		return rc;

	/* record the IRQ in each port's description string */
	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
EXPORT_SYMBOL_GPL(ata_host_activate);
f5cda257 5923
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* old-style drivers without EH have nothing to quiesce */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	/* no new hotplug work may be queued once EH is gone */
	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	/* clean up zpodd on port removal */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (zpodd_dev_enabled(dev))
				zpodd_exit(dev);
		}
	}
	/* drop the transport objects of any port-multiplier links */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}
5975
0529c159
TH
5976/**
5977 * ata_host_detach - Detach all ports of an ATA host
5978 * @host: Host to detach
5979 *
5980 * Detach all ports of @host.
5981 *
5982 * LOCKING:
5983 * Kernel thread context (may sleep).
5984 */
5985void ata_host_detach(struct ata_host *host)
5986{
5987 int i;
5988
b5292111
KHF
5989 for (i = 0; i < host->n_ports; i++) {
5990 /* Ensure ata_port probe has completed */
5991 async_synchronize_cookie(host->ports[i]->cookie + 1);
0529c159 5992 ata_port_detach(host->ports[i]);
b5292111 5993 }
562f0c2d
TH
5994
5995 /* the host is dead now, dissociate ACPI */
5996 ata_acpi_dissociate(host);
0529c159 5997}
a52fbcfc 5998EXPORT_SYMBOL_GPL(ata_host_detach);
0529c159 5999
374b1873
JG
6000#ifdef CONFIG_PCI
6001
1da177e4
LT
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	Called by the PCI layer when a hot-unplug or module unload
 *	event occurs.  All ports of the associated host are detached;
 *	resource release itself is handled through devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
        ata_host_detach(pci_get_drvdata(pdev));
}
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
1da177e4 6020
10a663a1
PK
/*
 * Quiesce all ports of @pdev's ATA host: freeze each port (masking its
 * interrupts) and stop its DMA engines so the controller is inert —
 * presumably for the shutdown/kexec path; confirm against callers.
 */
void ata_pci_shutdown_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* mark the port frozen so EH won't try to revive it */
		ap->pflags |= ATA_PFLAG_FROZEN;

		/* Disable port interrupts */
		if (ap->ops->freeze)
			ap->ops->freeze(ap);

		/* Stop the port DMA engines */
		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
}
EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
10a663a1 6041
1da177e4 6042/* move to PCI subsystem */
057ace5e 6043int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6044{
6045 unsigned long tmp = 0;
6046
6047 switch (bits->width) {
6048 case 1: {
6049 u8 tmp8 = 0;
6050 pci_read_config_byte(pdev, bits->reg, &tmp8);
6051 tmp = tmp8;
6052 break;
6053 }
6054 case 2: {
6055 u16 tmp16 = 0;
6056 pci_read_config_word(pdev, bits->reg, &tmp16);
6057 tmp = tmp16;
6058 break;
6059 }
6060 case 4: {
6061 u32 tmp32 = 0;
6062 pci_read_config_dword(pdev, bits->reg, &tmp32);
6063 tmp = tmp32;
6064 break;
6065 }
6066
6067 default:
6068 return -EINVAL;
6069 }
6070
6071 tmp &= bits->mask;
6072
6073 return (tmp == bits->val) ? 1 : 0;
6074}
a52fbcfc 6075EXPORT_SYMBOL_GPL(pci_test_config_bits);
9b847548 6076
6ffa01d8 6077#ifdef CONFIG_PM
/*
 * Suspend @pdev: save PCI config space and disable the device; for
 * actual sleep transitions also drop it into D3hot.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* only enter D3hot when really going to sleep (not e.g. freeze) */
	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
9b847548 6087
/*
 * Resume @pdev: return it to D0, restore config space, re-enable it
 * (devres-managed) and restore bus mastering.  Returns 0 or -errno.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* managed enable; resources are released automatically via devres */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
500530f6 6106
3c5100c1 6107int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6108{
04a3f5b7 6109 struct ata_host *host = pci_get_drvdata(pdev);
500530f6
TH
6110 int rc = 0;
6111
cca3974e 6112 rc = ata_host_suspend(host, mesg);
500530f6
TH
6113 if (rc)
6114 return rc;
6115
3c5100c1 6116 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6117
6118 return 0;
6119}
a52fbcfc 6120EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
500530f6
TH
6121
6122int ata_pci_device_resume(struct pci_dev *pdev)
6123{
04a3f5b7 6124 struct ata_host *host = pci_get_drvdata(pdev);
553c4aa6 6125 int rc;
500530f6 6126
553c4aa6
TH
6127 rc = ata_pci_device_do_resume(pdev);
6128 if (rc == 0)
6129 ata_host_resume(host);
6130 return rc;
9b847548 6131}
a52fbcfc 6132EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6133#endif /* CONFIG_PM */
1da177e4
LT
6134#endif /* CONFIG_PCI */
6135
b7db04d9
BN
/**
 *	ata_platform_remove_one - Platform layer callback for device removal
 *	@pdev: Platform device that was removed
 *
 *	Called by the platform layer when a hot-unplug or module unload
 *	event occurs.  All ports of the associated host are detached;
 *	resource release itself is handled through devres.
 *
 *	LOCKING:
 *	Inherited from platform layer (may sleep).
 *
 *	RETURNS:
 *	Always 0.
 */
int ata_platform_remove_one(struct platform_device *pdev)
{
        ata_host_detach(platform_get_drvdata(pdev));

        return 0;
}
EXPORT_SYMBOL_GPL(ata_platform_remove_one);
b7db04d9 6156
bf89b0bf 6157#ifdef CONFIG_ATA_FORCE
33267325
TH
/*
 * Parse one comma-separated libata.force token of the form
 * "[PORT[.DEVICE]:]VALUE" into @force_ent, advancing *@cur past it.
 * The token is NUL-terminated in place (the buffer is mutated).
 * On failure a human-readable cause is stored in *@reason and
 * -EINVAL is returned.  Prefix matches of VALUE are accepted when
 * unambiguous; an exact match always wins.
 */
static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	/* table of recognized VALUE strings and their effect */
	static const struct ata_force_param force_tbl[] __initconst = {
		{ "40c",	.cbl		= ATA_CBL_PATA40 },
		{ "80c",	.cbl		= ATA_CBL_PATA80 },
		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
		{ "sata",	.cbl		= ATA_CBL_SATA },
		{ "1.5Gbps",	.spd_limit	= 1 },
		{ "3.0Gbps",	.spd_limit	= 2 },
		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
		{ "noncqtrim",	.horkage_on	= ATA_HORKAGE_NO_NCQ_TRIM },
		{ "ncqtrim",	.horkage_off	= ATA_HORKAGE_NO_NCQ_TRIM },
		{ "noncqati",	.horkage_on	= ATA_HORKAGE_NO_NCQ_ON_ATI },
		{ "ncqati",	.horkage_off	= ATA_HORKAGE_NO_NCQ_ON_ATI },
		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
		{ "rstonce",	.lflags		= ATA_LFLAG_RST_ONCE },
		{ "atapi_dmadir", .horkage_on	= ATA_HORKAGE_ATAPI_DMADIR },
		{ "disable",	.horkage_on	= ATA_HORKAGE_DISABLE },
	};
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;

	/* find where this param ends and update *cur */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == '\0')
		*cur = p;
	else
		*cur = p + 1;

	/* terminate this token in place */
	*p = '\0';

	/* parse */
	p = strchr(start, ':');
	if (!p) {
		/* no "ID:" prefix — the whole token is the value */
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* parse id */
	p = strchr(id, '.');
	if (p) {
		/* "PORT.DEVICE" — split off the device number */
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		/* an exact match overrides any number of prefix matches */
		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}
6293
/*
 * Parse the whole libata.force= parameter buffer into ata_force_tbl /
 * ata_force_tbl_size.  Entries without an explicit port/device inherit
 * the location of the previous entry, so "1.00:udma4,noncq" applies
 * both values to device 1.00.  Parse failures are reported and skipped.
 */
static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* calculate maximum number of params and allocate force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		/* no explicit location: inherit the previous entry's */
		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}
1da177e4 6338
bf89b0bf
BZ
/* Free the force table allocated by ata_parse_force_param(). */
static void ata_free_force_param(void)
{
	kfree(ata_force_tbl);
}
#else
/* CONFIG_ATA_FORCE disabled: the force machinery compiles away */
static inline void ata_parse_force_param(void) { }
static inline void ata_free_force_param(void) { }
#endif
6347
1da177e4
LT
6348static int __init ata_init(void)
6349{
d9027470 6350 int rc;
270390e1 6351
33267325
TH
6352 ata_parse_force_param();
6353
270390e1 6354 rc = ata_sff_init();
ad72cf98 6355 if (rc) {
bf89b0bf 6356 ata_free_force_param();
ad72cf98
TH
6357 return rc;
6358 }
453b07ac 6359
d9027470
GG
6360 libata_transport_init();
6361 ata_scsi_transport_template = ata_attach_transport();
6362 if (!ata_scsi_transport_template) {
6363 ata_sff_exit();
6364 rc = -ENOMEM;
6365 goto err_out;
4fca377f 6366 }
d9027470 6367
1da177e4
LT
6368 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6369 return 0;
d9027470
GG
6370
6371err_out:
6372 return rc;
1da177e4
LT
6373}
6374
static void __exit ata_exit(void)
{
	/* tear down in reverse order of ata_init() */
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	ata_free_force_param();
}
6382
a4625085 6383subsys_initcall(ata_init);
1da177e4
LT
6384module_exit(ata_exit);
6385
/* shared rate-limit state: at most 1 burst of messages per HZ/5 interval */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/* Returns nonzero when the caller is currently allowed to emit a message. */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
EXPORT_SYMBOL_GPL(ata_ratelimit);
67846b30 6393
c0c362b6
TH
6394/**
6395 * ata_msleep - ATA EH owner aware msleep
6396 * @ap: ATA port to attribute the sleep to
6397 * @msecs: duration to sleep in milliseconds
6398 *
6399 * Sleeps @msecs. If the current task is owner of @ap's EH, the
6400 * ownership is released before going to sleep and reacquired
6401 * after the sleep is complete. IOW, other ports sharing the
6402 * @ap->host will be allowed to own the EH while this task is
6403 * sleeping.
6404 *
6405 * LOCKING:
6406 * Might sleep.
6407 */
97750ceb
TH
6408void ata_msleep(struct ata_port *ap, unsigned int msecs)
6409{
c0c362b6
TH
6410 bool owns_eh = ap && ap->host->eh_owner == current;
6411
6412 if (owns_eh)
6413 ata_eh_release(ap);
6414
848c3920
AVM
6415 if (msecs < 20) {
6416 unsigned long usecs = msecs * USEC_PER_MSEC;
6417 usleep_range(usecs, usecs + 50);
6418 } else {
6419 msleep(msecs);
6420 }
c0c362b6
TH
6421
6422 if (owns_eh)
6423 ata_eh_acquire(ap);
97750ceb 6424}
a52fbcfc 6425EXPORT_SYMBOL_GPL(ata_msleep);
97750ceb 6426
c22daff4
TH
/**
 *	ata_wait_register - wait until register value changes
 *	@ap: ATA port to wait register for, can be NULL
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval: polling interval in milliseconds
 *	@timeout: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval until @timeout expires.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval, unsigned long timeout)
{
	unsigned long deadline;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	deadline = ata_deadline(jiffies, timeout);

	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		/* EH-aware sleep so other ports can run EH meanwhile */
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

	return tmp;
}
EXPORT_SYMBOL_GPL(ata_wait_register);
c22daff4 6473
dd5b06c4
TH
/*
 *	Dummy port_ops - used for ports that exist only to keep port
 *	numbering stable (e.g. unused channels of a controller).
 */

/* Reject every queued command; a dummy port can execute nothing. */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

/* Nothing to recover on a dummy port. */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}

struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.error_handler		= ata_dummy_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);

/* port_info template LLDs can use to declare a dummy port */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
21b0ad4f 6500
06296a1e
JP
/**
 *	ata_print_version - emit a driver version string at debug level
 *	@dev: device the version string pertains to
 *	@version: version string to print
 */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
c206a389
HR
6506
6507EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
6508EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
6509EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
6510EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
6511EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);