ata: libata: drop superfluous ata_eh_request_sense() parameter
[linux-block.git] / drivers / ata / libata-eh.c
CommitLineData
c82ee6d3 1// SPDX-License-Identifier: GPL-2.0-or-later
ece1d636
TH
2/*
3 * libata-eh.c - libata error handling
4 *
ece1d636
TH
5 * Copyright 2006 Tejun Heo <htejun@gmail.com>
6 *
ece1d636 7 * libata documentation is available via 'make {ps|pdf}docs',
9bb9a39c 8 * as Documentation/driver-api/libata.rst
ece1d636
TH
9 *
10 * Hardware documentation available from http://www.t13.org/ and
11 * http://www.sata-io.org/
ece1d636
TH
12 */
13
ece1d636 14#include <linux/kernel.h>
242f9dcb 15#include <linux/blkdev.h>
38789fda 16#include <linux/export.h>
2855568b 17#include <linux/pci.h>
ece1d636
TH
18#include <scsi/scsi.h>
19#include <scsi/scsi_host.h>
20#include <scsi/scsi_eh.h>
21#include <scsi/scsi_device.h>
22#include <scsi/scsi_cmnd.h>
6521148c 23#include <scsi/scsi_dbg.h>
c6fd2807 24#include "../scsi/scsi_transport_api.h"
ece1d636
TH
25
26#include <linux/libata.h>
27
255c03d1 28#include <trace/events/libata.h>
ece1d636
TH
29#include "libata.h"
30
7d47e8d4 31enum {
3884f7b0 32 /* speed down verdicts */
7d47e8d4
TH
33 ATA_EH_SPDN_NCQ_OFF = (1 << 0),
34 ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
35 ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
76326ac1 36 ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),
3884f7b0
TH
37
38 /* error flags */
39 ATA_EFLAG_IS_IO = (1 << 0),
76326ac1 40 ATA_EFLAG_DUBIOUS_XFER = (1 << 1),
d9027470 41 ATA_EFLAG_OLD_ER = (1 << 31),
3884f7b0
TH
42
43 /* error categories */
44 ATA_ECAT_NONE = 0,
45 ATA_ECAT_ATA_BUS = 1,
46 ATA_ECAT_TOUT_HSM = 2,
47 ATA_ECAT_UNK_DEV = 3,
75f9cafc
TH
48 ATA_ECAT_DUBIOUS_NONE = 4,
49 ATA_ECAT_DUBIOUS_ATA_BUS = 5,
50 ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
51 ATA_ECAT_DUBIOUS_UNK_DEV = 7,
52 ATA_ECAT_NR = 8,
7d47e8d4 53
87fbc5a0
TH
54 ATA_EH_CMD_DFL_TIMEOUT = 5000,
55
0a2c0f56
TH
56 /* always put at least this amount of time between resets */
57 ATA_EH_RESET_COOL_DOWN = 5000,
58
341c2c95
TH
59 /* Waiting in ->prereset can never be reliable. It's
60 * sometimes nice to wait there but it can't be depended upon;
61 * otherwise, we wouldn't be resetting. Just give it enough
62 * time for most drives to spin up.
63 */
64 ATA_EH_PRERESET_TIMEOUT = 10000,
65 ATA_EH_FASTDRAIN_INTERVAL = 3000,
11fc33da
TH
66
67 ATA_EH_UA_TRIES = 5,
c2c7a89c
TH
68
69 /* probe speed down parameters, see ata_eh_schedule_probe() */
70 ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */
71 ATA_EH_PROBE_TRIALS = 2,
31daabda
TH
72};
73
74/* The following table determines how we sequence resets. Each entry
75 * represents timeout for that try. The first try can be soft or
76 * hardreset. All others are hardreset if available. In most cases
77 * the first reset w/ 10sec timeout should succeed. Following entries
35bf8821
DW
78 * are mostly for error handling, hotplug and those outlier devices that
79 * take an exceptionally long time to recover from reset.
31daabda
TH
80 */
81static const unsigned long ata_eh_reset_timeouts[] = {
341c2c95
TH
82 10000, /* most drives spin up by 10sec */
83 10000, /* > 99% working drives spin up before 20sec */
35bf8821 84 35000, /* give > 30 secs of idleness for outlier devices */
341c2c95 85 5000, /* and sweet one last chance */
d8af0eb6 86 ULONG_MAX, /* > 1 min has elapsed, give up */
31daabda
TH
87};
88
e06233f9 89static const unsigned int ata_eh_identify_timeouts[] = {
87fbc5a0
TH
90 5000, /* covers > 99% of successes and not too boring on failures */
91 10000, /* combined time till here is enough even for media access */
92 30000, /* for true idiots */
e06233f9 93 UINT_MAX,
87fbc5a0
TH
94};
95
e06233f9 96static const unsigned int ata_eh_revalidate_timeouts[] = {
68dbbe7d
DLM
97 15000, /* Some drives are slow to read log pages when waking-up */
98 15000, /* combined time till here is enough even for media access */
e06233f9 99 UINT_MAX,
68dbbe7d
DLM
100};
101
e06233f9 102static const unsigned int ata_eh_flush_timeouts[] = {
6013efd8
TH
103 15000, /* be generous with flush */
104 15000, /* ditto */
105 30000, /* and even more generous */
e06233f9 106 UINT_MAX,
6013efd8
TH
107};
108
e06233f9 109static const unsigned int ata_eh_other_timeouts[] = {
87fbc5a0
TH
110 5000, /* same rationale as identify timeout */
111 10000, /* ditto */
112 /* but no merciful 30sec for other commands, it just isn't worth it */
e06233f9 113 UINT_MAX,
87fbc5a0
TH
114};
115
116struct ata_eh_cmd_timeout_ent {
117 const u8 *commands;
e06233f9 118 const unsigned int *timeouts;
87fbc5a0
TH
119};
120
121/* The following table determines timeouts to use for EH internal
122 * commands. Each table entry is a command class and matches the
123 * commands the entry applies to and the timeout table to use.
124 *
125 * On the retry after a command timed out, the next timeout value from
126 * the table is used. If the table doesn't contain further entries,
127 * the last value is used.
128 *
129 * ehc->cmd_timeout_idx keeps track of which timeout to use per
130 * command class, so if SET_FEATURES times out on the first try, the
131 * next try will use the second timeout value only for that class.
132 */
133#define CMDS(cmds...) (const u8 []){ cmds, 0 }
134static const struct ata_eh_cmd_timeout_ent
135ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
136 { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
137 .timeouts = ata_eh_identify_timeouts, },
68dbbe7d
DLM
138 { .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT),
139 .timeouts = ata_eh_revalidate_timeouts, },
87fbc5a0
TH
140 { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
141 .timeouts = ata_eh_other_timeouts, },
142 { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
143 .timeouts = ata_eh_other_timeouts, },
144 { .commands = CMDS(ATA_CMD_SET_FEATURES),
145 .timeouts = ata_eh_other_timeouts, },
146 { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
147 .timeouts = ata_eh_other_timeouts, },
6013efd8
TH
148 { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
149 .timeouts = ata_eh_flush_timeouts },
87fbc5a0
TH
150};
151#undef CMDS
152
ad9e2762 153static void __ata_port_freeze(struct ata_port *ap);
6ffa01d8 154#ifdef CONFIG_PM
500530f6
TH
155static void ata_eh_handle_port_suspend(struct ata_port *ap);
156static void ata_eh_handle_port_resume(struct ata_port *ap);
6ffa01d8
TH
157#else /* CONFIG_PM */
158static void ata_eh_handle_port_suspend(struct ata_port *ap)
159{ }
160
161static void ata_eh_handle_port_resume(struct ata_port *ap)
162{ }
6ffa01d8 163#endif /* CONFIG_PM */
ad9e2762 164
0d74d872
MM
165static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
166 const char *fmt, va_list args)
b64bbc39
TH
167{
168 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
169 ATA_EH_DESC_LEN - ehi->desc_len,
170 fmt, args);
171}
172
173/**
174 * __ata_ehi_push_desc - push error description without adding separator
175 * @ehi: target EHI
176 * @fmt: printf format string
177 *
178 * Format string according to @fmt and append it to @ehi->desc.
179 *
180 * LOCKING:
181 * spin_lock_irqsave(host lock)
182 */
183void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
184{
185 va_list args;
186
187 va_start(args, fmt);
188 __ata_ehi_pushv_desc(ehi, fmt, args);
189 va_end(args);
190}
a52fbcfc 191EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
b64bbc39
TH
192
193/**
194 * ata_ehi_push_desc - push error description with separator
195 * @ehi: target EHI
196 * @fmt: printf format string
197 *
198 * Format string according to @fmt and append it to @ehi->desc.
199 * If @ehi->desc is not empty, ", " is added in-between.
200 *
201 * LOCKING:
202 * spin_lock_irqsave(host lock)
203 */
204void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
205{
206 va_list args;
207
208 if (ehi->desc_len)
209 __ata_ehi_push_desc(ehi, ", ");
210
211 va_start(args, fmt);
212 __ata_ehi_pushv_desc(ehi, fmt, args);
213 va_end(args);
214}
a52fbcfc 215EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
b64bbc39
TH
216
217/**
218 * ata_ehi_clear_desc - clean error description
219 * @ehi: target EHI
220 *
221 * Clear @ehi->desc.
222 *
223 * LOCKING:
224 * spin_lock_irqsave(host lock)
225 */
226void ata_ehi_clear_desc(struct ata_eh_info *ehi)
227{
228 ehi->desc[0] = '\0';
229 ehi->desc_len = 0;
230}
a52fbcfc 231EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
b64bbc39 232
cbcdd875
TH
233/**
234 * ata_port_desc - append port description
235 * @ap: target ATA port
236 * @fmt: printf format string
237 *
238 * Format string according to @fmt and append it to port
239 * description. If port description is not empty, " " is added
240 * in-between. This function is to be used while initializing
241 * ata_host. The description is printed on host registration.
242 *
243 * LOCKING:
244 * None.
245 */
246void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
247{
248 va_list args;
249
250 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
251
252 if (ap->link.eh_info.desc_len)
253 __ata_ehi_push_desc(&ap->link.eh_info, " ");
254
255 va_start(args, fmt);
256 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
257 va_end(args);
258}
a52fbcfc 259EXPORT_SYMBOL_GPL(ata_port_desc);
cbcdd875
TH
260
261#ifdef CONFIG_PCI
cbcdd875
TH
262/**
263 * ata_port_pbar_desc - append PCI BAR description
264 * @ap: target ATA port
265 * @bar: target PCI BAR
266 * @offset: offset into PCI BAR
267 * @name: name of the area
268 *
269 * If @offset is negative, this function formats a string which
270 * contains the name, address, size and type of the BAR and
271 * appends it to the port description. If @offset is zero or
272 * positive, only name and offsetted address is appended.
273 *
274 * LOCKING:
275 * None.
276 */
277void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
278 const char *name)
279{
280 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
281 char *type = "";
282 unsigned long long start, len;
283
284 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
285 type = "m";
286 else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
287 type = "i";
288
289 start = (unsigned long long)pci_resource_start(pdev, bar);
290 len = (unsigned long long)pci_resource_len(pdev, bar);
291
292 if (offset < 0)
293 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
294 else
e6a73ab1
AM
295 ata_port_desc(ap, "%s 0x%llx", name,
296 start + (unsigned long long)offset);
cbcdd875 297}
a52fbcfc 298EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
cbcdd875
TH
299#endif /* CONFIG_PCI */
300
87fbc5a0
TH
301static int ata_lookup_timeout_table(u8 cmd)
302{
303 int i;
304
305 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
306 const u8 *cur;
307
308 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
309 if (*cur == cmd)
310 return i;
311 }
312
313 return -1;
314}
315
316/**
317 * ata_internal_cmd_timeout - determine timeout for an internal command
318 * @dev: target device
319 * @cmd: internal command to be issued
320 *
321 * Determine timeout for internal command @cmd for @dev.
322 *
323 * LOCKING:
324 * EH context.
325 *
326 * RETURNS:
327 * Determined timeout.
328 */
e06233f9 329unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
87fbc5a0
TH
330{
331 struct ata_eh_context *ehc = &dev->link->eh_context;
332 int ent = ata_lookup_timeout_table(cmd);
333 int idx;
334
335 if (ent < 0)
336 return ATA_EH_CMD_DFL_TIMEOUT;
337
338 idx = ehc->cmd_timeout_idx[dev->devno][ent];
339 return ata_eh_cmd_timeout_table[ent].timeouts[idx];
340}
341
342/**
343 * ata_internal_cmd_timed_out - notification for internal command timeout
344 * @dev: target device
345 * @cmd: internal command which timed out
346 *
347 * Notify EH that internal command @cmd for @dev timed out. This
348 * function should be called only for commands whose timeouts are
349 * determined using ata_internal_cmd_timeout().
350 *
351 * LOCKING:
352 * EH context.
353 */
354void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
355{
356 struct ata_eh_context *ehc = &dev->link->eh_context;
357 int ent = ata_lookup_timeout_table(cmd);
358 int idx;
359
360 if (ent < 0)
361 return;
362
363 idx = ehc->cmd_timeout_idx[dev->devno][ent];
e06233f9 364 if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != UINT_MAX)
87fbc5a0
TH
365 ehc->cmd_timeout_idx[dev->devno][ent]++;
366}
367
3884f7b0 368static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
0c247c55
TH
369 unsigned int err_mask)
370{
371 struct ata_ering_entry *ent;
372
373 WARN_ON(!err_mask);
374
375 ering->cursor++;
376 ering->cursor %= ATA_ERING_SIZE;
377
378 ent = &ering->ring[ering->cursor];
3884f7b0 379 ent->eflags = eflags;
0c247c55
TH
380 ent->err_mask = err_mask;
381 ent->timestamp = get_jiffies_64();
382}
383
76326ac1
TH
384static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
385{
386 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
387
388 if (ent->err_mask)
389 return ent;
390 return NULL;
391}
392
d9027470
GG
393int ata_ering_map(struct ata_ering *ering,
394 int (*map_fn)(struct ata_ering_entry *, void *),
395 void *arg)
0c247c55
TH
396{
397 int idx, rc = 0;
398 struct ata_ering_entry *ent;
399
400 idx = ering->cursor;
401 do {
402 ent = &ering->ring[idx];
403 if (!ent->err_mask)
404 break;
405 rc = map_fn(ent, arg);
406 if (rc)
407 break;
408 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
409 } while (idx != ering->cursor);
410
411 return rc;
412}
413
60428407 414static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
d9027470
GG
415{
416 ent->eflags |= ATA_EFLAG_OLD_ER;
417 return 0;
418}
419
420static void ata_ering_clear(struct ata_ering *ering)
421{
422 ata_ering_map(ering, ata_ering_clear_cb, NULL);
423}
424
64f65ca6
TH
425static unsigned int ata_eh_dev_action(struct ata_device *dev)
426{
9af5c9c9 427 struct ata_eh_context *ehc = &dev->link->eh_context;
64f65ca6
TH
428
429 return ehc->i.action | ehc->i.dev_action[dev->devno];
430}
431
f58229f8 432static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
af181c2d
TH
433 struct ata_eh_info *ehi, unsigned int action)
434{
f58229f8 435 struct ata_device *tdev;
af181c2d
TH
436
437 if (!dev) {
438 ehi->action &= ~action;
1eca4365 439 ata_for_each_dev(tdev, link, ALL)
f58229f8 440 ehi->dev_action[tdev->devno] &= ~action;
af181c2d
TH
441 } else {
442 /* doesn't make sense for port-wide EH actions */
443 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
444
445 /* break ehi->action into ehi->dev_action */
446 if (ehi->action & action) {
1eca4365 447 ata_for_each_dev(tdev, link, ALL)
f58229f8
TH
448 ehi->dev_action[tdev->devno] |=
449 ehi->action & action;
af181c2d
TH
450 ehi->action &= ~action;
451 }
452
453 /* turn off the specified per-dev action */
454 ehi->dev_action[dev->devno] &= ~action;
455 }
456}
457
c0c362b6
TH
458/**
459 * ata_eh_acquire - acquire EH ownership
460 * @ap: ATA port to acquire EH ownership for
461 *
462 * Acquire EH ownership for @ap. This is the basic exclusion
463 * mechanism for ports sharing a host. Only one port hanging off
464 * the same host can claim the ownership of EH.
465 *
466 * LOCKING:
467 * EH context.
468 */
469void ata_eh_acquire(struct ata_port *ap)
470{
471 mutex_lock(&ap->host->eh_mutex);
472 WARN_ON_ONCE(ap->host->eh_owner);
473 ap->host->eh_owner = current;
474}
475
476/**
477 * ata_eh_release - release EH ownership
478 * @ap: ATA port to release EH ownership for
479 *
480 * Release EH ownership for @ap if the caller. The caller must
481 * have acquired EH ownership using ata_eh_acquire() previously.
482 *
483 * LOCKING:
484 * EH context.
485 */
486void ata_eh_release(struct ata_port *ap)
487{
488 WARN_ON_ONCE(ap->host->eh_owner != current);
489 ap->host->eh_owner = NULL;
490 mutex_unlock(&ap->host->eh_mutex);
491}
492
ece180d1
TH
493static void ata_eh_unload(struct ata_port *ap)
494{
495 struct ata_link *link;
496 struct ata_device *dev;
497 unsigned long flags;
498
499 /* Restore SControl IPM and SPD for the next driver and
500 * disable attached devices.
501 */
502 ata_for_each_link(link, ap, PMP_FIRST) {
503 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
504 ata_for_each_dev(dev, link, ALL)
505 ata_dev_disable(dev);
506 }
507
508 /* freeze and set UNLOADED */
509 spin_lock_irqsave(ap->lock, flags);
510
511 ata_port_freeze(ap); /* won't be thawed */
512 ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */
513 ap->pflags |= ATA_PFLAG_UNLOADED;
514
515 spin_unlock_irqrestore(ap->lock, flags);
516}
517
ece1d636
TH
518/**
519 * ata_scsi_error - SCSI layer error handler callback
520 * @host: SCSI host on which error occurred
521 *
522 * Handles SCSI-layer-thrown error events.
523 *
524 * LOCKING:
525 * Inherited from SCSI layer (none, can sleep)
526 *
527 * RETURNS:
528 * Zero.
529 */
381544bb 530void ata_scsi_error(struct Scsi_Host *host)
ece1d636 531{
35bb94b1 532 struct ata_port *ap = ata_shost_to_port(host);
ad9e2762 533 unsigned long flags;
c34aeebc 534 LIST_HEAD(eh_work_q);
ece1d636 535
c34aeebc
JB
536 spin_lock_irqsave(host->host_lock, flags);
537 list_splice_init(&host->eh_cmd_q, &eh_work_q);
538 spin_unlock_irqrestore(host->host_lock, flags);
539
0e0b494c
JB
540 ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
541
542 /* If we timed raced normal completion and there is nothing to
543 recover nr_timedout == 0 why exactly are we doing error recovery ? */
544 ata_scsi_port_error_handler(host, ap);
545
546 /* finish or retry handled scmd's and clean up */
72d8c36e 547 WARN_ON(!list_empty(&eh_work_q));
0e0b494c 548
0e0b494c
JB
549}
550
551/**
552 * ata_scsi_cmd_error_handler - error callback for a list of commands
553 * @host: scsi host containing the port
554 * @ap: ATA port within the host
555 * @eh_work_q: list of commands to process
556 *
557 * process the given list of commands and return those finished to the
558 * ap->eh_done_q. This function is the first part of the libata error
559 * handler which processes a given list of failed commands.
560 */
561void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
562 struct list_head *eh_work_q)
563{
564 int i;
565 unsigned long flags;
566
c429137a
TH
567 /* make sure sff pio task is not running */
568 ata_sff_flush_pio_task(ap);
ece1d636 569
cca3974e 570 /* synchronize with host lock and sort out timeouts */
ad9e2762
TH
571
572 /* For new EH, all qcs are finished in one of three ways -
573 * normal completion, error completion, and SCSI timeout.
c96f1732 574 * Both completions can race against SCSI timeout. When normal
ad9e2762
TH
575 * completion wins, the qc never reaches EH. When error
576 * completion wins, the qc has ATA_QCFLAG_FAILED set.
577 *
578 * When SCSI timeout wins, things are a bit more complex.
579 * Normal or error completion can occur after the timeout but
580 * before this point. In such cases, both types of
581 * completions are honored. A scmd is determined to have
582 * timed out iff its associated qc is active and not failed.
583 */
a4f08141 584 spin_lock_irqsave(ap->lock, flags);
ad9e2762
TH
585 if (ap->ops->error_handler) {
586 struct scsi_cmnd *scmd, *tmp;
587 int nr_timedout = 0;
588
c96f1732
AC
589 /* This must occur under the ap->lock as we don't want
590 a polled recovery to race the real interrupt handler
d9027470 591
c96f1732
AC
592 The lost_interrupt handler checks for any completed but
593 non-notified command and completes much like an IRQ handler.
d9027470 594
c96f1732
AC
595 We then fall into the error recovery code which will treat
596 this as if normal completion won the race */
597
598 if (ap->ops->lost_interrupt)
599 ap->ops->lost_interrupt(ap);
d9027470 600
0e0b494c 601 list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
ad9e2762
TH
602 struct ata_queued_cmd *qc;
603
258c4e5c 604 ata_qc_for_each_raw(ap, qc, i) {
ad9e2762
TH
605 if (qc->flags & ATA_QCFLAG_ACTIVE &&
606 qc->scsicmd == scmd)
607 break;
608 }
609
610 if (i < ATA_MAX_QUEUE) {
611 /* the scmd has an associated qc */
612 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
613 /* which hasn't failed yet, timeout */
614 qc->err_mask |= AC_ERR_TIMEOUT;
615 qc->flags |= ATA_QCFLAG_FAILED;
616 nr_timedout++;
617 }
618 } else {
619 /* Normal completion occurred after
620 * SCSI timeout but before this point.
621 * Successfully complete it.
622 */
623 scmd->retries = scmd->allowed;
624 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
625 }
626 }
627
628 /* If we have timed out qcs. They belong to EH from
629 * this point but the state of the controller is
630 * unknown. Freeze the port to make sure the IRQ
631 * handler doesn't diddle with those qcs. This must
632 * be done atomically w.r.t. setting QCFLAG_FAILED.
633 */
634 if (nr_timedout)
635 __ata_port_freeze(ap);
636
a1e10f7e
TH
637
638 /* initialize eh_tries */
639 ap->eh_tries = ATA_EH_MAX_TRIES;
a4f08141
PM
640 }
641 spin_unlock_irqrestore(ap->lock, flags);
d9027470 642
0e0b494c
JB
643}
644EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
645
646/**
647 * ata_scsi_port_error_handler - recover the port after the commands
648 * @host: SCSI host containing the port
649 * @ap: the ATA port
650 *
651 * Handle the recovery of the port @ap after all the commands
652 * have been recovered.
653 */
654void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
655{
656 unsigned long flags;
ad9e2762 657
ad9e2762
TH
658 /* invoke error handler */
659 if (ap->ops->error_handler) {
cf1b86c8
TH
660 struct ata_link *link;
661
c0c362b6
TH
662 /* acquire EH ownership */
663 ata_eh_acquire(ap);
664 repeat:
5ddf24c5
TH
665 /* kill fast drain timer */
666 del_timer_sync(&ap->fastdrain_timer);
667
500530f6
TH
668 /* process port resume request */
669 ata_eh_handle_port_resume(ap);
670
f3e81b19 671 /* fetch & clear EH info */
e30349d2 672 spin_lock_irqsave(ap->lock, flags);
f3e81b19 673
1eca4365 674 ata_for_each_link(link, ap, HOST_FIRST) {
00115e0f
TH
675 struct ata_eh_context *ehc = &link->eh_context;
676 struct ata_device *dev;
677
cf1b86c8
TH
678 memset(&link->eh_context, 0, sizeof(link->eh_context));
679 link->eh_context.i = link->eh_info;
680 memset(&link->eh_info, 0, sizeof(link->eh_info));
00115e0f 681
1eca4365 682 ata_for_each_dev(dev, link, ENABLED) {
00115e0f
TH
683 int devno = dev->devno;
684
685 ehc->saved_xfer_mode[devno] = dev->xfer_mode;
686 if (ata_ncq_enabled(dev))
687 ehc->saved_ncq_enabled |= 1 << devno;
688 }
cf1b86c8 689 }
f3e81b19 690
b51e9e5d
TH
691 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
692 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
da917d69 693 ap->excl_link = NULL; /* don't maintain exclusion over EH */
f3e81b19 694
e30349d2 695 spin_unlock_irqrestore(ap->lock, flags);
ad9e2762 696
500530f6
TH
697 /* invoke EH, skip if unloading or suspended */
698 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
720ba126 699 ap->ops->error_handler(ap);
ece180d1
TH
700 else {
701 /* if unloading, commence suicide */
702 if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
703 !(ap->pflags & ATA_PFLAG_UNLOADED))
704 ata_eh_unload(ap);
720ba126 705 ata_eh_finish(ap);
ece180d1 706 }
ad9e2762 707
500530f6
TH
708 /* process port suspend request */
709 ata_eh_handle_port_suspend(ap);
710
25985edc 711 /* Exception might have happened after ->error_handler
ad9e2762
TH
712 * recovered the port but before this point. Repeat
713 * EH in such case.
714 */
e30349d2 715 spin_lock_irqsave(ap->lock, flags);
ad9e2762 716
b51e9e5d 717 if (ap->pflags & ATA_PFLAG_EH_PENDING) {
a1e10f7e 718 if (--ap->eh_tries) {
e30349d2 719 spin_unlock_irqrestore(ap->lock, flags);
ad9e2762
TH
720 goto repeat;
721 }
a9a79dfe
JP
722 ata_port_err(ap,
723 "EH pending after %d tries, giving up\n",
724 ATA_EH_MAX_TRIES);
914616a3 725 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
ad9e2762
TH
726 }
727
f3e81b19 728 /* this run is complete, make sure EH info is clear */
1eca4365 729 ata_for_each_link(link, ap, HOST_FIRST)
cf1b86c8 730 memset(&link->eh_info, 0, sizeof(link->eh_info));
f3e81b19 731
e4a9c373
DW
732 /* end eh (clear host_eh_scheduled) while holding
733 * ap->lock such that if exception occurs after this
734 * point but before EH completion, SCSI midlayer will
ad9e2762
TH
735 * re-initiate EH.
736 */
e4a9c373 737 ap->ops->end_eh(ap);
ad9e2762 738
e30349d2 739 spin_unlock_irqrestore(ap->lock, flags);
c0c362b6 740 ata_eh_release(ap);
ad9e2762 741 } else {
9af5c9c9 742 WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
ad9e2762
TH
743 ap->ops->eng_timeout(ap);
744 }
ece1d636 745
ece1d636
TH
746 scsi_eh_flush_done_q(&ap->eh_done_q);
747
ad9e2762 748 /* clean up */
e30349d2 749 spin_lock_irqsave(ap->lock, flags);
ad9e2762 750
1cdaf534 751 if (ap->pflags & ATA_PFLAG_LOADING)
b51e9e5d 752 ap->pflags &= ~ATA_PFLAG_LOADING;
6f54120e
JY
753 else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
754 !(ap->flags & ATA_FLAG_SAS_HOST))
ad72cf98 755 schedule_delayed_work(&ap->hotplug_task, 0);
1cdaf534
TH
756
757 if (ap->pflags & ATA_PFLAG_RECOVERED)
a9a79dfe 758 ata_port_info(ap, "EH complete\n");
580b2102 759
b51e9e5d 760 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
ad9e2762 761
c6cf9e99 762 /* tell wait_eh that we're done */
b51e9e5d 763 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
c6cf9e99
TH
764 wake_up_all(&ap->eh_wait_q);
765
e30349d2 766 spin_unlock_irqrestore(ap->lock, flags);
ece1d636 767}
0e0b494c 768EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
ece1d636 769
c6cf9e99
TH
770/**
771 * ata_port_wait_eh - Wait for the currently pending EH to complete
772 * @ap: Port to wait EH for
773 *
774 * Wait until the currently pending EH is complete.
775 *
776 * LOCKING:
777 * Kernel thread context (may sleep).
778 */
779void ata_port_wait_eh(struct ata_port *ap)
780{
781 unsigned long flags;
782 DEFINE_WAIT(wait);
783
784 retry:
ba6a1308 785 spin_lock_irqsave(ap->lock, flags);
c6cf9e99 786
b51e9e5d 787 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
c6cf9e99 788 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
ba6a1308 789 spin_unlock_irqrestore(ap->lock, flags);
c6cf9e99 790 schedule();
ba6a1308 791 spin_lock_irqsave(ap->lock, flags);
c6cf9e99 792 }
0a1b622e 793 finish_wait(&ap->eh_wait_q, &wait);
c6cf9e99 794
ba6a1308 795 spin_unlock_irqrestore(ap->lock, flags);
c6cf9e99
TH
796
797 /* make sure SCSI EH is complete */
cca3974e 798 if (scsi_host_in_recovery(ap->scsi_host)) {
97750ceb 799 ata_msleep(ap, 10);
c6cf9e99
TH
800 goto retry;
801 }
802}
81c757bc 803EXPORT_SYMBOL_GPL(ata_port_wait_eh);
c6cf9e99 804
afae461a 805static unsigned int ata_eh_nr_in_flight(struct ata_port *ap)
5ddf24c5 806{
258c4e5c 807 struct ata_queued_cmd *qc;
5ddf24c5 808 unsigned int tag;
afae461a 809 unsigned int nr = 0;
5ddf24c5
TH
810
811 /* count only non-internal commands */
258c4e5c
JA
812 ata_qc_for_each(ap, qc, tag) {
813 if (qc)
5ddf24c5 814 nr++;
9d207acc 815 }
5ddf24c5
TH
816
817 return nr;
818}
819
b93ab338 820void ata_eh_fastdrain_timerfn(struct timer_list *t)
5ddf24c5 821{
b93ab338 822 struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
5ddf24c5 823 unsigned long flags;
afae461a 824 unsigned int cnt;
5ddf24c5
TH
825
826 spin_lock_irqsave(ap->lock, flags);
827
828 cnt = ata_eh_nr_in_flight(ap);
829
830 /* are we done? */
831 if (!cnt)
832 goto out_unlock;
833
834 if (cnt == ap->fastdrain_cnt) {
258c4e5c 835 struct ata_queued_cmd *qc;
5ddf24c5
TH
836 unsigned int tag;
837
838 /* No progress during the last interval, tag all
839 * in-flight qcs as timed out and freeze the port.
840 */
258c4e5c 841 ata_qc_for_each(ap, qc, tag) {
5ddf24c5
TH
842 if (qc)
843 qc->err_mask |= AC_ERR_TIMEOUT;
844 }
845
846 ata_port_freeze(ap);
847 } else {
848 /* some qcs have finished, give it another chance */
849 ap->fastdrain_cnt = cnt;
850 ap->fastdrain_timer.expires =
341c2c95 851 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
5ddf24c5
TH
852 add_timer(&ap->fastdrain_timer);
853 }
854
855 out_unlock:
856 spin_unlock_irqrestore(ap->lock, flags);
857}
858
859/**
860 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
861 * @ap: target ATA port
862 * @fastdrain: activate fast drain
863 *
864 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
865 * is non-zero and EH wasn't pending before. Fast drain ensures
866 * that EH kicks in in timely manner.
867 *
868 * LOCKING:
869 * spin_lock_irqsave(host lock)
870 */
871static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
872{
afae461a 873 unsigned int cnt;
5ddf24c5
TH
874
875 /* already scheduled? */
876 if (ap->pflags & ATA_PFLAG_EH_PENDING)
877 return;
878
879 ap->pflags |= ATA_PFLAG_EH_PENDING;
880
881 if (!fastdrain)
882 return;
883
884 /* do we have in-flight qcs? */
885 cnt = ata_eh_nr_in_flight(ap);
886 if (!cnt)
887 return;
888
889 /* activate fast drain */
890 ap->fastdrain_cnt = cnt;
341c2c95
TH
891 ap->fastdrain_timer.expires =
892 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
5ddf24c5
TH
893 add_timer(&ap->fastdrain_timer);
894}
895
f686bcb8
TH
896/**
897 * ata_qc_schedule_eh - schedule qc for error handling
898 * @qc: command to schedule error handling for
899 *
900 * Schedule error handling for @qc. EH will kick in as soon as
901 * other commands are drained.
902 *
903 * LOCKING:
cca3974e 904 * spin_lock_irqsave(host lock)
f686bcb8
TH
905 */
906void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
907{
908 struct ata_port *ap = qc->ap;
909
910 WARN_ON(!ap->ops->error_handler);
911
912 qc->flags |= ATA_QCFLAG_FAILED;
5ddf24c5 913 ata_eh_set_pending(ap, 1);
f686bcb8
TH
914
915 /* The following will fail if timeout has already expired.
916 * ata_scsi_error() takes care of such scmds on EH entry.
917 * Note that ATA_QCFLAG_FAILED is unconditionally set after
918 * this function completes.
919 */
c8329cd5 920 blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
f686bcb8
TH
921}
922
7b70fc03 923/**
e4a9c373
DW
924 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
925 * @ap: ATA port to schedule EH for
7b70fc03 926 *
e4a9c373 927 * LOCKING: inherited from ata_port_schedule_eh
cca3974e 928 * spin_lock_irqsave(host lock)
7b70fc03 929 */
e4a9c373 930void ata_std_sched_eh(struct ata_port *ap)
7b70fc03
TH
931{
932 WARN_ON(!ap->ops->error_handler);
933
f4d6d004
TH
934 if (ap->pflags & ATA_PFLAG_INITIALIZING)
935 return;
936
5ddf24c5 937 ata_eh_set_pending(ap, 1);
cca3974e 938 scsi_schedule_eh(ap->scsi_host);
7b70fc03 939
c318458c 940 trace_ata_std_sched_eh(ap);
7b70fc03 941}
e4a9c373
DW
942EXPORT_SYMBOL_GPL(ata_std_sched_eh);
943
944/**
945 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
946 * @ap: ATA port to end EH for
947 *
948 * In the libata object model there is a 1:1 mapping of ata_port to
949 * shost, so host fields can be directly manipulated under ap->lock, in
950 * the libsas case we need to hold a lock at the ha->level to coordinate
951 * these events.
952 *
953 * LOCKING:
954 * spin_lock_irqsave(host lock)
955 */
956void ata_std_end_eh(struct ata_port *ap)
957{
958 struct Scsi_Host *host = ap->scsi_host;
959
960 host->host_eh_scheduled = 0;
961}
962EXPORT_SYMBOL(ata_std_end_eh);
963
964
965/**
966 * ata_port_schedule_eh - schedule error handling without a qc
967 * @ap: ATA port to schedule EH for
968 *
969 * Schedule error handling for @ap. EH will kick in as soon as
970 * all commands are drained.
971 *
972 * LOCKING:
973 * spin_lock_irqsave(host lock)
974 */
975void ata_port_schedule_eh(struct ata_port *ap)
976{
977 /* see: ata_std_sched_eh, unless you know better */
978 ap->ops->sched_eh(ap);
979}
a52fbcfc 980EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7b70fc03 981
/*
 * Abort all active qc's belonging to @link, or every qc on @ap when
 * @link is NULL.  If nothing was in flight, schedule EH anyway so the
 * pending error condition still gets handled.  Returns the number of
 * aborted qc's.  Caller holds the host lock.
 */
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	struct ata_queued_cmd *qc;
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	/* include internal tag in iteration */
	ata_qc_for_each_with_internal(ap, qc, tag) {
		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	/* nothing aborted: EH won't be triggered by qc completion, do it here */
	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}
1006
dbd82616
TH
/**
 * ata_link_abort - abort all qc's on the link
 * @link: ATA link to abort qc's for
 *
 * Abort all active qc's active on @link and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}
EXPORT_SYMBOL_GPL(ata_link_abort);
dbd82616
TH
1024
/**
 * ata_port_abort - abort all qc's on the port
 * @ap: ATA port to abort qc's for
 *
 * Abort all active qc's of @ap and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	/* NULL link means "all links on this port" */
	return ata_do_link_abort(ap, NULL);
}
EXPORT_SYMBOL_GPL(ata_port_abort);
dbd82616 1042
e3180499
TH
/**
 * __ata_port_freeze - freeze port
 * @ap: ATA port to freeze
 *
 * This function is called when HSM violation or some other
 * condition disrupts normal operation of the port.  Frozen port
 * is not allowed to perform any operation until the port is
 * thawed, which usually follows a successful reset.
 *
 * ap->ops->freeze() callback can be used for freezing the port
 * hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 * port cannot be frozen hardware-wise, the interrupt handler
 * must ack and clear interrupts unconditionally while the port
 * is frozen.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	/* hardware freeze first (optional callback), then mark the port */
	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	trace_ata_port_freeze(ap);
}
1072
/**
 * ata_port_freeze - abort & freeze port
 * @ap: ATA port to freeze
 *
 * Abort and freeze @ap.  The freeze operation must be called
 * first, because some hardware requires special operations
 * before the taskfile registers are accessible.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	/* freeze before abort -- see the ordering note above */
	__ata_port_freeze(ap);

	return ata_port_abort(ap);
}
EXPORT_SYMBOL_GPL(ata_port_freeze);
e3180499
TH
1096
/**
 * ata_eh_freeze_port - EH helper to freeze port
 * @ap: ATA port to freeze
 *
 * Freeze @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	/* old-style (non-EH) drivers have nothing to freeze */
	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
e3180499
TH
1118
/**
 * ata_eh_thaw_port - EH helper to thaw port
 * @ap: ATA port to thaw
 *
 * Thaw frozen port @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	/* old-style (non-EH) drivers are never frozen */
	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	/* clear the flag before re-enabling hardware interrupts */
	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	trace_ata_port_thaw(ap);
}
1146
ece1d636
TH
/* Intentional no-op ->scsi_done: EH completes scmds via scsi_eh_finish_cmd() */
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}
1151
/*
 * Complete @qc from EH context: swap in the no-op done callback, finish
 * the qc under ap->lock, then hand the scmd back to the SCSI midlayer's
 * EH done queue.
 */
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	/* qc must have been released -- its tag is invalid now */
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
1166
1167/**
1168 * ata_eh_qc_complete - Complete an active ATA command from EH
1169 * @qc: Command to complete
1170 *
1171 * Indicate to the mid and upper layers that an ATA command has
1172 * completed. To be used from EH.
1173 */
1174void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1175{
1176 struct scsi_cmnd *scmd = qc->scsicmd;
1177 scmd->retries = scmd->allowed;
1178 __ata_eh_qc_complete(qc);
1179}
1180
1181/**
1182 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1183 * @qc: Command to retry
1184 *
1185 * Indicate to the mid and upper layers that an ATA command
1186 * should be retried. To be used from EH.
1187 *
1188 * SCSI midlayer limits the number of retries to scmd->allowed.
f13e2201 1189 * scmd->allowed is incremented for commands which get retried
ece1d636
TH
1190 * due to unrelated failures (qc->err_mask is zero).
1191 */
1192void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1193{
1194 struct scsi_cmnd *scmd = qc->scsicmd;
f13e2201
GG
1195 if (!qc->err_mask)
1196 scmd->allowed++;
ece1d636
TH
1197 __ata_eh_qc_complete(qc);
1198}
022bdb07 1199
678afac6
TH
/**
 * ata_dev_disable - disable ATA device
 * @dev: ATA device to disable
 *
 * Disable @dev.
 *
 * Locking:
 * EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	ata_dev_warn(dev, "disable device\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	/* incrementing class moves it to the matching _UNSUP class */
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}
EXPORT_SYMBOL_GPL(ata_dev_disable);
678afac6 1225
0ea035a3
TH
/**
 * ata_eh_detach_dev - detach ATA device
 * @dev: ATA device to detach
 *
 * Detach @dev.
 *
 * LOCKING:
 * None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	/* if SCSI side is offline, mark detached and ask for SCSI hotplug */
	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}
1261
022bdb07
TH
/**
 * ata_eh_about_to_do - about to perform eh_action
 * @link: target ATA link
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action about to be performed
 *
 * Called just before performing EH actions to clear related bits
 * in @link->eh_info such that eh actions are not unnecessarily
 * repeated.
 *
 * LOCKING:
 * None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action);

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
1297
47005f25
TH
/**
 * ata_eh_done - EH action complete
 * @link: ATA link for which EH actions are complete
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action just completed
 *
 * Called right after performing EH actions to clear related bits
 * in @link->eh_context.
 *
 * LOCKING:
 * None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	trace_ata_eh_done(link, dev ? dev->devno : 0, action);

	ata_eh_clear_action(link, dev, &ehc->i, action);
}
1319
022bdb07
TH
1320/**
1321 * ata_err_string - convert err_mask to descriptive string
1322 * @err_mask: error mask to convert to string
1323 *
1324 * Convert @err_mask to descriptive string. Errors are
1325 * prioritized according to severity and only the most severe
1326 * error is reported.
1327 *
1328 * LOCKING:
1329 * None.
1330 *
1331 * RETURNS:
1332 * Descriptive string for @err_mask
1333 */
2dcb407e 1334static const char *ata_err_string(unsigned int err_mask)
022bdb07
TH
1335{
1336 if (err_mask & AC_ERR_HOST_BUS)
1337 return "host bus error";
1338 if (err_mask & AC_ERR_ATA_BUS)
1339 return "ATA bus error";
1340 if (err_mask & AC_ERR_TIMEOUT)
1341 return "timeout";
1342 if (err_mask & AC_ERR_HSM)
1343 return "HSM violation";
1344 if (err_mask & AC_ERR_SYSTEM)
1345 return "internal error";
1346 if (err_mask & AC_ERR_MEDIA)
1347 return "media error";
1348 if (err_mask & AC_ERR_INVALID)
1349 return "invalid argument";
1350 if (err_mask & AC_ERR_DEV)
1351 return "device error";
54fb131b
DLM
1352 if (err_mask & AC_ERR_NCQ)
1353 return "NCQ error";
1354 if (err_mask & AC_ERR_NODEV_HINT)
1355 return "Polling detection error";
022bdb07
TH
1356 return "unknown error";
1357}
1358
11fc33da
TH
/**
 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 * @dev: target ATAPI device
 * @r_sense_key: out parameter for sense_key
 *
 * Perform ATAPI TEST_UNIT_READY.
 *
 * LOCKING:
 * EH context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure.
 */
unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	/* on device error, the sense key lives in bits 7:4 of the error reg */
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.error >> 4;
	return err_mask;
}
1389
e87fd28c
HR
/**
 * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
 * @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
 *
 * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
 * SENSE.  This function is an EH helper.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_eh_request_sense(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* frozen port can't issue commands -- just warn and bail */
	if (qc->ap->pflags & ATA_PFLAG_FROZEN) {
		ata_dev_warn(dev, "sense data available but port frozen\n");
		return;
	}

	/* nothing to do without an scmd or if sense was already captured */
	if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID)
		return;

	if (!ata_id_sense_reporting_enabled(dev->id)) {
		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
		return;
	}

	ata_tf_init(dev, &tf);
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	tf.command = ATA_CMD_REQ_SENSE_DATA;
	tf.protocol = ATA_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* Ignore err_mask; ATA_ERR might be set */
	if (tf.status & ATA_SENSE) {
		/* sense key / asc / ascq are returned in lbah/lbam/lbal */
		ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
		qc->flags |= ATA_QCFLAG_SENSE_VALID;
	} else {
		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
			     tf.status, err_mask);
	}
}
1436
022bdb07
TH
/**
 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 * @dev: device to perform REQUEST_SENSE to
 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 * @dfl_sense_key: default sense key to use
 *
 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
 * SENSE.  This function is EH helper.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure
 */
unsigned int atapi_eh_request_sense(struct ata_device *dev,
				    u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;		/* fixed-format sense data */
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		/* byte count limit goes in lbam/lbah for PIO transfers */
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}
1487
/**
 * ata_eh_analyze_serror - analyze SError for a failed port
 * @link: ATA link to analyze SError for
 *
 * Analyze SError if available and further determine cause of
 * failure.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	/* persistent comm and data integrity errors -> bus error, reset */
	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	if (link->lpm_policy > ATA_LPM_MAX_POWER)
		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}
1536
/**
 * ata_eh_analyze_tf - analyze taskfile of a failed qc
 * @qc: qc to analyze
 * @tf: Taskfile registers to analyze
 *
 * Analyze taskfile of @qc and further determine cause of
 * failure.  This function also requests ATAPI sense data if
 * available.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->status, err = tf->error;

	/* device should be ready and neither busy nor requesting data */
	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF)) {
		qc->err_mask |= AC_ERR_DEV;
		/*
		 * Sense data reporting does not work if the
		 * device fault bit is set.
		 */
		if (stat & ATA_DF)
			stat &= ~ATA_SENSE;
	} else {
		return 0;
	}

	switch (qc->dev->class) {
	case ATA_DEV_ZAC:
		if (stat & ATA_SENSE)
			ata_eh_request_sense(qc);
		fallthrough;
	case ATA_DEV_ATA:
		/* decode the error register into specific error classes */
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & (ATA_UNC | ATA_AMNF))
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.error >> 4);
			if (!tmp)
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			else
				qc->err_mask |= tmp;
		}
	}

	if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
		enum scsi_disposition ret = scsi_check_sense(qc->scsicmd);
		/*
		 * SUCCESS here means that the sense code could be
		 * evaluated and should be passed to the upper layers
		 * for correct evaluation.
		 * FAILED means the sense code could not be interpreted
		 * and the device would need to be reset.
		 * NEEDS_RETRY and ADD_TO_MLQUEUE means that the
		 * command would need to be retried.
		 */
		if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
			qc->flags |= ATA_QCFLAG_RETRY;
			qc->err_mask |= AC_ERR_OTHER;
		} else if (ret != SUCCESS) {
			qc->err_mask |= AC_ERR_HSM;
		}
	}
	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}
1624
76326ac1
TH
/*
 * Map (@eflags, @err_mask) to an ATA_ECAT_* error category.  @xfer_ok is
 * in/out state shared across a walk of the error ring: once a verified
 * (non-DUBIOUS) transfer is seen, it stays set and later entries fall into
 * the regular (non-DUBIOUS) categories.  Returns ATA_ECAT_NONE (0) for
 * errors that don't factor into speed-down decisions.
 */
static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	/* no verified transfer yet: shift into the DUBIOUS_* categories */
	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}
1652
/* accumulator for a walk over a device's error ring */
struct speed_down_verdict_arg {
	u64 since;			/* ignore entries older than this */
	int xfer_ok;			/* shared state for categorization */
	int nr_errors[ATA_ECAT_NR];	/* per-category error counts */
};

/* ata_ering_map() callback: count @ent into the matching category */
static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	/* stop the walk at stale or pre-cleared entries */
	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}
1673
/**
 * ata_eh_speed_down_verdict - Determine speed down verdict
 * @dev: Device of interest
 *
 * This function examines error ring of @dev and determines
 * whether NCQ needs to be turned off, transfer speed should be
 * stepped down, or falling back to PIO is necessary.
 *
 * ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
 *		  IO commands
 *
 * ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 * ECAT_DUBIOUS_* : Identical to above three but occurred while
 *		  data transfer hasn't been verified.
 *
 * Verdicts are
 *
 * NCQ_OFF	: Turn off NCQ.
 *
 * SPEED_DOWN	: Speed down transfer speed but don't fall back
 *		  to PIO.
 *
 * FALLBACK_TO_PIO : Fall back to PIO.
 *
 * Even if multiple verdicts are returned, only one action is
 * taken per error.  An action triggered by non-DUBIOUS errors
 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
 * This is to expedite speed down decisions right after device is
 * initially configured.
 *
 * The following are speed down rules.  #1 and #2 deal with
 * DUBIOUS errors.
 *
 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *    occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *    occurred during last 5 mins, NCQ_OFF.
 *
 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *    occurred during last 5 mins, FALLBACK_TO_PIO
 *
 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *    during last 10 mins, NCQ_OFF.
 *
 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *    UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
1773
/**
 * ata_eh_speed_down - record error and speed down if necessary
 * @dev: Failed device
 * @eflags: mask of ATA_EFLAG_* flags
 * @err_mask: err_mask of the error
 *
 * Record error and examine error history to determine whether
 * adjusting transmission speed is necessary.  It also sets
 * transmission limits appropriately if such adjustment is
 * necessary.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			/* first step lowers within the current class,
			 * second step forces the most conservative mode
			 */
			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
1866
8d899e70
ML
1867/**
1868 * ata_eh_worth_retry - analyze error and decide whether to retry
1869 * @qc: qc to possibly retry
1870 *
1871 * Look at the cause of the error and decide if a retry
1872 * might be useful or not. We don't want to retry media errors
1873 * because the drive itself has probably already taken 10-30 seconds
1874 * doing its own internal retries before reporting the failure.
1875 */
1876static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
1877{
1eaca39a 1878 if (qc->err_mask & AC_ERR_MEDIA)
8d899e70
ML
1879 return 0; /* don't retry media errors */
1880 if (qc->flags & ATA_QCFLAG_IO)
1881 return 1; /* otherwise retry anything from fs stack */
1882 if (qc->err_mask & AC_ERR_INVALID)
1883 return 0; /* don't retry these */
1884 return qc->err_mask != AC_ERR_DEV; /* retry if not dev error */
1885}
1886
7eb49509
DLM
1887/**
1888 * ata_eh_quiet - check if we need to be quiet about a command error
1889 * @qc: qc to check
1890 *
1891 * Look at the qc flags anbd its scsi command request flags to determine
1892 * if we need to be quiet about the command failure.
1893 */
1894static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
1895{
c8329cd5 1896 if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET)
7eb49509
DLM
1897 qc->flags |= ATA_QCFLAG_QUIET;
1898 return qc->flags & ATA_QCFLAG_QUIET;
1899}
1900
022bdb07 1901/**
9b1e2658
TH
1902 * ata_eh_link_autopsy - analyze error and determine recovery action
1903 * @link: host link to perform autopsy on
022bdb07 1904 *
0260731f
TH
1905 * Analyze why @link failed and determine which recovery actions
1906 * are needed. This function also sets more detailed AC_ERR_*
1907 * values and fills sense data for ATAPI CHECK SENSE.
022bdb07
TH
1908 *
1909 * LOCKING:
1910 * Kernel thread context (may sleep).
1911 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_queued_cmd *qc;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag, nr_failed = 0, nr_quiet = 0;
	u32 serror;
	int rc;

	/* nothing to do if autopsy is disabled for this EH session */
	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	/* walk all failed commands on this link and refine their err_mask */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/*
		 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
		 * layers will determine whether the command is worth retrying
		 * based on the sense data and device class/type. Otherwise,
		 * determine directly if the command is worth retrying using its
		 * error mask and flags.
		 */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
		else if (ata_eh_worth_retry(qc))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info (ehc->i.dev ends up as the last offender) */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
		trace_ata_eh_link_autopsy_qc(qc);

		/* Count quiet errors */
		if (ata_eh_quiet(qc))
			nr_quiet++;
		nr_failed++;
	}

	/* If all failed commands requested silence, then be quiet */
	if (nr_quiet == nr_failed)
		ehc->i.flags |= ATA_EHI_QUIET;

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* record error and consider speeding down */
	dev = ehc->i.dev;
	/* no offending device known: fall back to the sole enabled device */
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
		trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
	}
}
2030
2031/**
9b1e2658
TH
2032 * ata_eh_autopsy - analyze error and determine recovery action
2033 * @ap: host port to perform autopsy on
2034 *
2035 * Analyze all links of @ap and determine why they failed and
2036 * which recovery actions are needed.
2037 *
2038 * LOCKING:
2039 * Kernel thread context (may sleep).
2040 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	/* NOTE(review): EDGE iteration appears to exclude the host link
	 * when a PMP is attached — hence the explicit host-link autopsy
	 * at the end of this function; confirm against ata_for_each_link().
	 */
	ata_for_each_link(link, ap, EDGE)
		ata_eh_link_autopsy(link);

	/* Handle the frigging slave link. Autopsy is done similarly
	 * but actions and flags are transferred over to the master
	 * link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		/* transfer control flags from master to slave */
		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

		/* perform autopsy on the slave link */
		ata_eh_link_autopsy(ap->slave_link);

		/* transfer actions from slave to master and clear slave */
		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action |= sehc->i.action;
		mehc->i.dev_action[1] |= sehc->i.dev_action[1];
		mehc->i.flags |= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}
2076
6521148c 2077/**
d4520903
HR
2078 * ata_get_cmd_name - get name for ATA command
2079 * @command: ATA command code to get name for
6521148c 2080 *
d4520903 2081 * Return a textual name of the given command or "unknown"
6521148c
RH
2082 *
2083 * LOCKING:
2084 * None
2085 */
const char *ata_get_cmd_name(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	/*
	 * Command-code to name table, compiled in only for verbose error
	 * reporting.  Looked up linearly below; the { 0, NULL } entry
	 * terminates the list.
	 */
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
		{ ATA_CMD_STANDBY,		"STANDBY" },
		{ ATA_CMD_IDLE,			"IDLE" },
		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
		{ ATA_CMD_NOP,			"NOP" },
		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE,		"SERVICE" },
		{ ATA_CMD_READ,			"READ DMA" },
		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT,	"READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE,		"WRITE DMA" },
		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT,	"WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT,	"WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP,		"SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
		{ ATA_CMD_SMART,		"SMART" },
		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
		{ ATA_CMD_ZAC_MGMT_IN,		"ZAC MANAGEMENT IN" },
		{ ATA_CMD_ZAC_MGMT_OUT,		"ZAC MANAGEMENT OUT" },
		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
		{ 0,				NULL } /* terminate list */
	};

	unsigned int i;

	/* linear search; table is small and this is an error-reporting path */
	for (i = 0; cmd_descr[i].text; i++)
		if (cmd_descr[i].command == command)
			return cmd_descr[i].text;
#endif

	/* unknown command, or verbose errors not configured */
	return "unknown";
}
d4520903 2196EXPORT_SYMBOL_GPL(ata_get_cmd_name);
6521148c 2197
9b1e2658
TH
2198/**
2199 * ata_eh_link_report - report error handling to user
0260731f 2200 * @link: ATA link EH is going on
022bdb07
TH
2201 *
2202 * Report EH to user.
2203 *
2204 * LOCKING:
2205 * None.
2206 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_queued_cmd *qc;
	const char *frozen, *desc;
	char tries_buf[6] = "";
	int tag, nr_failed = 0;

	/* honor requests for quiet EH */
	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	/* count failed commands that are worth reporting */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	/* nothing to report */
	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	/* show remaining EH tries only once some have been consumed */
	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf), " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			    ehc->i.err_mask, link->sactive, ehc->i.serror,
			    ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_err(ehc->i.dev, "%s\n", desc);
	} else {
		ata_link_err(link, "exception Emask 0x%x "
			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			     ehc->i.err_mask, link->sactive, ehc->i.serror,
			     ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_err(link, "%s\n", desc);
	}

#ifdef CONFIG_ATA_VERBOSE_ERROR
	/* decode accumulated SError bits */
	if (ehc->i.serror)
		ata_link_err(link,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif

	/* one detailed line per failed command */
	ata_qc_for_each_raw(ap, qc, tag) {
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		/* describe the data transfer, if any */
		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL]	= "bidi",
				[DMA_TO_DEVICE]		= "out",
				[DMA_FROM_DEVICE]	= "in",
			};
			const char *prot_str = NULL;

			switch (qc->tf.protocol) {
			case ATA_PROT_UNKNOWN:
				prot_str = "unknown";
				break;
			case ATA_PROT_NODATA:
				prot_str = "nodata";
				break;
			case ATA_PROT_PIO:
				prot_str = "pio";
				break;
			case ATA_PROT_DMA:
				prot_str = "dma";
				break;
			case ATA_PROT_NCQ:
				prot_str = "ncq dma";
				break;
			case ATA_PROT_NCQ_NODATA:
				prot_str = "ncq nodata";
				break;
			case ATAPI_PROT_NODATA:
				prot_str = "nodata";
				break;
			case ATAPI_PROT_PIO:
				prot_str = "pio";
				break;
			case ATAPI_PROT_DMA:
				prot_str = "dma";
				break;
			}
			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str, qc->nbytes, dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol)) {
			const u8 *cdb = qc->cdb;
			size_t cdb_len = qc->dev->cdb_len;

			/* prefer the SCSI command's CDB when available */
			if (qc->scsicmd) {
				cdb = qc->scsicmd->cmnd;
				cdb_len = qc->scsicmd->cmd_len;
			}
			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
					      cdb, cdb_len);
		} else
			ata_dev_err(qc->dev, "failed command: %s\n",
				    ata_get_cmd_name(cmd->command));

		ata_dev_err(qc->dev,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s\n %s"
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->status, res->error, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

#ifdef CONFIG_ATA_VERBOSE_ERROR
		/* decode result taskfile status bits */
		if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				   ATA_SENSE | ATA_ERR)) {
			if (res->status & ATA_BUSY)
				ata_dev_err(qc->dev, "status: { Busy }\n");
			else
				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
				  res->status & ATA_DRDY ? "DRDY " : "",
				  res->status & ATA_DF ? "DF " : "",
				  res->status & ATA_DRQ ? "DRQ " : "",
				  res->status & ATA_SENSE ? "SENSE " : "",
				  res->status & ATA_ERR ? "ERR " : "");
		}

		/* decode result taskfile error bits (not for PACKET) */
		if (cmd->command != ATA_CMD_PACKET &&
		    (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF |
				   ATA_ABORTED)))
			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
			  res->error & ATA_ICRC ? "ICRC " : "",
			  res->error & ATA_UNC ? "UNC " : "",
			  res->error & ATA_AMNF ? "AMNF " : "",
			  res->error & ATA_IDNF ? "IDNF " : "",
			  res->error & ATA_ABORTED ? "ABRT " : "");
#endif
	}
}
2392
9b1e2658
TH
2393/**
2394 * ata_eh_report - report error handling to user
2395 * @ap: ATA port to report EH about
2396 *
2397 * Report EH to user.
2398 *
2399 * LOCKING:
2400 * None.
2401 */
void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	/* HOST_FIRST: report the host link before any fan-out links */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_eh_link_report(link);
}
2409
cc0680a5 2410static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
b1c72916
TH
2411 unsigned int *classes, unsigned long deadline,
2412 bool clear_classes)
d87fa38e 2413{
f58229f8 2414 struct ata_device *dev;
d87fa38e 2415
b1c72916 2416 if (clear_classes)
1eca4365 2417 ata_for_each_dev(dev, link, ALL)
b1c72916 2418 classes[dev->devno] = ATA_DEV_UNKNOWN;
d87fa38e 2419
f046519f 2420 return reset(link, classes, deadline);
d87fa38e
TH
2421}
2422
e8411fba 2423static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
664faf09 2424{
45db2f6c 2425 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
ae791c05 2426 return 0;
5dbfc9cb
TH
2427 if (rc == -EAGAIN)
2428 return 1;
071f44b1 2429 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
3495de73 2430 return 1;
664faf09
TH
2431 return 0;
2432}
2433
/**
 *	ata_eh_reset - reset @link as part of EH recovery
 *	@link: link to reset
 *	@classify: whether misclassified devices should force a retry
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Reset @link (and, if present, the slave link).  Hardreset is
 *	preferred when available; a follow-up softreset is issued when
 *	ata_eh_followup_srst_needed() says so.  Failed attempts are
 *	retried with the escalating timeouts in ata_eh_reset_timeouts[],
 *	and the link speed is lowered before the final attempt.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_eh_reset(struct ata_link *link, int classify,
		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_port *ap = link->ap;
	struct ata_link *slave = ap->slave_link;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
	unsigned int *classes = ehc->classes;
	unsigned int lflags = link->flags;
	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
	int max_tries = 0, try = 0;
	struct ata_link *failed_link;
	struct ata_device *dev;
	unsigned long deadline, now;
	ata_reset_fn_t reset;
	unsigned long flags;
	u32 sstatus;
	int nr_unknown, rc;

	/*
	 * Prepare to reset
	 */
	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
		max_tries++;
	if (link->flags & ATA_LFLAG_RST_ONCE)
		max_tries = 1;
	if (link->flags & ATA_LFLAG_NO_HRST)
		hardreset = NULL;
	if (link->flags & ATA_LFLAG_NO_SRST)
		softreset = NULL;

	/* make sure each reset attempt is at least COOL_DOWN apart */
	if (ehc->i.flags & ATA_EHI_DID_RESET) {
		now = jiffies;
		WARN_ON(time_after(ehc->last_reset, now));
		deadline = ata_deadline(ehc->last_reset,
					ATA_EH_RESET_COOL_DOWN);
		if (time_before(now, deadline))
			schedule_timeout_uninterruptible(deadline - now);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);

	ata_for_each_dev(dev, link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* prefer hardreset */
	reset = NULL;
	ehc->i.action &= ~ATA_EH_RESET;
	if (hardreset) {
		reset = hardreset;
		ehc->i.action |= ATA_EH_HARDRESET;
	} else if (softreset) {
		reset = softreset;
		ehc->i.action |= ATA_EH_SOFTRESET;
	}

	if (prereset) {
		unsigned long deadline = ata_deadline(jiffies,
						      ATA_EH_PRERESET_TIMEOUT);

		if (slave) {
			sehc->i.action &= ~ATA_EH_RESET;
			sehc->i.action |= ehc->i.action;
		}

		rc = prereset(link, deadline);

		/* If present, do prereset on slave link too. Reset
		 * is skipped iff both master and slave links report
		 * -ENOENT or clear ATA_EH_RESET.
		 */
		if (slave && (rc == 0 || rc == -ENOENT)) {
			int tmp;

			tmp = prereset(slave, deadline);
			if (tmp != -ENOENT)
				rc = tmp;

			ehc->i.action |= sehc->i.action;
		}

		if (rc) {
			/* -ENOENT means "port disabled", which is not an error */
			if (rc == -ENOENT) {
				ata_link_dbg(link, "port disabled--ignoring\n");
				ehc->i.action &= ~ATA_EH_RESET;

				ata_for_each_dev(dev, link, ALL)
					classes[dev->devno] = ATA_DEV_NONE;

				rc = 0;
			} else
				ata_link_err(link,
					     "prereset failed (errno=%d)\n",
					     rc);
			goto out;
		}

		/* prereset() might have cleared ATA_EH_RESET. If so,
		 * bang classes, thaw and return.
		 */
		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
			ata_for_each_dev(dev, link, ALL)
				classes[dev->devno] = ATA_DEV_NONE;
			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
			    ata_is_host_link(link))
				ata_eh_thaw_port(ap);
			rc = 0;
			goto out;
		}
	}

 retry:
	/*
	 * Perform reset
	 */
	if (ata_is_host_link(link))
		ata_eh_freeze_port(ap);

	/* each retry uses the next (longer) timeout from the table */
	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);

	if (reset) {
		if (verbose)
			ata_link_info(link, "%s resetting link\n",
				      reset == softreset ? "soft" : "hard");

		/* mark that this EH session started with reset */
		ehc->last_reset = jiffies;
		if (reset == hardreset) {
			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
			trace_ata_link_hardreset_begin(link, classes, deadline);
		} else {
			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
			trace_ata_link_softreset_begin(link, classes, deadline);
		}

		rc = ata_do_reset(link, reset, classes, deadline, true);
		if (reset == hardreset)
			trace_ata_link_hardreset_end(link, classes, rc);
		else
			trace_ata_link_softreset_end(link, classes, rc);
		if (rc && rc != -EAGAIN) {
			failed_link = link;
			goto fail;
		}

		/* hardreset slave link if existent */
		if (slave && reset == hardreset) {
			int tmp;

			if (verbose)
				ata_link_info(slave, "hard resetting link\n");

			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
			trace_ata_slave_hardreset_begin(slave, classes,
							deadline);
			/* don't wipe classes for the slave attempt */
			tmp = ata_do_reset(slave, reset, classes, deadline,
					   false);
			trace_ata_slave_hardreset_end(slave, classes, tmp);
			switch (tmp) {
			case -EAGAIN:
				rc = -EAGAIN;
				break;
			case 0:
				break;
			default:
				failed_link = slave;
				rc = tmp;
				goto fail;
			}
		}

		/* perform follow-up SRST if necessary */
		if (reset == hardreset &&
		    ata_eh_followup_srst_needed(link, rc)) {
			reset = softreset;

			if (!reset) {
				ata_link_err(link,
					     "follow-up softreset required but no softreset available\n");
				failed_link = link;
				rc = -EINVAL;
				goto fail;
			}

			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
			trace_ata_link_softreset_begin(link, classes, deadline);
			rc = ata_do_reset(link, reset, classes, deadline, true);
			trace_ata_link_softreset_end(link, classes, rc);
			if (rc) {
				failed_link = link;
				goto fail;
			}
		}
	} else {
		if (verbose)
			ata_link_info(link,
				      "no reset method available, skipping reset\n");
		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
			lflags |= ATA_LFLAG_ASSUME_ATA;
	}

	/*
	 * Post-reset processing
	 */
	ata_for_each_dev(dev, link, ALL) {
		/* After the reset, the device state is PIO 0 and the
		 * controller state is undefined. Reset also wakes up
		 * drives from sleeping mode.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->flags &= ~ATA_DFLAG_SLEEPING;

		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			continue;

		/* apply class override */
		if (lflags & ATA_LFLAG_ASSUME_ATA)
			classes[dev->devno] = ATA_DEV_ATA;
		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
	}

	/* record current link speed */
	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
		link->sata_spd = (sstatus >> 4) & 0xf;
	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
		slave->sata_spd = (sstatus >> 4) & 0xf;

	/* thaw the port */
	if (ata_is_host_link(link))
		ata_eh_thaw_port(ap);

	/* postreset() should clear hardware SError. Although SError
	 * is cleared during link resume, clearing SError here is
	 * necessary as some PHYs raise hotplug events after SRST.
	 * This introduces race condition where hotplug occurs between
	 * reset and here. This race is mediated by cross checking
	 * link onlineness and classification result later.
	 */
	if (postreset) {
		postreset(link, classes);
		trace_ata_link_postreset(link, classes, rc);
		if (slave) {
			postreset(slave, classes);
			trace_ata_slave_postreset(slave, classes, rc);
		}
	}

	/*
	 * Some controllers can't be frozen very well and may set spurious
	 * error conditions during reset. Clear accumulated error
	 * information and re-thaw the port if frozen. As reset is the
	 * final recovery action and we cross check link onlineness against
	 * device classification later, no hotplug event is lost by this.
	 */
	spin_lock_irqsave(link->ap->lock, flags);
	memset(&link->eh_info, 0, sizeof(link->eh_info));
	if (slave)
		memset(&slave->eh_info, 0, sizeof(link->eh_info));
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
	spin_unlock_irqrestore(link->ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_eh_thaw_port(ap);

	/*
	 * Make sure onlineness and classification result correspond.
	 * Hotplug could have happened during reset and some
	 * controllers fail to wait while a drive is spinning up after
	 * being hotplugged causing misdetection. By cross checking
	 * link on/offlineness and classification result, those
	 * conditions can be reliably detected and retried.
	 */
	nr_unknown = 0;
	ata_for_each_dev(dev, link, ALL) {
		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
				ata_dev_dbg(dev, "link online but device misclassified\n");
				classes[dev->devno] = ATA_DEV_NONE;
				nr_unknown++;
			}
		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
			if (ata_class_enabled(classes[dev->devno]))
				ata_dev_dbg(dev,
					    "link offline, clearing class %d to NONE\n",
					    classes[dev->devno]);
			classes[dev->devno] = ATA_DEV_NONE;
		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
			ata_dev_dbg(dev,
				    "link status unknown, clearing UNKNOWN to NONE\n");
			classes[dev->devno] = ATA_DEV_NONE;
		}
	}

	if (classify && nr_unknown) {
		if (try < max_tries) {
			ata_link_warn(link,
				      "link online but %d devices misclassified, retrying\n",
				      nr_unknown);
			failed_link = link;
			rc = -EAGAIN;
			goto fail;
		}
		ata_link_warn(link,
			      "link online but %d devices misclassified, "
			      "device detection might fail\n", nr_unknown);
	}

	/* reset successful, schedule revalidation */
	ata_eh_done(link, NULL, ATA_EH_RESET);
	if (slave)
		ata_eh_done(slave, NULL, ATA_EH_RESET);
	ehc->last_reset = jiffies;		/* update to completion time */
	ehc->i.action |= ATA_EH_REVALIDATE;
	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */

	rc = 0;
 out:
	/* clear hotplug flag */
	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
	if (slave)
		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	return rc;

 fail:
	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
	if (!ata_is_host_link(link) &&
	    sata_scr_read(link, SCR_STATUS, &sstatus))
		rc = -ERESTART;

	if (try >= max_tries) {
		/*
		 * Thaw host port even if reset failed, so that the port
		 * can be retried on the next phy event. This risks
		 * repeated EH runs but seems to be a better tradeoff than
		 * shutting down a port after a botched hotplug attempt.
		 */
		if (ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		goto out;
	}

	/* wait out the remainder of this attempt's timeout before retrying */
	now = jiffies;
	if (time_before(now, deadline)) {
		unsigned long delta = deadline - now;

		ata_link_warn(failed_link,
			"reset failed (errno=%d), retrying in %u secs\n",
			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));

		ata_eh_release(ap);
		while (delta)
			delta = schedule_timeout_uninterruptible(delta);
		ata_eh_acquire(ap);
	}

	/*
	 * While disks spinup behind PMP, some controllers fail sending SRST.
	 * They need to be reset - as well as the PMP - before retrying.
	 */
	if (rc == -ERESTART) {
		if (ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		goto out;
	}

	/* lower link speed before the last attempt, or on -EPIPE */
	if (try == max_tries - 1) {
		sata_down_spd_limit(link, 0);
		if (slave)
			sata_down_spd_limit(slave, 0);
	} else if (rc == -EPIPE)
		sata_down_spd_limit(failed_link, 0);

	if (hardreset)
		reset = hardreset;
	goto retry;
}
2839
45fabbb7
EO
/**
 *	ata_eh_pull_park_action - pull pending ATA_EH_PARK actions into EH context
 *	@ap: port whose links' PARK requests are to be collected
 *
 *	Transfers ATA_EH_PARK bits from each link's eh_info into its
 *	eh_context under the host lock and rearms @ap->park_req_pending
 *	so newly arriving park requests can be detected.
 *
 *	LOCKING:
 *	Acquires and releases ap->lock internally.
 */
static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/*
	 * This function can be thought of as an extended version of
	 * ata_eh_about_to_do() specially crafted to accommodate the
	 * requirements of ATA_EH_PARK handling. Since the EH thread
	 * does not leave the do {} while () loop in ata_eh_recover as
	 * long as the timeout for a park request to *one* device on
	 * the port has not expired, and since we still want to pick
	 * up park requests to other devices on the same port or
	 * timeout updates for the same device, we have to pull
	 * ATA_EH_PARK actions from eh_info into eh_context.i
	 * ourselves at the beginning of each pass over the loop.
	 *
	 * Additionally, all write accesses to &ap->park_req_pending
	 * through reinit_completion() (see below) or complete_all()
	 * (see ata_scsi_park_store()) are protected by the host lock.
	 * As a result we have that park_req_pending.done is zero on
	 * exit from this function, i.e. when ATA_EH_PARK actions for
	 * *all* devices on port ap have been pulled into the
	 * respective eh_context structs. If, and only if,
	 * park_req_pending.done is non-zero by the time we reach
	 * wait_for_completion_timeout(), another ATA_EH_PARK action
	 * has been scheduled for at least one of the devices on port
	 * ap and we have to cycle over the do {} while () loop in
	 * ata_eh_recover() again.
	 */

	spin_lock_irqsave(ap->lock, flags);
	reinit_completion(&ap->park_req_pending);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			struct ata_eh_info *ehi = &link->eh_info;

			/* copy only the PARK bit, then clear it from eh_info */
			link->eh_context.i.dev_action[dev->devno] |=
				ehi->dev_action[dev->devno] & ATA_EH_PARK;
			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
		}
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
2885
/**
 *	ata_eh_park_issue_cmd - park or unpark the heads of @dev
 *	@dev: ATA device to park/unpark
 *	@park: non-zero to park (unload heads), zero to unpark
 *
 *	On park, issues IDLE IMMEDIATE with the unload-feature taskfile
 *	signature and records the device in the link's unloaded_mask; on
 *	unpark, issues CHECK POWER MODE and clears the mask bit.  A failed
 *	park is logged and the mask bit is cleared again.
 *
 *	LOCKING:
 *	EH context (issues an internal command; may sleep).
 */
static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	if (park) {
		ehc->unloaded_mask |= 1 << dev->devno;
		tf.command = ATA_CMD_IDLEIMMEDIATE;
		/* Magic taskfile values select the head-unload feature of
		 * IDLE IMMEDIATE -- presumably per the ATA/ATAPI command
		 * set ("IDLE IMMEDIATE with UNLOAD FEATURE"); confirm
		 * against the ACS spec before changing.
		 */
		tf.feature = 0x44;
		tf.lbal = 0x4c;
		tf.lbam = 0x4e;
		tf.lbah = 0x55;
	} else {
		ehc->unloaded_mask &= ~(1 << dev->devno);
		tf.command = ATA_CMD_CHK_POWER;
	}

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol = ATA_PROT_NODATA;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* a successful unload reports 0xc4 in the LBA low register */
	if (park && (err_mask || tf.lbal != 0xc4)) {
		ata_dev_err(dev, "head unload failed!\n");
		ehc->unloaded_mask &= ~(1 << dev->devno);
	}
}
2913
/**
 *	ata_eh_revalidate_and_attach - revalidate known devices, attach new ones
 *	@link: link whose devices are to be revalidated/attached
 *	@r_failed_dev: out parameter for the device that caused failure
 *
 *	Walks the link's devices, revalidating enabled devices for which
 *	ATA_EH_REVALIDATE is pending and reading IDENTIFY data for newly
 *	classified ones.  Identification runs in reverse device order and
 *	configuration in forward order (see comments below for why).
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise with *@r_failed_dev set.
 */
static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;	/* devno bits of freshly identified devices */
	unsigned long flags;
	int rc = 0;

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_for_each_dev(dev, link, ALL_REVERSE) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			/* PMPs are revalidated by their own EH, never here */
			WARN_ON(dev->class == ATA_DEV_PMP);

			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan_device() here */
			schedule_work(&(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* Temporarily set dev->class, it will be
			 * permanently set once all configurations are
			 * complete. This is necessary because new
			 * device configuration is done in two
			 * separate loops.
			 */
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);

			/* read_id might have changed class, store and reset */
			ehc->classes[dev->devno] = dev->class;
			dev->class = ATA_DEV_UNKNOWN;

			switch (rc) {
			case 0:
				/* clear error info accumulated during probe */
				ata_ering_clear(&dev->ering);
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was issued to non-existent
				 * device. No need to reset. Just
				 * thaw and ignore the device.
				 */
				ata_eh_thaw_port(ap);
				break;
			default:
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		dev->class = ehc->classes[dev->devno];

		/* attached PMPs are configured by their own EH */
		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	return rc;
}
3039
6f1d1e3a
TH
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc;

	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
			struct ata_ering_entry *ent;

			ent = ata_ering_top(&dev->ering);
			if (ent)
				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
		}
	}

	/* has private set_mode? */
	if (ap->ops->set_mode)
		rc = ap->ops->set_mode(link, r_failed_dev);
	else
		rc = ata_do_set_mode(link, r_failed_dev);

	/* if transfer mode has changed, set DUBIOUS_XFER on device.
	 * saved_* values are snapshots taken before the mode change;
	 * any difference means the new mode is not yet proven good.
	 */
	ata_for_each_dev(dev, link, ENABLED) {
		struct ata_eh_context *ehc = &link->eh_context;
		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));

		if (dev->xfer_mode != saved_xfer_mode ||
		    ata_ncq_enabled(dev) != saved_ncq)
			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
	}

	return rc;
}
3091
11fc33da
TH
3092/**
3093 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3094 * @dev: ATAPI device to clear UA for
3095 *
3096 * Resets and other operations can make an ATAPI device raise
3097 * UNIT ATTENTION which causes the next operation to fail. This
3098 * function clears UA.
3099 *
3100 * LOCKING:
3101 * EH context (may sleep).
3102 *
3103 * RETURNS:
3104 * 0 on success, -errno on failure.
3105 */
3106static int atapi_eh_clear_ua(struct ata_device *dev)
3107{
3108 int i;
3109
3110 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
b5357081 3111 u8 *sense_buffer = dev->link->ap->sector_buf;
11fc33da
TH
3112 u8 sense_key = 0;
3113 unsigned int err_mask;
3114
3115 err_mask = atapi_eh_tur(dev, &sense_key);
3116 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
a9a79dfe
JP
3117 ata_dev_warn(dev,
3118 "TEST_UNIT_READY failed (err_mask=0x%x)\n",
3119 err_mask);
11fc33da
TH
3120 return -EIO;
3121 }
3122
3123 if (!err_mask || sense_key != UNIT_ATTENTION)
3124 return 0;
3125
3126 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3127 if (err_mask) {
a9a79dfe 3128 ata_dev_warn(dev, "failed to clear "
11fc33da
TH
3129 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3130 return -EIO;
3131 }
3132 }
3133
a9a79dfe
JP
3134 ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
3135 ATA_EH_UA_TRIES);
11fc33da
TH
3136
3137 return 0;
3138}
3139
6013efd8
TH
/**
 *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
 *	@dev: ATA device which may need FLUSH retry
 *
 *	If @dev failed FLUSH, it needs to be reported upper layer
 *	immediately as it means that @dev failed to remap and already
 *	lost at least a sector and further FLUSH retrials won't make
 *	any difference to the lost sector.  However, if FLUSH failed
 *	for other reasons, for example transmission error, FLUSH needs
 *	to be retried.
 *
 *	This function determines whether FLUSH failure retry is
 *	necessary and performs it if so.
 *
 *	RETURNS:
 *	0 if EH can continue, -errno if EH needs to be repeated.
 */
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc = 0;

	/* did flush fail for this device? */
	if (!ata_tag_valid(link->active_tag))
		return 0;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
			       qc->tf.command != ATA_CMD_FLUSH))
		return 0;

	/* if the device failed it, it should be reported to upper layers */
	if (qc->err_mask & AC_ERR_DEV)
		return 0;

	/* flush failed for some other reason, give it another shot */
	ata_tf_init(dev, &tf);

	tf.command = qc->tf.command;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
		     tf.command, qc->err_mask);

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask) {
		/*
		 * FLUSH is complete but there's no way to
		 * successfully complete a failed command from EH.
		 * Making sure retry is allowed at least once and
		 * retrying it should do the trick - whatever was in
		 * the cache is already on the platter and this won't
		 * cause infinite loop.
		 */
		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
	} else {
		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
			     err_mask);
		rc = -EIO;

		/* if device failed it, report it to upper layers */
		if (err_mask & AC_ERR_DEV) {
			qc->err_mask |= AC_ERR_DEV;
			qc->result_tf = tf;
			/* a frozen port still forces an EH repeat */
			if (!(ap->pflags & ATA_PFLAG_FROZEN))
				rc = 0;
		}
	}
	return rc;
}
3215
6b7ae954
TH
/**
 *	ata_eh_set_lpm - configure SATA interface power management
 *	@link: link to configure power management
 *	@policy: the link power management policy
 *	@r_failed_dev: out parameter for failed device
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power and
 *	medium_power_with_dipm policies, and then call driver specific
 *	callbacks for enabling Host Initiated Power management.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			  struct ata_device **r_failed_dev)
{
	/* ap is NULL for links behind a port multiplier */
	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
	enum ata_lpm_policy old_policy = link->lpm_policy;
	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
	unsigned int err_mask;
	int rc;

	/* if the link or host doesn't do LPM, noop */
	if (!IS_ENABLED(CONFIG_SATA_HOST) ||
	    (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
		return 0;

	/*
	 * DIPM is enabled only for MIN_POWER as some devices
	 * misbehave when the host NACKs transition to SLUMBER.  Order
	 * device and link configurations such that the host always
	 * allows DIPM requests.
	 */
	ata_for_each_dev(dev, link, ENABLED) {
		bool hipm = ata_id_has_hipm(dev->id);
		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;

		/* find the first enabled and LPM enabled devices */
		if (!link_dev)
			link_dev = dev;

		if (!lpm_dev && (hipm || dipm))
			lpm_dev = dev;

		hints &= ~ATA_LPM_EMPTY;
		if (!hipm)
			hints &= ~ATA_LPM_HIPM;

		/* disable DIPM before changing link config */
		if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_DISABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					     "failed to disable DIPM, Emask 0x%x\n",
					     err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	if (ap) {
		rc = ap->ops->set_lpm(link, policy, hints);
		if (!rc && ap->slave_link)
			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
	} else
		rc = sata_pmp_set_lpm(link, policy, hints);

	/*
	 * Attribute link config failure to the first (LPM) enabled
	 * device on the link.
	 */
	if (rc) {
		if (rc == -EOPNOTSUPP) {
			link->flags |= ATA_LFLAG_NO_LPM;
			return 0;
		}
		dev = lpm_dev ? lpm_dev : link_dev;
		goto fail;
	}

	/*
	 * Low level driver acked the transition.  Issue DIPM command
	 * with the new policy set.
	 */
	link->lpm_policy = policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = policy;

	/* host config updated, enable DIPM if transitioning to MIN_POWER */
	ata_for_each_dev(dev, link, ENABLED) {
		if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
		    ata_id_has_dipm(dev->id)) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					"failed to enable DIPM, Emask 0x%x\n",
					err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	link->last_lpm_change = jiffies;
	link->flags |= ATA_LFLAG_CHANGED;

	return 0;

fail:
	/* restore the old policy */
	link->lpm_policy = old_policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = old_policy;

	/* if no device or only one more chance is left, disable LPM */
	if (!dev || ehc->tries[dev->devno] <= 2) {
		ata_link_warn(link, "disabling LPM on the link\n");
		link->flags |= ATA_LFLAG_NO_LPM;
	}
	if (r_failed_dev)
		*r_failed_dev = dev;
	return rc;
}
3349
8a745f1f 3350int ata_link_nr_enabled(struct ata_link *link)
022bdb07 3351{
f58229f8
TH
3352 struct ata_device *dev;
3353 int cnt = 0;
022bdb07 3354
1eca4365
TH
3355 ata_for_each_dev(dev, link, ENABLED)
3356 cnt++;
022bdb07
TH
3357 return cnt;
3358}
3359
0260731f 3360static int ata_link_nr_vacant(struct ata_link *link)
084fe639 3361{
f58229f8
TH
3362 struct ata_device *dev;
3363 int cnt = 0;
084fe639 3364
1eca4365 3365 ata_for_each_dev(dev, link, ALL)
f58229f8 3366 if (dev->class == ATA_DEV_UNKNOWN)
084fe639
TH
3367 cnt++;
3368 return cnt;
3369}
3370
0260731f 3371static int ata_eh_skip_recovery(struct ata_link *link)
084fe639 3372{
672b2d65 3373 struct ata_port *ap = link->ap;
0260731f 3374 struct ata_eh_context *ehc = &link->eh_context;
f58229f8 3375 struct ata_device *dev;
084fe639 3376
f9df58cb
TH
3377 /* skip disabled links */
3378 if (link->flags & ATA_LFLAG_DISABLED)
3379 return 1;
3380
e2f3d75f
TH
3381 /* skip if explicitly requested */
3382 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3383 return 1;
3384
672b2d65
TH
3385 /* thaw frozen port and recover failed devices */
3386 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3387 return 0;
3388
3389 /* reset at least once if reset is requested */
3390 if ((ehc->i.action & ATA_EH_RESET) &&
3391 !(ehc->i.flags & ATA_EHI_DID_RESET))
084fe639
TH
3392 return 0;
3393
3394 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1eca4365 3395 ata_for_each_dev(dev, link, ALL) {
084fe639
TH
3396 if (dev->class == ATA_DEV_UNKNOWN &&
3397 ehc->classes[dev->devno] != ATA_DEV_NONE)
3398 return 0;
3399 }
3400
3401 return 1;
3402}
3403
c2c7a89c
TH
3404static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3405{
3406 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3407 u64 now = get_jiffies_64();
3408 int *trials = void_arg;
3409
6868225e
LM
3410 if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
3411 (ent->timestamp < now - min(now, interval)))
c2c7a89c
TH
3412 return -1;
3413
3414 (*trials)++;
3415 return 0;
3416}
3417
02c05a27
TH
/**
 *	ata_eh_schedule_probe - schedule probing for @dev if requested
 *	@dev: device to consider for probing
 *
 *	If probing is requested and not yet attempted in this EH run,
 *	detach and re-init @dev, request a reset, wake the link from
 *	LPM, and record the probe trial on the error ring.
 *
 *	RETURNS:
 *	1 if a probe was scheduled, 0 otherwise.
 */
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	/* only probe once per EH run: requested and not yet attempted */
	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* the link maybe in a deep sleep, wake it up */
	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
		if (ata_is_host_link(link))
			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
					       ATA_LPM_EMPTY);
		else
			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
					 ATA_LPM_EMPTY);
	}

	/* Record and count probe trials on the ering.  The specific
	 * error mask used is irrelevant.  Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced to 1.5Gbps.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}
3466
/**
 *	ata_eh_handle_dev_fail - handle a device failure during EH recovery
 *	@dev: device that failed
 *	@err: -errno describing the failure
 *
 *	Consumes one retry (except for -EAGAIN), applies error-specific
 *	penalties via a deliberate switch fallthrough chain, and either
 *	disables/reprobes the device or requests another reset.
 *
 *	RETURNS:
 *	1 if the device was disabled (recovery should move on),
 *	0 if another reset/retry was scheduled.
 */
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		fallthrough;
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
		fallthrough;
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			/* fresh probe also gets fresh command timeouts */
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
3518
022bdb07
TH
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context.  This function executes all the operations
 *	with appropriate retrials and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int rc, nr_fails;
	unsigned long flags, deadline;

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_err(link, "reset failed, giving up\n");
			goto out;
		}
	}

	/* park/unpark loop: keep cycling until all park deadlines have
	 * passed and no new park requests arrive (see
	 * ata_eh_pull_park_action() for the full rationale)
	 */
	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA &&
				    dev->class != ATA_DEV_ZAC)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		ata_eh_release(ap);
		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
		ata_eh_acquire(ap);
	} while (deadline);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	nr_fails = 0;
	ata_for_each_link(link, ap, PMP_FIRST) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (sata_pmp_attached(ap) && ata_is_host_link(link))
			goto config_lpm;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto rest_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto rest_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto rest_fail;
				if (zpodd_dev_enabled(dev))
					zpodd_post_poweron(dev);
			}
		}

		/* retry flush if necessary */
		ata_for_each_dev(dev, link, ALL) {
			if (dev->class != ATA_DEV_ATA &&
			    dev->class != ATA_DEV_ZAC)
				continue;
			rc = ata_eh_maybe_retry_flush(dev);
			if (rc)
				goto rest_fail;
		}

	config_lpm:
		/* configure link power saving */
		if (link->lpm_policy != ap->target_lpm_policy) {
			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
			if (rc)
				goto rest_fail;
		}

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

	rest_fail:
		nr_fails++;
		if (dev)
			ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_fails)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	return rc;
}
3760
/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	int tag;

	/* retry or finish qcs */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}
3805
3806/**
3807 * ata_do_eh - do standard error handling
3808 * @ap: host port to handle error for
a1efdaba 3809 *
f5914a46 3810 * @prereset: prereset method (can be NULL)
022bdb07
TH
3811 * @softreset: softreset method (can be NULL)
3812 * @hardreset: hardreset method (can be NULL)
3813 * @postreset: postreset method (can be NULL)
3814 *
3815 * Perform standard error handling sequence.
3816 *
3817 * LOCKING:
3818 * Kernel thread context (may sleep).
3819 */
f5914a46
TH
3820void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3821 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3822 ata_postreset_fn_t postreset)
022bdb07 3823{
9b1e2658
TH
3824 struct ata_device *dev;
3825 int rc;
3826
3827 ata_eh_autopsy(ap);
3828 ata_eh_report(ap);
3829
3830 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3831 NULL);
3832 if (rc) {
1eca4365 3833 ata_for_each_dev(dev, &ap->link, ALL)
9b1e2658
TH
3834 ata_dev_disable(dev);
3835 }
3836
022bdb07
TH
3837 ata_eh_finish(ap);
3838}
500530f6 3839
a1efdaba
TH
3840/**
3841 * ata_std_error_handler - standard error handler
3842 * @ap: host port to handle error for
3843 *
3844 * Standard error handler
3845 *
3846 * LOCKING:
3847 * Kernel thread context (may sleep).
3848 */
3849void ata_std_error_handler(struct ata_port *ap)
3850{
3851 struct ata_port_operations *ops = ap->ops;
3852 ata_reset_fn_t hardreset = ops->hardreset;
3853
57c9efdf 3854 /* ignore built-in hardreset if SCR access is not available */
fe06e5f9 3855 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
a1efdaba
TH
3856 hardreset = NULL;
3857
3858 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3859}
a52fbcfc 3860EXPORT_SYMBOL_GPL(ata_std_error_handler);
a1efdaba 3861
6ffa01d8 3862#ifdef CONFIG_PM
500530f6
TH
3863/**
3864 * ata_eh_handle_port_suspend - perform port suspend operation
3865 * @ap: port to suspend
3866 *
3867 * Suspend @ap.
3868 *
3869 * LOCKING:
3870 * Kernel thread context (may sleep).
3871 */
3872static void ata_eh_handle_port_suspend(struct ata_port *ap)
3873{
3874 unsigned long flags;
3875 int rc = 0;
3dc67440 3876 struct ata_device *dev;
500530f6
TH
3877
3878 /* are we suspending? */
3879 spin_lock_irqsave(ap->lock, flags);
3880 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
a7ff60db 3881 ap->pm_mesg.event & PM_EVENT_RESUME) {
500530f6
TH
3882 spin_unlock_irqrestore(ap->lock, flags);
3883 return;
3884 }
3885 spin_unlock_irqrestore(ap->lock, flags);
3886
3887 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
3888
3dc67440
AL
3889 /*
3890 * If we have a ZPODD attached, check its zero
3891 * power ready status before the port is frozen.
a7ff60db 3892 * Only needed for runtime suspend.
3dc67440 3893 */
a7ff60db
AL
3894 if (PMSG_IS_AUTO(ap->pm_mesg)) {
3895 ata_for_each_dev(dev, &ap->link, ENABLED) {
3896 if (zpodd_dev_enabled(dev))
3897 zpodd_on_suspend(dev);
3898 }
3dc67440
AL
3899 }
3900
500530f6
TH
3901 /* suspend */
3902 ata_eh_freeze_port(ap);
3903
3904 if (ap->ops->port_suspend)
3905 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
3906
a7ff60db 3907 ata_acpi_set_state(ap, ap->pm_mesg);
2a7b02ea 3908
bc6e7c4b 3909 /* update the flags */
500530f6
TH
3910 spin_lock_irqsave(ap->lock, flags);
3911
3912 ap->pflags &= ~ATA_PFLAG_PM_PENDING;
3913 if (rc == 0)
3914 ap->pflags |= ATA_PFLAG_SUSPENDED;
64578a3d 3915 else if (ap->pflags & ATA_PFLAG_FROZEN)
500530f6
TH
3916 ata_port_schedule_eh(ap);
3917
500530f6
TH
3918 spin_unlock_irqrestore(ap->lock, flags);
3919
3920 return;
3921}
3922
3923/**
3924 * ata_eh_handle_port_resume - perform port resume operation
3925 * @ap: port to resume
3926 *
3927 * Resume @ap.
3928 *
500530f6
TH
3929 * LOCKING:
3930 * Kernel thread context (may sleep).
3931 */
3932static void ata_eh_handle_port_resume(struct ata_port *ap)
3933{
6f9c1ea2
TH
3934 struct ata_link *link;
3935 struct ata_device *dev;
500530f6 3936 unsigned long flags;
500530f6
TH
3937
3938 /* are we resuming? */
3939 spin_lock_irqsave(ap->lock, flags);
3940 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
a7ff60db 3941 !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
500530f6
TH
3942 spin_unlock_irqrestore(ap->lock, flags);
3943 return;
3944 }
3945 spin_unlock_irqrestore(ap->lock, flags);
3946
9666f400 3947 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
500530f6 3948
6f9c1ea2
TH
3949 /*
3950 * Error timestamps are in jiffies which doesn't run while
3951 * suspended and PHY events during resume isn't too uncommon.
3952 * When the two are combined, it can lead to unnecessary speed
3953 * downs if the machine is suspended and resumed repeatedly.
3954 * Clear error history.
3955 */
3956 ata_for_each_link(link, ap, HOST_FIRST)
3957 ata_for_each_dev(dev, link, ALL)
3958 ata_ering_clear(&dev->ering);
3959
a7ff60db 3960 ata_acpi_set_state(ap, ap->pm_mesg);
bd3adca5 3961
500530f6 3962 if (ap->ops->port_resume)
ae867937 3963 ap->ops->port_resume(ap);
500530f6 3964
6746544c
TH
3965 /* tell ACPI that we're resuming */
3966 ata_acpi_on_resume(ap);
3967
bc6e7c4b 3968 /* update the flags */
500530f6
TH
3969 spin_lock_irqsave(ap->lock, flags);
3970 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
500530f6
TH
3971 spin_unlock_irqrestore(ap->lock, flags);
3972}
6ffa01d8 3973#endif /* CONFIG_PM */