libata: implement sata_link_scr_lpm() and make ata_dev_set_feature() global
[linux-block.git] / drivers / ata / libata-eh.c
CommitLineData
ece1d636
TH
1/*
2 * libata-eh.c - libata error handling
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
ece1d636 35#include <linux/kernel.h>
242f9dcb 36#include <linux/blkdev.h>
2855568b 37#include <linux/pci.h>
ece1d636
TH
38#include <scsi/scsi.h>
39#include <scsi/scsi_host.h>
40#include <scsi/scsi_eh.h>
41#include <scsi/scsi_device.h>
42#include <scsi/scsi_cmnd.h>
6521148c 43#include <scsi/scsi_dbg.h>
c6fd2807 44#include "../scsi/scsi_transport_api.h"
ece1d636
TH
45
46#include <linux/libata.h>
47
48#include "libata.h"
49
7d47e8d4 50enum {
3884f7b0 51 /* speed down verdicts */
7d47e8d4
TH
52 ATA_EH_SPDN_NCQ_OFF = (1 << 0),
53 ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
54 ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
76326ac1 55 ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),
3884f7b0
TH
56
57 /* error flags */
58 ATA_EFLAG_IS_IO = (1 << 0),
76326ac1 59 ATA_EFLAG_DUBIOUS_XFER = (1 << 1),
d9027470 60 ATA_EFLAG_OLD_ER = (1 << 31),
3884f7b0
TH
61
62 /* error categories */
63 ATA_ECAT_NONE = 0,
64 ATA_ECAT_ATA_BUS = 1,
65 ATA_ECAT_TOUT_HSM = 2,
66 ATA_ECAT_UNK_DEV = 3,
75f9cafc
TH
67 ATA_ECAT_DUBIOUS_NONE = 4,
68 ATA_ECAT_DUBIOUS_ATA_BUS = 5,
69 ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
70 ATA_ECAT_DUBIOUS_UNK_DEV = 7,
71 ATA_ECAT_NR = 8,
7d47e8d4 72
87fbc5a0
TH
73 ATA_EH_CMD_DFL_TIMEOUT = 5000,
74
0a2c0f56
TH
75 /* always put at least this amount of time between resets */
76 ATA_EH_RESET_COOL_DOWN = 5000,
77
341c2c95
TH
78 /* Waiting in ->prereset can never be reliable. It's
79 * sometimes nice to wait there but it can't be depended upon;
80 * otherwise, we wouldn't be resetting. Just give it enough
81 * time for most drives to spin up.
82 */
83 ATA_EH_PRERESET_TIMEOUT = 10000,
84 ATA_EH_FASTDRAIN_INTERVAL = 3000,
11fc33da
TH
85
86 ATA_EH_UA_TRIES = 5,
c2c7a89c
TH
87
88 /* probe speed down parameters, see ata_eh_schedule_probe() */
89 ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */
90 ATA_EH_PROBE_TRIALS = 2,
31daabda
TH
91};
92
93/* The following table determines how we sequence resets. Each entry
94 * represents timeout for that try. The first try can be soft or
95 * hardreset. All others are hardreset if available. In most cases
96 * the first reset w/ 10sec timeout should succeed. Following entries
97 * are mostly for error handling, hotplug and retarded devices.
98 */
99static const unsigned long ata_eh_reset_timeouts[] = {
341c2c95
TH
100 10000, /* most drives spin up by 10sec */
101 10000, /* > 99% working drives spin up before 20sec */
102 35000, /* give > 30 secs of idleness for retarded devices */
103 5000, /* and sweet one last chance */
d8af0eb6 104 ULONG_MAX, /* > 1 min has elapsed, give up */
31daabda
TH
105};
106
87fbc5a0
TH
107static const unsigned long ata_eh_identify_timeouts[] = {
108 5000, /* covers > 99% of successes and not too boring on failures */
109 10000, /* combined time till here is enough even for media access */
110 30000, /* for true idiots */
111 ULONG_MAX,
112};
113
6013efd8
TH
114static const unsigned long ata_eh_flush_timeouts[] = {
115 15000, /* be generous with flush */
116 15000, /* ditto */
117 30000, /* and even more generous */
118 ULONG_MAX,
119};
120
87fbc5a0
TH
121static const unsigned long ata_eh_other_timeouts[] = {
122 5000, /* same rationale as identify timeout */
123 10000, /* ditto */
124 /* but no merciful 30sec for other commands, it just isn't worth it */
125 ULONG_MAX,
126};
127
128struct ata_eh_cmd_timeout_ent {
129 const u8 *commands;
130 const unsigned long *timeouts;
131};
132
133/* The following table determines timeouts to use for EH internal
134 * commands. Each table entry is a command class and matches the
135 * commands the entry applies to and the timeout table to use.
136 *
137 * On the retry after a command timed out, the next timeout value from
138 * the table is used. If the table doesn't contain further entries,
139 * the last value is used.
140 *
141 * ehc->cmd_timeout_idx keeps track of which timeout to use per
142 * command class, so if SET_FEATURES times out on the first try, the
143 * next try will use the second timeout value only for that class.
144 */
145#define CMDS(cmds...) (const u8 []){ cmds, 0 }
146static const struct ata_eh_cmd_timeout_ent
147ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
148 { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
149 .timeouts = ata_eh_identify_timeouts, },
150 { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
151 .timeouts = ata_eh_other_timeouts, },
152 { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
153 .timeouts = ata_eh_other_timeouts, },
154 { .commands = CMDS(ATA_CMD_SET_FEATURES),
155 .timeouts = ata_eh_other_timeouts, },
156 { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
157 .timeouts = ata_eh_other_timeouts, },
6013efd8
TH
158 { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
159 .timeouts = ata_eh_flush_timeouts },
87fbc5a0
TH
160};
161#undef CMDS
162
ad9e2762 163static void __ata_port_freeze(struct ata_port *ap);
6ffa01d8 164#ifdef CONFIG_PM
500530f6
TH
165static void ata_eh_handle_port_suspend(struct ata_port *ap);
166static void ata_eh_handle_port_resume(struct ata_port *ap);
6ffa01d8
TH
167#else /* CONFIG_PM */
168static void ata_eh_handle_port_suspend(struct ata_port *ap)
169{ }
170
171static void ata_eh_handle_port_resume(struct ata_port *ap)
172{ }
6ffa01d8 173#endif /* CONFIG_PM */
ad9e2762 174
b64bbc39
TH
175static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
176 va_list args)
177{
178 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
179 ATA_EH_DESC_LEN - ehi->desc_len,
180 fmt, args);
181}
182
183/**
184 * __ata_ehi_push_desc - push error description without adding separator
185 * @ehi: target EHI
186 * @fmt: printf format string
187 *
188 * Format string according to @fmt and append it to @ehi->desc.
189 *
190 * LOCKING:
191 * spin_lock_irqsave(host lock)
192 */
193void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
194{
195 va_list args;
196
197 va_start(args, fmt);
198 __ata_ehi_pushv_desc(ehi, fmt, args);
199 va_end(args);
200}
201
202/**
203 * ata_ehi_push_desc - push error description with separator
204 * @ehi: target EHI
205 * @fmt: printf format string
206 *
207 * Format string according to @fmt and append it to @ehi->desc.
208 * If @ehi->desc is not empty, ", " is added in-between.
209 *
210 * LOCKING:
211 * spin_lock_irqsave(host lock)
212 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	/* separate from any previously pushed description with ", " */
	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
224
225/**
226 * ata_ehi_clear_desc - clean error description
227 * @ehi: target EHI
228 *
229 * Clear @ehi->desc.
230 *
231 * LOCKING:
232 * spin_lock_irqsave(host lock)
233 */
234void ata_ehi_clear_desc(struct ata_eh_info *ehi)
235{
236 ehi->desc[0] = '\0';
237 ehi->desc_len = 0;
238}
239
cbcdd875
TH
240/**
241 * ata_port_desc - append port description
242 * @ap: target ATA port
243 * @fmt: printf format string
244 *
245 * Format string according to @fmt and append it to port
246 * description. If port description is not empty, " " is added
247 * in-between. This function is to be used while initializing
248 * ata_host. The description is printed on host registration.
249 *
250 * LOCKING:
251 * None.
252 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	/* only legal while the port is still being initialized */
	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	/* separate from any previous description with a single space */
	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}
266
267#ifdef CONFIG_PCI
268
269/**
270 * ata_port_pbar_desc - append PCI BAR description
271 * @ap: target ATA port
272 * @bar: target PCI BAR
273 * @offset: offset into PCI BAR
274 * @name: name of the area
275 *
276 * If @offset is negative, this function formats a string which
277 * contains the name, address, size and type of the BAR and
278 * appends it to the port description. If @offset is zero or
279 * positive, only name and offsetted address is appended.
280 *
281 * LOCKING:
282 * None.
283 */
284void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
285 const char *name)
286{
287 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
288 char *type = "";
289 unsigned long long start, len;
290
291 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
292 type = "m";
293 else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
294 type = "i";
295
296 start = (unsigned long long)pci_resource_start(pdev, bar);
297 len = (unsigned long long)pci_resource_len(pdev, bar);
298
299 if (offset < 0)
300 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
301 else
e6a73ab1
AM
302 ata_port_desc(ap, "%s 0x%llx", name,
303 start + (unsigned long long)offset);
cbcdd875
TH
304}
305
306#endif /* CONFIG_PCI */
307
87fbc5a0
TH
308static int ata_lookup_timeout_table(u8 cmd)
309{
310 int i;
311
312 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
313 const u8 *cur;
314
315 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
316 if (*cur == cmd)
317 return i;
318 }
319
320 return -1;
321}
322
323/**
324 * ata_internal_cmd_timeout - determine timeout for an internal command
325 * @dev: target device
326 * @cmd: internal command to be issued
327 *
328 * Determine timeout for internal command @cmd for @dev.
329 *
330 * LOCKING:
331 * EH context.
332 *
333 * RETURNS:
334 * Determined timeout.
335 */
336unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
337{
338 struct ata_eh_context *ehc = &dev->link->eh_context;
339 int ent = ata_lookup_timeout_table(cmd);
340 int idx;
341
342 if (ent < 0)
343 return ATA_EH_CMD_DFL_TIMEOUT;
344
345 idx = ehc->cmd_timeout_idx[dev->devno][ent];
346 return ata_eh_cmd_timeout_table[ent].timeouts[idx];
347}
348
349/**
350 * ata_internal_cmd_timed_out - notification for internal command timeout
351 * @dev: target device
352 * @cmd: internal command which timed out
353 *
354 * Notify EH that internal command @cmd for @dev timed out. This
355 * function should be called only for commands whose timeouts are
356 * determined using ata_internal_cmd_timeout().
357 *
358 * LOCKING:
359 * EH context.
360 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	/* commands outside the table use a fixed timeout; nothing to bump */
	if (ent < 0)
		return;

	/* advance to the next (longer) timeout for this device/class
	 * unless we're already at the last entry (ULONG_MAX sentinel).
	 */
	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}
374
3884f7b0 375static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
0c247c55
TH
376 unsigned int err_mask)
377{
378 struct ata_ering_entry *ent;
379
380 WARN_ON(!err_mask);
381
382 ering->cursor++;
383 ering->cursor %= ATA_ERING_SIZE;
384
385 ent = &ering->ring[ering->cursor];
3884f7b0 386 ent->eflags = eflags;
0c247c55
TH
387 ent->err_mask = err_mask;
388 ent->timestamp = get_jiffies_64();
389}
390
76326ac1
TH
391static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
392{
393 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
394
395 if (ent->err_mask)
396 return ent;
397 return NULL;
398}
399
d9027470
GG
/* Walk recorded error entries from newest to oldest, invoking @map_fn
 * on each.  Iteration stops at the first empty slot (end of recorded
 * history) or when @map_fn returns non-zero; that value is returned.
 */
int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	/* start at the most recent entry and walk backward */
	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		/* zero err_mask means no more recorded entries */
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}
420
d9027470
GG
/* ata_ering_map() callback: flag an entry as belonging to an old error
 * report so later analysis can distinguish it from current errors.
 */
int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}
426
/* Mark every recorded entry in @ering as old, logically clearing it. */
static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}
431
64f65ca6
TH
432static unsigned int ata_eh_dev_action(struct ata_device *dev)
433{
9af5c9c9 434 struct ata_eh_context *ehc = &dev->link->eh_context;
64f65ca6
TH
435
436 return ehc->i.action | ehc->i.dev_action[dev->devno];
437}
438
/* Clear @action bits from @ehi.  If @dev is NULL, the bits are cleared
 * link-wide and for every device; otherwise only for @dev, after first
 * propagating any matching link-wide bits down to all devices so the
 * other devices don't lose them.
 */
static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		/* clear link-wide and all per-device copies */
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}
464
ece1d636
TH
465/**
466 * ata_scsi_timed_out - SCSI layer time out callback
467 * @cmd: timed out SCSI command
468 *
469 * Handles SCSI layer timeout. We race with normal completion of
470 * the qc for @cmd. If the qc is already gone, we lose and let
471 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
472 * timed out and EH should be invoked. Prevent ata_qc_complete()
473 * from finishing it by setting EH_SCHEDULED and return
474 * EH_NOT_HANDLED.
475 *
ad9e2762
TH
476 * TODO: kill this function once old EH is gone.
477 *
ece1d636
TH
478 * LOCKING:
479 * Called from timer context
480 *
481 * RETURNS:
482 * EH_HANDLED or EH_NOT_HANDLED
483 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	/* new-style EH handles timeouts itself; tell the block layer
	 * the timeout is not handled here.
	 */
	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	/* old EH: race against normal completion under ap->lock.  If
	 * the qc is still active, mark it timed out and let EH run.
	 */
	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		/* prevent ata_qc_complete() from finishing this qc */
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
514
ece180d1
TH
/* Prepare the port for driver unload: restore SControl for the next
 * driver, disable all attached devices, then freeze the port and mark
 * it unloaded so no further EH actions are taken.
 */
static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}
539
ece1d636
TH
540/**
541 * ata_scsi_error - SCSI layer error handler callback
542 * @host: SCSI host on which error occurred
543 *
544 * Handles SCSI-layer-thrown error events.
545 *
546 * LOCKING:
547 * Inherited from SCSI layer (none, can sleep)
548 *
549 * RETURNS:
550 * Zero.
551 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		/* classify each scmd queued for EH: timed out vs already
		 * completed normally after its timeout fired.
		 */
		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs.  They belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If we timed raced normal completion and there is nothing to
	   recover nr_timedout == 0 why exactly are we doing error recovery ? */

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		/* move eh_info into eh_context and snapshot per-device
		 * transfer mode / NCQ state for recovery.
		 */
		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		/* old EH: a single active qc handled by ->eng_timeout */
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}
752
c6cf9e99
TH
753/**
754 * ata_port_wait_eh - Wait for the currently pending EH to complete
755 * @ap: Port to wait EH for
756 *
757 * Wait until the currently pending EH is complete.
758 *
759 * LOCKING:
760 * Kernel thread context (may sleep).
761 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	/* sleep until neither pending nor in-progress EH flags remain */
	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}
786
5ddf24c5
TH
787static int ata_eh_nr_in_flight(struct ata_port *ap)
788{
789 unsigned int tag;
790 int nr = 0;
791
792 /* count only non-internal commands */
793 for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
794 if (ata_qc_from_tag(ap, tag))
795 nr++;
796
797 return nr;
798}
799
/* Fast-drain timer callback.  If no qc completed since the last
 * interval, time out everything in flight and freeze the port;
 * otherwise re-arm the timer and keep waiting.
 */
void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}
838
839/**
840 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
841 * @ap: target ATA port
842 * @fastdrain: activate fast drain
843 *
844 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
845 * is non-zero and EH wasn't pending before. Fast drain ensures
846 * that EH kicks in in timely manner.
847 *
848 * LOCKING:
849 * spin_lock_irqsave(host lock)
850 */
851static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
852{
853 int cnt;
854
855 /* already scheduled? */
856 if (ap->pflags & ATA_PFLAG_EH_PENDING)
857 return;
858
859 ap->pflags |= ATA_PFLAG_EH_PENDING;
860
861 if (!fastdrain)
862 return;
863
864 /* do we have in-flight qcs? */
865 cnt = ata_eh_nr_in_flight(ap);
866 if (!cnt)
867 return;
868
869 /* activate fast drain */
870 ap->fastdrain_cnt = cnt;
341c2c95
TH
871 ap->fastdrain_timer.expires =
872 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
5ddf24c5
TH
873 add_timer(&ap->fastdrain_timer);
874}
875
f686bcb8
TH
876/**
877 * ata_qc_schedule_eh - schedule qc for error handling
878 * @qc: command to schedule error handling for
879 *
880 * Schedule error handling for @qc. EH will kick in as soon as
881 * other commands are drained.
882 *
883 * LOCKING:
cca3974e 884 * spin_lock_irqsave(host lock)
f686bcb8
TH
885 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
906
7b70fc03
TH
907/**
908 * ata_port_schedule_eh - schedule error handling without a qc
909 * @ap: ATA port to schedule EH for
910 *
911 * Schedule error handling for @ap. EH will kick in as soon as
912 * all commands are drained.
913 *
914 * LOCKING:
cca3974e 915 * spin_lock_irqsave(host lock)
7b70fc03
TH
916 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	/* don't schedule EH while the port is still being initialized */
	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
929
/* Abort all active qcs on @ap - or only those belonging to @link when
 * @link is non-NULL - and make sure EH is scheduled.  Returns the
 * number of aborted qcs.
 */
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	/* nothing was aborted, so completing qcs won't kick EH -
	 * schedule it explicitly.
	 */
	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}
954
dbd82616
TH
955/**
956 * ata_link_abort - abort all qc's on the link
957 * @link: ATA link to abort qc's for
958 *
959 * Abort all active qc's active on @link and schedule EH.
960 *
961 * LOCKING:
962 * spin_lock_irqsave(host lock)
963 *
964 * RETURNS:
965 * Number of aborted qc's.
966 */
int ata_link_abort(struct ata_link *link)
{
	/* restrict the abort to qcs belonging to this link */
	return ata_do_link_abort(link->ap, link);
}
971
972/**
973 * ata_port_abort - abort all qc's on the port
974 * @ap: ATA port to abort qc's for
975 *
976 * Abort all active qc's of @ap and schedule EH.
977 *
978 * LOCKING:
979 * spin_lock_irqsave(host_set lock)
980 *
981 * RETURNS:
982 * Number of aborted qc's.
983 */
int ata_port_abort(struct ata_port *ap)
{
	/* NULL link means abort qcs on every link of the port */
	return ata_do_link_abort(ap, NULL);
}
988
e3180499
TH
989/**
990 * __ata_port_freeze - freeze port
991 * @ap: ATA port to freeze
992 *
993 * This function is called when HSM violation or some other
994 * condition disrupts normal operation of the port. Frozen port
995 * is not allowed to perform any operation until the port is
996 * thawed, which usually follows a successful reset.
997 *
998 * ap->ops->freeze() callback can be used for freezing the port
999 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
1000 * port cannot be frozen hardware-wise, the interrupt handler
1001 * must ack and clear interrupts unconditionally while the port
1002 * is frozen.
1003 *
1004 * LOCKING:
cca3974e 1005 * spin_lock_irqsave(host lock)
e3180499
TH
1006 */
1007static void __ata_port_freeze(struct ata_port *ap)
1008{
1009 WARN_ON(!ap->ops->error_handler);
1010
1011 if (ap->ops->freeze)
1012 ap->ops->freeze(ap);
1013
b51e9e5d 1014 ap->pflags |= ATA_PFLAG_FROZEN;
e3180499 1015
44877b4e 1016 DPRINTK("ata%u port frozen\n", ap->print_id);
e3180499
TH
1017}
1018
/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	/* order matters: hardware must be frozen before qc's are aborted,
	 * see the function comment above.
	 */
	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}
1044
7d77b247
TH
/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	/* ignore AN FISes on controllers which don't advertise support */
	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	/* read SNTF and write it back to acknowledge the notification bits */
	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN: one SNTF bit per PMP port */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
1117
e3180499
TH
/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	/* old-style drivers without ->error_handler can't be frozen */
	if (!ap->ops->error_handler)
		return;

	/* __ata_port_freeze() requires the host lock to be held */
	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
1138
/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	/* old-style drivers without ->error_handler never freeze */
	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	/* give the LLD a chance to re-enable interrupts / DMA engine */
	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}
1166
ece1d636
TH
/* Dummy ->scsidone callback installed by __ata_eh_qc_complete() so that
 * completing the qc doesn't feed the command back through the normal
 * SCSI done path; EH hands it over via scsi_eh_finish_cmd() instead.
 */
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}
1171
/* Complete @qc under the host lock with a no-op scsidone, then hand the
 * SCSI command to the EH done queue.  Common tail for ata_eh_qc_complete()
 * and ata_eh_qc_retry().
 */
static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	/* neutralize the SCSI completion path; EH finishes the cmd below */
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	/* the tag must have been released by __ata_qc_complete() */
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
1186
1187/**
1188 * ata_eh_qc_complete - Complete an active ATA command from EH
1189 * @qc: Command to complete
1190 *
1191 * Indicate to the mid and upper layers that an ATA command has
1192 * completed. To be used from EH.
1193 */
1194void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1195{
1196 struct scsi_cmnd *scmd = qc->scsicmd;
1197 scmd->retries = scmd->allowed;
1198 __ata_eh_qc_complete(qc);
1199}
1200
1201/**
1202 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1203 * @qc: Command to retry
1204 *
1205 * Indicate to the mid and upper layers that an ATA command
1206 * should be retried. To be used from EH.
1207 *
1208 * SCSI midlayer limits the number of retries to scmd->allowed.
1209 * scmd->retries is decremented for commands which get retried
1210 * due to unrelated failures (qc->err_mask is zero).
1211 */
1212void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1213{
1214 struct scsi_cmnd *scmd = qc->scsicmd;
1215 if (!qc->err_mask && scmd->retries)
1216 scmd->retries--;
1217 __ata_eh_qc_complete(qc);
1218}
022bdb07 1219
678afac6
TH
/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	Locking:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	/* already disabled, nothing to do */
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	/* NOTE(review): incrementing class appears to map each ATA_DEV_*
	 * value to its corresponding "unsupported" variant — relies on the
	 * ATA_DEV_* enum layout; confirm against libata.h.
	 */
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}
1245
0ea035a3
TH
/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	/* if the SCSI device could be taken offline, ask the SCSI
	 * hotplug task to finish the detach
	 */
	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}
1281
022bdb07
TH
/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}
1315
47005f25
TH
/**
 *	ata_eh_done - EH action complete
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}
1335
022bdb07
TH
1336/**
1337 * ata_err_string - convert err_mask to descriptive string
1338 * @err_mask: error mask to convert to string
1339 *
1340 * Convert @err_mask to descriptive string. Errors are
1341 * prioritized according to severity and only the most severe
1342 * error is reported.
1343 *
1344 * LOCKING:
1345 * None.
1346 *
1347 * RETURNS:
1348 * Descriptive string for @err_mask
1349 */
2dcb407e 1350static const char *ata_err_string(unsigned int err_mask)
022bdb07
TH
1351{
1352 if (err_mask & AC_ERR_HOST_BUS)
1353 return "host bus error";
1354 if (err_mask & AC_ERR_ATA_BUS)
1355 return "ATA bus error";
1356 if (err_mask & AC_ERR_TIMEOUT)
1357 return "timeout";
1358 if (err_mask & AC_ERR_HSM)
1359 return "HSM violation";
1360 if (err_mask & AC_ERR_SYSTEM)
1361 return "internal error";
1362 if (err_mask & AC_ERR_MEDIA)
1363 return "media error";
1364 if (err_mask & AC_ERR_INVALID)
1365 return "invalid argument";
1366 if (err_mask & AC_ERR_DEV)
1367 return "device error";
1368 return "unknown error";
1369}
1370
e8ee8451
TH
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	/* sector count is 16 bits wide, split across nsect/hob_nsect */
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
1408
1409/**
1410 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
1411 * @dev: Device to read log page 10h from
1412 * @tag: Resulting tag of the failed command
1413 * @tf: Resulting taskfile registers of the failed command
1414 *
1415 * Read log page 10h to obtain NCQ error details and clear error
1416 * condition.
1417 *
1418 * LOCKING:
1419 * Kernel thread context (may sleep).
1420 *
1421 * RETURNS:
1422 * 0 on success, -errno otherwise.
1423 */
1424static int ata_eh_read_log_10h(struct ata_device *dev,
1425 int *tag, struct ata_taskfile *tf)
1426{
9af5c9c9 1427 u8 *buf = dev->link->ap->sector_buf;
e8ee8451
TH
1428 unsigned int err_mask;
1429 u8 csum;
1430 int i;
1431
1432 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
1433 if (err_mask)
1434 return -EIO;
1435
1436 csum = 0;
1437 for (i = 0; i < ATA_SECT_SIZE; i++)
1438 csum += buf[i];
1439 if (csum)
1440 ata_dev_printk(dev, KERN_WARNING,
1441 "invalid checksum 0x%x on log page 10h\n", csum);
1442
1443 if (buf[0] & 0x80)
1444 return -ENOENT;
1445
1446 *tag = buf[0] & 0x1f;
1447
1448 tf->command = buf[2];
1449 tf->feature = buf[3];
1450 tf->lbal = buf[4];
1451 tf->lbam = buf[5];
1452 tf->lbah = buf[6];
1453 tf->device = buf[7];
1454 tf->hob_lbal = buf[8];
1455 tf->hob_lbam = buf[9];
1456 tf->hob_lbah = buf[10];
1457 tf->nsect = buf[12];
1458 tf->hob_nsect = buf[13];
1459
1460 return 0;
1461}
1462
11fc33da
TH
/**
 *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *	@dev: target ATAPI device
 *	@r_sense_key: out parameter for sense_key
 *
 *	Perform ATAPI TEST_UNIT_READY.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	/* on device error the sense key is in the upper nibble of feature */
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}
1493
022bdb07
TH
/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;	/* fixed-format sense data response code */
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		/* byte count limit goes in lbam/lbah for PIO transfers */
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}
1547
1548/**
1549 * ata_eh_analyze_serror - analyze SError for a failed port
0260731f 1550 * @link: ATA link to analyze SError for
022bdb07
TH
1551 *
1552 * Analyze SError if available and further determine cause of
1553 * failure.
1554 *
1555 * LOCKING:
1556 * None.
1557 */
0260731f 1558static void ata_eh_analyze_serror(struct ata_link *link)
022bdb07 1559{
0260731f 1560 struct ata_eh_context *ehc = &link->eh_context;
022bdb07
TH
1561 u32 serror = ehc->i.serror;
1562 unsigned int err_mask = 0, action = 0;
f9df58cb 1563 u32 hotplug_mask;
022bdb07 1564
e0614db2 1565 if (serror & (SERR_PERSISTENT | SERR_DATA)) {
022bdb07 1566 err_mask |= AC_ERR_ATA_BUS;
cf480626 1567 action |= ATA_EH_RESET;
022bdb07
TH
1568 }
1569 if (serror & SERR_PROTOCOL) {
1570 err_mask |= AC_ERR_HSM;
cf480626 1571 action |= ATA_EH_RESET;
022bdb07
TH
1572 }
1573 if (serror & SERR_INTERNAL) {
1574 err_mask |= AC_ERR_SYSTEM;
cf480626 1575 action |= ATA_EH_RESET;
022bdb07 1576 }
f9df58cb
TH
1577
1578 /* Determine whether a hotplug event has occurred. Both
1579 * SError.N/X are considered hotplug events for enabled or
1580 * host links. For disabled PMP links, only N bit is
1581 * considered as X bit is left at 1 for link plugging.
1582 */
1583 hotplug_mask = 0;
1584
1585 if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1586 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1587 else
1588 hotplug_mask = SERR_PHYRDY_CHG;
1589
1590 if (serror & hotplug_mask)
084fe639 1591 ata_ehi_hotplugged(&ehc->i);
022bdb07
TH
1592
1593 ehc->i.err_mask |= err_mask;
1594 ehc->i.action |= action;
1595}
1596
e8ee8451
TH
/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all LLDDs have to do
 *	is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already?  any failed qc with err_mask set
	 * means the LLDD already pinpointed the failure.
	 */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	/* sanity check: the reported tag must be outstanding */
	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	/* the link-level DEV error has been attributed to a specific qc */
	ehc->i.err_mask &= ~AC_ERR_DEV;
}
1659
022bdb07
TH
/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	/* anything but a clean DRDY status is a state machine violation */
	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		/* decode the error register bits */
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		/* can't issue REQUEST SENSE on a frozen port */
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}
1725
76326ac1
TH
1726static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1727 int *xfer_ok)
022bdb07 1728{
76326ac1
TH
1729 int base = 0;
1730
1731 if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1732 *xfer_ok = 1;
1733
1734 if (!*xfer_ok)
75f9cafc 1735 base = ATA_ECAT_DUBIOUS_NONE;
76326ac1 1736
7d47e8d4 1737 if (err_mask & AC_ERR_ATA_BUS)
76326ac1 1738 return base + ATA_ECAT_ATA_BUS;
022bdb07 1739
7d47e8d4 1740 if (err_mask & AC_ERR_TIMEOUT)
76326ac1 1741 return base + ATA_ECAT_TOUT_HSM;
7d47e8d4 1742
3884f7b0 1743 if (eflags & ATA_EFLAG_IS_IO) {
7d47e8d4 1744 if (err_mask & AC_ERR_HSM)
76326ac1 1745 return base + ATA_ECAT_TOUT_HSM;
7d47e8d4
TH
1746 if ((err_mask &
1747 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
76326ac1 1748 return base + ATA_ECAT_UNK_DEV;
022bdb07
TH
1749 }
1750
1751 return 0;
1752}
1753
7d47e8d4 1754struct speed_down_verdict_arg {
022bdb07 1755 u64 since;
76326ac1 1756 int xfer_ok;
3884f7b0 1757 int nr_errors[ATA_ECAT_NR];
022bdb07
TH
1758};
1759
7d47e8d4 1760static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
022bdb07 1761{
7d47e8d4 1762 struct speed_down_verdict_arg *arg = void_arg;
76326ac1 1763 int cat;
022bdb07 1764
d9027470 1765 if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
022bdb07
TH
1766 return -1;
1767
76326ac1
TH
1768 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1769 &arg->xfer_ok);
7d47e8d4 1770 arg->nr_errors[cat]++;
76326ac1 1771
022bdb07
TH
1772 return 0;
1773}
1774
/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
 *			  IO commands
 *
 *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
 *			  data transfer hasn't been verified.
 *
 *	Verdicts are
 *
 *	NCQ_OFF		: Turn off NCQ.
 *
 *	SPEED_DOWN	: Speed down transfer speed but don't fall back
 *			  to PIO.
 *
 *	FALLBACK_TO_PIO	: Fall back to PIO.
 *
 *	Even if multiple verdicts are returned, only one action is
 *	taken per error.  An action triggered by non-DUBIOUS errors
 *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
 *	This is to expedite speed down decisions right after device is
 *	initially configured.
 *
 *	The followings are speed down rules.  #1 and #2 deal with
 *	DUBIOUS errors.
 *
 *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *	   occurred during last 5 mins, NCQ_OFF.
 *
 *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *	   occurred during last 5 mins, FALLBACK_TO_PIO
 *
 *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *	   during last 10 mins, NCQ_OFF.
 *
 *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
1874
/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@eflags: mask of ATA_EFLAG_* flags
 *	@err_mask: err_mask of the error
 *
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary.  It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ?  Only if NCQ is on and neither PIO nor a
	 * previous NCQ-off is in effect.
	 */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_printk(dev, KERN_WARNING,
			       "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode: at most two steps per mode class */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
1968
1969/**
9b1e2658
TH
1970 * ata_eh_link_autopsy - analyze error and determine recovery action
1971 * @link: host link to perform autopsy on
022bdb07 1972 *
0260731f
TH
1973 * Analyze why @link failed and determine which recovery actions
1974 * are needed. This function also sets more detailed AC_ERR_*
1975 * values and fills sense data for ATAPI CHECK SENSE.
022bdb07
TH
1976 *
1977 * LOCKING:
1978 * Kernel thread context (may sleep).
1979 */
9b1e2658 1980static void ata_eh_link_autopsy(struct ata_link *link)
022bdb07 1981{
0260731f 1982 struct ata_port *ap = link->ap;
936fd732 1983 struct ata_eh_context *ehc = &link->eh_context;
dfcc173d 1984 struct ata_device *dev;
3884f7b0
TH
1985 unsigned int all_err_mask = 0, eflags = 0;
1986 int tag;
022bdb07
TH
1987 u32 serror;
1988 int rc;
1989
1990 DPRINTK("ENTER\n");
1991
1cdaf534
TH
1992 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1993 return;
1994
022bdb07 1995 /* obtain and analyze SError */
936fd732 1996 rc = sata_scr_read(link, SCR_ERROR, &serror);
022bdb07
TH
1997 if (rc == 0) {
1998 ehc->i.serror |= serror;
0260731f 1999 ata_eh_analyze_serror(link);
4e57c517 2000 } else if (rc != -EOPNOTSUPP) {
cf480626 2001 /* SError read failed, force reset and probing */
b558eddd 2002 ehc->i.probe_mask |= ATA_ALL_DEVICES;
cf480626 2003 ehc->i.action |= ATA_EH_RESET;
4e57c517
TH
2004 ehc->i.err_mask |= AC_ERR_OTHER;
2005 }
022bdb07 2006
e8ee8451 2007 /* analyze NCQ failure */
0260731f 2008 ata_eh_analyze_ncq_error(link);
e8ee8451 2009
022bdb07
TH
2010 /* any real error trumps AC_ERR_OTHER */
2011 if (ehc->i.err_mask & ~AC_ERR_OTHER)
2012 ehc->i.err_mask &= ~AC_ERR_OTHER;
2013
2014 all_err_mask |= ehc->i.err_mask;
2015
2016 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2017 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2018
b1c72916
TH
2019 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2020 ata_dev_phys_link(qc->dev) != link)
022bdb07
TH
2021 continue;
2022
2023 /* inherit upper level err_mask */
2024 qc->err_mask |= ehc->i.err_mask;
2025
022bdb07 2026 /* analyze TF */
4528e4da 2027 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
022bdb07
TH
2028
2029 /* DEV errors are probably spurious in case of ATA_BUS error */
2030 if (qc->err_mask & AC_ERR_ATA_BUS)
2031 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2032 AC_ERR_INVALID);
2033
2034 /* any real error trumps unknown error */
2035 if (qc->err_mask & ~AC_ERR_OTHER)
2036 qc->err_mask &= ~AC_ERR_OTHER;
2037
2038 /* SENSE_VALID trumps dev/unknown error and revalidation */
f90f0828 2039 if (qc->flags & ATA_QCFLAG_SENSE_VALID)
022bdb07 2040 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
022bdb07 2041
03faab78 2042 /* determine whether the command is worth retrying */
534ead70
TH
2043 if (qc->flags & ATA_QCFLAG_IO ||
2044 (!(qc->err_mask & AC_ERR_INVALID) &&
2045 qc->err_mask != AC_ERR_DEV))
03faab78
TH
2046 qc->flags |= ATA_QCFLAG_RETRY;
2047
022bdb07 2048 /* accumulate error info */
4528e4da 2049 ehc->i.dev = qc->dev;
022bdb07
TH
2050 all_err_mask |= qc->err_mask;
2051 if (qc->flags & ATA_QCFLAG_IO)
3884f7b0 2052 eflags |= ATA_EFLAG_IS_IO;
022bdb07
TH
2053 }
2054
a20f33ff 2055 /* enforce default EH actions */
b51e9e5d 2056 if (ap->pflags & ATA_PFLAG_FROZEN ||
a20f33ff 2057 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
cf480626 2058 ehc->i.action |= ATA_EH_RESET;
3884f7b0
TH
2059 else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2060 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
4528e4da 2061 ehc->i.action |= ATA_EH_REVALIDATE;
022bdb07 2062
dfcc173d
TH
2063 /* If we have offending qcs and the associated failed device,
2064 * perform per-dev EH action only on the offending device.
2065 */
4528e4da 2066 if (ehc->i.dev) {
4528e4da
TH
2067 ehc->i.dev_action[ehc->i.dev->devno] |=
2068 ehc->i.action & ATA_EH_PERDEV_MASK;
2069 ehc->i.action &= ~ATA_EH_PERDEV_MASK;
47005f25
TH
2070 }
2071
2695e366
TH
2072 /* propagate timeout to host link */
2073 if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2074 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2075
2076 /* record error and consider speeding down */
dfcc173d 2077 dev = ehc->i.dev;
2695e366
TH
2078 if (!dev && ((ata_link_max_devices(link) == 1 &&
2079 ata_dev_enabled(link->device))))
2080 dev = link->device;
dfcc173d 2081
76326ac1
TH
2082 if (dev) {
2083 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2084 eflags |= ATA_EFLAG_DUBIOUS_XFER;
3884f7b0 2085 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
76326ac1 2086 }
dfcc173d 2087
022bdb07
TH
2088 DPRINTK("EXIT\n");
2089}
2090
2091/**
9b1e2658
TH
2092 * ata_eh_autopsy - analyze error and determine recovery action
2093 * @ap: host port to perform autopsy on
2094 *
2095 * Analyze all links of @ap and determine why they failed and
2096 * which recovery actions are needed.
2097 *
2098 * LOCKING:
2099 * Kernel thread context (may sleep).
2100 */
fb7fd614 2101void ata_eh_autopsy(struct ata_port *ap)
9b1e2658
TH
2102{
2103 struct ata_link *link;
2104
1eca4365 2105 ata_for_each_link(link, ap, EDGE)
9b1e2658 2106 ata_eh_link_autopsy(link);
2695e366 2107
b1c72916
TH
2108 /* Handle the frigging slave link. Autopsy is done similarly
2109 * but actions and flags are transferred over to the master
2110 * link and handled from there.
2111 */
2112 if (ap->slave_link) {
2113 struct ata_eh_context *mehc = &ap->link.eh_context;
2114 struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2115
848e4c68
TH
2116 /* transfer control flags from master to slave */
2117 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2118
2119 /* perform autopsy on the slave link */
b1c72916
TH
2120 ata_eh_link_autopsy(ap->slave_link);
2121
848e4c68 2122 /* transfer actions from slave to master and clear slave */
b1c72916
TH
2123 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2124 mehc->i.action |= sehc->i.action;
2125 mehc->i.dev_action[1] |= sehc->i.dev_action[1];
2126 mehc->i.flags |= sehc->i.flags;
2127 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2128 }
2129
2695e366
TH
2130 /* Autopsy of fanout ports can affect host link autopsy.
2131 * Perform host link autopsy last.
2132 */
071f44b1 2133 if (sata_pmp_attached(ap))
2695e366 2134 ata_eh_link_autopsy(&ap->link);
9b1e2658
TH
2135}
2136
6521148c
RH
2137/**
2138 * ata_get_cmd_descript - get description for ATA command
2139 * @command: ATA command code to get description for
2140 *
2141 * Return a textual description of the given command, or NULL if the
2142 * command is not known.
2143 *
2144 * LOCKING:
2145 * None
2146 */
2147const char *ata_get_cmd_descript(u8 command)
2148{
2149#ifdef CONFIG_ATA_VERBOSE_ERROR
2150 static const struct
2151 {
2152 u8 command;
2153 const char *text;
2154 } cmd_descr[] = {
2155 { ATA_CMD_DEV_RESET, "DEVICE RESET" },
2156 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
2157 { ATA_CMD_STANDBY, "STANDBY" },
2158 { ATA_CMD_IDLE, "IDLE" },
2159 { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
2160 { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
2161 { ATA_CMD_NOP, "NOP" },
2162 { ATA_CMD_FLUSH, "FLUSH CACHE" },
2163 { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
2164 { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
2165 { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
2166 { ATA_CMD_SERVICE, "SERVICE" },
2167 { ATA_CMD_READ, "READ DMA" },
2168 { ATA_CMD_READ_EXT, "READ DMA EXT" },
2169 { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
2170 { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
2171 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
2172 { ATA_CMD_WRITE, "WRITE DMA" },
2173 { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
2174 { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
2175 { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
2176 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2177 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
2178 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2179 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
2180 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
2181 { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
2182 { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
2183 { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
2184 { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
2185 { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
2186 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
2187 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
2188 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
2189 { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
2190 { ATA_CMD_SET_FEATURES, "SET FEATURES" },
2191 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
2192 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
2193 { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
2194 { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
2195 { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
2196 { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
2197 { ATA_CMD_SLEEP, "SLEEP" },
2198 { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
2199 { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
2200 { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
2201 { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
2202 { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
2203 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
2204 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
2205 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
2206 { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
2207 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
2208 { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
2209 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
2210 { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
2211 { ATA_CMD_PMP_READ, "READ BUFFER" },
2212 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
2213 { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
2214 { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
2215 { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
2216 { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
2217 { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
2218 { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
2219 { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
2220 { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
2221 { ATA_CMD_SMART, "SMART" },
2222 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
2223 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
acad7627 2224 { ATA_CMD_DSM, "DATA SET MANAGEMENT" },
6521148c
RH
2225 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
2226 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
2227 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
2228 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
2229 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
2230 { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
2231 { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
2232 { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
2233 { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
2234 { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
2235 { ATA_CMD_RESTORE, "RECALIBRATE" },
2236 { 0, NULL } /* terminate list */
2237 };
2238
2239 unsigned int i;
2240 for (i = 0; cmd_descr[i].text; i++)
2241 if (cmd_descr[i].command == command)
2242 return cmd_descr[i].text;
2243#endif
2244
2245 return NULL;
2246}
2247
9b1e2658
TH
2248/**
2249 * ata_eh_link_report - report error handling to user
0260731f 2250 * @link: ATA link EH is going on
022bdb07
TH
2251 *
2252 * Report EH to user.
2253 *
2254 * LOCKING:
2255 * None.
2256 */
9b1e2658 2257static void ata_eh_link_report(struct ata_link *link)
022bdb07 2258{
0260731f
TH
2259 struct ata_port *ap = link->ap;
2260 struct ata_eh_context *ehc = &link->eh_context;
022bdb07 2261 const char *frozen, *desc;
a1e10f7e 2262 char tries_buf[6];
022bdb07
TH
2263 int tag, nr_failed = 0;
2264
94ff3d54
TH
2265 if (ehc->i.flags & ATA_EHI_QUIET)
2266 return;
2267
022bdb07
TH
2268 desc = NULL;
2269 if (ehc->i.desc[0] != '\0')
2270 desc = ehc->i.desc;
2271
2272 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2273 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2274
b1c72916
TH
2275 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2276 ata_dev_phys_link(qc->dev) != link ||
e027bd36
TH
2277 ((qc->flags & ATA_QCFLAG_QUIET) &&
2278 qc->err_mask == AC_ERR_DEV))
022bdb07
TH
2279 continue;
2280 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2281 continue;
2282
2283 nr_failed++;
2284 }
2285
2286 if (!nr_failed && !ehc->i.err_mask)
2287 return;
2288
2289 frozen = "";
b51e9e5d 2290 if (ap->pflags & ATA_PFLAG_FROZEN)
022bdb07
TH
2291 frozen = " frozen";
2292
a1e10f7e
TH
2293 memset(tries_buf, 0, sizeof(tries_buf));
2294 if (ap->eh_tries < ATA_EH_MAX_TRIES)
2295 snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2296 ap->eh_tries);
2297
022bdb07 2298 if (ehc->i.dev) {
e8ee8451 2299 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
a1e10f7e
TH
2300 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2301 ehc->i.err_mask, link->sactive, ehc->i.serror,
2302 ehc->i.action, frozen, tries_buf);
022bdb07 2303 if (desc)
b64bbc39 2304 ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
022bdb07 2305 } else {
0260731f 2306 ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
a1e10f7e
TH
2307 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2308 ehc->i.err_mask, link->sactive, ehc->i.serror,
2309 ehc->i.action, frozen, tries_buf);
022bdb07 2310 if (desc)
0260731f 2311 ata_link_printk(link, KERN_ERR, "%s\n", desc);
022bdb07
TH
2312 }
2313
6521148c 2314#ifdef CONFIG_ATA_VERBOSE_ERROR
1333e194 2315 if (ehc->i.serror)
da0e21d3 2316 ata_link_printk(link, KERN_ERR,
1333e194
RH
2317 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2318 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2319 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2320 ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2321 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2322 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2323 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2324 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2325 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2326 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2327 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2328 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2329 ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2330 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2331 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2332 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2333 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2dcb407e 2334 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
6521148c 2335#endif
1333e194 2336
022bdb07
TH
2337 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2338 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
8a937581 2339 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
abb6a889
TH
2340 const u8 *cdb = qc->cdb;
2341 char data_buf[20] = "";
2342 char cdb_buf[70] = "";
022bdb07 2343
0260731f 2344 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
b1c72916 2345 ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
022bdb07
TH
2346 continue;
2347
abb6a889
TH
2348 if (qc->dma_dir != DMA_NONE) {
2349 static const char *dma_str[] = {
2350 [DMA_BIDIRECTIONAL] = "bidi",
2351 [DMA_TO_DEVICE] = "out",
2352 [DMA_FROM_DEVICE] = "in",
2353 };
2354 static const char *prot_str[] = {
2355 [ATA_PROT_PIO] = "pio",
2356 [ATA_PROT_DMA] = "dma",
2357 [ATA_PROT_NCQ] = "ncq",
0dc36888
TH
2358 [ATAPI_PROT_PIO] = "pio",
2359 [ATAPI_PROT_DMA] = "dma",
abb6a889
TH
2360 };
2361
2362 snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2363 prot_str[qc->tf.protocol], qc->nbytes,
2364 dma_str[qc->dma_dir]);
2365 }
2366
6521148c
RH
2367 if (ata_is_atapi(qc->tf.protocol)) {
2368 if (qc->scsicmd)
2369 scsi_print_command(qc->scsicmd);
2370 else
2371 snprintf(cdb_buf, sizeof(cdb_buf),
abb6a889
TH
2372 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
2373 "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
2374 cdb[0], cdb[1], cdb[2], cdb[3],
2375 cdb[4], cdb[5], cdb[6], cdb[7],
2376 cdb[8], cdb[9], cdb[10], cdb[11],
2377 cdb[12], cdb[13], cdb[14], cdb[15]);
6521148c
RH
2378 } else {
2379 const char *descr = ata_get_cmd_descript(cmd->command);
2380 if (descr)
2381 ata_dev_printk(qc->dev, KERN_ERR,
2382 "failed command: %s\n", descr);
2383 }
abb6a889 2384
8a937581
TH
2385 ata_dev_printk(qc->dev, KERN_ERR,
2386 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
abb6a889 2387 "tag %d%s\n %s"
8a937581 2388 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
5335b729 2389 "Emask 0x%x (%s)%s\n",
8a937581
TH
2390 cmd->command, cmd->feature, cmd->nsect,
2391 cmd->lbal, cmd->lbam, cmd->lbah,
2392 cmd->hob_feature, cmd->hob_nsect,
2393 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
abb6a889 2394 cmd->device, qc->tag, data_buf, cdb_buf,
8a937581
TH
2395 res->command, res->feature, res->nsect,
2396 res->lbal, res->lbam, res->lbah,
2397 res->hob_feature, res->hob_nsect,
2398 res->hob_lbal, res->hob_lbam, res->hob_lbah,
5335b729
TH
2399 res->device, qc->err_mask, ata_err_string(qc->err_mask),
2400 qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
1333e194 2401
6521148c 2402#ifdef CONFIG_ATA_VERBOSE_ERROR
1333e194 2403 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2dcb407e 2404 ATA_ERR)) {
1333e194
RH
2405 if (res->command & ATA_BUSY)
2406 ata_dev_printk(qc->dev, KERN_ERR,
2dcb407e 2407 "status: { Busy }\n");
1333e194
RH
2408 else
2409 ata_dev_printk(qc->dev, KERN_ERR,
2410 "status: { %s%s%s%s}\n",
2411 res->command & ATA_DRDY ? "DRDY " : "",
2412 res->command & ATA_DF ? "DF " : "",
2413 res->command & ATA_DRQ ? "DRQ " : "",
2dcb407e 2414 res->command & ATA_ERR ? "ERR " : "");
1333e194
RH
2415 }
2416
2417 if (cmd->command != ATA_CMD_PACKET &&
2418 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2419 ATA_ABORTED)))
2420 ata_dev_printk(qc->dev, KERN_ERR,
2421 "error: { %s%s%s%s}\n",
2422 res->feature & ATA_ICRC ? "ICRC " : "",
2423 res->feature & ATA_UNC ? "UNC " : "",
2424 res->feature & ATA_IDNF ? "IDNF " : "",
2dcb407e 2425 res->feature & ATA_ABORTED ? "ABRT " : "");
6521148c 2426#endif
022bdb07
TH
2427 }
2428}
2429
9b1e2658
TH
2430/**
2431 * ata_eh_report - report error handling to user
2432 * @ap: ATA port to report EH about
2433 *
2434 * Report EH to user.
2435 *
2436 * LOCKING:
2437 * None.
2438 */
fb7fd614 2439void ata_eh_report(struct ata_port *ap)
9b1e2658
TH
2440{
2441 struct ata_link *link;
2442
1eca4365 2443 ata_for_each_link(link, ap, HOST_FIRST)
9b1e2658
TH
2444 ata_eh_link_report(link);
2445}
2446
cc0680a5 2447static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
b1c72916
TH
2448 unsigned int *classes, unsigned long deadline,
2449 bool clear_classes)
d87fa38e 2450{
f58229f8 2451 struct ata_device *dev;
d87fa38e 2452
b1c72916 2453 if (clear_classes)
1eca4365 2454 ata_for_each_dev(dev, link, ALL)
b1c72916 2455 classes[dev->devno] = ATA_DEV_UNKNOWN;
d87fa38e 2456
f046519f 2457 return reset(link, classes, deadline);
d87fa38e
TH
2458}
2459
ae791c05 2460static int ata_eh_followup_srst_needed(struct ata_link *link,
5dbfc9cb 2461 int rc, const unsigned int *classes)
664faf09 2462{
45db2f6c 2463 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
ae791c05 2464 return 0;
5dbfc9cb
TH
2465 if (rc == -EAGAIN)
2466 return 1;
071f44b1 2467 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
3495de73 2468 return 1;
664faf09
TH
2469 return 0;
2470}
2471
fb7fd614
TH
2472int ata_eh_reset(struct ata_link *link, int classify,
2473 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2474 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
022bdb07 2475{
afaa5c37 2476 struct ata_port *ap = link->ap;
b1c72916 2477 struct ata_link *slave = ap->slave_link;
936fd732 2478 struct ata_eh_context *ehc = &link->eh_context;
705d2014 2479 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
664faf09 2480 unsigned int *classes = ehc->classes;
416dc9ed 2481 unsigned int lflags = link->flags;
1cdaf534 2482 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
d8af0eb6 2483 int max_tries = 0, try = 0;
b1c72916 2484 struct ata_link *failed_link;
f58229f8 2485 struct ata_device *dev;
416dc9ed 2486 unsigned long deadline, now;
022bdb07 2487 ata_reset_fn_t reset;
afaa5c37 2488 unsigned long flags;
416dc9ed 2489 u32 sstatus;
b1c72916 2490 int nr_unknown, rc;
022bdb07 2491
932648b0
TH
2492 /*
2493 * Prepare to reset
2494 */
d8af0eb6
TH
2495 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2496 max_tries++;
05944bdf
TH
2497 if (link->flags & ATA_LFLAG_NO_HRST)
2498 hardreset = NULL;
2499 if (link->flags & ATA_LFLAG_NO_SRST)
2500 softreset = NULL;
d8af0eb6 2501
19b72321
TH
2502 /* make sure each reset attemp is at least COOL_DOWN apart */
2503 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2504 now = jiffies;
2505 WARN_ON(time_after(ehc->last_reset, now));
2506 deadline = ata_deadline(ehc->last_reset,
2507 ATA_EH_RESET_COOL_DOWN);
2508 if (time_before(now, deadline))
2509 schedule_timeout_uninterruptible(deadline - now);
2510 }
0a2c0f56 2511
afaa5c37
TH
2512 spin_lock_irqsave(ap->lock, flags);
2513 ap->pflags |= ATA_PFLAG_RESETTING;
2514 spin_unlock_irqrestore(ap->lock, flags);
2515
cf480626 2516 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
13abf50d 2517
1eca4365 2518 ata_for_each_dev(dev, link, ALL) {
cdeab114
TH
2519 /* If we issue an SRST then an ATA drive (not ATAPI)
2520 * may change configuration and be in PIO0 timing. If
2521 * we do a hard reset (or are coming from power on)
2522 * this is true for ATA or ATAPI. Until we've set a
2523 * suitable controller mode we should not touch the
2524 * bus as we may be talking too fast.
2525 */
2526 dev->pio_mode = XFER_PIO_0;
2527
2528 /* If the controller has a pio mode setup function
2529 * then use it to set the chipset to rights. Don't
2530 * touch the DMA setup as that will be dealt with when
2531 * configuring devices.
2532 */
2533 if (ap->ops->set_piomode)
2534 ap->ops->set_piomode(ap, dev);
2535 }
2536
cf480626 2537 /* prefer hardreset */
932648b0 2538 reset = NULL;
cf480626
TH
2539 ehc->i.action &= ~ATA_EH_RESET;
2540 if (hardreset) {
2541 reset = hardreset;
a674050e 2542 ehc->i.action |= ATA_EH_HARDRESET;
4f7faa3f 2543 } else if (softreset) {
cf480626 2544 reset = softreset;
a674050e 2545 ehc->i.action |= ATA_EH_SOFTRESET;
cf480626 2546 }
f5914a46
TH
2547
2548 if (prereset) {
b1c72916
TH
2549 unsigned long deadline = ata_deadline(jiffies,
2550 ATA_EH_PRERESET_TIMEOUT);
2551
2552 if (slave) {
2553 sehc->i.action &= ~ATA_EH_RESET;
2554 sehc->i.action |= ehc->i.action;
2555 }
2556
2557 rc = prereset(link, deadline);
2558
2559 /* If present, do prereset on slave link too. Reset
2560 * is skipped iff both master and slave links report
2561 * -ENOENT or clear ATA_EH_RESET.
2562 */
2563 if (slave && (rc == 0 || rc == -ENOENT)) {
2564 int tmp;
2565
2566 tmp = prereset(slave, deadline);
2567 if (tmp != -ENOENT)
2568 rc = tmp;
2569
2570 ehc->i.action |= sehc->i.action;
2571 }
2572
f5914a46 2573 if (rc) {
c961922b 2574 if (rc == -ENOENT) {
cc0680a5 2575 ata_link_printk(link, KERN_DEBUG,
4aa9ab67 2576 "port disabled. ignoring.\n");
cf480626 2577 ehc->i.action &= ~ATA_EH_RESET;
4aa9ab67 2578
1eca4365 2579 ata_for_each_dev(dev, link, ALL)
f58229f8 2580 classes[dev->devno] = ATA_DEV_NONE;
4aa9ab67
TH
2581
2582 rc = 0;
c961922b 2583 } else
cc0680a5 2584 ata_link_printk(link, KERN_ERR,
f5914a46 2585 "prereset failed (errno=%d)\n", rc);
fccb6ea5 2586 goto out;
f5914a46 2587 }
f5914a46 2588
932648b0 2589 /* prereset() might have cleared ATA_EH_RESET. If so,
d6515e6f 2590 * bang classes, thaw and return.
932648b0
TH
2591 */
2592 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
1eca4365 2593 ata_for_each_dev(dev, link, ALL)
932648b0 2594 classes[dev->devno] = ATA_DEV_NONE;
d6515e6f
TH
2595 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2596 ata_is_host_link(link))
2597 ata_eh_thaw_port(ap);
932648b0
TH
2598 rc = 0;
2599 goto out;
2600 }
f5914a46
TH
2601 }
2602
022bdb07 2603 retry:
932648b0
TH
2604 /*
2605 * Perform reset
2606 */
dc98c32c
TH
2607 if (ata_is_host_link(link))
2608 ata_eh_freeze_port(ap);
2609
341c2c95 2610 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
31daabda 2611
932648b0
TH
2612 if (reset) {
2613 if (verbose)
2614 ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2615 reset == softreset ? "soft" : "hard");
2616
2617 /* mark that this EH session started with reset */
19b72321 2618 ehc->last_reset = jiffies;
932648b0
TH
2619 if (reset == hardreset)
2620 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2621 else
2622 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
022bdb07 2623
b1c72916
TH
2624 rc = ata_do_reset(link, reset, classes, deadline, true);
2625 if (rc && rc != -EAGAIN) {
2626 failed_link = link;
5dbfc9cb 2627 goto fail;
b1c72916
TH
2628 }
2629
2630 /* hardreset slave link if existent */
2631 if (slave && reset == hardreset) {
2632 int tmp;
2633
2634 if (verbose)
2635 ata_link_printk(slave, KERN_INFO,
2636 "hard resetting link\n");
2637
2638 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2639 tmp = ata_do_reset(slave, reset, classes, deadline,
2640 false);
2641 switch (tmp) {
2642 case -EAGAIN:
2643 rc = -EAGAIN;
2644 case 0:
2645 break;
2646 default:
2647 failed_link = slave;
2648 rc = tmp;
2649 goto fail;
2650 }
2651 }
022bdb07 2652
b1c72916 2653 /* perform follow-up SRST if necessary */
932648b0 2654 if (reset == hardreset &&
5dbfc9cb 2655 ata_eh_followup_srst_needed(link, rc, classes)) {
932648b0 2656 reset = softreset;
022bdb07 2657
932648b0
TH
2658 if (!reset) {
2659 ata_link_printk(link, KERN_ERR,
2660 "follow-up softreset required "
2661 "but no softreset avaliable\n");
b1c72916 2662 failed_link = link;
932648b0
TH
2663 rc = -EINVAL;
2664 goto fail;
2665 }
664faf09 2666
932648b0 2667 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
b1c72916 2668 rc = ata_do_reset(link, reset, classes, deadline, true);
fe2c4d01
TH
2669 if (rc) {
2670 failed_link = link;
2671 goto fail;
2672 }
664faf09 2673 }
932648b0
TH
2674 } else {
2675 if (verbose)
2676 ata_link_printk(link, KERN_INFO, "no reset method "
2677 "available, skipping reset\n");
2678 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2679 lflags |= ATA_LFLAG_ASSUME_ATA;
664faf09
TH
2680 }
2681
932648b0
TH
2682 /*
2683 * Post-reset processing
2684 */
1eca4365 2685 ata_for_each_dev(dev, link, ALL) {
416dc9ed
TH
2686 /* After the reset, the device state is PIO 0 and the
2687 * controller state is undefined. Reset also wakes up
2688 * drives from sleeping mode.
2689 */
2690 dev->pio_mode = XFER_PIO_0;
2691 dev->flags &= ~ATA_DFLAG_SLEEPING;
31daabda 2692
3b761d3d
TH
2693 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2694 continue;
2695
2696 /* apply class override */
2697 if (lflags & ATA_LFLAG_ASSUME_ATA)
2698 classes[dev->devno] = ATA_DEV_ATA;
2699 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2700 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
022bdb07
TH
2701 }
2702
416dc9ed
TH
2703 /* record current link speed */
2704 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2705 link->sata_spd = (sstatus >> 4) & 0xf;
b1c72916
TH
2706 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2707 slave->sata_spd = (sstatus >> 4) & 0xf;
008a7896 2708
dc98c32c
TH
2709 /* thaw the port */
2710 if (ata_is_host_link(link))
2711 ata_eh_thaw_port(ap);
2712
f046519f
TH
2713 /* postreset() should clear hardware SError. Although SError
2714 * is cleared during link resume, clearing SError here is
2715 * necessary as some PHYs raise hotplug events after SRST.
2716 * This introduces race condition where hotplug occurs between
2717 * reset and here. This race is mediated by cross checking
2718 * link onlineness and classification result later.
2719 */
b1c72916 2720 if (postreset) {
416dc9ed 2721 postreset(link, classes);
b1c72916
TH
2722 if (slave)
2723 postreset(slave, classes);
2724 }
20952b69 2725
1e641060
TH
2726 /*
2727 * Some controllers can't be frozen very well and may set
2728 * spuruious error conditions during reset. Clear accumulated
2729 * error information. As reset is the final recovery action,
2730 * nothing is lost by doing this.
2731 */
f046519f 2732 spin_lock_irqsave(link->ap->lock, flags);
1e641060 2733 memset(&link->eh_info, 0, sizeof(link->eh_info));
b1c72916 2734 if (slave)
1e641060
TH
2735 memset(&slave->eh_info, 0, sizeof(link->eh_info));
2736 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
f046519f
TH
2737 spin_unlock_irqrestore(link->ap->lock, flags);
2738
3b761d3d
TH
2739 /*
2740 * Make sure onlineness and classification result correspond.
f046519f
TH
2741 * Hotplug could have happened during reset and some
2742 * controllers fail to wait while a drive is spinning up after
2743 * being hotplugged causing misdetection. By cross checking
3b761d3d
TH
2744 * link on/offlineness and classification result, those
2745 * conditions can be reliably detected and retried.
f046519f 2746 */
b1c72916 2747 nr_unknown = 0;
1eca4365 2748 ata_for_each_dev(dev, link, ALL) {
3b761d3d
TH
2749 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2750 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2751 ata_dev_printk(dev, KERN_DEBUG, "link online "
2752 "but device misclassifed\n");
2753 classes[dev->devno] = ATA_DEV_NONE;
b1c72916 2754 nr_unknown++;
3b761d3d
TH
2755 }
2756 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2757 if (ata_class_enabled(classes[dev->devno]))
2758 ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2759 "clearing class %d to NONE\n",
2760 classes[dev->devno]);
2761 classes[dev->devno] = ATA_DEV_NONE;
2762 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2763 ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2764 "clearing UNKNOWN to NONE\n");
2765 classes[dev->devno] = ATA_DEV_NONE;
b1c72916 2766 }
f046519f
TH
2767 }
2768
b1c72916 2769 if (classify && nr_unknown) {
f046519f
TH
2770 if (try < max_tries) {
2771 ata_link_printk(link, KERN_WARNING, "link online but "
3b761d3d
TH
2772 "%d devices misclassified, retrying\n",
2773 nr_unknown);
b1c72916 2774 failed_link = link;
f046519f
TH
2775 rc = -EAGAIN;
2776 goto fail;
2777 }
2778 ata_link_printk(link, KERN_WARNING,
3b761d3d
TH
2779 "link online but %d devices misclassified, "
2780 "device detection might fail\n", nr_unknown);
f046519f
TH
2781 }
2782
416dc9ed 2783 /* reset successful, schedule revalidation */
cf480626 2784 ata_eh_done(link, NULL, ATA_EH_RESET);
b1c72916
TH
2785 if (slave)
2786 ata_eh_done(slave, NULL, ATA_EH_RESET);
19b72321 2787 ehc->last_reset = jiffies; /* update to completion time */
416dc9ed 2788 ehc->i.action |= ATA_EH_REVALIDATE;
ae791c05 2789
416dc9ed 2790 rc = 0;
fccb6ea5
TH
2791 out:
2792 /* clear hotplug flag */
2793 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
b1c72916
TH
2794 if (slave)
2795 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
afaa5c37
TH
2796
2797 spin_lock_irqsave(ap->lock, flags);
2798 ap->pflags &= ~ATA_PFLAG_RESETTING;
2799 spin_unlock_irqrestore(ap->lock, flags);
2800
022bdb07 2801 return rc;
416dc9ed
TH
2802
2803 fail:
5958e302
TH
2804 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2805 if (!ata_is_host_link(link) &&
2806 sata_scr_read(link, SCR_STATUS, &sstatus))
2807 rc = -ERESTART;
2808
416dc9ed
TH
2809 if (rc == -ERESTART || try >= max_tries)
2810 goto out;
2811
2812 now = jiffies;
2813 if (time_before(now, deadline)) {
2814 unsigned long delta = deadline - now;
2815
b1c72916 2816 ata_link_printk(failed_link, KERN_WARNING,
0a2c0f56
TH
2817 "reset failed (errno=%d), retrying in %u secs\n",
2818 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
416dc9ed
TH
2819
2820 while (delta)
2821 delta = schedule_timeout_uninterruptible(delta);
2822 }
2823
b1c72916 2824 if (try == max_tries - 1) {
a07d499b 2825 sata_down_spd_limit(link, 0);
b1c72916 2826 if (slave)
a07d499b 2827 sata_down_spd_limit(slave, 0);
b1c72916 2828 } else if (rc == -EPIPE)
a07d499b 2829 sata_down_spd_limit(failed_link, 0);
b1c72916 2830
416dc9ed
TH
2831 if (hardreset)
2832 reset = hardreset;
2833 goto retry;
022bdb07
TH
2834}
2835
45fabbb7
EO
2836static inline void ata_eh_pull_park_action(struct ata_port *ap)
2837{
2838 struct ata_link *link;
2839 struct ata_device *dev;
2840 unsigned long flags;
2841
2842 /*
2843 * This function can be thought of as an extended version of
2844 * ata_eh_about_to_do() specially crafted to accommodate the
2845 * requirements of ATA_EH_PARK handling. Since the EH thread
2846 * does not leave the do {} while () loop in ata_eh_recover as
2847 * long as the timeout for a park request to *one* device on
2848 * the port has not expired, and since we still want to pick
2849 * up park requests to other devices on the same port or
2850 * timeout updates for the same device, we have to pull
2851 * ATA_EH_PARK actions from eh_info into eh_context.i
2852 * ourselves at the beginning of each pass over the loop.
2853 *
2854 * Additionally, all write accesses to &ap->park_req_pending
2855 * through INIT_COMPLETION() (see below) or complete_all()
2856 * (see ata_scsi_park_store()) are protected by the host lock.
2857 * As a result we have that park_req_pending.done is zero on
2858 * exit from this function, i.e. when ATA_EH_PARK actions for
2859 * *all* devices on port ap have been pulled into the
2860 * respective eh_context structs. If, and only if,
2861 * park_req_pending.done is non-zero by the time we reach
2862 * wait_for_completion_timeout(), another ATA_EH_PARK action
2863 * has been scheduled for at least one of the devices on port
2864 * ap and we have to cycle over the do {} while () loop in
2865 * ata_eh_recover() again.
2866 */
2867
2868 spin_lock_irqsave(ap->lock, flags);
2869 INIT_COMPLETION(ap->park_req_pending);
1eca4365
TH
2870 ata_for_each_link(link, ap, EDGE) {
2871 ata_for_each_dev(dev, link, ALL) {
45fabbb7
EO
2872 struct ata_eh_info *ehi = &link->eh_info;
2873
2874 link->eh_context.i.dev_action[dev->devno] |=
2875 ehi->dev_action[dev->devno] & ATA_EH_PARK;
2876 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2877 }
2878 }
2879 spin_unlock_irqrestore(ap->lock, flags);
2880}
2881
/**
 *	ata_eh_park_issue_cmd - issue or revert a head-unload (park) command
 *	@dev: target ATA device
 *	@park: non-zero to park (unload heads), zero to issue a benign
 *	       CHECK POWER MODE instead (used when unparking)
 *
 *	When parking, IDLE IMMEDIATE is issued with the UNLOAD feature
 *	signature in the taskfile (feature 0x44, LBA 0x554e4c — see the
 *	ATA spec's IDLE IMMEDIATE with UNLOAD FEATURE; TODO confirm exact
 *	clause).  On success the device is expected to return 0xc4 in
 *	lbal; anything else is treated as a failed unload and the devno
 *	bit is cleared from ehc->unloaded_mask again.
 *
 *	LOCKING:
 *	EH context (issues an internal command, may sleep).
 */
static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	if (park) {
		/* mark parked before issuing so EH sees consistent state */
		ehc->unloaded_mask |= 1 << dev->devno;
		tf.command = ATA_CMD_IDLEIMMEDIATE;
		tf.feature = 0x44;
		tf.lbal = 0x4c;
		tf.lbam = 0x4e;
		tf.lbah = 0x55;
	} else {
		/* unpark: any innocuous command spins the heads back */
		ehc->unloaded_mask &= ~(1 << dev->devno);
		tf.command = ATA_CMD_CHK_POWER;
	}

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (park && (err_mask || tf.lbal != 0xc4)) {
		/* 0xc4 in lbal acknowledges a successful unload */
		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
		ehc->unloaded_mask &= ~(1 << dev->devno);
	}
}
2909
/**
 *	ata_eh_revalidate_and_attach - revalidate known devices, attach new ones
 *	@link: link to operate on
 *	@r_failed_dev: out parameter for the device that failed
 *
 *	Re-IDENTIFY devices that requested ATA_EH_REVALIDATE and read
 *	IDs for newly appearing devices, then configure everything that
 *	was successfully identified.  Identification runs in reverse
 *	device order, configuration in forward order (see the comments
 *	below for why).
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise (with *@r_failed_dev set).
 */
static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;	/* devno bits of freshly-identified devices */
	unsigned long flags;
	int rc = 0;

	DPRINTK("ENTER\n");

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_for_each_dev(dev, link, ALL_REVERSE) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			/* PMPs are revalidated by PMP EH, never here */
			WARN_ON(dev->class == ATA_DEV_PMP);

			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan_device() here */
			schedule_work(&(ap->scsi_rescan_task));
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* Temporarily set dev->class, it will be
			 * permanently set once all configurations are
			 * complete.  This is necessary because new
			 * device configuration is done in two
			 * separate loops.
			 */
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);

			/* read_id might have changed class, store and reset */
			ehc->classes[dev->devno] = dev->class;
			dev->class = ATA_DEV_UNKNOWN;

			switch (rc) {
			case 0:
				/* clear error info accumulated during probe */
				ata_ering_clear(&dev->ering);
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was issued to non-existent
				 * device.  No need to reset.  Just
				 * thaw and ignore the device.
				 */
				ata_eh_thaw_port(ap);
				break;
			default:
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		/* now the class set temporarily above becomes permanent */
		dev->class = ehc->classes[dev->devno];

		/* newly attached PMPs are configured by PMP EH */
		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			/* roll back class so the device isn't half-attached */
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	*r_failed_dev = dev;
	DPRINTK("EXIT rc=%d\n", rc);
	return rc;
}
3038
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	Also maintains ATA_DFLAG_DUBIOUS_XFER: the flag is expected to
 *	have been cleared by a verified data transfer before this is
 *	called, and is set again for any device whose transfer mode or
 *	NCQ state changed here.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc;

	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
			struct ata_ering_entry *ent;

			ent = ata_ering_top(&dev->ering);
			if (ent)
				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
		}
	}

	/* has private set_mode? */
	if (ap->ops->set_mode)
		rc = ap->ops->set_mode(link, r_failed_dev);
	else
		rc = ata_do_set_mode(link, r_failed_dev);

	/* if transfer mode has changed, set DUBIOUS_XFER on device */
	ata_for_each_dev(dev, link, ENABLED) {
		struct ata_eh_context *ehc = &link->eh_context;
		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));

		if (dev->xfer_mode != saved_xfer_mode ||
		    ata_ncq_enabled(dev) != saved_ncq)
			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
	}

	return rc;
}
3090
11fc33da
TH
3091/**
3092 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3093 * @dev: ATAPI device to clear UA for
3094 *
3095 * Resets and other operations can make an ATAPI device raise
3096 * UNIT ATTENTION which causes the next operation to fail. This
3097 * function clears UA.
3098 *
3099 * LOCKING:
3100 * EH context (may sleep).
3101 *
3102 * RETURNS:
3103 * 0 on success, -errno on failure.
3104 */
3105static int atapi_eh_clear_ua(struct ata_device *dev)
3106{
3107 int i;
3108
3109 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
b5357081 3110 u8 *sense_buffer = dev->link->ap->sector_buf;
11fc33da
TH
3111 u8 sense_key = 0;
3112 unsigned int err_mask;
3113
3114 err_mask = atapi_eh_tur(dev, &sense_key);
3115 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3116 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3117 "failed (err_mask=0x%x)\n", err_mask);
3118 return -EIO;
3119 }
3120
3121 if (!err_mask || sense_key != UNIT_ATTENTION)
3122 return 0;
3123
3124 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3125 if (err_mask) {
3126 ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3127 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3128 return -EIO;
3129 }
3130 }
3131
3132 ata_dev_printk(dev, KERN_WARNING,
3133 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3134
3135 return 0;
3136}
3137
/**
 *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
 *	@dev: ATA device which may need FLUSH retry
 *
 *	If @dev failed FLUSH, it needs to be reported upper layer
 *	immediately as it means that @dev failed to remap and already
 *	lost at least a sector and further FLUSH retrials won't make
 *	any difference to the lost sector.  However, if FLUSH failed
 *	for other reasons, for example transmission error, FLUSH needs
 *	to be retried.
 *
 *	This function determines whether FLUSH failure retry is
 *	necessary and performs it if so.
 *
 *	LOCKING:
 *	EH context (issues an internal command, may sleep).
 *
 *	RETURNS:
 *	0 if EH can continue, -errno if EH needs to be repeated.
 */
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc = 0;

	/* did flush fail for this device? */
	if (!ata_tag_valid(link->active_tag))
		return 0;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
			       qc->tf.command != ATA_CMD_FLUSH))
		return 0;

	/* if the device failed it, it should be reported to upper layers */
	if (qc->err_mask & AC_ERR_DEV)
		return 0;

	/* flush failed for some other reason, give it another shot */
	ata_tf_init(dev, &tf);

	/* reissue the same FLUSH/FLUSH EXT opcode the qc used */
	tf.command = qc->tf.command;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
		       tf.command, qc->err_mask);

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask) {
		/*
		 * FLUSH is complete but there's no way to
		 * successfully complete a failed command from EH.
		 * Making sure retry is allowed at least once and
		 * retrying it should do the trick - whatever was in
		 * the cache is already on the platter and this won't
		 * cause infinite loop.
		 */
		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
	} else {
		ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
			       err_mask);
		rc = -EIO;

		/* if device failed it, report it to upper layers */
		if (err_mask & AC_ERR_DEV) {
			qc->err_mask |= AC_ERR_DEV;
			qc->result_tf = tf;
			if (!(ap->pflags & ATA_PFLAG_FROZEN))
				rc = 0;
		}
	}
	return rc;
}
3213
0260731f 3214static int ata_link_nr_enabled(struct ata_link *link)
022bdb07 3215{
f58229f8
TH
3216 struct ata_device *dev;
3217 int cnt = 0;
022bdb07 3218
1eca4365
TH
3219 ata_for_each_dev(dev, link, ENABLED)
3220 cnt++;
022bdb07
TH
3221 return cnt;
3222}
3223
0260731f 3224static int ata_link_nr_vacant(struct ata_link *link)
084fe639 3225{
f58229f8
TH
3226 struct ata_device *dev;
3227 int cnt = 0;
084fe639 3228
1eca4365 3229 ata_for_each_dev(dev, link, ALL)
f58229f8 3230 if (dev->class == ATA_DEV_UNKNOWN)
084fe639
TH
3231 cnt++;
3232 return cnt;
3233}
3234
0260731f 3235static int ata_eh_skip_recovery(struct ata_link *link)
084fe639 3236{
672b2d65 3237 struct ata_port *ap = link->ap;
0260731f 3238 struct ata_eh_context *ehc = &link->eh_context;
f58229f8 3239 struct ata_device *dev;
084fe639 3240
f9df58cb
TH
3241 /* skip disabled links */
3242 if (link->flags & ATA_LFLAG_DISABLED)
3243 return 1;
3244
e2f3d75f
TH
3245 /* skip if explicitly requested */
3246 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3247 return 1;
3248
672b2d65
TH
3249 /* thaw frozen port and recover failed devices */
3250 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3251 return 0;
3252
3253 /* reset at least once if reset is requested */
3254 if ((ehc->i.action & ATA_EH_RESET) &&
3255 !(ehc->i.flags & ATA_EHI_DID_RESET))
084fe639
TH
3256 return 0;
3257
3258 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1eca4365 3259 ata_for_each_dev(dev, link, ALL) {
084fe639
TH
3260 if (dev->class == ATA_DEV_UNKNOWN &&
3261 ehc->classes[dev->devno] != ATA_DEV_NONE)
3262 return 0;
3263 }
3264
3265 return 1;
3266}
3267
c2c7a89c
TH
3268static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3269{
3270 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3271 u64 now = get_jiffies_64();
3272 int *trials = void_arg;
3273
3274 if (ent->timestamp < now - min(now, interval))
3275 return -1;
3276
3277 (*trials)++;
3278 return 0;
3279}
3280
/**
 *	ata_eh_schedule_probe - schedule probing for @dev if requested
 *	@dev: device to consider for probing
 *
 *	If a probe is pending for @dev and hasn't already been
 *	performed in this EH session, detach and re-init the device,
 *	request a reset, and record the probe trial on the error ring.
 *
 *	RETURNS:
 *	1 if a probe was scheduled, 0 otherwise.
 */
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	/* nothing to do unless requested and not yet done this session */
	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	/* forget previous transfer mode so it is renegotiated from scratch */
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* Record and count probe trials on the ering.  The specific
	 * error mask used is irrelevant.  Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced to 1.5Gbps.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}
3319
/**
 *	ata_eh_handle_dev_fail - bookkeeping after a device failed recovery
 *	@dev: device that failed
 *	@err: -errno the recovery step returned
 *
 *	Consume a retry (unless @err is -EAGAIN), apply error-specific
 *	penalties, and either disable/re-probe the device or request
 *	another reset.
 *
 *	RETURNS:
 *	1 if the device was disabled (possibly with a probe scheduled),
 *	0 if recovery should be retried with a reset.
 */
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		/* fallthrough - -ENODEV also gets the -EINVAL penalty */
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
		/* fallthrough - and the -EIO speed-down treatment */
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			/* fresh start: full tries and cleared timeout history */
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
3369
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, yin and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context.  This function executes all the operations
 *	with appropriate retrials and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int nr_failed_devs;
	int rc;
	unsigned long flags, deadline;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;
	nr_failed_devs = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_printk(link, KERN_ERR,
					"reset failed, giving up\n");
			goto out;
		}
	}

	/* park/unpark loop: issue head-unload commands and wait until
	 * all park deadlines pass or a new park request arrives
	 * (ap->park_req_pending is completed by ata_scsi_park_store())
	 */
	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				/* track the latest unexpired deadline */
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				/* already parked? */
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
	} while (deadline);
	/* unpark everything that was parked above */
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto dev_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto dev_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto dev_fail;
			}
		}

		/* retry flush if necessary */
		ata_for_each_dev(dev, link, ALL) {
			if (dev->class != ATA_DEV_ATA)
				continue;
			rc = ata_eh_maybe_retry_flush(dev);
			if (rc)
				goto dev_fail;
		}

		/* configure link power saving */
		if (ehc->i.action & ATA_EH_LPM)
			ata_for_each_dev(dev, link, ALL)
				ata_dev_enable_pm(dev, ap->lpm_policy);

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

dev_fail:
		nr_failed_devs++;
		ata_eh_handle_dev_fail(dev, rc);

		if (ap->pflags & ATA_PFLAG_FROZEN) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_failed_devs)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3603
/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			/* no error recorded: complete if sense data is
			 * already valid, otherwise retry with a zeroed
			 * result taskfile
			 */
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}
3649
3650/**
3651 * ata_do_eh - do standard error handling
3652 * @ap: host port to handle error for
a1efdaba 3653 *
f5914a46 3654 * @prereset: prereset method (can be NULL)
022bdb07
TH
3655 * @softreset: softreset method (can be NULL)
3656 * @hardreset: hardreset method (can be NULL)
3657 * @postreset: postreset method (can be NULL)
3658 *
3659 * Perform standard error handling sequence.
3660 *
3661 * LOCKING:
3662 * Kernel thread context (may sleep).
3663 */
f5914a46
TH
3664void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3665 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3666 ata_postreset_fn_t postreset)
022bdb07 3667{
9b1e2658
TH
3668 struct ata_device *dev;
3669 int rc;
3670
3671 ata_eh_autopsy(ap);
3672 ata_eh_report(ap);
3673
3674 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3675 NULL);
3676 if (rc) {
1eca4365 3677 ata_for_each_dev(dev, &ap->link, ALL)
9b1e2658
TH
3678 ata_dev_disable(dev);
3679 }
3680
022bdb07
TH
3681 ata_eh_finish(ap);
3682}
500530f6 3683
a1efdaba
TH
3684/**
3685 * ata_std_error_handler - standard error handler
3686 * @ap: host port to handle error for
3687 *
3688 * Standard error handler
3689 *
3690 * LOCKING:
3691 * Kernel thread context (may sleep).
3692 */
3693void ata_std_error_handler(struct ata_port *ap)
3694{
3695 struct ata_port_operations *ops = ap->ops;
3696 ata_reset_fn_t hardreset = ops->hardreset;
3697
57c9efdf 3698 /* ignore built-in hardreset if SCR access is not available */
fe06e5f9 3699 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
a1efdaba
TH
3700 hardreset = NULL;
3701
3702 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3703}
3704
6ffa01d8 3705#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		/* suspend failed with the port frozen - let EH retry */
		ata_port_schedule_eh(ap);

	/* hand the result back to the waiter, if any */
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return;
}
3762
/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies which doesn't run while
	 * suspended and PHY events during resume isn't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, PMSG_ON);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
6ffa01d8 3818#endif /* CONFIG_PM */