libata: add @ap to ata_wait_register() and introduce ata_msleep()
/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
	ATA_EFLAG_OLD_ER		= (1 << 31),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		= 5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		= 5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	= 3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and retarded devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for retarded devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,	/* ditto */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}

/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}

#ifdef CONFIG_PCI

/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only the name and the offset address are appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}

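/* A minimal sketch (not part of the original file) of how an EH-context
 * caller might combine the two helpers above to escalate timeouts across
 * retries.  ata_try_internal() is a hypothetical wrapper; the real user
 * is the internal command machinery in libata-core.c.
 */
#if 0
static unsigned int ata_try_internal(struct ata_device *dev,
				     struct ata_taskfile *tf, u8 cmd)
{
	/* pick the timeout for the current retry of this command class */
	unsigned long timeout = ata_internal_cmd_timeout(dev, cmd);
	unsigned int err_mask;

	err_mask = ata_exec_internal(dev, tf, NULL, DMA_NONE, NULL, 0,
				     timeout);

	/* on timeout, bump cmd_timeout_idx so the next try waits longer */
	if (err_mask & AC_ERR_TIMEOUT)
		ata_internal_cmd_timed_out(dev, cmd);

	return err_mask;
}
#endif
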
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}

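/* Illustrative sketch (not in the original source): ata_ering_map() walks
 * entries from the most recent backwards until the callback returns
 * non-zero.  A callback counting timeouts could look like this; the
 * helper name is made up for the example.
 */
#if 0
static int count_timeouts_cb(struct ata_ering_entry *ent, void *void_arg)
{
	int *nr_timeouts = void_arg;

	if (ent->err_mask & AC_ERR_TIMEOUT)
		(*nr_timeouts)++;
	return 0;	/* zero means keep walking */
}

/* usage: int n = 0; ata_ering_map(&dev->ering, count_timeouts_cb, &n); */
#endif
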
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (BLK_EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	BLK_EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	BLK_EH_HANDLED or BLK_EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		 * a polled recovery to race the real interrupt handler.
		 *
		 * The lost_interrupt handler checks for any completed but
		 * non-notified command and completes much like an IRQ handler.
		 *
		 * We then fall into the error recovery code which will treat
		 * this as if normal completion won the race.
		 */
		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If we raced normal completion and there is nothing to
	 * recover (nr_timedout == 0), why exactly are we doing error
	 * recovery here?
	 */

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}

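/* Typical usage (a fragment for illustration only; ap and flags are
 * assumed in scope): a sleeping context that needs EH to run and finish
 * before proceeding schedules EH under ap->lock and then waits.
 */
#if 0
	spin_lock_irqsave(ap->lock, flags);
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* returns once EH is neither pending nor in progress */
	ata_port_wait_eh(ap);
#endif
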
static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

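/* How an LLDD interrupt handler typically feeds these helpers (a hedged
 * sketch, not code from this file): describe the error in the link's
 * eh_info, then abort, which fails the active qcs and schedules EH.  The
 * function name and irq_stat layout are made up for illustration.
 */
#if 0
static void example_lldd_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
	ehi->err_mask |= AC_ERR_HSM;
	ehi->action |= ATA_EH_RESET;

	ata_port_abort(ap);	/* fails active qcs and kicks EH */
}
#endif
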
/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}

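/* A hypothetical ->freeze() callback matching the contract described
 * above: mask the port's interrupts so a frozen port stays quiet.  The
 * register name and MMIO layout are invented for this sketch; real
 * drivers use their own controller registers.
 */
#if 0
static void example_port_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = ap->private_data;	/* assumption */

	/* mask all port interrupts while frozen */
	iowrite32(0, port_mmio + EXAMPLE_PORT_IRQ_MASK);
}
#endif
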
/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->retries is decremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}
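
/* Sketch of how an EH recovery path might dispose of a handled qc with
 * the two helpers above (illustrative fragment only; qc is assumed in
 * scope): retry when the qc was marked for retry, otherwise complete it
 * with its result as-is.
 */
#if 0
	if (qc->flags & ATA_QCFLAG_RETRY)
		ata_eh_qc_retry(qc);		/* midlayer will reissue */
	else
		ata_eh_qc_complete(qc);		/* report result as-is */
#endif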

/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	Locking:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}

/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_done - EH action complete
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}

/**
 *	ata_err_string - convert err_mask to descriptive string
 *	@err_mask: error mask to convert to string
 *
 *	Convert @err_mask to descriptive string.  Errors are
 *	prioritized according to severity and only the most severe
 *	error is reported.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	return "unknown error";
}

/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}

/**
 *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *	@dev: target ATAPI device
 *	@r_sense_key: out parameter for sense_key
 *
 *	Perform ATAPI TEST_UNIT_READY.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}

/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}

/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@link: ATA link to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	if (link->lpm_policy != ATA_LPM_MAX_POWER)
		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}

/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all an LLDD has to do
 *	is set AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}

static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}

struct speed_down_verdict_arg {
	u64 since;
	int xfer_ok;
	int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}

1775/**
7d47e8d4 1776 * ata_eh_speed_down_verdict - Determine speed down verdict
022bdb07
TH
1777 * @dev: Device of interest
1778 *
1779 * This function examines error ring of @dev and determines
7d47e8d4
TH
1780 * whether NCQ needs to be turned off, transfer speed should be
1781 * stepped down, or falling back to PIO is necessary.
022bdb07 1782 *
3884f7b0
TH
1783 * ECAT_ATA_BUS : ATA_BUS error for any command
1784 *
1785 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
1786 * IO commands
1787 *
1788 * ECAT_UNK_DEV : Unknown DEV error for IO commands
1789 *
76326ac1
TH
1790 * ECAT_DUBIOUS_* : Identical to above three but occurred while
1791 * data transfer hasn't been verified.
1792 *
3884f7b0
TH
1793 * Verdicts are
1794 *
1795 * NCQ_OFF : Turn off NCQ.
022bdb07 1796 *
3884f7b0
TH
1797 * SPEED_DOWN : Speed down transfer speed but don't fall back
1798 * to PIO.
7d47e8d4 1799 *
3884f7b0 1800 * FALLBACK_TO_PIO : Fall back to PIO.
022bdb07 1801 *
3884f7b0 1802 * Even if multiple verdicts are returned, only one action is
76326ac1
TH
1803 * taken per error. An action triggered by non-DUBIOUS errors
1804 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
1805 * This is to expedite speed down decisions right after device is
1806 * initially configured.
1807 *
1808 * The followings are speed down rules. #1 and #2 deal with
1809 * DUBIOUS errors.
7d47e8d4 1810 *
76326ac1
TH
1811 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
1812 * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
1813 *
1814 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
1815 * occurred during last 5 mins, NCQ_OFF.
1816 *
1817 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
3884f7b0 1818 * ocurred during last 5 mins, FALLBACK_TO_PIO
7d47e8d4 1819 *
76326ac1 1820 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
3884f7b0
TH
1821 * during last 10 mins, NCQ_OFF.
1822 *
76326ac1 1823 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
3884f7b0 1824 * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
7d47e8d4 1825 *
022bdb07
TH
1826 * LOCKING:
1827 * Inherited from caller.
1828 *
1829 * RETURNS:
7d47e8d4 1830 * OR of ATA_EH_SPDN_* flags.
022bdb07 1831 */
7d47e8d4 1832static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
022bdb07 1833{
7d47e8d4
TH
1834 const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
1835 u64 j64 = get_jiffies_64();
1836 struct speed_down_verdict_arg arg;
1837 unsigned int verdict = 0;
022bdb07 1838
3884f7b0 1839 /* scan past 5 mins of error history */
7d47e8d4 1840 memset(&arg, 0, sizeof(arg));
3884f7b0 1841 arg.since = j64 - min(j64, j5mins);
7d47e8d4 1842 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
022bdb07 1843
76326ac1
TH
1844 if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
1845 arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
1846 verdict |= ATA_EH_SPDN_SPEED_DOWN |
1847 ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
1848
1849 if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
1850 arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
1851 verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
1852
3884f7b0
TH
1853 if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1854 arg.nr_errors[ATA_ECAT_TOUT_HSM] +
663f99b8 1855 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
3884f7b0 1856 verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
022bdb07 1857
3884f7b0 1858 /* scan past 10 mins of error history */
022bdb07 1859 memset(&arg, 0, sizeof(arg));
3884f7b0 1860 arg.since = j64 - min(j64, j10mins);
7d47e8d4 1861 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
022bdb07 1862
3884f7b0
TH
1863 if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1864 arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
1865 verdict |= ATA_EH_SPDN_NCQ_OFF;
1866
1867 if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1868 arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
663f99b8 1869 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
3884f7b0 1870 verdict |= ATA_EH_SPDN_SPEED_DOWN;
022bdb07 1871
7d47e8d4 1872 return verdict;
022bdb07
TH
1873}
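
/* Illustrative sketch, not compiled in: the verdict is a bitmask, so
 * several rules can fire at once. ata_eh_speed_down() below checks
 * NCQ_OFF, then SPEED_DOWN, then FALLBACK_TO_PIO, and acts on the
 * first one that applies to the device:
 */
#if 0
	unsigned int verdict = ata_eh_speed_down_verdict(dev);

	if (verdict & ATA_EH_SPDN_NCQ_OFF)
		;	/* rules #2/#4 fired: turn off NCQ */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN)
		;	/* rules #1/#5 fired: lower link/transfer speed */
	if (verdict & ATA_EH_SPDN_FALLBACK_TO_PIO)
		;	/* rules #1/#3 fired: last resort, PATA/SATAPI only */
	if (verdict & ATA_EH_SPDN_KEEP_ERRORS)
		;	/* a DUBIOUS rule fired: don't clear the ering */
#endif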
1874
1875/**
1876 * ata_eh_speed_down - record error and speed down if necessary
1877 * @dev: Failed device
3884f7b0 1878 * @eflags: mask of ATA_EFLAG_* flags
022bdb07
TH
1879 * @err_mask: err_mask of the error
1880 *
1881 * Record error and examine error history to determine whether
1882 * adjusting transmission speed is necessary. It also sets
1883 * transmission limits appropriately if such adjustment is
1884 * necessary.
1885 *
1886 * LOCKING:
1887 * Kernel thread context (may sleep).
1888 *
1889 * RETURNS:
7d47e8d4 1890 * Determined recovery action.
022bdb07 1891 */
3884f7b0
TH
1892static unsigned int ata_eh_speed_down(struct ata_device *dev,
1893 unsigned int eflags, unsigned int err_mask)
022bdb07 1894{
b1c72916 1895 struct ata_link *link = ata_dev_phys_link(dev);
76326ac1 1896 int xfer_ok = 0;
7d47e8d4
TH
1897 unsigned int verdict;
1898 unsigned int action = 0;
1899
1900 /* don't bother if Cat-0 error */
76326ac1 1901 if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
022bdb07
TH
1902 return 0;
1903
1904 /* record error and determine whether speed down is necessary */
3884f7b0 1905 ata_ering_record(&dev->ering, eflags, err_mask);
7d47e8d4 1906 verdict = ata_eh_speed_down_verdict(dev);
022bdb07 1907
7d47e8d4
TH
1908 /* turn off NCQ? */
1909 if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
1910 (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
1911 ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
1912 dev->flags |= ATA_DFLAG_NCQ_OFF;
1913 ata_dev_printk(dev, KERN_WARNING,
1914 "NCQ disabled due to excessive errors\n");
1915 goto done;
1916 }
022bdb07 1917
7d47e8d4
TH
1918 /* speed down? */
1919 if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1920 /* speed down SATA link speed if possible */
a07d499b 1921 if (sata_down_spd_limit(link, 0) == 0) {
cf480626 1922 action |= ATA_EH_RESET;
7d47e8d4
TH
1923 goto done;
1924 }
022bdb07 1925
7d47e8d4
TH
1926 /* lower transfer mode */
1927 if (dev->spdn_cnt < 2) {
1928 static const int dma_dnxfer_sel[] =
1929 { ATA_DNXFER_DMA, ATA_DNXFER_40C };
1930 static const int pio_dnxfer_sel[] =
1931 { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
1932 int sel;
1933
1934 if (dev->xfer_shift != ATA_SHIFT_PIO)
1935 sel = dma_dnxfer_sel[dev->spdn_cnt];
1936 else
1937 sel = pio_dnxfer_sel[dev->spdn_cnt];
1938
1939 dev->spdn_cnt++;
1940
1941 if (ata_down_xfermask_limit(dev, sel) == 0) {
cf480626 1942 action |= ATA_EH_RESET;
7d47e8d4
TH
1943 goto done;
1944 }
1945 }
1946 }
1947
1948 /* Fall back to PIO? Slowing down to PIO is meaningless for
663f99b8 1949 * SATA ATA devices. Consider it only for PATA and SATAPI.
7d47e8d4
TH
1950 */
1951 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
663f99b8 1952 (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
7d47e8d4
TH
1953 (dev->xfer_shift != ATA_SHIFT_PIO)) {
1954 if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
1955 dev->spdn_cnt = 0;
cf480626 1956 action |= ATA_EH_RESET;
7d47e8d4
TH
1957 goto done;
1958 }
1959 }
022bdb07 1960
022bdb07 1961 return 0;
7d47e8d4
TH
1962 done:
1963 /* device has been slowed down, blow error history */
76326ac1
TH
1964 if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
1965 ata_ering_clear(&dev->ering);
7d47e8d4 1966 return action;
022bdb07
TH
1967}
1968
1969/**
9b1e2658
TH
1970 * ata_eh_link_autopsy - analyze error and determine recovery action
1971 * @link: host link to perform autopsy on
022bdb07 1972 *
0260731f
TH
1973 * Analyze why @link failed and determine which recovery actions
1974 * are needed. This function also sets more detailed AC_ERR_*
1975 * values and fills sense data for ATAPI CHECK SENSE.
022bdb07
TH
1976 *
1977 * LOCKING:
1978 * Kernel thread context (may sleep).
1979 */
9b1e2658 1980static void ata_eh_link_autopsy(struct ata_link *link)
022bdb07 1981{
0260731f 1982 struct ata_port *ap = link->ap;
936fd732 1983 struct ata_eh_context *ehc = &link->eh_context;
dfcc173d 1984 struct ata_device *dev;
3884f7b0
TH
1985 unsigned int all_err_mask = 0, eflags = 0;
1986 int tag;
022bdb07
TH
1987 u32 serror;
1988 int rc;
1989
1990 DPRINTK("ENTER\n");
1991
1cdaf534
TH
1992 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1993 return;
1994
022bdb07 1995 /* obtain and analyze SError */
936fd732 1996 rc = sata_scr_read(link, SCR_ERROR, &serror);
022bdb07
TH
1997 if (rc == 0) {
1998 ehc->i.serror |= serror;
0260731f 1999 ata_eh_analyze_serror(link);
4e57c517 2000 } else if (rc != -EOPNOTSUPP) {
cf480626 2001 /* SError read failed, force reset and probing */
b558eddd 2002 ehc->i.probe_mask |= ATA_ALL_DEVICES;
cf480626 2003 ehc->i.action |= ATA_EH_RESET;
4e57c517
TH
2004 ehc->i.err_mask |= AC_ERR_OTHER;
2005 }
022bdb07 2006
e8ee8451 2007 /* analyze NCQ failure */
0260731f 2008 ata_eh_analyze_ncq_error(link);
e8ee8451 2009
022bdb07
TH
2010 /* any real error trumps AC_ERR_OTHER */
2011 if (ehc->i.err_mask & ~AC_ERR_OTHER)
2012 ehc->i.err_mask &= ~AC_ERR_OTHER;
2013
2014 all_err_mask |= ehc->i.err_mask;
2015
2016 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2017 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2018
b1c72916
TH
2019 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2020 ata_dev_phys_link(qc->dev) != link)
022bdb07
TH
2021 continue;
2022
2023 /* inherit upper level err_mask */
2024 qc->err_mask |= ehc->i.err_mask;
2025
022bdb07 2026 /* analyze TF */
4528e4da 2027 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
022bdb07
TH
2028
2029 /* DEV errors are probably spurious in case of ATA_BUS error */
2030 if (qc->err_mask & AC_ERR_ATA_BUS)
2031 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2032 AC_ERR_INVALID);
2033
2034 /* any real error trumps unknown error */
2035 if (qc->err_mask & ~AC_ERR_OTHER)
2036 qc->err_mask &= ~AC_ERR_OTHER;
2037
2038 /* SENSE_VALID trumps dev/unknown error and revalidation */
f90f0828 2039 if (qc->flags & ATA_QCFLAG_SENSE_VALID)
022bdb07 2040 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
022bdb07 2041
03faab78 2042 /* determine whether the command is worth retrying */
534ead70
TH
2043 if (qc->flags & ATA_QCFLAG_IO ||
2044 (!(qc->err_mask & AC_ERR_INVALID) &&
2045 qc->err_mask != AC_ERR_DEV))
03faab78
TH
2046 qc->flags |= ATA_QCFLAG_RETRY;
2047
022bdb07 2048 /* accumulate error info */
4528e4da 2049 ehc->i.dev = qc->dev;
022bdb07
TH
2050 all_err_mask |= qc->err_mask;
2051 if (qc->flags & ATA_QCFLAG_IO)
3884f7b0 2052 eflags |= ATA_EFLAG_IS_IO;
022bdb07
TH
2053 }
2054
a20f33ff 2055 /* enforce default EH actions */
b51e9e5d 2056 if (ap->pflags & ATA_PFLAG_FROZEN ||
a20f33ff 2057 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
cf480626 2058 ehc->i.action |= ATA_EH_RESET;
3884f7b0
TH
2059 else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2060 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
4528e4da 2061 ehc->i.action |= ATA_EH_REVALIDATE;
022bdb07 2062
dfcc173d
TH
2063 /* If we have offending qcs and the associated failed device,
2064 * perform per-dev EH action only on the offending device.
2065 */
4528e4da 2066 if (ehc->i.dev) {
4528e4da
TH
2067 ehc->i.dev_action[ehc->i.dev->devno] |=
2068 ehc->i.action & ATA_EH_PERDEV_MASK;
2069 ehc->i.action &= ~ATA_EH_PERDEV_MASK;
47005f25
TH
2070 }
2071
2695e366
TH
2072 /* propagate timeout to host link */
2073 if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2074 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2075
2076 /* record error and consider speeding down */
dfcc173d 2077 dev = ehc->i.dev;
2695e366
TH
2078 if (!dev && ((ata_link_max_devices(link) == 1 &&
2079 ata_dev_enabled(link->device))))
2080 dev = link->device;
dfcc173d 2081
76326ac1
TH
2082 if (dev) {
2083 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2084 eflags |= ATA_EFLAG_DUBIOUS_XFER;
3884f7b0 2085 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
76326ac1 2086 }
dfcc173d 2087
022bdb07
TH
2088 DPRINTK("EXIT\n");
2089}
2090
2091/**
9b1e2658
TH
2092 * ata_eh_autopsy - analyze error and determine recovery action
2093 * @ap: host port to perform autopsy on
2094 *
2095 * Analyze all links of @ap and determine why they failed and
2096 * which recovery actions are needed.
2097 *
2098 * LOCKING:
2099 * Kernel thread context (may sleep).
2100 */
fb7fd614 2101void ata_eh_autopsy(struct ata_port *ap)
9b1e2658
TH
2102{
2103 struct ata_link *link;
2104
1eca4365 2105 ata_for_each_link(link, ap, EDGE)
9b1e2658 2106 ata_eh_link_autopsy(link);
2695e366 2107
b1c72916
TH
2108 /* Handle the frigging slave link. Autopsy is done similarly
2109 * but actions and flags are transferred over to the master
2110 * link and handled from there.
2111 */
2112 if (ap->slave_link) {
2113 struct ata_eh_context *mehc = &ap->link.eh_context;
2114 struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2115
848e4c68
TH
2116 /* transfer control flags from master to slave */
2117 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2118
2119 /* perform autopsy on the slave link */
b1c72916
TH
2120 ata_eh_link_autopsy(ap->slave_link);
2121
848e4c68 2122 /* transfer actions from slave to master and clear slave */
b1c72916
TH
2123 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2124 mehc->i.action |= sehc->i.action;
2125 mehc->i.dev_action[1] |= sehc->i.dev_action[1];
2126 mehc->i.flags |= sehc->i.flags;
2127 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2128 }
2129
2695e366
TH
2130 /* Autopsy of fanout ports can affect host link autopsy.
2131 * Perform host link autopsy last.
2132 */
071f44b1 2133 if (sata_pmp_attached(ap))
2695e366 2134 ata_eh_link_autopsy(&ap->link);
9b1e2658
TH
2135}
2136
6521148c
RH
2137/**
2138 * ata_get_cmd_descript - get description for ATA command
2139 * @command: ATA command code to get description for
2140 *
2141 * Return a textual description of the given command, or NULL if the
2142 * command is not known.
2143 *
2144 * LOCKING:
2145 * None
2146 */
2147const char *ata_get_cmd_descript(u8 command)
2148{
2149#ifdef CONFIG_ATA_VERBOSE_ERROR
2150 static const struct
2151 {
2152 u8 command;
2153 const char *text;
2154 } cmd_descr[] = {
2155 { ATA_CMD_DEV_RESET, "DEVICE RESET" },
2156 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
2157 { ATA_CMD_STANDBY, "STANDBY" },
2158 { ATA_CMD_IDLE, "IDLE" },
2159 { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
2160 { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
2161 { ATA_CMD_NOP, "NOP" },
2162 { ATA_CMD_FLUSH, "FLUSH CACHE" },
2163 { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
2164 { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
2165 { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
2166 { ATA_CMD_SERVICE, "SERVICE" },
2167 { ATA_CMD_READ, "READ DMA" },
2168 { ATA_CMD_READ_EXT, "READ DMA EXT" },
2169 { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
2170 { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
2171 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
2172 { ATA_CMD_WRITE, "WRITE DMA" },
2173 { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
2174 { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
2175 { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
2176 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2177 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
2178 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2179 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
2180 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
2181 { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
2182 { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
2183 { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
2184 { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
2185 { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
2186 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
2187 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
2188 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
2189 { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
2190 { ATA_CMD_SET_FEATURES, "SET FEATURES" },
2191 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
2192 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
2193 { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
2194 { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
2195 { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
2196 { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
2197 { ATA_CMD_SLEEP, "SLEEP" },
2198 { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
2199 { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
2200 { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
2201 { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
2202 { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
2203 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
2204 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
2205 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
2206 { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
2207 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
2208 { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
2209 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
2210 { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
2211 { ATA_CMD_PMP_READ, "READ BUFFER" },
2212 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
2213 { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
2214 { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
2215 { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
2216 { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
2217 { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
2218 { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
2219 { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
2220 { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
2221 { ATA_CMD_SMART, "SMART" },
2222 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
2223 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
acad7627 2224 { ATA_CMD_DSM, "DATA SET MANAGEMENT" },
6521148c
RH
2225 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
2226 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
2227 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
2228 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
2229 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
2230 { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
2231 { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
2232 { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
2233 { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
2234 { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
2235 { ATA_CMD_RESTORE, "RECALIBRATE" },
2236 { 0, NULL } /* terminate list */
2237 };
2238
2239 unsigned int i;
2240 for (i = 0; cmd_descr[i].text; i++)
2241 if (cmd_descr[i].command == command)
2242 return cmd_descr[i].text;
2243#endif
2244
2245 return NULL;
2246}
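
/* Example usage, illustrative sketch only: */
#if 0
	const char *descr = ata_get_cmd_descript(ATA_CMD_FPDMA_READ);

	if (descr)	/* "READ FPDMA QUEUED" with CONFIG_ATA_VERBOSE_ERROR */
		printk(KERN_DEBUG "failed command: %s\n", descr);
	else		/* NULL for unknown commands or verbose errors off */
		printk(KERN_DEBUG "failed command: (unknown)\n");
#endif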
2247
9b1e2658
TH
2248/**
2249 * ata_eh_link_report - report error handling to user
0260731f 2250 * @link: ATA link EH is going on
022bdb07
TH
2251 *
2252 * Report EH to user.
2253 *
2254 * LOCKING:
2255 * None.
2256 */
9b1e2658 2257static void ata_eh_link_report(struct ata_link *link)
022bdb07 2258{
0260731f
TH
2259 struct ata_port *ap = link->ap;
2260 struct ata_eh_context *ehc = &link->eh_context;
022bdb07 2261 const char *frozen, *desc;
a1e10f7e 2262 char tries_buf[6];
022bdb07
TH
2263 int tag, nr_failed = 0;
2264
94ff3d54
TH
2265 if (ehc->i.flags & ATA_EHI_QUIET)
2266 return;
2267
022bdb07
TH
2268 desc = NULL;
2269 if (ehc->i.desc[0] != '\0')
2270 desc = ehc->i.desc;
2271
2272 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2273 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2274
b1c72916
TH
2275 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2276 ata_dev_phys_link(qc->dev) != link ||
e027bd36
TH
2277 ((qc->flags & ATA_QCFLAG_QUIET) &&
2278 qc->err_mask == AC_ERR_DEV))
022bdb07
TH
2279 continue;
2280 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2281 continue;
2282
2283 nr_failed++;
2284 }
2285
2286 if (!nr_failed && !ehc->i.err_mask)
2287 return;
2288
2289 frozen = "";
b51e9e5d 2290 if (ap->pflags & ATA_PFLAG_FROZEN)
022bdb07
TH
2291 frozen = " frozen";
2292
a1e10f7e
TH
2293 memset(tries_buf, 0, sizeof(tries_buf));
2294 if (ap->eh_tries < ATA_EH_MAX_TRIES)
2295 snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2296 ap->eh_tries);
2297
022bdb07 2298 if (ehc->i.dev) {
e8ee8451 2299 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
a1e10f7e
TH
2300 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2301 ehc->i.err_mask, link->sactive, ehc->i.serror,
2302 ehc->i.action, frozen, tries_buf);
022bdb07 2303 if (desc)
b64bbc39 2304 ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
022bdb07 2305 } else {
0260731f 2306 ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
a1e10f7e
TH
2307 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2308 ehc->i.err_mask, link->sactive, ehc->i.serror,
2309 ehc->i.action, frozen, tries_buf);
022bdb07 2310 if (desc)
0260731f 2311 ata_link_printk(link, KERN_ERR, "%s\n", desc);
022bdb07
TH
2312 }
2313
6521148c 2314#ifdef CONFIG_ATA_VERBOSE_ERROR
1333e194 2315 if (ehc->i.serror)
da0e21d3 2316 ata_link_printk(link, KERN_ERR,
1333e194
RH
2317 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2318 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2319 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2320 ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2321 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2322 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2323 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2324 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2325 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2326 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2327 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2328 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2329 ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2330 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2331 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2332 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2333 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2dcb407e 2334 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
6521148c 2335#endif
1333e194 2336
022bdb07
TH
2337 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2338 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
8a937581 2339 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
abb6a889
TH
2340 const u8 *cdb = qc->cdb;
2341 char data_buf[20] = "";
2342 char cdb_buf[70] = "";
022bdb07 2343
0260731f 2344 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
b1c72916 2345 ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
022bdb07
TH
2346 continue;
2347
abb6a889
TH
2348 if (qc->dma_dir != DMA_NONE) {
2349 static const char *dma_str[] = {
2350 [DMA_BIDIRECTIONAL] = "bidi",
2351 [DMA_TO_DEVICE] = "out",
2352 [DMA_FROM_DEVICE] = "in",
2353 };
2354 static const char *prot_str[] = {
2355 [ATA_PROT_PIO] = "pio",
2356 [ATA_PROT_DMA] = "dma",
2357 [ATA_PROT_NCQ] = "ncq",
0dc36888
TH
2358 [ATAPI_PROT_PIO] = "pio",
2359 [ATAPI_PROT_DMA] = "dma",
abb6a889
TH
2360 };
2361
2362 snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2363 prot_str[qc->tf.protocol], qc->nbytes,
2364 dma_str[qc->dma_dir]);
2365 }
2366
6521148c
RH
2367 if (ata_is_atapi(qc->tf.protocol)) {
2368 if (qc->scsicmd)
2369 scsi_print_command(qc->scsicmd);
2370 else
2371 snprintf(cdb_buf, sizeof(cdb_buf),
abb6a889
TH
2372 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
2373 "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
2374 cdb[0], cdb[1], cdb[2], cdb[3],
2375 cdb[4], cdb[5], cdb[6], cdb[7],
2376 cdb[8], cdb[9], cdb[10], cdb[11],
2377 cdb[12], cdb[13], cdb[14], cdb[15]);
6521148c
RH
2378 } else {
2379 const char *descr = ata_get_cmd_descript(cmd->command);
2380 if (descr)
2381 ata_dev_printk(qc->dev, KERN_ERR,
2382 "failed command: %s\n", descr);
2383 }
abb6a889 2384
8a937581
TH
2385 ata_dev_printk(qc->dev, KERN_ERR,
2386 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
abb6a889 2387 "tag %d%s\n %s"
8a937581 2388 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
5335b729 2389 "Emask 0x%x (%s)%s\n",
8a937581
TH
2390 cmd->command, cmd->feature, cmd->nsect,
2391 cmd->lbal, cmd->lbam, cmd->lbah,
2392 cmd->hob_feature, cmd->hob_nsect,
2393 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
abb6a889 2394 cmd->device, qc->tag, data_buf, cdb_buf,
8a937581
TH
2395 res->command, res->feature, res->nsect,
2396 res->lbal, res->lbam, res->lbah,
2397 res->hob_feature, res->hob_nsect,
2398 res->hob_lbal, res->hob_lbam, res->hob_lbah,
5335b729
TH
2399 res->device, qc->err_mask, ata_err_string(qc->err_mask),
2400 qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
1333e194 2401
6521148c 2402#ifdef CONFIG_ATA_VERBOSE_ERROR
1333e194 2403 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2dcb407e 2404 ATA_ERR)) {
1333e194
RH
2405 if (res->command & ATA_BUSY)
2406 ata_dev_printk(qc->dev, KERN_ERR,
2dcb407e 2407 "status: { Busy }\n");
1333e194
RH
2408 else
2409 ata_dev_printk(qc->dev, KERN_ERR,
2410 "status: { %s%s%s%s}\n",
2411 res->command & ATA_DRDY ? "DRDY " : "",
2412 res->command & ATA_DF ? "DF " : "",
2413 res->command & ATA_DRQ ? "DRQ " : "",
2dcb407e 2414 res->command & ATA_ERR ? "ERR " : "");
1333e194
RH
2415 }
2416
2417 if (cmd->command != ATA_CMD_PACKET &&
2418 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2419 ATA_ABORTED)))
2420 ata_dev_printk(qc->dev, KERN_ERR,
2421 "error: { %s%s%s%s}\n",
2422 res->feature & ATA_ICRC ? "ICRC " : "",
2423 res->feature & ATA_UNC ? "UNC " : "",
2424 res->feature & ATA_IDNF ? "IDNF " : "",
2dcb407e 2425 res->feature & ATA_ABORTED ? "ABRT " : "");
6521148c 2426#endif
022bdb07
TH
2427 }
2428}
2429
9b1e2658
TH
2430/**
2431 * ata_eh_report - report error handling to user
2432 * @ap: ATA port to report EH about
2433 *
2434 * Report EH to user.
2435 *
2436 * LOCKING:
2437 * None.
2438 */
fb7fd614 2439void ata_eh_report(struct ata_port *ap)
9b1e2658
TH
2440{
2441 struct ata_link *link;
2442
1eca4365 2443 ata_for_each_link(link, ap, HOST_FIRST)
9b1e2658
TH
2444 ata_eh_link_report(link);
2445}
2446
cc0680a5 2447static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
b1c72916
TH
2448 unsigned int *classes, unsigned long deadline,
2449 bool clear_classes)
d87fa38e 2450{
f58229f8 2451 struct ata_device *dev;
d87fa38e 2452
b1c72916 2453 if (clear_classes)
1eca4365 2454 ata_for_each_dev(dev, link, ALL)
b1c72916 2455 classes[dev->devno] = ATA_DEV_UNKNOWN;
d87fa38e 2456
f046519f 2457 return reset(link, classes, deadline);
d87fa38e
TH
2458}
2459
ae791c05 2460static int ata_eh_followup_srst_needed(struct ata_link *link,
5dbfc9cb 2461 int rc, const unsigned int *classes)
664faf09 2462{
45db2f6c 2463 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
ae791c05 2464 return 0;
5dbfc9cb
TH
2465 if (rc == -EAGAIN)
2466 return 1;
071f44b1 2467 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
3495de73 2468 return 1;
664faf09
TH
2469 return 0;
2470}
2471
fb7fd614
TH
2472int ata_eh_reset(struct ata_link *link, int classify,
2473 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2474 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
022bdb07 2475{
afaa5c37 2476 struct ata_port *ap = link->ap;
b1c72916 2477 struct ata_link *slave = ap->slave_link;
936fd732 2478 struct ata_eh_context *ehc = &link->eh_context;
705d2014 2479 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
664faf09 2480 unsigned int *classes = ehc->classes;
416dc9ed 2481 unsigned int lflags = link->flags;
1cdaf534 2482 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
d8af0eb6 2483 int max_tries = 0, try = 0;
b1c72916 2484 struct ata_link *failed_link;
f58229f8 2485 struct ata_device *dev;
416dc9ed 2486 unsigned long deadline, now;
022bdb07 2487 ata_reset_fn_t reset;
afaa5c37 2488 unsigned long flags;
416dc9ed 2489 u32 sstatus;
b1c72916 2490 int nr_unknown, rc;
022bdb07 2491
932648b0
TH
2492 /*
2493 * Prepare to reset
2494 */
d8af0eb6
TH
2495 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2496 max_tries++;
05944bdf
TH
2497 if (link->flags & ATA_LFLAG_NO_HRST)
2498 hardreset = NULL;
2499 if (link->flags & ATA_LFLAG_NO_SRST)
2500 softreset = NULL;
d8af0eb6 2501
19b72321
TH
2502 /* make sure each reset attempt is at least COOL_DOWN apart */
2503 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2504 now = jiffies;
2505 WARN_ON(time_after(ehc->last_reset, now));
2506 deadline = ata_deadline(ehc->last_reset,
2507 ATA_EH_RESET_COOL_DOWN);
2508 if (time_before(now, deadline))
2509 schedule_timeout_uninterruptible(deadline - now);
2510 }
0a2c0f56 2511
afaa5c37
TH
2512 spin_lock_irqsave(ap->lock, flags);
2513 ap->pflags |= ATA_PFLAG_RESETTING;
2514 spin_unlock_irqrestore(ap->lock, flags);
2515
cf480626 2516 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
13abf50d 2517
1eca4365 2518 ata_for_each_dev(dev, link, ALL) {
cdeab114
TH
2519 /* If we issue an SRST then an ATA drive (not ATAPI)
2520 * may change configuration and be in PIO0 timing. If
2521 * we do a hard reset (or are coming from power on)
2522 * this is true for ATA or ATAPI. Until we've set a
2523 * suitable controller mode we should not touch the
2524 * bus as we may be talking too fast.
2525 */
2526 dev->pio_mode = XFER_PIO_0;
2527
2528 /* If the controller has a pio mode setup function
2529 * then use it to set the chipset to rights. Don't
2530 * touch the DMA setup as that will be dealt with when
2531 * configuring devices.
2532 */
2533 if (ap->ops->set_piomode)
2534 ap->ops->set_piomode(ap, dev);
2535 }
2536
cf480626 2537 /* prefer hardreset */
932648b0 2538 reset = NULL;
cf480626
TH
2539 ehc->i.action &= ~ATA_EH_RESET;
2540 if (hardreset) {
2541 reset = hardreset;
a674050e 2542 ehc->i.action |= ATA_EH_HARDRESET;
4f7faa3f 2543 } else if (softreset) {
cf480626 2544 reset = softreset;
a674050e 2545 ehc->i.action |= ATA_EH_SOFTRESET;
cf480626 2546 }
f5914a46
TH
2547
2548 if (prereset) {
b1c72916
TH
2549 unsigned long deadline = ata_deadline(jiffies,
2550 ATA_EH_PRERESET_TIMEOUT);
2551
2552 if (slave) {
2553 sehc->i.action &= ~ATA_EH_RESET;
2554 sehc->i.action |= ehc->i.action;
2555 }
2556
2557 rc = prereset(link, deadline);
2558
2559 /* If present, do prereset on slave link too. Reset
2560 * is skipped iff both master and slave links report
2561 * -ENOENT or clear ATA_EH_RESET.
2562 */
2563 if (slave && (rc == 0 || rc == -ENOENT)) {
2564 int tmp;
2565
2566 tmp = prereset(slave, deadline);
2567 if (tmp != -ENOENT)
2568 rc = tmp;
2569
2570 ehc->i.action |= sehc->i.action;
2571 }
2572
f5914a46 2573 if (rc) {
c961922b 2574 if (rc == -ENOENT) {
cc0680a5 2575 ata_link_printk(link, KERN_DEBUG,
4aa9ab67 2576 "port disabled. ignoring.\n");
cf480626 2577 ehc->i.action &= ~ATA_EH_RESET;
4aa9ab67 2578
1eca4365 2579 ata_for_each_dev(dev, link, ALL)
f58229f8 2580 classes[dev->devno] = ATA_DEV_NONE;
4aa9ab67
TH
2581
2582 rc = 0;
c961922b 2583 } else
cc0680a5 2584 ata_link_printk(link, KERN_ERR,
f5914a46 2585 "prereset failed (errno=%d)\n", rc);
fccb6ea5 2586 goto out;
f5914a46 2587 }
f5914a46 2588
932648b0 2589 /* prereset() might have cleared ATA_EH_RESET. If so,
d6515e6f 2590 * bang classes, thaw and return.
932648b0
TH
2591 */
2592 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
1eca4365 2593 ata_for_each_dev(dev, link, ALL)
932648b0 2594 classes[dev->devno] = ATA_DEV_NONE;
d6515e6f
TH
2595 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2596 ata_is_host_link(link))
2597 ata_eh_thaw_port(ap);
932648b0
TH
2598 rc = 0;
2599 goto out;
2600 }
f5914a46
TH
2601 }
2602
022bdb07 2603 retry:
932648b0
TH
2604 /*
2605 * Perform reset
2606 */
dc98c32c
TH
2607 if (ata_is_host_link(link))
2608 ata_eh_freeze_port(ap);
2609
341c2c95 2610 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
31daabda 2611
932648b0
TH
2612 if (reset) {
2613 if (verbose)
2614 ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2615 reset == softreset ? "soft" : "hard");
2616
2617 /* mark that this EH session started with reset */
19b72321 2618 ehc->last_reset = jiffies;
932648b0
TH
2619 if (reset == hardreset)
2620 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2621 else
2622 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
022bdb07 2623
b1c72916
TH
2624 rc = ata_do_reset(link, reset, classes, deadline, true);
2625 if (rc && rc != -EAGAIN) {
2626 failed_link = link;
5dbfc9cb 2627 goto fail;
b1c72916
TH
2628 }
2629
2630 /* hardreset slave link if existent */
2631 if (slave && reset == hardreset) {
2632 int tmp;
2633
2634 if (verbose)
2635 ata_link_printk(slave, KERN_INFO,
2636 "hard resetting link\n");
2637
2638 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2639 tmp = ata_do_reset(slave, reset, classes, deadline,
2640 false);
2641 switch (tmp) {
2642 case -EAGAIN:
2643 rc = -EAGAIN;
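 /* fall through: remember -EAGAIN, but the slave reset itself is done */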
2644 case 0:
2645 break;
2646 default:
2647 failed_link = slave;
2648 rc = tmp;
2649 goto fail;
2650 }
2651 }
022bdb07 2652
b1c72916 2653 /* perform follow-up SRST if necessary */
932648b0 2654 if (reset == hardreset &&
5dbfc9cb 2655 ata_eh_followup_srst_needed(link, rc, classes)) {
932648b0 2656 reset = softreset;
022bdb07 2657
932648b0
TH
2658 if (!reset) {
2659 ata_link_printk(link, KERN_ERR,
2660 "follow-up softreset required "
2661 "but no softreset avaliable\n");
b1c72916 2662 failed_link = link;
932648b0
TH
2663 rc = -EINVAL;
2664 goto fail;
2665 }
664faf09 2666
932648b0 2667 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
b1c72916 2668 rc = ata_do_reset(link, reset, classes, deadline, true);
fe2c4d01
TH
2669 if (rc) {
2670 failed_link = link;
2671 goto fail;
2672 }
664faf09 2673 }
932648b0
TH
2674 } else {
2675 if (verbose)
2676 ata_link_printk(link, KERN_INFO, "no reset method "
2677 "available, skipping reset\n");
2678 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2679 lflags |= ATA_LFLAG_ASSUME_ATA;
664faf09
TH
2680 }
2681
932648b0
TH
2682 /*
2683 * Post-reset processing
2684 */
1eca4365 2685 ata_for_each_dev(dev, link, ALL) {
416dc9ed
TH
2686 /* After the reset, the device state is PIO 0 and the
2687 * controller state is undefined. Reset also wakes up
2688 * drives from sleeping mode.
2689 */
2690 dev->pio_mode = XFER_PIO_0;
2691 dev->flags &= ~ATA_DFLAG_SLEEPING;
31daabda 2692
3b761d3d
TH
2693 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2694 continue;
2695
2696 /* apply class override */
2697 if (lflags & ATA_LFLAG_ASSUME_ATA)
2698 classes[dev->devno] = ATA_DEV_ATA;
2699 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2700 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
022bdb07
TH
2701 }
2702
416dc9ed
TH
2703 /* record current link speed */
2704 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2705 link->sata_spd = (sstatus >> 4) & 0xf;
b1c72916
TH
2706 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2707 slave->sata_spd = (sstatus >> 4) & 0xf;
008a7896 2708
dc98c32c
TH
2709 /* thaw the port */
2710 if (ata_is_host_link(link))
2711 ata_eh_thaw_port(ap);
2712
f046519f
TH
2713 /* postreset() should clear hardware SError. Although SError
2714 * is cleared during link resume, clearing SError here is
2715 * necessary as some PHYs raise hotplug events after SRST.
2716 * This introduces a race condition where hotplug occurs between
2717 * reset and here. This race is mitigated by cross checking
2718 * link onlineness and classification result later.
2719 */
b1c72916 2720 if (postreset) {
416dc9ed 2721 postreset(link, classes);
b1c72916
TH
2722 if (slave)
2723 postreset(slave, classes);
2724 }
20952b69 2725
1e641060
TH
2726 /*
2727 * Some controllers can't be frozen very well and may set
2728 * spurious error conditions during reset. Clear accumulated
2729 * error information. As reset is the final recovery action,
2730 * nothing is lost by doing this.
2731 */
f046519f 2732 spin_lock_irqsave(link->ap->lock, flags);
1e641060 2733 memset(&link->eh_info, 0, sizeof(link->eh_info));
b1c72916 2734 if (slave)
1e641060
TH
2735 memset(&slave->eh_info, 0, sizeof(link->eh_info));
2736 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
f046519f
TH
2737 spin_unlock_irqrestore(link->ap->lock, flags);
2738
3b761d3d
TH
2739 /*
2740 * Make sure onlineness and classification result correspond.
f046519f
TH
2741 * Hotplug could have happened during reset and some
2742 * controllers fail to wait while a drive is spinning up after
2743 * being hotplugged causing misdetection. By cross checking
3b761d3d
TH
2744 * link on/offlineness and classification result, those
2745 * conditions can be reliably detected and retried.
f046519f 2746 */
b1c72916 2747 nr_unknown = 0;
1eca4365 2748 ata_for_each_dev(dev, link, ALL) {
3b761d3d
TH
2749 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2750 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2751 ata_dev_printk(dev, KERN_DEBUG, "link online "
2752 "but device misclassifed\n");
2753 classes[dev->devno] = ATA_DEV_NONE;
b1c72916 2754 nr_unknown++;
3b761d3d
TH
2755 }
2756 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2757 if (ata_class_enabled(classes[dev->devno]))
2758 ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2759 "clearing class %d to NONE\n",
2760 classes[dev->devno]);
2761 classes[dev->devno] = ATA_DEV_NONE;
2762 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2763 ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2764 "clearing UNKNOWN to NONE\n");
2765 classes[dev->devno] = ATA_DEV_NONE;
b1c72916 2766 }
f046519f
TH
2767 }
2768
b1c72916 2769 if (classify && nr_unknown) {
f046519f
TH
2770 if (try < max_tries) {
2771 ata_link_printk(link, KERN_WARNING, "link online but "
3b761d3d
TH
2772 "%d devices misclassified, retrying\n",
2773 nr_unknown);
b1c72916 2774 failed_link = link;
f046519f
TH
2775 rc = -EAGAIN;
2776 goto fail;
2777 }
2778 ata_link_printk(link, KERN_WARNING,
3b761d3d
TH
2779 "link online but %d devices misclassified, "
2780 "device detection might fail\n", nr_unknown);
f046519f
TH
2781 }
2782
416dc9ed 2783 /* reset successful, schedule revalidation */
cf480626 2784 ata_eh_done(link, NULL, ATA_EH_RESET);
b1c72916
TH
2785 if (slave)
2786 ata_eh_done(slave, NULL, ATA_EH_RESET);
6b7ae954 2787 ehc->last_reset = jiffies; /* update to completion time */
416dc9ed 2788 ehc->i.action |= ATA_EH_REVALIDATE;
6b7ae954 2789 link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */
ae791c05 2790
416dc9ed 2791 rc = 0;
fccb6ea5
TH
2792 out:
2793 /* clear hotplug flag */
2794 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
b1c72916
TH
2795 if (slave)
2796 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
afaa5c37
TH
2797
2798 spin_lock_irqsave(ap->lock, flags);
2799 ap->pflags &= ~ATA_PFLAG_RESETTING;
2800 spin_unlock_irqrestore(ap->lock, flags);
2801
022bdb07 2802 return rc;
416dc9ed
TH
2803
2804 fail:
5958e302
TH
2805 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2806 if (!ata_is_host_link(link) &&
2807 sata_scr_read(link, SCR_STATUS, &sstatus))
2808 rc = -ERESTART;
2809
416dc9ed
TH
2810 if (rc == -ERESTART || try >= max_tries)
2811 goto out;
2812
2813 now = jiffies;
2814 if (time_before(now, deadline)) {
2815 unsigned long delta = deadline - now;
2816
b1c72916 2817 ata_link_printk(failed_link, KERN_WARNING,
0a2c0f56
TH
2818 "reset failed (errno=%d), retrying in %u secs\n",
2819 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
416dc9ed
TH
2820
2821 while (delta)
2822 delta = schedule_timeout_uninterruptible(delta);
2823 }
2824
b1c72916 2825 if (try == max_tries - 1) {
a07d499b 2826 sata_down_spd_limit(link, 0);
b1c72916 2827 if (slave)
a07d499b 2828 sata_down_spd_limit(slave, 0);
b1c72916 2829 } else if (rc == -EPIPE)
a07d499b 2830 sata_down_spd_limit(failed_link, 0);
b1c72916 2831
416dc9ed
TH
2832 if (hardreset)
2833 reset = hardreset;
2834 goto retry;
022bdb07
TH
2835}
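
/* Illustrative note: LLDs normally don't call ata_eh_reset() directly;
 * ata_eh_recover() invokes it with the reset methods the driver passed
 * down through ata_do_eh(), along these lines (sketch only):
 */
#if 0
	ata_do_eh(ap, ap->ops->prereset, ap->ops->softreset,
		  ap->ops->hardreset, ap->ops->postreset);
#endif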
2836
45fabbb7
EO
2837static inline void ata_eh_pull_park_action(struct ata_port *ap)
2838{
2839 struct ata_link *link;
2840 struct ata_device *dev;
2841 unsigned long flags;
2842
2843 /*
2844 * This function can be thought of as an extended version of
2845 * ata_eh_about_to_do() specially crafted to accommodate the
2846 * requirements of ATA_EH_PARK handling. Since the EH thread
2847 * does not leave the do {} while () loop in ata_eh_recover as
2848 * long as the timeout for a park request to *one* device on
2849 * the port has not expired, and since we still want to pick
2850 * up park requests to other devices on the same port or
2851 * timeout updates for the same device, we have to pull
2852 * ATA_EH_PARK actions from eh_info into eh_context.i
2853 * ourselves at the beginning of each pass over the loop.
2854 *
2855 * Additionally, all write accesses to &ap->park_req_pending
2856 * through INIT_COMPLETION() (see below) or complete_all()
2857 * (see ata_scsi_park_store()) are protected by the host lock.
2858 * As a result we have that park_req_pending.done is zero on
2859 * exit from this function, i.e. when ATA_EH_PARK actions for
2860 * *all* devices on port ap have been pulled into the
2861 * respective eh_context structs. If, and only if,
2862 * park_req_pending.done is non-zero by the time we reach
2863 * wait_for_completion_timeout(), another ATA_EH_PARK action
2864 * has been scheduled for at least one of the devices on port
2865 * ap and we have to cycle over the do {} while () loop in
2866 * ata_eh_recover() again.
2867 */
2868
2869 spin_lock_irqsave(ap->lock, flags);
2870 INIT_COMPLETION(ap->park_req_pending);
1eca4365
TH
2871 ata_for_each_link(link, ap, EDGE) {
2872 ata_for_each_dev(dev, link, ALL) {
45fabbb7
EO
2873 struct ata_eh_info *ehi = &link->eh_info;
2874
2875 link->eh_context.i.dev_action[dev->devno] |=
2876 ehi->dev_action[dev->devno] & ATA_EH_PARK;
2877 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2878 }
2879 }
2880 spin_unlock_irqrestore(ap->lock, flags);
2881}
2882
2883static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2884{
2885 struct ata_eh_context *ehc = &dev->link->eh_context;
2886 struct ata_taskfile tf;
2887 unsigned int err_mask;
2888
2889 ata_tf_init(dev, &tf);
2890 if (park) {
2891 ehc->unloaded_mask |= 1 << dev->devno;
2892 tf.command = ATA_CMD_IDLEIMMEDIATE;
2893 tf.feature = 0x44;
2894 tf.lbal = 0x4c;
2895 tf.lbam = 0x4e;
2896 tf.lbah = 0x55;
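 /* FEATURE 0x44 plus the LBA registers spelling "UNL" selects the
 * head-unload flavor of IDLE IMMEDIATE (ATA-8); on success the
 * device returns 0xc4 in LBA low, which is checked below. */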
2897 } else {
2898 ehc->unloaded_mask &= ~(1 << dev->devno);
2899 tf.command = ATA_CMD_CHK_POWER;
2900 }
2901
2902 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2903 tf.protocol |= ATA_PROT_NODATA;
2904 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2905 if (park && (err_mask || tf.lbal != 0xc4)) {
2906 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2907 ehc->unloaded_mask &= ~(1 << dev->devno);
2908 }
2909}
2910
0260731f 2911static int ata_eh_revalidate_and_attach(struct ata_link *link,
084fe639 2912 struct ata_device **r_failed_dev)
022bdb07 2913{
0260731f
TH
2914 struct ata_port *ap = link->ap;
2915 struct ata_eh_context *ehc = &link->eh_context;
022bdb07 2916 struct ata_device *dev;
8c3c52a8 2917 unsigned int new_mask = 0;
084fe639 2918 unsigned long flags;
f58229f8 2919 int rc = 0;
022bdb07
TH
2920
2921 DPRINTK("ENTER\n");
2922
8c3c52a8
TH
2923 /* For PATA drive side cable detection to work, IDENTIFY must
2924 * be done backwards such that PDIAG- is released by the slave
2925 * device before the master device is identified.
2926 */
1eca4365 2927 ata_for_each_dev(dev, link, ALL_REVERSE) {
f58229f8
TH
2928 unsigned int action = ata_eh_dev_action(dev);
2929 unsigned int readid_flags = 0;
022bdb07 2930
bff04647
TH
2931 if (ehc->i.flags & ATA_EHI_DID_RESET)
2932 readid_flags |= ATA_READID_POSTRESET;
2933
9666f400 2934 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
633273a3
TH
2935 WARN_ON(dev->class == ATA_DEV_PMP);
2936
b1c72916 2937 if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
022bdb07 2938 rc = -EIO;
8c3c52a8 2939 goto err;
022bdb07
TH
2940 }
2941
0260731f 2942 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
422c9daa
TH
2943 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2944 readid_flags);
022bdb07 2945 if (rc)
8c3c52a8 2946 goto err;
022bdb07 2947
0260731f 2948 ata_eh_done(link, dev, ATA_EH_REVALIDATE);
47005f25 2949
baa1e78a
TH
2950 /* Configuration may have changed, reconfigure
2951 * transfer mode.
2952 */
2953 ehc->i.flags |= ATA_EHI_SETMODE;
2954
3057ac3c 2955 /* schedule the scsi_rescan_device() here */
ad72cf98 2956 schedule_work(&(ap->scsi_rescan_task));
084fe639
TH
2957 } else if (dev->class == ATA_DEV_UNKNOWN &&
2958 ehc->tries[dev->devno] &&
2959 ata_class_enabled(ehc->classes[dev->devno])) {
842faa6c
TH
2960 /* Temporarily set dev->class, it will be
2961 * permanently set once all configurations are
2962 * complete. This is necessary because new
2963 * device configuration is done in two
2964 * separate loops.
2965 */
084fe639
TH
2966 dev->class = ehc->classes[dev->devno];
2967
633273a3
TH
2968 if (dev->class == ATA_DEV_PMP)
2969 rc = sata_pmp_attach(dev);
2970 else
2971 rc = ata_dev_read_id(dev, &dev->class,
2972 readid_flags, dev->id);
842faa6c
TH
2973
2974 /* read_id might have changed class, store and reset */
2975 ehc->classes[dev->devno] = dev->class;
2976 dev->class = ATA_DEV_UNKNOWN;
2977
8c3c52a8
TH
2978 switch (rc) {
2979 case 0:
99cf610a
TH
2980 /* clear error info accumulated during probe */
2981 ata_ering_clear(&dev->ering);
f58229f8 2982 new_mask |= 1 << dev->devno;
8c3c52a8
TH
2983 break;
2984 case -ENOENT:
55a8e2c8
TH
2985 /* IDENTIFY was issued to non-existent
2986 * device. No need to reset. Just
842faa6c 2987 * thaw and ignore the device.
55a8e2c8
TH
2988 */
2989 ata_eh_thaw_port(ap);
084fe639 2990 break;
8c3c52a8 2991 default:
8c3c52a8 2992 goto err;
084fe639 2993 }
8c3c52a8
TH
2994 }
2995 }
084fe639 2996
c1c4e8d5 2997 /* PDIAG- should have been released, ask cable type if post-reset */
33267325
TH
2998 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
2999 if (ap->ops->cable_detect)
3000 ap->cbl = ap->ops->cable_detect(ap);
3001 ata_force_cbl(ap);
3002 }
c1c4e8d5 3003
8c3c52a8
TH
3004 /* Configure new devices forward such that user doesn't see
3005 * device detection messages backwards.
3006 */
1eca4365 3007 ata_for_each_dev(dev, link, ALL) {
4f7c2874 3008 if (!(new_mask & (1 << dev->devno)))
8c3c52a8
TH
3009 continue;
3010
842faa6c
TH
3011 dev->class = ehc->classes[dev->devno];
3012
4f7c2874
TH
3013 if (dev->class == ATA_DEV_PMP)
3014 continue;
3015
8c3c52a8
TH
3016 ehc->i.flags |= ATA_EHI_PRINTINFO;
3017 rc = ata_dev_configure(dev);
3018 ehc->i.flags &= ~ATA_EHI_PRINTINFO;
842faa6c
TH
3019 if (rc) {
3020 dev->class = ATA_DEV_UNKNOWN;
8c3c52a8 3021 goto err;
842faa6c 3022 }
8c3c52a8
TH
3023
3024 spin_lock_irqsave(ap->lock, flags);
3025 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3026 spin_unlock_irqrestore(ap->lock, flags);
3027
3028 /* new device discovered, configure xfermode */
3029 ehc->i.flags |= ATA_EHI_SETMODE;
022bdb07
TH
3030 }
3031
8c3c52a8 3032 return 0;
022bdb07 3033
8c3c52a8
TH
3034 err:
3035 *r_failed_dev = dev;
3036 DPRINTK("EXIT rc=%d\n", rc);
022bdb07
TH
3037 return rc;
3038}
3039
6f1d1e3a
TH
3040/**
3041 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3042 * @link: link on which timings will be programmed
98a1708d 3043 * @r_failed_dev: out parameter for failed device
6f1d1e3a
TH
3044 *
3045 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3046 * ata_set_mode() fails, pointer to the failing device is
3047 * returned in @r_failed_dev.
3048 *
3049 * LOCKING:
3050 * PCI/etc. bus probe sem.
3051 *
3052 * RETURNS:
3053 * 0 on success, negative errno otherwise
3054 */
3055int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3056{
3057 struct ata_port *ap = link->ap;
00115e0f
TH
3058 struct ata_device *dev;
3059 int rc;
6f1d1e3a 3060
76326ac1 3061 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
1eca4365 3062 ata_for_each_dev(dev, link, ENABLED) {
76326ac1
TH
3063 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3064 struct ata_ering_entry *ent;
3065
3066 ent = ata_ering_top(&dev->ering);
3067 if (ent)
3068 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3069 }
3070 }
3071
6f1d1e3a
TH
3072 /* has private set_mode? */
3073 if (ap->ops->set_mode)
00115e0f
TH
3074 rc = ap->ops->set_mode(link, r_failed_dev);
3075 else
3076 rc = ata_do_set_mode(link, r_failed_dev);
3077
3078 /* if transfer mode has changed, set DUBIOUS_XFER on device */
1eca4365 3079 ata_for_each_dev(dev, link, ENABLED) {
00115e0f
TH
3080 struct ata_eh_context *ehc = &link->eh_context;
3081 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3082 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3083
3084 if (dev->xfer_mode != saved_xfer_mode ||
3085 ata_ncq_enabled(dev) != saved_ncq)
3086 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3087 }
3088
3089 return rc;
6f1d1e3a
TH
3090}
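
/* Illustrative note: ATA_DFLAG_DUBIOUS_XFER set above stays on until a
 * data transfer command later completes successfully in the new mode
 * (cleared in libata-core, see ata_verify_xfer()); until then, errors
 * recorded on the ering carry ATA_EFLAG_DUBIOUS_XFER and feed
 * speed-down rules #1 and #2.
 */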
3091
11fc33da
TH
3092/**
3093 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3094 * @dev: ATAPI device to clear UA for
3095 *
3096 * Resets and other operations can make an ATAPI device raise
3097 * UNIT ATTENTION which causes the next operation to fail. This
3098 * function clears UA.
3099 *
3100 * LOCKING:
3101 * EH context (may sleep).
3102 *
3103 * RETURNS:
3104 * 0 on success, -errno on failure.
3105 */
3106static int atapi_eh_clear_ua(struct ata_device *dev)
3107{
3108 int i;
3109
3110 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
b5357081 3111 u8 *sense_buffer = dev->link->ap->sector_buf;
11fc33da
TH
3112 u8 sense_key = 0;
3113 unsigned int err_mask;
3114
3115 err_mask = atapi_eh_tur(dev, &sense_key);
3116 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3117 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3118 "failed (err_mask=0x%x)\n", err_mask);
3119 return -EIO;
3120 }
3121
3122 if (!err_mask || sense_key != UNIT_ATTENTION)
3123 return 0;
3124
3125 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3126 if (err_mask) {
3127 ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3128 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3129 return -EIO;
3130 }
3131 }
3132
3133 ata_dev_printk(dev, KERN_WARNING,
3134 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3135
3136 return 0;
3137}
3138
6013efd8
TH
3139/**
3140 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3141 * @dev: ATA device which may need FLUSH retry
3142 *
3143 * If @dev failed FLUSH, it needs to be reported to the upper layer
3144 * immediately as it means that @dev failed to remap and already
3145 * lost at least a sector and further FLUSH retries won't make
3146 * any difference to the lost sector. However, if FLUSH failed
3147 * for other reasons, for example transmission error, FLUSH needs
3148 * to be retried.
3149 *
3150 * This function determines whether FLUSH failure retry is
3151 * necessary and performs it if so.
3152 *
3153 * RETURNS:
3154 * 0 if EH can continue, -errno if EH needs to be repeated.
3155 */
3156static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3157{
3158 struct ata_link *link = dev->link;
3159 struct ata_port *ap = link->ap;
3160 struct ata_queued_cmd *qc;
3161 struct ata_taskfile tf;
3162 unsigned int err_mask;
3163 int rc = 0;
3164
3165 /* did flush fail for this device? */
3166 if (!ata_tag_valid(link->active_tag))
3167 return 0;
3168
3169 qc = __ata_qc_from_tag(ap, link->active_tag);
3170 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3171 qc->tf.command != ATA_CMD_FLUSH))
3172 return 0;
3173
3174 /* if the device failed it, it should be reported to upper layers */
3175 if (qc->err_mask & AC_ERR_DEV)
3176 return 0;
3177
3178 /* flush failed for some other reason, give it another shot */
3179 ata_tf_init(dev, &tf);
3180
3181 tf.command = qc->tf.command;
3182 tf.flags |= ATA_TFLAG_DEVICE;
3183 tf.protocol = ATA_PROT_NODATA;
3184
3185 ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
3186 tf.command, qc->err_mask);
3187
3188 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3189 if (!err_mask) {
3190 /*
3191 * FLUSH is complete but there's no way to
3192 * successfully complete a failed command from EH.
3193 * Making sure retry is allowed at least once and
3194 * retrying it should do the trick - whatever was in
3195 * the cache is already on the platter and this won't
3196 * cause infinite loop.
3197 */
3198 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3199 } else {
3200 ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
3201 err_mask);
3202 rc = -EIO;
3203
3204 /* if device failed it, report it to upper layers */
3205 if (err_mask & AC_ERR_DEV) {
3206 qc->err_mask |= AC_ERR_DEV;
3207 qc->result_tf = tf;
3208 if (!(ap->pflags & ATA_PFLAG_FROZEN))
3209 rc = 0;
3210 }
3211 }
3212 return rc;
3213}
3214
6b7ae954
TH
3215/**
3216 * ata_eh_set_lpm - configure SATA interface power management
3217 * @link: link to configure power management
3218 * @policy: the link power management policy
3219 * @r_failed_dev: out parameter for failed device
3220 *
3221 * Enable SATA Interface power management. This will enable
3222 * Device Interface Power Management (DIPM) for min_power
3223 * policy, and then call driver-specific callbacks for
3224 * enabling Host Initiated Power Management (HIPM).
3225 *
3226 * LOCKING:
3227 * EH context.
3228 *
3229 * RETURNS:
3230 * 0 on success, -errno on failure.
3231 */
3232static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3233 struct ata_device **r_failed_dev)
3234{
6c8ea89c 3235 struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
6b7ae954
TH
3236 struct ata_eh_context *ehc = &link->eh_context;
3237 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3238 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3239 unsigned int err_mask;
3240 int rc;
3241
3242 /* if the link or host doesn't do LPM, noop */
3243 if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
3244 return 0;
3245
3246 /*
3247 * DIPM is enabled only for MIN_POWER as some devices
3248 * misbehave when the host NACKs transition to SLUMBER. Order
3249 * device and link configurations such that the host always
3250 * allows DIPM requests.
3251 */
3252 ata_for_each_dev(dev, link, ENABLED) {
3253 bool hipm = ata_id_has_hipm(dev->id);
3254 bool dipm = ata_id_has_dipm(dev->id);
3255
3256 /* find the first enabled and LPM enabled devices */
3257 if (!link_dev)
3258 link_dev = dev;
3259
3260 if (!lpm_dev && (hipm || dipm))
3261 lpm_dev = dev;
3262
3263 hints &= ~ATA_LPM_EMPTY;
3264 if (!hipm)
3265 hints &= ~ATA_LPM_HIPM;
3266
3267 /* disable DIPM before changing link config */
3268 if (policy != ATA_LPM_MIN_POWER && dipm) {
3269 err_mask = ata_dev_set_feature(dev,
3270 SETFEATURES_SATA_DISABLE, SATA_DIPM);
3271 if (err_mask && err_mask != AC_ERR_DEV) {
3272 ata_dev_printk(dev, KERN_WARNING,
3273 "failed to disable DIPM, Emask 0x%x\n",
3274 err_mask);
3275 rc = -EIO;
3276 goto fail;
3277 }
3278 }
3279 }
3280
6c8ea89c
TH
3281 if (ap) {
3282 rc = ap->ops->set_lpm(link, policy, hints);
3283 if (!rc && ap->slave_link)
3284 rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
3285 } else
3286 rc = sata_pmp_set_lpm(link, policy, hints);
6b7ae954
TH
3287
3288 /*
3289 * Attribute link config failure to the first (LPM) enabled
3290 * device on the link.
3291 */
3292 if (rc) {
3293 if (rc == -EOPNOTSUPP) {
3294 link->flags |= ATA_LFLAG_NO_LPM;
3295 return 0;
3296 }
3297 dev = lpm_dev ? lpm_dev : link_dev;
3298 goto fail;
3299 }
3300
3301 /* host config updated, enable DIPM if transitioning to MIN_POWER */
3302 ata_for_each_dev(dev, link, ENABLED) {
3303 if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) {
3304 err_mask = ata_dev_set_feature(dev,
3305 SETFEATURES_SATA_ENABLE, SATA_DIPM);
3306 if (err_mask && err_mask != AC_ERR_DEV) {
3307 ata_dev_printk(dev, KERN_WARNING,
3308 "failed to enable DIPM, Emask 0x%x\n",
3309 err_mask);
3310 rc = -EIO;
3311 goto fail;
3312 }
3313 }
3314 }
3315
3316 link->lpm_policy = policy;
3317 if (ap && ap->slave_link)
3318 ap->slave_link->lpm_policy = policy;
3319 return 0;
3320
3321fail:
3322 /* if no device or only one more chance is left, disable LPM */
3323 if (!dev || ehc->tries[dev->devno] <= 2) {
3324 ata_link_printk(link, KERN_WARNING,
3325 "disabling LPM on the link\n");
3326 link->flags |= ATA_LFLAG_NO_LPM;
3327 }
3328 if (r_failed_dev)
3329 *r_failed_dev = dev;
3330 return rc;
3331}
3332
0260731f 3333static int ata_link_nr_enabled(struct ata_link *link)
022bdb07 3334{
f58229f8
TH
3335 struct ata_device *dev;
3336 int cnt = 0;
022bdb07 3337
1eca4365
TH
3338 ata_for_each_dev(dev, link, ENABLED)
3339 cnt++;
022bdb07
TH
3340 return cnt;
3341}
3342
0260731f 3343static int ata_link_nr_vacant(struct ata_link *link)
084fe639 3344{
f58229f8
TH
3345 struct ata_device *dev;
3346 int cnt = 0;
084fe639 3347
1eca4365 3348 ata_for_each_dev(dev, link, ALL)
f58229f8 3349 if (dev->class == ATA_DEV_UNKNOWN)
084fe639
TH
3350 cnt++;
3351 return cnt;
3352}
3353
0260731f 3354static int ata_eh_skip_recovery(struct ata_link *link)
084fe639 3355{
672b2d65 3356 struct ata_port *ap = link->ap;
0260731f 3357 struct ata_eh_context *ehc = &link->eh_context;
f58229f8 3358 struct ata_device *dev;
084fe639 3359
f9df58cb
TH
3360 /* skip disabled links */
3361 if (link->flags & ATA_LFLAG_DISABLED)
3362 return 1;
3363
e2f3d75f
TH
3364 /* skip if explicitly requested */
3365 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3366 return 1;
3367
672b2d65
TH
3368 /* thaw frozen port and recover failed devices */
3369 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3370 return 0;
3371
3372 /* reset at least once if reset is requested */
3373 if ((ehc->i.action & ATA_EH_RESET) &&
3374 !(ehc->i.flags & ATA_EHI_DID_RESET))
084fe639
TH
3375 return 0;
3376
3377 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
1eca4365 3378 ata_for_each_dev(dev, link, ALL) {
084fe639
TH
3379 if (dev->class == ATA_DEV_UNKNOWN &&
3380 ehc->classes[dev->devno] != ATA_DEV_NONE)
3381 return 0;
3382 }
3383
3384 return 1;
3385}
3386
c2c7a89c
TH
3387static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3388{
3389 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3390 u64 now = get_jiffies_64();
3391 int *trials = void_arg;
3392
3393 if (ent->timestamp < now - min(now, interval))
3394 return -1;
3395
3396 (*trials)++;
3397 return 0;
3398}
3399
02c05a27
TH
3400static int ata_eh_schedule_probe(struct ata_device *dev)
3401{
3402 struct ata_eh_context *ehc = &dev->link->eh_context;
c2c7a89c
TH
3403 struct ata_link *link = ata_dev_phys_link(dev);
3404 int trials = 0;
02c05a27
TH
3405
3406 if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3407 (ehc->did_probe_mask & (1 << dev->devno)))
3408 return 0;
3409
3410 ata_eh_detach_dev(dev);
3411 ata_dev_init(dev);
3412 ehc->did_probe_mask |= (1 << dev->devno);
cf480626 3413 ehc->i.action |= ATA_EH_RESET;
00115e0f
TH
3414 ehc->saved_xfer_mode[dev->devno] = 0;
3415 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
02c05a27 3416
6b7ae954 3417 /* the link maybe in a deep sleep, wake it up */
6c8ea89c
TH
3418 if (link->lpm_policy > ATA_LPM_MAX_POWER) {
3419 if (ata_is_host_link(link))
3420 link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
3421 ATA_LPM_EMPTY);
3422 else
3423 sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
3424 ATA_LPM_EMPTY);
3425 }
6b7ae954 3426
c2c7a89c
TH
3427 /* Record and count probe trials on the ering. The specific
3428 * error mask used is irrelevant. Because a successful device
3429 * detection clears the ering, this count accumulates only if
3430 * there are consecutive failed probes.
3431 *
3432 * If the count exceeds ATA_EH_PROBE_TRIALS
3433 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3434 * forced to 1.5Gbps.
3435 *
3436 * This is to work around cases where failed link speed
3437 * negotiation results in device misdetection leading to
3438 * infinite DEVXCHG or PHRDY CHG events.
3439 */
3440 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3441 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3442
3443 if (trials > ATA_EH_PROBE_TRIALS)
3444 sata_down_spd_limit(link, 1);
3445
02c05a27
TH
3446 return 1;
3447}
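
/* Illustrative arithmetic: with ATA_EH_PROBE_TRIALS == 2 and a 60s
 * ATA_EH_PROBE_TRIAL_INTERVAL, the trial recorded above makes
 * trials > 2 on the third consecutive failed probe within a minute,
 * at which point sata_down_spd_limit(link, 1) caps the link at
 * 1.5Gbps.
 */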

static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;

        /* -EAGAIN from EH routine indicates retry without prejudice.
         * The requester is responsible for ensuring forward progress.
         */
        if (err != -EAGAIN)
                ehc->tries[dev->devno]--;

        switch (err) {
        case -ENODEV:
                /* device missing or wrong IDENTIFY data, schedule probing */
                ehc->i.probe_mask |= (1 << dev->devno);
                /* fall through */
        case -EINVAL:
                /* give it just one more chance */
                ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
                /* fall through */
        case -EIO:
                if (ehc->tries[dev->devno] == 1) {
                        /* This is the last chance, better to slow
                         * down than lose it.
                         */
                        sata_down_spd_limit(ata_dev_phys_link(dev), 0);
                        if (dev->pio_mode > XFER_PIO_0)
                                ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
                }
        }

        if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
                /* disable device if it has used up all its chances */
                ata_dev_disable(dev);

                /* detach if offline */
                if (ata_phys_link_offline(ata_dev_phys_link(dev)))
                        ata_eh_detach_dev(dev);

                /* schedule probe if necessary */
                if (ata_eh_schedule_probe(dev)) {
                        ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
                        memset(ehc->cmd_timeout_idx[dev->devno], 0,
                               sizeof(ehc->cmd_timeout_idx[dev->devno]));
                }

                return 1;
        } else {
                ehc->i.action |= ATA_EH_RESET;
                return 0;
        }
}
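
/* Return value: 1 means @dev has been taken out of service (disabled
 * and possibly detached or queued for reprobe); 0 means the device
 * keeps its remaining tries and the link should be reset and retried.
 */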

/**
 * ata_eh_recover - recover host port after error
 * @ap: host port to recover
 * @prereset: prereset method (can be NULL)
 * @softreset: softreset method (can be NULL)
 * @hardreset: hardreset method (can be NULL)
 * @postreset: postreset method (can be NULL)
 * @r_failed_link: out parameter for failed link
 *
 * This is the alpha and omega, eum and yang, heart and soul of
 * libata exception handling.  On entry, actions required to
 * recover each link and hotplug requests are recorded in the
 * link's eh_context.  This function executes all the operations
 * with appropriate retrials and fallbacks to resurrect failed
 * devices, detach goners and greet newcomers.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
                   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
                   ata_postreset_fn_t postreset,
                   struct ata_link **r_failed_link)
{
        struct ata_link *link;
        struct ata_device *dev;
        int rc, nr_fails;
        unsigned long flags, deadline;

        DPRINTK("ENTER\n");

        /* prep for recovery */
        ata_for_each_link(link, ap, EDGE) {
                struct ata_eh_context *ehc = &link->eh_context;

                /* re-enable link? */
                if (ehc->i.action & ATA_EH_ENABLE_LINK) {
                        ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
                        spin_lock_irqsave(ap->lock, flags);
                        link->flags &= ~ATA_LFLAG_DISABLED;
                        spin_unlock_irqrestore(ap->lock, flags);
                        ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
                }

                ata_for_each_dev(dev, link, ALL) {
                        if (link->flags & ATA_LFLAG_NO_RETRY)
                                ehc->tries[dev->devno] = 1;
                        else
                                ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

                        /* collect port action mask recorded in dev actions */
                        ehc->i.action |= ehc->i.dev_action[dev->devno] &
                                         ~ATA_EH_PERDEV_MASK;
                        ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

                        /* process hotplug request */
                        if (dev->flags & ATA_DFLAG_DETACH)
                                ata_eh_detach_dev(dev);

                        /* schedule probe if necessary */
                        if (!ata_dev_enabled(dev))
                                ata_eh_schedule_probe(dev);
                }
        }

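        /* Recovery proper runs in passes from here: reset links that
         * need it, service park requests, then revalidate, attach and
         * configure.  Failures funnel through rest_fail below; failed
         * devices lose a try (ata_eh_handle_dev_fail()) and the whole
         * sequence restarts from this label until everything succeeds
         * or the offending devices have been disabled.
         */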
 retry:
        rc = 0;

        /* if UNLOADING, finish immediately */
        if (ap->pflags & ATA_PFLAG_UNLOADING)
                goto out;

        /* prep for EH */
        ata_for_each_link(link, ap, EDGE) {
                struct ata_eh_context *ehc = &link->eh_context;

                /* skip EH if possible. */
                if (ata_eh_skip_recovery(link))
                        ehc->i.action = 0;

                ata_for_each_dev(dev, link, ALL)
                        ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
        }

        /* reset */
        ata_for_each_link(link, ap, EDGE) {
                struct ata_eh_context *ehc = &link->eh_context;

                if (!(ehc->i.action & ATA_EH_RESET))
                        continue;

                rc = ata_eh_reset(link, ata_link_nr_vacant(link),
                                  prereset, softreset, hardreset, postreset);
                if (rc) {
                        ata_link_printk(link, KERN_ERR,
                                        "reset failed, giving up\n");
                        goto out;
                }
        }

        do {
                unsigned long now;

                /*
                 * clears ATA_EH_PARK in eh_info and resets
                 * ap->park_req_pending
                 */
                ata_eh_pull_park_action(ap);

                deadline = jiffies;
                ata_for_each_link(link, ap, EDGE) {
                        ata_for_each_dev(dev, link, ALL) {
                                struct ata_eh_context *ehc = &link->eh_context;
                                unsigned long tmp;

                                if (dev->class != ATA_DEV_ATA)
                                        continue;
                                if (!(ehc->i.dev_action[dev->devno] &
                                      ATA_EH_PARK))
                                        continue;
                                tmp = dev->unpark_deadline;
                                if (time_before(deadline, tmp))
                                        deadline = tmp;
                                else if (time_before_eq(tmp, jiffies))
                                        continue;
                                if (ehc->unloaded_mask & (1 << dev->devno))
                                        continue;

                                ata_eh_park_issue_cmd(dev, 1);
                        }
                }

                now = jiffies;
                if (time_before_eq(deadline, now))
                        break;

                deadline = wait_for_completion_timeout(&ap->park_req_pending,
                                                       deadline - now);
        } while (deadline);
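
        /* all park deadlines have expired or been waited out; resume
         * (unpark) the heads that were unloaded and complete the
         * ATA_EH_PARK actions
         */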
        ata_for_each_link(link, ap, EDGE) {
                ata_for_each_dev(dev, link, ALL) {
                        if (!(link->eh_context.unloaded_mask &
                              (1 << dev->devno)))
                                continue;

                        ata_eh_park_issue_cmd(dev, 0);
                        ata_eh_done(link, dev, ATA_EH_PARK);
                }
        }

        /* the rest */
        nr_fails = 0;
        ata_for_each_link(link, ap, PMP_FIRST) {
                struct ata_eh_context *ehc = &link->eh_context;

                if (sata_pmp_attached(ap) && ata_is_host_link(link))
                        goto config_lpm;

                /* revalidate existing devices and attach new ones */
                rc = ata_eh_revalidate_and_attach(link, &dev);
                if (rc)
                        goto rest_fail;

                /* if PMP got attached, return, pmp EH will take care of it */
                if (link->device->class == ATA_DEV_PMP) {
                        ehc->i.action = 0;
                        return 0;
                }

                /* configure transfer mode if necessary */
                if (ehc->i.flags & ATA_EHI_SETMODE) {
                        rc = ata_set_mode(link, &dev);
                        if (rc)
                                goto rest_fail;
                        ehc->i.flags &= ~ATA_EHI_SETMODE;
                }

                /* If reset has been issued, clear UA to avoid
                 * disrupting the current users of the device.
                 */
                if (ehc->i.flags & ATA_EHI_DID_RESET) {
                        ata_for_each_dev(dev, link, ALL) {
                                if (dev->class != ATA_DEV_ATAPI)
                                        continue;
                                rc = atapi_eh_clear_ua(dev);
                                if (rc)
                                        goto rest_fail;
                        }
                }

                /* retry flush if necessary */
                ata_for_each_dev(dev, link, ALL) {
                        if (dev->class != ATA_DEV_ATA)
                                continue;
                        rc = ata_eh_maybe_retry_flush(dev);
                        if (rc)
                                goto rest_fail;
                }

        config_lpm:
                /* configure link power saving */
                if (link->lpm_policy != ap->target_lpm_policy) {
                        rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
                        if (rc)
                                goto rest_fail;
                }

                /* this link is okay now */
                ehc->i.flags = 0;
                continue;

        rest_fail:
                nr_fails++;
                if (dev)
                        ata_eh_handle_dev_fail(dev, rc);

                if (ap->pflags & ATA_PFLAG_FROZEN) {
                        /* PMP reset requires working host port.
                         * Can't retry if it's frozen.
                         */
                        if (sata_pmp_attached(ap))
                                goto out;
                        break;
                }
        }

        if (nr_fails)
                goto retry;

 out:
        if (rc && r_failed_link)
                *r_failed_link = link;

        DPRINTK("EXIT, rc=%d\n", rc);
        return rc;
}
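
/* On failure, the link that broke recovery is handed back through
 * @r_failed_link; the PMP EH, for instance, uses it to decide whether
 * to retry or give up on a fan-out link.
 */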

/**
 * ata_eh_finish - finish up EH
 * @ap: host port to finish EH for
 *
 * Recovery is complete.  Clean up EH states and retry or finish
 * failed qcs.
 *
 * LOCKING:
 * None.
 */
void ata_eh_finish(struct ata_port *ap)
{
        int tag;

        /* retry or finish qcs */
        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;

                if (qc->err_mask) {
                        /* FIXME: Once EH migration is complete,
                         * generate sense data in this function,
                         * considering both err_mask and tf.
                         */
                        if (qc->flags & ATA_QCFLAG_RETRY)
                                ata_eh_qc_retry(qc);
                        else
                                ata_eh_qc_complete(qc);
                } else {
                        if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
                                ata_eh_qc_complete(qc);
                        } else {
                                /* feed zero TF to sense generation */
                                memset(&qc->result_tf, 0, sizeof(qc->result_tf));
                                ata_eh_qc_retry(qc);
                        }
                }
        }

        /* make sure nr_active_links is zero after EH */
        WARN_ON(ap->nr_active_links);
        ap->nr_active_links = 0;
}
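
/* The WARN_ON above flags EH bugs that leave commands accounted
 * against a link; nr_active_links is cleared regardless so the port
 * can resume issuing commands after EH.
 */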

/**
 * ata_do_eh - do standard error handling
 * @ap: host port to handle error for
 * @prereset: prereset method (can be NULL)
 * @softreset: softreset method (can be NULL)
 * @hardreset: hardreset method (can be NULL)
 * @postreset: postreset method (can be NULL)
 *
 * Perform standard error handling sequence.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
               ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
               ata_postreset_fn_t postreset)
{
        struct ata_device *dev;
        int rc;

        ata_eh_autopsy(ap);
        ata_eh_report(ap);

        rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
                            NULL);
        if (rc) {
                ata_for_each_dev(dev, &ap->link, ALL)
                        ata_dev_disable(dev);
        }

        ata_eh_finish(ap);
}

/**
 * ata_std_error_handler - standard error handler
 * @ap: host port to handle error for
 *
 * Standard error handler
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
        struct ata_port_operations *ops = ap->ops;
        ata_reset_fn_t hardreset = ops->hardreset;

        /* ignore built-in hardreset if SCR access is not available */
        if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
                hardreset = NULL;

        ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}
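
/* Illustrative sketch, not from this file: a LLDD normally reaches
 * ata_std_error_handler() through its port_ops, either inherited from
 * a base port_ops or set explicitly.  "foo_port_ops" below is
 * hypothetical:
 *
 *      static struct ata_port_operations foo_port_ops = {
 *              .inherits       = &ata_sff_port_ops,
 *              .error_handler  = ata_std_error_handler,
 *      };
 */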

#ifdef CONFIG_PM
/**
 * ata_eh_handle_port_suspend - perform port suspend operation
 * @ap: port to suspend
 *
 * Suspend @ap.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
        unsigned long flags;
        int rc = 0;

        /* are we suspending? */
        spin_lock_irqsave(ap->lock, flags);
        if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
            ap->pm_mesg.event == PM_EVENT_ON) {
                spin_unlock_irqrestore(ap->lock, flags);
                return;
        }
        spin_unlock_irqrestore(ap->lock, flags);

        WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

        /* tell ACPI we're suspending */
        rc = ata_acpi_on_suspend(ap);
        if (rc)
                goto out;

        /* suspend */
        ata_eh_freeze_port(ap);

        if (ap->ops->port_suspend)
                rc = ap->ops->port_suspend(ap, ap->pm_mesg);

        ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
        /* report result */
        spin_lock_irqsave(ap->lock, flags);

        ap->pflags &= ~ATA_PFLAG_PM_PENDING;
        if (rc == 0)
                ap->pflags |= ATA_PFLAG_SUSPENDED;
        else if (ap->pflags & ATA_PFLAG_FROZEN)
                ata_port_schedule_eh(ap);

        if (ap->pm_result) {
                *ap->pm_result = rc;
                ap->pm_result = NULL;
        }

        spin_unlock_irqrestore(ap->lock, flags);

        return;
}
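
/* Note the failure handling above: if suspend fails while the port is
 * frozen, EH is scheduled again so the port gets thawed and recovered
 * instead of staying dead across the aborted suspend.
 */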

/**
 * ata_eh_handle_port_resume - perform port resume operation
 * @ap: port to resume
 *
 * Resume @ap.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
        struct ata_link *link;
        struct ata_device *dev;
        unsigned long flags;
        int rc = 0;

        /* are we resuming? */
        spin_lock_irqsave(ap->lock, flags);
        if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
            ap->pm_mesg.event != PM_EVENT_ON) {
                spin_unlock_irqrestore(ap->lock, flags);
                return;
        }
        spin_unlock_irqrestore(ap->lock, flags);

        WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

        /*
         * Error timestamps are in jiffies which doesn't run while
         * suspended, and PHY events during resume aren't too uncommon.
         * When the two are combined, it can lead to unnecessary speed
         * downs if the machine is suspended and resumed repeatedly.
         * Clear error history.
         */
        ata_for_each_link(link, ap, HOST_FIRST)
                ata_for_each_dev(dev, link, ALL)
                        ata_ering_clear(&dev->ering);

        ata_acpi_set_state(ap, PMSG_ON);

        if (ap->ops->port_resume)
                rc = ap->ops->port_resume(ap);

        /* tell ACPI that we're resuming */
        ata_acpi_on_resume(ap);

        /* report result */
        spin_lock_irqsave(ap->lock, flags);
        ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
        if (ap->pm_result) {
                *ap->pm_result = rc;
                ap->pm_result = NULL;
        }
        spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */