drivers/scsi/scsi_error.c
1 /*
2  *  scsi_error.c Copyright (C) 1997 Eric Youngdale
3  *
4  *  SCSI error/timeout handling
5  *      Initial versions: Eric Youngdale.  Based upon conversations with
6  *                        Leonard Zubkoff and David Miller at Linux Expo,
7  *                        ideas originating from all over the place.
8  *
9  *      Restructured scsi_unjam_host and associated functions.
10  *      September 04, 2002 Mike Anderson (andmike@us.ibm.com)
11  *
12  *      Forward port of Russell King's (rmk@arm.linux.org.uk) changes and
13  *      minor cleanups.
14  *      September 30, 2002 Mike Anderson (andmike@us.ibm.com)
15  */
16
17 #include <linux/module.h>
18 #include <linux/sched.h>
19 #include <linux/gfp.h>
20 #include <linux/timer.h>
21 #include <linux/string.h>
22 #include <linux/kernel.h>
23 #include <linux/freezer.h>
24 #include <linux/kthread.h>
25 #include <linux/interrupt.h>
26 #include <linux/blkdev.h>
27 #include <linux/delay.h>
28 #include <linux/jiffies.h>
29
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_cmnd.h>
32 #include <scsi/scsi_dbg.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_driver.h>
35 #include <scsi/scsi_eh.h>
36 #include <scsi/scsi_common.h>
37 #include <scsi/scsi_transport.h>
38 #include <scsi/scsi_host.h>
39 #include <scsi/scsi_ioctl.h>
40 #include <scsi/scsi_dh.h>
41 #include <scsi/scsi_devinfo.h>
42 #include <scsi/sg.h>
43
44 #include "scsi_priv.h"
45 #include "scsi_logging.h"
46 #include "scsi_transport_api.h"
47
48 #include <trace/events/scsi.h>
49
50 #include <asm/unaligned.h>
51
52 static void scsi_eh_done(struct scsi_cmnd *scmd);
53
54 /*
55  * These should *probably* be handled by the host itself.
56  * Since it is allowed to sleep, it probably should.
57  */
58 #define BUS_RESET_SETTLE_TIME   (10)
59 #define HOST_RESET_SETTLE_TIME  (10)
60
61 static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
62 static int scsi_try_to_abort_cmd(struct scsi_host_template *,
63                                  struct scsi_cmnd *);
64
65 void scsi_eh_wakeup(struct Scsi_Host *shost)
66 {
67         lockdep_assert_held(shost->host_lock);
68
69         if (scsi_host_busy(shost) == shost->host_failed) {
70                 trace_scsi_eh_wakeup(shost);
71                 wake_up_process(shost->ehandler);
72                 SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
73                         "Waking error handler thread\n"));
74         }
75 }
76
77 /**
78  * scsi_schedule_eh - schedule EH for SCSI host
79  * @shost:      SCSI host to invoke error handling on.
80  *
81  * Schedule SCSI EH without scmd.
82  */
83 void scsi_schedule_eh(struct Scsi_Host *shost)
84 {
85         unsigned long flags;
86
87         spin_lock_irqsave(shost->host_lock, flags);
88
89         if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
90             scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
91                 shost->host_eh_scheduled++;
92                 scsi_eh_wakeup(shost);
93         }
94
95         spin_unlock_irqrestore(shost->host_lock, flags);
96 }
97 EXPORT_SYMBOL_GPL(scsi_schedule_eh);
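
/*
 * Example (editorial sketch, not part of the original file): a low-level
 * driver that detects a fatal adapter condition outside of any single
 * command's context can kick off host-level recovery with
 * scsi_schedule_eh().  "struct my_hba" and my_hba_is_dead() below are
 * hypothetical driver-private names.
 *
 *	static void my_hba_check_health(struct my_hba *hba)
 *	{
 *		if (my_hba_is_dead(hba))
 *			scsi_schedule_eh(hba->shost);
 *	}
 */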
98
99 static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
100 {
101         if (!shost->last_reset || shost->eh_deadline == -1)
102                 return 0;
103
104         /*
105          * 32bit accesses are guaranteed to be atomic
106          * (on all supported architectures), so instead
107          * of using a spinlock we can simply double-check whether
108          * eh_deadline has been set to 'off' during the
109          * time_before() call.
110          */
111         if (time_before(jiffies, shost->last_reset + shost->eh_deadline) &&
112             shost->eh_deadline > -1)
113                 return 0;
114
115         return 1;
116 }
117
118 /**
119  * scmd_eh_abort_handler - Handle command aborts
120  * @work:       command to be aborted.
121  *
122  * Note: this function must be called only for a command that has timed out.
123  * Because the block layer marks a request as complete before it calls
124  * scsi_times_out(), a .scsi_done() call from the LLD for a command that has
125  * timed out does not have any effect. Hence it is safe to call
126  * scsi_finish_command() from this function.
127  */
128 void
129 scmd_eh_abort_handler(struct work_struct *work)
130 {
131         struct scsi_cmnd *scmd =
132                 container_of(work, struct scsi_cmnd, abort_work.work);
133         struct scsi_device *sdev = scmd->device;
134         int rtn;
135
136         if (scsi_host_eh_past_deadline(sdev->host)) {
137                 SCSI_LOG_ERROR_RECOVERY(3,
138                         scmd_printk(KERN_INFO, scmd,
139                                     "eh timeout, not aborting\n"));
140         } else {
141                 SCSI_LOG_ERROR_RECOVERY(3,
142                         scmd_printk(KERN_INFO, scmd,
143                                     "aborting command\n"));
144                 rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
145                 if (rtn == SUCCESS) {
146                         set_host_byte(scmd, DID_TIME_OUT);
147                         if (scsi_host_eh_past_deadline(sdev->host)) {
148                                 SCSI_LOG_ERROR_RECOVERY(3,
149                                         scmd_printk(KERN_INFO, scmd,
150                                                     "eh timeout, not retrying "
151                                                     "aborted command\n"));
152                         } else if (!scsi_noretry_cmd(scmd) &&
153                             (++scmd->retries <= scmd->allowed)) {
154                                 SCSI_LOG_ERROR_RECOVERY(3,
155                                         scmd_printk(KERN_WARNING, scmd,
156                                                     "retry aborted command\n"));
157                                 scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
158                                 return;
159                         } else {
160                                 SCSI_LOG_ERROR_RECOVERY(3,
161                                         scmd_printk(KERN_WARNING, scmd,
162                                                     "finish aborted command\n"));
163                                 scsi_finish_command(scmd);
164                                 return;
165                         }
166                 } else {
167                         SCSI_LOG_ERROR_RECOVERY(3,
168                                 scmd_printk(KERN_INFO, scmd,
169                                             "cmd abort %s\n",
170                                             (rtn == FAST_IO_FAIL) ?
171                                             "not sent" : "failed"));
172                 }
173         }
174
175         scsi_eh_scmd_add(scmd);
176 }
177
178 /**
179  * scsi_abort_command - schedule a command abort
180  * @scmd:       scmd to abort.
181  *
182  * We only need to abort commands after a command timeout
183  */
184 static int
185 scsi_abort_command(struct scsi_cmnd *scmd)
186 {
187         struct scsi_device *sdev = scmd->device;
188         struct Scsi_Host *shost = sdev->host;
189         unsigned long flags;
190
191         if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
192                 /*
193                  * Retry after abort failed, escalate to next level.
194                  */
195                 SCSI_LOG_ERROR_RECOVERY(3,
196                         scmd_printk(KERN_INFO, scmd,
197                                     "previous abort failed\n"));
198                 BUG_ON(delayed_work_pending(&scmd->abort_work));
199                 return FAILED;
200         }
201
202         spin_lock_irqsave(shost->host_lock, flags);
203         if (shost->eh_deadline != -1 && !shost->last_reset)
204                 shost->last_reset = jiffies;
205         spin_unlock_irqrestore(shost->host_lock, flags);
206
207         scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
208         SCSI_LOG_ERROR_RECOVERY(3,
209                 scmd_printk(KERN_INFO, scmd, "abort scheduled\n"));
210         queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100);
211         return SUCCESS;
212 }
213
214 /**
215  * scsi_eh_reset - call into ->eh_reset to reset internal counters
216  * @scmd:       scmd to run eh on.
217  *
218  * The scsi driver might be carrying internal state about the
219  * devices, so we need to call into the driver to reset the
220  * internal state once the error handler is started.
221  */
222 static void scsi_eh_reset(struct scsi_cmnd *scmd)
223 {
224         if (!blk_rq_is_passthrough(scmd->request)) {
225                 struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
226                 if (sdrv->eh_reset)
227                         sdrv->eh_reset(scmd);
228         }
229 }
230
231 static void scsi_eh_inc_host_failed(struct rcu_head *head)
232 {
233         struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
234         struct Scsi_Host *shost = scmd->device->host;
235         unsigned long flags;
236
237         spin_lock_irqsave(shost->host_lock, flags);
238         shost->host_failed++;
239         scsi_eh_wakeup(shost);
240         spin_unlock_irqrestore(shost->host_lock, flags);
241 }
242
243 /**
244  * scsi_eh_scmd_add - add scsi cmd to error handling.
245  * @scmd:       scmd to run eh on.
246  */
247 void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
248 {
249         struct Scsi_Host *shost = scmd->device->host;
250         unsigned long flags;
251         int ret;
252
253         WARN_ON_ONCE(!shost->ehandler);
254
255         spin_lock_irqsave(shost->host_lock, flags);
256         if (scsi_host_set_state(shost, SHOST_RECOVERY)) {
257                 ret = scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY);
258                 WARN_ON_ONCE(ret);
259         }
260         if (shost->eh_deadline != -1 && !shost->last_reset)
261                 shost->last_reset = jiffies;
262
263         scsi_eh_reset(scmd);
264         list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
265         spin_unlock_irqrestore(shost->host_lock, flags);
266         /*
267          * Ensure that all tasks observe the host state change before the
268          * host_failed change.
269          */
270         call_rcu(&scmd->rcu, scsi_eh_inc_host_failed);
271 }
272
273 /**
274  * scsi_times_out - Timeout function for normal scsi commands.
275  * @req:        request that is timing out.
276  *
277  * Notes:
278  *     We do not need to lock this.  There is the potential for a race
279  *     only in that the normal completion handling might run, but if the
280  *     normal completion function determines that the timer has already
281  *     fired, then it mustn't do anything.
282  */
283 enum blk_eh_timer_return scsi_times_out(struct request *req)
284 {
285         struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
286         enum blk_eh_timer_return rtn = BLK_EH_DONE;
287         struct Scsi_Host *host = scmd->device->host;
288
289         trace_scsi_dispatch_cmd_timeout(scmd);
290         scsi_log_completion(scmd, TIMEOUT_ERROR);
291
292         if (host->eh_deadline != -1 && !host->last_reset)
293                 host->last_reset = jiffies;
294
295         if (host->hostt->eh_timed_out)
296                 rtn = host->hostt->eh_timed_out(scmd);
297
298         if (rtn == BLK_EH_DONE) {
299                 /*
300                  * Set the command to complete first in order to prevent a real
301                  * completion from releasing the command while error handling
302                  * is using it. If the command was already completed, then the
303                  * lower level driver beat the timeout handler, and it is safe
304                  * to return without escalating error recovery.
305                  *
306                  * If timeout handling lost the race to a real completion, the
307                  * block layer may ignore that due to a fake timeout injection,
308                  * so return RESET_TIMER to allow error handling another shot
309                  * at this command.
310                  */
311                 if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state))
312                         return BLK_EH_RESET_TIMER;
313                 if (scsi_abort_command(scmd) != SUCCESS) {
314                         set_host_byte(scmd, DID_TIME_OUT);
315                         scsi_eh_scmd_add(scmd);
316                 }
317         }
318
319         return rtn;
320 }
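
/*
 * Example (editorial sketch, not part of the original file): a driver's
 * eh_timed_out() hook, as called above, may ask the block layer to restart
 * the timer instead of letting the mid-layer escalate, for instance while
 * the hardware is still making progress.  my_cmd_still_in_flight() is a
 * hypothetical driver helper.
 *
 *	static enum blk_eh_timer_return my_eh_timed_out(struct scsi_cmnd *scmd)
 *	{
 *		if (my_cmd_still_in_flight(scmd))
 *			return BLK_EH_RESET_TIMER;
 *
 *		return BLK_EH_DONE;
 *	}
 */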
321
322 /**
323  * scsi_block_when_processing_errors - Prevent cmds from being queued.
324  * @sdev:       Device on which we are performing recovery.
325  *
326  * Description:
327  *     We block until the host is out of error recovery, and then check to
328  *     see whether the host or the device is offline.
329  *
330  * Return value:
331  *     0 when dev was taken offline by error recovery; 1 when OK to proceed.
332  */
333 int scsi_block_when_processing_errors(struct scsi_device *sdev)
334 {
335         int online;
336
337         wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
338
339         online = scsi_device_online(sdev);
340
341         return online;
342 }
343 EXPORT_SYMBOL(scsi_block_when_processing_errors);
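
/*
 * Example (editorial sketch, not part of the original file): upper-level
 * drivers typically call scsi_block_when_processing_errors() at the start
 * of their open/ioctl paths so that no new commands are issued while error
 * recovery owns the host.  my_ulp_open() is a hypothetical caller.
 *
 *	static int my_ulp_open(struct scsi_device *sdev)
 *	{
 *		if (!scsi_block_when_processing_errors(sdev))
 *			return -ENODEV;
 *
 *		return 0;
 *	}
 */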
344
345 #ifdef CONFIG_SCSI_LOGGING
346 /**
347  * scsi_eh_prt_fail_stats - Log info on failures.
348  * @shost:      scsi host being recovered.
349  * @work_q:     Queue of scsi cmds to process.
350  */
351 static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
352                                           struct list_head *work_q)
353 {
354         struct scsi_cmnd *scmd;
355         struct scsi_device *sdev;
356         int total_failures = 0;
357         int cmd_failed = 0;
358         int cmd_cancel = 0;
359         int devices_failed = 0;
360
361         shost_for_each_device(sdev, shost) {
362                 list_for_each_entry(scmd, work_q, eh_entry) {
363                         if (scmd->device == sdev) {
364                                 ++total_failures;
365                                 if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
366                                         ++cmd_cancel;
367                                 else
368                                         ++cmd_failed;
369                         }
370                 }
371
372                 if (cmd_cancel || cmd_failed) {
373                         SCSI_LOG_ERROR_RECOVERY(3,
374                                 shost_printk(KERN_INFO, shost,
375                                             "%s: cmds failed: %d, cancel: %d\n",
376                                             __func__, cmd_failed,
377                                             cmd_cancel));
378                         cmd_cancel = 0;
379                         cmd_failed = 0;
380                         ++devices_failed;
381                 }
382         }
383
384         SCSI_LOG_ERROR_RECOVERY(2, shost_printk(KERN_INFO, shost,
385                                    "Total of %d commands on %d"
386                                    " devices require eh work\n",
387                                    total_failures, devices_failed));
388 }
389 #endif
390
391 /**
392  * scsi_report_lun_change - Set flag on all *other* devices on the same target
393  *                          to indicate that a UNIT ATTENTION is expected.
394  * @sdev:       Device reporting the UNIT ATTENTION
395  */
396 static void scsi_report_lun_change(struct scsi_device *sdev)
397 {
398         sdev->sdev_target->expecting_lun_change = 1;
399 }
400
401 /**
402  * scsi_report_sense - Examine scsi sense information and log messages for
403  *                     certain conditions, also issue uevents for some of them.
404  * @sdev:       Device reporting the sense code
405  * @sshdr:      sshdr to be examined
406  */
407 static void scsi_report_sense(struct scsi_device *sdev,
408                               struct scsi_sense_hdr *sshdr)
409 {
410         enum scsi_device_event evt_type = SDEV_EVT_MAXBITS;     /* i.e. none */
411
412         if (sshdr->sense_key == UNIT_ATTENTION) {
413                 if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) {
414                         evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED;
415                         sdev_printk(KERN_WARNING, sdev,
416                                     "Inquiry data has changed");
417                 } else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) {
418                         evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
419                         scsi_report_lun_change(sdev);
420                         sdev_printk(KERN_WARNING, sdev,
421                                     "Warning! Received an indication that the "
422                                     "LUN assignments on this target have "
423                                     "changed. The Linux SCSI layer does not "
424                                     "automatically remap LUN assignments.\n");
425                 } else if (sshdr->asc == 0x3f)
426                         sdev_printk(KERN_WARNING, sdev,
427                                     "Warning! Received an indication that the "
428                                     "operating parameters on this target have "
429                                     "changed. The Linux SCSI layer does not "
430                                     "automatically adjust these parameters.\n");
431
432                 if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) {
433                         evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED;
434                         sdev_printk(KERN_WARNING, sdev,
435                                     "Warning! Received an indication that the "
436                                     "LUN reached a thin provisioning soft "
437                                     "threshold.\n");
438                 }
439
440                 if (sshdr->asc == 0x29) {
441                         evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED;
442                         sdev_printk(KERN_WARNING, sdev,
443                                     "Power-on or device reset occurred\n");
444                 }
445
446                 if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
447                         evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
448                         sdev_printk(KERN_WARNING, sdev,
449                                     "Mode parameters changed");
450                 } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x06) {
451                         evt_type = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED;
452                         sdev_printk(KERN_WARNING, sdev,
453                                     "Asymmetric access state changed");
454                 } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
455                         evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
456                         sdev_printk(KERN_WARNING, sdev,
457                                     "Capacity data has changed");
458                 } else if (sshdr->asc == 0x2a)
459                         sdev_printk(KERN_WARNING, sdev,
460                                     "Parameters changed");
461         }
462
463         if (evt_type != SDEV_EVT_MAXBITS) {
464                 set_bit(evt_type, sdev->pending_events);
465                 schedule_work(&sdev->event_work);
466         }
467 }
468
469 /**
470  * scsi_check_sense - Examine scsi cmd sense
471  * @scmd:       Cmd to have sense checked.
472  *
473  * Return value:
474  *      SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
475  *
476  * Notes:
477  *      When a deferred error is detected the current command has
478  *      not been executed and needs retrying.
479  */
480 int scsi_check_sense(struct scsi_cmnd *scmd)
481 {
482         struct scsi_device *sdev = scmd->device;
483         struct scsi_sense_hdr sshdr;
484
485         if (!scsi_command_normalize_sense(scmd, &sshdr))
486                 return FAILED;  /* no valid sense data */
487
488         scsi_report_sense(sdev, &sshdr);
489
490         if (scsi_sense_is_deferred(&sshdr))
491                 return NEEDS_RETRY;
492
493         if (sdev->handler && sdev->handler->check_sense) {
494                 int rc;
495
496                 rc = sdev->handler->check_sense(sdev, &sshdr);
497                 if (rc != SCSI_RETURN_NOT_HANDLED)
498                         return rc;
499                 /* handler does not care. Drop down to default handling */
500         }
501
502         if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
503                 /*
504                  * nasty: for mid-layer issued TURs, we need to return the
505                  * actual sense data without any recovery attempt.  For eh
506                  * issued ones, we need to try to recover and interpret
507                  * issued ones, we need to try to recover and interpret the sense ourselves.
508                 return SUCCESS;
509
510         /*
511          * Previous logic looked for FILEMARK, EOM or ILI which are
512          * mainly associated with tapes and returned SUCCESS.
513          */
514         if (sshdr.response_code == 0x70) {
515                 /* fixed format */
516                 if (scmd->sense_buffer[2] & 0xe0)
517                         return SUCCESS;
518         } else {
519                 /*
520                  * descriptor format: look for "stream commands sense data
521                  * descriptor" (see SSC-3). Assume single sense data
522                  * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
523                  */
524                 if ((sshdr.additional_length > 3) &&
525                     (scmd->sense_buffer[8] == 0x4) &&
526                     (scmd->sense_buffer[11] & 0xe0))
527                         return SUCCESS;
528         }
529
530         switch (sshdr.sense_key) {
531         case NO_SENSE:
532                 return SUCCESS;
533         case RECOVERED_ERROR:
534                 return /* soft_error */ SUCCESS;
535
536         case ABORTED_COMMAND:
537                 if (sshdr.asc == 0x10) /* DIF */
538                         return SUCCESS;
539
540                 if (sshdr.asc == 0x44 && sdev->sdev_bflags & BLIST_RETRY_ITF)
541                         return ADD_TO_MLQUEUE;
542                 if (sshdr.asc == 0xc1 && sshdr.ascq == 0x01 &&
543                     sdev->sdev_bflags & BLIST_RETRY_ASC_C1)
544                         return ADD_TO_MLQUEUE;
545
546                 return NEEDS_RETRY;
547         case NOT_READY:
548         case UNIT_ATTENTION:
549                 /*
550                  * if we are expecting a cc/ua because of a bus reset that we
551                  * performed, treat this just as a retry.  otherwise this is
552                  * information that we should pass up to the upper-level driver
553                  * so that we can deal with it there.
554                  */
555                 if (scmd->device->expecting_cc_ua) {
556                         /*
557                          * Because some devices do not queue unit
558                          * attentions correctly, we carefully check
559                          * additional sense code and qualifier so as
560                          * not to squash media change unit attention.
561                          */
562                         if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) {
563                                 scmd->device->expecting_cc_ua = 0;
564                                 return NEEDS_RETRY;
565                         }
566                 }
567                 /*
568                  * we might also expect a cc/ua if another LUN on the target
569                  * reported a UA with an ASC/ASCQ of 3F 0E -
570                  * REPORTED LUNS DATA HAS CHANGED.
571                  */
572                 if (scmd->device->sdev_target->expecting_lun_change &&
573                     sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
574                         return NEEDS_RETRY;
575                 /*
576                  * if the device is in the process of becoming ready, we
577                  * should retry.
578                  */
579                 if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
580                         return NEEDS_RETRY;
581                 /*
582                  * if the device is not started, we need to wake
583                  * the error handler to start the motor
584                  */
585                 if (scmd->device->allow_restart &&
586                     (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
587                         return FAILED;
588                 /*
589                  * Pass the UA upwards for a determination in the completion
590                  * functions.
591                  */
592                 return SUCCESS;
593
594                 /* these are not supported */
595         case DATA_PROTECT:
596                 if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
597                         /* Thin provisioning hard threshold reached */
598                         set_host_byte(scmd, DID_ALLOC_FAILURE);
599                         return SUCCESS;
600                 }
601                 /* FALLTHROUGH */
602         case COPY_ABORTED:
603         case VOLUME_OVERFLOW:
604         case MISCOMPARE:
605         case BLANK_CHECK:
606                 set_host_byte(scmd, DID_TARGET_FAILURE);
607                 return SUCCESS;
608
609         case MEDIUM_ERROR:
610                 if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
611                     sshdr.asc == 0x13 || /* AMNF DATA FIELD */
612                     sshdr.asc == 0x14) { /* RECORD NOT FOUND */
613                         set_host_byte(scmd, DID_MEDIUM_ERROR);
614                         return SUCCESS;
615                 }
616                 return NEEDS_RETRY;
617
618         case HARDWARE_ERROR:
619                 if (scmd->device->retry_hwerror)
620                         return ADD_TO_MLQUEUE;
621                 else
622                         set_host_byte(scmd, DID_TARGET_FAILURE);
623                 /* FALLTHROUGH */
624
625         case ILLEGAL_REQUEST:
626                 if (sshdr.asc == 0x20 || /* Invalid command operation code */
627                     sshdr.asc == 0x21 || /* Logical block address out of range */
628                     sshdr.asc == 0x22 || /* Invalid function */
629                     sshdr.asc == 0x24 || /* Invalid field in cdb */
630                     sshdr.asc == 0x26 || /* Parameter value invalid */
631                     sshdr.asc == 0x27) { /* Write protected */
632                         set_host_byte(scmd, DID_TARGET_FAILURE);
633                 }
634                 return SUCCESS;
635
636         default:
637                 return SUCCESS;
638         }
639 }
640 EXPORT_SYMBOL_GPL(scsi_check_sense);
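
/*
 * Example (editorial sketch, not part of the original file): a device
 * handler can claim a sense code via ->check_sense() before the default
 * interpretation above runs.  The ASC/ASCQ pair below is purely
 * illustrative, not taken from a real device.
 *
 *	static int my_dh_check_sense(struct scsi_device *sdev,
 *				     struct scsi_sense_hdr *sshdr)
 *	{
 *		if (sshdr->sense_key == UNIT_ATTENTION &&
 *		    sshdr->asc == 0x94 && sshdr->ascq == 0x01)
 *			return ADD_TO_MLQUEUE;
 *
 *		return SCSI_RETURN_NOT_HANDLED;
 *	}
 */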
641
642 static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
643 {
644         struct scsi_host_template *sht = sdev->host->hostt;
645         struct scsi_device *tmp_sdev;
646
647         if (!sht->track_queue_depth ||
648             sdev->queue_depth >= sdev->max_queue_depth)
649                 return;
650
651         if (time_before(jiffies,
652             sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
653                 return;
654
655         if (time_before(jiffies,
656             sdev->last_queue_full_time + sdev->queue_ramp_up_period))
657                 return;
658
659         /*
660          * Walk all devices of a target and do
661          * ramp up on them.
662          */
663         shost_for_each_device(tmp_sdev, sdev->host) {
664                 if (tmp_sdev->channel != sdev->channel ||
665                     tmp_sdev->id != sdev->id ||
666                     tmp_sdev->queue_depth == sdev->max_queue_depth)
667                         continue;
668
669                 scsi_change_queue_depth(tmp_sdev, tmp_sdev->queue_depth + 1);
670                 sdev->last_queue_ramp_up = jiffies;
671         }
672 }
673
674 static void scsi_handle_queue_full(struct scsi_device *sdev)
675 {
676         struct scsi_host_template *sht = sdev->host->hostt;
677         struct scsi_device *tmp_sdev;
678
679         if (!sht->track_queue_depth)
680                 return;
681
682         shost_for_each_device(tmp_sdev, sdev->host) {
683                 if (tmp_sdev->channel != sdev->channel ||
684                     tmp_sdev->id != sdev->id)
685                         continue;
686                 /*
687                  * We do not know the number of commands that were at
688                  * the device when we got the queue full so we start
689                  * from the highest possible value and work our way down.
690                  */
691                 scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
692         }
693 }
694
695 /**
696  * scsi_eh_completed_normally - Disposition an eh cmd on return from LLD.
697  * @scmd:       SCSI cmd to examine.
698  *
699  * Notes:
700  *    This is *only* called when we are examining the status of commands
701  *    queued during error recovery.  The main difference here is that we
702  *    don't allow for the possibility of retries here, and we are a lot
703  *    more restrictive about what we consider acceptable.
704  */
705 static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
706 {
707         /*
708          * first check the host byte, to see if there is anything in there
709          * that would indicate what we need to do.
710          */
711         if (host_byte(scmd->result) == DID_RESET) {
712                 /*
713                  * rats.  we are already in the error handler, so we now
714                  * get to try and figure out what to do next.  if the sense
715                  * is valid, we have a pretty good idea of what to do.
716                  * if not, we mark it as FAILED.
717                  */
718                 return scsi_check_sense(scmd);
719         }
720         if (host_byte(scmd->result) != DID_OK)
721                 return FAILED;
722
723         /*
724          * next, check the message byte.
725          */
726         if (msg_byte(scmd->result) != COMMAND_COMPLETE)
727                 return FAILED;
728
729         /*
730          * now, check the status byte to see if this indicates
731          * anything special.
732          */
733         switch (status_byte(scmd->result)) {
734         case GOOD:
735                 scsi_handle_queue_ramp_up(scmd->device);
736                 /* FALLTHROUGH */
737         case COMMAND_TERMINATED:
738                 return SUCCESS;
739         case CHECK_CONDITION:
740                 return scsi_check_sense(scmd);
741         case CONDITION_GOOD:
742         case INTERMEDIATE_GOOD:
743         case INTERMEDIATE_C_GOOD:
744                 /*
745                  * who knows?  FIXME(eric)
746                  */
747                 return SUCCESS;
748         case RESERVATION_CONFLICT:
749                 if (scmd->cmnd[0] == TEST_UNIT_READY)
750                         /* it is a success, we probed the device and
751                          * found it */
752                         return SUCCESS;
753                 /* otherwise, we failed to send the command */
754                 return FAILED;
755         case QUEUE_FULL:
756                 scsi_handle_queue_full(scmd->device);
757                 /* fall through */
758         case BUSY:
759                 return NEEDS_RETRY;
760         default:
761                 return FAILED;
762         }
763         return FAILED;
764 }
765
766 /**
767  * scsi_eh_done - Completion function for error handling.
768  * @scmd:       Cmd that is done.
769  */
770 static void scsi_eh_done(struct scsi_cmnd *scmd)
771 {
772         struct completion *eh_action;
773
774         SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
775                         "%s result: %x\n", __func__, scmd->result));
776
777         eh_action = scmd->device->host->eh_action;
778         if (eh_action)
779                 complete(eh_action);
780 }
781
782 /**
783  * scsi_try_host_reset - ask host adapter to reset itself
784  * @scmd:       SCSI cmd to send host reset.
785  */
786 static int scsi_try_host_reset(struct scsi_cmnd *scmd)
787 {
788         unsigned long flags;
789         int rtn;
790         struct Scsi_Host *host = scmd->device->host;
791         struct scsi_host_template *hostt = host->hostt;
792
793         SCSI_LOG_ERROR_RECOVERY(3,
794                 shost_printk(KERN_INFO, host, "Snd Host RST\n"));
795
796         if (!hostt->eh_host_reset_handler)
797                 return FAILED;
798
799         rtn = hostt->eh_host_reset_handler(scmd);
800
801         if (rtn == SUCCESS) {
802                 if (!hostt->skip_settle_delay)
803                         ssleep(HOST_RESET_SETTLE_TIME);
804                 spin_lock_irqsave(host->host_lock, flags);
805                 scsi_report_bus_reset(host, scmd_channel(scmd));
806                 spin_unlock_irqrestore(host->host_lock, flags);
807         }
808
809         return rtn;
810 }
811
812 /**
813  * scsi_try_bus_reset - ask host to perform a bus reset
814  * @scmd:       SCSI cmd to send bus reset.
815  */
816 static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
817 {
818         unsigned long flags;
819         int rtn;
820         struct Scsi_Host *host = scmd->device->host;
821         struct scsi_host_template *hostt = host->hostt;
822
823         SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
824                 "%s: Snd Bus RST\n", __func__));
825
826         if (!hostt->eh_bus_reset_handler)
827                 return FAILED;
828
829         rtn = hostt->eh_bus_reset_handler(scmd);
830
831         if (rtn == SUCCESS) {
832                 if (!hostt->skip_settle_delay)
833                         ssleep(BUS_RESET_SETTLE_TIME);
834                 spin_lock_irqsave(host->host_lock, flags);
835                 scsi_report_bus_reset(host, scmd_channel(scmd));
836                 spin_unlock_irqrestore(host->host_lock, flags);
837         }
838
839         return rtn;
840 }
841
842 static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
843 {
844         sdev->was_reset = 1;
845         sdev->expecting_cc_ua = 1;
846 }
847
848 /**
849  * scsi_try_target_reset - Ask host to perform a target reset
850  * @scmd:       SCSI cmd used to send a target reset
851  *
852  * Notes:
853  *    There is no timeout for this operation.  If this operation is
854  *    unreliable for a given host, then the host itself needs to put a
855  *    timer on it, and set the host back to a consistent state prior to
856  *    returning.
857  */
858 static int scsi_try_target_reset(struct scsi_cmnd *scmd)
859 {
860         unsigned long flags;
861         int rtn;
862         struct Scsi_Host *host = scmd->device->host;
863         struct scsi_host_template *hostt = host->hostt;
864
865         if (!hostt->eh_target_reset_handler)
866                 return FAILED;
867
868         rtn = hostt->eh_target_reset_handler(scmd);
869         if (rtn == SUCCESS) {
870                 spin_lock_irqsave(host->host_lock, flags);
871                 __starget_for_each_device(scsi_target(scmd->device), NULL,
872                                           __scsi_report_device_reset);
873                 spin_unlock_irqrestore(host->host_lock, flags);
874         }
875
876         return rtn;
877 }
878
879 /**
880  * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
881  * @scmd:       SCSI cmd used to send BDR
882  *
883  * Notes:
884  *    There is no timeout for this operation.  If this operation is
885  *    unreliable for a given host, then the host itself needs to put a
886  *    timer on it, and set the host back to a consistent state prior to
887  *    returning.
888  */
889 static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
890 {
891         int rtn;
892         struct scsi_host_template *hostt = scmd->device->host->hostt;
893
894         if (!hostt->eh_device_reset_handler)
895                 return FAILED;
896
897         rtn = hostt->eh_device_reset_handler(scmd);
898         if (rtn == SUCCESS)
899                 __scsi_report_device_reset(scmd->device, NULL);
900         return rtn;
901 }
902
903 /**
904  * scsi_try_to_abort_cmd - Ask host to abort a SCSI command
905  * @hostt:      SCSI driver host template
906  * @scmd:       SCSI cmd to abort
907  *
908  * Return value:
909  *      SUCCESS, FAILED, or FAST_IO_FAIL
910  *
911  * Notes:
912  *    SUCCESS does not necessarily indicate that the command
913  *    has been aborted; it only indicates that the LLDD
914  *    has cleared all references to that command.
915  *    LLDDs should return FAILED only if an abort was required
916  *    but could not be executed. LLDDs should return FAST_IO_FAIL
917  *    if the device is temporarily unavailable (e.g. due to a
918  *    link down on FibreChannel)
919  */
920 static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt,
921                                  struct scsi_cmnd *scmd)
922 {
923         if (!hostt->eh_abort_handler)
924                 return FAILED;
925
926         return hostt->eh_abort_handler(scmd);
927 }
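
/*
 * Example (editorial sketch, not part of the original file): a minimal
 * eh_abort_handler() that honours the return-value contract documented
 * above.  my_port_is_down() and my_hw_abort() are hypothetical driver
 * helpers.
 *
 *	static int my_eh_abort_handler(struct scsi_cmnd *scmd)
 *	{
 *		struct my_hba *hba = shost_priv(scmd->device->host);
 *
 *		if (my_port_is_down(hba))
 *			return FAST_IO_FAIL;
 *
 *		return my_hw_abort(hba, scmd) ? FAILED : SUCCESS;
 *	}
 */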
928
929 static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
930 {
931         if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) != SUCCESS)
932                 if (scsi_try_bus_device_reset(scmd) != SUCCESS)
933                         if (scsi_try_target_reset(scmd) != SUCCESS)
934                                 if (scsi_try_bus_reset(scmd) != SUCCESS)
935                                         scsi_try_host_reset(scmd);
936 }
937
938 /**
939  * scsi_eh_prep_cmnd  - Save a scsi command info as part of error recovery
940  * @scmd:       SCSI command structure to hijack
941  * @ses:        structure to save restore information
942  * @cmnd:       CDB to send. Can be NULL if no new cmnd is needed
943  * @cmnd_size:  size in bytes of @cmnd (must be <= BLK_MAX_CDB)
944  * @sense_bytes: size of sense data to copy, or 0 (if != 0, @cmnd is ignored)
945  *
946  * This function is used to save scsi command information before re-execution
947  * as part of the error recovery process.  If @sense_bytes is 0 the command
948  * sent must be one that does not transfer any data.  If @sense_bytes != 0
949  * @cmnd is ignored and this function sets up a REQUEST_SENSE command
950  * and cmnd buffers to read @sense_bytes into @scmd->sense_buffer.
951  */
952 void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
953                         unsigned char *cmnd, int cmnd_size, unsigned sense_bytes)
954 {
955         struct scsi_device *sdev = scmd->device;
956
957         /*
958          * We need saved copies of a number of fields - this is because
959          * error handling may need to overwrite these with different values
960          * to run different commands, and once error handling is complete,
961          * we will need to restore these values prior to running the actual
962          * command.
963          */
964         ses->cmd_len = scmd->cmd_len;
965         ses->cmnd = scmd->cmnd;
966         ses->data_direction = scmd->sc_data_direction;
967         ses->sdb = scmd->sdb;
968         ses->next_rq = scmd->request->next_rq;
969         ses->result = scmd->result;
970         ses->underflow = scmd->underflow;
971         ses->prot_op = scmd->prot_op;
972         ses->eh_eflags = scmd->eh_eflags;
973
974         scmd->prot_op = SCSI_PROT_NORMAL;
975         scmd->eh_eflags = 0;
976         scmd->cmnd = ses->eh_cmnd;
977         memset(scmd->cmnd, 0, BLK_MAX_CDB);
978         memset(&scmd->sdb, 0, sizeof(scmd->sdb));
979         scmd->request->next_rq = NULL;
980         scmd->result = 0;
981
982         if (sense_bytes) {
983                 scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
984                                          sense_bytes);
985                 sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
986                             scmd->sdb.length);
987                 scmd->sdb.table.sgl = &ses->sense_sgl;
988                 scmd->sc_data_direction = DMA_FROM_DEVICE;
989                 scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
990                 scmd->cmnd[0] = REQUEST_SENSE;
991                 scmd->cmnd[4] = scmd->sdb.length;
992                 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
993         } else {
994                 scmd->sc_data_direction = DMA_NONE;
995                 if (cmnd) {
996                         BUG_ON(cmnd_size > BLK_MAX_CDB);
997                         memcpy(scmd->cmnd, cmnd, cmnd_size);
998                         scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
999                 }
1000         }
1001
1002         scmd->underflow = 0;
1003
1004         if (sdev->scsi_level <= SCSI_2 && sdev->scsi_level != SCSI_UNKNOWN)
1005                 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
1006                         (sdev->lun << 5 & 0xe0);
1007
1008         /*
1009          * Zero the sense buffer.  The scsi spec mandates that any
1010          * untransferred sense data should be interpreted as being zero.
1011          */
1012         memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1013 }
1014 EXPORT_SYMBOL(scsi_eh_prep_cmnd);
1015
1016 /**
1017  * scsi_eh_restore_cmnd  - Restore a scsi command info as part of error recovery
1018  * @scmd:       SCSI command structure to restore
1019  * @ses:        saved information from a corresponding call to scsi_eh_prep_cmnd
1020  *
1021  * Undo any damage done by the above scsi_eh_prep_cmnd().
1022  */
1023 void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
1024 {
1025         /*
1026          * Restore original data
1027          */
1028         scmd->cmd_len = ses->cmd_len;
1029         scmd->cmnd = ses->cmnd;
1030         scmd->sc_data_direction = ses->data_direction;
1031         scmd->sdb = ses->sdb;
1032         scmd->request->next_rq = ses->next_rq;
1033         scmd->result = ses->result;
1034         scmd->underflow = ses->underflow;
1035         scmd->prot_op = ses->prot_op;
1036         scmd->eh_eflags = ses->eh_eflags;
1037 }
1038 EXPORT_SYMBOL(scsi_eh_restore_cmnd);
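
/*
 * Example (editorial sketch, not part of the original file): the two
 * helpers above are always used as a pair around an internally issued
 * recovery command, as scsi_send_eh_cmnd() below does.  A stripped-down
 * caller looks roughly like this (submission and error handling omitted):
 *
 *	struct scsi_eh_save ses;
 *	unsigned char tur[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
 *
 *	scsi_eh_prep_cmnd(scmd, &ses, tur, sizeof(tur), 0);
 *	... queue scmd to the hardware and wait for it to complete ...
 *	scsi_eh_restore_cmnd(scmd, &ses);
 */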
1039
1040 /**
1041  * scsi_send_eh_cmnd  - submit a scsi command as part of error recovery
1042  * @scmd:       SCSI command structure to hijack
1043  * @cmnd:       CDB to send
1044  * @cmnd_size:  size in bytes of @cmnd
1045  * @timeout:    timeout for this request
1046  * @sense_bytes: size of sense data to copy or 0
1047  *
1048  * This function is used to send a scsi command down to a target device
1049  * as part of the error recovery process. See also scsi_eh_prep_cmnd() above.
1050  *
1051  * Return value:
1052  *    SUCCESS or FAILED or NEEDS_RETRY
1053  */
1054 static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
1055                              int cmnd_size, int timeout, unsigned sense_bytes)
1056 {
1057         struct scsi_device *sdev = scmd->device;
1058         struct Scsi_Host *shost = sdev->host;
1059         DECLARE_COMPLETION_ONSTACK(done);
1060         unsigned long timeleft = timeout;
1061         struct scsi_eh_save ses;
1062         const unsigned long stall_for = msecs_to_jiffies(100);
1063         int rtn;
1064
1065 retry:
1066         scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
1067         shost->eh_action = &done;
1068
1069         scsi_log_send(scmd);
1070         scmd->scsi_done = scsi_eh_done;
1071         rtn = shost->hostt->queuecommand(shost, scmd);
1072         if (rtn) {
1073                 if (timeleft > stall_for) {
1074                         scsi_eh_restore_cmnd(scmd, &ses);
1075                         timeleft -= stall_for;
1076                         msleep(jiffies_to_msecs(stall_for));
1077                         goto retry;
1078                 }
1079                 /* signal not to enter either branch of the if () below */
1080                 timeleft = 0;
1081                 rtn = FAILED;
1082         } else {
1083                 timeleft = wait_for_completion_timeout(&done, timeout);
1084                 rtn = SUCCESS;
1085         }
1086
1087         shost->eh_action = NULL;
1088
1089         scsi_log_completion(scmd, rtn);
1090
1091         SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1092                         "%s timeleft: %ld\n",
1093                         __func__, timeleft));
1094
1095         /*
1096          * If there is time left, scsi_eh_done() got called, and we will examine
1097          * the actual status codes to see whether the command actually did
1098          * complete normally, else if we have a zero return and no time left,
1099          * the command must still be pending, so abort it and return FAILED.
1100          * If we never actually managed to issue the command, because
1101          * ->queuecommand() kept returning non zero, use the rtn = FAILED
1102          * value above (so don't execute either branch of the if)
1103          */
1104         if (timeleft) {
1105                 rtn = scsi_eh_completed_normally(scmd);
1106                 SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1107                         "%s: scsi_eh_completed_normally %x\n", __func__, rtn));
1108
1109                 switch (rtn) {
1110                 case SUCCESS:
1111                 case NEEDS_RETRY:
1112                 case FAILED:
1113                         break;
1114                 case ADD_TO_MLQUEUE:
1115                         rtn = NEEDS_RETRY;
1116                         break;
1117                 default:
1118                         rtn = FAILED;
1119                         break;
1120                 }
1121         } else if (rtn != FAILED) {
1122                 scsi_abort_eh_cmnd(scmd);
1123                 rtn = FAILED;
1124         }
1125
1126         scsi_eh_restore_cmnd(scmd, &ses);
1127
1128         return rtn;
1129 }
1130
1131 /**
1132  * scsi_request_sense - Request sense data from a particular target.
1133  * @scmd:       SCSI cmd for request sense.
1134  *
1135  * Notes:
1136  *    Some hosts automatically obtain this information, others require
1137  *    that we obtain it on our own. This function will *not* return until
1138  *    the command either times out, or it completes.
1139  */
1140 static int scsi_request_sense(struct scsi_cmnd *scmd)
1141 {
1142         return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);
1143 }
1144
1145 static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
1146 {
1147         if (!blk_rq_is_passthrough(scmd->request)) {
1148                 struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
1149                 if (sdrv->eh_action)
1150                         rtn = sdrv->eh_action(scmd, rtn);
1151         }
1152         return rtn;
1153 }
1154
1155 /**
1156  * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
1157  * @scmd:       Original SCSI cmd that eh has finished.
1158  * @done_q:     Queue for processed commands.
1159  *
1160  * Notes:
1161  *    We don't want to use the normal command completion while we are
1162  *    still handling errors - it may cause other commands to be queued,
1163  *    and that would disturb what we are doing.  Thus we really want to
1164  *    keep a list of pending commands for final completion, and once we
1165  *    are ready to leave error handling we handle completion for real.
1166  */
1167 void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
1168 {
1169         list_move_tail(&scmd->eh_entry, done_q);
1170 }
1171 EXPORT_SYMBOL(scsi_eh_finish_cmd);
1172
1173 /**
1174  * scsi_eh_get_sense - Get device sense data.
1175  * @work_q:     Queue of commands to process.
1176  * @done_q:     Queue of processed commands.
1177  *
1178  * Description:
1179  *    See if we need to request sense information.  If so, then get it
1180  *    now, so we have a better idea of what to do.
1181  *
1182  * Notes:
1183  *    This has the unfortunate side effect that if a shost adapter does
1184  *    not automatically request sense information, we end up shutting
1185  *    it down before we request it.
1186  *
1187  *    All drivers should request sense information internally these days,
1188  *    so for now all I have to say is tough noogies if you end up in here.
1189  *
1190  *    XXX: Long term this code should go away, but that needs an audit of
1191  *         all LLDDs first.
1192  */
1193 int scsi_eh_get_sense(struct list_head *work_q,
1194                       struct list_head *done_q)
1195 {
1196         struct scsi_cmnd *scmd, *next;
1197         struct Scsi_Host *shost;
1198         int rtn;
1199
1200         /*
1201          * If SCSI_EH_ABORT_SCHEDULED has been set, it is timeout IO,
1202          * If SCSI_EH_ABORT_SCHEDULED has been set, the command has already
1203          * timed out and we should not request sense for it.
1204         list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1205                 if ((scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) ||
1206                     SCSI_SENSE_VALID(scmd))
1207                         continue;
1208
1209                 shost = scmd->device->host;
1210                 if (scsi_host_eh_past_deadline(shost)) {
1211                         SCSI_LOG_ERROR_RECOVERY(3,
1212                                 scmd_printk(KERN_INFO, scmd,
1213                                             "%s: skip request sense, past eh deadline\n",
1214                                              current->comm));
1215                         break;
1216                 }
1217                 if (status_byte(scmd->result) != CHECK_CONDITION)
1218                         /*
1219                          * don't request sense if there's no check condition
1220                          * status because the error we're processing isn't one
1221                          * that has a sense code (and some devices get
1222                          * confused by sense requests out of the blue)
1223                          */
1224                         continue;
1225
1226                 SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
1227                                                   "%s: requesting sense\n",
1228                                                   current->comm));
1229                 rtn = scsi_request_sense(scmd);
1230                 if (rtn != SUCCESS)
1231                         continue;
1232
1233                 SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1234                         "sense requested, result %x\n", scmd->result));
1235                 SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd));
1236
1237                 rtn = scsi_decide_disposition(scmd);
1238
1239                 /*
1240                  * if the result was normal, then just pass it along to the
1241                  * upper level.
1242                  */
1243                 if (rtn == SUCCESS)
1244                         /* we don't want this command reissued, just
1245                          * finished with the sense data, so set
1246                          * retries to the max allowed to ensure it
1247                          * won't get reissued */
1248                         scmd->retries = scmd->allowed;
1249                 else if (rtn != NEEDS_RETRY)
1250                         continue;
1251
1252                 scsi_eh_finish_cmd(scmd, done_q);
1253         }
1254
1255         return list_empty(work_q);
1256 }
1257 EXPORT_SYMBOL_GPL(scsi_eh_get_sense);
1258
1259 /**
1260  * scsi_eh_tur - Send TUR to device.
1261  * @scmd:       &scsi_cmnd to send TUR
1262  *
1263  * Return value:
1264  *    0 - Device is ready. 1 - Device NOT ready.
1265  */
1266 static int scsi_eh_tur(struct scsi_cmnd *scmd)
1267 {
1268         static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
1269         int retry_cnt = 1, rtn;
1270
1271 retry_tur:
1272         rtn = scsi_send_eh_cmnd(scmd, tur_command, 6,
1273                                 scmd->device->eh_timeout, 0);
1274
1275         SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1276                 "%s return: %x\n", __func__, rtn));
1277
1278         switch (rtn) {
1279         case NEEDS_RETRY:
1280                 if (retry_cnt--)
1281                         goto retry_tur;
1282                 /*FALLTHRU*/
1283         case SUCCESS:
1284                 return 0;
1285         default:
1286                 return 1;
1287         }
1288 }
1289
1290 /**
1291  * scsi_eh_test_devices - check if devices are responding from error recovery.
1292  * @cmd_list:   scsi commands in error recovery.
1293  * @work_q:     queue for commands which still need more error recovery
1294  * @done_q:     queue for commands which are finished
1295  * @try_stu:    whether a STU command should be tried in addition to TUR.
1296  *
1297  * Description:
1298  *    Tests if devices are in a working state.  Commands to devices now in
1299  *    a working state are sent to the done_q while commands to devices which
1300  *    are still failing to respond are returned to the work_q for more
1301  *    processing.
1302  **/
1303 static int scsi_eh_test_devices(struct list_head *cmd_list,
1304                                 struct list_head *work_q,
1305                                 struct list_head *done_q, int try_stu)
1306 {
1307         struct scsi_cmnd *scmd, *next;
1308         struct scsi_device *sdev;
1309         int finish_cmds;
1310
1311         while (!list_empty(cmd_list)) {
1312                 scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
1313                 sdev = scmd->device;
1314
1315                 if (!try_stu) {
1316                         if (scsi_host_eh_past_deadline(sdev->host)) {
1317                                 /* Push items back onto work_q */
1318                                 list_splice_init(cmd_list, work_q);
1319                                 SCSI_LOG_ERROR_RECOVERY(3,
1320                                         sdev_printk(KERN_INFO, sdev,
1321                                                     "%s: skip test device, past eh deadline\n",
1322                                                     current->comm));
1323                                 break;
1324                         }
1325                 }
1326
1327                 finish_cmds = !scsi_device_online(scmd->device) ||
1328                         (try_stu && !scsi_eh_try_stu(scmd) &&
1329                          !scsi_eh_tur(scmd)) ||
1330                         !scsi_eh_tur(scmd);
1331
1332                 list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
1333                         if (scmd->device == sdev) {
1334                                 if (finish_cmds &&
1335                                     (try_stu ||
1336                                      scsi_eh_action(scmd, SUCCESS) == SUCCESS))
1337                                         scsi_eh_finish_cmd(scmd, done_q);
1338                                 else
1339                                         list_move_tail(&scmd->eh_entry, work_q);
1340                         }
1341         }
1342         return list_empty(work_q);
1343 }
1344
1345 /**
1346  * scsi_eh_try_stu - Send START_UNIT to device.
1347  * @scmd:       &scsi_cmnd to send START_UNIT
1348  *
1349  * Return value:
1350  *    0 - Device is ready. 1 - Device NOT ready.
1351  */
1352 static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
1353 {
1354         static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
1355
1356         if (scmd->device->allow_restart) {
1357                 int i, rtn = NEEDS_RETRY;
1358
1359                 for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
1360                         rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
1361
1362                 if (rtn == SUCCESS)
1363                         return 0;
1364         }
1365
1366         return 1;
1367 }
1368
1369 /**
1370  * scsi_eh_stu - send START_UNIT if needed
1371  * @shost:      &scsi host being recovered.
1372  * @work_q:     &list_head for pending commands.
1373  * @done_q:     &list_head for processed commands.
1374  *
1375  * Notes:
1376  *    If commands are failing due to "not ready, initializing command required",
1377  *    try revalidating the device, which will end up sending a start unit.
1378  */
1379 static int scsi_eh_stu(struct Scsi_Host *shost,
1380                               struct list_head *work_q,
1381                               struct list_head *done_q)
1382 {
1383         struct scsi_cmnd *scmd, *stu_scmd, *next;
1384         struct scsi_device *sdev;
1385
1386         shost_for_each_device(sdev, shost) {
1387                 if (scsi_host_eh_past_deadline(shost)) {
1388                         SCSI_LOG_ERROR_RECOVERY(3,
1389                                 sdev_printk(KERN_INFO, sdev,
1390                                             "%s: skip START_UNIT, past eh deadline\n",
1391                                             current->comm));
1392                         break;
1393                 }
1394                 stu_scmd = NULL;
1395                 list_for_each_entry(scmd, work_q, eh_entry)
1396                         if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
1397                             scsi_check_sense(scmd) == FAILED) {
1398                                 stu_scmd = scmd;
1399                                 break;
1400                         }
1401
1402                 if (!stu_scmd)
1403                         continue;
1404
1405                 SCSI_LOG_ERROR_RECOVERY(3,
1406                         sdev_printk(KERN_INFO, sdev,
1407                                      "%s: Sending START_UNIT\n",
1408                                     current->comm));
1409
1410                 if (!scsi_eh_try_stu(stu_scmd)) {
1411                         if (!scsi_device_online(sdev) ||
1412                             !scsi_eh_tur(stu_scmd)) {
1413                                 list_for_each_entry_safe(scmd, next,
1414                                                           work_q, eh_entry) {
1415                                         if (scmd->device == sdev &&
1416                                             scsi_eh_action(scmd, SUCCESS) == SUCCESS)
1417                                                 scsi_eh_finish_cmd(scmd, done_q);
1418                                 }
1419                         }
1420                 } else {
1421                         SCSI_LOG_ERROR_RECOVERY(3,
1422                                 sdev_printk(KERN_INFO, sdev,
1423                                             "%s: START_UNIT failed\n",
1424                                             current->comm));
1425                 }
1426         }
1427
1428         return list_empty(work_q);
1429 }
1430
1431
1432 /**
1433  * scsi_eh_bus_device_reset - send bdr if needed
1434  * @shost:      scsi host being recovered.
1435  * @work_q:     &list_head for pending commands.
1436  * @done_q:     &list_head for processed commands.
1437  *
1438  * Notes:
1439  *    Try a bus device reset.  Still, look to see whether we have multiple
1440  *    devices that are jammed or not - if we have multiple devices, it
1441  *    makes no sense to try bus_device_reset - we really would need to try
1442  *    a bus_reset instead.
1443  */
1444 static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
1445                                     struct list_head *work_q,
1446                                     struct list_head *done_q)
1447 {
1448         struct scsi_cmnd *scmd, *bdr_scmd, *next;
1449         struct scsi_device *sdev;
1450         int rtn;
1451
1452         shost_for_each_device(sdev, shost) {
1453                 if (scsi_host_eh_past_deadline(shost)) {
1454                         SCSI_LOG_ERROR_RECOVERY(3,
1455                                 sdev_printk(KERN_INFO, sdev,
1456                                             "%s: skip BDR, past eh deadline\n",
1457                                              current->comm));
1458                         break;
1459                 }
1460                 bdr_scmd = NULL;
1461                 list_for_each_entry(scmd, work_q, eh_entry)
1462                         if (scmd->device == sdev) {
1463                                 bdr_scmd = scmd;
1464                                 break;
1465                         }
1466
1467                 if (!bdr_scmd)
1468                         continue;
1469
1470                 SCSI_LOG_ERROR_RECOVERY(3,
1471                         sdev_printk(KERN_INFO, sdev,
1472                                      "%s: Sending BDR\n", current->comm));
1473                 rtn = scsi_try_bus_device_reset(bdr_scmd);
1474                 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1475                         if (!scsi_device_online(sdev) ||
1476                             rtn == FAST_IO_FAIL ||
1477                             !scsi_eh_tur(bdr_scmd)) {
1478                                 list_for_each_entry_safe(scmd, next,
1479                                                          work_q, eh_entry) {
1480                                         if (scmd->device == sdev &&
1481                                             scsi_eh_action(scmd, rtn) != FAILED)
1482                                                 scsi_eh_finish_cmd(scmd,
1483                                                                    done_q);
1484                                 }
1485                         }
1486                 } else {
1487                         SCSI_LOG_ERROR_RECOVERY(3,
1488                                 sdev_printk(KERN_INFO, sdev,
1489                                             "%s: BDR failed\n", current->comm));
1490                 }
1491         }
1492
1493         return list_empty(work_q);
1494 }
1495
1496 /**
1497  * scsi_eh_target_reset - send target reset if needed
1498  * @shost:      scsi host being recovered.
1499  * @work_q:     &list_head for pending commands.
1500  * @done_q:     &list_head for processed commands.
1501  *
1502  * Notes:
1503  *    Try a target reset.
1504  */
1505 static int scsi_eh_target_reset(struct Scsi_Host *shost,
1506                                 struct list_head *work_q,
1507                                 struct list_head *done_q)
1508 {
1509         LIST_HEAD(tmp_list);
1510         LIST_HEAD(check_list);
1511
1512         list_splice_init(work_q, &tmp_list);
1513
1514         while (!list_empty(&tmp_list)) {
1515                 struct scsi_cmnd *next, *scmd;
1516                 int rtn;
1517                 unsigned int id;
1518
1519                 if (scsi_host_eh_past_deadline(shost)) {
1520                         /* push back on work queue for further processing */
1521                         list_splice_init(&check_list, work_q);
1522                         list_splice_init(&tmp_list, work_q);
1523                         SCSI_LOG_ERROR_RECOVERY(3,
1524                                 shost_printk(KERN_INFO, shost,
1525                                             "%s: Skip target reset, past eh deadline\n",
1526                                              current->comm));
1527                         return list_empty(work_q);
1528                 }
1529
1530                 scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
1531                 id = scmd_id(scmd);
1532
1533                 SCSI_LOG_ERROR_RECOVERY(3,
1534                         shost_printk(KERN_INFO, shost,
1535                                      "%s: Sending target reset to target %d\n",
1536                                      current->comm, id));
1537                 rtn = scsi_try_target_reset(scmd);
1538                 if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
1539                         SCSI_LOG_ERROR_RECOVERY(3,
1540                                 shost_printk(KERN_INFO, shost,
1541                                              "%s: Target reset failed"
1542                                              " target: %d\n",
1543                                              current->comm, id));
1544                 list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
1545                         if (scmd_id(scmd) != id)
1546                                 continue;
1547
1548                         if (rtn == SUCCESS)
1549                                 list_move_tail(&scmd->eh_entry, &check_list);
1550                         else if (rtn == FAST_IO_FAIL)
1551                                 scsi_eh_finish_cmd(scmd, done_q);
1552                         else
1553                                 /* push back on work queue for further processing */
1554                                 list_move(&scmd->eh_entry, work_q);
1555                 }
1556         }
1557
1558         return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1559 }
1560
1561 /**
1562  * scsi_eh_bus_reset - send a bus reset
1563  * @shost:      &scsi host being recovered.
1564  * @work_q:     &list_head for pending commands.
1565  * @done_q:     &list_head for processed commands.
1566  */
1567 static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1568                              struct list_head *work_q,
1569                              struct list_head *done_q)
1570 {
1571         struct scsi_cmnd *scmd, *chan_scmd, *next;
1572         LIST_HEAD(check_list);
1573         unsigned int channel;
1574         int rtn;
1575
1576         /*
1577          * we really want to loop over the various channels, and do this on
1578          * a channel by channel basis.  we should also check to see if any
1579          * of the failed commands are on soft_reset devices, and if so, skip
1580          * the reset.
1581          */
1582
1583         for (channel = 0; channel <= shost->max_channel; channel++) {
1584                 if (scsi_host_eh_past_deadline(shost)) {
1585                         list_splice_init(&check_list, work_q);
1586                         SCSI_LOG_ERROR_RECOVERY(3,
1587                                 shost_printk(KERN_INFO, shost,
1588                                             "%s: skip BRST, past eh deadline\n",
1589                                              current->comm));
1590                         return list_empty(work_q);
1591                 }
1592
1593                 chan_scmd = NULL;
1594                 list_for_each_entry(scmd, work_q, eh_entry) {
1595                         if (channel == scmd_channel(scmd)) {
1596                                 chan_scmd = scmd;
1597                                 break;
1598                                 /*
1599                                  * FIXME add back in some support for
1600                                  * soft_reset devices.
1601                                  */
1602                         }
1603                 }
1604
1605                 if (!chan_scmd)
1606                         continue;
1607                 SCSI_LOG_ERROR_RECOVERY(3,
1608                         shost_printk(KERN_INFO, shost,
1609                                      "%s: Sending BRST chan: %d\n",
1610                                      current->comm, channel));
1611                 rtn = scsi_try_bus_reset(chan_scmd);
1612                 if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
1613                         list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1614                                 if (channel == scmd_channel(scmd)) {
1615                                         if (rtn == FAST_IO_FAIL)
1616                                                 scsi_eh_finish_cmd(scmd,
1617                                                                    done_q);
1618                                         else
1619                                                 list_move_tail(&scmd->eh_entry,
1620                                                                &check_list);
1621                                 }
1622                         }
1623                 } else {
1624                         SCSI_LOG_ERROR_RECOVERY(3,
1625                                 shost_printk(KERN_INFO, shost,
1626                                              "%s: BRST failed chan: %d\n",
1627                                              current->comm, channel));
1628                 }
1629         }
1630         return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
1631 }
1632
1633 /**
1634  * scsi_eh_host_reset - send a host reset
1635  * @shost:      host to be reset.
1636  * @work_q:     &list_head for pending commands.
1637  * @done_q:     &list_head for processed commands.
1638  */
1639 static int scsi_eh_host_reset(struct Scsi_Host *shost,
1640                               struct list_head *work_q,
1641                               struct list_head *done_q)
1642 {
1643         struct scsi_cmnd *scmd, *next;
1644         LIST_HEAD(check_list);
1645         int rtn;
1646
1647         if (!list_empty(work_q)) {
1648                 scmd = list_entry(work_q->next,
1649                                   struct scsi_cmnd, eh_entry);
1650
1651                 SCSI_LOG_ERROR_RECOVERY(3,
1652                         shost_printk(KERN_INFO, shost,
1653                                      "%s: Sending HRST\n",
1654                                      current->comm));
1655
1656                 rtn = scsi_try_host_reset(scmd);
1657                 if (rtn == SUCCESS) {
1658                         list_splice_init(work_q, &check_list);
1659                 } else if (rtn == FAST_IO_FAIL) {
1660                         list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1661                                 scsi_eh_finish_cmd(scmd, done_q);
1662                         }
1663                 } else {
1664                         SCSI_LOG_ERROR_RECOVERY(3,
1665                                 shost_printk(KERN_INFO, shost,
1666                                              "%s: HRST failed\n",
1667                                              current->comm));
1668                 }
1669         }
1670         return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
1671 }
1672
1673 /**
1674  * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
1675  * @work_q:     &list_head for pending commands.
1676  * @done_q:     &list_head for processed commands.
1677  */
1678 static void scsi_eh_offline_sdevs(struct list_head *work_q,
1679                                   struct list_head *done_q)
1680 {
1681         struct scsi_cmnd *scmd, *next;
1682         struct scsi_device *sdev;
1683
1684         list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1685                 sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
1686                             "not ready after error recovery\n");
1687                 sdev = scmd->device;
1688
1689                 mutex_lock(&sdev->state_mutex);
1690                 scsi_device_set_state(sdev, SDEV_OFFLINE);
1691                 mutex_unlock(&sdev->state_mutex);
1692
1693                 scsi_eh_finish_cmd(scmd, done_q);
1694         }
1695         return;
1696 }
1697
1698 /**
1699  * scsi_noretry_cmd - determine if command should be failed fast
1700  * @scmd:       SCSI cmd to examine.
1701  */
1702 int scsi_noretry_cmd(struct scsi_cmnd *scmd)
1703 {
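        /*
         * Decide which REQ_FAILFAST_* class, if any, covers the failure
         * reported in the host byte; timeouts and retryable check
         * conditions are handled by the check_type test below.
         */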
1704         switch (host_byte(scmd->result)) {
1705         case DID_OK:
1706                 break;
1707         case DID_TIME_OUT:
1708                 goto check_type;
1709         case DID_BUS_BUSY:
1710                 return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
1711         case DID_PARITY:
1712                 return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
1713         case DID_ERROR:
1714                 if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1715                     status_byte(scmd->result) == RESERVATION_CONFLICT)
1716                         return 0;
1717                 /* fall through */
1718         case DID_SOFT_ERROR:
1719                 return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
1720         }
1721
1722         if (status_byte(scmd->result) != CHECK_CONDITION)
1723                 return 0;
1724
1725 check_type:
1726         /*
1727          * assume caller has checked sense and determined
1728          * the check condition was retryable.
1729          */
1730         if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
1731             blk_rq_is_passthrough(scmd->request))
1732                 return 1;
1733         else
1734                 return 0;
1735 }
1736
1737 /**
1738  * scsi_decide_disposition - Disposition a cmd on return from LLD.
1739  * @scmd:       SCSI cmd to examine.
1740  *
1741  * Notes:
1742  *    This is *only* called when we are examining the status after sending
1743  *    out the actual data command.  any commands that are queued for error
1744  *    recovery (e.g. test_unit_ready) do *not* come through here.
1745  *
1746  *    When this routine returns FAILED, it means the error handler thread
1747  *    is woken.  In cases where the error code indicates an error that
1748  *    doesn't require the error handler's intervention (i.e. we don't need
1749  *    to abort/reset), this function should return SUCCESS.
1750  */
1751 int scsi_decide_disposition(struct scsi_cmnd *scmd)
1752 {
1753         int rtn;
1754
1755         /*
1756          * if the device is offline, then we clearly just pass the result back
1757          * up to the top level.
1758          */
1759         if (!scsi_device_online(scmd->device)) {
1760                 SCSI_LOG_ERROR_RECOVERY(5, scmd_printk(KERN_INFO, scmd,
1761                         "%s: device offline - report as SUCCESS\n", __func__));
1762                 return SUCCESS;
1763         }
1764
1765         /*
1766          * first check the host byte, to see if there is anything in there
1767          * that would indicate what we need to do.
1768          */
1769         switch (host_byte(scmd->result)) {
1770         case DID_PASSTHROUGH:
1771                 /*
1772                  * no matter what, pass this through to the upper layer.
1773                  * nuke this special code so that it looks like we are saying
1774                  * did_ok.
1775                  */
1776                 scmd->result &= 0xff00ffff;
1777                 return SUCCESS;
1778         case DID_OK:
1779                 /*
1780                  * looks good.  drop through, and check the next byte.
1781                  */
1782                 break;
1783         case DID_ABORT:
1784                 if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
1785                         set_host_byte(scmd, DID_TIME_OUT);
1786                         return SUCCESS;
1787                 }
1788                 /* FALLTHROUGH */
1789         case DID_NO_CONNECT:
1790         case DID_BAD_TARGET:
1791                 /*
1792                  * note - this means that we just report the status back
1793                  * to the top level driver, not that we actually think
1794                  * that it indicates SUCCESS.
1795                  */
1796                 return SUCCESS;
1797         case DID_SOFT_ERROR:
1798                 /*
1799                  * when the low level driver returns did_soft_error,
1800                  * it is responsible for keeping an internal retry counter
1801                  * in order to avoid endless loops (db)
1802                  */
1803                 goto maybe_retry;
1804         case DID_IMM_RETRY:
1805                 return NEEDS_RETRY;
1806
1807         case DID_REQUEUE:
1808                 return ADD_TO_MLQUEUE;
1809         case DID_TRANSPORT_DISRUPTED:
1810                 /*
1811                  * LLD/transport was disrupted during processing of the IO.
1812                  * The transport class is now blocked/blocking,
1813                  * and the transport will decide what to do with the IO
1814                  * based on its timers and recovery capabilities if
1815                  * there are enough retries.
1816                  */
1817                 goto maybe_retry;
1818         case DID_TRANSPORT_FAILFAST:
1819                 /*
1820                  * The transport decided to failfast the IO (most likely
1821                  * the fast io fail tmo fired), so send IO directly upwards.
1822                  */
1823                 return SUCCESS;
1824         case DID_ERROR:
1825                 if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
1826                     status_byte(scmd->result) == RESERVATION_CONFLICT)
1827                         /*
1828                          * execute reservation conflict processing code
1829                          * lower down
1830                          */
1831                         break;
1832                 /* fallthrough */
1833         case DID_BUS_BUSY:
1834         case DID_PARITY:
1835                 goto maybe_retry;
1836         case DID_TIME_OUT:
1837                 /*
1838                  * when we scan the bus, we get timeout messages for
1839                  * these commands if there is no device available.
1840                  * other hosts report did_no_connect for the same thing.
1841                  */
1842                 if ((scmd->cmnd[0] == TEST_UNIT_READY ||
1843                      scmd->cmnd[0] == INQUIRY)) {
1844                         return SUCCESS;
1845                 } else {
1846                         return FAILED;
1847                 }
1848         case DID_RESET:
1849                 return SUCCESS;
1850         default:
1851                 return FAILED;
1852         }
1853
1854         /*
1855          * next, check the message byte.
1856          */
1857         if (msg_byte(scmd->result) != COMMAND_COMPLETE)
1858                 return FAILED;
1859
1860         /*
1861          * check the status byte to see if this indicates anything special.
1862          */
1863         switch (status_byte(scmd->result)) {
1864         case QUEUE_FULL:
1865                 scsi_handle_queue_full(scmd->device);
1866                 /*
1867                  * the case of trying to send too many commands to a
1868                  * tagged queueing device.
1869                  */
1870                 /* FALLTHROUGH */
1871         case BUSY:
1872                 /*
1873                  * device can't talk to us at the moment.  Should only
1874                  * occur (SAM-3) when the task queue is empty, so will cause
1875                  * the empty queue handling to trigger a stall in the
1876                  * device.
1877                  */
1878                 return ADD_TO_MLQUEUE;
1879         case GOOD:
1880                 if (scmd->cmnd[0] == REPORT_LUNS)
1881                         scmd->device->sdev_target->expecting_lun_change = 0;
1882                 scsi_handle_queue_ramp_up(scmd->device);
1883                 /* FALLTHROUGH */
1884         case COMMAND_TERMINATED:
1885                 return SUCCESS;
1886         case TASK_ABORTED:
1887                 goto maybe_retry;
1888         case CHECK_CONDITION:
1889                 rtn = scsi_check_sense(scmd);
1890                 if (rtn == NEEDS_RETRY)
1891                         goto maybe_retry;
1892                 /* if rtn == FAILED, we have no sense information;
1893                  * returning FAILED will wake the error handler thread
1894                  * to collect the sense and redo the decide
1895                  * disposition */
1896                 return rtn;
1897         case CONDITION_GOOD:
1898         case INTERMEDIATE_GOOD:
1899         case INTERMEDIATE_C_GOOD:
1900         case ACA_ACTIVE:
1901                 /*
1902                  * who knows?  FIXME(eric)
1903                  */
1904                 return SUCCESS;
1905
1906         case RESERVATION_CONFLICT:
1907                 sdev_printk(KERN_INFO, scmd->device,
1908                             "reservation conflict\n");
1909                 set_host_byte(scmd, DID_NEXUS_FAILURE);
1910                 return SUCCESS; /* causes immediate i/o error */
1911         default:
1912                 return FAILED;
1913         }
1914         return FAILED;
1915
1916 maybe_retry:
1917
1918         /* we requeue for retry because the error was retryable, and
1919          * the request was not marked fast fail.  Note that above,
1920          * even if the request is marked fast fail, we still requeue
1921          * for queue congestion conditions (QUEUE_FULL or BUSY) */
1922         if ((++scmd->retries) <= scmd->allowed
1923             && !scsi_noretry_cmd(scmd)) {
1924                 return NEEDS_RETRY;
1925         } else {
1926                 /*
1927                  * no more retries - report this one back to upper level.
1928                  */
1929                 return SUCCESS;
1930         }
1931 }
1932
1933 static void eh_lock_door_done(struct request *req, blk_status_t status)
1934 {
1935         blk_put_request(req);
1936 }
1937
1938 /**
1939  * scsi_eh_lock_door - Prevent medium removal for the specified device
1940  * @sdev:       SCSI device to prevent medium removal
1941  *
1942  * Locking:
1943  *      We must be called from process context.
1944  *
1945  * Notes:
1946  *      We queue up an asynchronous PREVENT ALLOW MEDIUM REMOVAL request
1947  *      (prevent bit set) at the head of the device's request queue, and continue.
1948  */
1949 static void scsi_eh_lock_door(struct scsi_device *sdev)
1950 {
1951         struct request *req;
1952         struct scsi_request *rq;
1953
1954         req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, 0);
1955         if (IS_ERR(req))
1956                 return;
1957         rq = scsi_req(req);
1958
1959         rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
1960         rq->cmd[1] = 0;
1961         rq->cmd[2] = 0;
1962         rq->cmd[3] = 0;
1963         rq->cmd[4] = SCSI_REMOVAL_PREVENT;
1964         rq->cmd[5] = 0;
1965         rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
1966
1967         req->rq_flags |= RQF_QUIET;
1968         req->timeout = 10 * HZ;
1969         rq->retries = 5;
1970
1971         blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done);
1972 }
1973
1974 /**
1975  * scsi_restart_operations - restart io operations to the specified host.
1976  * @shost:      Host we are restarting.
1977  *
1978  * Notes:
1979  *    When we entered the error handler, we blocked all further i/o to
1980  *    this host.  we need to 'reverse' this process.
1981  */
1982 static void scsi_restart_operations(struct Scsi_Host *shost)
1983 {
1984         struct scsi_device *sdev;
1985         unsigned long flags;
1986
1987         /*
1988          * If the door was locked, we need to insert a door lock request
1989          * onto the head of the SCSI request queue for the device.  There
1990          * is no point trying to lock the door of an off-line device.
1991          */
1992         shost_for_each_device(sdev, shost) {
1993                 if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
1994                         scsi_eh_lock_door(sdev);
1995                         sdev->was_reset = 0;
1996                 }
1997         }
1998
1999         /*
2000          * next free up anything directly waiting upon the host.  this
2001          * will be requests for character device operations, and also for
2002          * ioctls to queued block devices.
2003          */
2004         SCSI_LOG_ERROR_RECOVERY(3,
2005                 shost_printk(KERN_INFO, shost, "waking up host to restart\n"));
2006
2007         spin_lock_irqsave(shost->host_lock, flags);
2008         if (scsi_host_set_state(shost, SHOST_RUNNING))
2009                 if (scsi_host_set_state(shost, SHOST_CANCEL))
2010                         BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
2011         spin_unlock_irqrestore(shost->host_lock, flags);
2012
2013         wake_up(&shost->host_wait);
2014
2015         /*
2016          * finally we need to re-initiate requests that may be pending.  we will
2017          * have had everything blocked while error handling is taking place, and
2018          * now that error recovery is done, we will need to ensure that these
2019          * requests are started.
2020          */
2021         scsi_run_host_queues(shost);
2022
2023         /*
2024          * if eh is active and host_eh_scheduled is pending we need to re-run
2025          * recovery.  we do this check after scsi_run_host_queues() to allow
2026          * everything pent up since the last eh run a chance to make forward
2027          * progress before we sync again.  Either we'll immediately re-run
2028          * recovery or scsi_device_unbusy() will wake us again when these
2029          * pending commands complete.
2030          */
2031         spin_lock_irqsave(shost->host_lock, flags);
2032         if (shost->host_eh_scheduled)
2033                 if (scsi_host_set_state(shost, SHOST_RECOVERY))
2034                         WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
2035         spin_unlock_irqrestore(shost->host_lock, flags);
2036 }
2037
2038 /**
2039  * scsi_eh_ready_devs - check device ready state and recover if not.
2040  * @shost:      host to be recovered.
2041  * @work_q:     &list_head for pending commands.
2042  * @done_q:     &list_head for processed commands.
2043  */
2044 void scsi_eh_ready_devs(struct Scsi_Host *shost,
2045                         struct list_head *work_q,
2046                         struct list_head *done_q)
2047 {
2048         if (!scsi_eh_stu(shost, work_q, done_q))
2049                 if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
2050                         if (!scsi_eh_target_reset(shost, work_q, done_q))
2051                                 if (!scsi_eh_bus_reset(shost, work_q, done_q))
2052                                         if (!scsi_eh_host_reset(shost, work_q, done_q))
2053                                                 scsi_eh_offline_sdevs(work_q,
2054                                                                       done_q);
2055 }
2056 EXPORT_SYMBOL_GPL(scsi_eh_ready_devs);
2057
2058 /**
2059  * scsi_eh_flush_done_q - finish processed commands or retry them.
2060  * @done_q:     list_head of processed commands.
2061  */
2062 void scsi_eh_flush_done_q(struct list_head *done_q)
2063 {
2064         struct scsi_cmnd *scmd, *next;
2065
2066         list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
2067                 list_del_init(&scmd->eh_entry);
2068                 if (scsi_device_online(scmd->device) &&
2069                     !scsi_noretry_cmd(scmd) &&
2070                     (++scmd->retries <= scmd->allowed)) {
2071                         SCSI_LOG_ERROR_RECOVERY(3,
2072                                 scmd_printk(KERN_INFO, scmd,
2073                                              "%s: flush retry cmd\n",
2074                                              current->comm));
2075                         scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
2076                 } else {
2077                         /*
2078                          * If we just got sense for the device (called
2079                          * scsi_eh_get_sense), scmd->result is already
2080                          * set; do not set DRIVER_TIMEOUT.
2081                          */
2082                         if (!scmd->result)
2083                                 scmd->result |= (DRIVER_TIMEOUT << 24);
2084                         SCSI_LOG_ERROR_RECOVERY(3,
2085                                 scmd_printk(KERN_INFO, scmd,
2086                                              "%s: flush finish cmd\n",
2087                                              current->comm));
2088                         scsi_finish_command(scmd);
2089                 }
2090         }
2091 }
2092 EXPORT_SYMBOL(scsi_eh_flush_done_q);
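
/*
 * Illustrative sketch (not code from this file): a transport class or LLD
 * that installs its own eh_strategy_handler can compose the exported
 * helpers above in much the same way scsi_unjam_host() below does.  The
 * foo_ name is hypothetical.
 *
 *	static void foo_eh_strategy_handler(struct Scsi_Host *shost)
 *	{
 *		LIST_HEAD(work_q);
 *		LIST_HEAD(done_q);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(shost->host_lock, flags);
 *		list_splice_init(&shost->eh_cmd_q, &work_q);
 *		spin_unlock_irqrestore(shost->host_lock, flags);
 *
 *		... transport specific recovery could run here ...
 *
 *		if (!scsi_eh_get_sense(&work_q, &done_q))
 *			scsi_eh_ready_devs(shost, &work_q, &done_q);
 *
 *		scsi_eh_flush_done_q(&done_q);
 *	}
 */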
2093
2094 /**
2095  * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
2096  * @shost:      Host to unjam.
2097  *
2098  * Notes:
2099  *    When we come in here, we *know* that all commands on the bus have
2100  *    either completed, failed or timed out.  we also know that no further
2101  *    commands are being sent to the host, so things are relatively quiet
2102  *    and we have freedom to fiddle with things as we wish.
2103  *
2104  *    This is only the *default* implementation.  it is possible for
2105  *    individual drivers to supply their own version of this function, and
2106  *    if the maintainer wishes to do this, it is strongly suggested that
2107  *    this function be taken as a template and modified.  this function
2108  *    was designed to correctly handle problems for about 95% of the
2109  *    different cases out there, and it should always provide at least a
2110  *    reasonable amount of error recovery.
2111  *
2112  *    Any command marked 'failed' or 'timeout' must eventually have
2113  *    scsi_finish_command() called for it.  we do all of the retry stuff
2114  *    here, so when we restart the host after we return it should have an
2115  *    empty queue.
2116  */
2117 static void scsi_unjam_host(struct Scsi_Host *shost)
2118 {
2119         unsigned long flags;
2120         LIST_HEAD(eh_work_q);
2121         LIST_HEAD(eh_done_q);
2122
2123         spin_lock_irqsave(shost->host_lock, flags);
2124         list_splice_init(&shost->eh_cmd_q, &eh_work_q);
2125         spin_unlock_irqrestore(shost->host_lock, flags);
2126
2127         SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
2128
2129         if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
2130                 scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
2131
2132         spin_lock_irqsave(shost->host_lock, flags);
2133         if (shost->eh_deadline != -1)
2134                 shost->last_reset = 0;
2135         spin_unlock_irqrestore(shost->host_lock, flags);
2136         scsi_eh_flush_done_q(&eh_done_q);
2137 }
2138
2139 /**
2140  * scsi_error_handler - SCSI error handler thread
2141  * @data:       Host for which we are running.
2142  *
2143  * Notes:
2144  *    This is the main error handling loop.  This is run as a kernel thread
2145  *    for every SCSI host and handles all error handling activity.
2146  */
2147 int scsi_error_handler(void *data)
2148 {
2149         struct Scsi_Host *shost = data;
2150
2151         /*
2152          * We use TASK_INTERRUPTIBLE so that the thread is not
2153          * counted against the load average as a running process.
2154          * We never actually get interrupted because kthread_run
2155          * disables signal delivery for the created thread.
2156          */
2157         while (true) {
2158                 /*
2159                  * The sequence in kthread_stop() sets the stop flag first
2160                  * then wakes the process.  To avoid missed wakeups, the task
2161                  * should always be in a non running state before the stop
2162                  * flag is checked
2163                  */
2164                 set_current_state(TASK_INTERRUPTIBLE);
2165                 if (kthread_should_stop())
2166                         break;
2167
2168                 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
2169                     shost->host_failed != scsi_host_busy(shost)) {
2170                         SCSI_LOG_ERROR_RECOVERY(1,
2171                                 shost_printk(KERN_INFO, shost,
2172                                              "scsi_eh_%d: sleeping\n",
2173                                              shost->host_no));
2174                         schedule();
2175                         continue;
2176                 }
2177
2178                 __set_current_state(TASK_RUNNING);
2179                 SCSI_LOG_ERROR_RECOVERY(1,
2180                         shost_printk(KERN_INFO, shost,
2181                                      "scsi_eh_%d: waking up %d/%d/%d\n",
2182                                      shost->host_no, shost->host_eh_scheduled,
2183                                      shost->host_failed,
2184                                      scsi_host_busy(shost)));
2185
2186                 /*
2187                  * We have a host that is failing for some reason.  Figure out
2188                  * what we need to do to get it up and online again (if we can).
2189                  * If we fail, we end up taking the thing offline.
2190                  */
2191                 if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
2192                         SCSI_LOG_ERROR_RECOVERY(1,
2193                                 shost_printk(KERN_ERR, shost,
2194                                              "scsi_eh_%d: unable to autoresume\n",
2195                                              shost->host_no));
2196                         continue;
2197                 }
2198
2199                 if (shost->transportt->eh_strategy_handler)
2200                         shost->transportt->eh_strategy_handler(shost);
2201                 else
2202                         scsi_unjam_host(shost);
2203
2204                 /* All scmds have been handled */
2205                 shost->host_failed = 0;
2206
2207                 /*
2208                  * Note - if the above fails completely, the action is to take
2209                  * individual devices offline and flush the queue of any
2210                  * outstanding requests that may have been pending.  When we
2211                  * restart, we restart any I/O to any other devices on the bus
2212                  * which are still online.
2213                  */
2214                 scsi_restart_operations(shost);
2215                 if (!shost->eh_noresume)
2216                         scsi_autopm_put_host(shost);
2217         }
2218         __set_current_state(TASK_RUNNING);
2219
2220         SCSI_LOG_ERROR_RECOVERY(1,
2221                 shost_printk(KERN_INFO, shost,
2222                              "Error handler scsi_eh_%d exiting\n",
2223                              shost->host_no));
2224         shost->ehandler = NULL;
2225         return 0;
2226 }
2227
2228 /*
2229  * Function:    scsi_report_bus_reset()
2230  *
2231  * Purpose:     Utility function used by low-level drivers to report that
2232  *              they have observed a bus reset on the bus being handled.
2233  *
2234  * Arguments:   shost       - Host in question
2235  *              channel     - channel on which reset was observed.
2236  *
2237  * Returns:     Nothing
2238  *
2239  * Lock status: Host lock must be held.
2240  *
2241  * Notes:       This only needs to be called if the reset is one which
2242  *              originates from an unknown location.  Resets originated
2243  *              by the mid-level itself don't need to be reported, but there
2244  *              should be no harm in doing so.
2245  *
2246  *              The main purpose of this is to make sure that a CHECK_CONDITION
2247  *              is properly treated.
2248  */
2249 void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
2250 {
2251         struct scsi_device *sdev;
2252
2253         __shost_for_each_device(sdev, shost) {
2254                 if (channel == sdev_channel(sdev))
2255                         __scsi_report_device_reset(sdev, NULL);
2256         }
2257 }
2258 EXPORT_SYMBOL(scsi_report_bus_reset);
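
/*
 * Illustrative sketch (hypothetical driver code, not from this file): an
 * LLD that observes an unexpected bus reset, for instance in its interrupt
 * handler, would typically report it with the host lock held:
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_report_bus_reset(shost, channel);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 */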
2259
2260 /*
2261  * Function:    scsi_report_device_reset()
2262  *
2263  * Purpose:     Utility function used by low-level drivers to report that
2264  *              they have observed a device reset on the device being handled.
2265  *
2266  * Arguments:   shost       - Host in question
2267  *              channel     - channel on which reset was observed
2268  *              target      - target on which reset was observed
2269  *
2270  * Returns:     Nothing
2271  *
2272  * Lock status: Host lock must be held
2273  *
2274  * Notes:       This only needs to be called if the reset is one which
2275  *              originates from an unknown location.  Resets originated
2276  *              by the mid-level itself don't need to be reported, but there
2277  *              should be no harm in doing so.
2278  *
2279  *              The main purpose of this is to make sure that a CHECK_CONDITION
2280  *              is properly treated.
2281  */
2282 void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
2283 {
2284         struct scsi_device *sdev;
2285
2286         __shost_for_each_device(sdev, shost) {
2287                 if (channel == sdev_channel(sdev) &&
2288                     target == sdev_id(sdev))
2289                         __scsi_report_device_reset(sdev, NULL);
2290         }
2291 }
2292 EXPORT_SYMBOL(scsi_report_device_reset);
2293
2294 static void
2295 scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
2296 {
2297 }
2298
2299 /**
2300  * scsi_ioctl_reset - explicitly reset a host/bus/target/device
2301  * @dev:        scsi_device to operate on
2302  * @arg:        reset type (see sg.h)
2303  */
2304 int
2305 scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
2306 {
2307         struct scsi_cmnd *scmd;
2308         struct Scsi_Host *shost = dev->host;
2309         struct request *rq;
2310         unsigned long flags;
2311         int error = 0, rtn, val;
2312
2313         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2314                 return -EACCES;
2315
2316         error = get_user(val, arg);
2317         if (error)
2318                 return error;
2319
2320         if (scsi_autopm_get_host(shost) < 0)
2321                 return -EIO;
2322
2323         error = -EIO;
2324         rq = kzalloc(sizeof(struct request) + sizeof(struct scsi_cmnd) +
2325                         shost->hostt->cmd_size, GFP_KERNEL);
2326         if (!rq)
2327                 goto out_put_autopm_host;
2328         blk_rq_init(NULL, rq);
2329
2330         scmd = (struct scsi_cmnd *)(rq + 1);
2331         scsi_init_command(dev, scmd);
2332         scmd->request = rq;
2333         scmd->cmnd = scsi_req(rq)->cmd;
2334
2335         scmd->scsi_done         = scsi_reset_provider_done_command;
2336         memset(&scmd->sdb, 0, sizeof(scmd->sdb));
2337
2338         scmd->cmd_len                   = 0;
2339
2340         scmd->sc_data_direction         = DMA_BIDIRECTIONAL;
2341
2342         spin_lock_irqsave(shost->host_lock, flags);
2343         shost->tmf_in_progress = 1;
2344         spin_unlock_irqrestore(shost->host_lock, flags);
2345
2346         switch (val & ~SG_SCSI_RESET_NO_ESCALATE) {
2347         case SG_SCSI_RESET_NOTHING:
2348                 rtn = SUCCESS;
2349                 break;
2350         case SG_SCSI_RESET_DEVICE:
2351                 rtn = scsi_try_bus_device_reset(scmd);
2352                 if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
2353                         break;
2354                 /* FALLTHROUGH */
2355         case SG_SCSI_RESET_TARGET:
2356                 rtn = scsi_try_target_reset(scmd);
2357                 if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
2358                         break;
2359                 /* FALLTHROUGH */
2360         case SG_SCSI_RESET_BUS:
2361                 rtn = scsi_try_bus_reset(scmd);
2362                 if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE))
2363                         break;
2364                 /* FALLTHROUGH */
2365         case SG_SCSI_RESET_HOST:
2366                 rtn = scsi_try_host_reset(scmd);
2367                 if (rtn == SUCCESS)
2368                         break;
2369                 /* FALLTHROUGH */
2370         default:
2371                 rtn = FAILED;
2372                 break;
2373         }
2374
2375         error = (rtn == SUCCESS) ? 0 : -EIO;
2376
2377         spin_lock_irqsave(shost->host_lock, flags);
2378         shost->tmf_in_progress = 0;
2379         spin_unlock_irqrestore(shost->host_lock, flags);
2380
2381         /*
2382          * be sure to wake up anyone who was sleeping or had their queue
2383          * suspended while we performed the TMF.
2384          */
2385         SCSI_LOG_ERROR_RECOVERY(3,
2386                 shost_printk(KERN_INFO, shost,
2387                              "waking up host to restart after TMF\n"));
2388
2389         wake_up(&shost->host_wait);
2390         scsi_run_host_queues(shost);
2391
2392         scsi_put_command(scmd);
2393         kfree(rq);
2394
2395 out_put_autopm_host:
2396         scsi_autopm_put_host(shost);
2397         return error;
2398 }
2399 EXPORT_SYMBOL(scsi_ioctl_reset);
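
/*
 * Illustrative sketch of the user space side (the device node and the
 * chosen reset type are only examples); the caller needs CAP_SYS_ADMIN
 * and CAP_SYS_RAWIO, as checked above:
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	int main(void)
 *	{
 *		int val = SG_SCSI_RESET_DEVICE | SG_SCSI_RESET_NO_ESCALATE;
 *		int fd = open("/dev/sg0", O_RDWR | O_NONBLOCK);
 *
 *		if (fd < 0 || ioctl(fd, SG_SCSI_RESET, &val) < 0)
 *			perror("SG_SCSI_RESET");
 *		return 0;
 *	}
 */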
2400
2401 bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd,
2402                                   struct scsi_sense_hdr *sshdr)
2403 {
2404         return scsi_normalize_sense(cmd->sense_buffer,
2405                         SCSI_SENSE_BUFFERSIZE, sshdr);
2406 }
2407 EXPORT_SYMBOL(scsi_command_normalize_sense);
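
/*
 * Illustrative sketch (the foo_ helper is hypothetical): a driver
 * inspecting the sense data of a completed command might do:
 *
 *	struct scsi_sense_hdr sshdr;
 *
 *	if (scsi_command_normalize_sense(scmd, &sshdr) &&
 *	    sshdr.sense_key == UNIT_ATTENTION)
 *		foo_handle_unit_attention(scmd->device, &sshdr);
 */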
2408
2409 /**
2410  * scsi_get_sense_info_fld - get information field from sense data (either fixed or descriptor format)
2411  * @sense_buffer:       byte array of sense data
2412  * @sb_len:             number of valid bytes in sense_buffer
2413  * @info_out:           pointer to a 64 bit integer where the 8 or 4 byte
2414  *                      information field will be placed if found.
2415  *
2416  * Return value:
2417  *      true if information field found, false if not found.
2418  */
2419 bool scsi_get_sense_info_fld(const u8 *sense_buffer, int sb_len,
2420                              u64 *info_out)
2421 {
2422         const u8 *ucp;
2423
2424         if (sb_len < 7)
2425                 return false;
2426         switch (sense_buffer[0] & 0x7f) {
2427         case 0x70:
2428         case 0x71:
2429                 if (sense_buffer[0] & 0x80) {
2430                         *info_out = get_unaligned_be32(&sense_buffer[3]);
2431                         return true;
2432                 }
2433                 return false;
2434         case 0x72:
2435         case 0x73:
2436                 ucp = scsi_sense_desc_find(sense_buffer, sb_len,
2437                                            0 /* info desc */);
2438                 if (ucp && (0xa == ucp[1])) {
2439                         *info_out = get_unaligned_be64(&ucp[4]);
2440                         return true;
2441                 }
2442                 return false;
2443         default:
2444                 return false;
2445         }
2446 }
2447 EXPORT_SYMBOL(scsi_get_sense_info_fld);
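
/*
 * Illustrative sketch (not from this file): for a MEDIUM ERROR the
 * information field typically holds the failing LBA, so a caller might
 * extract it like this:
 *
 *	u64 bad_lba;
 *
 *	if (scsi_get_sense_info_fld(scmd->sense_buffer,
 *				    SCSI_SENSE_BUFFERSIZE, &bad_lba))
 *		sdev_printk(KERN_INFO, scmd->device,
 *			    "failed at LBA %llu\n",
 *			    (unsigned long long)bad_lba);
 */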