X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=drivers%2Fscsi%2Flibata-eh.c;h=bf5a72aca8a4979957ae9a7ce8ff519d70490655;hb=55b4d6a52195a8f277ffddf755ddaff359878f41;hp=9173d8f2ce5db7e999211e03f3509895cb0e2a9a;hpb=c6cf9e99d1de5ca6a08fb639bb73031ffe50d802;p=linux-2.6-block.git diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c index 9173d8f2ce5d..bf5a72aca8a4 100644 --- a/drivers/scsi/libata-eh.c +++ b/drivers/scsi/libata-eh.c @@ -46,6 +46,7 @@ #include "libata.h" static void __ata_port_freeze(struct ata_port *ap); +static void ata_eh_finish(struct ata_port *ap); static void ata_ering_record(struct ata_ering *ering, int is_io, unsigned int err_mask) @@ -92,6 +93,38 @@ static int ata_ering_map(struct ata_ering *ering, return rc; } +static unsigned int ata_eh_dev_action(struct ata_device *dev) +{ + struct ata_eh_context *ehc = &dev->ap->eh_context; + + return ehc->i.action | ehc->i.dev_action[dev->devno]; +} + +static void ata_eh_clear_action(struct ata_device *dev, + struct ata_eh_info *ehi, unsigned int action) +{ + int i; + + if (!dev) { + ehi->action &= ~action; + for (i = 0; i < ATA_MAX_DEVICES; i++) + ehi->dev_action[i] &= ~action; + } else { + /* doesn't make sense for port-wide EH actions */ + WARN_ON(!(action & ATA_EH_PERDEV_MASK)); + + /* break ehi->action into ehi->dev_action */ + if (ehi->action & action) { + for (i = 0; i < ATA_MAX_DEVICES; i++) + ehi->dev_action[i] |= ehi->action & action; + ehi->action &= ~action; + } + + /* turn off the specified per-dev action */ + ehi->dev_action[dev->devno] &= ~action; + } +} + /** * ata_scsi_timed_out - SCSI layer time out callback * @cmd: timed out SCSI command @@ -127,7 +160,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) } ret = EH_HANDLED; - spin_lock_irqsave(&ap->host_set->lock, flags); + spin_lock_irqsave(ap->lock, flags); qc = ata_qc_from_tag(ap, ap->active_tag); if (qc) { WARN_ON(qc->scsicmd != cmd); @@ -135,7 +168,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) qc->err_mask |= AC_ERR_TIMEOUT; ret = EH_NOT_HANDLED; } - spin_unlock_irqrestore(&ap->host_set->lock, flags); + spin_unlock_irqrestore(ap->lock, flags); out: DPRINTK("EXIT, ret=%d\n", ret); @@ -157,7 +190,7 @@ enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) void ata_scsi_error(struct Scsi_Host *host) { struct ata_port *ap = ata_shost_to_port(host); - spinlock_t *hs_lock = &ap->host_set->lock; + spinlock_t *ap_lock = ap->lock; int i, repeat_cnt = ATA_EH_MAX_REPEAT; unsigned long flags; @@ -184,7 +217,7 @@ void ata_scsi_error(struct Scsi_Host *host) struct scsi_cmnd *scmd, *tmp; int nr_timedout = 0; - spin_lock_irqsave(hs_lock, flags); + spin_lock_irqsave(ap_lock, flags); list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { struct ata_queued_cmd *qc; @@ -223,15 +256,15 @@ void ata_scsi_error(struct Scsi_Host *host) if (nr_timedout) __ata_port_freeze(ap); - spin_unlock_irqrestore(hs_lock, flags); + spin_unlock_irqrestore(ap_lock, flags); } else - spin_unlock_wait(hs_lock); + spin_unlock_wait(ap_lock); repeat: /* invoke error handler */ if (ap->ops->error_handler) { /* fetch & clear EH info */ - spin_lock_irqsave(hs_lock, flags); + spin_lock_irqsave(ap_lock, flags); memset(&ap->eh_context, 0, sizeof(ap->eh_context)); ap->eh_context.i = ap->eh_info; @@ -240,23 +273,26 @@ void ata_scsi_error(struct Scsi_Host *host) ap->flags |= ATA_FLAG_EH_IN_PROGRESS; ap->flags &= ~ATA_FLAG_EH_PENDING; - spin_unlock_irqrestore(hs_lock, flags); + spin_unlock_irqrestore(ap_lock, flags); - /* invoke 
EH */ - ap->ops->error_handler(ap); + /* invoke EH. if unloading, just finish failed qcs */ + if (!(ap->flags & ATA_FLAG_UNLOADING)) + ap->ops->error_handler(ap); + else + ata_eh_finish(ap); /* Exception might have happend after ->error_handler * recovered the port but before this point. Repeat * EH in such case. */ - spin_lock_irqsave(hs_lock, flags); + spin_lock_irqsave(ap_lock, flags); if (ap->flags & ATA_FLAG_EH_PENDING) { if (--repeat_cnt) { ata_port_printk(ap, KERN_INFO, "EH pending after completion, " "repeating EH (cnt=%d)\n", repeat_cnt); - spin_unlock_irqrestore(hs_lock, flags); + spin_unlock_irqrestore(ap_lock, flags); goto repeat; } ata_port_printk(ap, KERN_ERR, "EH pending after %d " @@ -266,14 +302,14 @@ void ata_scsi_error(struct Scsi_Host *host) /* this run is complete, make sure EH info is clear */ memset(&ap->eh_info, 0, sizeof(ap->eh_info)); - /* Clear host_eh_scheduled while holding hs_lock such + /* Clear host_eh_scheduled while holding ap_lock such * that if exception occurs after this point but * before EH completion, SCSI midlayer will * re-initiate EH. */ host->host_eh_scheduled = 0; - spin_unlock_irqrestore(hs_lock, flags); + spin_unlock_irqrestore(ap_lock, flags); } else { WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL); ap->ops->eng_timeout(ap); @@ -285,17 +321,24 @@ void ata_scsi_error(struct Scsi_Host *host) scsi_eh_flush_done_q(&ap->eh_done_q); /* clean up */ - spin_lock_irqsave(hs_lock, flags); + spin_lock_irqsave(ap_lock, flags); + + if (ap->flags & ATA_FLAG_LOADING) { + ap->flags &= ~ATA_FLAG_LOADING; + } else { + if (ap->flags & ATA_FLAG_SCSI_HOTPLUG) + queue_work(ata_aux_wq, &ap->hotplug_task); + if (ap->flags & ATA_FLAG_RECOVERED) + ata_port_printk(ap, KERN_INFO, "EH complete\n"); + } - if (ap->flags & ATA_FLAG_RECOVERED) - ata_port_printk(ap, KERN_INFO, "EH complete\n"); - ap->flags &= ~ATA_FLAG_RECOVERED; + ap->flags &= ~(ATA_FLAG_SCSI_HOTPLUG | ATA_FLAG_RECOVERED); /* tell wait_eh that we're done */ ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS; wake_up_all(&ap->eh_wait_q); - spin_unlock_irqrestore(hs_lock, flags); + spin_unlock_irqrestore(ap_lock, flags); DPRINTK("EXIT\n"); } @@ -315,16 +358,17 @@ void ata_port_wait_eh(struct ata_port *ap) DEFINE_WAIT(wait); retry: - spin_lock_irqsave(&ap->host_set->lock, flags); + spin_lock_irqsave(ap->lock, flags); while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) { prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); - spin_unlock_irqrestore(&ap->host_set->lock, flags); + spin_unlock_irqrestore(ap->lock, flags); schedule(); - spin_lock_irqsave(&ap->host_set->lock, flags); + spin_lock_irqsave(ap->lock, flags); } + finish_wait(&ap->eh_wait_q, &wait); - spin_unlock_irqrestore(&ap->host_set->lock, flags); + spin_unlock_irqrestore(ap->lock, flags); /* make sure SCSI EH is complete */ if (scsi_host_in_recovery(ap->host)) { @@ -356,7 +400,6 @@ void ata_port_wait_eh(struct ata_port *ap) static void ata_qc_timeout(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - struct ata_host_set *host_set = ap->host_set; u8 host_stat = 0, drv_stat; unsigned long flags; @@ -364,7 +407,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) ap->hsm_task_state = HSM_ST_IDLE; - spin_lock_irqsave(&host_set->lock, flags); + spin_lock_irqsave(ap->lock, flags); switch (qc->tf.protocol) { @@ -393,7 +436,7 @@ static void ata_qc_timeout(struct ata_queued_cmd *qc) break; } - spin_unlock_irqrestore(&host_set->lock, flags); + spin_unlock_irqrestore(ap->lock, flags); ata_eh_qc_complete(qc); @@ -580,9 +623,9 @@ 
void ata_eh_freeze_port(struct ata_port *ap) if (!ap->ops->error_handler) return; - spin_lock_irqsave(&ap->host_set->lock, flags); + spin_lock_irqsave(ap->lock, flags); __ata_port_freeze(ap); - spin_unlock_irqrestore(&ap->host_set->lock, flags); + spin_unlock_irqrestore(ap->lock, flags); } /** @@ -601,14 +644,14 @@ void ata_eh_thaw_port(struct ata_port *ap) if (!ap->ops->error_handler) return; - spin_lock_irqsave(&ap->host_set->lock, flags); + spin_lock_irqsave(ap->lock, flags); ap->flags &= ~ATA_FLAG_FROZEN; if (ap->ops->thaw) ap->ops->thaw(ap); - spin_unlock_irqrestore(&ap->host_set->lock, flags); + spin_unlock_irqrestore(ap->lock, flags); DPRINTK("ata%u port thawed\n", ap->id); } @@ -624,11 +667,11 @@ static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) struct scsi_cmnd *scmd = qc->scsicmd; unsigned long flags; - spin_lock_irqsave(&ap->host_set->lock, flags); + spin_lock_irqsave(ap->lock, flags); qc->scsidone = ata_eh_scsidone; __ata_qc_complete(qc); WARN_ON(ata_tag_valid(qc->tag)); - spin_unlock_irqrestore(&ap->host_set->lock, flags); + spin_unlock_irqrestore(ap->lock, flags); scsi_eh_finish_cmd(scmd, &ap->eh_done_q); } @@ -666,9 +709,42 @@ void ata_eh_qc_retry(struct ata_queued_cmd *qc) __ata_eh_qc_complete(qc); } +/** + * ata_eh_detach_dev - detach ATA device + * @dev: ATA device to detach + * + * Detach @dev. + * + * LOCKING: + * None. + */ +static void ata_eh_detach_dev(struct ata_device *dev) +{ + struct ata_port *ap = dev->ap; + unsigned long flags; + + ata_dev_disable(dev); + + spin_lock_irqsave(ap->lock, flags); + + dev->flags &= ~ATA_DFLAG_DETACH; + + if (ata_scsi_offline_dev(dev)) { + dev->flags |= ATA_DFLAG_DETACHED; + ap->flags |= ATA_FLAG_SCSI_HOTPLUG; + } + + /* clear per-dev EH actions */ + ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK); + ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK); + + spin_unlock_irqrestore(ap->lock, flags); +} + /** * ata_eh_about_to_do - about to perform eh_action * @ap: target ATA port + * @dev: target ATA dev for per-dev action (can be NULL) * @action: action about to be performed * * Called just before performing EH actions to clear related bits @@ -678,14 +754,33 @@ void ata_eh_qc_retry(struct ata_queued_cmd *qc) * LOCKING: * None. */ -static void ata_eh_about_to_do(struct ata_port *ap, unsigned int action) +static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev, + unsigned int action) { unsigned long flags; - spin_lock_irqsave(&ap->host_set->lock, flags); - ap->eh_info.action &= ~action; + spin_lock_irqsave(ap->lock, flags); + ata_eh_clear_action(dev, &ap->eh_info, action); ap->flags |= ATA_FLAG_RECOVERED; - spin_unlock_irqrestore(&ap->host_set->lock, flags); + spin_unlock_irqrestore(ap->lock, flags); +} + +/** + * ata_eh_done - EH action complete + * @ap: target ATA port + * @dev: target ATA dev for per-dev action (can be NULL) + * @action: action just completed + * + * Called right after performing EH actions to clear related bits + * in @ap->eh_context. + * + * LOCKING: + * None. 
+ */ +static void ata_eh_done(struct ata_port *ap, struct ata_device *dev, + unsigned int action) +{ + ata_eh_clear_action(dev, &ap->eh_context.i, action); } /** @@ -904,10 +999,8 @@ static void ata_eh_analyze_serror(struct ata_port *ap) err_mask |= AC_ERR_SYSTEM; action |= ATA_EH_SOFTRESET; } - if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG)) { - err_mask |= AC_ERR_ATA_BUS; - action |= ATA_EH_HARDRESET; - } + if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG)) + ata_ehi_hotplugged(&ehc->i); ehc->i.err_mask |= err_mask; ehc->i.action |= action; @@ -1233,10 +1326,6 @@ static void ata_eh_autopsy(struct ata_port *ap) is_io = 1; } - /* speed down iff command was in progress */ - if (failed_dev) - action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask); - /* enforce default EH actions */ if (ap->flags & ATA_FLAG_FROZEN || all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) @@ -1244,6 +1333,17 @@ static void ata_eh_autopsy(struct ata_port *ap) else if (all_err_mask) action |= ATA_EH_REVALIDATE; + /* if we have offending qcs and the associated failed device */ + if (failed_dev) { + /* speed down */ + action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask); + + /* perform per-dev EH action only on the offending device */ + ehc->i.dev_action[failed_dev->devno] |= + action & ATA_EH_PERDEV_MASK; + action &= ~ATA_EH_PERDEV_MASK; + } + /* record autopsy result */ ehc->i.dev = failed_dev; ehc->i.action = action; @@ -1318,35 +1418,150 @@ static void ata_eh_report(struct ata_port *ap) } } -static int ata_eh_reset(struct ata_port *ap, ata_reset_fn_t softreset, +static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, + unsigned int *classes) +{ + int i, rc; + + for (i = 0; i < ATA_MAX_DEVICES; i++) + classes[i] = ATA_DEV_UNKNOWN; + + rc = reset(ap, classes); + if (rc) + return rc; + + /* If any class isn't ATA_DEV_UNKNOWN, consider classification + * is complete and convert all ATA_DEV_UNKNOWN to + * ATA_DEV_NONE. + */ + for (i = 0; i < ATA_MAX_DEVICES; i++) + if (classes[i] != ATA_DEV_UNKNOWN) + break; + + if (i < ATA_MAX_DEVICES) + for (i = 0; i < ATA_MAX_DEVICES; i++) + if (classes[i] == ATA_DEV_UNKNOWN) + classes[i] = ATA_DEV_NONE; + + return 0; +} + +static int ata_eh_followup_srst_needed(int rc, int classify, + const unsigned int *classes) +{ + if (rc == -EAGAIN) + return 1; + if (rc != 0) + return 0; + if (classify && classes[0] == ATA_DEV_UNKNOWN) + return 1; + return 0; +} + +static int ata_eh_reset(struct ata_port *ap, int classify, + ata_prereset_fn_t prereset, ata_reset_fn_t softreset, ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) { struct ata_eh_context *ehc = &ap->eh_context; - unsigned int classes[ATA_MAX_DEVICES]; + unsigned int *classes = ehc->classes; int tries = ATA_EH_RESET_TRIES; + int verbose = !(ap->flags & ATA_FLAG_LOADING); + unsigned int action; ata_reset_fn_t reset; - int i, rc; + int i, did_followup_srst, rc; + /* Determine which reset to use and record in ehc->i.action. + * prereset() may examine and modify it. 
+ */ + action = ehc->i.action; + ehc->i.action &= ~ATA_EH_RESET_MASK; if (softreset && (!hardreset || (!sata_set_spd_needed(ap) && - !(ehc->i.action & ATA_EH_HARDRESET)))) - reset = softreset; + !(action & ATA_EH_HARDRESET)))) + ehc->i.action |= ATA_EH_SOFTRESET; else + ehc->i.action |= ATA_EH_HARDRESET; + + if (prereset) { + rc = prereset(ap); + if (rc) { + ata_port_printk(ap, KERN_ERR, + "prereset failed (errno=%d)\n", rc); + return rc; + } + } + + /* prereset() might have modified ehc->i.action */ + if (ehc->i.action & ATA_EH_HARDRESET) reset = hardreset; + else if (ehc->i.action & ATA_EH_SOFTRESET) + reset = softreset; + else { + /* prereset told us not to reset, bang classes and return */ + for (i = 0; i < ATA_MAX_DEVICES; i++) + classes[i] = ATA_DEV_NONE; + return 0; + } + + /* did prereset() screw up? if so, fix up to avoid oopsing */ + if (!reset) { + ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested " + "invalid reset type\n"); + if (softreset) + reset = softreset; + else + reset = hardreset; + } retry: - ata_port_printk(ap, KERN_INFO, "%s resetting port\n", - reset == softreset ? "soft" : "hard"); + /* shut up during boot probing */ + if (verbose) + ata_port_printk(ap, KERN_INFO, "%s resetting port\n", + reset == softreset ? "soft" : "hard"); /* reset */ - ata_eh_about_to_do(ap, ATA_EH_RESET_MASK); + ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK); ehc->i.flags |= ATA_EHI_DID_RESET; rc = ata_do_reset(ap, reset, classes); + did_followup_srst = 0; + if (reset == hardreset && + ata_eh_followup_srst_needed(rc, classify, classes)) { + /* okay, let's do follow-up softreset */ + did_followup_srst = 1; + reset = softreset; + + if (!reset) { + ata_port_printk(ap, KERN_ERR, + "follow-up softreset required " + "but no softreset avaliable\n"); + return -EINVAL; + } + + ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK); + rc = ata_do_reset(ap, reset, classes); + + if (rc == 0 && classify && + classes[0] == ATA_DEV_UNKNOWN) { + ata_port_printk(ap, KERN_ERR, + "classification failed\n"); + return -EINVAL; + } + } + if (rc && --tries) { + const char *type; + + if (reset == softreset) { + if (did_followup_srst) + type = "follow-up soft"; + else + type = "soft"; + } else + type = "hard"; + ata_port_printk(ap, KERN_WARNING, - "%sreset failed, retrying in 5 secs\n", - reset == softreset ? 
"soft" : "hard"); + "%sreset failed, retrying in 5 secs\n", type); ssleep(5); if (reset == hardreset) @@ -1367,40 +1582,62 @@ static int ata_eh_reset(struct ata_port *ap, ata_reset_fn_t softreset, postreset(ap, classes); /* reset successful, schedule revalidation */ - ehc->i.dev = NULL; - ehc->i.action &= ~ATA_EH_RESET_MASK; + ata_eh_done(ap, NULL, ATA_EH_RESET_MASK); ehc->i.action |= ATA_EH_REVALIDATE; } return rc; } -static int ata_eh_revalidate(struct ata_port *ap, - struct ata_device **r_failed_dev) +static int ata_eh_revalidate_and_attach(struct ata_port *ap, + struct ata_device **r_failed_dev) { struct ata_eh_context *ehc = &ap->eh_context; struct ata_device *dev; + unsigned long flags; int i, rc = 0; DPRINTK("ENTER\n"); for (i = 0; i < ATA_MAX_DEVICES; i++) { + unsigned int action; + dev = &ap->device[i]; + action = ata_eh_dev_action(dev); - if (ehc->i.action & ATA_EH_REVALIDATE && ata_dev_enabled(dev) && - (!ehc->i.dev || ehc->i.dev == dev)) { + if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) { if (ata_port_offline(ap)) { rc = -EIO; break; } - ata_eh_about_to_do(ap, ATA_EH_REVALIDATE); + ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE); rc = ata_dev_revalidate(dev, ehc->i.flags & ATA_EHI_DID_RESET); if (rc) break; - ehc->i.action &= ~ATA_EH_REVALIDATE; + ata_eh_done(ap, dev, ATA_EH_REVALIDATE); + + /* schedule the scsi_rescan_device() here */ + queue_work(ata_aux_wq, &(ap->scsi_rescan_task)); + } else if (dev->class == ATA_DEV_UNKNOWN && + ehc->tries[dev->devno] && + ata_class_enabled(ehc->classes[dev->devno])) { + dev->class = ehc->classes[dev->devno]; + + rc = ata_dev_read_id(dev, &dev->class, 1, dev->id); + if (rc == 0) + rc = ata_dev_configure(dev, 1); + + if (rc) { + dev->class = ATA_DEV_UNKNOWN; + break; + } + + spin_lock_irqsave(ap->lock, flags); + ap->flags |= ATA_FLAG_SCSI_HOTPLUG; + spin_unlock_irqrestore(ap->lock, flags); } } @@ -1421,18 +1658,50 @@ static int ata_port_nr_enabled(struct ata_port *ap) return cnt; } +static int ata_port_nr_vacant(struct ata_port *ap) +{ + int i, cnt = 0; + + for (i = 0; i < ATA_MAX_DEVICES; i++) + if (ap->device[i].class == ATA_DEV_UNKNOWN) + cnt++; + return cnt; +} + +static int ata_eh_skip_recovery(struct ata_port *ap) +{ + struct ata_eh_context *ehc = &ap->eh_context; + int i; + + if (ap->flags & ATA_FLAG_FROZEN || ata_port_nr_enabled(ap)) + return 0; + + /* skip if class codes for all vacant slots are ATA_DEV_NONE */ + for (i = 0; i < ATA_MAX_DEVICES; i++) { + struct ata_device *dev = &ap->device[i]; + + if (dev->class == ATA_DEV_UNKNOWN && + ehc->classes[dev->devno] != ATA_DEV_NONE) + return 0; + } + + return 1; +} + /** * ata_eh_recover - recover host port after error * @ap: host port to recover + * @prereset: prereset method (can be NULL) * @softreset: softreset method (can be NULL) * @hardreset: hardreset method (can be NULL) * @postreset: postreset method (can be NULL) * * This is the alpha and omega, eum and yang, heart and soul of * libata exception handling. On entry, actions required to - * recover each devices are recorded in eh_context. This - * function executes all the operations with appropriate retrials - * and fallbacks to resurrect failed devices. + * recover the port and hotplug requests are recorded in + * eh_context. This function executes all the operations with + * appropriate retrials and fallbacks to resurrect failed + * devices, detach goners and greet newcomers. * * LOCKING: * Kernel thread context (may sleep). 
@@ -1440,8 +1709,8 @@ static int ata_port_nr_enabled(struct ata_port *ap) * RETURNS: * 0 on success, -errno on failure. */ -static int ata_eh_recover(struct ata_port *ap, ata_reset_fn_t softreset, - ata_reset_fn_t hardreset, +static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, + ata_reset_fn_t softreset, ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) { struct ata_eh_context *ehc = &ap->eh_context; @@ -1455,21 +1724,42 @@ static int ata_eh_recover(struct ata_port *ap, ata_reset_fn_t softreset, dev = &ap->device[i]; ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; + + /* process hotplug request */ + if (dev->flags & ATA_DFLAG_DETACH) + ata_eh_detach_dev(dev); + + if (!ata_dev_enabled(dev) && + ((ehc->i.probe_mask & (1 << dev->devno)) && + !(ehc->did_probe_mask & (1 << dev->devno)))) { + ata_eh_detach_dev(dev); + ata_dev_init(dev); + ehc->did_probe_mask |= (1 << dev->devno); + ehc->i.action |= ATA_EH_SOFTRESET; + } } retry: down_xfermask = 0; rc = 0; + /* if UNLOADING, finish immediately */ + if (ap->flags & ATA_FLAG_UNLOADING) + goto out; + /* skip EH if possible. */ - if (!ata_port_nr_enabled(ap) && !(ap->flags & ATA_FLAG_FROZEN)) + if (ata_eh_skip_recovery(ap)) ehc->i.action = 0; + for (i = 0; i < ATA_MAX_DEVICES; i++) + ehc->classes[i] = ATA_DEV_UNKNOWN; + /* reset */ if (ehc->i.action & ATA_EH_RESET_MASK) { ata_eh_freeze_port(ap); - rc = ata_eh_reset(ap, softreset, hardreset, postreset); + rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset, + softreset, hardreset, postreset); if (rc) { ata_port_printk(ap, KERN_ERR, "reset failed, giving up\n"); @@ -1479,8 +1769,8 @@ static int ata_eh_recover(struct ata_port *ap, ata_reset_fn_t softreset, ata_eh_thaw_port(ap); } - /* revalidate existing devices */ - rc = ata_eh_revalidate(ap, &dev); + /* revalidate existing devices and attach new ones */ + rc = ata_eh_revalidate_and_attach(ap, &dev); if (rc) goto dev_fail; @@ -1498,6 +1788,8 @@ static int ata_eh_recover(struct ata_port *ap, ata_reset_fn_t softreset, dev_fail: switch (rc) { case -ENODEV: + /* device missing, schedule probing */ + ehc->i.probe_mask |= (1 << dev->devno); case -EINVAL: ehc->tries[dev->devno] = 0; break; @@ -1510,15 +1802,31 @@ static int ata_eh_recover(struct ata_port *ap, ata_reset_fn_t softreset, ehc->tries[dev->devno] = 0; } - /* disable device if it has used up all its chances */ - if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) + if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { + /* disable device if it has used up all its chances */ ata_dev_disable(dev); - /* soft didn't work? be haaaaard */ - if (ehc->i.flags & ATA_EHI_DID_RESET) - ehc->i.action |= ATA_EH_HARDRESET; - else - ehc->i.action |= ATA_EH_SOFTRESET; + /* detach if offline */ + if (ata_port_offline(ap)) + ata_eh_detach_dev(dev); + + /* probe if requested */ + if ((ehc->i.probe_mask & (1 << dev->devno)) && + !(ehc->did_probe_mask & (1 << dev->devno))) { + ata_eh_detach_dev(dev); + ata_dev_init(dev); + + ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; + ehc->did_probe_mask |= (1 << dev->devno); + ehc->i.action |= ATA_EH_SOFTRESET; + } + } else { + /* soft didn't work? 
be haaaaard */ + if (ehc->i.flags & ATA_EHI_DID_RESET) + ehc->i.action |= ATA_EH_HARDRESET; + else + ehc->i.action |= ATA_EH_SOFTRESET; + } if (ata_port_nr_enabled(ap)) { ata_port_printk(ap, KERN_WARNING, "failed to recover some " @@ -1586,6 +1894,7 @@ static void ata_eh_finish(struct ata_port *ap) /** * ata_do_eh - do standard error handling * @ap: host port to handle error for + * @prereset: prereset method (can be NULL) * @softreset: softreset method (can be NULL) * @hardreset: hardreset method (can be NULL) * @postreset: postreset method (can be NULL) @@ -1595,11 +1904,15 @@ static void ata_eh_finish(struct ata_port *ap) * LOCKING: * Kernel thread context (may sleep). */ -void ata_do_eh(struct ata_port *ap, ata_reset_fn_t softreset, - ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) +void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, + ata_reset_fn_t softreset, ata_reset_fn_t hardreset, + ata_postreset_fn_t postreset) { - ata_eh_autopsy(ap); - ata_eh_report(ap); - ata_eh_recover(ap, softreset, hardreset, postreset); + if (!(ap->flags & ATA_FLAG_LOADING)) { + ata_eh_autopsy(ap); + ata_eh_report(ap); + } + + ata_eh_recover(ap, prereset, softreset, hardreset, postreset); ata_eh_finish(ap); }
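
Usage sketch (illustrative, not part of the patch): ata_do_eh() now takes a prereset callback ahead of the reset methods, so an LLDD's ->error_handler passes one extra argument. A minimal sketch follows, assuming the standard ata_std_softreset/sata_std_hardreset/ata_std_postreset helpers; the my_* names are hypothetical.

#include <linux/libata.h>

/* Hypothetical prereset callback matching ata_prereset_fn_t. */
static int my_prereset(struct ata_port *ap)
{
	/*
	 * Returning 0 leaves the reset choice recorded in
	 * ap->eh_context.i.action alone; clearing ATA_EH_RESET_MASK
	 * there instead tells ata_eh_reset() not to reset at all,
	 * and a non-zero return makes the reset step fail.
	 */
	return 0;
}

/* Hypothetical ->error_handler wired up to the new signature. */
static void my_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, my_prereset, ata_std_softreset, sata_std_hardreset,
		  ata_std_postreset);
}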