[SCSI] Merge branch 'linus'
Author:     James Bottomley <James.Bottomley@HansenPartnership.com>
AuthorDate: Fri, 12 Jun 2009 15:02:03 +0000 (10:02 -0500)
Commit:     James Bottomley <James.Bottomley@HansenPartnership.com>
CommitDate: Fri, 12 Jun 2009 15:02:03 +0000 (10:02 -0500)
Conflicts:
drivers/message/fusion/mptsas.c

fixed up conflict between req->data_len accessors and mptsas driver updates.

Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
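
For context: the conflict comes from the block layer replacing direct req->data_len reads with accessor helpers, so the mptsas SMP passthrough code below now uses blk_rq_bytes() (and req->resid_len for the residual bookkeeping). Below is a minimal sketch of the conversion pattern, assuming only the generic block layer API; smp_payload_len() is a hypothetical helper used purely for illustration, not part of the driver.

/*
 * Sketch of the req->data_len -> blk_rq_bytes() conversion resolved by this
 * merge.  The old code read the field directly; the merged code goes through
 * the accessor.  The "- 4" mirrors the SMP frame adjustment visible in the
 * mptsas hunks below.
 */
#include <linux/blkdev.h>

static unsigned int smp_payload_len(struct request *req)
{
	/* before: return req->data_len - 4; */
	return blk_rq_bytes(req) - 4;
}

The residual accounting follows the same pattern: req->data_len = 0 and rsp->data_len -= len become req->resid_len = 0 and rsp->resid_len -= len, as seen in the SMP handler hunks further down.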
drivers/message/fusion/mptsas.c
drivers/net/Makefile
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/mpt2sas/mpt2sas_transport.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/st.c
fs/exofs/osd.c

index 14c490a767a4b718982ea69ccede6efe44b6582c,79f5433359f9b3fc1b346cb78b5d11dcd60d15de..20e0b447e8e83a0ce07455de4d7f56215f43a568
@@@ -93,37 -93,8 +93,37 @@@ static u8   mptsasDoneCtx = MPT_MAX_PROTO
  static u8     mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
  static u8     mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
  static u8     mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS;
 -
 -static void mptsas_hotplug_work(struct work_struct *work);
 +static u8     mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS;
 +
 +static void mptsas_firmware_event_work(struct work_struct *work);
 +static void mptsas_send_sas_event(struct fw_event_work *fw_event);
 +static void mptsas_send_raid_event(struct fw_event_work *fw_event);
 +static void mptsas_send_ir2_event(struct fw_event_work *fw_event);
 +static void mptsas_parse_device_info(struct sas_identify *identify,
 +              struct mptsas_devinfo *device_info);
 +static inline void mptsas_set_rphy(MPT_ADAPTER *ioc,
 +              struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy);
 +static struct mptsas_phyinfo  *mptsas_find_phyinfo_by_sas_address
 +              (MPT_ADAPTER *ioc, u64 sas_address);
 +static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc,
 +      struct mptsas_devinfo *device_info, u32 form, u32 form_specific);
 +static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc,
 +      struct mptsas_enclosure *enclosure, u32 form, u32 form_specific);
 +static int mptsas_add_end_device(MPT_ADAPTER *ioc,
 +      struct mptsas_phyinfo *phy_info);
 +static void mptsas_del_end_device(MPT_ADAPTER *ioc,
 +      struct mptsas_phyinfo *phy_info);
 +static void mptsas_send_link_status_event(struct fw_event_work *fw_event);
 +static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address
 +              (MPT_ADAPTER *ioc, u64 sas_address);
 +static void mptsas_expander_delete(MPT_ADAPTER *ioc,
 +              struct mptsas_portinfo *port_info, u8 force);
 +static void mptsas_send_expander_event(struct fw_event_work *fw_event);
 +static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
 +static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
 +static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
 +static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
 +static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
  
  static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
                                        MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
@@@ -247,125 -218,30 +247,125 @@@ static void mptsas_print_expander_pg1(M
            le16_to_cpu(pg1->AttachedDevHandle)));
  }
  
 -static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
 +/* inhibit sas firmware event handling */
 +static void
 +mptsas_fw_event_off(MPT_ADAPTER *ioc)
  {
 -      struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
 -      return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ioc->fw_event_lock, flags);
 +      ioc->fw_events_off = 1;
 +      ioc->sas_discovery_quiesce_io = 0;
 +      spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 +
  }
  
 -static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
 +/* enable sas firmware event handling */
 +static void
 +mptsas_fw_event_on(MPT_ADAPTER *ioc)
  {
 -      struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
 -      return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ioc->fw_event_lock, flags);
 +      ioc->fw_events_off = 0;
 +      spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  }
  
 -static struct mptsas_portinfo *
 -mptsas_get_hba_portinfo(MPT_ADAPTER *ioc)
 +/* queue a sas firmware event */
 +static void
 +mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
 +    unsigned long delay)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ioc->fw_event_lock, flags);
 +      list_add_tail(&fw_event->list, &ioc->fw_event_list);
 +      INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
 +      devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n",
 +          ioc->name, __func__, fw_event));
 +      queue_delayed_work(ioc->fw_event_q, &fw_event->work,
 +          delay);
 +      spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 +}
 +
 +/* requeue a sas firmware event */
 +static void
 +mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
 +    unsigned long delay)
 +{
 +      unsigned long flags;
 +      spin_lock_irqsave(&ioc->fw_event_lock, flags);
 +      devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
 +          "(fw_event=0x%p)\n", ioc->name, __func__, fw_event));
 +      fw_event->retries++;
 +      queue_delayed_work(ioc->fw_event_q, &fw_event->work,
 +          msecs_to_jiffies(delay));
 +      spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 +}
 +
 +/* free memory associated with a sas firmware event */
 +static void
 +mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&ioc->fw_event_lock, flags);
 +      devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
 +          ioc->name, __func__, fw_event));
 +      list_del(&fw_event->list);
 +      kfree(fw_event);
 +      spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 +}
 +
 +/* walk the firmware event queue, and either stop or wait for
 + * outstanding events to complete */
 +static void
 +mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
  {
 -      struct list_head        *head = &ioc->sas_topology;
 -      struct mptsas_portinfo  *pi = NULL;
 +      struct fw_event_work *fw_event, *next;
 +      struct mptsas_target_reset_event *target_reset_list, *n;
 +      u8      flush_q;
 +      MPT_SCSI_HOST   *hd = shost_priv(ioc->sh);
 +
 +      /* flush the target_reset_list */
 +      if (!list_empty(&hd->target_reset_list)) {
 +              list_for_each_entry_safe(target_reset_list, n,
 +                  &hd->target_reset_list, list) {
 +                      dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +                          "%s: removing target reset for id=%d\n",
 +                          ioc->name, __func__,
 +                         target_reset_list->sas_event_data.TargetID));
 +                      list_del(&target_reset_list->list);
 +                      kfree(target_reset_list);
 +              }
 +      }
 +
 +      if (list_empty(&ioc->fw_event_list) ||
 +           !ioc->fw_event_q || in_interrupt())
 +              return;
 +
 +      flush_q = 0;
 +      list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
 +              if (cancel_delayed_work(&fw_event->work))
 +                      mptsas_free_fw_event(ioc, fw_event);
 +              else
 +                      flush_q = 1;
 +      }
 +      if (flush_q)
 +              flush_workqueue(ioc->fw_event_q);
 +}
  
 -      /* always the first entry on sas_topology list */
  
 -      if (!list_empty(head))
 -              pi = list_entry(head->next, struct mptsas_portinfo, list);
 +static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
 +{
 +      struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
 +      return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
 +}
  
 -      return pi;
 +static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
 +{
 +      struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
 +      return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
  }
  
  /*
@@@ -389,38 -265,6 +389,38 @@@ mptsas_find_portinfo_by_handle(MPT_ADAP
        return rc;
  }
  
 +/**
 + *    mptsas_find_portinfo_by_sas_address - find port info by SAS address
 + *    @ioc: Pointer to MPT_ADAPTER structure
 + *    @sas_address: SAS address to look up
 + *
 + *    Walks ioc->sas_topology under the sas_topology_mutex (taken here).
 + *
 + **/
 +static struct mptsas_portinfo *
 +mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
 +{
 +      struct mptsas_portinfo *port_info, *rc = NULL;
 +      int i;
 +
 +      if (sas_address >= ioc->hba_port_sas_addr &&
 +          sas_address < (ioc->hba_port_sas_addr +
 +          ioc->hba_port_num_phy))
 +              return ioc->hba_port_info;
 +
 +      mutex_lock(&ioc->sas_topology_mutex);
 +      list_for_each_entry(port_info, &ioc->sas_topology, list)
 +              for (i = 0; i < port_info->num_phys; i++)
 +                      if (port_info->phy_info[i].identify.sas_address ==
 +                          sas_address) {
 +                              rc = port_info;
 +                              goto out;
 +                      }
 + out:
 +      mutex_unlock(&ioc->sas_topology_mutex);
 +      return rc;
 +}
 +
  /*
   * Returns true if there is a scsi end device
   */
@@@ -464,7 -308,6 +464,7 @@@ mptsas_port_delete(MPT_ADAPTER *ioc, st
                if(phy_info->port_details != port_details)
                        continue;
                memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
 +              mptsas_set_rphy(ioc, phy_info, NULL);
                phy_info->port_details = NULL;
        }
        kfree(port_details);
@@@ -536,285 -379,6 +536,285 @@@ starget
                phy_info->port_details->starget = starget;
  }
  
 +/**
 + *    mptsas_add_device_component - add a device to the sas_device_info_list
 + *    @ioc: Pointer to MPT_ADAPTER structure
 + *    @channel: fw mapped channel of the device
 + *    @id: fw mapped id of the device
 + *    @sas_address: SAS address of the device
 + *    @device_info: bitfield describing the device type
 + *
 + **/
 +static void
 +mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
 +      u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id)
 +{
 +      struct mptsas_device_info       *sas_info, *next;
 +      struct scsi_device      *sdev;
 +      struct scsi_target      *starget;
 +      struct sas_rphy *rphy;
 +
 +      /*
 +       * Delete all matching devices out of the list
 +       */
 +      mutex_lock(&ioc->sas_device_info_mutex);
 +      list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
 +          list) {
 +              if (!sas_info->is_logical_volume &&
 +                  (sas_info->sas_address == sas_address ||
 +                  (sas_info->fw.channel == channel &&
 +                   sas_info->fw.id == id))) {
 +                      list_del(&sas_info->list);
 +                      kfree(sas_info);
 +              }
 +      }
 +
 +      sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
 +      if (!sas_info)
 +              goto out;
 +
 +      /*
 +       * Set Firmware mapping
 +       */
 +      sas_info->fw.id = id;
 +      sas_info->fw.channel = channel;
 +
 +      sas_info->sas_address = sas_address;
 +      sas_info->device_info = device_info;
 +      sas_info->slot = slot;
 +      sas_info->enclosure_logical_id = enclosure_logical_id;
 +      INIT_LIST_HEAD(&sas_info->list);
 +      list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
 +
 +      /*
 +       * Set OS mapping
 +       */
 +      shost_for_each_device(sdev, ioc->sh) {
 +              starget = scsi_target(sdev);
 +              rphy = dev_to_rphy(starget->dev.parent);
 +              if (rphy->identify.sas_address == sas_address) {
 +                      sas_info->os.id = starget->id;
 +                      sas_info->os.channel = starget->channel;
 +              }
 +      }
 +
 + out:
 +      mutex_unlock(&ioc->sas_device_info_mutex);
 +      return;
 +}
 +
 +/**
 + *    mptsas_add_device_component_by_fw - add a device by its fw mapping
 + *    @ioc: Pointer to MPT_ADAPTER structure
 + *    @channel: fw mapped channel of the device
 + *    @id: fw mapped id of the device
 + *
 + **/
 +static void
 +mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
 +{
 +      struct mptsas_devinfo sas_device;
 +      struct mptsas_enclosure enclosure_info;
 +      int rc;
 +
 +      rc = mptsas_sas_device_pg0(ioc, &sas_device,
 +          (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
 +           MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 +          (channel << 8) + id);
 +      if (rc)
 +              return;
 +
 +      memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
 +      mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
 +          (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
 +           MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
 +           sas_device.handle_enclosure);
 +
 +      mptsas_add_device_component(ioc, sas_device.channel,
 +          sas_device.id, sas_device.sas_address, sas_device.device_info,
 +          sas_device.slot, enclosure_info.enclosure_logical_id);
 +}
 +
 +/**
 + *    mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list
 + *    @ioc: Pointer to MPT_ADAPTER structure
 + *    @starget: SCSI target of the RAID volume
 + *
 + **/
 +static void
 +mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
 +              struct scsi_target *starget)
 +{
 +      CONFIGPARMS                     cfg;
 +      ConfigPageHeader_t              hdr;
 +      dma_addr_t                      dma_handle;
 +      pRaidVolumePage0_t              buffer = NULL;
 +      int                             i;
 +      RaidPhysDiskPage0_t             phys_disk;
 +      struct mptsas_device_info       *sas_info, *next;
 +
 +      memset(&cfg, 0 , sizeof(CONFIGPARMS));
 +      memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
 +      hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
 +      /* assume all volumes are on channel = 0 */
 +      cfg.pageAddr = starget->id;
 +      cfg.cfghdr.hdr = &hdr;
 +      cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 +      cfg.timeout = 10;
 +
 +      if (mpt_config(ioc, &cfg) != 0)
 +              goto out;
 +
 +      if (!hdr.PageLength)
 +              goto out;
 +
 +      buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
 +          &dma_handle);
 +
 +      if (!buffer)
 +              goto out;
 +
 +      cfg.physAddr = dma_handle;
 +      cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
 +
 +      if (mpt_config(ioc, &cfg) != 0)
 +              goto out;
 +
 +      if (!buffer->NumPhysDisks)
 +              goto out;
 +
 +      /*
 +       * Adding entry for hidden components
 +       */
 +      for (i = 0; i < buffer->NumPhysDisks; i++) {
 +
 +              if (mpt_raid_phys_disk_pg0(ioc,
 +                  buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
 +                      continue;
 +
 +              mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus,
 +                  phys_disk.PhysDiskID);
 +
 +              mutex_lock(&ioc->sas_device_info_mutex);
 +              list_for_each_entry(sas_info, &ioc->sas_device_info_list,
 +                  list) {
 +                      if (!sas_info->is_logical_volume &&
 +                          (sas_info->fw.channel == phys_disk.PhysDiskBus &&
 +                          sas_info->fw.id == phys_disk.PhysDiskID)) {
 +                              sas_info->is_hidden_raid_component = 1;
 +                              sas_info->volume_id = starget->id;
 +                      }
 +              }
 +              mutex_unlock(&ioc->sas_device_info_mutex);
 +
 +      }
 +
 +      /*
 +       * Delete all matching devices out of the list
 +       */
 +      mutex_lock(&ioc->sas_device_info_mutex);
 +      list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
 +          list) {
 +              if (sas_info->is_logical_volume && sas_info->fw.id ==
 +                  starget->id) {
 +                      list_del(&sas_info->list);
 +                      kfree(sas_info);
 +              }
 +      }
 +
 +      sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
 +      if (sas_info) {
 +              sas_info->fw.id = starget->id;
 +              sas_info->os.id = starget->id;
 +              sas_info->os.channel = starget->channel;
 +              sas_info->is_logical_volume = 1;
 +              INIT_LIST_HEAD(&sas_info->list);
 +              list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
 +      }
 +      mutex_unlock(&ioc->sas_device_info_mutex);
 +
 + out:
 +      if (buffer)
 +              pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
 +                  dma_handle);
 +}
 +
 +/**
 + *    mptsas_add_device_component_starget - add the device attached to a target
 + *    @ioc: Pointer to MPT_ADAPTER structure
 + *    @starget: SCSI target whose attached end device is added
 + *
 + **/
 +static void
 +mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
 +      struct scsi_target *starget)
 +{
 +      VirtTarget      *vtarget;
 +      struct sas_rphy *rphy;
 +      struct mptsas_phyinfo   *phy_info = NULL;
 +      struct mptsas_enclosure enclosure_info;
 +
 +      rphy = dev_to_rphy(starget->dev.parent);
 +      vtarget = starget->hostdata;
 +      phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
 +                      rphy->identify.sas_address);
 +      if (!phy_info)
 +              return;
 +
 +      memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
 +      mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
 +              (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
 +              MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
 +              phy_info->attached.handle_enclosure);
 +
 +      mptsas_add_device_component(ioc, phy_info->attached.channel,
 +              phy_info->attached.id, phy_info->attached.sas_address,
 +              phy_info->attached.device_info,
 +              phy_info->attached.slot, enclosure_info.enclosure_logical_id);
 +}
 +
 +/**
 + *    mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached
 + *    @ioc: Pointer to MPT_ADAPTER structure
 + *    @channel: os mapped channel of the device
 + *    @id: os mapped id of the device
 + *
 + **/
 +static void
 +mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id)
 +{
 +      struct mptsas_device_info       *sas_info, *next;
 +
 +      /*
 +       * Set is_cached flag
 +       */
 +      list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
 +              list) {
 +              if (sas_info->os.channel == channel && sas_info->os.id == id)
 +                      sas_info->is_cached = 1;
 +      }
 +}
 +
 +/**
 + *    mptsas_del_device_components - Cleaning the list
 + *    @ioc: Pointer to MPT_ADAPTER structure
 + *
 + **/
 +static void
 +mptsas_del_device_components(MPT_ADAPTER *ioc)
 +{
 +      struct mptsas_device_info       *sas_info, *next;
 +
 +      mutex_lock(&ioc->sas_device_info_mutex);
 +      list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
 +              list) {
 +              list_del(&sas_info->list);
 +              kfree(sas_info);
 +      }
 +      mutex_unlock(&ioc->sas_device_info_mutex);
 +}
 +
  
  /*
   * mptsas_setup_wide_ports
@@@ -870,8 -434,8 +870,8 @@@ mptsas_setup_wide_ports(MPT_ADAPTER *io
                 * Forming a port
                 */
                if (!port_details) {
 -                      port_details = kzalloc(sizeof(*port_details),
 -                              GFP_KERNEL);
 +                      port_details = kzalloc(sizeof(struct
 +                              mptsas_portinfo_details), GFP_KERNEL);
                        if (!port_details)
                                goto out;
                        port_details->num_phys = 1;
@@@ -959,62 -523,15 +959,62 @@@ mptsas_find_vtarget(MPT_ADAPTER *ioc, u
        VirtTarget                      *vtarget = NULL;
  
        shost_for_each_device(sdev, ioc->sh) {
 -              if ((vdevice = sdev->hostdata) == NULL)
 +              vdevice = sdev->hostdata;
 +              if ((vdevice == NULL) ||
 +                      (vdevice->vtarget == NULL))
 +                      continue;
 +              if ((vdevice->vtarget->tflags &
 +                  MPT_TARGET_FLAGS_RAID_COMPONENT ||
 +                  vdevice->vtarget->raidVolume))
                        continue;
                if (vdevice->vtarget->id == id &&
 -                  vdevice->vtarget->channel == channel)
 +                      vdevice->vtarget->channel == channel)
                        vtarget = vdevice->vtarget;
        }
        return vtarget;
  }
  
 +static void
 +mptsas_queue_device_delete(MPT_ADAPTER *ioc,
 +      MpiEventDataSasDeviceStatusChange_t *sas_event_data)
 +{
 +      struct fw_event_work *fw_event;
 +      int sz;
 +
 +      sz = offsetof(struct fw_event_work, event_data) +
 +          sizeof(MpiEventDataSasDeviceStatusChange_t);
 +      fw_event = kzalloc(sz, GFP_ATOMIC);
 +      if (!fw_event) {
 +              printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
 +                  ioc->name, __func__, __LINE__);
 +              return;
 +      }
 +      memcpy(fw_event->event_data, sas_event_data,
 +          sizeof(MpiEventDataSasDeviceStatusChange_t));
 +      fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE;
 +      fw_event->ioc = ioc;
 +      mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
 +}
 +
 +static void
 +mptsas_queue_rescan(MPT_ADAPTER *ioc)
 +{
 +      struct fw_event_work *fw_event;
 +      int sz;
 +
 +      sz = offsetof(struct fw_event_work, event_data);
 +      fw_event = kzalloc(sz, GFP_ATOMIC);
 +      if (!fw_event) {
 +              printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
 +                  ioc->name, __func__, __LINE__);
 +              return;
 +      }
 +      fw_event->event = -1;
 +      fw_event->ioc = ioc;
 +      mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
 +}
 +
 +
  /**
   * mptsas_target_reset
   *
@@@ -1033,21 -550,13 +1033,21 @@@ mptsas_target_reset(MPT_ADAPTER *ioc, u
  {
        MPT_FRAME_HDR   *mf;
        SCSITaskMgmt_t  *pScsiTm;
 -
 -      if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
 -              dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
 -                  ioc->name,__func__, __LINE__));
 +      if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
                return 0;
 +
 +
 +      mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
 +      if (mf == NULL) {
 +              dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
 +                      "%s, no msg frames @%d!!\n", ioc->name,
 +                      __func__, __LINE__));
 +              goto out_fail;
        }
  
 +      dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
 +              ioc->name, mf));
 +
        /* Format the Request
         */
        pScsiTm = (SCSITaskMgmt_t *) mf;
  
        DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
  
 -      mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf);
 +      dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +         "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n",
 +         ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id));
 +
 +      mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
  
        return 1;
 +
 + out_fail:
 +
 +      mpt_clear_taskmgmt_in_progress_flag(ioc);
 +      return 0;
  }
  
  /**
@@@ -1102,12 -602,11 +1102,12 @@@ mptsas_target_reset_queue(MPT_ADAPTER *
  
        vtarget->deleted = 1; /* block IO */
  
 -      target_reset_list = kzalloc(sizeof(*target_reset_list),
 +      target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
            GFP_ATOMIC);
        if (!target_reset_list) {
 -              dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
 -                  ioc->name,__func__, __LINE__));
 +              dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
 +                      "%s, failed to allocate mem @%d..!!\n",
 +                      ioc->name, __func__, __LINE__));
                return;
        }
  
                sizeof(*sas_event_data));
        list_add_tail(&target_reset_list->list, &hd->target_reset_list);
  
 -      if (hd->resetPending)
 -              return;
 +      target_reset_list->time_count = jiffies;
  
        if (mptsas_target_reset(ioc, channel, id)) {
                target_reset_list->target_reset_issued = 1;
 -              hd->resetPending = 1;
        }
  }
  
  /**
 - * mptsas_dev_reset_complete
 - *
 - * Completion for TARGET_RESET after NOT_RESPONDING_EVENT,
 - * enable work queue to finish off removing device from upper layers.
 - * then send next TARGET_RESET in the queue.
 - *
 - * @ioc
 + *    mptsas_taskmgmt_complete - complete SAS task management function
 + *    @ioc: Pointer to MPT_ADAPTER structure
   *
 + *    Completion handler for TARGET_RESET after NOT_RESPONDING_EVENT; enables
 + *    the work queue to finish removing the device from the upper layers,
 + *    then sends the next TARGET_RESET in the queue.
   **/
 -static void
 -mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
 +static int
 +mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
  {
        MPT_SCSI_HOST   *hd = shost_priv(ioc->sh);
          struct list_head *head = &hd->target_reset_list;
 -      struct mptsas_target_reset_event *target_reset_list;
 -      struct mptsas_hotplug_event *ev;
 -      EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
        u8              id, channel;
 -      __le64          sas_address;
 +      struct mptsas_target_reset_event        *target_reset_list;
 +      SCSITaskMgmtReply_t *pScsiTmReply;
 +
 +      dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: "
 +          "(mf = %p, mr = %p)\n", ioc->name, mf, mr));
 +
 +      pScsiTmReply = (SCSITaskMgmtReply_t *)mr;
 +      if (pScsiTmReply) {
 +              dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +                  "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
 +                  "\ttask_type = 0x%02X, iocstatus = 0x%04X "
 +                  "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
 +                  "term_cmnds = %d\n", ioc->name,
 +                  pScsiTmReply->Bus, pScsiTmReply->TargetID,
 +                  pScsiTmReply->TaskType,
 +                  le16_to_cpu(pScsiTmReply->IOCStatus),
 +                  le32_to_cpu(pScsiTmReply->IOCLogInfo),
 +                  pScsiTmReply->ResponseCode,
 +                  le32_to_cpu(pScsiTmReply->TerminationCount)));
 +
 +              if (pScsiTmReply->ResponseCode)
 +                      mptscsih_taskmgmt_response_code(ioc,
 +                      pScsiTmReply->ResponseCode);
 +      }
 +
 +      if (pScsiTmReply && (pScsiTmReply->TaskType ==
 +          MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType ==
 +           MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) {
 +              ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
 +              ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
 +              memcpy(ioc->taskmgmt_cmds.reply, mr,
 +                  min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
 +              if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
 +                      ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
 +                      complete(&ioc->taskmgmt_cmds.done);
 +                      return 1;
 +              }
 +              return 0;
 +      }
 +
 +      mpt_clear_taskmgmt_in_progress_flag(ioc);
  
        if (list_empty(head))
 -              return;
 +              return 1;
  
 -      target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, list);
 +      target_reset_list = list_entry(head->next,
 +          struct mptsas_target_reset_event, list);
  
 -      sas_event_data = &target_reset_list->sas_event_data;
 -      id = sas_event_data->TargetID;
 -      channel = sas_event_data->Bus;
 -      hd->resetPending = 0;
 +      dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +          "TaskMgmt: completed (%d seconds)\n",
 +          ioc->name, jiffies_to_msecs(jiffies -
 +          target_reset_list->time_count)/1000));
 +
 +      id = pScsiTmReply->TargetID;
 +      channel = pScsiTmReply->Bus;
 +      target_reset_list->time_count = jiffies;
  
        /*
         * retry target reset
         */
        if (!target_reset_list->target_reset_issued) {
 -              if (mptsas_target_reset(ioc, channel, id)) {
 +              if (mptsas_target_reset(ioc, channel, id))
                        target_reset_list->target_reset_issued = 1;
 -                      hd->resetPending = 1;
 -              }
 -              return;
 +              return 1;
        }
  
        /*
         * enable work queue to remove device from upper layers
         */
        list_del(&target_reset_list->list);
 +      if ((mptsas_find_vtarget(ioc, channel, id)) && !ioc->fw_events_off)
 +              mptsas_queue_device_delete(ioc,
 +                      &target_reset_list->sas_event_data);
  
 -      ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 -      if (!ev) {
 -              dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
 -                  ioc->name,__func__, __LINE__));
 -              return;
 -      }
 -
 -      INIT_WORK(&ev->work, mptsas_hotplug_work);
 -      ev->ioc = ioc;
 -      ev->handle = le16_to_cpu(sas_event_data->DevHandle);
 -      ev->parent_handle =
 -          le16_to_cpu(sas_event_data->ParentDevHandle);
 -      ev->channel = channel;
 -      ev->id =id;
 -      ev->phy_id = sas_event_data->PhyNum;
 -      memcpy(&sas_address, &sas_event_data->SASAddress,
 -          sizeof(__le64));
 -      ev->sas_address = le64_to_cpu(sas_address);
 -      ev->device_info = le32_to_cpu(sas_event_data->DeviceInfo);
 -      ev->event_type = MPTSAS_DEL_DEVICE;
 -      schedule_work(&ev->work);
 -      kfree(target_reset_list);
  
        /*
         * issue target reset to next device in the queue
  
        head = &hd->target_reset_list;
        if (list_empty(head))
 -              return;
 +              return 1;
  
        target_reset_list = list_entry(head->next, struct mptsas_target_reset_event,
            list);
  
 -      sas_event_data = &target_reset_list->sas_event_data;
 -      id = sas_event_data->TargetID;
 -      channel = sas_event_data->Bus;
 +      id = target_reset_list->sas_event_data.TargetID;
 +      channel = target_reset_list->sas_event_data.Bus;
 +      target_reset_list->time_count = jiffies;
  
 -      if (mptsas_target_reset(ioc, channel, id)) {
 +      if (mptsas_target_reset(ioc, channel, id))
                target_reset_list->target_reset_issued = 1;
 -              hd->resetPending = 1;
 -      }
 -}
  
 -/**
 - * mptsas_taskmgmt_complete
 - *
 - * @ioc
 - * @mf
 - * @mr
 - *
 - **/
 -static int
 -mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 -{
 -      mptsas_dev_reset_complete(ioc);
 -      return mptscsih_taskmgmt_complete(ioc, mf, mr);
 +      return 1;
  }
  
  /**
@@@ -1243,59 -740,37 +1243,59 @@@ static in
  mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
  {
        MPT_SCSI_HOST   *hd;
 -      struct mptsas_target_reset_event *target_reset_list, *n;
        int rc;
  
        rc = mptscsih_ioc_reset(ioc, reset_phase);
 +      if ((ioc->bus_type != SAS) || (!rc))
 +              return rc;
  
 -      if (ioc->bus_type != SAS)
 -              goto out;
 -
 -      if (reset_phase != MPT_IOC_POST_RESET)
 -              goto out;
 -
 -      if (!ioc->sh || !ioc->sh->hostdata)
 -              goto out;
        hd = shost_priv(ioc->sh);
        if (!hd->ioc)
                goto out;
  
 -      if (list_empty(&hd->target_reset_list))
 -              goto out;
 -
 -      /* flush the target_reset_list */
 -      list_for_each_entry_safe(target_reset_list, n,
 -          &hd->target_reset_list, list) {
 -              list_del(&target_reset_list->list);
 -              kfree(target_reset_list);
 +      switch (reset_phase) {
 +      case MPT_IOC_SETUP_RESET:
 +              dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +                  "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
 +              mptsas_fw_event_off(ioc);
 +              break;
 +      case MPT_IOC_PRE_RESET:
 +              dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +                  "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
 +              break;
 +      case MPT_IOC_POST_RESET:
 +              dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +                  "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
 +              if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
 +                      ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET;
 +                      complete(&ioc->sas_mgmt.done);
 +              }
 +              mptsas_cleanup_fw_event_q(ioc);
 +              mptsas_queue_rescan(ioc);
 +              mptsas_fw_event_on(ioc);
 +              break;
 +      default:
 +              break;
        }
  
   out:
        return rc;
  }
  
 +
 +/**
 + * enum device_state - TEST UNIT READY (TUR) result for a discovered device
 + * @DEVICE_RETRY: need to retry the TUR
 + * @DEVICE_ERROR: TUR returned an error, don't add the device
 + * @DEVICE_READY: device can be added
 + *
 + */
 +enum device_state{
 +      DEVICE_RETRY,
 +      DEVICE_ERROR,
 +      DEVICE_READY,
 +};
 +
  static int
  mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
                u32 form, u32 form_specific)
        return error;
  }
  
 +/**
 + *    mptsas_add_end_device - report a new end device to sas transport layer
 + *    @ioc: Pointer to MPT_ADAPTER structure
 + *    @phy_info: describes the attached device
 + *
 + *    Return: 0 on success, non-zero on failure
 + *
 + **/
 +static int
 +mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
 +{
 +      struct sas_rphy *rphy;
 +      struct sas_port *port;
 +      struct sas_identify identify;
 +      char *ds = NULL;
 +      u8 fw_id;
 +
 +      if (!phy_info) {
 +              dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 +                      "%s: exit at line=%d\n", ioc->name,
 +                       __func__, __LINE__));
 +              return 1;
 +      }
 +
 +      fw_id = phy_info->attached.id;
 +
 +      if (mptsas_get_rphy(phy_info)) {
 +              dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 +                      "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                       __func__, fw_id, __LINE__));
 +              return 2;
 +      }
 +
 +      port = mptsas_get_port(phy_info);
 +      if (!port) {
 +              dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 +                      "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                       __func__, fw_id, __LINE__));
 +              return 3;
 +      }
 +
 +      if (phy_info->attached.device_info &
 +          MPI_SAS_DEVICE_INFO_SSP_TARGET)
 +              ds = "ssp";
 +      if (phy_info->attached.device_info &
 +          MPI_SAS_DEVICE_INFO_STP_TARGET)
 +              ds = "stp";
 +      if (phy_info->attached.device_info &
 +          MPI_SAS_DEVICE_INFO_SATA_DEVICE)
 +              ds = "sata";
 +
 +      printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d,"
 +          " phy %d, sas_addr 0x%llx\n", ioc->name, ds,
 +          phy_info->attached.channel, phy_info->attached.id,
 +          phy_info->attached.phy_id, (unsigned long long)
 +          phy_info->attached.sas_address);
 +
 +      mptsas_parse_device_info(&identify, &phy_info->attached);
 +      rphy = sas_end_device_alloc(port);
 +      if (!rphy) {
 +              dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 +                      "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                       __func__, fw_id, __LINE__));
 +              return 5; /* non-fatal: an rphy can be added later */
 +      }
 +
 +      rphy->identify = identify;
 +      if (sas_rphy_add(rphy)) {
 +              dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 +                      "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                       __func__, fw_id, __LINE__));
 +              sas_rphy_free(rphy);
 +              return 6;
 +      }
 +      mptsas_set_rphy(ioc, phy_info, rphy);
 +      return 0;
 +}
 +
 +/**
 + *    mptsas_del_end_device - report a deleted end device to sas transport layer
 + *    @ioc: Pointer to MPT_ADAPTER structure
 + *    @phy_info: describes the attached device
 + *
 + **/
 +static void
 +mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
 +{
 +      struct sas_rphy *rphy;
 +      struct sas_port *port;
 +      struct mptsas_portinfo *port_info;
 +      struct mptsas_phyinfo *phy_info_parent;
 +      int i;
 +      char *ds = NULL;
 +      u8 fw_id;
 +      u64 sas_address;
 +
 +      if (!phy_info)
 +              return;
 +
 +      fw_id = phy_info->attached.id;
 +      sas_address = phy_info->attached.sas_address;
 +
 +      if (!phy_info->port_details) {
 +              dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 +                      "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                       __func__, fw_id, __LINE__));
 +              return;
 +      }
 +      rphy = mptsas_get_rphy(phy_info);
 +      if (!rphy) {
 +              dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 +                      "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                       __func__, fw_id, __LINE__));
 +              return;
 +      }
 +
 +      if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR
 +              || phy_info->attached.device_info
 +                      & MPI_SAS_DEVICE_INFO_SMP_INITIATOR
 +              || phy_info->attached.device_info
 +                      & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
 +              ds = "initiator";
 +      if (phy_info->attached.device_info &
 +          MPI_SAS_DEVICE_INFO_SSP_TARGET)
 +              ds = "ssp";
 +      if (phy_info->attached.device_info &
 +          MPI_SAS_DEVICE_INFO_STP_TARGET)
 +              ds = "stp";
 +      if (phy_info->attached.device_info &
 +          MPI_SAS_DEVICE_INFO_SATA_DEVICE)
 +              ds = "sata";
 +
 +      dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT
 +          "removing %s device: fw_channel %d, fw_id %d, phy %d, "
 +          "sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel,
 +          phy_info->attached.id, phy_info->attached.phy_id,
 +          (unsigned long long) sas_address);
 +
 +      port = mptsas_get_port(phy_info);
 +      if (!port) {
 +              dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 +                      "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                       __func__, fw_id, __LINE__));
 +              return;
 +      }
 +      port_info = phy_info->portinfo;
 +      phy_info_parent = port_info->phy_info;
 +      for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) {
 +              if (!phy_info_parent->phy)
 +                      continue;
 +              if (phy_info_parent->attached.sas_address !=
 +                  sas_address)
 +                      continue;
 +              dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev,
 +                  MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n",
 +                  ioc->name, phy_info_parent->phy_id,
 +                  phy_info_parent->phy);
 +              sas_port_delete_phy(port, phy_info_parent->phy);
 +      }
 +
 +      dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
 +          "delete port %d, sas_addr (0x%llx)\n", ioc->name,
 +           port->port_identifier, (unsigned long long)sas_address);
 +      sas_port_delete(port);
 +      mptsas_set_port(ioc, phy_info, NULL);
 +      mptsas_port_delete(ioc, phy_info->port_details);
 +}
 +
 +struct mptsas_phyinfo *
 +mptsas_refreshing_device_handles(MPT_ADAPTER *ioc,
 +      struct mptsas_devinfo *sas_device)
 +{
 +      struct mptsas_phyinfo *phy_info;
 +      struct mptsas_portinfo *port_info;
 +      int i;
 +
 +      phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
 +          sas_device->sas_address);
 +      if (!phy_info)
 +              goto out;
 +      port_info = phy_info->portinfo;
 +      if (!port_info)
 +              goto out;
 +      mutex_lock(&ioc->sas_topology_mutex);
 +      for (i = 0; i < port_info->num_phys; i++) {
 +              if (port_info->phy_info[i].attached.sas_address !=
 +                      sas_device->sas_address)
 +                      continue;
 +              port_info->phy_info[i].attached.channel = sas_device->channel;
 +              port_info->phy_info[i].attached.id = sas_device->id;
 +              port_info->phy_info[i].attached.sas_address =
 +                  sas_device->sas_address;
 +              port_info->phy_info[i].attached.handle = sas_device->handle;
 +              port_info->phy_info[i].attached.handle_parent =
 +                  sas_device->handle_parent;
 +              port_info->phy_info[i].attached.handle_enclosure =
 +                  sas_device->handle_enclosure;
 +      }
 +      mutex_unlock(&ioc->sas_topology_mutex);
 + out:
 +      return phy_info;
 +}
 +
 +/**
 + * mptsas_firmware_event_work - work thread for processing fw events
 + * @work: work queue payload containing info describing the event
 + * Context: user
 + *
 + */
 +static void
 +mptsas_firmware_event_work(struct work_struct *work)
 +{
 +      struct fw_event_work *fw_event =
 +              container_of(work, struct fw_event_work, work.work);
 +      MPT_ADAPTER *ioc = fw_event->ioc;
 +
 +      /* special rescan topology handling */
 +      if (fw_event->event == -1) {
 +              if (ioc->in_rescan) {
 +                      devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +                              "%s: rescan ignored as it is in progress\n",
 +                              ioc->name, __func__));
 +                      return;
 +              }
 +              devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after "
 +                  "reset\n", ioc->name, __func__));
 +              ioc->in_rescan = 1;
 +              mptsas_not_responding_devices(ioc);
 +              mptsas_scan_sas_topology(ioc);
 +              ioc->in_rescan = 0;
 +              mptsas_free_fw_event(ioc, fw_event);
 +              return;
 +      }
 +
 +      /* events handling turned off during host reset */
 +      if (ioc->fw_events_off) {
 +              mptsas_free_fw_event(ioc, fw_event);
 +              return;
 +      }
 +
 +      devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), "
 +          "event = (0x%02x)\n", ioc->name, __func__, fw_event,
 +          (fw_event->event & 0xFF)));
 +
 +      switch (fw_event->event) {
 +      case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
 +              mptsas_send_sas_event(fw_event);
 +              break;
 +      case MPI_EVENT_INTEGRATED_RAID:
 +              mptsas_send_raid_event(fw_event);
 +              break;
 +      case MPI_EVENT_IR2:
 +              mptsas_send_ir2_event(fw_event);
 +              break;
 +      case MPI_EVENT_PERSISTENT_TABLE_FULL:
 +              mptbase_sas_persist_operation(ioc,
 +                  MPI_SAS_OP_CLEAR_NOT_PRESENT);
 +              mptsas_free_fw_event(ioc, fw_event);
 +              break;
 +      case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
 +              mptsas_broadcast_primative_work(fw_event);
 +              break;
 +      case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
 +              mptsas_send_expander_event(fw_event);
 +              break;
 +      case MPI_EVENT_SAS_PHY_LINK_STATUS:
 +              mptsas_send_link_status_event(fw_event);
 +              break;
 +      case MPI_EVENT_QUEUE_FULL:
 +              mptsas_handle_queue_full_event(fw_event);
 +              break;
 +      }
 +}
 +
 +
 +
  static int
  mptsas_slave_configure(struct scsi_device *sdev)
  {
 +      struct Scsi_Host        *host = sdev->host;
 +      MPT_SCSI_HOST   *hd = shost_priv(host);
 +      MPT_ADAPTER     *ioc = hd->ioc;
 +      VirtDevice      *vdevice = sdev->hostdata;
  
 -      if (sdev->channel == MPTSAS_RAID_CHANNEL)
 +      if (vdevice->vtarget->deleted) {
 +              sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n");
 +              vdevice->vtarget->deleted = 0;
 +      }
 +
 +      /*
 +       * RAID volumes placed beyond the last expected port.
 +       * Skip sending sas mode pages in that case.
 +       */
 +      if (sdev->channel == MPTSAS_RAID_CHANNEL) {
 +              mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev));
                goto out;
 +      }
  
        sas_read_port_mode_page(sdev);
  
 +      mptsas_add_device_component_starget(ioc, scsi_target(sdev));
 +
   out:
        return mptscsih_slave_configure(sdev);
  }
@@@ -1693,18 -875,9 +1693,18 @@@ mptsas_target_alloc(struct scsi_target 
         * RAID volumes placed beyond the last expected port.
         */
        if (starget->channel == MPTSAS_RAID_CHANNEL) {
 -              for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
 -                      if (id == ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID)
 -                              channel = ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus;
 +              if (!ioc->raid_data.pIocPg2) {
 +                      kfree(vtarget);
 +                      return -ENXIO;
 +              }
 +              for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
 +                      if (id == ioc->raid_data.pIocPg2->
 +                                      RaidVolume[i].VolumeID) {
 +                              channel = ioc->raid_data.pIocPg2->
 +                                      RaidVolume[i].VolumeBus;
 +                      }
 +              }
 +              vtarget->raidVolume = 1;
                goto out;
        }
  
@@@ -1753,18 -926,11 +1753,18 @@@ mptsas_target_destroy(struct scsi_targe
        struct sas_rphy         *rphy;
        struct mptsas_portinfo  *p;
        int                      i;
 -      MPT_ADAPTER *ioc = hd->ioc;
 +      MPT_ADAPTER     *ioc = hd->ioc;
 +      VirtTarget      *vtarget;
  
        if (!starget->hostdata)
                return;
  
 +      vtarget = starget->hostdata;
 +
 +      mptsas_del_device_component_by_os(ioc, starget->channel,
 +          starget->id);
 +
 +
        if (starget->channel == MPTSAS_RAID_CHANNEL)
                goto out;
  
                        if (p->phy_info[i].attached.sas_address !=
                                        rphy->identify.sas_address)
                                continue;
 +
 +                      starget_printk(KERN_INFO, starget, MYIOC_s_FMT
 +                      "delete device: fw_channel %d, fw_id %d, phy %d, "
 +                      "sas_addr 0x%llx\n", ioc->name,
 +                      p->phy_info[i].attached.channel,
 +                      p->phy_info[i].attached.id,
 +                      p->phy_info[i].attached.phy_id, (unsigned long long)
 +                      p->phy_info[i].attached.sas_address);
 +
                        mptsas_set_starget(&p->phy_info[i], NULL);
 -                      goto out;
                }
        }
  
   out:
 +      vtarget->starget = NULL;
        kfree(starget->hostdata);
        starget->hostdata = NULL;
  }
@@@ -1851,8 -1008,6 +1851,8 @@@ mptsas_slave_alloc(struct scsi_device *
  static int
  mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
  {
 +      MPT_SCSI_HOST   *hd;
 +      MPT_ADAPTER     *ioc;
        VirtDevice      *vdevice = SCpnt->device->hostdata;
  
        if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
                return 0;
        }
  
 +      hd = shost_priv(SCpnt->device->host);
 +      ioc = hd->ioc;
 +
 +      if (ioc->sas_discovery_quiesce_io)
 +              return SCSI_MLQUEUE_HOST_BUSY;
 +
  //    scsi_print_command(SCpnt);
  
        return mptscsih_qcmd(SCpnt,done);
@@@ -1965,19 -1114,14 +1965,19 @@@ static int mptsas_get_linkerrors(struc
  static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
                MPT_FRAME_HDR *reply)
  {
 -      ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_COMMAND_GOOD;
 +      ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
        if (reply != NULL) {
 -              ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_RF_VALID;
 +              ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID;
                memcpy(ioc->sas_mgmt.reply, reply,
                    min(ioc->reply_sz, 4 * reply->u.reply.MsgLength));
        }
 -      complete(&ioc->sas_mgmt.done);
 -      return 1;
 +
 +      if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
 +              ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING;
 +              complete(&ioc->sas_mgmt.done);
 +              return 1;
 +      }
 +      return 0;
  }
  
  static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
                MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET;
        req->PhyNum = phy->identify.phy_identifier;
  
 +      INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
        mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
  
        timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
  
        /* a reply frame is expected */
        if ((ioc->sas_mgmt.status &
 -          MPT_IOCTL_STATUS_RF_VALID) == 0) {
 +          MPT_MGMT_STATUS_RF_VALID) == 0) {
                error = -ENXIO;
                goto out_unlock;
        }
        error = 0;
  
   out_unlock:
 +      CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
        mutex_unlock(&ioc->sas_mgmt.mutex);
   out:
        return error;
@@@ -2135,8 -1277,8 +2135,8 @@@ static int mptsas_smp_handler(struct Sc
        /* do we need to support multiple segments? */
        if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
                printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
-                   ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
-                   rsp->bio->bi_vcnt, rsp->data_len);
+                   ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
+                   rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
                return -EINVAL;
        }
  
        smpreq = (SmpPassthroughRequest_t *)mf;
        memset(smpreq, 0, sizeof(*smpreq));
  
-       smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4);
+       smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
        smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
  
        if (rphy)
                struct mptsas_portinfo *port_info;
  
                mutex_lock(&ioc->sas_topology_mutex);
 -              port_info = mptsas_get_hba_portinfo(ioc);
 +              port_info = ioc->hba_port_info;
                if (port_info && port_info->phy_info)
                        sas_address =
                                port_info->phy_info[0].phy->identify.sas_address;
        /* request */
        flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                       MPI_SGE_FLAGS_END_OF_BUFFER |
 -                     MPI_SGE_FLAGS_DIRECTION |
 -                     mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT;
 +                     MPI_SGE_FLAGS_DIRECTION)
 +                     << MPI_SGE_FLAGS_SHIFT;
-       flagsLength |= (req->data_len - 4);
+       flagsLength |= (blk_rq_bytes(req) - 4);
  
        dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
-                                     req->data_len, PCI_DMA_BIDIRECTIONAL);
+                                     blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
        if (!dma_addr_out)
                goto put_mf;
 -      mpt_add_sge(psge, flagsLength, dma_addr_out);
 -      psge += (sizeof(u32) + sizeof(dma_addr_t));
 +      ioc->add_sge(psge, flagsLength, dma_addr_out);
 +      psge += ioc->SGE_size;
  
        /* response */
 -      flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
 +      flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
 +              MPI_SGE_FLAGS_SYSTEM_ADDRESS |
 +              MPI_SGE_FLAGS_IOC_TO_HOST |
 +              MPI_SGE_FLAGS_END_OF_BUFFER;
 +
 +      flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
-       flagsLength |= rsp->data_len + 4;
+       flagsLength |= blk_rq_bytes(rsp) + 4;
        dma_addr_in =  pci_map_single(ioc->pcidev, bio_data(rsp->bio),
-                                     rsp->data_len, PCI_DMA_BIDIRECTIONAL);
+                                     blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
        if (!dma_addr_in)
                goto unmap;
 -      mpt_add_sge(psge, flagsLength, dma_addr_in);
 +      ioc->add_sge(psge, flagsLength, dma_addr_in);
  
 +      INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
        mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
  
        timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
        }
        mf = NULL;
  
 -      if (ioc->sas_mgmt.status & MPT_IOCTL_STATUS_RF_VALID) {
 +      if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
                SmpPassthroughReply_t *smprep;
  
                smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
                memcpy(req->sense, smprep, sizeof(*smprep));
                req->sense_len = sizeof(*smprep);
-               req->data_len = 0;
-               rsp->data_len -= smprep->ResponseDataLength;
+               req->resid_len = 0;
+               rsp->resid_len -= smprep->ResponseDataLength;
        } else {
 -              printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
 +              printk(MYIOC_s_ERR_FMT
 +                  "%s: smp passthru reply failed to be returned\n",
                    ioc->name, __func__);
                ret = -ENXIO;
        }
  unmap:
        if (dma_addr_out)
-               pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len,
+               pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
                                 PCI_DMA_BIDIRECTIONAL);
        if (dma_addr_in)
-               pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len,
+               pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
                                 PCI_DMA_BIDIRECTIONAL);
  put_mf:
        if (mf)
                mpt_free_msg_frame(ioc, mf);
  out_unlock:
 +      CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
        mutex_unlock(&ioc->sas_mgmt.mutex);
  out:
        return ret;
@@@ -2304,7 -1438,7 +2304,7 @@@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc
  
        port_info->num_phys = buffer->NumPhys;
        port_info->phy_info = kcalloc(port_info->num_phys,
 -              sizeof(*port_info->phy_info),GFP_KERNEL);
 +              sizeof(struct mptsas_phyinfo), GFP_KERNEL);
        if (!port_info->phy_info) {
                error = -ENOMEM;
                goto out_free_consistent;
@@@ -2466,6 -1600,10 +2466,6 @@@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc
        __le64 sas_address;
        int error=0;
  
 -      if (ioc->sas_discovery_runtime &&
 -              mptsas_is_end_device(device_info))
 -                      goto out;
 -
        hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
        hdr.ExtPageLength = 0;
        hdr.PageNumber = 0;
  
        mptsas_print_device_pg0(ioc, buffer);
  
 +      memset(device_info, 0, sizeof(struct mptsas_devinfo));
        device_info->handle = le16_to_cpu(buffer->DevHandle);
        device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle);
        device_info->handle_enclosure =
@@@ -2538,9 -1675,7 +2538,9 @@@ mptsas_sas_expander_pg0(MPT_ADAPTER *io
        SasExpanderPage0_t *buffer;
        dma_addr_t dma_handle;
        int i, error;
 +      __le64 sas_address;
  
 +      memset(port_info, 0, sizeof(struct mptsas_portinfo));
        hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
        hdr.ExtPageLength = 0;
        hdr.PageNumber = 0;
        }
  
        /* save config data */
 -      port_info->num_phys = buffer->NumPhys;
 +      port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
        port_info->phy_info = kcalloc(port_info->num_phys,
 -              sizeof(*port_info->phy_info),GFP_KERNEL);
 +              sizeof(struct mptsas_phyinfo), GFP_KERNEL);
        if (!port_info->phy_info) {
                error = -ENOMEM;
                goto out_free_consistent;
        }
  
 +      memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
        for (i = 0; i < port_info->num_phys; i++) {
                port_info->phy_info[i].portinfo = port_info;
                port_info->phy_info[i].handle =
                    le16_to_cpu(buffer->DevHandle);
 +              port_info->phy_info[i].identify.sas_address =
 +                  le64_to_cpu(sas_address);
 +              port_info->phy_info[i].identify.handle_parent =
 +                  le16_to_cpu(buffer->ParentDevHandle);
        }
  
   out_free_consistent:
@@@ -2622,7 -1752,11 +2622,7 @@@ mptsas_sas_expander_pg1(MPT_ADAPTER *io
        dma_addr_t dma_handle;
        int error=0;
  
 -      if (ioc->sas_discovery_runtime &&
 -              mptsas_is_end_device(&phy_info->attached))
 -                      goto out;
 -
 -      hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
 +      hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION;
        hdr.ExtPageLength = 0;
        hdr.PageNumber = 1;
        hdr.Reserved1 = 0;
        cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
  
        error = mpt_config(ioc, &cfg);
 +
 +      if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
 +              error = -ENODEV;
 +              goto out;
 +      }
 +
        if (error)
                goto out_free_consistent;
  
@@@ -2882,21 -2010,16 +2882,21 @@@ static int mptsas_probe_one_phy(struct 
                                goto out;
                        }
                        mptsas_set_port(ioc, phy_info, port);
 -                      dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 -                          "sas_port_alloc: port=%p dev=%p port_id=%d\n",
 -                          ioc->name, port, dev, port->port_identifier));
 +                      devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev,
 +                          MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n",
 +                          ioc->name, port->port_identifier,
 +                          (unsigned long long)phy_info->
 +                          attached.sas_address));
                }
 -              dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_port_add_phy: phy_id=%d\n",
 -                  ioc->name, phy_info->phy_id));
 +              dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +                      "sas_port_add_phy: phy_id=%d\n",
 +                      ioc->name, phy_info->phy_id));
                sas_port_add_phy(port, phy_info->phy);
                phy_info->sas_port_add_phy = 0;
 +              devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev,
 +                  MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name,
 +                   phy_info->phy_id, phy_info->phy));
        }
 -
        if (!mptsas_get_rphy(phy_info) && port && !port->rphy) {
  
                struct sas_rphy *rphy;
                 * the adding/removing of devices that occur
                 * after start of day.
                 */
 -              if (ioc->sas_discovery_runtime &&
 -                      mptsas_is_end_device(&phy_info->attached))
 -                              goto out;
 +              if (mptsas_is_end_device(&phy_info->attached) &&
 +                  phy_info->attached.handle_parent) {
 +                      goto out;
 +              }
  
                mptsas_parse_device_info(&identify, &phy_info->attached);
                if (scsi_is_host_device(parent)) {
                        struct mptsas_portinfo *port_info;
                        int i;
  
 -                      mutex_lock(&ioc->sas_topology_mutex);
 -                      port_info = mptsas_get_hba_portinfo(ioc);
 -                      mutex_unlock(&ioc->sas_topology_mutex);
 +                      port_info = ioc->hba_port_info;
  
                        for (i = 0; i < port_info->num_phys; i++)
                                if (port_info->phy_info[i].identify.sas_address ==
@@@ -2978,7 -2102,7 +2978,7 @@@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc
        struct mptsas_portinfo *port_info, *hba;
        int error = -ENOMEM, i;
  
 -      hba = kzalloc(sizeof(*port_info), GFP_KERNEL);
 +      hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
        if (! hba)
                goto out;
  
  
        mptsas_sas_io_unit_pg1(ioc);
        mutex_lock(&ioc->sas_topology_mutex);
 -      port_info = mptsas_get_hba_portinfo(ioc);
 +      port_info = ioc->hba_port_info;
        if (!port_info) {
 -              port_info = hba;
 +              ioc->hba_port_info = port_info = hba;
 +              ioc->hba_port_num_phy = port_info->num_phys;
                list_add_tail(&port_info->list, &ioc->sas_topology);
        } else {
                for (i = 0; i < hba->num_phys; i++) {
                hba = NULL;
        }
        mutex_unlock(&ioc->sas_topology_mutex);
 +#if defined(CPQ_CIM)
 +      ioc->num_ports = port_info->num_phys;
 +#endif
        for (i = 0; i < port_info->num_phys; i++) {
                mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
                        (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
                         MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
 -
 +              port_info->phy_info[i].identify.handle =
 +                  port_info->phy_info[i].handle;
                mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
                        (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
                         MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 -                       port_info->phy_info[i].handle);
 +                       port_info->phy_info[i].identify.handle);
 +              if (!ioc->hba_port_sas_addr)
 +                      ioc->hba_port_sas_addr =
 +                          port_info->phy_info[i].identify.sas_address;
                port_info->phy_info[i].identify.phy_id =
                    port_info->phy_info[i].phy_id = i;
                if (port_info->phy_info[i].attached.handle)
        return error;
  }
  
 -static int
 -mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle)
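 +/**
 + *	mptsas_expander_refresh - refresh phy and attached device info for an expander
 + *	@ioc: Pointer to MPT_ADAPTER structure
 + *	@port_info: expander port_info struct
 + *
 + *	Re-reads expander page 1 and device page 0 for every phy, locates the
 + *	parent port, sets up wide ports and probes each phy.
 + **/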
 +static void
 +mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
  {
 -      struct mptsas_portinfo *port_info, *p, *ex;
 -      struct device *parent;
 -      struct sas_rphy *rphy;
 -      int error = -ENOMEM, i, j;
 -
 -      ex = kzalloc(sizeof(*port_info), GFP_KERNEL);
 -      if (!ex)
 -              goto out;
 -
 -      error = mptsas_sas_expander_pg0(ioc, ex,
 -          (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
 -           MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
 -      if (error)
 -              goto out_free_port_info;
 -
 -      *handle = ex->phy_info[0].handle;
 -
 -      mutex_lock(&ioc->sas_topology_mutex);
 -      port_info = mptsas_find_portinfo_by_handle(ioc, *handle);
 -      if (!port_info) {
 -              port_info = ex;
 -              list_add_tail(&port_info->list, &ioc->sas_topology);
 -      } else {
 -              for (i = 0; i < ex->num_phys; i++) {
 -                      port_info->phy_info[i].handle =
 -                              ex->phy_info[i].handle;
 -                      port_info->phy_info[i].port_id =
 -                              ex->phy_info[i].port_id;
 -              }
 -              kfree(ex->phy_info);
 -              kfree(ex);
 -              ex = NULL;
 -      }
 -      mutex_unlock(&ioc->sas_topology_mutex);
 -
 +      struct mptsas_portinfo *parent;
 +      struct device *parent_dev;
 +      struct sas_rphy *rphy;
 +      int             i;
 +      u64             sas_address; /* expander sas address */
 +      u32             handle;
 +
 +      handle = port_info->phy_info[0].handle;
 +      sas_address = port_info->phy_info[0].identify.sas_address;
        for (i = 0; i < port_info->num_phys; i++) {
                mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
 -                      (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
 -                       MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle);
 -
 -              if (port_info->phy_info[i].identify.handle) {
 -                      mptsas_sas_device_pg0(ioc,
 -                              &port_info->phy_info[i].identify,
 -                              (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
 -                               MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 -                              port_info->phy_info[i].identify.handle);
 -                      port_info->phy_info[i].identify.phy_id =
 -                          port_info->phy_info[i].phy_id;
 -              }
 +                  (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
 +                  MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle);
 +
 +              mptsas_sas_device_pg0(ioc,
 +                  &port_info->phy_info[i].identify,
 +                  (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
 +                  MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 +                  port_info->phy_info[i].identify.handle);
 +              port_info->phy_info[i].identify.phy_id =
 +                  port_info->phy_info[i].phy_id;
  
                if (port_info->phy_info[i].attached.handle) {
                        mptsas_sas_device_pg0(ioc,
 -                              &port_info->phy_info[i].attached,
 -                              (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
 -                               MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 -                              port_info->phy_info[i].attached.handle);
 +                          &port_info->phy_info[i].attached,
 +                          (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
 +                           MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 +                          port_info->phy_info[i].attached.handle);
                        port_info->phy_info[i].attached.phy_id =
                            port_info->phy_info[i].phy_id;
                }
        }
  
 -      parent = &ioc->sh->shost_gendev;
 -      for (i = 0; i < port_info->num_phys; i++) {
 -              mutex_lock(&ioc->sas_topology_mutex);
 -              list_for_each_entry(p, &ioc->sas_topology, list) {
 -                      for (j = 0; j < p->num_phys; j++) {
 -                              if (port_info->phy_info[i].identify.handle !=
 -                                              p->phy_info[j].attached.handle)
 -                                      continue;
 -                              rphy = mptsas_get_rphy(&p->phy_info[j]);
 -                              parent = &rphy->dev;
 -                      }
 -              }
 +      mutex_lock(&ioc->sas_topology_mutex);
 +      parent = mptsas_find_portinfo_by_handle(ioc,
 +          port_info->phy_info[0].identify.handle_parent);
 +      if (!parent) {
                mutex_unlock(&ioc->sas_topology_mutex);
 +              return;
 +      }
 +      for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev;
 +          i++) {
 +              if (parent->phy_info[i].attached.sas_address == sas_address) {
 +                      rphy = mptsas_get_rphy(&parent->phy_info[i]);
 +                      parent_dev = &rphy->dev;
 +              }
        }
 +      mutex_unlock(&ioc->sas_topology_mutex);
  
        mptsas_setup_wide_ports(ioc, port_info);
 -
        for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
 -              mptsas_probe_one_phy(parent, &port_info->phy_info[i],
 +              mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i],
                    ioc->sas_index, 0);
 -
 -      return 0;
 -
 - out_free_port_info:
 -      if (ex) {
 -              kfree(ex->phy_info);
 -              kfree(ex);
 -      }
 - out:
 -      return error;
  }
  
 -/*
 - * mptsas_delete_expander_phys
 - *
 - *
 - * This will traverse topology, and remove expanders
 - * that are no longer present
 - */
  static void
 -mptsas_delete_expander_phys(MPT_ADAPTER *ioc)
 +mptsas_expander_event_add(MPT_ADAPTER *ioc,
 +    MpiEventDataSasExpanderStatusChange_t *expander_data)
  {
 -      struct mptsas_portinfo buffer;
 -      struct mptsas_portinfo *port_info, *n, *parent;
 -      struct mptsas_phyinfo *phy_info;
 -      struct sas_port * port;
 +      struct mptsas_portinfo *port_info;
        int i;
 -      u64     expander_sas_address;
 +      __le64 sas_address;
 +
 +      port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
 +      if (!port_info)
 +              BUG();
 +      port_info->num_phys = (expander_data->NumPhys) ?
 +          expander_data->NumPhys : 1;
 +      port_info->phy_info = kcalloc(port_info->num_phys,
 +          sizeof(struct mptsas_phyinfo), GFP_KERNEL);
 +      if (!port_info->phy_info)
 +              BUG();
 +      memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
 +      for (i = 0; i < port_info->num_phys; i++) {
 +              port_info->phy_info[i].portinfo = port_info;
 +              port_info->phy_info[i].handle =
 +                  le16_to_cpu(expander_data->DevHandle);
 +              port_info->phy_info[i].identify.sas_address =
 +                  le64_to_cpu(sas_address);
 +              port_info->phy_info[i].identify.handle_parent =
 +                  le16_to_cpu(expander_data->ParentDevHandle);
 +      }
  
        mutex_lock(&ioc->sas_topology_mutex);
 -      list_for_each_entry_safe(port_info, n, &ioc->sas_topology, list) {
 +      list_add_tail(&port_info->list, &ioc->sas_topology);
 +      mutex_unlock(&ioc->sas_topology_mutex);
  
 -              if (!(port_info->phy_info[0].identify.device_info &
 -                  MPI_SAS_DEVICE_INFO_SMP_TARGET))
 -                      continue;
 +      printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
 +          "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
 +          (unsigned long long)sas_address);
  
 -              if (mptsas_sas_expander_pg0(ioc, &buffer,
 -                   (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
 -                   MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
 -                   port_info->phy_info[0].handle)) {
 +      mptsas_expander_refresh(ioc, port_info);
 +}
  
 -                      /*
 -                       * Obtain the port_info instance to the parent port
 -                       */
 -                      parent = mptsas_find_portinfo_by_handle(ioc,
 -                          port_info->phy_info[0].identify.handle_parent);
 -
 -                      if (!parent)
 -                              goto next_port;
 +/**
 + * mptsas_delete_expander_siblings - remove siblings attached to expander
 + * @ioc: Pointer to MPT_ADAPTER structure
 + * @parent: the parent port_info object
 + * @expander: the expander port_info object
 + **/
 +static void
 +mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo
 +    *parent, struct mptsas_portinfo *expander)
 +{
 +      struct mptsas_phyinfo *phy_info;
 +      struct mptsas_portinfo *port_info;
 +      struct sas_rphy *rphy;
 +      int i;
  
 -                      expander_sas_address =
 -                              port_info->phy_info[0].identify.sas_address;
 +      phy_info = expander->phy_info;
 +      for (i = 0; i < expander->num_phys; i++, phy_info++) {
 +              rphy = mptsas_get_rphy(phy_info);
 +              if (!rphy)
 +                      continue;
 +              if (rphy->identify.device_type == SAS_END_DEVICE)
 +                      mptsas_del_end_device(ioc, phy_info);
 +      }
  
 +      phy_info = expander->phy_info;
 +      for (i = 0; i < expander->num_phys; i++, phy_info++) {
 +              rphy = mptsas_get_rphy(phy_info);
 +              if (!rphy)
 +                      continue;
 +              if (rphy->identify.device_type ==
 +                  MPI_SAS_DEVICE_INFO_EDGE_EXPANDER ||
 +                  rphy->identify.device_type ==
 +                  MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
 +                      port_info = mptsas_find_portinfo_by_sas_address(ioc,
 +                          rphy->identify.sas_address);
 +                      if (!port_info)
 +                              continue;
 +                      if (port_info == parent) /* backlink rphy */
 +                              continue;
                        /*
 -                       * Delete rphys in the parent that point
 -                       * to this expander.  The transport layer will
 -                       * cleanup all the children.
 -                       */
 -                      phy_info = parent->phy_info;
 -                      for (i = 0; i < parent->num_phys; i++, phy_info++) {
 -                              port = mptsas_get_port(phy_info);
 -                              if (!port)
 -                                      continue;
 -                              if (phy_info->attached.sas_address !=
 -                                      expander_sas_address)
 -                                      continue;
 -                              dsaswideprintk(ioc,
 -                                  dev_printk(KERN_DEBUG, &port->dev,
 -                                  MYIOC_s_FMT "delete port (%d)\n", ioc->name,
 -                                  port->port_identifier));
 -                              sas_port_delete(port);
 -                              mptsas_port_delete(ioc, phy_info->port_details);
 -                      }
 - next_port:
 +                      Delete this expander even if its expander device page
 +                      still exists, because the parent expander has already
 +                      been deleted.
 +                      */
 +                      mptsas_expander_delete(ioc, port_info, 1);
 +              }
 +      }
 +}
  
 -                      phy_info = port_info->phy_info;
 -                      for (i = 0; i < port_info->num_phys; i++, phy_info++)
 -                              mptsas_port_delete(ioc, phy_info->port_details);
  
 -                      list_del(&port_info->list);
 -                      kfree(port_info->phy_info);
 -                      kfree(port_info);
 -              }
 -              /*
 -              * Free this memory allocated from inside
 -              * mptsas_sas_expander_pg0
 -              */
 +/**
 + *    mptsas_expander_delete - remove this expander
 + *    @ioc: Pointer to MPT_ADAPTER structure
 + *    @port_info: expander port_info struct
 + *    @force: Flag to forcefully delete the expander
 + *
 + **/
 +static void mptsas_expander_delete(MPT_ADAPTER *ioc,
 +              struct mptsas_portinfo *port_info, u8 force)
 +{
 +
 +      struct mptsas_portinfo *parent;
 +      int             i;
 +      u64             expander_sas_address;
 +      struct mptsas_phyinfo *phy_info;
 +      struct mptsas_portinfo buffer;
 +      struct mptsas_portinfo_details *port_details;
 +      struct sas_port *port;
 +
 +      if (!port_info)
 +              return;
 +
 +      /* see if expander is still there before deleting */
 +      mptsas_sas_expander_pg0(ioc, &buffer,
 +          (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
 +          MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
 +          port_info->phy_info[0].identify.handle);
 +
 +      if (buffer.num_phys) {
                kfree(buffer.phy_info);
 +              if (!force)
 +                      return;
        }
 -      mutex_unlock(&ioc->sas_topology_mutex);
 +
 +      /*
 +       * Obtain the port_info instance to the parent port
 +       */
 +      port_details = NULL;
 +      expander_sas_address =
 +          port_info->phy_info[0].identify.sas_address;
 +      parent = mptsas_find_portinfo_by_handle(ioc,
 +          port_info->phy_info[0].identify.handle_parent);
 +      mptsas_delete_expander_siblings(ioc, parent, port_info);
 +      if (!parent)
 +              goto out;
 +
 +      /*
 +       * Delete rphys in the parent that point
 +       * to this expander.
 +       */
 +      phy_info = parent->phy_info;
 +      port = NULL;
 +      for (i = 0; i < parent->num_phys; i++, phy_info++) {
 +              if (!phy_info->phy)
 +                      continue;
 +              if (phy_info->attached.sas_address !=
 +                  expander_sas_address)
 +                      continue;
 +              if (!port) {
 +                      port = mptsas_get_port(phy_info);
 +                      port_details = phy_info->port_details;
 +              }
 +              dev_printk(KERN_DEBUG, &phy_info->phy->dev,
 +                  MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name,
 +                  phy_info->phy_id, phy_info->phy);
 +              sas_port_delete_phy(port, phy_info->phy);
 +      }
 +      if (port) {
 +              dev_printk(KERN_DEBUG, &port->dev,
 +                  MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n",
 +                  ioc->name, port->port_identifier,
 +                  (unsigned long long)expander_sas_address);
 +              sas_port_delete(port);
 +              mptsas_port_delete(ioc, port_details);
 +      }
 + out:
 +
 +      printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, "
 +          "sas_addr (0x%llx)\n",  ioc->name, port_info->num_phys,
 +          (unsigned long long)expander_sas_address);
 +
 +      /*
 +       * free link
 +       */
 +      list_del(&port_info->list);
 +      kfree(port_info->phy_info);
 +      kfree(port_info);
  }
  
 -/*
 - * Start of day discovery
 +
 +/**
 + * mptsas_send_expander_event - handle expander status change events
 + * @fw_event: firmware event work object carrying the
 + *	MpiEventDataSasExpanderStatusChange_t event data
 + *
 + * This function handles adding, removing, and refreshing
 + * device handles within the expander objects.
   */
  static void
 +mptsas_send_expander_event(struct fw_event_work *fw_event)
 +{
 +      MPT_ADAPTER *ioc;
 +      MpiEventDataSasExpanderStatusChange_t *expander_data;
 +      struct mptsas_portinfo *port_info;
 +      __le64 sas_address;
 +      int i;
 +
 +      ioc = fw_event->ioc;
 +      expander_data = (MpiEventDataSasExpanderStatusChange_t *)
 +          fw_event->event_data;
 +      memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
 +      port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
 +
 +      if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
 +              if (port_info) {
 +                      for (i = 0; i < port_info->num_phys; i++) {
 +                              port_info->phy_info[i].portinfo = port_info;
 +                              port_info->phy_info[i].handle =
 +                                  le16_to_cpu(expander_data->DevHandle);
 +                              port_info->phy_info[i].identify.sas_address =
 +                                  le64_to_cpu(sas_address);
 +                              port_info->phy_info[i].identify.handle_parent =
 +                                  le16_to_cpu(expander_data->ParentDevHandle);
 +                      }
 +                      mptsas_expander_refresh(ioc, port_info);
 +              } else if (!port_info && expander_data->NumPhys)
 +                      mptsas_expander_event_add(ioc, expander_data);
 +      } else if (expander_data->ReasonCode ==
 +          MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
 +              mptsas_expander_delete(ioc, port_info, 0);
 +
 +      mptsas_free_fw_event(ioc, fw_event);
 +}
 +
 +/**
 + * mptsas_expander_add - add an expander reported by the firmware
 + * @ioc: Pointer to MPT_ADAPTER structure
 + * @handle: firmware device handle of the expander to add
 + *
 + */
 +struct mptsas_portinfo *
 +mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
 +{
 +      struct mptsas_portinfo buffer, *port_info;
 +      int i;
 +
 +      if ((mptsas_sas_expander_pg0(ioc, &buffer,
 +          (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
 +          MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
 +              return NULL;
 +
 +      port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
 +      if (!port_info) {
 +              dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 +              "%s: exit at line=%d\n", ioc->name,
 +              __func__, __LINE__));
 +              return NULL;
 +      }
 +      port_info->num_phys = buffer.num_phys;
 +      port_info->phy_info = buffer.phy_info;
 +      for (i = 0; i < port_info->num_phys; i++)
 +              port_info->phy_info[i].portinfo = port_info;
 +      mutex_lock(&ioc->sas_topology_mutex);
 +      list_add_tail(&port_info->list, &ioc->sas_topology);
 +      mutex_unlock(&ioc->sas_topology_mutex);
 +      printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
 +          "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
 +          (unsigned long long)buffer.phy_info[0].identify.sas_address);
 +      mptsas_expander_refresh(ioc, port_info);
 +      return port_info;
 +}
 +
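 +/**
 + *	mptsas_send_link_status_event - handle SAS phy link status change events
 + *	@fw_event: firmware event work object carrying the
 + *	MpiEventDataSasPhyLinkStatus_t event data
 + **/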
 +static void
 +mptsas_send_link_status_event(struct fw_event_work *fw_event)
 +{
 +      MPT_ADAPTER *ioc;
 +      MpiEventDataSasPhyLinkStatus_t *link_data;
 +      struct mptsas_portinfo *port_info;
 +      struct mptsas_phyinfo *phy_info = NULL;
 +      __le64 sas_address;
 +      u8 phy_num;
 +      u8 link_rate;
 +
 +      ioc = fw_event->ioc;
 +      link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data;
 +
 +      memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64));
 +      sas_address = le64_to_cpu(sas_address);
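 +      /* the upper nibble of LinkRates holds the current link rate */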
 +      link_rate = link_data->LinkRates >> 4;
 +      phy_num = link_data->PhyNum;
 +
 +      port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
 +      if (port_info) {
 +              phy_info = &port_info->phy_info[phy_num];
 +              if (phy_info)
 +                      phy_info->negotiated_link_rate = link_rate;
 +      }
 +
 +      if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
 +          link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
 +
 +              if (!port_info) {
 +                      if (ioc->old_sas_discovery_protocal) {
 +                              port_info = mptsas_expander_add(ioc,
 +                                      le16_to_cpu(link_data->DevHandle));
 +                              if (port_info)
 +                                      goto out;
 +                      }
 +                      goto out;
 +              }
 +
 +              if (port_info == ioc->hba_port_info)
 +                      mptsas_probe_hba_phys(ioc);
 +              else
 +                      mptsas_expander_refresh(ioc, port_info);
 +      } else if (phy_info && phy_info->phy) {
 +              if (link_rate ==  MPI_SAS_IOUNIT0_RATE_PHY_DISABLED)
 +                      phy_info->phy->negotiated_linkrate =
 +                          SAS_PHY_DISABLED;
 +              else if (link_rate ==
 +                  MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
 +                      phy_info->phy->negotiated_linkrate =
 +                          SAS_LINK_RATE_FAILED;
 +              else
 +                      phy_info->phy->negotiated_linkrate =
 +                          SAS_LINK_RATE_UNKNOWN;
 +      }
 + out:
 +      mptsas_free_fw_event(ioc, fw_event);
 +}
 +
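 +/**
 + *	mptsas_not_responding_devices - remove devices no longer reported by firmware
 + *	@ioc: Pointer to MPT_ADAPTER structure
 + **/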
 +static void
 +mptsas_not_responding_devices(MPT_ADAPTER *ioc)
 +{
 +      struct mptsas_portinfo buffer, *port_info;
 +      struct mptsas_device_info       *sas_info;
 +      struct mptsas_devinfo sas_device;
 +      u32     handle;
 +      VirtTarget *vtarget = NULL;
 +      struct mptsas_phyinfo *phy_info;
 +      u8 found_expander;
 +      int retval, retry_count;
 +      unsigned long flags;
 +
 +      mpt_findImVolumes(ioc);
 +
 +      spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
 +      if (ioc->ioc_reset_in_progress) {
 +              dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +                 "%s: exiting due to a parallel reset\n", ioc->name,
 +                  __func__));
 +              spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 +              return;
 +      }
 +      spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 +
 +      /* devices, logical volumes */
 +      mutex_lock(&ioc->sas_device_info_mutex);
 + redo_device_scan:
 +      list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
 +              if (sas_info->is_cached)
 +                      continue;
 +              if (!sas_info->is_logical_volume) {
 +                      sas_device.handle = 0;
 +                      retry_count = 0;
 +retry_page:
 +                      retval = mptsas_sas_device_pg0(ioc, &sas_device,
 +                              (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID
 +                              << MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 +                              (sas_info->fw.channel << 8) +
 +                              sas_info->fw.id);
 +
 +                      if (sas_device.handle)
 +                              continue;
 +                      if (retval == -EBUSY) {
 +                              spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
 +                              if (ioc->ioc_reset_in_progress) {
 +                                      dfailprintk(ioc,
 +                                      printk(MYIOC_s_DEBUG_FMT
 +                                      "%s: exiting due to reset\n",
 +                                      ioc->name, __func__));
 +                                      spin_unlock_irqrestore
 +                                      (&ioc->taskmgmt_lock, flags);
 +                                      mutex_unlock(&ioc->
 +                                      sas_device_info_mutex);
 +                                      return;
 +                              }
 +                              spin_unlock_irqrestore(&ioc->taskmgmt_lock,
 +                              flags);
 +                      }
 +
 +                      if (retval && (retval != -ENODEV)) {
 +                              if (retry_count < 10) {
 +                                      retry_count++;
 +                                      goto retry_page;
 +                              } else {
 +                                      devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +                                      "%s: config page retry count "
 +                                      "exceeded, deleting device 0x%llx\n",
 +                                      ioc->name, __func__,
 +                                      sas_info->sas_address));
 +                              }
 +                      }
 +
 +                      /* delete device */
 +                      vtarget = mptsas_find_vtarget(ioc,
 +                              sas_info->fw.channel, sas_info->fw.id);
 +
 +                      if (vtarget)
 +                              vtarget->deleted = 1;
 +
 +                      phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
 +                                      sas_info->sas_address);
 +
 +                      if (phy_info) {
 +                              mptsas_del_end_device(ioc, phy_info);
 +                              goto redo_device_scan;
 +                      }
 +              } else
 +                      mptsas_volume_delete(ioc, sas_info->fw.id);
 +      }
 +      mutex_unlock(&ioc->sas_device_info_mutex);
 +
 +      /* expanders */
 +      mutex_lock(&ioc->sas_topology_mutex);
 + redo_expander_scan:
 +      list_for_each_entry(port_info, &ioc->sas_topology, list) {
 +
 +              if (port_info->phy_info &&
 +                  (!(port_info->phy_info[0].identify.device_info &
 +                  MPI_SAS_DEVICE_INFO_SMP_TARGET)))
 +                      continue;
 +              found_expander = 0;
 +              handle = 0xFFFF;
 +              while (!mptsas_sas_expander_pg0(ioc, &buffer,
 +                  (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
 +                   MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) &&
 +                  !found_expander) {
 +
 +                      handle = buffer.phy_info[0].handle;
 +                      if (buffer.phy_info[0].identify.sas_address ==
 +                          port_info->phy_info[0].identify.sas_address) {
 +                              found_expander = 1;
 +                      }
 +                      kfree(buffer.phy_info);
 +              }
 +
 +              if (!found_expander) {
 +                      mptsas_expander_delete(ioc, port_info, 0);
 +                      goto redo_expander_scan;
 +              }
 +      }
 +      mutex_unlock(&ioc->sas_topology_mutex);
 +}
 +
 +/**
 + *    mptsas_probe_expanders - adding expanders
 + *    @ioc: Pointer to MPT_ADAPTER structure
 + *
 + **/
 +static void
 +mptsas_probe_expanders(MPT_ADAPTER *ioc)
 +{
 +      struct mptsas_portinfo buffer, *port_info;
 +      u32                     handle;
 +      int i;
 +
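 +      /*
 +       * A handle of 0xFFFF asks the firmware for the first expander; each
 +       * pass feeds the returned handle back in to walk the whole list.
 +       */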
 +      handle = 0xFFFF;
 +      while (!mptsas_sas_expander_pg0(ioc, &buffer,
 +          (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
 +           MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
 +
 +              handle = buffer.phy_info[0].handle;
 +              port_info = mptsas_find_portinfo_by_sas_address(ioc,
 +                  buffer.phy_info[0].identify.sas_address);
 +
 +              if (port_info) {
 +                      /* refreshing handles */
 +                      for (i = 0; i < buffer.num_phys; i++) {
 +                              port_info->phy_info[i].handle = handle;
 +                              port_info->phy_info[i].identify.handle_parent =
 +                                  buffer.phy_info[0].identify.handle_parent;
 +                      }
 +                      mptsas_expander_refresh(ioc, port_info);
 +                      kfree(buffer.phy_info);
 +                      continue;
 +              }
 +
 +              port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
 +              if (!port_info) {
 +                      dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 +                      "%s: exit at line=%d\n", ioc->name,
 +                      __func__, __LINE__));
 +                      return;
 +              }
 +              port_info->num_phys = buffer.num_phys;
 +              port_info->phy_info = buffer.phy_info;
 +              for (i = 0; i < port_info->num_phys; i++)
 +                      port_info->phy_info[i].portinfo = port_info;
 +              mutex_lock(&ioc->sas_topology_mutex);
 +              list_add_tail(&port_info->list, &ioc->sas_topology);
 +              mutex_unlock(&ioc->sas_topology_mutex);
 +              printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
 +                  "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
 +          (unsigned long long)buffer.phy_info[0].identify.sas_address);
 +              mptsas_expander_refresh(ioc, port_info);
 +      }
 +}
 +
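 +/**
 + *	mptsas_probe_devices - probe and add attached end devices
 + *	@ioc: Pointer to MPT_ADAPTER structure
 + **/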
 +static void
 +mptsas_probe_devices(MPT_ADAPTER *ioc)
 +{
 +      u16 handle;
 +      struct mptsas_devinfo sas_device;
 +      struct mptsas_phyinfo *phy_info;
 +
 +      handle = 0xFFFF;
 +      while (!(mptsas_sas_device_pg0(ioc, &sas_device,
 +          MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
 +
 +              handle = sas_device.handle;
 +
 +              if ((sas_device.device_info &
 +                   (MPI_SAS_DEVICE_INFO_SSP_TARGET |
 +                    MPI_SAS_DEVICE_INFO_STP_TARGET |
 +                    MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
 +                      continue;
 +
 +              phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
 +              if (!phy_info)
 +                      continue;
 +
 +              if (mptsas_get_rphy(phy_info))
 +                      continue;
 +
 +              mptsas_add_end_device(ioc, phy_info);
 +      }
 +}
 +
 +/**
 + *	mptsas_scan_sas_topology - scan the hba phys, expanders and devices
 + *	@ioc: Pointer to MPT_ADAPTER structure
 + *
 + **/
 +static void
  mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
  {
 -      u32 handle = 0xFFFF;
 +      struct scsi_device *sdev;
        int i;
  
 -      mutex_lock(&ioc->sas_discovery_mutex);
        mptsas_probe_hba_phys(ioc);
 -      while (!mptsas_probe_expander_phys(ioc, &handle))
 -              ;
 +      mptsas_probe_expanders(ioc);
 +      mptsas_probe_devices(ioc);
 +
        /*
          Reporting RAID volumes.
        */
 -      if (!ioc->ir_firmware)
 -              goto out;
 -      if (!ioc->raid_data.pIocPg2)
 -              goto out;
 -      if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
 -              goto out;
 +      if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 ||
 +          !ioc->raid_data.pIocPg2->NumActiveVolumes)
 +              return;
        for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
 +              sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
 +                  ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
 +              if (sdev) {
 +                      scsi_device_put(sdev);
 +                      continue;
 +              }
 +              printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
 +                  "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
 +                  ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID);
                scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
                    ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
        }
 - out:
 -      mutex_unlock(&ioc->sas_discovery_mutex);
  }
  
 -/*
 - * Work queue thread to handle Runtime discovery
 - * Mere purpose is the hot add/delete of expanders
 - *(Mutex UNLOCKED)
 - */
 +
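 +/**
 + *	mptsas_handle_queue_full_event - handle firmware queue full events
 + *	@fw_event: firmware event work object carrying the
 + *	EventDataQueueFull_t event data
 + **/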
  static void
 -__mptsas_discovery_work(MPT_ADAPTER *ioc)
 +mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
  {
 -      u32 handle = 0xFFFF;
 +      MPT_ADAPTER *ioc;
 +      EventDataQueueFull_t *qfull_data;
 +      struct mptsas_device_info *sas_info;
 +      struct scsi_device      *sdev;
 +      int depth;
 +      int id = -1;
 +      int channel = -1;
 +      int fw_id, fw_channel;
 +      u16 current_depth;
 +
 +      ioc = fw_event->ioc;
 +      qfull_data = (EventDataQueueFull_t *)fw_event->event_data;
 +      fw_id = qfull_data->TargetID;
 +      fw_channel = qfull_data->Bus;
 +      current_depth = le16_to_cpu(qfull_data->CurrentDepth);
 +
 +      /* if hidden raid component, look for the volume id */
 +      mutex_lock(&ioc->sas_device_info_mutex);
 +      if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) {
 +              list_for_each_entry(sas_info, &ioc->sas_device_info_list,
 +                  list) {
 +                      if (sas_info->is_cached ||
 +                          sas_info->is_logical_volume)
 +                              continue;
 +                      if (sas_info->is_hidden_raid_component &&
 +                          (sas_info->fw.channel == fw_channel &&
 +                          sas_info->fw.id == fw_id)) {
 +                              id = sas_info->volume_id;
 +                              channel = MPTSAS_RAID_CHANNEL;
 +                              goto out;
 +                      }
 +              }
 +      } else {
 +              list_for_each_entry(sas_info, &ioc->sas_device_info_list,
 +                  list) {
 +                      if (sas_info->is_cached ||
 +                          sas_info->is_hidden_raid_component ||
 +                          sas_info->is_logical_volume)
 +                              continue;
 +                      if (sas_info->fw.channel == fw_channel &&
 +                          sas_info->fw.id == fw_id) {
 +                              id = sas_info->os.id;
 +                              channel = sas_info->os.channel;
 +                              goto out;
 +                      }
 +              }
  
 -      ioc->sas_discovery_runtime=1;
 -      mptsas_delete_expander_phys(ioc);
 -      mptsas_probe_hba_phys(ioc);
 -      while (!mptsas_probe_expander_phys(ioc, &handle))
 -              ;
 -      ioc->sas_discovery_runtime=0;
 -}
 +      }
  
 -/*
 - * Work queue thread to handle Runtime discovery
 - * Mere purpose is the hot add/delete of expanders
 - *(Mutex LOCKED)
 - */
 -static void
 -mptsas_discovery_work(struct work_struct *work)
 -{
 -      struct mptsas_discovery_event *ev =
 -              container_of(work, struct mptsas_discovery_event, work);
 -      MPT_ADAPTER *ioc = ev->ioc;
 + out:
 +      mutex_unlock(&ioc->sas_device_info_mutex);
 +
 +      if (id != -1) {
 +              shost_for_each_device(sdev, ioc->sh) {
 +                      if (sdev->id == id && sdev->channel == channel) {
 +                              if (current_depth > sdev->queue_depth) {
 +                                      sdev_printk(KERN_INFO, sdev,
 +                                          "queue depth (%d) is less than "
 +                                          "the fw queue full depth (%d), "
 +                                          "skipping\n", sdev->queue_depth,
 +                                          current_depth);
 +                                      continue;
 +                              }
 +                              depth = scsi_track_queue_full(sdev,
 +                                  current_depth - 1);
 +                              if (depth > 0)
 +                                      sdev_printk(KERN_INFO, sdev,
 +                                      "Queue depth reduced to (%d)\n",
 +                                         depth);
 +                              else if (depth < 0)
 +                                      sdev_printk(KERN_INFO, sdev,
 +                                      "Tagged Command Queueing is being "
 +                                      "disabled\n");
 +                              else if (depth == 0)
 +                                      sdev_printk(KERN_INFO, sdev,
 +                                      "Queue depth not changed yet\n");
 +                      }
 +              }
 +      }
  
 -      mutex_lock(&ioc->sas_discovery_mutex);
 -      __mptsas_discovery_work(ioc);
 -      mutex_unlock(&ioc->sas_discovery_mutex);
 -      kfree(ev);
 +      mptsas_free_fw_event(ioc, fw_event);
  }
  
 +
  static struct mptsas_phyinfo *
  mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
  {
        return phy_info;
  }
  
 +/**
 + *	mptsas_find_phyinfo_by_phys_disk_num - find phy info for a RAID physical disk
 + *	@ioc: Pointer to MPT_ADAPTER structure
 + *	@phys_disk_num: firmware physical disk number
 + *	@channel: firmware channel (bus) of the disk
 + *	@id: firmware target id of the disk
 + *
 + **/
  static struct mptsas_phyinfo *
 -mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u8 channel, u8 id)
 +mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
 +      u8 channel, u8 id)
  {
 -      struct mptsas_portinfo *port_info;
        struct mptsas_phyinfo *phy_info = NULL;
 +      struct mptsas_portinfo *port_info;
 +      RaidPhysDiskPage1_t *phys_disk = NULL;
 +      int num_paths;
 +      u64 sas_address = 0;
        int i;
  
 -      mutex_lock(&ioc->sas_topology_mutex);
 -      list_for_each_entry(port_info, &ioc->sas_topology, list) {
 -              for (i = 0; i < port_info->num_phys; i++) {
 -                      if (!mptsas_is_end_device(
 -                              &port_info->phy_info[i].attached))
 -                              continue;
 -                      if (port_info->phy_info[i].attached.id != id)
 -                              continue;
 -                      if (port_info->phy_info[i].attached.channel != channel)
 -                              continue;
 -                      phy_info = &port_info->phy_info[i];
 -                      break;
 +      phy_info = NULL;
 +      if (!ioc->raid_data.pIocPg3)
 +              return NULL;
 +      /* dual port support */
 +      num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
 +      if (!num_paths)
 +              goto out;
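 +      /* page 1 ends in a variable-length Path[] array, one entry per path */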
 +      phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
 +         (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
 +      if (!phys_disk)
 +              goto out;
 +      mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk);
 +      for (i = 0; i < num_paths; i++) {
 +              if ((phys_disk->Path[i].Flags & 1) != 0)
 +                      /* entry no longer valid */
 +                      continue;
 +              if ((id == phys_disk->Path[i].PhysDiskID) &&
 +                  (channel == phys_disk->Path[i].PhysDiskBus)) {
 +                      memcpy(&sas_address, &phys_disk->Path[i].WWID,
 +                              sizeof(u64));
 +                      phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
 +                                      sas_address);
 +                      goto out;
                }
        }
 -      mutex_unlock(&ioc->sas_topology_mutex);
 -      return phy_info;
 -}
  
 -static struct mptsas_phyinfo *
 -mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
 -{
 -      struct mptsas_portinfo *port_info;
 -      struct mptsas_phyinfo *phy_info = NULL;
 -      int i;
 + out:
 +      kfree(phys_disk);
 +      if (phy_info)
 +              return phy_info;
  
 +      /*
 +       * Extra code to handle RAID0 case, where the sas_address is not updated
 +       * in phys_disk_page_1 when hotswapped
 +       */
        mutex_lock(&ioc->sas_topology_mutex);
        list_for_each_entry(port_info, &ioc->sas_topology, list) {
 -              for (i = 0; i < port_info->num_phys; i++) {
 +              for (i = 0; i < port_info->num_phys && !phy_info; i++) {
                        if (!mptsas_is_end_device(
                                &port_info->phy_info[i].attached))
                                continue;
                        if (port_info->phy_info[i].attached.phys_disk_num == ~0)
                                continue;
 -                      if (port_info->phy_info[i].attached.phys_disk_num != id)
 -                              continue;
 -                      if (port_info->phy_info[i].attached.channel != channel)
 -                              continue;
 -                      phy_info = &port_info->phy_info[i];
 -                      break;
 +                      if ((port_info->phy_info[i].attached.phys_disk_num ==
 +                          phys_disk_num) &&
 +                          (port_info->phy_info[i].attached.id == id) &&
 +                          (port_info->phy_info[i].attached.channel ==
 +                           channel))
 +                              phy_info = &port_info->phy_info[i];
                }
        }
        mutex_unlock(&ioc->sas_topology_mutex);
        return phy_info;
  }
  
 -/*
 - * Work queue thread to clear the persitency table
 - */
 -static void
 -mptsas_persist_clear_table(struct work_struct *work)
 -{
 -      MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
 -
 -      mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
 -}
 -
  static void
  mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
  {
@@@ -3885,8 -2517,7 +3885,8 @@@ mptsas_adding_inactive_raid_components(
        pRaidVolumePage0_t              buffer = NULL;
        RaidPhysDiskPage0_t             phys_disk;
        int                             i;
 -      struct mptsas_hotplug_event     *ev;
 +      struct mptsas_phyinfo   *phy_info;
 +      struct mptsas_devinfo           sas_device;
  
        memset(&cfg, 0 , sizeof(CONFIGPARMS));
        memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
                    buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
                        continue;
  
 -              ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 -              if (!ev) {
 -                      printk(MYIOC_s_WARN_FMT "mptsas: lost hotplug event\n", ioc->name);
 -                      goto out;
 -              }
 +              if (mptsas_sas_device_pg0(ioc, &sas_device,
 +                  (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
 +                   MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 +                      (phys_disk.PhysDiskBus << 8) +
 +                      phys_disk.PhysDiskID))
 +                      continue;
  
 -              INIT_WORK(&ev->work, mptsas_hotplug_work);
 -              ev->ioc = ioc;
 -              ev->id = phys_disk.PhysDiskID;
 -              ev->channel = phys_disk.PhysDiskBus;
 -              ev->phys_disk_num_valid = 1;
 -              ev->phys_disk_num = phys_disk.PhysDiskNum;
 -              ev->event_type = MPTSAS_ADD_DEVICE;
 -              schedule_work(&ev->work);
 +              phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
 +                  sas_device.sas_address);
 +              mptsas_add_end_device(ioc, phy_info);
        }
  
   out:
   * Work queue thread to handle SAS hotplug events
   */
  static void
 -mptsas_hotplug_work(struct work_struct *work)
 +mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
 +    struct mptsas_hotplug_event *hot_plug_info)
  {
 -      struct mptsas_hotplug_event *ev =
 -              container_of(work, struct mptsas_hotplug_event, work);
 -
 -      MPT_ADAPTER *ioc = ev->ioc;
        struct mptsas_phyinfo *phy_info;
 -      struct sas_rphy *rphy;
 -      struct sas_port *port;
 -      struct scsi_device *sdev;
        struct scsi_target * starget;
 -      struct sas_identify identify;
 -      char *ds = NULL;
        struct mptsas_devinfo sas_device;
        VirtTarget *vtarget;
 -      VirtDevice *vdevice;
 +      int i;
 +
 +      switch (hot_plug_info->event_type) {
 +
 +      case MPTSAS_ADD_PHYSDISK:
 +
 +              if (!ioc->raid_data.pIocPg2)
 +                      break;
 +
 +              for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
 +                      if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
 +                          hot_plug_info->id) {
 +                              printk(MYIOC_s_WARN_FMT "firmware bug: unable "
 +                                  "to add hidden disk - target_id matches "
 +                                  "volume_id\n", ioc->name);
 +                              mptsas_free_fw_event(ioc, fw_event);
 +                              return;
 +                      }
 +              }
 +              mpt_findImVolumes(ioc);
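 +              /* fall through: add the physical disk as an end device */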
 +
 +      case MPTSAS_ADD_DEVICE:
 +              memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
 +              mptsas_sas_device_pg0(ioc, &sas_device,
 +                  (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
 +                  MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 +                  (hot_plug_info->channel << 8) +
 +                  hot_plug_info->id);
 +
 +              if (!sas_device.handle)
 +                      return;
 +
 +              phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
 +              if (!phy_info)
 +                      break;
 +
 +              if (mptsas_get_rphy(phy_info))
 +                      break;
 +
 +              mptsas_add_end_device(ioc, phy_info);
 +              break;
  
 -      mutex_lock(&ioc->sas_discovery_mutex);
 -      switch (ev->event_type) {
        case MPTSAS_DEL_DEVICE:
 +              phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
 +                  hot_plug_info->sas_address);
 +              mptsas_del_end_device(ioc, phy_info);
 +              break;
  
 -              phy_info = NULL;
 -              if (ev->phys_disk_num_valid) {
 -                      if (ev->hidden_raid_component){
 -                              if (mptsas_sas_device_pg0(ioc, &sas_device,
 -                                  (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
 -                                   MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 -                                  (ev->channel << 8) + ev->id)) {
 -                                      dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                                      "%s: exit at line=%d\n", ioc->name,
 -                                              __func__, __LINE__));
 -                                      break;
 -                              }
 -                              phy_info = mptsas_find_phyinfo_by_sas_address(
 -                                  ioc, sas_device.sas_address);
 -                      }else
 -                              phy_info = mptsas_find_phyinfo_by_phys_disk_num(
 -                                  ioc, ev->channel, ev->phys_disk_num);
 -              }
 +      case MPTSAS_DEL_PHYSDISK:
  
 -              if (!phy_info)
 -                      phy_info = mptsas_find_phyinfo_by_target(ioc,
 -                          ev->channel, ev->id);
 +              mpt_findImVolumes(ioc);
  
 -              /*
 -               * Sanity checks, for non-existing phys and remote rphys.
 -               */
 -              if (!phy_info){
 +              phy_info = mptsas_find_phyinfo_by_phys_disk_num(
 +                              ioc, hot_plug_info->phys_disk_num,
 +                              hot_plug_info->channel,
 +                              hot_plug_info->id);
 +              mptsas_del_end_device(ioc, phy_info);
 +              break;
 +
 +      case MPTSAS_ADD_PHYSDISK_REPROBE:
 +
 +              if (mptsas_sas_device_pg0(ioc, &sas_device,
 +                  (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
 +                   MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 +                  (hot_plug_info->channel << 8) + hot_plug_info->id)) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                              "%s: exit at line=%d\n", ioc->name,
 -                              __func__, __LINE__));
 +                      "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                               __func__, hot_plug_info->id, __LINE__));
                        break;
                }
 -              if (!phy_info->port_details) {
 +
 +              phy_info = mptsas_find_phyinfo_by_sas_address(
 +                  ioc, sas_device.sas_address);
 +
 +              if (!phy_info) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                              "%s: exit at line=%d\n", ioc->name,
 -                              __func__, __LINE__));
 +                              "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                               __func__, hot_plug_info->id, __LINE__));
                        break;
                }
 -              rphy = mptsas_get_rphy(phy_info);
 -              if (!rphy) {
 +
 +              starget = mptsas_get_starget(phy_info);
 +              if (!starget) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                              "%s: exit at line=%d\n", ioc->name,
 -                              __func__, __LINE__));
 +                              "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                               __func__, hot_plug_info->id, __LINE__));
                        break;
                }
  
 -              port = mptsas_get_port(phy_info);
 -              if (!port) {
 +              vtarget = starget->hostdata;
 +              if (!vtarget) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                              "%s: exit at line=%d\n", ioc->name,
 -                              __func__, __LINE__));
 +                              "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                               __func__, hot_plug_info->id, __LINE__));
                        break;
                }
  
 -              starget = mptsas_get_starget(phy_info);
 -              if (starget) {
 -                      vtarget = starget->hostdata;
 -
 -                      if (!vtarget) {
 -                              dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                                      "%s: exit at line=%d\n", ioc->name,
 -                                      __func__, __LINE__));
 -                              break;
 -                      }
 +              mpt_findImVolumes(ioc);
  
 -                      /*
 -                       * Handling  RAID components
 -                       */
 -                      if (ev->phys_disk_num_valid &&
 -                          ev->hidden_raid_component) {
 -                              printk(MYIOC_s_INFO_FMT
 -                                  "RAID Hidding: channel=%d, id=%d, "
 -                                  "physdsk %d \n", ioc->name, ev->channel,
 -                                  ev->id, ev->phys_disk_num);
 -                              vtarget->id = ev->phys_disk_num;
 -                              vtarget->tflags |=
 -                                  MPT_TARGET_FLAGS_RAID_COMPONENT;
 -                              mptsas_reprobe_target(starget, 1);
 -                              phy_info->attached.phys_disk_num =
 -                                  ev->phys_disk_num;
 -                      break;
 -                      }
 -              }
 +              starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hiding: "
 +                  "fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
 +                  ioc->name, hot_plug_info->channel, hot_plug_info->id,
 +                  hot_plug_info->phys_disk_num, (unsigned long long)
 +                  sas_device.sas_address);
  
 -              if (phy_info->attached.device_info &
 -                  MPI_SAS_DEVICE_INFO_SSP_TARGET)
 -                      ds = "ssp";
 -              if (phy_info->attached.device_info &
 -                  MPI_SAS_DEVICE_INFO_STP_TARGET)
 -                      ds = "stp";
 -              if (phy_info->attached.device_info &
 -                  MPI_SAS_DEVICE_INFO_SATA_DEVICE)
 -                      ds = "sata";
 -
 -              printk(MYIOC_s_INFO_FMT
 -                     "removing %s device, channel %d, id %d, phy %d\n",
 -                     ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
 -              dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
 -                  "delete port (%d)\n", ioc->name, port->port_identifier);
 -              sas_port_delete(port);
 -              mptsas_port_delete(ioc, phy_info->port_details);
 +              vtarget->id = hot_plug_info->phys_disk_num;
 +              vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
 +              phy_info->attached.phys_disk_num = hot_plug_info->phys_disk_num;
 +              mptsas_reprobe_target(starget, 1);
                break;
 -      case MPTSAS_ADD_DEVICE:
  
 -              if (ev->phys_disk_num_valid)
 -                      mpt_findImVolumes(ioc);
 +      case MPTSAS_DEL_PHYSDISK_REPROBE:
  
 -              /*
 -               * Refresh sas device pg0 data
 -               */
                if (mptsas_sas_device_pg0(ioc, &sas_device,
                    (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
                     MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 -                      (ev->channel << 8) + ev->id)) {
 +                      (hot_plug_info->channel << 8) + hot_plug_info->id)) {
                                dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                                      "%s: exit at line=%d\n", ioc->name,
 -                                      __func__, __LINE__));
 +                                  "%s: fw_id=%d exit at line=%d\n",
 +                                  ioc->name, __func__,
 +                                  hot_plug_info->id, __LINE__));
                        break;
                }
  
 -              __mptsas_discovery_work(ioc);
 -
                phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
                                sas_device.sas_address);
 -
 -              if (!phy_info || !phy_info->port_details) {
 +              if (!phy_info) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                              "%s: exit at line=%d\n", ioc->name,
 -                              __func__, __LINE__));
 +                          "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                       __func__, hot_plug_info->id, __LINE__));
                        break;
                }
  
                starget = mptsas_get_starget(phy_info);
 -              if (starget && (!ev->hidden_raid_component)){
 -
 -                      vtarget = starget->hostdata;
 -
 -                      if (!vtarget) {
 -                              dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                                  "%s: exit at line=%d\n", ioc->name,
 -                                  __func__, __LINE__));
 -                              break;
 -                      }
 -                      /*
 -                       * Handling  RAID components
 -                       */
 -                      if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
 -                              printk(MYIOC_s_INFO_FMT
 -                                  "RAID Exposing: channel=%d, id=%d, "
 -                                  "physdsk %d \n", ioc->name, ev->channel,
 -                                  ev->id, ev->phys_disk_num);
 -                              vtarget->tflags &=
 -                                  ~MPT_TARGET_FLAGS_RAID_COMPONENT;
 -                              vtarget->id = ev->id;
 -                              mptsas_reprobe_target(starget, 0);
 -                              phy_info->attached.phys_disk_num = ~0;
 -                      }
 +              if (!starget) {
 +                      dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 +                          "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                       __func__, hot_plug_info->id, __LINE__));
                        break;
                }
  
 -              if (mptsas_get_rphy(phy_info)) {
 +              vtarget = starget->hostdata;
 +              if (!vtarget) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                              "%s: exit at line=%d\n", ioc->name,
 -                              __func__, __LINE__));
 -                      if (ev->channel) printk("%d\n", __LINE__);
 +                          "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                       __func__, hot_plug_info->id, __LINE__));
                        break;
                }
  
 -              port = mptsas_get_port(phy_info);
 -              if (!port) {
 +              if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                              "%s: exit at line=%d\n", ioc->name,
 -                              __func__, __LINE__));
 +                          "%s: fw_id=%d exit at line=%d\n", ioc->name,
 +                       __func__, hot_plug_info->id, __LINE__));
                        break;
                }
 -              memcpy(&phy_info->attached, &sas_device,
 -                  sizeof(struct mptsas_devinfo));
 -
 -              if (phy_info->attached.device_info &
 -                  MPI_SAS_DEVICE_INFO_SSP_TARGET)
 -                      ds = "ssp";
 -              if (phy_info->attached.device_info &
 -                  MPI_SAS_DEVICE_INFO_STP_TARGET)
 -                      ds = "stp";
 -              if (phy_info->attached.device_info &
 -                  MPI_SAS_DEVICE_INFO_SATA_DEVICE)
 -                      ds = "sata";
 -
 -              printk(MYIOC_s_INFO_FMT
 -                     "attaching %s device, channel %d, id %d, phy %d\n",
 -                     ioc->name, ds, ev->channel, ev->id, ev->phy_id);
  
 -              mptsas_parse_device_info(&identify, &phy_info->attached);
 -              rphy = sas_end_device_alloc(port);
 -              if (!rphy) {
 -                      dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                              "%s: exit at line=%d\n", ioc->name,
 -                              __func__, __LINE__));
 -                      break; /* non-fatal: an rphy can be added later */
 -              }
 +              mpt_findImVolumes(ioc);
  
 -              rphy->identify = identify;
 -              if (sas_rphy_add(rphy)) {
 -                      dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
 -                              "%s: exit at line=%d\n", ioc->name,
 -                              __func__, __LINE__));
 -                      sas_rphy_free(rphy);
 -                      break;
 -              }
 -              mptsas_set_rphy(ioc, phy_info, rphy);
 +              starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Exposing:"
 +                  " fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
 +                  ioc->name, hot_plug_info->channel, hot_plug_info->id,
 +                  hot_plug_info->phys_disk_num, (unsigned long long)
 +                  sas_device.sas_address);
 +
 +              vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
 +              vtarget->id = hot_plug_info->id;
 +              phy_info->attached.phys_disk_num = ~0;
 +              mptsas_reprobe_target(starget, 0);
 +              mptsas_add_device_component_by_fw(ioc,
 +                  hot_plug_info->channel, hot_plug_info->id);
                break;
 +
        case MPTSAS_ADD_RAID:
 -              sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
 -                  ev->id, 0);
 -              if (sdev) {
 -                      scsi_device_put(sdev);
 -                      break;
 -              }
 -              printk(MYIOC_s_INFO_FMT
 -                     "attaching raid volume, channel %d, id %d\n",
 -                     ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
 -              scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0);
 +
                mpt_findImVolumes(ioc);
 +              printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
 +                  "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
 +                  hot_plug_info->id);
 +              scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
 +                  hot_plug_info->id, 0);
                break;
 +
        case MPTSAS_DEL_RAID:
 -              sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
 -                  ev->id, 0);
 -              if (!sdev)
 -                      break;
 -              printk(MYIOC_s_INFO_FMT
 -                     "removing raid volume, channel %d, id %d\n",
 -                     ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
 -              vdevice = sdev->hostdata;
 -              scsi_remove_device(sdev);
 -              scsi_device_put(sdev);
 +
                mpt_findImVolumes(ioc);
 +              printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
 +                  "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
 +                  hot_plug_info->id);
 +              scsi_remove_device(hot_plug_info->sdev);
 +              scsi_device_put(hot_plug_info->sdev);
                break;
 +
        case MPTSAS_ADD_INACTIVE_VOLUME:
 +
 +              mpt_findImVolumes(ioc);
                mptsas_adding_inactive_raid_components(ioc,
 -                  ev->channel, ev->id);
 +                  hot_plug_info->channel, hot_plug_info->id);
                break;
 -      case MPTSAS_IGNORE_EVENT:
 +
        default:
                break;
        }
  
 -      mutex_unlock(&ioc->sas_discovery_mutex);
 -      kfree(ev);
 +      mptsas_free_fw_event(ioc, fw_event);
  }
  
  static void
 -mptsas_send_sas_event(MPT_ADAPTER *ioc,
 -              EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
 +mptsas_send_sas_event(struct fw_event_work *fw_event)
  {
 -      struct mptsas_hotplug_event *ev;
 -      u32 device_info = le32_to_cpu(sas_event_data->DeviceInfo);
 -      __le64 sas_address;
 +      MPT_ADAPTER *ioc;
 +      struct mptsas_hotplug_event hot_plug_info;
 +      EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
 +      u32 device_info;
 +      u64 sas_address;
 +
 +      ioc = fw_event->ioc;
 +      sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
 +          fw_event->event_data;
 +      device_info = le32_to_cpu(sas_event_data->DeviceInfo);
  
        if ((device_info &
 -           (MPI_SAS_DEVICE_INFO_SSP_TARGET |
 -            MPI_SAS_DEVICE_INFO_STP_TARGET |
 -            MPI_SAS_DEVICE_INFO_SATA_DEVICE )) == 0)
 +              (MPI_SAS_DEVICE_INFO_SSP_TARGET |
 +              MPI_SAS_DEVICE_INFO_STP_TARGET |
 +              MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) {
 +              mptsas_free_fw_event(ioc, fw_event);
 +              return;
 +      }
 +
 +      if (sas_event_data->ReasonCode ==
 +              MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) {
 +              mptbase_sas_persist_operation(ioc,
 +              MPI_SAS_OP_CLEAR_NOT_PRESENT);
 +              mptsas_free_fw_event(ioc, fw_event);
                return;
 +      }
  
        switch (sas_event_data->ReasonCode) {
        case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
 -
 -              mptsas_target_reset_queue(ioc, sas_event_data);
 -              break;
 -
        case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
 -              ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 -              if (!ev) {
 -                      printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name);
 -                      break;
 -              }
 -
 -              INIT_WORK(&ev->work, mptsas_hotplug_work);
 -              ev->ioc = ioc;
 -              ev->handle = le16_to_cpu(sas_event_data->DevHandle);
 -              ev->parent_handle =
 -                  le16_to_cpu(sas_event_data->ParentDevHandle);
 -              ev->channel = sas_event_data->Bus;
 -              ev->id = sas_event_data->TargetID;
 -              ev->phy_id = sas_event_data->PhyNum;
 +              memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
 +              hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle);
 +              hot_plug_info.channel = sas_event_data->Bus;
 +              hot_plug_info.id = sas_event_data->TargetID;
 +              hot_plug_info.phy_id = sas_event_data->PhyNum;
                memcpy(&sas_address, &sas_event_data->SASAddress,
 -                  sizeof(__le64));
 -              ev->sas_address = le64_to_cpu(sas_address);
 -              ev->device_info = device_info;
 -
 +                  sizeof(u64));
 +              hot_plug_info.sas_address = le64_to_cpu(sas_address);
 +              hot_plug_info.device_info = device_info;
                if (sas_event_data->ReasonCode &
                    MPI_EVENT_SAS_DEV_STAT_RC_ADDED)
 -                      ev->event_type = MPTSAS_ADD_DEVICE;
 +                      hot_plug_info.event_type = MPTSAS_ADD_DEVICE;
                else
 -                      ev->event_type = MPTSAS_DEL_DEVICE;
 -              schedule_work(&ev->work);
 +                      hot_plug_info.event_type = MPTSAS_DEL_DEVICE;
 +              mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
                break;
 +
        case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
 -      /*
 -       * Persistent table is full.
 -       */
 -              INIT_WORK(&ioc->sas_persist_task,
 -                  mptsas_persist_clear_table);
 -              schedule_work(&ioc->sas_persist_task);
 +              mptbase_sas_persist_operation(ioc,
 +                  MPI_SAS_OP_CLEAR_NOT_PRESENT);
 +              mptsas_free_fw_event(ioc, fw_event);
                break;
 -      /*
 -       * TODO, handle other events
 -       */
 +
        case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
 -      case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
 +      /* TODO */
        case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
 -      case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
 -      case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
 -      case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
 -      case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
 +      /* TODO */
        default:
 +              mptsas_free_fw_event(ioc, fw_event);
                break;
        }
  }
 +
  static void
 -mptsas_send_raid_event(MPT_ADAPTER *ioc,
 -              EVENT_DATA_RAID *raid_event_data)
 +mptsas_send_raid_event(struct fw_event_work *fw_event)
  {
 -      struct mptsas_hotplug_event *ev;
 -      int status = le32_to_cpu(raid_event_data->SettingsStatus);
 -      int state = (status >> 8) & 0xff;
 -
 -      if (ioc->bus_type != SAS)
 -              return;
 -
 -      ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 -      if (!ev) {
 -              printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name);
 -              return;
 +      MPT_ADAPTER *ioc;
 +      EVENT_DATA_RAID *raid_event_data;
 +      struct mptsas_hotplug_event hot_plug_info;
 +      int status;
 +      int state;
 +      struct scsi_device *sdev = NULL;
 +      VirtDevice *vdevice = NULL;
 +      RaidPhysDiskPage0_t phys_disk;
 +
 +      ioc = fw_event->ioc;
 +      raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data;
 +      status = le32_to_cpu(raid_event_data->SettingsStatus);
 +      state = (status >> 8) & 0xff;
 +
 +      memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
 +      hot_plug_info.id = raid_event_data->VolumeID;
 +      hot_plug_info.channel = raid_event_data->VolumeBus;
 +      hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum;
 +
 +      if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED ||
 +          raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED ||
 +          raid_event_data->ReasonCode ==
 +          MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) {
 +              sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
 +                  hot_plug_info.id, 0);
 +              hot_plug_info.sdev = sdev;
 +              if (sdev)
 +                      vdevice = sdev->hostdata;
        }
  
 -      INIT_WORK(&ev->work, mptsas_hotplug_work);
 -      ev->ioc = ioc;
 -      ev->id = raid_event_data->VolumeID;
 -      ev->channel = raid_event_data->VolumeBus;
 -      ev->event_type = MPTSAS_IGNORE_EVENT;
 +      devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
 +          "ReasonCode=%02x\n", ioc->name, __func__,
 +          raid_event_data->ReasonCode));
  
        switch (raid_event_data->ReasonCode) {
        case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
 -              ev->phys_disk_num_valid = 1;
 -              ev->phys_disk_num = raid_event_data->PhysDiskNum;
 -              ev->event_type = MPTSAS_ADD_DEVICE;
 +              hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE;
                break;
        case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
 -              ev->phys_disk_num_valid = 1;
 -              ev->phys_disk_num = raid_event_data->PhysDiskNum;
 -              ev->hidden_raid_component = 1;
 -              ev->event_type = MPTSAS_DEL_DEVICE;
 +              hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE;
                break;
        case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
                switch (state) {
                case MPI_PD_STATE_ONLINE:
                case MPI_PD_STATE_NOT_COMPATIBLE:
 -                      ev->phys_disk_num_valid = 1;
 -                      ev->phys_disk_num = raid_event_data->PhysDiskNum;
 -                      ev->hidden_raid_component = 1;
 -                      ev->event_type = MPTSAS_ADD_DEVICE;
 +                      mpt_raid_phys_disk_pg0(ioc,
 +                          raid_event_data->PhysDiskNum, &phys_disk);
 +                      hot_plug_info.id = phys_disk.PhysDiskID;
 +                      hot_plug_info.channel = phys_disk.PhysDiskBus;
 +                      hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
                        break;
 +              case MPI_PD_STATE_FAILED:
                case MPI_PD_STATE_MISSING:
                case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
                case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
                case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
 -                      ev->phys_disk_num_valid = 1;
 -                      ev->phys_disk_num = raid_event_data->PhysDiskNum;
 -                      ev->event_type = MPTSAS_DEL_DEVICE;
 +                      hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
                        break;
                default:
                        break;
                }
                break;
        case MPI_EVENT_RAID_RC_VOLUME_DELETED:
 -              ev->event_type = MPTSAS_DEL_RAID;
 +              if (!sdev)
 +                      break;
 +              vdevice->vtarget->deleted = 1; /* block IO */
 +              hot_plug_info.event_type = MPTSAS_DEL_RAID;
                break;
        case MPI_EVENT_RAID_RC_VOLUME_CREATED:
 -              ev->event_type = MPTSAS_ADD_RAID;
 +              if (sdev) {
 +                      scsi_device_put(sdev);
 +                      break;
 +              }
 +              hot_plug_info.event_type = MPTSAS_ADD_RAID;
                break;
        case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
 +              if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) {
 +                      if (!sdev)
 +                              break;
 +                      vdevice->vtarget->deleted = 1; /* block IO */
 +                      hot_plug_info.event_type = MPTSAS_DEL_RAID;
 +                      break;
 +              }
                switch (state) {
                case MPI_RAIDVOL0_STATUS_STATE_FAILED:
                case MPI_RAIDVOL0_STATUS_STATE_MISSING:
 -                      ev->event_type = MPTSAS_DEL_RAID;
 +                      if (!sdev)
 +                              break;
 +                      vdevice->vtarget->deleted = 1; /* block IO */
 +                      hot_plug_info.event_type = MPTSAS_DEL_RAID;
                        break;
                case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
                case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
 -                      ev->event_type = MPTSAS_ADD_RAID;
 +                      if (sdev) {
 +                              scsi_device_put(sdev);
 +                              break;
 +                      }
 +                      hot_plug_info.event_type = MPTSAS_ADD_RAID;
                        break;
                default:
                        break;
        default:
                break;
        }
 -      schedule_work(&ev->work);
 +
 +      if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT)
 +              mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
 +      else
 +              mptsas_free_fw_event(ioc, fw_event);
  }
  
 -static void
 -mptsas_send_discovery_event(MPT_ADAPTER *ioc,
 -      EVENT_DATA_SAS_DISCOVERY *discovery_data)
 +/**
 + *    mptsas_issue_tm - send mptsas internal tm request
 + *    @ioc: Pointer to MPT_ADAPTER structure
 + *    @type: Task Management type
 + *    @channel: channel number for task management
 + *    @id: Logical Target ID for reset (if appropriate)
 + *    @lun: Logical unit for reset (if appropriate)
 + *    @task_context: Context for the task to be aborted
 + *    @timeout: timeout for task management control
 + *    @issue_reset: set to 1 when the caller must escalate to a host reset
 + *
 + *    Return: 0 on success, -1 on failure.
 + *
 + */
 +static int
 +mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
 +      int task_context, ulong timeout, u8 *issue_reset)
  {
 -      struct mptsas_discovery_event *ev;
 +      MPT_FRAME_HDR   *mf;
 +      SCSITaskMgmt_t  *pScsiTm;
 +      int              retval;
 +      unsigned long    timeleft;
 +
 +      *issue_reset = 0;
 +      mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
 +      if (mf == NULL) {
 +              retval = -1; /* return failure */
 +              dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no "
 +                  "msg frames!!\n", ioc->name));
 +              goto out;
 +      }
  
 -      /*
 -       * DiscoveryStatus
 -       *
 -       * This flag will be non-zero when firmware
 -       * kicks off discovery, and return to zero
 -       * once its completed.
 -       */
 -      if (discovery_data->DiscoveryStatus)
 -              return;
 +      dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, "
 +          "task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, "
 +          "fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf,
 +           type, timeout, channel, id, (unsigned long long)lun,
 +           task_context));
 +
 +      pScsiTm = (SCSITaskMgmt_t *) mf;
 +      memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
 +      pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
 +      pScsiTm->TaskType = type;
 +      pScsiTm->MsgFlags = 0;
 +      pScsiTm->TargetID = id;
 +      pScsiTm->Bus = channel;
 +      pScsiTm->ChainOffset = 0;
 +      pScsiTm->Reserved = 0;
 +      pScsiTm->Reserved1 = 0;
 +      pScsiTm->TaskMsgContext = task_context;
 +      int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
 +
 +      INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
 +      CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
 +      retval = 0;
 +      mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
 +
 +      /* Now wait for the command to complete */
 +      timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
 +          timeout*HZ);
 +      if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
 +              retval = -1; /* return failure */
 +              dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
 +                  "TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf));
 +              mpt_free_msg_frame(ioc, mf);
 +              if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
 +                      goto out;
 +              *issue_reset = 1;
 +              goto out;
 +      }
 +
 +      if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
 +              retval = -1; /* return failure */
 +              dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +                  "TaskMgmt request: failed with no reply\n", ioc->name));
 +              goto out;
 +      }
 +
 + out:
 +      CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
 +      return retval;
 +}
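A minimal call-site sketch for mptsas_issue_tm() as documented above; ioc, channel, id, lun and task_context are assumed to already be in scope (the actual caller is the broadcast-primitive handler that follows), and the error handling is illustrative only:

        u8 issue_reset = 0;

        /* Query the outstanding task, using the same 30 second timeout as
         * the broadcast-primitive handler below. */
        if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
            channel, id, (u64)lun, task_context, 30, &issue_reset)) {
                /* TM failed; when issue_reset is set, the caller is expected
                 * to escalate via mpt_HardResetHandler(ioc, CAN_SLEEP). */
        }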
  
 -      ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 -      if (!ev)
 +/**
 + *    mptsas_broadcast_primative_work - Handle broadcast primitives
 + *    @fw_event: work queue payload containing info describing the event
 + *
 + *    This is handled in workqueue context.
 + */
 +static void
 +mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
 +{
 +      MPT_ADAPTER *ioc = fw_event->ioc;
 +      MPT_FRAME_HDR   *mf;
 +      VirtDevice      *vdevice;
 +      int                     ii;
 +      struct scsi_cmnd        *sc;
 +      SCSITaskMgmtReply_t     *pScsiTmReply;
 +      u8                      issue_reset;
 +      int                     task_context;
 +      u8                      channel, id;
 +      int                      lun;
 +      u32                      termination_count;
 +      u32                      query_count;
 +
 +      dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +          "%s - enter\n", ioc->name, __func__));
 +
 +      mutex_lock(&ioc->taskmgmt_cmds.mutex);
 +      if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
 +              mutex_unlock(&ioc->taskmgmt_cmds.mutex);
 +              mptsas_requeue_fw_event(ioc, fw_event, 1000);
                return;
 -      INIT_WORK(&ev->work, mptsas_discovery_work);
 -      ev->ioc = ioc;
 -      schedule_work(&ev->work);
 -};
 +      }
 +
 +      issue_reset = 0;
 +      termination_count = 0;
 +      query_count = 0;
 +      mpt_findImVolumes(ioc);
 +      pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
 +
 +      for (ii = 0; ii < ioc->req_depth; ii++) {
 +              if (ioc->fw_events_off)
 +                      goto out;
 +              sc = mptscsih_get_scsi_lookup(ioc, ii);
 +              if (!sc)
 +                      continue;
 +              mf = MPT_INDEX_2_MFPTR(ioc, ii);
 +              if (!mf)
 +                      continue;
 +              task_context = mf->u.frame.hwhdr.msgctxu.MsgContext;
 +              vdevice = sc->device->hostdata;
 +              if (!vdevice || !vdevice->vtarget)
 +                      continue;
 +              if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
 +                      continue; /* skip hidden raid components */
 +              if (vdevice->vtarget->raidVolume)
 +                      continue; /* skip hidden raid components */
 +              channel = vdevice->vtarget->channel;
 +              id = vdevice->vtarget->id;
 +              lun = vdevice->lun;
 +              if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
 +                  channel, id, (u64)lun, task_context, 30, &issue_reset))
 +                      goto out;
 +              query_count++;
 +              termination_count +=
 +                  le32_to_cpu(pScsiTmReply->TerminationCount);
 +              if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) &&
 +                  (pScsiTmReply->ResponseCode ==
 +                  MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
 +                  pScsiTmReply->ResponseCode ==
 +                  MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
 +                      continue;
 +              if (mptsas_issue_tm(ioc,
 +                  MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET,
 +                  channel, id, (u64)lun, 0, 30, &issue_reset))
 +                      goto out;
 +              termination_count +=
 +                  le32_to_cpu(pScsiTmReply->TerminationCount);
 +      }
 +
 + out:
 +      dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 +          "%s - exit, query_count = %d termination_count = %d\n",
 +          ioc->name, __func__, query_count, termination_count));
 +
 +      ioc->broadcast_aen_busy = 0;
 +      mpt_clear_taskmgmt_in_progress_flag(ioc);
 +      mutex_unlock(&ioc->taskmgmt_cmds.mutex);
 +
 +      if (issue_reset) {
 +              printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
 +                  ioc->name, __func__);
 +              mpt_HardResetHandler(ioc, CAN_SLEEP);
 +      }
 +      mptsas_free_fw_event(ioc, fw_event);
 +}
  
  /*
   * mptsas_send_ir2_event - handle exposing hidden disk when
   *
   */
  static void
 -mptsas_send_ir2_event(MPT_ADAPTER *ioc, PTR_MPI_EVENT_DATA_IR2 ir2_data)
 +mptsas_send_ir2_event(struct fw_event_work *fw_event)
  {
 -      struct mptsas_hotplug_event *ev;
 -
 -      if (ir2_data->ReasonCode !=
 -          MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED)
 -              return;
 -
 -      ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 -      if (!ev)
 +      MPT_ADAPTER     *ioc;
 +      struct mptsas_hotplug_event hot_plug_info;
 +      MPI_EVENT_DATA_IR2      *ir2_data;
 +      u8 reasonCode;
 +      RaidPhysDiskPage0_t phys_disk;
 +
 +      ioc = fw_event->ioc;
 +      ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data;
 +      reasonCode = ir2_data->ReasonCode;
 +
 +      devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
 +          "ReasonCode=%02x\n", ioc->name, __func__, reasonCode));
 +
 +      memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
 +      hot_plug_info.id = ir2_data->TargetID;
 +      hot_plug_info.channel = ir2_data->Bus;
 +      switch (reasonCode) {
 +      case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
 +              hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME;
 +              break;
 +      case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
 +              hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
 +              hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
 +              break;
 +      case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
 +              hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
 +              mpt_raid_phys_disk_pg0(ioc,
 +                  ir2_data->PhysDiskNum, &phys_disk);
 +              hot_plug_info.id = phys_disk.PhysDiskID;
 +              hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
 +              break;
 +      default:
 +              mptsas_free_fw_event(ioc, fw_event);
                return;
 -
 -      INIT_WORK(&ev->work, mptsas_hotplug_work);
 -      ev->ioc = ioc;
 -      ev->id = ir2_data->TargetID;
 -      ev->channel = ir2_data->Bus;
 -      ev->event_type = MPTSAS_ADD_INACTIVE_VOLUME;
 -
 -      schedule_work(&ev->work);
 -};
 +      }
 +      mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
 +}
  
  static int
  mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
  {
 -      int rc=1;
 -      u8 event = le32_to_cpu(reply->Event) & 0xFF;
 -
 -      if (!ioc->sh)
 -              goto out;
 +      u32 event = le32_to_cpu(reply->Event);
 +      int sz, event_data_sz;
 +      struct fw_event_work *fw_event;
 +      unsigned long delay;
  
 -      /*
 -       * sas_discovery_ignore_events
 -       *
 -       * This flag is to prevent anymore processing of
 -       * sas events once mptsas_remove function is called.
 -       */
 -      if (ioc->sas_discovery_ignore_events) {
 -              rc = mptscsih_event_process(ioc, reply);
 -              goto out;
 -      }
 +      /* events turned off due to host reset or driver unloading */
 +      if (ioc->fw_events_off)
 +              return 0;
  
 +      delay = msecs_to_jiffies(1);
        switch (event) {
 +      case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
 +      {
 +              EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data =
 +                  (EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data;
 +              if (broadcast_event_data->Primitive !=
 +                  MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
 +                      return 0;
 +              if (ioc->broadcast_aen_busy)
 +                      return 0;
 +              ioc->broadcast_aen_busy = 1;
 +              break;
 +      }
        case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
 -              mptsas_send_sas_event(ioc,
 -                      (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data);
 +      {
 +              EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
 +                  (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
 +
 +              if (sas_event_data->ReasonCode ==
 +                  MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
 +                      mptsas_target_reset_queue(ioc, sas_event_data);
 +                      return 0;
 +              }
                break;
 -      case MPI_EVENT_INTEGRATED_RAID:
 -              mptsas_send_raid_event(ioc,
 -                      (EVENT_DATA_RAID *)reply->Data);
 +      }
 +      case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
 +      {
 +              MpiEventDataSasExpanderStatusChange_t *expander_data =
 +                  (MpiEventDataSasExpanderStatusChange_t *)reply->Data;
 +
 +              if (ioc->old_sas_discovery_protocal)
 +                      return 0;
 +
 +              if (expander_data->ReasonCode ==
 +                  MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING &&
 +                  ioc->device_missing_delay)
 +                      delay = HZ * ioc->device_missing_delay;
                break;
 +      }
 +      case MPI_EVENT_SAS_DISCOVERY:
 +      {
 +              u32 discovery_status;
 +              EventDataSasDiscovery_t *discovery_data =
 +                  (EventDataSasDiscovery_t *)reply->Data;
 +
 +              discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus);
 +              ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0;
 +              if (ioc->old_sas_discovery_protocal && !discovery_status)
 +                      mptsas_queue_rescan(ioc);
 +              return 0;
 +      }
 +      case MPI_EVENT_INTEGRATED_RAID:
        case MPI_EVENT_PERSISTENT_TABLE_FULL:
 -              INIT_WORK(&ioc->sas_persist_task,
 -                  mptsas_persist_clear_table);
 -              schedule_work(&ioc->sas_persist_task);
 -              break;
 -       case MPI_EVENT_SAS_DISCOVERY:
 -              mptsas_send_discovery_event(ioc,
 -                      (EVENT_DATA_SAS_DISCOVERY *)reply->Data);
 -              break;
        case MPI_EVENT_IR2:
 -              mptsas_send_ir2_event(ioc,
 -                  (PTR_MPI_EVENT_DATA_IR2)reply->Data);
 +      case MPI_EVENT_SAS_PHY_LINK_STATUS:
 +      case MPI_EVENT_QUEUE_FULL:
                break;
        default:
 -              rc = mptscsih_event_process(ioc, reply);
 -              break;
 +              return 0;
        }
 - out:
  
 -      return rc;
 +      event_data_sz = ((reply->MsgLength * 4) -
 +          offsetof(EventNotificationReply_t, Data));
 +      sz = offsetof(struct fw_event_work, event_data) + event_data_sz;
 +      fw_event = kzalloc(sz, GFP_ATOMIC);
 +      if (!fw_event) {
 +              printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name,
 +               __func__, __LINE__);
 +              return 0;
 +      }
 +      memcpy(fw_event->event_data, reply->Data, event_data_sz);
 +      fw_event->event = event;
 +      fw_event->ioc = ioc;
 +      mptsas_add_fw_event(ioc, fw_event, delay);
 +      return 0;
 +}
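mptsas_event_process() sizes each fw_event_work with offsetof() on its flexible event_data tail plus the firmware payload length, so one allocation holds both the header and the copied event data. A self-contained sketch of that sizing idiom (the struct and names below are generic illustrations, not the driver's):

        #include <stddef.h>
        #include <stdlib.h>
        #include <string.h>

        struct example_event {
                unsigned int event;
                size_t data_len;
                unsigned char data[];   /* flexible array member */
        };

        static struct example_event *example_event_alloc(unsigned int event,
                                        const void *payload, size_t len)
        {
                struct example_event *ev;

                /* One allocation: header bytes up to 'data', then 'len'
                 * payload bytes, mirroring the kzalloc sizing above. */
                ev = calloc(1, offsetof(struct example_event, data) + len);
                if (!ev)
                        return NULL;
                ev->event = event;
                ev->data_len = len;
                memcpy(ev->data, payload, len);
                return ev;
        }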
 +
 +/* Delete a volume when no longer listed in ioc pg2
 + */
 +static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id)
 +{
 +      struct scsi_device *sdev;
 +      int i;
 +
 +      sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0);
 +      if (!sdev)
 +              return;
 +      if (!ioc->raid_data.pIocPg2)
 +              goto out;
 +      if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
 +              goto out;
 +      for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
 +              if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id)
 +                      goto release_sdev;
 + out:
 +      printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
 +          "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id);
 +      scsi_remove_device(sdev);
 + release_sdev:
 +      scsi_device_put(sdev);
  }
  
  static int
@@@ -4701,7 -3128,6 +4701,7 @@@ mptsas_probe(struct pci_dev *pdev, cons
                return r;
  
        ioc = pci_get_drvdata(pdev);
 +      mptsas_fw_event_off(ioc);
        ioc->DoneCtx = mptsasDoneCtx;
        ioc->TaskCtx = mptsasTaskCtx;
        ioc->InternalCtx = mptsasInternalCtx;
         * A slightly different algorithm is required for
         * 64bit SGEs.
         */
 -      scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
 -      if (sizeof(dma_addr_t) == sizeof(u64)) {
 +      scale = ioc->req_sz/ioc->SGE_size;
 +      if (ioc->sg_addr_size == sizeof(u64)) {
                numSGE = (scale - 1) *
                  (ioc->facts.MaxChainDepth-1) + scale +
 -                (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
 -                sizeof(u32));
 +                (ioc->req_sz - 60) / ioc->SGE_size;
        } else {
                numSGE = 1 + (scale - 1) *
                  (ioc->facts.MaxChainDepth-1) + scale +
 -                (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
 -                sizeof(u32));
 +                (ioc->req_sz - 64) / ioc->SGE_size;
        }
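        /*
         * Worked example with hypothetical numbers (not taken from this
         * patch): with req_sz = 128, 64-bit SGEs so SGE_size = 12, and
         * MaxChainDepth = 128, scale = 128 / 12 = 10 and
         * numSGE = 9 * 127 + 10 + (128 - 60) / 12 = 1143 + 10 + 5 = 1158.
         */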
  
        if (numSGE < sh->sg_tablesize) {
  
        /* Clear the TM flags
         */
 -      hd->tmPending = 0;
 -      hd->tmState = TM_STATE_NONE;
 -      hd->resetPending = 0;
        hd->abortSCpnt = NULL;
  
        /* Clear the pointer used to store
  
        ioc->sas_data.ptClear = mpt_pt_clear;
  
 -      init_waitqueue_head(&hd->scandv_waitq);
 -      hd->scandv_wait_done = 0;
        hd->last_queue_full = 0;
        INIT_LIST_HEAD(&hd->target_reset_list);
 +      INIT_LIST_HEAD(&ioc->sas_device_info_list);
 +      mutex_init(&ioc->sas_device_info_mutex);
 +
        spin_unlock_irqrestore(&ioc->FreeQlock, flags);
  
        if (ioc->sas_data.ptClear==1) {
                goto out_mptsas_probe;
        }
  
 +      /* older firmware doesn't support expander events */
 +      if ((ioc->facts.HeaderVersion >> 8) < 0xE)
 +              ioc->old_sas_discovery_protocal = 1;
        mptsas_scan_sas_topology(ioc);
 -
 +      mptsas_fw_event_on(ioc);
        return 0;
  
   out_mptsas_probe:
        return error;
  }
  
 +void
 +mptsas_shutdown(struct pci_dev *pdev)
 +{
 +      MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
 +
 +      mptsas_fw_event_off(ioc);
 +      mptsas_cleanup_fw_event_q(ioc);
 +}
 +
  static void __devexit mptsas_remove(struct pci_dev *pdev)
  {
        MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
        struct mptsas_portinfo *p, *n;
        int i;
  
 +      mptsas_shutdown(pdev);
 +
 +      mptsas_del_device_components(ioc);
 +
        ioc->sas_discovery_ignore_events = 1;
        sas_remove_host(ioc->sh);
  
                list_del(&p->list);
                for (i = 0 ; i < p->num_phys ; i++)
                        mptsas_port_delete(ioc, p->phy_info[i].port_details);
 +
                kfree(p->phy_info);
                kfree(p);
        }
        mutex_unlock(&ioc->sas_topology_mutex);
 -
 +      ioc->hba_port_info = NULL;
        mptscsih_remove(pdev);
  }
  
@@@ -4931,7 -3344,7 +4931,7 @@@ static struct pci_driver mptsas_driver 
        .id_table       = mptsas_pci_table,
        .probe          = mptsas_probe,
        .remove         = __devexit_p(mptsas_remove),
 -      .shutdown       = mptscsih_shutdown,
 +      .shutdown       = mptsas_shutdown,
  #ifdef CONFIG_PM
        .suspend        = mptscsih_suspend,
        .resume         = mptscsih_resume,
@@@ -4951,12 -3364,10 +4951,12 @@@ mptsas_init(void
                return -ENODEV;
  
        mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
 -      mptsasTaskCtx = mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER);
 +      mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
        mptsasInternalCtx =
                mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
        mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER);
 +      mptsasDeviceResetCtx =
 +              mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER);
  
        mpt_event_register(mptsasDoneCtx, mptsas_event_process);
        mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);
@@@ -4981,7 -3392,6 +4981,7 @@@ mptsas_exit(void
        mpt_deregister(mptsasInternalCtx);
        mpt_deregister(mptsasTaskCtx);
        mpt_deregister(mptsasDoneCtx);
 +      mpt_deregister(mptsasDeviceResetCtx);
  }
  
  module_init(mptsas_init);
diff --combined drivers/net/Makefile
index e6f1f8c3f8d418119bcb96571c31aec270504995,a1c25cb4669fb8b9a95b7a1410355423d46b5599..db30ebd7b262336bb3949da5db58e7173d4bfa64
@@@ -73,7 -73,6 +73,7 @@@ obj-$(CONFIG_STNIC) += stnic.o 8390.
  obj-$(CONFIG_FEALNX) += fealnx.o
  obj-$(CONFIG_TIGON3) += tg3.o
  obj-$(CONFIG_BNX2) += bnx2.o
 +obj-$(CONFIG_CNIC) += cnic.o
  obj-$(CONFIG_BNX2X) += bnx2x.o
  bnx2x-objs := bnx2x_main.o bnx2x_link.o
  spidernet-y += spider_net.o spider_net_ethtool.o
@@@ -103,7 -102,7 +103,7 @@@ obj-$(CONFIG_HAMACHI) += hamachi.
  obj-$(CONFIG_NET) += Space.o loopback.o
  obj-$(CONFIG_SEEQ8005) += seeq8005.o
  obj-$(CONFIG_NET_SB1000) += sb1000.o
- obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
+ obj-$(CONFIG_MAC8390) += mac8390.o
  obj-$(CONFIG_APNE) += apne.o 8390.o
  obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
  obj-$(CONFIG_HP100) += hp100.o
index ccbde41c153968398979417a6dafc85c1e3359f7,8032c5adb6a9b2400c318912a7aa376df512b096..e9fa6762044ae9ef398b27b9e44c51a364338528
@@@ -1,7 -1,7 +1,7 @@@
  /*******************************************************************
   * This file is part of the Emulex Linux Device Driver for         *
   * Fibre Channel Host Bus Adapters.                                *
 - * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 + * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
   * EMULEX and SLI are trademarks of Emulex.                        *
   * www.emulex.com                                                  *
   * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
  #include <scsi/scsi_transport_fc.h>
  
  #include "lpfc_version.h"
 +#include "lpfc_hw4.h"
  #include "lpfc_hw.h"
  #include "lpfc_sli.h"
 +#include "lpfc_sli4.h"
  #include "lpfc_nl.h"
  #include "lpfc_disc.h"
  #include "lpfc_scsi.h"
@@@ -59,8 -57,6 +59,8 @@@ static char *dif_op_str[] = 
        "SCSI_PROT_READ_CONVERT",
        "SCSI_PROT_WRITE_CONVERT"
  };
 +static void
 +lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
  
  static void
  lpfc_debug_save_data(struct scsi_cmnd *cmnd)
@@@ -329,7 -325,7 +329,7 @@@ lpfc_ramp_down_queue_handler(struct lpf
  
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
 -              for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
 +              for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                new_queue_depth =
@@@ -383,7 -379,7 +383,7 @@@ lpfc_ramp_up_queue_handler(struct lpfc_
  
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
 -              for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
 +              for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                if (vports[i]->cfg_lun_queue_depth <=
@@@ -431,7 -427,7 +431,7 @@@ lpfc_scsi_dev_block(struct lpfc_hba *ph
  
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
 -              for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
 +              for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                rport = starget_to_rport(scsi_target(sdev));
  }
  
  /**
 - * lpfc_new_scsi_buf - Scsi buffer allocator
 + * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 + * @vport: The virtual port for which this call is being executed.
 + * @num_to_alloc: The requested number of buffers to allocate.
   *
 - * This routine allocates a scsi buffer, which contains all the necessary
 - * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 - * contains information to build the IOCB.  The DMAable region contains
 - * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 - * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 - * and the BPL BDE is setup in the IOCB.
 + * This routine allocates a scsi buffer for a device with the SLI-3 interface
 + * spec. The scsi buffer contains all the necessary information needed to initiate
 + * a SCSI I/O. The non-DMAable buffer region contains information to build
 + * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 + * and the initial BPL. In addition to allocating memory, the FCP CMND and
 + * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
   *
   * Return codes:
 - *   NULL - Error
 - *   Pointer to lpfc_scsi_buf data structure - Success
 + *   int - number of scsi buffers that were allocated.
 + *   0 = failure, less than num_to_alloc is a partial failure.
   **/
 -static struct lpfc_scsi_buf *
 -lpfc_new_scsi_buf(struct lpfc_vport *vport)
 +static int
 +lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
  {
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_scsi_buf *psb;
        dma_addr_t pdma_phys_fcp_rsp;
        dma_addr_t pdma_phys_bpl;
        uint16_t iotag;
 +      int bcnt;
  
 -      psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
 -      if (!psb)
 -              return NULL;
 +      for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
 +              psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
 +              if (!psb)
 +                      break;
 +
 +              /*
 +               * Get memory from the pci pool to map the virt space to pci
 +               * bus space for an I/O.  The DMA buffer includes space for the
 +               * struct fcp_cmnd, struct fcp_rsp and the number of bde's
 +               * necessary to support the sg_tablesize.
 +               */
 +              psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
 +                                      GFP_KERNEL, &psb->dma_handle);
 +              if (!psb->data) {
 +                      kfree(psb);
 +                      break;
 +              }
 +
 +              /* Initialize virtual ptrs to dma_buf region. */
 +              memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 +
 +              /* Allocate iotag for psb->cur_iocbq. */
 +              iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 +              if (iotag == 0) {
 +                      pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
 +                                      psb->data, psb->dma_handle);
 +                      kfree(psb);
 +                      break;
 +              }
 +              psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
 +
 +              psb->fcp_cmnd = psb->data;
 +              psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
 +              psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
 +                      sizeof(struct fcp_rsp);
 +
 +              /* Initialize local short-hand pointers. */
 +              bpl = psb->fcp_bpl;
 +              pdma_phys_fcp_cmd = psb->dma_handle;
 +              pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
 +              pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
 +                      sizeof(struct fcp_rsp);
 +
 +              /*
 +               * The first two bdes are the FCP_CMD and FCP_RSP. The balance
 +               * are sg list bdes.  Initialize the first two and leave the
 +               * rest for queuecommand.
 +               */
 +              bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
 +              bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
 +              bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
 +              bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 +              bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
 +
 +              /* Setup the physical region for the FCP RSP */
 +              bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
 +              bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
 +              bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
 +              bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 +              bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
 +
 +              /*
 +               * Since the IOCB for the FCP I/O is built into this
 +               * lpfc_scsi_buf, initialize it with all known data now.
 +               */
 +              iocb = &psb->cur_iocbq.iocb;
 +              iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
 +              if ((phba->sli_rev == 3) &&
 +                              !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
 +                      /* fill in immediate fcp command BDE */
 +                      iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
 +                      iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
 +                      iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
 +                                      unsli3.fcp_ext.icd);
 +                      iocb->un.fcpi64.bdl.addrHigh = 0;
 +                      iocb->ulpBdeCount = 0;
 +                      iocb->ulpLe = 0;
 +                      /* fill in response BDE */
 +                      iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
 +                                                      BUFF_TYPE_BDE_64;
 +                      iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
 +                              sizeof(struct fcp_rsp);
 +                      iocb->unsli3.fcp_ext.rbde.addrLow =
 +                              putPaddrLow(pdma_phys_fcp_rsp);
 +                      iocb->unsli3.fcp_ext.rbde.addrHigh =
 +                              putPaddrHigh(pdma_phys_fcp_rsp);
 +              } else {
 +                      iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
 +                      iocb->un.fcpi64.bdl.bdeSize =
 +                                      (2 * sizeof(struct ulp_bde64));
 +                      iocb->un.fcpi64.bdl.addrLow =
 +                                      putPaddrLow(pdma_phys_bpl);
 +                      iocb->un.fcpi64.bdl.addrHigh =
 +                                      putPaddrHigh(pdma_phys_bpl);
 +                      iocb->ulpBdeCount = 1;
 +                      iocb->ulpLe = 1;
 +              }
 +              iocb->ulpClass = CLASS3;
 +              psb->status = IOSTAT_SUCCESS;
 +              /* Put it back into the SCSI buffer list */
 +              lpfc_release_scsi_buf_s4(phba, psb);
  
 -      /*
 -       * Get memory from the pci pool to map the virt space to pci bus space
 -       * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
 -       * struct fcp_rsp and the number of bde's necessary to support the
 -       * sg_tablesize.
 -       */
 -      psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
 -                                                      &psb->dma_handle);
 -      if (!psb->data) {
 -              kfree(psb);
 -              return NULL;
        }
  
 -      /* Initialize virtual ptrs to dma_buf region. */
 -      memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 +      return bcnt;
 +}
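A hypothetical caller sketch for the allocator's new return convention (0 = failure, a short count = partial success), per the kernel-doc above; vport and num_to_alloc are assumed to be in scope and this is not an actual lpfc call site:

        int allocated;

        allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
        if (allocated == 0)
                return -ENOMEM;         /* complete failure */
        else if (allocated < num_to_alloc)
                printk(KERN_WARNING "only %d of %d scsi buffers allocated\n",
                       allocated, num_to_alloc);        /* partial success */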
  
 -      /* Allocate iotag for psb->cur_iocbq. */
 -      iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 -      if (iotag == 0) {
 -              pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
 -                            psb->data, psb->dma_handle);
 -              kfree (psb);
 -              return NULL;
 +/**
 + * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 + * @phba: pointer to lpfc hba data structure.
 + * @axri: pointer to the fcp xri abort wcqe structure.
 + *
 + * This routine is invoked by the worker thread to process a SLI4 fast-path
 + * FCP aborted xri.
 + **/
 +void
 +lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 +                        struct sli4_wcqe_xri_aborted *axri)
 +{
 +      uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
 +      struct lpfc_scsi_buf *psb, *next_psb;
 +      unsigned long iflag = 0;
 +
 +      spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
 +      list_for_each_entry_safe(psb, next_psb,
 +              &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
 +              if (psb->cur_iocbq.sli4_xritag == xri) {
 +                      list_del(&psb->list);
 +                      psb->status = IOSTAT_SUCCESS;
 +                      spin_unlock_irqrestore(
 +                              &phba->sli4_hba.abts_scsi_buf_list_lock,
 +                              iflag);
 +                      lpfc_release_scsi_buf_s4(phba, psb);
 +                      return;
 +              }
 +      }
 +      spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
 +                              iflag);
 +}
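The abort handler above deletes the matching entry while walking a spinlock-protected list, which is why it uses the _safe list iterator. A condensed, generic sketch of that pattern (struct item, remove_by_tag and the tag field are illustrative, not lpfc types):

        #include <linux/list.h>
        #include <linux/spinlock.h>
        #include <linux/types.h>

        struct item {
                struct list_head list;
                u16 tag;
        };

        static struct item *remove_by_tag(spinlock_t *lock,
                                          struct list_head *head, u16 tag)
        {
                struct item *pos, *next;
                unsigned long flags;

                spin_lock_irqsave(lock, flags);
                /* _safe form: 'next' is saved before 'pos' can be deleted */
                list_for_each_entry_safe(pos, next, head, list) {
                        if (pos->tag != tag)
                                continue;
                        list_del(&pos->list);
                        spin_unlock_irqrestore(lock, flags);
                        return pos;     /* hand the entry off outside the lock */
                }
                spin_unlock_irqrestore(lock, flags);
                return NULL;
        }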
 +
 +/**
 + * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffer sgl pages as a block
 + * @phba: pointer to lpfc hba data structure.
 + *
 + * This routine walks the list of scsi buffers that have been allocated and
 + * reposts them to the HBA using SGL block post. This is needed after a
 + * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 + * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 + * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 + *
 + * Returns: 0 = success, non-zero failure.
 + **/
 +int
 +lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
 +{
 +      struct lpfc_scsi_buf *psb;
 +      int index, status, bcnt = 0, rcnt = 0, rc = 0;
 +      LIST_HEAD(sblist);
 +
 +      for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
 +              psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
 +              if (psb) {
 +                      /* Remove from SCSI buffer list */
 +                      list_del(&psb->list);
 +                      /* Add it to a local SCSI buffer list */
 +                      list_add_tail(&psb->list, &sblist);
 +                      if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
 +                              bcnt = rcnt;
 +                              rcnt = 0;
 +                      }
 +              } else
 +                      /* A hole present in the XRI array, need to skip */
 +                      bcnt = rcnt;
 +
 +              if (index == phba->sli4_hba.scsi_xri_cnt - 1)
 +                      /* End of XRI array for SCSI buffer, complete */
 +                      bcnt = rcnt;
 +
 +              /* Continue until we have collected a nembed page worth of sgls */
 +              if (bcnt == 0)
 +                      continue;
 +              /* Now, post the SCSI buffer list sgls as a block */
 +              status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
 +              /* Reset SCSI buffer count for next round of posting */
 +              bcnt = 0;
 +              while (!list_empty(&sblist)) {
 +                      list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
 +                                       list);
 +                      if (status) {
 +                              /* Put this back on the abort scsi list */
 +                              psb->status = IOSTAT_LOCAL_REJECT;
 +                              psb->result = IOERR_ABORT_REQUESTED;
 +                              rc++;
 +                      } else
 +                              psb->status = IOSTAT_SUCCESS;
 +                      /* Put it back into the SCSI buffer list */
 +                      lpfc_release_scsi_buf_s4(phba, psb);
 +              }
        }
 -      psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
 +      return rc;
 +}
  
 -      psb->fcp_cmnd = psb->data;
 -      psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
 -      psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
 -                                                      sizeof(struct fcp_rsp);
 +/**
 + * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 + * @vport: The virtual port for which this call is being executed.
 + * @num_to_alloc: The requested number of buffers to allocate.
 + *
 + * This routine allocates a scsi buffer for a device with the SLI-4 interface
 + * spec. The scsi buffer contains all the necessary information needed to initiate
 + * a SCSI I/O.
 + *
 + * Return codes:
 + *   int - number of scsi buffers that were allocated.
 + *   0 = failure, less than num_to_alloc is a partial failure.
 + **/
 +static int
 +lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 +{
 +      struct lpfc_hba *phba = vport->phba;
 +      struct lpfc_scsi_buf *psb;
 +      struct sli4_sge *sgl;
 +      IOCB_t *iocb;
 +      dma_addr_t pdma_phys_fcp_cmd;
 +      dma_addr_t pdma_phys_fcp_rsp;
 +      dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
 +      uint16_t iotag, last_xritag = NO_XRI;
 +      int status = 0, index;
 +      int bcnt;
 +      int non_sequential_xri = 0;
 +      int rc = 0;
 +      LIST_HEAD(sblist);
 +
 +      for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
 +              psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
 +              if (!psb)
 +                      break;
  
 -      /* Initialize local short-hand pointers. */
 -      bpl = psb->fcp_bpl;
 -      pdma_phys_fcp_cmd = psb->dma_handle;
 -      pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
 -      pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
 -                      sizeof(struct fcp_rsp);
 +              /*
 +               * Get memory from the pci pool to map the virt space to pci bus
 +               * space for an I/O.  The DMA buffer includes space for the
 +               * struct fcp_cmnd, struct fcp_rsp and the number of bde's
 +               * necessary to support the sg_tablesize.
 +               */
 +              psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
 +                                              GFP_KERNEL, &psb->dma_handle);
 +              if (!psb->data) {
 +                      kfree(psb);
 +                      break;
 +              }
  
 -      /*
 -       * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
 -       * list bdes.  Initialize the first two and leave the rest for
 -       * queuecommand.
 -       */
 -      bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
 -      bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
 -      bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
 -      bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 -      bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
 -
 -      /* Setup the physical region for the FCP RSP */
 -      bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
 -      bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
 -      bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
 -      bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 -      bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
 +              /* Initialize virtual ptrs to dma_buf region. */
 +              memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
  
 -      /*
 -       * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
 -       * initialize it with all known data now.
 -       */
 -      iocb = &psb->cur_iocbq.iocb;
 -      iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
 -      if ((phba->sli_rev == 3) &&
 -          !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
 -              /* fill in immediate fcp command BDE */
 -              iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
 +              /* Allocate iotag for psb->cur_iocbq. */
 +              iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 +              if (iotag == 0) {
 +                      kfree(psb);
 +                      break;
 +              }
 +
 +              psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
 +              if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
 +                      pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
 +                            psb->data, psb->dma_handle);
 +                      kfree(psb);
 +                      break;
 +              }
 +              if (last_xritag != NO_XRI
 +                      && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
 +                      non_sequential_xri = 1;
 +              } else
 +                      list_add_tail(&psb->list, &sblist);
 +              last_xritag = psb->cur_iocbq.sli4_xritag;
 +
 +              index = phba->sli4_hba.scsi_xri_cnt++;
 +              psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
 +
 +              psb->fcp_bpl = psb->data;
 +              psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
 +                      - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
 +              psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
 +                                      sizeof(struct fcp_cmnd));
 +
 +              /* Initialize local short-hand pointers. */
 +              sgl = (struct sli4_sge *)psb->fcp_bpl;
 +              pdma_phys_bpl = psb->dma_handle;
 +              pdma_phys_fcp_cmd =
 +                      (psb->dma_handle + phba->cfg_sg_dma_buf_size)
 +                       - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
 +              pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
 +
 +              /*
 +               * The first two bdes are the FCP_CMD and FCP_RSP.  The balance
 +               * are sg list bdes.  Initialize the first two and leave the
 +               * rest for queuecommand.
 +               */
 +              sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
 +              sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
 +              bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
 +              bf_set(lpfc_sli4_sge_last, sgl, 0);
 +              sgl->word2 = cpu_to_le32(sgl->word2);
 +              sgl->word3 = cpu_to_le32(sgl->word3);
 +              sgl++;
 +
 +              /* Setup the physical region for the FCP RSP */
 +              sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
 +              sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
 +              bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
 +              bf_set(lpfc_sli4_sge_last, sgl, 1);
 +              sgl->word2 = cpu_to_le32(sgl->word2);
 +              sgl->word3 = cpu_to_le32(sgl->word3);
 +
 +              /*
 +               * Since the IOCB for the FCP I/O is built into this
 +               * lpfc_scsi_buf, initialize it with all known data now.
 +               */
 +              iocb = &psb->cur_iocbq.iocb;
 +              iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
 +              iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
 +              /* Setting the BLP size to 2 * sizeof(BDE) may not be correct.
 +               * We are setting the bpl to point to our sgl. An sgl entry
 +               * is 16 bytes, while a bpl entry is 12 bytes.
 +               */
                iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
 -              iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
 -                                                     unsli3.fcp_ext.icd);
 -              iocb->un.fcpi64.bdl.addrHigh = 0;
 -              iocb->ulpBdeCount = 0;
 -              iocb->ulpLe = 0;
 -              /* fill in responce BDE */
 -              iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 -              iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
 -                                              sizeof(struct fcp_rsp);
 -              iocb->unsli3.fcp_ext.rbde.addrLow =
 -                                              putPaddrLow(pdma_phys_fcp_rsp);
 -              iocb->unsli3.fcp_ext.rbde.addrHigh =
 -                                              putPaddrHigh(pdma_phys_fcp_rsp);
 -      } else {
 -              iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
 -              iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
 -              iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
 -              iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
 +              iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
 +              iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
                iocb->ulpBdeCount = 1;
                iocb->ulpLe = 1;
 +              iocb->ulpClass = CLASS3;
 +              if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
 +                      pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
 +              else
 +                      pdma_phys_bpl1 = 0;
 +              psb->dma_phys_bpl = pdma_phys_bpl;
 +              phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
 +              if (non_sequential_xri) {
 +                      status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
 +                                              pdma_phys_bpl1,
 +                                              psb->cur_iocbq.sli4_xritag);
 +                      if (status) {
 +                              /* Put this back on the abort scsi list */
 +                              psb->status = IOSTAT_LOCAL_REJECT;
 +                              psb->result = IOERR_ABORT_REQUESTED;
 +                              rc++;
 +                      } else
 +                              psb->status = IOSTAT_SUCCESS;
 +                      /* Put it back into the SCSI buffer list */
 +                      lpfc_release_scsi_buf_s4(phba, psb);
 +                      break;
 +              }
 +      }
 +      if (bcnt) {
 +              status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
 +              /* Reset SCSI buffer count for next round of posting */
 +              while (!list_empty(&sblist)) {
 +                      list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
 +                               list);
 +                      if (status) {
 +                              /* Put this back on the abort scsi list */
 +                              psb->status = IOSTAT_LOCAL_REJECT;
 +                              psb->result = IOERR_ABORT_REQUESTED;
 +                              rc++;
 +                      } else
 +                              psb->status = IOSTAT_SUCCESS;
 +                      /* Put it back into the SCSI buffer list */
 +                      lpfc_release_scsi_buf_s4(phba, psb);
 +              }
        }
 -      iocb->ulpClass = CLASS3;
  
 -      return psb;
 +      return bcnt + non_sequential_xri - rc;
  }
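For orientation, lpfc_new_scsi_buf_s4() above lays each DMA buffer out with the SGEs at the front and the fcp_cmnd/fcp_rsp pair packed at the very end of the buffer. Below is a hedged, stand-alone sketch of that tail arithmetic; the sizes and struct names are made up and are not the real lpfc structures.

/*
 * Hypothetical sketch of the "sgl at the front, cmnd + rsp at the tail"
 * layout computed for each SLI-4 scsi buffer.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_cmnd { uint8_t b[32]; };	/* stands in for struct fcp_cmnd */
struct fake_rsp  { uint8_t b[48]; };	/* stands in for struct fcp_rsp */

int main(void)
{
	unsigned int buf_size = 4096;	/* stands in for cfg_sg_dma_buf_size */
	uint8_t buf[4096];		/* stands in for psb->data */

	uint8_t *bpl = buf;		/* SGE entries start at offset 0 */
	uint8_t *cmnd = buf + buf_size -
		(sizeof(struct fake_cmnd) + sizeof(struct fake_rsp));
	uint8_t *rsp = cmnd + sizeof(struct fake_cmnd);

	printf("bpl at +%zu, cmnd at +%zu, rsp at +%zu, end at +%u\n",
	       (size_t)(bpl - buf), (size_t)(cmnd - buf),
	       (size_t)(rsp - buf), buf_size);
	return 0;
}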
  
  /**
 - * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba
 - * @phba: The Hba for which this call is being executed.
 + * lpfc_new_scsi_buf - Wrapper function for the scsi buffer allocator
 + * @vport: The virtual port for which this call is being executed.
 + * @num_to_alloc: The requested number of buffers to allocate.
 + *
 + * This routine wraps the actual SCSI buffer allocator function pointer from
 + * the lpfc_hba struct.
 + *
 + * Return codes:
 + *   int - number of scsi buffers that were allocated.
 + *   0 = failure, fewer than num_to_alloc indicates a partial failure.
 + **/
 +static inline int
 +lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
 +{
 +      return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
 +}
 +
 +/**
 + * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 + * @phba: The HBA for which this call is being executed.
   *
   * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
   * and returns to caller.
@@@ -890,7 -591,7 +890,7 @@@ lpfc_get_scsi_buf(struct lpfc_hba * phb
  }
  
  /**
 - * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list
 + * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
   * @phba: The Hba for which this call is being executed.
   * @psb: The scsi buffer which is being released.
   *
   * lpfc_scsi_buf_list list.
   **/
  static void
 -lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 +lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
  {
        unsigned long iflag = 0;
  
  }
  
  /**
 - * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer
 + * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 + * @phba: The Hba for which this call is being executed.
 + * @psb: The scsi buffer which is being released.
 + *
 + * This routine releases @psb scsi buffer by adding it to tail of @phba
 + * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
 + * and cannot be reused for at least RA_TOV amount of time if the
 + * command was aborted.
 + **/
 +static void
 +lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 +{
 +      unsigned long iflag = 0;
 +
 +      if (psb->status == IOSTAT_LOCAL_REJECT
 +              && psb->result == IOERR_ABORT_REQUESTED) {
 +              spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
 +                                      iflag);
 +              psb->pCmd = NULL;
 +              list_add_tail(&psb->list,
 +                      &phba->sli4_hba.lpfc_abts_scsi_buf_list);
 +              spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
 +                                      iflag);
 +      } else {
 +
 +              spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
 +              psb->pCmd = NULL;
 +              list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
 +              spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
 +      }
 +}
 +
 +/**
 + * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 + * @phba: The Hba for which this call is being executed.
 + * @psb: The scsi buffer which is being released.
 + *
 + * This routine releases @psb scsi buffer by adding it to tail of @phba
 + * lpfc_scsi_buf_list list.
 + **/
 +static void
 +lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 +{
 +
 +      phba->lpfc_release_scsi_buf(phba, psb);
 +}
 +
 +/**
 + * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
   * @phba: The Hba for which this call is being executed.
   * @lpfc_cmd: The scsi buffer which is going to be mapped.
   *
   * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 - * field of @lpfc_cmd. This routine scans through sg elements and format the
 - * bdea. This routine also initializes all IOCB fields which are dependent on
 - * scsi command request buffer.
 + * field of @lpfc_cmd for a device with the SLI-3 interface spec. This routine
 + * scans through the sg elements and formats the bdes. It also initializes all
 + * IOCB fields which are dependent on the scsi command request buffer.
   *
   * Return codes:
   *   1 - Error
   *   0 - Success
   **/
  static int
 -lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 +lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
  {
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct scatterlist *sgel = NULL;
@@@ -1659,10 -1312,10 +1659,10 @@@ lpfc_parse_bg_err(struct lpfc_hba *phba
        uint32_t bgstat = bgf->bgstat;
        uint64_t failing_sector = 0;
  
-       printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
+       printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
                        "bgstat=0x%x bghm=0x%x\n",
                        cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
-                       cmd->request->nr_sectors, bgstat, bghm);
+                       blk_rq_sectors(cmd->request), bgstat, bghm);
  
        spin_lock(&_dump_buf_lock);
        if (!_dump_buf_done) {
        return ret;
  }
  
 +/**
 + * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 + * @phba: The Hba for which this call is being executed.
 + * @lpfc_cmd: The scsi buffer which is going to be mapped.
 + *
 + * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 + * field of @lpfc_cmd for device with SLI-4 interface spec.
 + *
 + * Return codes:
 + *    1 - Error
 + *    0 - Success
 + **/
 +static int
 +lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 +{
 +      struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
 +      struct scatterlist *sgel = NULL;
 +      struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
 +      struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
 +      IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
 +      dma_addr_t physaddr;
 +      uint32_t num_bde = 0;
 +      uint32_t dma_len;
 +      uint32_t dma_offset = 0;
 +      int nseg;
 +
 +      /*
 +       * There are three possibilities here - use scatter-gather segment, use
 +       * the single mapping, or neither.  Start the lpfc command prep by
 +       * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
 +       * data bde entry.
 +       */
 +      if (scsi_sg_count(scsi_cmnd)) {
 +              /*
 +               * The driver stores the segment count returned from pci_map_sg
 +               * because this is a count of dma-mappings used to map the use_sg
 +               * pages.  They are not guaranteed to be the same for those
 +               * architectures that implement an IOMMU.
 +               */
 +
 +              nseg = scsi_dma_map(scsi_cmnd);
 +              if (unlikely(!nseg))
 +                      return 1;
 +              sgl += 1;
 +              /* clear the last flag in the fcp_rsp map entry */
 +              sgl->word2 = le32_to_cpu(sgl->word2);
 +              bf_set(lpfc_sli4_sge_last, sgl, 0);
 +              sgl->word2 = cpu_to_le32(sgl->word2);
 +              sgl += 1;
 +
 +              lpfc_cmd->seg_cnt = nseg;
 +              if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
 +                      printk(KERN_ERR "%s: Too many sg segments from "
 +                             "dma_map_sg.  Config %d, seg_cnt %d\n",
 +                             __func__, phba->cfg_sg_seg_cnt,
 +                             lpfc_cmd->seg_cnt);
 +                      scsi_dma_unmap(scsi_cmnd);
 +                      return 1;
 +              }
 +
 +              /*
 +               * The driver established a maximum scatter-gather segment count
 +               * during probe that limits the number of sg elements in any
 +               * single scsi command.  Just run through the seg_cnt and format
 +               * the sge's.
 +               * When using SLI-3 the driver will try to fit all the BDEs into
 +               * the IOCB. If it can't then the BDEs get added to a BPL as it
 +               * does for SLI-2 mode.
 +               */
 +              scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
 +                      physaddr = sg_dma_address(sgel);
 +                      dma_len = sg_dma_len(sgel);
 +                      bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
 +                      sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
 +                      sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
 +                      if ((num_bde + 1) == nseg)
 +                              bf_set(lpfc_sli4_sge_last, sgl, 1);
 +                      else
 +                              bf_set(lpfc_sli4_sge_last, sgl, 0);
 +                      bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
 +                      sgl->word2 = cpu_to_le32(sgl->word2);
 +                      sgl->word3 = cpu_to_le32(sgl->word3);
 +                      dma_offset += dma_len;
 +                      sgl++;
 +              }
 +      } else {
 +              sgl += 1;
 +              /* clear the last flag in the fcp_rsp map entry */
 +              sgl->word2 = le32_to_cpu(sgl->word2);
 +              bf_set(lpfc_sli4_sge_last, sgl, 1);
 +              sgl->word2 = cpu_to_le32(sgl->word2);
 +      }
 +
 +      /*
 +       * Finish initializing those IOCB fields that are dependent on the
 +       * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
 +       * explicitly reinitialized and all iocb memory resources are
 +       * reused.
 +       */
 +      fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
 +
 +      /*
 +       * Due to difference in data length between DIF/non-DIF paths,
 +       * we need to set word 4 of IOCB here
 +       */
 +      iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
 +      return 0;
 +}
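The scatter-gather loop in lpfc_scsi_prep_dma_buf_s4() above formats one SGE per DMA segment and sets the "last" bit only on the final entry. A minimal illustrative sketch of that pattern in plain C (invented types, not the sli4_sge/bf_set kernel helpers, and not part of this patch):

/*
 * Hedged sketch: walk the segments, fill each entry, and flag the last one.
 */
#include <stdio.h>

struct fake_sge {
	unsigned long addr;
	unsigned int len;
	int last;		/* stands in for the lpfc_sli4_sge_last bit */
};

int main(void)
{
	struct fake_sge sge[4];
	unsigned int i, nseg = 4, offset = 0;

	for (i = 0; i < nseg; i++) {
		sge[i].addr = 0x1000UL + offset;	/* fake dma address */
		sge[i].len = 512;
		sge[i].last = (i + 1 == nseg);		/* only the final SGE */
		offset += sge[i].len;
	}

	for (i = 0; i < nseg; i++)
		printf("sge[%u]: addr=%#lx len=%u last=%d\n",
		       i, sge[i].addr, sge[i].len, sge[i].last);
	return 0;
}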
 +
 +/**
 + * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 + * @phba: The Hba for which this call is being executed.
 + * @lpfc_cmd: The scsi buffer which is going to be mapped.
 + *
 + * This routine wraps the actual DMA mapping function pointer from the
 + * lpfc_hba struct.
 + *
 + * Return codes:
 + *    1 - Error
 + *    0 - Success
 + **/
 +static inline int
 +lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 +{
 +      return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 +}
 +
  /**
   * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
   * @phba: Pointer to hba context object.
@@@ -1978,15 -1504,15 +1978,15 @@@ lpfc_send_scsi_error_event(struct lpfc_
  }
  
  /**
 - * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather
 - * @phba: The Hba for which this call is being executed.
 + * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev
 + * @phba: The HBA for which this call is being executed.
   * @psb: The scsi buffer which is going to be un-mapped.
   *
   * This routine does DMA un-mapping of scatter gather list of scsi command
 - * field of @lpfc_cmd.
 + * field of @lpfc_cmd for device with SLI-3 interface spec.
   **/
  static void
 -lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
 +lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
  {
        /*
         * There are only two special cases to consider.  (1) the scsi command
                                psb->pCmd->sc_data_direction);
  }
  
 +/**
 + * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
 + * @phba: The Hba for which this call is being executed.
 + * @psb: The scsi buffer which is going to be un-mapped.
 + *
 + * This routine does DMA un-mapping of scatter gather list of scsi command
 + * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
 + * remove the sgl for this scsi buffer then we will do it here. For now
 + * we should be able to just call the sli3 unprep routine.
 + **/
 +static void
 +lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 +{
 +      lpfc_scsi_unprep_dma_buf_s3(phba, psb);
 +}
 +
 +/**
 + * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
 + * @phba: The Hba for which this call is being executed.
 + * @psb: The scsi buffer which is going to be un-mapped.
 + *
 + * This routine wraps the actual DMA un-mapping function pointer from the
 + * lpfc_hba struct.
 + **/
 +static void
 +lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 +{
 +      phba->lpfc_scsi_unprep_dma_buf(phba, psb);
 +}
 +
  /**
   * lpfc_handler_fcp_err - FCP response handler
   * @vport: The virtual port for which this call is being executed.
@@@ -2180,7 -1676,7 +2180,7 @@@ lpfc_handle_fcp_err(struct lpfc_vport *
   * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
   * @phba: The Hba for which this call is being executed.
   * @pIocbIn: The command IOCBQ for the scsi cmnd.
 - * @pIocbOut: The response IOCBQ for the scsi cmnd .
 + * @pIocbOut: The response IOCBQ for the scsi cmnd.
   *
   * This routine assigns scsi command result by looking into response IOCB
   * status field appropriately. This routine handles QUEUE FULL condition as
@@@ -2461,16 -1957,16 +2461,16 @@@ lpfc_fcpcmd_to_iocb(uint8_t *data, stru
  }
  
  /**
 - * lpfc_scsi_prep_cmnd -  Routine to convert scsi cmnd to FCP information unit
 + * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP info unit for SLI3 dev
   * @vport: The virtual port for which this call is being executed.
   * @lpfc_cmd: The scsi command which needs to send.
   * @pnode: Pointer to lpfc_nodelist.
   *
   * This routine initializes fcp_cmnd and iocb data structure from scsi command
 - * to transfer.
 + * to transfer for device with SLI3 interface spec.
   **/
  static void
 -lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 +lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
                    struct lpfc_nodelist *pnode)
  {
        struct lpfc_hba *phba = vport->phba;
        if (scsi_sg_count(scsi_cmnd)) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
 -                      iocb_cmd->un.fcpi.fcpi_parm = 0;
 -                      iocb_cmd->ulpPU = 0;
 +                      if (phba->sli_rev < LPFC_SLI_REV4) {
 +                              iocb_cmd->un.fcpi.fcpi_parm = 0;
 +                              iocb_cmd->ulpPU = 0;
 +                      } else
 +                              iocb_cmd->ulpPU = PARM_READ_CHECK;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
  }
  
  /**
 - * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit
 + * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP info unit for SLI4 dev
 + * @vport: The virtual port for which this call is being executed.
 + * @lpfc_cmd: The scsi command which needs to send.
 + * @pnode: Pointer to lpfc_nodelist.
 + *
 + * This routine initializes fcp_cmnd and iocb data structure from scsi command
 + * to transfer for device with SLI4 interface spec.
 + **/
 +static void
 +lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 +                     struct lpfc_nodelist *pnode)
 +{
 +      /*
 +       * The prep cmnd routines do not touch the sgl or its
 +       * entries. We may not have to do anything different.
 +       * I will leave this function in place until we can
 +       * run some IO through the driver and determine if changes
 +       * are needed.
 +       */
 +      return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
 +}
 +
 +/**
 + * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
 + * @vport: The virtual port for which this call is being executed.
 + * @lpfc_cmd: The scsi command which needs to send.
 + * @pnode: Pointer to lpfc_nodelist.
 + *
 + * This routine wraps the actual convert SCSI cmnd function pointer from
 + * the lpfc_hba struct.
 + **/
 +static inline void
 +lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 +                  struct lpfc_nodelist *pnode)
 +{
 +      vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
 +}
 +
 +/**
 + * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
   * @vport: The virtual port for which this call is being executed.
   * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
   * @lun: Logical unit number.
   * @task_mgmt_cmd: SCSI task management command.
   *
 - * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
 + * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 + * for device with SLI-3 interface spec.
   *
   * Return codes:
   *   0 - Error
   *   1 - Success
   **/
  static int
 -lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 +lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
                             struct lpfc_scsi_buf *lpfc_cmd,
                             unsigned int lun,
                             uint8_t task_mgmt_cmd)
        return 1;
  }
  
 +/**
 + * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
 + * @vport: The virtual port for which this call is being executed.
 + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 + * @lun: Logical unit number.
 + * @task_mgmt_cmd: SCSI task management command.
 + *
 + * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 + * for device with SLI-4 interface spec.
 + *
 + * Return codes:
 + *    0 - Error
 + *    1 - Success
 + **/
 +static int
 +lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
 +                              struct lpfc_scsi_buf *lpfc_cmd,
 +                              unsigned int lun,
 +                              uint8_t task_mgmt_cmd)
 +{
 +      /*
 +       * The prep cmnd routines do not touch the sgl or its
 +       * entries. We may not have to do anything different.
 +       * I will leave this function in place until we can
 +       * run some IO through the driver and determine if changes
 +       * are needed.
 +       */
 +      return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
 +                                              task_mgmt_cmd);
 +}
 +
 +/**
 + * lpfc_scsi_prep_task_mgmt_cmd - Wrapper func to convert scsi TM cmd to FCP info
 + * @vport: The virtual port for which this call is being executed.
 + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 + * @lun: Logical unit number.
 + * @task_mgmt_cmd: SCSI task management command.
 + *
 + * This routine wraps the actual convert SCSI TM to FCP information unit
 + * function pointer from the lpfc_hba struct.
 + *
 + * Return codes:
 + *    0 - Error
 + *    1 - Success
 + **/
 +static inline int
 +lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
 +                           struct lpfc_scsi_buf *lpfc_cmd,
 +                           unsigned int lun,
 +                           uint8_t task_mgmt_cmd)
 +{
 +      struct lpfc_hba *phba = vport->phba;
 +
 +      return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
 +                                                task_mgmt_cmd);
 +}
 +
 +/**
 + * lpfc_scsi_api_table_setup - Set up scsi api function jump table
 + * @phba: The hba struct for which this call is being executed.
 + * @dev_grp: The HBA PCI-Device group number.
 + *
 + * This routine sets up the SCSI interface API function jump table in @phba
 + * struct.
 + * Returns: 0 - success, -ENODEV - failure.
 + **/
 +int
 +lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 +{
 +
 +      switch (dev_grp) {
 +      case LPFC_PCI_DEV_LP:
 +              phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
 +              phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
 +              phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
 +              phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
 +              phba->lpfc_scsi_prep_task_mgmt_cmd =
 +                                      lpfc_scsi_prep_task_mgmt_cmd_s3;
 +              phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
 +              break;
 +      case LPFC_PCI_DEV_OC:
 +              phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
 +              phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
 +              phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
 +              phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
 +              phba->lpfc_scsi_prep_task_mgmt_cmd =
 +                                      lpfc_scsi_prep_task_mgmt_cmd_s4;
 +              phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
 +              break;
 +      default:
 +              lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                              "1418 Invalid HBA PCI-device group: 0x%x\n",
 +                              dev_grp);
 +              return -ENODEV;
 +              break;
 +      }
 +      phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
 +      phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
 +      return 0;
 +}
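lpfc_scsi_api_table_setup() above is a per-device-group function jump table: the SLI-3 or SLI-4 variants are selected once at setup time, and the inline wrappers earlier in the file simply dispatch through the stored pointers. For readers unfamiliar with the idiom, a minimal sketch with invented names (not the lpfc types, not part of this patch):

/*
 * Hedged sketch of function-pointer dispatch selected per device group.
 */
#include <stdio.h>

struct hba {
	int (*prep_dma_buf)(struct hba *h);
	void (*release_buf)(struct hba *h);
};

static int prep_s3(struct hba *h)  { (void)h; puts("SLI-3 prep"); return 0; }
static int prep_s4(struct hba *h)  { (void)h; puts("SLI-4 prep"); return 0; }
static void release_common(struct hba *h) { (void)h; puts("release"); }

enum dev_grp { DEV_LP, DEV_OC };	/* stand-ins for LPFC_PCI_DEV_LP/_OC */

static int api_table_setup(struct hba *h, enum dev_grp grp)
{
	switch (grp) {
	case DEV_LP:
		h->prep_dma_buf = prep_s3;
		break;
	case DEV_OC:
		h->prep_dma_buf = prep_s4;
		break;
	default:
		return -1;	/* unknown group, like -ENODEV above */
	}
	h->release_buf = release_common;
	return 0;
}

int main(void)
{
	struct hba h;

	if (!api_table_setup(&h, DEV_OC)) {
		h.prep_dma_buf(&h);	/* dispatches to the SLI-4 variant */
		h.release_buf(&h);
	}
	return 0;
}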
 +
  /**
   * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
   * @phba: The Hba for which this call is being executed.
@@@ -2826,8 -2178,9 +2826,8 @@@ lpfc_scsi_tgt_reset(struct lpfc_scsi_bu
        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                         "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
                         tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
 -      status = lpfc_sli_issue_iocb_wait(phba,
 -                                     &phba->sli.ring[phba->sli.fcp_ring],
 -                                     iocbq, iocbqrsp, lpfc_cmd->timeout);
 +      status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
 +                                        iocbq, iocbqrsp, lpfc_cmd->timeout);
        if (status != IOCB_SUCCESS) {
                if (status == IOCB_TIMEDOUT) {
                        iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@@ -2952,6 -2305,7 +2952,6 @@@ lpfc_queuecommand(struct scsi_cmnd *cmn
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 -      struct lpfc_sli   *psli = &phba->sli;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;
        struct lpfc_scsi_buf *lpfc_cmd;
                if (cmnd->cmnd[0] == READ_10)
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                        "9035 BLKGRD: READ @ sector %llu, "
-                                        "count %lu\n",
-                                        (unsigned long long)scsi_get_lba(cmnd),
-                                       cmnd->request->nr_sectors);
+                                       "count %u\n",
+                                       (unsigned long long)scsi_get_lba(cmnd),
+                                       blk_rq_sectors(cmnd->request));
                else if (cmnd->cmnd[0] == WRITE_10)
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                        "9036 BLKGRD: WRITE @ sector %llu, "
-                                       "count %lu cmd=%p\n",
+                                       "count %u cmd=%p\n",
                                        (unsigned long long)scsi_get_lba(cmnd),
-                                       cmnd->request->nr_sectors,
+                                       blk_rq_sectors(cmnd->request),
                                        cmnd);
  
                err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
                if (cmnd->cmnd[0] == READ_10)
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9040 dbg: READ @ sector %llu, "
-                                        "count %lu\n",
+                                        "count %u\n",
                                         (unsigned long long)scsi_get_lba(cmnd),
-                                        cmnd->request->nr_sectors);
+                                        blk_rq_sectors(cmnd->request));
                else if (cmnd->cmnd[0] == WRITE_10)
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9041 dbg: WRITE @ sector %llu, "
-                                        "count %lu cmd=%p\n",
+                                        "count %u cmd=%p\n",
                                         (unsigned long long)scsi_get_lba(cmnd),
-                                        cmnd->request->nr_sectors, cmnd);
+                                        blk_rq_sectors(cmnd->request), cmnd);
                else
                        lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9042 dbg: parser not implemented\n");
        lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
  
        atomic_inc(&ndlp->cmd_pending);
 -      err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
 +      err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
                                  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
        if (err) {
                atomic_dec(&ndlp->cmd_pending);
@@@ -3136,6 -2490,7 +3136,6 @@@ lpfc_abort_handler(struct scsi_cmnd *cm
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 -      struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
        struct lpfc_iocbq *iocb;
        struct lpfc_iocbq *abtsiocb;
        struct lpfc_scsi_buf *lpfc_cmd;
        icmd = &abtsiocb->iocb;
        icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
        icmd->un.acxri.abortContextTag = cmd->ulpContext;
 -      icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
 +      if (phba->sli_rev == LPFC_SLI_REV4)
 +              icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
 +      else
 +              icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
  
        icmd->ulpLe = 1;
        icmd->ulpClass = cmd->ulpClass;
  
        abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
        abtsiocb->vport = vport;
 -      if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
 +      if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
 +          IOCB_ERROR) {
                lpfc_sli_release_iocbq(phba, abtsiocb);
                ret = FAILED;
                goto out;
@@@ -3317,7 -2668,8 +3317,7 @@@ lpfc_device_reset_handler(struct scsi_c
                         "0703 Issue target reset to TGT %d LUN %d "
                         "rpi x%x nlp_flag x%x\n", cmnd->device->id,
                         cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
 -      status = lpfc_sli_issue_iocb_wait(phba,
 -                                        &phba->sli.ring[phba->sli.fcp_ring],
 +      status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
                                          iocbq, iocbqrsp, lpfc_cmd->timeout);
        if (status == IOCB_TIMEDOUT) {
                iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@@ -3473,10 -2825,11 +3473,10 @@@ lpfc_slave_alloc(struct scsi_device *sd
  {
        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 -      struct lpfc_scsi_buf *scsi_buf = NULL;
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
 -      uint32_t total = 0, i;
 +      uint32_t total = 0;
        uint32_t num_to_alloc = 0;
 -      unsigned long flags;
 +      int num_allocated = 0;
  
        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;
                                 (phba->cfg_hba_queue_depth - total));
                num_to_alloc = phba->cfg_hba_queue_depth - total;
        }
 -
 -      for (i = 0; i < num_to_alloc; i++) {
 -              scsi_buf = lpfc_new_scsi_buf(vport);
 -              if (!scsi_buf) {
 -                      lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 -                                       "0706 Failed to allocate "
 -                                       "command buffer\n");
 -                      break;
 -              }
 -
 -              spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
 -              phba->total_scsi_bufs++;
 -              list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
 -              spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
 +      num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
 +      if (num_to_alloc != num_allocated) {
 +                      lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 +                               "0708 Allocation request of %d "
 +                               "command buffers did not succeed.  "
 +                               "Allocated %d buffers.\n",
 +                               num_to_alloc, num_allocated);
        }
        return 0;
  }
index e4d858617c8dad22a7c1f241e5f50ce16e027099,5c65da519e39af511334816c18977912a4920637..686695b155c7af9899864c5d3103e3cfd49624b8
@@@ -264,7 -264,7 +264,7 @@@ struct rep_manu_reply
  };
  
  /**
 - * transport_expander_report_manufacture - obtain SMP report_manufacture
 + * _transport_expander_report_manufacture - obtain SMP report_manufacture
   * @ioc: per adapter object
   * @sas_address: expander sas address
   * @edev: the sas_expander_device object
   * Returns 0 for success, non-zero for failure.
   */
  static int
 -transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
 +_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
      u64 sas_address, struct sas_expander_device *edev)
  {
        Mpi2SmpPassthroughRequest_t *mpi_request;
@@@ -578,7 -578,7 +578,7 @@@ mpt2sas_transport_port_add(struct MPT2S
            MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
            mpt2sas_port->remote_identify.device_type ==
            MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
 -              transport_expander_report_manufacture(ioc,
 +              _transport_expander_report_manufacture(ioc,
                    mpt2sas_port->remote_identify.sas_address,
                    rphy_to_expander_device(rphy));
  
@@@ -852,7 -852,7 +852,7 @@@ rphy_to_ioc(struct sas_rphy *rphy
  }
  
  /**
 - * transport_get_linkerrors -
 + * _transport_get_linkerrors -
   * @phy: The sas phy object
   *
   * Only support sas_host direct attached phys.
   *
   */
  static int
 -transport_get_linkerrors(struct sas_phy *phy)
 +_transport_get_linkerrors(struct sas_phy *phy)
  {
        struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
        struct _sas_phy *mpt2sas_phy;
  }
  
  /**
 - * transport_get_enclosure_identifier -
 + * _transport_get_enclosure_identifier -
   * @phy: The sas phy object
   *
   * Obtain the enclosure logical id for an expander.
   * Returns 0 for success, non-zero for failure.
   */
  static int
 -transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
 +_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
  {
        struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
        struct _sas_node *sas_expander;
  }
  
  /**
 - * transport_get_bay_identifier -
 + * _transport_get_bay_identifier -
   * @phy: The sas phy object
   *
   * Returns the slot id for a device that resides inside an enclosure.
   */
  static int
 -transport_get_bay_identifier(struct sas_rphy *rphy)
 +_transport_get_bay_identifier(struct sas_rphy *rphy)
  {
        struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
        struct _sas_device *sas_device;
  }
  
  /**
 - * transport_phy_reset -
 + * _transport_phy_reset -
   * @phy: The sas phy object
   * @hard_reset:
   *
   * Returns 0 for success, non-zero for failure.
   */
  static int
 -transport_phy_reset(struct sas_phy *phy, int hard_reset)
 +_transport_phy_reset(struct sas_phy *phy, int hard_reset)
  {
        struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
        struct _sas_phy *mpt2sas_phy;
  }
  
  /**
 - * transport_smp_handler - transport portal for smp passthru
 + * _transport_smp_handler - transport portal for smp passthru
   * @shost: shost object
   * @rphy: sas transport rphy object
   * @req:
   *           smp_rep_general /sys/class/bsg/expander-5:0
   */
  static int
 -transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 +_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
      struct request *req)
  {
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
        if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
                printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, "
                    "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt,
-                   req->data_len, rsp->bio->bi_vcnt, rsp->data_len);
+                   blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
                return -EINVAL;
        }
  
        *((u64 *)&mpi_request->SASAddress) = (rphy) ?
            cpu_to_le64(rphy->identify.sas_address) :
            cpu_to_le64(ioc->sas_hba.sas_address);
-       mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4);
+       mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
        psge = &mpi_request->SGL;
  
        /* WRITE sgel first */
            MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
        dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
-             req->data_len, PCI_DMA_BIDIRECTIONAL);
+               blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
        if (!dma_addr_out) {
                mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
                goto unmap;
        }
  
-       ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4),
+       ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4),
            dma_addr_out);
  
        /* incr sgel */
            MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
            MPI2_SGE_FLAGS_END_OF_LIST);
        sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-       dma_addr_in =  pci_map_single(ioc->pdev, bio_data(rsp->bio),
-             rsp->data_len, PCI_DMA_BIDIRECTIONAL);
+       dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
+                                    blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
        if (!dma_addr_in) {
                mpt2sas_base_free_smid(ioc, le16_to_cpu(smid));
                goto unmap;
        }
  
-       ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4),
+       ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4),
            dma_addr_in);
  
        dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - "
  
                memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
                req->sense_len = sizeof(*mpi_reply);
-               req->data_len = 0;
-               rsp->data_len -= mpi_reply->ResponseDataLength;
+               req->resid_len = 0;
+               rsp->resid_len -= mpi_reply->ResponseDataLength;
        } else {
                dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT
                    "%s - no reply\n", ioc->name, __func__));
  
   unmap:
        if (dma_addr_out)
-               pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len,
+               pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
                    PCI_DMA_BIDIRECTIONAL);
        if (dma_addr_in)
-               pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len,
+               pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
                    PCI_DMA_BIDIRECTIONAL);
  
   out:
  }
  
  struct sas_function_template mpt2sas_transport_functions = {
 -      .get_linkerrors         = transport_get_linkerrors,
 -      .get_enclosure_identifier = transport_get_enclosure_identifier,
 -      .get_bay_identifier     = transport_get_bay_identifier,
 -      .phy_reset              = transport_phy_reset,
 -      .smp_handler            = transport_smp_handler,
 +      .get_linkerrors         = _transport_get_linkerrors,
 +      .get_enclosure_identifier = _transport_get_enclosure_identifier,
 +      .get_bay_identifier     = _transport_get_bay_identifier,
 +      .phy_reset              = _transport_phy_reset,
 +      .smp_handler            = _transport_smp_handler,
  };
  
  struct scsi_transport_template *mpt2sas_transport_template;
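The handlers renamed above are exposed through mpt2sas_transport_functions, and mpt2sas_transport_template (declared just above) is what the driver registers with the SAS transport class elsewhere in its init path. That registration is not shown in this hunk; a hedged sketch of the usual sas_attach_transport()/sas_release_transport() pattern, with hypothetical wrapper names, would be:

#include <linux/module.h>
#include <scsi/scsi_transport_sas.h>

/* Sketch only: the init/exit wrappers are invented; mpt2sas performs the
 * equivalent attach in its own module init, outside this diff. */
static int __init example_transport_init(void)
{
	mpt2sas_transport_template =
	    sas_attach_transport(&mpt2sas_transport_functions);
	return mpt2sas_transport_template ? 0 : -ENODEV;
}

static void __exit example_transport_exit(void)
{
	sas_release_transport(mpt2sas_transport_template);
}

module_init(example_transport_init);
module_exit(example_transport_exit);
MODULE_LICENSE("GPL");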
index 71341ad323448c3d3b8e7df520fa531311e94ab0,5776b2ab6b12a5fa54fe974b11e31619d8ee18e0..7a117c18114cd757de416045da2d453327742131
@@@ -118,39 -118,39 +118,39 @@@ static int _osd_print_system_info(struc
                _osd_ver_desc(or));
  
        pFirst = get_attrs[a++].val_ptr;
 -      OSD_INFO("OSD_ATTR_RI_VENDOR_IDENTIFICATION [%s]\n",
 +      OSD_INFO("VENDOR_IDENTIFICATION  [%s]\n",
                (char *)pFirst);
  
        pFirst = get_attrs[a++].val_ptr;
 -      OSD_INFO("OSD_ATTR_RI_PRODUCT_IDENTIFICATION [%s]\n",
 +      OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
                (char *)pFirst);
  
        pFirst = get_attrs[a++].val_ptr;
 -      OSD_INFO("OSD_ATTR_RI_PRODUCT_MODEL [%s]\n",
 +      OSD_INFO("PRODUCT_MODEL          [%s]\n",
                (char *)pFirst);
  
        pFirst = get_attrs[a++].val_ptr;
 -      OSD_INFO("OSD_ATTR_RI_PRODUCT_REVISION_LEVEL [%u]\n",
 +      OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
                pFirst ? get_unaligned_be32(pFirst) : ~0U);
  
        pFirst = get_attrs[a++].val_ptr;
 -      OSD_INFO("OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER [%s]\n",
 +      OSD_INFO("PRODUCT_SERIAL_NUMBER  [%s]\n",
                (char *)pFirst);
  
        pFirst = get_attrs[a].val_ptr;
 -      OSD_INFO("OSD_ATTR_RI_OSD_NAME [%s]\n", (char *)pFirst);
 +      OSD_INFO("OSD_NAME               [%s]\n", (char *)pFirst);
        a++;
  
        pFirst = get_attrs[a++].val_ptr;
 -      OSD_INFO("OSD_ATTR_RI_TOTAL_CAPACITY [0x%llx]\n",
 +      OSD_INFO("TOTAL_CAPACITY         [0x%llx]\n",
                pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
  
        pFirst = get_attrs[a++].val_ptr;
 -      OSD_INFO("OSD_ATTR_RI_USED_CAPACITY [0x%llx]\n",
 +      OSD_INFO("USED_CAPACITY          [0x%llx]\n",
                pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
  
        pFirst = get_attrs[a++].val_ptr;
 -      OSD_INFO("OSD_ATTR_RI_NUMBER_OF_PARTITIONS [%llu]\n",
 +      OSD_INFO("NUMBER_OF_PARTITIONS   [%llu]\n",
                pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
  
        if (a >= nelem)
  
        /* FIXME: Where are the time utilities */
        pFirst = get_attrs[a++].val_ptr;
 -      OSD_INFO("OSD_ATTR_RI_CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
 +      OSD_INFO("CLOCK                  [0x%02x%02x%02x%02x%02x%02x]\n",
                ((char *)pFirst)[0], ((char *)pFirst)[1],
                ((char *)pFirst)[2], ((char *)pFirst)[3],
                ((char *)pFirst)[4], ((char *)pFirst)[5]);
  
                hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
                                   sid_dump, sizeof(sid_dump), true);
 -              OSD_INFO("OSD_ATTR_RI_OSD_SYSTEM_ID(%d) [%s]\n", len, sid_dump);
 +              OSD_INFO("OSD_SYSTEM_ID(%d)\n"
 +                       "        [%s]\n", len, sid_dump);
                a++;
        }
  out:
@@@ -670,7 -669,7 +670,7 @@@ static int _osd_req_list_objects(struc
        __be16 action, const struct osd_obj_id *obj, osd_id initial_id,
        struct osd_obj_id_list *list, unsigned nelem)
  {
 -      struct request_queue *q = or->osd_dev->scsi_device->request_queue;
 +      struct request_queue *q = osd_request_queue(or->osd_dev);
        u64 len = nelem * sizeof(osd_id) + sizeof(*list);
        struct bio *bio;
  
@@@ -779,32 -778,16 +779,32 @@@ EXPORT_SYMBOL(osd_req_remove_object)
  */
  
  void osd_req_write(struct osd_request *or,
 -      const struct osd_obj_id *obj, struct bio *bio, u64 offset)
 +      const struct osd_obj_id *obj, u64 offset,
 +      struct bio *bio, u64 len)
  {
 -      _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, bio->bi_size);
 +      _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
        WARN_ON(or->out.bio || or->out.total_bytes);
 -      bio->bi_rw |= (1 << BIO_RW);
 +      WARN_ON(0 ==  bio_rw_flagged(bio, BIO_RW));
        or->out.bio = bio;
 -      or->out.total_bytes = bio->bi_size;
 +      or->out.total_bytes = len;
  }
  EXPORT_SYMBOL(osd_req_write);
  
 +int osd_req_write_kern(struct osd_request *or,
 +      const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
 +{
 +      struct request_queue *req_q = osd_request_queue(or->osd_dev);
 +      struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
 +
 +      if (IS_ERR(bio))
 +              return PTR_ERR(bio);
 +
 +      bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
 +      osd_req_write(or, obj, offset, bio, len);
 +      return 0;
 +}
 +EXPORT_SYMBOL(osd_req_write_kern);
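A hedged usage sketch for the new osd_req_write_kern() helper follows; the surrounding lifecycle calls and their exact signatures (osd_start_request, osd_finalize_request, osd_execute_request, osd_end_request) are assumed from the osd initiator API and are not part of this hunk, and error handling is trimmed.

#include <scsi/osd_initiator.h>

/* Sketch only: example_write_kern and its caps/cap_key handling are
 * illustrative, not a definitive use of the API. */
static int example_write_kern(struct osd_dev *od,
			      const struct osd_obj_id *obj,
			      void *buff, u64 len,
			      const void *caps, const u8 *cap_key)
{
	struct osd_request *or;
	int ret;

	or = osd_start_request(od, GFP_KERNEL);
	if (!or)
		return -ENOMEM;

	/* map the kernel buffer and encode a WRITE at offset 0 */
	ret = osd_req_write_kern(or, obj, 0, buff, len);
	if (!ret)
		ret = osd_finalize_request(or, 0, caps, cap_key);
	if (!ret)
		ret = osd_execute_request(or);

	osd_end_request(or);
	return ret;
}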
 +
  /*TODO: void osd_req_append(struct osd_request *,
        const struct osd_obj_id *, struct bio *data_out); */
  /*TODO: void osd_req_create_write(struct osd_request *,
@@@ -830,31 -813,16 +830,31 @@@ void osd_req_flush_object(struct osd_re
  EXPORT_SYMBOL(osd_req_flush_object);
  
  void osd_req_read(struct osd_request *or,
 -      const struct osd_obj_id *obj, struct bio *bio, u64 offset)
 +      const struct osd_obj_id *obj, u64 offset,
 +      struct bio *bio, u64 len)
  {
 -      _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, bio->bi_size);
 +      _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
        WARN_ON(or->in.bio || or->in.total_bytes);
 -      bio->bi_rw &= ~(1 << BIO_RW);
 +      WARN_ON(1 == bio_rw_flagged(bio, BIO_RW));
        or->in.bio = bio;
 -      or->in.total_bytes = bio->bi_size;
 +      or->in.total_bytes = len;
  }
  EXPORT_SYMBOL(osd_req_read);
  
 +int osd_req_read_kern(struct osd_request *or,
 +      const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
 +{
 +      struct request_queue *req_q = osd_request_queue(or->osd_dev);
 +      struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
 +
 +      if (IS_ERR(bio))
 +              return PTR_ERR(bio);
 +
 +      osd_req_read(or, obj, offset, bio, len);
 +      return 0;
 +}
 +EXPORT_SYMBOL(osd_req_read_kern);
 +
  void osd_req_get_attributes(struct osd_request *or,
        const struct osd_obj_id *obj)
  {
@@@ -921,26 -889,6 +921,6 @@@ int osd_req_add_set_attr_list(struct os
  }
  EXPORT_SYMBOL(osd_req_add_set_attr_list);
  
- static int _append_map_kern(struct request *req,
-       void *buff, unsigned len, gfp_t flags)
- {
-       struct bio *bio;
-       int ret;
-       bio = bio_map_kern(req->q, buff, len, flags);
-       if (IS_ERR(bio)) {
-               OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len,
-                       PTR_ERR(bio));
-               return PTR_ERR(bio);
-       }
-       ret = blk_rq_append_bio(req->q, req, bio);
-       if (ret) {
-               OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret);
-               bio_put(bio);
-       }
-       return ret;
- }
  static int _req_append_segment(struct osd_request *or,
        unsigned padding, struct _osd_req_data_segment *seg,
        struct _osd_req_data_segment *last_seg, struct _osd_io_info *io)
                else
                        pad_buff = io->pad_buff;
  
-               ret = _append_map_kern(io->req, pad_buff, padding,
+               ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding,
                                       or->alloc_flags);
                if (ret)
                        return ret;
                io->total_bytes += padding;
        }
  
-       ret = _append_map_kern(io->req, seg->buff, seg->total_bytes,
+       ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes,
                               or->alloc_flags);
        if (ret)
                return ret;
@@@ -1265,7 -1213,7 +1245,7 @@@ static inline void osd_sec_parms_set_in
  }
  
  static int _osd_req_finalize_data_integrity(struct osd_request *or,
 -      bool has_in, bool has_out, const u8 *cap_key)
 +      bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key)
  {
        struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
        int ret;
                };
                unsigned pad;
  
 -              or->out_data_integ.data_bytes = cpu_to_be64(
 -                      or->out.bio ? or->out.bio->bi_size : 0);
 +              or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
                or->out_data_integ.set_attributes_bytes = cpu_to_be64(
                        or->set_attr.total_bytes);
                or->out_data_integ.get_attributes_bytes = cpu_to_be64(
  /*
   * osd_finalize_request and helpers
   */
+ static struct request *_make_request(struct request_queue *q, bool has_write,
+                             struct _osd_io_info *oii, gfp_t flags)
+ {
+       if (oii->bio)
+               return blk_make_request(q, oii->bio, flags);
+       else {
+               struct request *req;
+               req = blk_get_request(q, has_write ? WRITE : READ, flags);
+               if (unlikely(!req))
+                       return ERR_PTR(-ENOMEM);
+               return req;
+       }
+ }
  
  static int _init_blk_request(struct osd_request *or,
        bool has_in, bool has_out)
        struct scsi_device *scsi_device = or->osd_dev->scsi_device;
        struct request_queue *q = scsi_device->request_queue;
        struct request *req;
-       int ret = -ENOMEM;
+       int ret;
  
-       req = blk_get_request(q, has_out, flags);
-       if (!req)
+       req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags);
+       if (IS_ERR(req)) {
+               ret = PTR_ERR(req);
                goto out;
+       }
  
        or->request = req;
        req->cmd_type = REQ_TYPE_BLOCK_PC;
 +      req->cmd_flags |= REQ_QUIET;
 +
        req->timeout = or->timeout;
        req->retries = or->retries;
        req->sense = or->sense;
                or->out.req = req;
                if (has_in) {
                        /* allocate bidi request */
-                       req = blk_get_request(q, READ, flags);
-                       if (!req) {
+                       req = _make_request(q, false, &or->in, flags);
+                       if (IS_ERR(req)) {
                                OSD_DEBUG("blk_get_request for bidi failed\n");
+                               ret = PTR_ERR(req);
                                goto out;
                        }
                        req->cmd_type = REQ_TYPE_BLOCK_PC;
@@@ -1374,7 -1339,6 +1372,7 @@@ int osd_finalize_request(struct osd_req
  {
        struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
        bool has_in, has_out;
 +      u64 out_data_bytes = or->out.total_bytes;
        int ret;
  
        if (options & OSD_REQ_FUA)
                return ret;
        }
  
-       if (or->out.bio) {
-               ret = blk_rq_append_bio(or->request->q, or->out.req,
-                                       or->out.bio);
-               if (ret) {
-                       OSD_DEBUG("blk_rq_append_bio out failed\n");
-                       return ret;
-               }
-               OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n",
-                       _LLU(or->out.total_bytes), or->out.req->data_len);
-       }
-       if (or->in.bio) {
-               ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio);
-               if (ret) {
-                       OSD_DEBUG("blk_rq_append_bio in failed\n");
-                       return ret;
-               }
-               OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n",
-                       _LLU(or->in.total_bytes), or->in.req->data_len);
-       }
        or->out.pad_buff = sg_out_pad_buffer;
        or->in.pad_buff = sg_in_pad_buffer;
  
                }
        }
  
 -      ret = _osd_req_finalize_data_integrity(or, has_in, has_out, cap_key);
 +      ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
 +                                             out_data_bytes, cap_key);
        if (ret)
                return ret;
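
To summarize the osd_initiator.c hunks above: the driver-private bio appending (_append_map_kern()/blk_rq_append_bio()) is replaced by blk_rq_map_kern() plus a small _make_request() helper built on blk_make_request(), and the output byte count is handed to _osd_req_finalize_data_integrity() explicitly instead of being read back from bio->bi_size after the bio has been given to the block layer. A minimal sketch of the allocation pattern, assuming the 2.6.31-era block API; osd_alloc_request() is an illustrative name, not part of the patch:

#include <linux/blkdev.h>
#include <linux/err.h>

/*
 * Wrap an already-built bio in a request via blk_make_request(), or fall
 * back to an empty request whose payload is mapped later with
 * blk_rq_map_kern().  This mirrors the _make_request() helper added above.
 */
static struct request *osd_alloc_request(struct request_queue *q,
                                         struct bio *bio, bool is_write,
                                         gfp_t gfp)
{
        struct request *req;

        if (bio)
                return blk_make_request(q, bio, gfp); /* ERR_PTR on failure */

        req = blk_get_request(q, is_write ? WRITE : READ, gfp);
        if (!req)
                return ERR_PTR(-ENOMEM);
        return req;
}
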
  
diff --combined drivers/scsi/scsi_lib.c
index 27dbf2e8e34a6823b0cc04b598857ad05227d641,dd3f9d2b99fd05b7834e0abbb7e2cbe23e12d462..30f3275e119ed57473f1fd4e91270d38538eb025
@@@ -240,11 -240,11 +240,11 @@@ int scsi_execute(struct scsi_device *sd
         * is invalid.  Prevent the garbage from being misinterpreted
         * and prevent security leaks by zeroing out the excess data.
         */
-       if (unlikely(req->data_len > 0 && req->data_len <= bufflen))
-               memset(buffer + (bufflen - req->data_len), 0, req->data_len);
+       if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
+               memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
  
        if (resid)
-               *resid = req->data_len;
+               *resid = req->resid_len;
        ret = req->errors;
   out:
        blk_put_request(req);
@@@ -546,14 -546,9 +546,9 @@@ static struct scsi_cmnd *scsi_end_reque
         * to queue the remainder of them.
         */
        if (blk_end_request(req, error, bytes)) {
-               int leftover = (req->hard_nr_sectors << 9);
-               if (blk_pc_request(req))
-                       leftover = req->data_len;
                /* kill remainder if no retries */
                if (error && scsi_noretry_cmd(cmd))
-                       blk_end_request(req, error, leftover);
+                       blk_end_request_all(req, error);
                else {
                        if (requeue) {
                                /*
@@@ -672,34 -667,6 +667,6 @@@ void scsi_release_buffers(struct scsi_c
  }
  EXPORT_SYMBOL(scsi_release_buffers);
  
- /*
-  * Bidi commands Must be complete as a whole, both sides at once.
-  * If part of the bytes were written and lld returned
-  * scsi_in()->resid and/or scsi_out()->resid this information will be left
-  * in req->data_len and req->next_rq->data_len. The upper-layer driver can
-  * decide what to do with this information.
-  */
- static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
- {
-       struct request *req = cmd->request;
-       unsigned int dlen = req->data_len;
-       unsigned int next_dlen = req->next_rq->data_len;
-       req->data_len = scsi_out(cmd)->resid;
-       req->next_rq->data_len = scsi_in(cmd)->resid;
-       /* The req and req->next_rq have not been completed */
-       BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
-       scsi_release_buffers(cmd);
-       /*
-        * This will goose the queue request function at the end, so we don't
-        * need to worry about launching another command.
-        */
-       scsi_next_command(cmd);
- }
  /*
   * Function:    scsi_io_completion()
   *
  void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
  {
        int result = cmd->result;
-       int this_count;
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int error = 0;
                        if (!sense_deferred)
                                error = -EIO;
                }
+               req->resid_len = scsi_get_resid(cmd);
                if (scsi_bidi_cmnd(cmd)) {
-                       /* will also release_buffers */
-                       scsi_end_bidi_request(cmd);
+                       /*
+                        * Bidi commands must be completed as a whole,
+                        * both sides at once.
+                        */
+                       req->next_rq->resid_len = scsi_in(cmd)->resid;
+                       blk_end_request_all(req, 0);
+                       scsi_release_buffers(cmd);
+                       scsi_next_command(cmd);
                        return;
                }
-               req->data_len = scsi_get_resid(cmd);
        }
  
        BUG_ON(blk_bidi_rq(req)); /* bidi not supported for !blk_pc_request yet */
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
-       SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
+       SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
                                      "%d bytes done.\n",
-                                     req->nr_sectors, good_bytes));
+                                     blk_rq_sectors(req), good_bytes));
  
        /*
         * Recovered errors need reporting, but they're always treated
         */
        if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
                return;
-       this_count = blk_rq_bytes(req);
  
        error = -EIO;
  
                        if (driver_byte(result) & DRIVER_SENSE)
                                scsi_print_sense("", cmd);
                }
-               blk_end_request(req, -EIO, blk_rq_bytes(req));
+               blk_end_request_all(req, -EIO);
                scsi_next_command(cmd);
                break;
        case ACTION_REPREP:
@@@ -965,10 -940,7 +940,7 @@@ static int scsi_init_sgtable(struct req
        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
-       if (blk_pc_request(req))
-               sdb->length = req->data_len;
-       else
-               sdb->length = req->nr_sectors << 9;
+       sdb->length = blk_rq_bytes(req);
        return BLKPREP_OK;
  }
  
@@@ -1087,22 -1059,21 +1059,21 @@@ int scsi_setup_blk_pc_cmnd(struct scsi_
                if (unlikely(ret))
                        return ret;
        } else {
-               BUG_ON(req->data_len);
-               BUG_ON(req->data);
+               BUG_ON(blk_rq_bytes(req));
  
                memset(&cmd->sdb, 0, sizeof(cmd->sdb));
                req->buffer = NULL;
        }
  
        cmd->cmd_len = req->cmd_len;
-       if (!req->data_len)
+       if (!blk_rq_bytes(req))
                cmd->sc_data_direction = DMA_NONE;
        else if (rq_data_dir(req) == WRITE)
                cmd->sc_data_direction = DMA_TO_DEVICE;
        else
                cmd->sc_data_direction = DMA_FROM_DEVICE;
        
-       cmd->transfersize = req->data_len;
+       cmd->transfersize = blk_rq_bytes(req);
        cmd->allowed = req->retries;
        return BLKPREP_OK;
  }
@@@ -1212,7 -1183,7 +1183,7 @@@ int scsi_prep_return(struct request_que
                break;
        case BLKPREP_DEFER:
                /*
-                * If we defer, the elv_next_request() returns NULL, but the
+                * If we defer, the blk_peek_request() returns NULL, but the
                 * queue must be restarted, so we plug here if no returning
                 * command will automatically do that.
                 */
@@@ -1388,7 -1359,7 +1359,7 @@@ static void scsi_kill_request(struct re
        struct scsi_target *starget = scsi_target(sdev);
        struct Scsi_Host *shost = sdev->host;
  
-       blkdev_dequeue_request(req);
+       blk_start_request(req);
  
        if (unlikely(cmd == NULL)) {
                printk(KERN_CRIT "impossible request in %s.\n",
@@@ -1480,7 -1451,7 +1451,7 @@@ static void scsi_request_fn(struct requ
  
        if (!sdev) {
                printk("scsi: killing requests for dead queue\n");
-               while ((req = elv_next_request(q)) != NULL)
+               while ((req = blk_peek_request(q)) != NULL)
                        scsi_kill_request(req, q);
                return;
        }
                 * that the request is fully prepared even if we cannot 
                 * accept it.
                 */
-               req = elv_next_request(q);
+               req = blk_peek_request(q);
                if (!req || !scsi_dev_queue_ready(q, sdev))
                        break;
  
                 * Remove the request from the request list.
                 */
                if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
-                       blkdev_dequeue_request(req);
+                       blk_start_request(req);
                sdev->device_busy++;
  
                spin_unlock(q->queue_lock);
  scsi_internal_device_unblock(struct scsi_device *sdev)
  {
        struct request_queue *q = sdev->request_queue; 
 -      int err;
        unsigned long flags;
        
        /* 
         * Try to transition the scsi device to SDEV_RUNNING
         * and goose the device queue if successful.  
         */
 -      err = scsi_device_set_state(sdev, SDEV_RUNNING);
 -      if (err) {
 -              err = scsi_device_set_state(sdev, SDEV_CREATED);
 -
 -              if (err)
 -                      return err;
 -      }
 +      if (sdev->sdev_state == SDEV_BLOCK)
 +              sdev->sdev_state = SDEV_RUNNING;
 +      else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
 +              sdev->sdev_state = SDEV_CREATED;
 +      else
 +              return -EINVAL;
  
        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);
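
The scsi_lib.c hunks above are the midlayer half of the block-layer accessor conversion: raw request fields (data_len, nr_sectors, sector) give way to blk_rq_bytes()/blk_rq_sectors()/blk_rq_pos(), residual counts are reported through req->resid_len, and elv_next_request()/blkdev_dequeue_request() become blk_peek_request()/blk_start_request(). A minimal sketch of the new queue-walking and completion idiom, assuming the 2.6.31-era API; example_request_fn() is illustrative and not taken from the patch:

#include <linux/blkdev.h>
#include <linux/kernel.h>

/*
 * blk_peek_request() only looks at the head of the queue;
 * blk_start_request() dequeues the request and starts its timeout.
 * Sizes are read through accessors rather than raw request fields.
 */
static void example_request_fn(struct request_queue *q)
{
        struct request *req;

        while ((req = blk_peek_request(q)) != NULL) {
                blk_start_request(req);

                pr_debug("req %p: %u bytes (%u sectors) at sector %llu\n",
                         req, blk_rq_bytes(req), blk_rq_sectors(req),
                         (unsigned long long)blk_rq_pos(req));

                /*
                 * On completion the driver records any residual in
                 * req->resid_len and finishes the whole request; the
                 * __ variant is used because the queue lock is held
                 * inside a request_fn.
                 */
                req->resid_len = 0;
                __blk_end_request_all(req, 0);
        }
}
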
diff --combined drivers/scsi/sd.c
index d8e1d15101b73cf0614d08add504ce1ec5e6e99f,bcf3bd40bbd5fc3bfbecb29f0d7b21b5e393c8b3..878b17a9af3008ab5fe29afc7a8faa4a9c99d58d
@@@ -384,9 -384,9 +384,9 @@@ static int sd_prep_fn(struct request_qu
        struct scsi_device *sdp = q->queuedata;
        struct gendisk *disk = rq->rq_disk;
        struct scsi_disk *sdkp;
-       sector_t block = rq->sector;
+       sector_t block = blk_rq_pos(rq);
        sector_t threshold;
-       unsigned int this_count = rq->nr_sectors;
+       unsigned int this_count = blk_rq_sectors(rq);
        int ret, host_dif;
  
        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                                        this_count));
  
        if (!sdp || !scsi_device_online(sdp) ||
-           block + rq->nr_sectors > get_capacity(disk)) {
+           block + blk_rq_sectors(rq) > get_capacity(disk)) {
                SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
-                                               "Finishing %ld sectors\n",
-                                               rq->nr_sectors));
+                                               "Finishing %u sectors\n",
+                                               blk_rq_sectors(rq)));
                SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
                                                "Retry with 0x%p\n", SCpnt));
                goto out;
         * for this.
         */
        if (sdp->sector_size == 1024) {
-               if ((block & 1) || (rq->nr_sectors & 1)) {
+               if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
                        scmd_printk(KERN_ERR, SCpnt,
                                    "Bad block number requested\n");
                        goto out;
                }
        }
        if (sdp->sector_size == 2048) {
-               if ((block & 3) || (rq->nr_sectors & 3)) {
+               if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
                        scmd_printk(KERN_ERR, SCpnt,
                                    "Bad block number requested\n");
                        goto out;
                }
        }
        if (sdp->sector_size == 4096) {
-               if ((block & 7) || (rq->nr_sectors & 7)) {
+               if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
                        scmd_printk(KERN_ERR, SCpnt,
                                    "Bad block number requested\n");
                        goto out;
        }
  
        SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
-                                       "%s %d/%ld 512 byte blocks.\n",
+                                       "%s %d/%u 512 byte blocks.\n",
                                        (rq_data_dir(rq) == WRITE) ?
                                        "writing" : "reading", this_count,
-                                       rq->nr_sectors));
+                                       blk_rq_sectors(rq)));
  
        /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
        host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
@@@ -971,8 -971,8 +971,8 @@@ static struct block_device_operations s
  
  static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
  {
-       u64 start_lba = scmd->request->sector;
-       u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512);
+       u64 start_lba = blk_rq_pos(scmd->request);
+       u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
        u64 bad_lba;
        int info_valid;
  
@@@ -1510,7 -1510,7 +1510,7 @@@ got_data
                 */
                sector_size = 512;
        }
-       blk_queue_hardsect_size(sdp->request_queue, sector_size);
+       blk_queue_logical_block_size(sdp->request_queue, sector_size);
  
        {
                char cap_str_2[10], cap_str_10[10];
@@@ -1902,6 -1902,24 +1902,6 @@@ static void sd_probe_async(void *data, 
        index = sdkp->index;
        dev = &sdp->sdev_gendev;
  
 -      if (!sdp->request_queue->rq_timeout) {
 -              if (sdp->type != TYPE_MOD)
 -                      blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
 -              else
 -                      blk_queue_rq_timeout(sdp->request_queue,
 -                                           SD_MOD_TIMEOUT);
 -      }
 -
 -      device_initialize(&sdkp->dev);
 -      sdkp->dev.parent = &sdp->sdev_gendev;
 -      sdkp->dev.class = &sd_disk_class;
 -      dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
 -
 -      if (device_add(&sdkp->dev))
 -              goto out_free_index;
 -
 -      get_device(&sdp->sdev_gendev);
 -
        if (index < SD_MAX_DISKS) {
                gd->major = sd_major((index & 0xf0) >> 4);
                gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
  
        sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
                  sdp->removable ? "removable " : "");
 -
 -      return;
 -
 - out_free_index:
 -      ida_remove(&sd_index_ida, index);
  }
  
  /**
@@@ -2003,24 -2026,6 +2003,24 @@@ static int sd_probe(struct device *dev
        sdkp->openers = 0;
        sdkp->previous_state = 1;
  
 +      if (!sdp->request_queue->rq_timeout) {
 +              if (sdp->type != TYPE_MOD)
 +                      blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
 +              else
 +                      blk_queue_rq_timeout(sdp->request_queue,
 +                                           SD_MOD_TIMEOUT);
 +      }
 +
 +      device_initialize(&sdkp->dev);
 +      sdkp->dev.parent = &sdp->sdev_gendev;
 +      sdkp->dev.class = &sd_disk_class;
 +      dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
 +
 +      if (device_add(&sdkp->dev))
 +              goto out_free_index;
 +
 +      get_device(&sdp->sdev_gendev);
 +
        async_schedule(sd_probe_async, sdkp);
  
        return 0;
   **/
  static int sd_remove(struct device *dev)
  {
 -      struct scsi_disk *sdkp = dev_get_drvdata(dev);
 +      struct scsi_disk *sdkp;
  
 +      async_synchronize_full();
 +      sdkp = dev_get_drvdata(dev);
        device_del(&sdkp->dev);
        del_gendisk(sdkp->disk);
        sd_shutdown(dev);
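
In the sd.c hunks above, besides the same accessor conversion (blk_rq_pos(), blk_rq_sectors(), blk_queue_logical_block_size()), the probe path is reshuffled: the queue timeout setup and device_add() move from sd_probe_async() back into sd_probe(), and sd_remove() now calls async_synchronize_full() before using its drvdata, so teardown cannot observe a half-finished asynchronous probe. A bare-bones sketch of that pattern, assuming the async API of this kernel; the mydrv_* names are hypothetical:

#include <linux/async.h>
#include <linux/device.h>

/* The slow part of probe (spin-up, capacity read, add_disk(), ...). */
static void mydrv_probe_async(void *data, async_cookie_t cookie)
{
        struct device *dev = data;

        dev_info(dev, "slow probe work runs here\n");
}

static int mydrv_probe(struct device *dev)
{
        /*
         * Fast, must-not-fail setup goes here first, much as sd_probe()
         * now does the timeout setup and device_add() before scheduling.
         */
        async_schedule(mydrv_probe_async, dev);
        return 0;
}

static int mydrv_remove(struct device *dev)
{
        async_synchronize_full();       /* wait for mydrv_probe_async() */
        /* normal teardown follows */
        return 0;
}
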
diff --combined drivers/scsi/st.c
index 6f46e627aab35c71e86c5424a7f954d896980fc2,89bd438e1fe30692e006a0bac18c18fc4c21cd86..b33d04250bbc3badeeb3f00644f137402af07bf1
@@@ -463,7 -463,7 +463,7 @@@ static void st_scsi_execute_end(struct 
        struct scsi_tape *STp = SRpnt->stp;
  
        STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
-       STp->buffer->cmdstat.residual = req->data_len;
+       STp->buffer->cmdstat.residual = req->resid_len;
  
        if (SRpnt->waiting)
                complete(SRpnt->waiting);
@@@ -2964,7 -2964,7 +2964,7 @@@ static int st_int_ioctl(struct scsi_tap
                            !(STp->use_pf & PF_TESTED)) {
                                /* Try the other possible state of Page Format if not
                                   already tried */
 -                              STp->use_pf = !STp->use_pf | PF_TESTED;
 +                              STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
                                st_release_request(SRpnt);
                                SRpnt = NULL;
                                return st_int_ioctl(STp, cmd_in, arg);
@@@ -3983,8 -3983,8 +3983,8 @@@ static int st_probe(struct device *dev
                return -ENODEV;
        }
  
-       i = min(SDp->request_queue->max_hw_segments,
-               SDp->request_queue->max_phys_segments);
+       i = min(queue_max_hw_segments(SDp->request_queue),
+               queue_max_phys_segments(SDp->request_queue));
        if (st_max_sg_segs < i)
                i = st_max_sg_segs;
        buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
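
The st.c hunks above pick up the command residual from req->resid_len, restate the Page Format retry as an explicit bit toggle ((use_pf ^ USE_PF) | PF_TESTED) instead of a logical NOT of the whole flag word, and read the queue segment limits through accessors rather than the raw request_queue fields. A small sketch of the segment-sizing idiom, assuming the 2.6.31 queue accessors; example_sg_segments() is an illustrative name:

#include <linux/blkdev.h>
#include <linux/kernel.h>

/*
 * Derive a scatter-gather table size from the queue limits via the
 * accessor functions, capped by a driver-specific maximum (st.c caps it
 * with st_max_sg_segs in the hunk above).
 */
static unsigned int example_sg_segments(struct request_queue *q,
                                        unsigned int driver_max)
{
        unsigned int segs = min(queue_max_hw_segments(q),
                                queue_max_phys_segments(q));

        return min_t(unsigned int, segs, driver_max);
}
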
diff --combined fs/exofs/osd.c
index 48cc4d11d3fbff46c4fe10521ed1debfaf4ec10b,06ca92672eb5d6118ee644074019a650a96125f3..b3d2ccb87aaa8981374e824066bfd2aa608fa86a
@@@ -50,10 -50,10 +50,10 @@@ int exofs_check_ok_resid(struct osd_req
  
        /* FIXME: should be include in osd_sense_info */
        if (in_resid)
-               *in_resid = or->in.req ? or->in.req->data_len : 0;
+               *in_resid = or->in.req ? or->in.req->resid_len : 0;
  
        if (out_resid)
-               *out_resid = or->out.req ? or->out.req->data_len : 0;
+               *out_resid = or->out.req ? or->out.req->resid_len : 0;
  
        return ret;
  }
@@@ -125,3 -125,29 +125,3 @@@ int extract_attr_from_req(struct osd_re
  
        return -EIO;
  }
 -
 -int osd_req_read_kern(struct osd_request *or,
 -      const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
 -{
 -      struct request_queue *req_q = or->osd_dev->scsi_device->request_queue;
 -      struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
 -
 -      if (!bio)
 -              return -ENOMEM;
 -
 -      osd_req_read(or, obj, bio, offset);
 -      return 0;
 -}
 -
 -int osd_req_write_kern(struct osd_request *or,
 -      const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
 -{
 -      struct request_queue *req_q = or->osd_dev->scsi_device->request_queue;
 -      struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
 -
 -      if (!bio)
 -              return -ENOMEM;
 -
 -      osd_req_write(or, obj, bio, offset);
 -      return 0;
 -}
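
Finally, the fs/exofs/osd.c hunks take the residuals from req->resid_len and drop the filesystem's private osd_req_read_kern()/osd_req_write_kern() copies, which this series moves into libosd with the signature shown in the deleted code. A hedged sketch of a caller using the libosd helper, assuming the osd_start_request()/osd_finalize_request()/osd_execute_request()/osd_end_request() entry points of this era's osd_initiator API and a capability (cred) prepared by the caller; the function name and the trimmed error handling are illustrative:

#include <linux/gfp.h>
#include <scsi/osd_initiator.h>

/* Write a kernel buffer to an OSD object through libosd. */
static int example_write_kern(struct osd_dev *od, const struct osd_obj_id *obj,
                              u64 offset, void *buf, u64 len, const u8 *cred)
{
        struct osd_request *or;
        int ret;

        or = osd_start_request(od, GFP_KERNEL);
        if (unlikely(!or))
                return -ENOMEM;

        ret = osd_req_write_kern(or, obj, offset, buf, len);
        if (ret)
                goto out;

        ret = osd_finalize_request(or, 0, cred, NULL);
        if (ret)
                goto out;

        ret = osd_execute_request(or);
out:
        osd_end_request(or);
        return ret;
}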