Merge patch series "lpfc: Update lpfc to revision 14.2.0.15"
author    Martin K. Petersen <martin.petersen@oracle.com>
          Fri, 13 Oct 2023 21:00:47 +0000 (17:00 -0400)
committer Martin K. Petersen <martin.petersen@oracle.com>
          Fri, 13 Oct 2023 21:00:47 +0000 (17:00 -0400)
Justin Tee <justintee8345@gmail.com> says:

Update lpfc to revision 14.2.0.15

This patch set contains error handling fixes, ELS bug fixes, and
logging improvements.

The patches were cut against Martin's 6.7/scsi-queue tree.

Link: https://lore.kernel.org/r/20231009161812.97232-1-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
101 files changed:
MAINTAINERS
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/message/fusion/mptctl.c
drivers/message/fusion/mptfc.c
drivers/message/fusion/mptsas.c
drivers/message/fusion/mptscsih.c
drivers/message/fusion/mptscsih.h
drivers/scsi/Kconfig
drivers/scsi/aic7xxx/aic79xx_osm.c
drivers/scsi/aic7xxx/aic7xxx_osm.c
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/device_handler/scsi_dh_hp_sw.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/elx/efct/efct_lio.c
drivers/scsi/esas2r/esas2r_ioctl.c
drivers/scsi/fnic/fnic_fcs.c
drivers/scsi/hisi_sas/hisi_sas.h
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/imm.c
drivers/scsi/imm.h
drivers/scsi/ips.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/libsas/sas_init.c
drivers/scsi/libsas/sas_internal.h
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/megaraid.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpi3mr/mpi3mr_os.c
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm8001_sas.h
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/pmcraid.c
drivers/scsi/ppa.c
drivers/scsi/qedf/qedf.h
drivers/scsi/qedf/qedf_io.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qla1280.c
drivers/scsi/qla2xxx/qla_dfs.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_nvme.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/scsi.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_spi.c
drivers/scsi/sd.c
drivers/scsi/sr.c
drivers/scsi/sym53c8xx_2/sym_glue.c
drivers/target/iscsi/Kconfig
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_tmr.c
drivers/target/loopback/tcm_loop.c
drivers/target/sbp/sbp_target.c
drivers/target/target_core_alua.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/target/tcm_fc/tfc_conf.c
drivers/ufs/core/ufshcd.c
drivers/ufs/host/cdns-pltfrm.c
drivers/ufs/host/tc-dwc-g210-pltfrm.c
drivers/ufs/host/ti-j721e-ufs.c
drivers/ufs/host/ufs-exynos.c
drivers/ufs/host/ufs-hisi.c
drivers/ufs/host/ufs-mediatek.c
drivers/ufs/host/ufs-qcom.c
drivers/ufs/host/ufs-qcom.h
drivers/ufs/host/ufs-renesas.c
drivers/ufs/host/ufs-sprd.c
drivers/ufs/host/ufshcd-pci.c
drivers/ufs/host/ufshcd-pltfrm.c
drivers/ufs/host/ufshcd-pltfrm.h
drivers/usb/gadget/function/f_tcm.c
drivers/vhost/scsi.c
drivers/xen/xen-scsiback.c
include/linux/nvme-fc-driver.h
include/scsi/libsas.h
include/target/target_core_base.h
include/target/target_core_fabric.h
include/trace/events/ufs.h
include/ufs/ufs.h
include/ufs/ufshcd.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 90f13281d29708439ba448d26308109a3cfd747b..be1cbc6c2059073abbd8c1abf13d5ac751c88e9a 100644
@@ -11144,7 +11144,6 @@ M:      Sagi Grimberg <sagi@grimberg.me>
 L:     linux-rdma@vger.kernel.org
 L:     target-devel@vger.kernel.org
 S:     Supported
-W:     http://www.linux-iscsi.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
 F:     drivers/infiniband/ulp/isert
 
@@ -13535,6 +13534,7 @@ MEGARAID SCSI/SAS DRIVERS
 M:     Kashyap Desai <kashyap.desai@broadcom.com>
 M:     Sumit Saxena <sumit.saxena@broadcom.com>
 M:     Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
+M:     Chandrakanth patil <chandrakanth.patil@broadcom.com>
 L:     megaraidlinux.pdl@broadcom.com
 L:     linux-scsi@vger.kernel.org
 S:     Maintained
@@ -19162,7 +19162,6 @@ M:      "Martin K. Petersen" <martin.petersen@oracle.com>
 L:     linux-scsi@vger.kernel.org
 L:     target-devel@vger.kernel.org
 S:     Supported
-W:     http://www.linux-iscsi.org
 Q:     https://patchwork.kernel.org/project/target-devel/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
 F:     Documentation/target/
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index c12005eab14c1981866864e02513cec8117ab390..58f70cfec45a72abd8df2ba88098a92f7fcacb4a 100644
@@ -3867,6 +3867,9 @@ static const struct target_core_fabric_ops srpt_template = {
        .tfc_discovery_attrs            = srpt_da_attrs,
        .tfc_wwn_attrs                  = srpt_wwn_attrs,
        .tfc_tpg_attrib_attrs           = srpt_tpg_attrib_attrs,
+
+       .default_submit_type            = TARGET_DIRECT_SUBMIT,
+       .direct_submit_supp             = 1,
 };
 
 /**
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index dd028df4b283d55c24e25fa086bd400f5653c5be..9f3999750c23a5a132c0bff228b17c0febb6c203 100644
@@ -1328,8 +1328,8 @@ mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
 
        /* Set the Version Strings.
         */
-       strncpy (karg->driverVersion, MPT_LINUX_PACKAGE_NAME, MPT_IOCTL_VERSION_LENGTH);
-       karg->driverVersion[MPT_IOCTL_VERSION_LENGTH-1]='\0';
+       strscpy_pad(karg->driverVersion, MPT_LINUX_PACKAGE_NAME,
+                   sizeof(karg->driverVersion));
 
        karg->busChangeEvent = 0;
        karg->hostId = ioc->pfacts[port].PortSCSIID;
@@ -1493,10 +1493,8 @@ mptctl_readtest (MPT_ADAPTER *ioc, unsigned long arg)
 #else
        karg.chip_type = ioc->pcidev->device;
 #endif
-       strncpy (karg.name, ioc->name, MPT_MAX_NAME);
-       karg.name[MPT_MAX_NAME-1]='\0';
-       strncpy (karg.product, ioc->prod_name, MPT_PRODUCT_LENGTH);
-       karg.product[MPT_PRODUCT_LENGTH-1]='\0';
+       strscpy_pad(karg.name, ioc->name, sizeof(karg.name));
+       strscpy_pad(karg.product, ioc->prod_name, sizeof(karg.product));
 
        /* Copy the data from kernel memory to user memory
         */
@@ -2394,7 +2392,7 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
        cfg.dir = 0;    /* read */
        cfg.timeout = 10;
 
-       strncpy(karg.serial_number, " ", 24);
+       strscpy_pad(karg.serial_number, " ", sizeof(karg.serial_number));
        if (mpt_config(ioc, &cfg) == 0) {
                if (cfg.cfghdr.hdr->PageLength > 0) {
                        /* Issue the second config page request */
@@ -2408,8 +2406,9 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
                                if (mpt_config(ioc, &cfg) == 0) {
                                        ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf;
                                        if (strlen(pdata->BoardTracerNumber) > 1) {
-                                               strscpy(karg.serial_number,
-                                                       pdata->BoardTracerNumber, 24);
+                                               strscpy_pad(karg.serial_number,
+                                                       pdata->BoardTracerNumber,
+                                                       sizeof(karg.serial_number));
                                        }
                                }
                                dma_free_coherent(&ioc->pcidev->dev,
@@ -2456,7 +2455,7 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
                }
        }
 
-       /* 
+       /*
         * Gather ISTWI(Industry Standard Two Wire Interface) Data
         */
        if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 22e7779a332b7cb5a9a5ac18566e3c34c691921a..aa6bb764df3eb027ed062182d3e20b748e013463 100644
@@ -183,73 +183,109 @@ static struct fc_function_template mptfc_transport_functions = {
 };
 
 static int
-mptfc_block_error_handler(struct scsi_cmnd *SCpnt,
-                         int (*func)(struct scsi_cmnd *SCpnt),
-                         const char *caller)
+mptfc_block_error_handler(struct fc_rport *rport)
 {
        MPT_SCSI_HOST           *hd;
-       struct scsi_device      *sdev = SCpnt->device;
-       struct Scsi_Host        *shost = sdev->host;
-       struct fc_rport         *rport = starget_to_rport(scsi_target(sdev));
+       struct Scsi_Host        *shost = rport_to_shost(rport);
        unsigned long           flags;
        int                     ready;
-       MPT_ADAPTER             *ioc;
+       MPT_ADAPTER             *ioc;
        int                     loops = 40;     /* seconds */
 
-       hd = shost_priv(SCpnt->device->host);
+       hd = shost_priv(shost);
        ioc = hd->ioc;
        spin_lock_irqsave(shost->host_lock, flags);
        while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY
         || (loops > 0 && ioc->active == 0)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
-                       "mptfc_block_error_handler.%d: %d:%llu, port status is "
-                       "%x, active flag %d, deferring %s recovery.\n",
+                       "mptfc_block_error_handler.%d: %s, port status is "
+                       "%x, active flag %d, deferring recovery.\n",
                        ioc->name, ioc->sh->host_no,
-                       SCpnt->device->id, SCpnt->device->lun,
-                       ready, ioc->active, caller));
+                       dev_name(&rport->dev), ready, ioc->active));
                msleep(1000);
                spin_lock_irqsave(shost->host_lock, flags);
                loops --;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);
 
-       if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata
-        || ioc->active == 0) {
+       if (ready == DID_NO_CONNECT || ioc->active == 0) {
                dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
-                       "%s.%d: %d:%llu, failing recovery, "
-                       "port state %x, active %d, vdevice %p.\n", caller,
+                       "mpt_block_error_handler.%d: %s, failing recovery, "
+                       "port state %x, active %d.\n",
                        ioc->name, ioc->sh->host_no,
-                       SCpnt->device->id, SCpnt->device->lun, ready,
-                       ioc->active, SCpnt->device->hostdata));
+                       dev_name(&rport->dev), ready, ioc->active));
                return FAILED;
        }
-       dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
-               "%s.%d: %d:%llu, executing recovery.\n", caller,
-               ioc->name, ioc->sh->host_no,
-               SCpnt->device->id, SCpnt->device->lun));
-       return (*func)(SCpnt);
+       return SUCCESS;
 }
 
 static int
 mptfc_abort(struct scsi_cmnd *SCpnt)
 {
-       return
-           mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__);
+       struct Scsi_Host *shost = SCpnt->device->host;
+       struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));
+       MPT_SCSI_HOST __maybe_unused *hd = shost_priv(shost);
+       int rtn;
+
+       rtn = mptfc_block_error_handler(rport);
+       if (rtn == SUCCESS) {
+               dfcprintk (hd->ioc, printk(MYIOC_s_DEBUG_FMT
+                       "%s.%d: %d:%llu, executing recovery.\n", __func__,
+                       hd->ioc->name, shost->host_no,
+                       SCpnt->device->id, SCpnt->device->lun));
+               rtn = mptscsih_abort(SCpnt);
+       }
+       return rtn;
 }
 
 static int
 mptfc_dev_reset(struct scsi_cmnd *SCpnt)
 {
-       return
-           mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__);
+       struct Scsi_Host *shost = SCpnt->device->host;
+       struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));
+       MPT_SCSI_HOST __maybe_unused *hd = shost_priv(shost);
+       int rtn;
+
+       rtn = mptfc_block_error_handler(rport);
+       if (rtn == SUCCESS) {
+               dfcprintk (hd->ioc, printk(MYIOC_s_DEBUG_FMT
+                       "%s.%d: %d:%llu, executing recovery.\n", __func__,
+                       hd->ioc->name, shost->host_no,
+                       SCpnt->device->id, SCpnt->device->lun));
+               rtn = mptscsih_dev_reset(SCpnt);
+       }
+       return rtn;
 }
 
 static int
 mptfc_bus_reset(struct scsi_cmnd *SCpnt)
 {
-       return
-           mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__);
+       struct Scsi_Host *shost = SCpnt->device->host;
+       MPT_SCSI_HOST __maybe_unused *hd = shost_priv(shost);
+       int channel = SCpnt->device->channel;
+       struct mptfc_rport_info *ri;
+       int rtn;
+
+       list_for_each_entry(ri, &hd->ioc->fc_rports, list) {
+               if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
+                       VirtTarget *vtarget = ri->starget->hostdata;
+
+                       if (!vtarget || vtarget->channel != channel)
+                               continue;
+                       rtn = fc_block_rport(ri->rport);
+                       if (rtn != 0)
+                               break;
+               }
+       }
+       if (rtn == 0) {
+               dfcprintk (hd->ioc, printk(MYIOC_s_DEBUG_FMT
+                       "%s.%d: %d:%llu, executing recovery.\n", __func__,
+                       hd->ioc->name, shost->host_no,
+                       SCpnt->device->id, SCpnt->device->lun));
+               rtn = mptscsih_bus_reset(SCpnt);
+       }
+       return rtn;
 }
 
 static void
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 86f16f3ea47870b0763c16d1f070e48b4a1adaa5..300f8e955a5319f92361a607a754e36be1a669d8 100644
@@ -2964,17 +2964,17 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
                        goto out_free;
 
                manufacture_reply = data_out + sizeof(struct rep_manu_request);
-               strncpy(edev->vendor_id, manufacture_reply->vendor_id,
-                       SAS_EXPANDER_VENDOR_ID_LEN);
-               strncpy(edev->product_id, manufacture_reply->product_id,
-                       SAS_EXPANDER_PRODUCT_ID_LEN);
-               strncpy(edev->product_rev, manufacture_reply->product_rev,
-                       SAS_EXPANDER_PRODUCT_REV_LEN);
+               strscpy(edev->vendor_id, manufacture_reply->vendor_id,
+                       sizeof(edev->vendor_id));
+               strscpy(edev->product_id, manufacture_reply->product_id,
+                       sizeof(edev->product_id));
+               strscpy(edev->product_rev, manufacture_reply->product_rev,
+                       sizeof(edev->product_rev));
                edev->level = manufacture_reply->sas_format;
                if (manufacture_reply->sas_format) {
-                       strncpy(edev->component_vendor_id,
+                       strscpy(edev->component_vendor_id,
                                manufacture_reply->component_vendor_id,
-                               SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+                               sizeof(edev->component_vendor_id));
                        tmp = (u8 *)&manufacture_reply->component_id;
                        edev->component_id = tmp[0] << 8 | tmp[1];
                        edev->component_revision_id =
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 2bc17087d17ddae636b4e7df47080722b241a6f3..9080a73b4ea64aab61ca1b93dc3cc65995c8bff6 100644
@@ -1793,7 +1793,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mptscsih_dev_reset - Perform a SCSI TARGET_RESET!  new_eh variant
+ *     mptscsih_dev_reset - Perform a SCSI LOGICAL_UNIT_RESET!
  *     @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
  *
  *     (linux scsi_host_template.eh_dev_reset_handler routine)
@@ -1808,6 +1808,58 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
        VirtDevice       *vdevice;
        MPT_ADAPTER     *ioc;
 
+       /* If we can't locate our host adapter structure, return FAILED status.
+        */
+       if ((hd = shost_priv(SCpnt->device->host)) == NULL){
+               printk(KERN_ERR MYNAM ": lun reset: "
+                  "Can't locate host! (sc=%p)\n", SCpnt);
+               return FAILED;
+       }
+
+       ioc = hd->ioc;
+       printk(MYIOC_s_INFO_FMT "attempting lun reset! (sc=%p)\n",
+              ioc->name, SCpnt);
+       scsi_print_command(SCpnt);
+
+       vdevice = SCpnt->device->hostdata;
+       if (!vdevice || !vdevice->vtarget) {
+               retval = 0;
+               goto out;
+       }
+
+       retval = mptscsih_IssueTaskMgmt(hd,
+                               MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET,
+                               vdevice->vtarget->channel,
+                               vdevice->vtarget->id, vdevice->lun, 0,
+                               mptscsih_get_tm_timeout(ioc));
+
+ out:
+       printk (MYIOC_s_INFO_FMT "lun reset: %s (sc=%p)\n",
+           ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+
+       if (retval == 0)
+               return SUCCESS;
+       else
+               return FAILED;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *     mptscsih_target_reset - Perform a SCSI TARGET_RESET!
+ *     @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
+ *
+ *     (linux scsi_host_template.eh_target_reset_handler routine)
+ *
+ *     Returns SUCCESS or FAILED.
+ **/
+int
+mptscsih_target_reset(struct scsi_cmnd * SCpnt)
+{
+       MPT_SCSI_HOST   *hd;
+       int              retval;
+       VirtDevice       *vdevice;
+       MPT_ADAPTER     *ioc;
+
        /* If we can't locate our host adapter structure, return FAILED status.
         */
        if ((hd = shost_priv(SCpnt->device->host)) == NULL){
@@ -3256,6 +3308,7 @@ EXPORT_SYMBOL(mptscsih_slave_destroy);
 EXPORT_SYMBOL(mptscsih_slave_configure);
 EXPORT_SYMBOL(mptscsih_abort);
 EXPORT_SYMBOL(mptscsih_dev_reset);
+EXPORT_SYMBOL(mptscsih_target_reset);
 EXPORT_SYMBOL(mptscsih_bus_reset);
 EXPORT_SYMBOL(mptscsih_host_reset);
 EXPORT_SYMBOL(mptscsih_bios_param);
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index a22c5eaf703ca178ba758cb91b1827361e49e8f5..e3d92c3926733e9b37436aa04727dc1c556c24b6 100644
@@ -120,6 +120,7 @@ extern void mptscsih_slave_destroy(struct scsi_device *device);
 extern int mptscsih_slave_configure(struct scsi_device *device);
 extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
 extern int mptscsih_dev_reset(struct scsi_cmnd * SCpnt);
+extern int mptscsih_target_reset(struct scsi_cmnd * SCpnt);
 extern int mptscsih_bus_reset(struct scsi_cmnd * SCpnt);
 extern int mptscsih_host_reset(struct scsi_cmnd *SCpnt);
 extern int mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev, sector_t capacity, int geom[]);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 695a57d894cdd0cba13bd1462ca7f2bf4a368b9a..addac7fbe37b9870380cc715acf923344071e6e6 100644
@@ -834,21 +834,6 @@ config SCSI_IMM
          To compile this driver as a module, choose M here: the
          module will be called imm.
 
-config SCSI_IZIP_EPP16
-       bool "ppa/imm option - Use slow (but safe) EPP-16"
-       depends on SCSI_IMM
-       help
-         EPP (Enhanced Parallel Port) is a standard for parallel ports which
-         allows them to act as expansion buses that can handle up to 64
-         peripheral devices.
-
-         Some parallel port chipsets are slower than their motherboard, and
-         so we have to control the state of the chipset's FIFO queue every
-         now and then to avoid data loss. This will be done if you say Y
-         here.
-
-         Generally, saying Y is the safe option and slows things down a bit.
-
 config SCSI_IZIP_SLOW_CTR
        bool "ppa/imm option - Assume slow parport control register"
        depends on SCSI_PPA || SCSI_IMM
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index f2f3405cdec5e9a69f0e3f943d2482fbca382572..b3075a022d99a481896a113023a04e1af5c64f7e 100644
@@ -536,13 +536,18 @@ ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
        struct scsi_cmnd *cmd;
 
        cmd = scb->io_ctx;
-       ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
-       scsi_dma_unmap(cmd);
+       if (cmd) {
+               ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
+               scsi_dma_unmap(cmd);
+       }
 }
 
 /******************************** Macros **************************************/
-#define BUILD_SCSIID(ahd, cmd)                                         \
-       (((scmd_id(cmd) << TID_SHIFT) & TID) | (ahd)->our_id)
+static inline unsigned int ahd_build_scsiid(struct ahd_softc *ahd,
+                                           struct scsi_device *sdev)
+{
+       return ((sdev_id(sdev) << TID_SHIFT) & TID) | (ahd)->our_id;
+}
 
 /*
  * Return a string describing the driver.
@@ -811,14 +816,14 @@ ahd_linux_dev_reset(struct scsi_cmnd *cmd)
 
        tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
                                    cmd->device->id, &tstate);
-       reset_scb->io_ctx = cmd;
+       reset_scb->io_ctx = NULL;
        reset_scb->platform_data->dev = dev;
        reset_scb->sg_count = 0;
        ahd_set_residual(reset_scb, 0);
        ahd_set_sense_residual(reset_scb, 0);
        reset_scb->platform_data->xfer_len = 0;
        reset_scb->hscb->control = 0;
-       reset_scb->hscb->scsiid = BUILD_SCSIID(ahd,cmd);
+       reset_scb->hscb->scsiid = ahd_build_scsiid(ahd, cmd->device);
        reset_scb->hscb->lun = cmd->device->lun;
        reset_scb->hscb->cdb_len = 0;
        reset_scb->hscb->task_management = SIU_TASKMGMT_LUN_RESET;
@@ -1577,7 +1582,7 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
         * Fill out basics of the HSCB.
         */
        hscb->control = 0;
-       hscb->scsiid = BUILD_SCSIID(ahd, cmd);
+       hscb->scsiid = ahd_build_scsiid(ahd, cmd->device);
        hscb->lun = cmd->device->lun;
        scb->hscb->task_management = 0;
        mask = SCB_GET_TARGET_MASK(ahd, scb);
@@ -1766,9 +1771,16 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
        dev = scb->platform_data->dev;
        dev->active--;
        dev->openings++;
-       if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
-               cmd->result &= ~(CAM_DEV_QFRZN << 16);
-               dev->qfrozen--;
+       if (cmd) {
+               if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
+                       cmd->result &= ~(CAM_DEV_QFRZN << 16);
+                       dev->qfrozen--;
+               }
+       } else if (scb->flags & SCB_DEVICE_RESET) {
+               if (ahd->platform_data->eh_done)
+                       complete(ahd->platform_data->eh_done);
+               ahd_free_scb(ahd, scb);
+               return;
        }
        ahd_linux_unmap_scb(ahd, scb);
 
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index d3b1082654d575df9e1ad6f54b520b4ff9bef790..4ae0a1c4d37488006fdf604ed838a2a76d6bb613 100644
@@ -366,7 +366,8 @@ static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc,
                                         struct scsi_cmnd *cmd);
 static void ahc_linux_freeze_simq(struct ahc_softc *ahc);
 static void ahc_linux_release_simq(struct ahc_softc *ahc);
-static int  ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag);
+static int  ahc_linux_queue_recovery_cmd(struct scsi_device *sdev,
+                                        struct scsi_cmnd *cmd);
 static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
 static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc,
                                     struct ahc_devinfo *devinfo);
@@ -728,7 +729,7 @@ ahc_linux_abort(struct scsi_cmnd *cmd)
 {
        int error;
 
-       error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
+       error = ahc_linux_queue_recovery_cmd(cmd->device, cmd);
        if (error != SUCCESS)
                printk("aic7xxx_abort returns 0x%x\n", error);
        return (error);
@@ -742,7 +743,7 @@ ahc_linux_dev_reset(struct scsi_cmnd *cmd)
 {
        int error;
 
-       error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
+       error = ahc_linux_queue_recovery_cmd(cmd->device, NULL);
        if (error != SUCCESS)
                printk("aic7xxx_dev_reset returns 0x%x\n", error);
        return (error);
@@ -798,11 +799,18 @@ struct scsi_host_template aic7xxx_driver_template = {
 
 /**************************** Tasklet Handler *********************************/
 
-/******************************** Macros **************************************/
-#define BUILD_SCSIID(ahc, cmd)                                             \
-       ((((cmd)->device->id << TID_SHIFT) & TID)                           \
-       | (((cmd)->device->channel == 0) ? (ahc)->our_id : (ahc)->our_id_b) \
-       | (((cmd)->device->channel == 0) ? 0 : TWIN_CHNLB))
+
+static inline unsigned int ahc_build_scsiid(struct ahc_softc *ahc,
+                                           struct scsi_device *sdev)
+{
+       unsigned int scsiid = (sdev->id << TID_SHIFT) & TID;
+
+       if (sdev->channel == 0)
+               scsiid |= ahc->our_id;
+       else
+               scsiid |= ahc->our_id_b | TWIN_CHNLB;
+       return scsiid;
+}
 
 /******************************** Bus DMA *************************************/
 int
@@ -1457,7 +1465,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
         * Fill out basics of the HSCB.
         */
        hscb->control = 0;
-       hscb->scsiid = BUILD_SCSIID(ahc, cmd);
+       hscb->scsiid = ahc_build_scsiid(ahc, cmd->device);
        hscb->lun = cmd->device->lun;
        mask = SCB_GET_TARGET_MASK(ahc, scb);
        tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
@@ -2029,11 +2037,12 @@ ahc_linux_release_simq(struct ahc_softc *ahc)
 }
 
 static int
-ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
+ahc_linux_queue_recovery_cmd(struct scsi_device *sdev,
+                            struct scsi_cmnd *cmd)
 {
        struct ahc_softc *ahc;
        struct ahc_linux_device *dev;
-       struct scb *pending_scb;
+       struct scb *pending_scb = NULL, *scb;
        u_int  saved_scbptr;
        u_int  active_scb_index;
        u_int  last_phase;
@@ -2046,18 +2055,19 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
        int    disconnected;
        unsigned long flags;
 
-       pending_scb = NULL;
        paused = FALSE;
        wait = FALSE;
-       ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
+       ahc = *(struct ahc_softc **)sdev->host->hostdata;
 
-       scmd_printk(KERN_INFO, cmd, "Attempting to queue a%s message\n",
-              flag == SCB_ABORT ? "n ABORT" : " TARGET RESET");
+       sdev_printk(KERN_INFO, sdev, "Attempting to queue a%s message\n",
+              cmd ? "n ABORT" : " TARGET RESET");
 
-       printk("CDB:");
-       for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
-               printk(" 0x%x", cmd->cmnd[cdb_byte]);
-       printk("\n");
+       if (cmd) {
+               printk("CDB:");
+               for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
+                       printk(" 0x%x", cmd->cmnd[cdb_byte]);
+               printk("\n");
+       }
 
        ahc_lock(ahc, &flags);
 
@@ -2068,7 +2078,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
         * at all, and the system wanted us to just abort the
         * command, return success.
         */
-       dev = scsi_transport_device_data(cmd->device);
+       dev = scsi_transport_device_data(sdev);
 
        if (dev == NULL) {
                /*
@@ -2076,13 +2086,12 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
                 * so we must not still own the command.
                 */
                printk("%s:%d:%d:%d: Is not an active device\n",
-                      ahc_name(ahc), cmd->device->channel, cmd->device->id,
-                      (u8)cmd->device->lun);
+                      ahc_name(ahc), sdev->channel, sdev->id, (u8)sdev->lun);
                retval = SUCCESS;
                goto no_cmd;
        }
 
-       if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
+       if (cmd && (dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
         && ahc_search_untagged_queues(ahc, cmd, cmd->device->id,
                                       cmd->device->channel + 'A',
                                       (u8)cmd->device->lun,
@@ -2097,25 +2106,28 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
        /*
         * See if we can find a matching cmd in the pending list.
         */
-       LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
-               if (pending_scb->io_ctx == cmd)
-                       break;
-       }
-
-       if (pending_scb == NULL && flag == SCB_DEVICE_RESET) {
-
+       if (cmd) {
+               LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
+                       if (scb->io_ctx == cmd) {
+                               pending_scb = scb;
+                               break;
+                       }
+               }
+       } else {
                /* Any SCB for this device will do for a target reset */
-               LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
-                       if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
-                                         scmd_channel(cmd) + 'A',
+               LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
+                       if (ahc_match_scb(ahc, scb, sdev->id,
+                                         sdev->channel + 'A',
                                          CAM_LUN_WILDCARD,
-                                         SCB_LIST_NULL, ROLE_INITIATOR))
+                                         SCB_LIST_NULL, ROLE_INITIATOR)) {
+                               pending_scb = scb;
                                break;
+                       }
                }
        }
 
        if (pending_scb == NULL) {
-               scmd_printk(KERN_INFO, cmd, "Command not found\n");
+               sdev_printk(KERN_INFO, sdev, "Command not found\n");
                goto no_cmd;
        }
 
@@ -2146,22 +2158,22 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
        ahc_dump_card_state(ahc);
 
        disconnected = TRUE;
-       if (flag == SCB_ABORT) {
-               if (ahc_search_qinfifo(ahc, cmd->device->id,
-                                      cmd->device->channel + 'A',
-                                      cmd->device->lun,
+       if (cmd) {
+               if (ahc_search_qinfifo(ahc, sdev->id,
+                                      sdev->channel + 'A',
+                                      sdev->lun,
                                       pending_scb->hscb->tag,
                                       ROLE_INITIATOR, CAM_REQ_ABORTED,
                                       SEARCH_COMPLETE) > 0) {
                        printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
-                              ahc_name(ahc), cmd->device->channel,
-                              cmd->device->id, (u8)cmd->device->lun);
+                              ahc_name(ahc), sdev->channel,
+                              sdev->id, (u8)sdev->lun);
                        retval = SUCCESS;
                        goto done;
                }
-       } else if (ahc_search_qinfifo(ahc, cmd->device->id,
-                                     cmd->device->channel + 'A',
-                                     cmd->device->lun,
+       } else if (ahc_search_qinfifo(ahc, sdev->id,
+                                     sdev->channel + 'A',
+                                     sdev->lun,
                                      pending_scb->hscb->tag,
                                      ROLE_INITIATOR, /*status*/0,
                                      SEARCH_COUNT) > 0) {
@@ -2174,7 +2186,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
                bus_scb = ahc_lookup_scb(ahc, ahc_inb(ahc, SCB_TAG));
                if (bus_scb == pending_scb)
                        disconnected = FALSE;
-               else if (flag != SCB_ABORT
+               else if (!cmd
                      && ahc_inb(ahc, SAVED_SCSIID) == pending_scb->hscb->scsiid
                      && ahc_inb(ahc, SAVED_LUN) == SCB_GET_LUN(pending_scb))
                        disconnected = FALSE;
@@ -2194,18 +2206,18 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
        saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
        if (last_phase != P_BUSFREE
         && (pending_scb->hscb->tag == active_scb_index
-         || (flag == SCB_DEVICE_RESET
-          && SCSIID_TARGET(ahc, saved_scsiid) == scmd_id(cmd)))) {
+         || (!cmd && SCSIID_TARGET(ahc, saved_scsiid) == sdev->id))) {
 
                /*
                 * We're active on the bus, so assert ATN
                 * and hope that the target responds.
                 */
                pending_scb = ahc_lookup_scb(ahc, active_scb_index);
-               pending_scb->flags |= SCB_RECOVERY_SCB|flag;
+               pending_scb->flags |= SCB_RECOVERY_SCB;
+               pending_scb->flags |= cmd ? SCB_ABORT : SCB_DEVICE_RESET;
                ahc_outb(ahc, MSG_OUT, HOST_MSG);
                ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
-               scmd_printk(KERN_INFO, cmd, "Device is active, asserting ATN\n");
+               sdev_printk(KERN_INFO, sdev, "Device is active, asserting ATN\n");
                wait = TRUE;
        } else if (disconnected) {
 
@@ -2226,7 +2238,8 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
                 * an unsolicited reselection occurred.
                 */
                pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
-               pending_scb->flags |= SCB_RECOVERY_SCB|flag;
+               pending_scb->flags |= SCB_RECOVERY_SCB;
+               pending_scb->flags |= cmd ? SCB_ABORT : SCB_DEVICE_RESET;
 
                /*
                 * Remove any cached copy of this SCB in the
@@ -2235,9 +2248,9 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
                 * same element in the SCB, SCB_NEXT, for
                 * both the qinfifo and the disconnected list.
                 */
-               ahc_search_disc_list(ahc, cmd->device->id,
-                                    cmd->device->channel + 'A',
-                                    cmd->device->lun, pending_scb->hscb->tag,
+               ahc_search_disc_list(ahc, sdev->id,
+                                    sdev->channel + 'A',
+                                    sdev->lun, pending_scb->hscb->tag,
                                     /*stop_on_first*/TRUE,
                                     /*remove*/TRUE,
                                     /*save_state*/FALSE);
@@ -2260,9 +2273,9 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
                 * so we are the next SCB for this target
                 * to run.
                 */
-               ahc_search_qinfifo(ahc, cmd->device->id,
-                                  cmd->device->channel + 'A',
-                                  cmd->device->lun, SCB_LIST_NULL,
+               ahc_search_qinfifo(ahc, sdev->id,
+                                  sdev->channel + 'A',
+                                  (u8)sdev->lun, SCB_LIST_NULL,
                                   ROLE_INITIATOR, CAM_REQUEUE_REQ,
                                   SEARCH_COMPLETE);
                ahc_qinfifo_requeue_tail(ahc, pending_scb);
@@ -2271,7 +2284,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
                printk("Device is disconnected, re-queuing SCB\n");
                wait = TRUE;
        } else {
-               scmd_printk(KERN_INFO, cmd, "Unable to deliver message\n");
+               sdev_printk(KERN_INFO, sdev, "Unable to deliver message\n");
                retval = FAILED;
                goto done;
        }
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 046247420cfa5a67eb9c301c48cf8c94102577ca..7e74f77da14f2f36a15cee623fa5bb3e82b8b5d4 100644
@@ -384,6 +384,7 @@ struct bnx2fc_rport {
 };
 
 struct bnx2fc_mp_req {
+       u64 tm_lun;
        u8 tm_flags;
 
        u32 req_len;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 77654438559864829fe0a0629472f93fb780ac5b..090d436bcef8f4c1a0c5abdf5c0e1f41af2e53b9 100644
@@ -1709,7 +1709,8 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
        struct fcoe_cached_sge_ctx *cached_sge;
        struct fcoe_ext_mul_sges_ctx *sgl;
        int dev_type = tgt->dev_type;
-       u64 *fcp_cmnd;
+       struct fcp_cmnd *fcp_cmnd;
+       u64 *raw_fcp_cmnd;
        u64 tmp_fcp_cmnd[4];
        u32 context_id;
        int cnt, i;
@@ -1778,16 +1779,19 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
        task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
 
        /* Fill FCP_CMND IU */
-       fcp_cmnd = (u64 *)
+       fcp_cmnd = (struct fcp_cmnd *)&tmp_fcp_cmnd;
+       bnx2fc_build_fcp_cmnd(io_req, fcp_cmnd);
+       int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
+       memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+       raw_fcp_cmnd = (u64 *)
                    task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
-       bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
 
        /* swap fcp_cmnd */
        cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
 
        for (i = 0; i < cnt; i++) {
-               *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
-               fcp_cmnd++;
+               *raw_fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
+               raw_fcp_cmnd++;
        }
 
        /* Rx Write Tx Read */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index b42a9accb8320ec34b8c5c476e7529d11ee7b8ef..33057908f1477922d13144e4b6230609a6de79ca 100644
@@ -656,10 +656,9 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
        return SUCCESS;
 }
 
-static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
+static int bnx2fc_initiate_tmf(struct fc_lport *lport, struct fc_rport *rport,
+                              u64 tm_lun, u8 tm_flags)
 {
-       struct fc_lport *lport;
-       struct fc_rport *rport;
        struct fc_rport_libfc_priv *rp;
        struct fcoe_port *port;
        struct bnx2fc_interface *interface;
@@ -668,7 +667,6 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
        struct bnx2fc_mp_req *tm_req;
        struct fcoe_task_ctx_entry *task;
        struct fcoe_task_ctx_entry *task_page;
-       struct Scsi_Host *host = sc_cmd->device->host;
        struct fc_frame_header *fc_hdr;
        struct fcp_cmnd *fcp_cmnd;
        int task_idx, index;
@@ -677,8 +675,6 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
        u32 sid, did;
        unsigned long start = jiffies;
 
-       lport = shost_priv(host);
-       rport = starget_to_rport(scsi_target(sc_cmd->device));
        port = lport_priv(lport);
        interface = port->priv;
 
@@ -689,7 +685,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
        }
        rp = rport->dd_data;
 
-       rc = fc_block_scsi_eh(sc_cmd);
+       rc = fc_block_rport(rport);
        if (rc)
                return rc;
 
@@ -718,7 +714,7 @@ retry_tmf:
                goto retry_tmf;
        }
        /* Initialize rest of io_req fields */
-       io_req->sc_cmd = sc_cmd;
+       io_req->sc_cmd = NULL;
        io_req->port = port;
        io_req->tgt = tgt;
 
@@ -736,11 +732,13 @@ retry_tmf:
        /* Set TM flags */
        io_req->io_req_flags = 0;
        tm_req->tm_flags = tm_flags;
+       tm_req->tm_lun = tm_lun;
 
        /* Fill FCP_CMND */
        bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
        fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
-       memset(fcp_cmnd->fc_cdb, 0,  sc_cmd->cmd_len);
+       int_to_scsilun(tm_lun, &fcp_cmnd->fc_lun);
+       memset(fcp_cmnd->fc_cdb, 0,  BNX2FC_MAX_CMD_LEN);
        fcp_cmnd->fc_dl = 0;
 
        /* Fill FC header */
@@ -763,8 +761,6 @@ retry_tmf:
        task = &(task_page[index]);
        bnx2fc_init_mp_task(io_req, task);
 
-       bnx2fc_priv(sc_cmd)->io_req = io_req;
-
        /* Obtain free SQ entry */
        spin_lock_bh(&tgt->tgt_lock);
        bnx2fc_add_2_sq(tgt, xid);
@@ -1062,7 +1058,10 @@ cleanup_err:
  */
 int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
 {
-       return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+       struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+       struct fc_lport *lport = shost_priv(rport_to_shost(rport));
+
+       return bnx2fc_initiate_tmf(lport, rport, 0, FCP_TMF_TGT_RESET);
 }
 
 /**
@@ -1075,7 +1074,11 @@ int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
  */
 int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
 {
-       return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+       struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+       struct fc_lport *lport = shost_priv(rport_to_shost(rport));
+
+       return bnx2fc_initiate_tmf(lport, rport, sc_cmd->device->lun,
+                                  FCP_TMF_LUN_RESET);
 }
 
 static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
@@ -1450,10 +1453,9 @@ io_compl:
 
 static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
 {
-       struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
        struct bnx2fc_rport *tgt = io_req->tgt;
        struct bnx2fc_cmd *cmd, *tmp;
-       u64 tm_lun = sc_cmd->device->lun;
+       struct bnx2fc_mp_req *tm_req = &io_req->mp_req;
        u64 lun;
        int rc = 0;
 
@@ -1465,8 +1467,10 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
         */
        list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
                BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
+               if (!cmd->sc_cmd)
+                       continue;
                lun = cmd->sc_cmd->device->lun;
-               if (lun == tm_lun) {
+               if (lun == tm_req->tm_lun) {
                        /* Initiate ABTS on this cmd */
                        if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
                                              &cmd->req_flags)) {
@@ -1570,31 +1574,36 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
                printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
                        fc_hdr->fh_r_ctl);
        }
-       if (!bnx2fc_priv(sc_cmd)->io_req) {
-               printk(KERN_ERR PFX "tm_compl: io_req is NULL\n");
-               return;
-       }
-       switch (io_req->fcp_status) {
-       case FC_GOOD:
-               if (io_req->cdb_status == 0) {
-                       /* Good IO completion */
-                       sc_cmd->result = DID_OK << 16;
-               } else {
-                       /* Transport status is good, SCSI status not good */
-                       sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+       if (sc_cmd) {
+               if (!bnx2fc_priv(sc_cmd)->io_req) {
+                       printk(KERN_ERR PFX "tm_compl: io_req is NULL\n");
+                       return;
+               }
+               switch (io_req->fcp_status) {
+               case FC_GOOD:
+                       if (io_req->cdb_status == 0) {
+                               /* Good IO completion */
+                               sc_cmd->result = DID_OK << 16;
+                       } else {
+                               /* Transport status is good, SCSI status not good */
+                               sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+                       }
+                       if (io_req->fcp_resid)
+                               scsi_set_resid(sc_cmd, io_req->fcp_resid);
+                       break;
+
+               default:
+                       BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
+                                     io_req->fcp_status);
+                       break;
                }
-               if (io_req->fcp_resid)
-                       scsi_set_resid(sc_cmd, io_req->fcp_resid);
-               break;
 
-       default:
-               BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
-                          io_req->fcp_status);
-               break;
-       }
+               sc_cmd = io_req->sc_cmd;
+               io_req->sc_cmd = NULL;
 
-       sc_cmd = io_req->sc_cmd;
-       io_req->sc_cmd = NULL;
+               bnx2fc_priv(sc_cmd)->io_req = NULL;
+               scsi_done(sc_cmd);
+       }
 
        /* check if the io_req exists in tgt's tmf_q */
        if (io_req->on_tmf_queue) {
@@ -1607,9 +1616,6 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
                return;
        }
 
-       bnx2fc_priv(sc_cmd)->io_req = NULL;
-       scsi_done(sc_cmd);
-
        kref_put(&io_req->refcount, bnx2fc_cmd_release);
        if (io_req->wait_for_abts_comp) {
                BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
@@ -1738,15 +1744,9 @@ static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
 void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
                                  struct fcp_cmnd *fcp_cmnd)
 {
-       struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
-
        memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
 
-       int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);
-
        fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
-       memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
-
        fcp_cmnd->fc_cmdref = 0;
        fcp_cmnd->fc_pri_ta = 0;
        fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index abde60a50cf75b30af65e033694b059ba9f6c3d7..bf75940f2be14bebb3713656f1208a54fc5dc781 100644
@@ -1294,7 +1294,7 @@ static int cxgbi_ddp_reserve(struct cxgbi_conn *cconn,
 
        /*
         * the ddp tag will be used for the itt in the outgoing pdu,
-        * the itt genrated by libiscsi is saved in the ppm and can be
+        * the itt generated by libiscsi is saved in the ppm and can be
         * retrieved via the ddp tag
         */
        err = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 5f2f943d926ccf2b8af2f28267a2ef86f930c153..944ea4e0cc4551745cf96b9e0576fea5195cf13b 100644
@@ -82,7 +82,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 {
        unsigned char cmd[6] = { TEST_UNIT_READY };
        struct scsi_sense_hdr sshdr;
-       int ret = SCSI_DH_OK, res;
+       int ret, res;
        blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
                                REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
        const struct scsi_exec_args exec_args = {
@@ -92,19 +92,18 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
 retry:
        res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
                               HP_SW_RETRIES, &exec_args);
-       if (res) {
-               if (scsi_sense_valid(&sshdr))
-                       ret = tur_done(sdev, h, &sshdr);
-               else {
-                       sdev_printk(KERN_WARNING, sdev,
-                                   "%s: sending tur failed with %x\n",
-                                   HP_SW_NAME, res);
-                       ret = SCSI_DH_IO;
-               }
-       } else {
+       if (res > 0 && scsi_sense_valid(&sshdr)) {
+               ret = tur_done(sdev, h, &sshdr);
+       } else if (res == 0) {
                h->path_state = HP_SW_PATH_ACTIVE;
                ret = SCSI_DH_OK;
+       } else {
+               sdev_printk(KERN_WARNING, sdev,
+                           "%s: sending tur failed with %x\n",
+                           HP_SW_NAME, res);
+               ret = SCSI_DH_IO;
        }
+
        if (ret == SCSI_DH_IMM_RETRY)
                goto retry;
 
@@ -122,7 +121,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
        unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 };
        struct scsi_sense_hdr sshdr;
        struct scsi_device *sdev = h->sdev;
-       int res, rc = SCSI_DH_OK;
+       int res, rc;
        int retry_cnt = HP_SW_RETRIES;
        blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
                                REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
@@ -133,35 +132,37 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
 retry:
        res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
                               HP_SW_RETRIES, &exec_args);
-       if (res) {
-               if (!scsi_sense_valid(&sshdr)) {
-                       sdev_printk(KERN_WARNING, sdev,
-                                   "%s: sending start_stop_unit failed, "
-                                   "no sense available\n", HP_SW_NAME);
-                       return SCSI_DH_IO;
-               }
-               switch (sshdr.sense_key) {
-               case NOT_READY:
-                       if (sshdr.asc == 0x04 && sshdr.ascq == 3) {
-                               /*
-                                * LUN not ready - manual intervention required
-                                *
-                                * Switch-over in progress, retry.
-                                */
-                               if (--retry_cnt)
-                                       goto retry;
-                               rc = SCSI_DH_RETRY;
-                               break;
-                       }
-                       fallthrough;
-               default:
-                       sdev_printk(KERN_WARNING, sdev,
-                                   "%s: sending start_stop_unit failed, "
-                                   "sense %x/%x/%x\n", HP_SW_NAME,
-                                   sshdr.sense_key, sshdr.asc, sshdr.ascq);
-                       rc = SCSI_DH_IO;
+       if (!res) {
+               return SCSI_DH_OK;
+       } else if (res < 0 || !scsi_sense_valid(&sshdr)) {
+               sdev_printk(KERN_WARNING, sdev,
+                           "%s: sending start_stop_unit failed, "
+                           "no sense available\n", HP_SW_NAME);
+               return SCSI_DH_IO;
+       }
+
+       switch (sshdr.sense_key) {
+       case NOT_READY:
+               if (sshdr.asc == 0x04 && sshdr.ascq == 3) {
+                       /*
+                        * LUN not ready - manual intervention required
+                        *
+                        * Switch-over in progress, retry.
+                        */
+                       if (--retry_cnt)
+                               goto retry;
+                       rc = SCSI_DH_RETRY;
+                       break;
                }
+               fallthrough;
+       default:
+               sdev_printk(KERN_WARNING, sdev,
+                           "%s: sending start_stop_unit failed, "
+                           "sense %x/%x/%x\n", HP_SW_NAME,
+                           sshdr.sense_key, sshdr.asc, sshdr.ascq);
+               rc = SCSI_DH_IO;
        }
+
        return rc;
 }
 
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index c5538645057a6883cef0cff5c7de451007370c2e..1ac2ae17e8be3ce37d8dd554ba6c9039cf4cd434 100644
@@ -530,7 +530,7 @@ static void send_mode_select(struct work_struct *work)
                container_of(work, struct rdac_controller, ms_work);
        struct scsi_device *sdev = ctlr->ms_sdev;
        struct rdac_dh_data *h = sdev->handler_data;
-       int err = SCSI_DH_OK, retry_cnt = RDAC_RETRY_COUNT;
+       int rc, err, retry_cnt = RDAC_RETRY_COUNT;
        struct rdac_queue_data *tmp, *qdata;
        LIST_HEAD(list);
        unsigned char cdb[MAX_COMMAND_SIZE];
@@ -558,20 +558,23 @@ static void send_mode_select(struct work_struct *work)
                (char *) h->ctlr->array_name, h->ctlr->index,
                (retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
 
-       if (scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size,
-                            RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args)) {
+       rc = scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size,
+                             RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args);
+       if (!rc) {
+               h->state = RDAC_STATE_ACTIVE;
+               RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+                               "MODE_SELECT completed",
+                               (char *) h->ctlr->array_name, h->ctlr->index);
+               err = SCSI_DH_OK;
+       } else if (rc < 0) {
+               err = SCSI_DH_IO;
+       } else {
                err = mode_select_handle_sense(sdev, &sshdr);
                if (err == SCSI_DH_RETRY && retry_cnt--)
                        goto retry;
                if (err == SCSI_DH_IMM_RETRY)
                        goto retry;
        }
-       if (err == SCSI_DH_OK) {
-               h->state = RDAC_STATE_ACTIVE;
-               RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
-                               "MODE_SELECT completed",
-                               (char *) h->ctlr->array_name, h->ctlr->index);
-       }
 
        list_for_each_entry_safe(qdata, tmp, &list, entry) {
                list_del(&qdata->entry);
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index a982b9cf9870174f63e7963f54e4eb560ad6843e..6a6ec32c46bdf3a1d532e988a6c98e7fc13f6a6e 100644
@@ -1611,6 +1611,8 @@ static const struct target_core_fabric_ops efct_lio_ops = {
        .sess_get_initiator_sid         = NULL,
        .tfc_tpg_base_attrs             = efct_lio_tpg_attrs,
        .tfc_tpg_attrib_attrs           = efct_lio_tpg_attrib_attrs,
+       .default_submit_type            = TARGET_DIRECT_SUBMIT,
+       .direct_submit_supp             = 1,
 };
 
 static const struct target_core_fabric_ops efct_lio_npiv_ops = {
@@ -1646,6 +1648,9 @@ static const struct target_core_fabric_ops efct_lio_npiv_ops = {
        .sess_get_initiator_sid         = NULL,
        .tfc_tpg_base_attrs             = efct_lio_npiv_tpg_attrs,
        .tfc_tpg_attrib_attrs           = efct_lio_npiv_tpg_attrib_attrs,
+
+       .default_submit_type            = TARGET_DIRECT_SUBMIT,
+       .direct_submit_supp             = 1,
 };
 
 int efct_scsi_tgt_driver_init(void)
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 055d2e87a2c8da7ea6ff0b75dafe5eabc9e05da1..3f7c1d131ec34fef4bcc265d3e2339a65a0168da 100644
@@ -41,6 +41,8 @@
  * USA.
  */
 
+#include <linux/bitfield.h>
+
 #include "esas2r.h"
 
 /*
@@ -792,16 +794,10 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
                        pcie_capability_read_dword(a->pcid, PCI_EXP_LNKCAP,
                                                   &caps);
 
-                       gai->pci.link_speed_curr =
-                               (u8)(stat & PCI_EXP_LNKSTA_CLS);
-                       gai->pci.link_speed_max =
-                               (u8)(caps & PCI_EXP_LNKCAP_SLS);
-                       gai->pci.link_width_curr =
-                               (u8)((stat & PCI_EXP_LNKSTA_NLW)
-                                    >> PCI_EXP_LNKSTA_NLW_SHIFT);
-                       gai->pci.link_width_max =
-                               (u8)((caps & PCI_EXP_LNKCAP_MLW)
-                                    >> 4);
+                       gai->pci.link_speed_curr = FIELD_GET(PCI_EXP_LNKSTA_CLS, stat);
+                       gai->pci.link_speed_max = FIELD_GET(PCI_EXP_LNKCAP_SLS, caps);
+                       gai->pci.link_width_curr = FIELD_GET(PCI_EXP_LNKSTA_NLW, stat);
+                       gai->pci.link_width_max = FIELD_GET(PCI_EXP_LNKCAP_MLW, caps);
                }
 
                gai->pci.msi_vector_cnt = 1;
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 79ddfaaf71a41c4c8b390fcfe589ebc9a832f3ec..55632c67a8f24260a7e8148e28a7f1be9068821f 100644
@@ -145,16 +145,17 @@ void fnic_handle_link(struct work_struct *work)
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                        /* start FCoE VLAN discovery */
-                               fnic_fc_trace_set_data(
-                               fnic->lport->host->host_no,
-                               FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
-                               strlen("Link Status: DOWN_UP_VLAN"));
+                       fnic_fc_trace_set_data(fnic->lport->host->host_no,
+                                              FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
+                                              strlen("Link Status: DOWN_UP_VLAN"));
                        fnic_fcoe_send_vlan_req(fnic);
+
                        return;
                }
+
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
                fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
-                       "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
+                                      "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
                fcoe_ctlr_link_up(&fnic->ctlr);
        } else {
                /* UP -> DOWN */
index 9e73e9cbbcfc6c45fbb17b706aa3003a77ea3821..1e4550156b735d82599a273b125999c0b19353df 100644 (file)
@@ -343,7 +343,7 @@ struct hisi_sas_hw {
                                u8 reg_index, u8 reg_count, u8 *write_data);
        void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
                                           int delay_ms, int timeout_ms);
-       void (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba);
+       int (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba);
        int complete_hdr_size;
        const struct scsi_host_template *sht;
 };
@@ -451,7 +451,6 @@ struct hisi_hba {
        const struct hisi_sas_hw *hw;   /* Low level hw interface */
        unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
        struct work_struct rst_work;
-       struct work_struct debugfs_work;
        u32 phy_state;
        u32 intr_coal_ticks;    /* Time of interrupt coalesce in us */
        u32 intr_coal_count;    /* Interrupt count to coalesce */
index 9472b9743aefbe85f0e8aeb6f55374f14db52cac..d50058b41409887f7c413ddb9f793b9fa27c764e 100644 (file)
@@ -1958,8 +1958,11 @@ static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
        struct hisi_sas_internal_abort_data *timeout = data;
 
-       if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
-               queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
+       if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) {
+               down(&hisi_hba->sem);
+               hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
+               up(&hisi_hba->sem);
+       }
 
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
                pr_err("Internal abort: timeout %016llx\n",
index bbb64ee6afd7c40ccdc49271785d3119549ce747..ccc5acb39f5a6af954bd452913e4a4dcdb49df12 100644 (file)
@@ -558,8 +558,7 @@ static int experimental_iopoll_q_cnt;
 module_param(experimental_iopoll_q_cnt, int, 0444);
 MODULE_PARM_DESC(experimental_iopoll_q_cnt, "number of queues to be used as poll mode, def=0");
 
-static void debugfs_work_handler_v3_hw(struct work_struct *work);
-static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba);
+static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba);
 
 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
 {
@@ -3388,7 +3387,6 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
        hisi_hba = shost_priv(shost);
 
        INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
-       INIT_WORK(&hisi_hba->debugfs_work, debugfs_work_handler_v3_hw);
        hisi_hba->hw = &hisi_sas_v3_hw;
        hisi_hba->pci_dev = pdev;
        hisi_hba->dev = dev;
@@ -3860,37 +3858,6 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
                            &debugfs_ras_v3_hw_fops);
 }
 
-static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
-{
-       int debugfs_dump_index = hisi_hba->debugfs_dump_index;
-       struct device *dev = hisi_hba->dev;
-       u64 timestamp = local_clock();
-
-       if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
-               dev_warn(dev, "dump count exceeded!\n");
-               return;
-       }
-
-       do_div(timestamp, NSEC_PER_MSEC);
-       hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
-
-       debugfs_snapshot_prepare_v3_hw(hisi_hba);
-
-       debugfs_snapshot_global_reg_v3_hw(hisi_hba);
-       debugfs_snapshot_port_reg_v3_hw(hisi_hba);
-       debugfs_snapshot_axi_reg_v3_hw(hisi_hba);
-       debugfs_snapshot_ras_reg_v3_hw(hisi_hba);
-       debugfs_snapshot_cq_reg_v3_hw(hisi_hba);
-       debugfs_snapshot_dq_reg_v3_hw(hisi_hba);
-       debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
-       debugfs_snapshot_iost_reg_v3_hw(hisi_hba);
-
-       debugfs_create_files_v3_hw(hisi_hba);
-
-       debugfs_snapshot_restore_v3_hw(hisi_hba);
-       hisi_hba->debugfs_dump_index++;
-}
-
 static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
                                                const char __user *user_buf,
                                                size_t count, loff_t *ppos)
@@ -3898,9 +3865,6 @@ static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
        struct hisi_hba *hisi_hba = file->f_inode->i_private;
        char buf[8];
 
-       if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
-               return -EFAULT;
-
        if (count > 8)
                return -EFAULT;
 
@@ -3910,7 +3874,12 @@ static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
        if (buf[0] != '1')
                return -EFAULT;
 
-       queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
+       down(&hisi_hba->sem);
+       if (debugfs_snapshot_regs_v3_hw(hisi_hba)) {
+               up(&hisi_hba->sem);
+               return -EFAULT;
+       }
+       up(&hisi_hba->sem);
 
        return count;
 }
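
The write handler above now takes hisi_hba->sem and runs the register snapshot synchronously instead of queueing deferred work, so a debugfs trigger and the internal-abort timeout path shown earlier serialize on the same semaphore and a failed snapshot is reported immediately. A rough user-space model of that pattern, with a pthread mutex standing in for the kernel semaphore; every name and the slot count below are invented for the sketch:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define DEMO_DUMP_SLOTS 4

static pthread_mutex_t demo_sem = PTHREAD_MUTEX_INITIALIZER; /* stands in for hisi_hba->sem */
static int demo_dump_index;

/* stand-in for the snapshot routine: bounded, fails when all dump slots are used */
static int demo_snapshot_regs(void)
{
        if (demo_dump_index >= DEMO_DUMP_SLOTS)
                return -EINVAL;         /* "dump count exceeded" */

        /* ... allocate this slot, copy registers, publish debugfs files ... */
        demo_dump_index++;
        return 0;
}

/* stand-in for the debugfs write handler: snapshot synchronously under the lock */
static int demo_trigger_dump(void)
{
        int ret;

        pthread_mutex_lock(&demo_sem);
        ret = demo_snapshot_regs();
        pthread_mutex_unlock(&demo_sem);

        return ret ? -EFAULT : 0;
}

int main(void)
{
        for (int i = 0; i < 6; i++)
                printf("trigger %d -> %d\n", i, demo_trigger_dump());
        return 0;
}
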
@@ -4661,14 +4630,6 @@ static void debugfs_fifo_init_v3_hw(struct hisi_hba *hisi_hba)
        }
 }
 
-static void debugfs_work_handler_v3_hw(struct work_struct *work)
-{
-       struct hisi_hba *hisi_hba =
-               container_of(work, struct hisi_hba, debugfs_work);
-
-       debugfs_snapshot_regs_v3_hw(hisi_hba);
-}
-
 static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
 {
        struct device *dev = hisi_hba->dev;
@@ -4703,7 +4664,7 @@ static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
 {
        const struct hisi_sas_hw *hw = hisi_hba->hw;
        struct device *dev = hisi_hba->dev;
-       int p, c, d, r, i;
+       int p, c, d, r;
        size_t sz;
 
        for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
@@ -4783,11 +4744,48 @@ static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
 
        return 0;
 fail:
-       for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
-               debugfs_release_v3_hw(hisi_hba, i);
+       debugfs_release_v3_hw(hisi_hba, dump_index);
        return -ENOMEM;
 }
 
+static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
+{
+       int debugfs_dump_index = hisi_hba->debugfs_dump_index;
+       struct device *dev = hisi_hba->dev;
+       u64 timestamp = local_clock();
+
+       if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
+               dev_warn(dev, "dump count exceeded!\n");
+               return -EINVAL;
+       }
+
+       if (debugfs_alloc_v3_hw(hisi_hba, debugfs_dump_index)) {
+               dev_warn(dev, "failed to alloc memory\n");
+               return -ENOMEM;
+       }
+
+       do_div(timestamp, NSEC_PER_MSEC);
+       hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
+
+       debugfs_snapshot_prepare_v3_hw(hisi_hba);
+
+       debugfs_snapshot_global_reg_v3_hw(hisi_hba);
+       debugfs_snapshot_port_reg_v3_hw(hisi_hba);
+       debugfs_snapshot_axi_reg_v3_hw(hisi_hba);
+       debugfs_snapshot_ras_reg_v3_hw(hisi_hba);
+       debugfs_snapshot_cq_reg_v3_hw(hisi_hba);
+       debugfs_snapshot_dq_reg_v3_hw(hisi_hba);
+       debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
+       debugfs_snapshot_iost_reg_v3_hw(hisi_hba);
+
+       debugfs_create_files_v3_hw(hisi_hba);
+
+       debugfs_snapshot_restore_v3_hw(hisi_hba);
+       hisi_hba->debugfs_dump_index++;
+
+       return 0;
+}
+
 static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba)
 {
        struct dentry *dir = debugfs_create_dir("phy_down_cnt",
@@ -4865,10 +4863,15 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
        hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
 }
 
+static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
+{
+       debugfs_remove_recursive(hisi_hba->debugfs_dir);
+       hisi_hba->debugfs_dir = NULL;
+}
+
 static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
 {
        struct device *dev = hisi_hba->dev;
-       int i;
 
        hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
                                                   hisi_sas_debugfs_dir);
@@ -4885,19 +4888,6 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
 
        debugfs_phy_down_cnt_init_v3_hw(hisi_hba);
        debugfs_fifo_init_v3_hw(hisi_hba);
-
-       for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
-               if (debugfs_alloc_v3_hw(hisi_hba, i)) {
-                       debugfs_remove_recursive(hisi_hba->debugfs_dir);
-                       dev_dbg(dev, "failed to init debugfs!\n");
-                       break;
-               }
-       }
-}
-
-static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
-{
-       debugfs_remove_recursive(hisi_hba->debugfs_dir);
 }
 
 static int
index ce9eb00e2ca04d6b1a500a39df6bcee19fd71e0f..92c440f2e3a78f05d5fe41827c017af897795f0b 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/bsg-lib.h>
 #include <asm/firmware.h>
 #include <asm/irq.h>
-#include <asm/rtas.h>
 #include <asm/vio.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -38,6 +37,7 @@ static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
 static u64 max_lun = IBMVFC_MAX_LUN;
 static unsigned int max_targets = IBMVFC_MAX_TARGETS;
 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
+static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
@@ -83,6 +83,9 @@ MODULE_PARM_DESC(default_timeout,
 module_param_named(max_requests, max_requests, uint, S_IRUGO);
 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
                 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
+module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
+MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
+                "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
 module_param_named(max_lun, max_lun, ullong, S_IRUGO);
 MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
                 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
@@ -160,8 +163,8 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *);
 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
 static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
 
-static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *);
-static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *);
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);
 
 static const char *unknown_error = "unknown error";
 
@@ -776,28 +779,26 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
  * @vhost:     ibmvfc host who owns the event pool
  * @queue:      ibmvfc queue struct
- * @size:       pool size
  *
  * Returns zero on success.
  **/
 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
-                                 struct ibmvfc_queue *queue,
-                                 unsigned int size)
+                                 struct ibmvfc_queue *queue)
 {
        int i;
        struct ibmvfc_event_pool *pool = &queue->evt_pool;
 
        ENTER;
-       if (!size)
+       if (!queue->total_depth)
                return 0;
 
-       pool->size = size;
-       pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
+       pool->size = queue->total_depth;
+       pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
        if (!pool->events)
                return -ENOMEM;
 
        pool->iu_storage = dma_alloc_coherent(vhost->dev,
-                                             size * sizeof(*pool->iu_storage),
+                                             pool->size * sizeof(*pool->iu_storage),
                                              &pool->iu_token, 0);
 
        if (!pool->iu_storage) {
@@ -807,9 +808,11 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
 
        INIT_LIST_HEAD(&queue->sent);
        INIT_LIST_HEAD(&queue->free);
+       queue->evt_free = queue->evt_depth;
+       queue->reserved_free = queue->reserved_depth;
        spin_lock_init(&queue->l_lock);
 
-       for (i = 0; i < size; ++i) {
+       for (i = 0; i < pool->size; ++i) {
                struct ibmvfc_event *evt = &pool->events[i];
 
                /*
@@ -922,7 +925,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
        struct vio_dev *vdev = to_vio_dev(vhost->dev);
        unsigned long flags;
 
-       ibmvfc_dereg_sub_crqs(vhost);
+       ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
 
        /* Re-enable the CRQ */
        do {
@@ -941,7 +944,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
        spin_unlock(vhost->crq.q_lock);
        spin_unlock_irqrestore(vhost->host->host_lock, flags);
 
-       ibmvfc_reg_sub_crqs(vhost);
+       ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
 
        return rc;
 }
@@ -960,7 +963,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
        struct vio_dev *vdev = to_vio_dev(vhost->dev);
        struct ibmvfc_queue *crq = &vhost->crq;
 
-       ibmvfc_dereg_sub_crqs(vhost);
+       ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
 
        /* Close the CRQ */
        do {
@@ -993,7 +996,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
        spin_unlock(vhost->crq.q_lock);
        spin_unlock_irqrestore(vhost->host->host_lock, flags);
 
-       ibmvfc_reg_sub_crqs(vhost);
+       ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
 
        return rc;
 }
@@ -1033,6 +1036,12 @@ static void ibmvfc_free_event(struct ibmvfc_event *evt)
 
        spin_lock_irqsave(&evt->queue->l_lock, flags);
        list_add_tail(&evt->queue_list, &evt->queue->free);
+       if (evt->reserved) {
+               evt->reserved = 0;
+               evt->queue->reserved_free++;
+       } else {
+               evt->queue->evt_free++;
+       }
        if (evt->eh_comp)
                complete(evt->eh_comp);
        spin_unlock_irqrestore(&evt->queue->l_lock, flags);
@@ -1475,6 +1484,12 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
        struct ibmvfc_queue *async_crq = &vhost->async_crq;
        struct device_node *of_node = vhost->dev->of_node;
        const char *location;
+       u16 max_cmds;
+
+       max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
+       if (mq_enabled)
+               max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *
+                       vhost->scsi_scrqs.desired_queues;
 
        memset(login_info, 0, sizeof(*login_info));
 
@@ -1489,7 +1504,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
        if (vhost->client_migrated)
                login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
 
-       login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
+       login_info->max_cmds = cpu_to_be32(max_cmds);
        login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
 
        if (vhost->mq_enabled || vhost->using_channels)
@@ -1508,25 +1523,39 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
 }
 
 /**
- * ibmvfc_get_event - Gets the next free event in pool
+ * __ibmvfc_get_event - Gets the next free event in pool
  * @queue:      ibmvfc queue struct
+ * @reserved:  event is for a reserved management command
  *
  * Returns a free event from the pool.
  **/
-static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
+static struct ibmvfc_event *__ibmvfc_get_event(struct ibmvfc_queue *queue, int reserved)
 {
-       struct ibmvfc_event *evt;
+       struct ibmvfc_event *evt = NULL;
        unsigned long flags;
 
        spin_lock_irqsave(&queue->l_lock, flags);
-       BUG_ON(list_empty(&queue->free));
-       evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+       if (reserved && queue->reserved_free) {
+               evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+               evt->reserved = 1;
+               queue->reserved_free--;
+       } else if (queue->evt_free) {
+               evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+               queue->evt_free--;
+       } else {
+               goto out;
+       }
+
        atomic_set(&evt->free, 0);
        list_del(&evt->queue_list);
+out:
        spin_unlock_irqrestore(&queue->l_lock, flags);
        return evt;
 }
 
+#define ibmvfc_get_event(queue) __ibmvfc_get_event(queue, 0)
+#define ibmvfc_get_reserved_event(queue) __ibmvfc_get_event(queue, 1)
+
 /**
  * ibmvfc_locked_done - Calls evt completion with host_lock held
  * @evt:       ibmvfc evt to complete
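
A note on the helper introduced above: __ibmvfc_get_event() keeps two budgets on one free list — a general pool sized by scsi_qdepth and a small reserved pool for management (MAD/TMF) traffic — and returns NULL instead of tripping the old BUG_ON when the relevant budget runs dry. A compact stand-alone sketch of that accounting; the types, names, and sizes are invented for illustration:

#include <stdio.h>

struct demo_queue {
        unsigned int evt_free;          /* general command budget */
        unsigned int reserved_free;     /* budget held back for management commands */
};

/* returns 1 if an event was "taken", 0 if the caller must back off */
static int demo_get_event(struct demo_queue *q, int reserved)
{
        if (reserved && q->reserved_free) {
                q->reserved_free--;     /* management command takes a reserved slot */
                return 1;
        }
        if (q->evt_free) {
                q->evt_free--;          /* otherwise draw from the general budget */
                return 1;
        }
        return 0;                       /* e.g. queuecommand returns SCSI_MLQUEUE_HOST_BUSY */
}

int main(void)
{
        struct demo_queue q = { .evt_free = 1, .reserved_free = 1 };

        printf("scsi cmd: %d\n", demo_get_event(&q, 0));  /* takes the last general slot */
        printf("scsi cmd: %d\n", demo_get_event(&q, 0));  /* 0: general budget empty */
        printf("mgmt cmd: %d\n", demo_get_event(&q, 1));  /* reserved slot still available */
        return 0;
}
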
@@ -1948,9 +1977,15 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
        if (vhost->using_channels) {
                scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
                evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
+               if (!evt)
+                       return SCSI_MLQUEUE_HOST_BUSY;
+
                evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
-       } else
+       } else {
                evt = ibmvfc_get_event(&vhost->crq);
+               if (!evt)
+                       return SCSI_MLQUEUE_HOST_BUSY;
+       }
 
        ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
        evt->cmnd = cmnd;
@@ -2037,7 +2072,12 @@ static int ibmvfc_bsg_timeout(struct bsg_job *job)
        }
 
        vhost->aborting_passthru = 1;
-       evt = ibmvfc_get_event(&vhost->crq);
+       evt = ibmvfc_get_reserved_event(&vhost->crq);
+       if (!evt) {
+               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+               return -ENOMEM;
+       }
+
        ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
 
        tmf = &evt->iu.tmf;
@@ -2095,7 +2135,11 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
        if (unlikely((rc = ibmvfc_host_chkready(vhost))))
                goto unlock_out;
 
-       evt = ibmvfc_get_event(&vhost->crq);
+       evt = ibmvfc_get_reserved_event(&vhost->crq);
+       if (!evt) {
+               rc = -ENOMEM;
+               goto unlock_out;
+       }
        ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
        plogi = &evt->iu.plogi;
        memset(plogi, 0, sizeof(*plogi));
@@ -2213,7 +2257,12 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
                goto out;
        }
 
-       evt = ibmvfc_get_event(&vhost->crq);
+       evt = ibmvfc_get_reserved_event(&vhost->crq);
+       if (!evt) {
+               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+               rc = -ENOMEM;
+               goto out;
+       }
        ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
        mad = &evt->iu.passthru;
 
@@ -2302,6 +2351,11 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
                else
                        evt = ibmvfc_get_event(&vhost->crq);
 
+               if (!evt) {
+                       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                       return -ENOMEM;
+               }
+
                ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
                tmf = ibmvfc_init_vfc_cmd(evt, sdev);
                iu = ibmvfc_get_fcp_iu(vhost, tmf);
@@ -2504,7 +2558,9 @@ static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
        struct ibmvfc_event *evt;
        struct ibmvfc_tmf *tmf;
 
-       evt = ibmvfc_get_event(queue);
+       evt = ibmvfc_get_reserved_event(queue);
+       if (!evt)
+               return NULL;
        ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
 
        tmf = &evt->iu.tmf;
@@ -2561,6 +2617,11 @@ static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
 
                if (found_evt && vhost->logged_in) {
                        evt = ibmvfc_init_tmf(&queues[i], sdev, type);
+                       if (!evt) {
+                               spin_unlock(queues[i].q_lock);
+                               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                               return -ENOMEM;
+                       }
                        evt->sync_iu = &queues[i].cancel_rsp;
                        ibmvfc_send_event(evt, vhost, default_timeout);
                        list_add_tail(&evt->cancel, &cancelq);
@@ -2774,6 +2835,10 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
 
        if (vhost->state == IBMVFC_ACTIVE) {
                evt = ibmvfc_get_event(&vhost->crq);
+               if (!evt) {
+                       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                       return -ENOMEM;
+               }
                ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
                tmf = ibmvfc_init_vfc_cmd(evt, sdev);
                iu = ibmvfc_get_fcp_iu(vhost, tmf);
@@ -2930,18 +2995,6 @@ static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
        *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 }
 
-/**
- * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
- * @sdev:      scsi device struct
- * @data:      return code
- *
- **/
-static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
-{
-       unsigned long *rc = data;
-       *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
-}
-
 /**
  * ibmvfc_eh_target_reset_handler - Reset the target
  * @cmd:       scsi command struct
@@ -2951,22 +3004,38 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
  **/
 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
 {
-       struct scsi_device *sdev = cmd->device;
-       struct ibmvfc_host *vhost = shost_priv(sdev->host);
-       struct scsi_target *starget = scsi_target(sdev);
+       struct scsi_target *starget = scsi_target(cmd->device);
+       struct fc_rport *rport = starget_to_rport(starget);
+       struct Scsi_Host *shost = rport_to_shost(rport);
+       struct ibmvfc_host *vhost = shost_priv(shost);
        int block_rc;
        int reset_rc = 0;
        int rc = FAILED;
        unsigned long cancel_rc = 0;
+       bool tgt_reset = false;
 
        ENTER;
-       block_rc = fc_block_scsi_eh(cmd);
+       block_rc = fc_block_rport(rport);
        ibmvfc_wait_while_resetting(vhost);
        if (block_rc != FAST_IO_FAIL) {
-               starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
-               reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+               struct scsi_device *sdev;
+
+               shost_for_each_device(sdev, shost) {
+                       if ((sdev->channel != starget->channel) ||
+                           (sdev->id != starget->id))
+                               continue;
+
+                       cancel_rc |= ibmvfc_cancel_all(sdev,
+                                                      IBMVFC_TMF_TGT_RESET);
+                       if (!tgt_reset) {
+                               reset_rc = ibmvfc_reset_device(sdev,
+                                       IBMVFC_TARGET_RESET, "target");
+                               tgt_reset = true;
+                       }
+               }
        } else
-               starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
+               starget_for_each_device(starget, &cancel_rc,
+                                       ibmvfc_dev_cancel_all_noreset);
 
        if (!cancel_rc && !reset_rc)
                rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
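
The reworked target-reset handler above walks every scsi_device on the host that belongs to the target, issues a cancel-all per LUN, but sends the actual target reset only once. A schematic of that cancel-each/reset-once loop in plain C, with hypothetical types and a fabricated device list:

#include <stdbool.h>
#include <stdio.h>

struct demo_sdev { int channel, id, lun; };

static int demo_cancel_all(const struct demo_sdev *s)
{
        printf("cancel all on %d:%d:%d\n", s->channel, s->id, s->lun);
        return 0;
}

static int demo_target_reset(const struct demo_sdev *s)
{
        printf("target reset via %d:%d:%d\n", s->channel, s->id, s->lun);
        return 0;
}

int main(void)
{
        /* fabricated device list: two LUNs on target 0:1, one on 0:2 */
        struct demo_sdev devs[] = { {0, 1, 0}, {0, 1, 1}, {0, 2, 0} };
        int tgt_channel = 0, tgt_id = 1;
        bool tgt_reset = false;
        int cancel_rc = 0, reset_rc = 0;

        for (unsigned int i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
                if (devs[i].channel != tgt_channel || devs[i].id != tgt_id)
                        continue;

                cancel_rc |= demo_cancel_all(&devs[i]);
                if (!tgt_reset) {                 /* reset the target only once */
                        reset_rc = demo_target_reset(&devs[i]);
                        tgt_reset = true;
                }
        }
        return cancel_rc | reset_rc;
}
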
@@ -3513,11 +3582,12 @@ static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
 {
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ibmvfc_host *vhost = shost_priv(shost);
+       struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
        unsigned long flags = 0;
        int len;
 
        spin_lock_irqsave(shost->host_lock, flags);
-       len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels);
+       len = snprintf(buf, PAGE_SIZE, "%d\n", scsi->desired_queues);
        spin_unlock_irqrestore(shost->host_lock, flags);
        return len;
 }
@@ -3528,12 +3598,13 @@ static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
 {
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ibmvfc_host *vhost = shost_priv(shost);
+       struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
        unsigned long flags = 0;
        unsigned int channels;
 
        spin_lock_irqsave(shost->host_lock, flags);
        channels = simple_strtoul(buf, NULL, 10);
-       vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues);
+       scsi->desired_queues = min(channels, shost->nr_hw_queues);
        ibmvfc_hard_reset_host(vhost);
        spin_unlock_irqrestore(shost->host_lock, flags);
        return strlen(buf);
@@ -3633,7 +3704,6 @@ static const struct scsi_host_template driver_template = {
        .max_sectors = IBMVFC_MAX_SECTORS,
        .shost_groups = ibmvfc_host_groups,
        .track_queue_depth = 1,
-       .host_tagset = 1,
 };
 
 /**
@@ -3869,7 +3939,7 @@ static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
        }
 }
 
-static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance)
+static irqreturn_t ibmvfc_interrupt_mq(int irq, void *scrq_instance)
 {
        struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
 
@@ -4031,7 +4101,13 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
                return;
 
        kref_get(&tgt->kref);
-       evt = ibmvfc_get_event(&vhost->crq);
+       evt = ibmvfc_get_reserved_event(&vhost->crq);
+       if (!evt) {
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+               __ibmvfc_reset_host(vhost);
+               return;
+       }
        vhost->discovery_threads++;
        ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
        evt->tgt = tgt;
@@ -4138,7 +4214,13 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
 
        kref_get(&tgt->kref);
        tgt->logo_rcvd = 0;
-       evt = ibmvfc_get_event(&vhost->crq);
+       evt = ibmvfc_get_reserved_event(&vhost->crq);
+       if (!evt) {
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+               __ibmvfc_reset_host(vhost);
+               return;
+       }
        vhost->discovery_threads++;
        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
        ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
@@ -4214,7 +4296,9 @@ static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_t
        struct ibmvfc_event *evt;
 
        kref_get(&tgt->kref);
-       evt = ibmvfc_get_event(&vhost->crq);
+       evt = ibmvfc_get_reserved_event(&vhost->crq);
+       if (!evt)
+               return NULL;
        ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
        evt->tgt = tgt;
        mad = &evt->iu.implicit_logout;
@@ -4242,6 +4326,13 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
        vhost->discovery_threads++;
        evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
                                                   ibmvfc_tgt_implicit_logout_done);
+       if (!evt) {
+               vhost->discovery_threads--;
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+               __ibmvfc_reset_host(vhost);
+               return;
+       }
 
        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
        if (ibmvfc_send_event(evt, vhost, default_timeout)) {
@@ -4380,7 +4471,13 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
                return;
 
        kref_get(&tgt->kref);
-       evt = ibmvfc_get_event(&vhost->crq);
+       evt = ibmvfc_get_reserved_event(&vhost->crq);
+       if (!evt) {
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+               __ibmvfc_reset_host(vhost);
+               return;
+       }
        vhost->discovery_threads++;
        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
        ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
@@ -4546,7 +4643,15 @@ static void ibmvfc_adisc_timeout(struct timer_list *t)
 
        vhost->abort_threads++;
        kref_get(&tgt->kref);
-       evt = ibmvfc_get_event(&vhost->crq);
+       evt = ibmvfc_get_reserved_event(&vhost->crq);
+       if (!evt) {
+               tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
+               vhost->abort_threads--;
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+               __ibmvfc_reset_host(vhost);
+               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+               return;
+       }
        ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
 
        evt->tgt = tgt;
@@ -4596,7 +4701,13 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
                return;
 
        kref_get(&tgt->kref);
-       evt = ibmvfc_get_event(&vhost->crq);
+       evt = ibmvfc_get_reserved_event(&vhost->crq);
+       if (!evt) {
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+               __ibmvfc_reset_host(vhost);
+               return;
+       }
        vhost->discovery_threads++;
        ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
        evt->tgt = tgt;
@@ -4699,7 +4810,13 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
                return;
 
        kref_get(&tgt->kref);
-       evt = ibmvfc_get_event(&vhost->crq);
+       evt = ibmvfc_get_reserved_event(&vhost->crq);
+       if (!evt) {
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+               __ibmvfc_reset_host(vhost);
+               return;
+       }
        vhost->discovery_threads++;
        evt->tgt = tgt;
        ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
@@ -4822,7 +4939,7 @@ static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
        int i, rc;
 
        for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
-               rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
+               rc = ibmvfc_alloc_target(vhost, &vhost->scsi_scrqs.disc_buf[i]);
 
        return rc;
 }
@@ -4871,7 +4988,14 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
 {
        struct ibmvfc_discover_targets *mad;
-       struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+       struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
+       int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+       if (!evt) {
+               ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
+               ibmvfc_hard_reset_host(vhost);
+               return;
+       }
 
        ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
        mad = &evt->iu.discover_targets;
@@ -4879,9 +5003,9 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
        mad->common.version = cpu_to_be32(1);
        mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
        mad->common.length = cpu_to_be16(sizeof(*mad));
-       mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
-       mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
-       mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
+       mad->bufflen = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz);
+       mad->buffer.va = cpu_to_be64(vhost->scsi_scrqs.disc_buf_dma);
+       mad->buffer.len = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz);
        mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
 
@@ -4895,7 +5019,7 @@ static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
 {
        struct ibmvfc_host *vhost = evt->vhost;
        struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
-       struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+       struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
        u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
        int level = IBMVFC_DEFAULT_LOG_LEVEL;
        int flags, active_queues, i;
@@ -4945,12 +5069,19 @@ static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
 {
        struct ibmvfc_channel_setup_mad *mad;
        struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
-       struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
-       struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+       struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
+       struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
        unsigned int num_channels =
-               min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
+               min(scrqs->desired_queues, vhost->max_vios_scsi_channels);
+       int level = IBMVFC_DEFAULT_LOG_LEVEL;
        int i;
 
+       if (!evt) {
+               ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
+               ibmvfc_hard_reset_host(vhost);
+               return;
+       }
+
        memset(setup_buf, 0, sizeof(*setup_buf));
        if (num_channels == 0)
                setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
@@ -5011,7 +5142,14 @@ static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
 static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
 {
        struct ibmvfc_channel_enquiry *mad;
-       struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+       struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
+       int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+       if (!evt) {
+               ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
+               ibmvfc_hard_reset_host(vhost);
+               return;
+       }
 
        ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
        mad = &evt->iu.channel_enquiry;
@@ -5132,7 +5270,13 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
 {
        struct ibmvfc_npiv_login_mad *mad;
-       struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+       struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
+
+       if (!evt) {
+               ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
+               ibmvfc_hard_reset_host(vhost);
+               return;
+       }
 
        ibmvfc_gather_partition_info(vhost);
        ibmvfc_set_login_info(vhost);
@@ -5197,7 +5341,13 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
        struct ibmvfc_npiv_logout_mad *mad;
        struct ibmvfc_event *evt;
 
-       evt = ibmvfc_get_event(&vhost->crq);
+       evt = ibmvfc_get_reserved_event(&vhost->crq);
+       if (!evt) {
+               ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
+               ibmvfc_hard_reset_host(vhost);
+               return;
+       }
+
        ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
 
        mad = &evt->iu.npiv_logout;
@@ -5645,7 +5795,6 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
 {
        struct device *dev = vhost->dev;
        size_t fmt_size;
-       unsigned int pool_size = 0;
 
        ENTER;
        spin_lock_init(&queue->_lock);
@@ -5654,7 +5803,9 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
        switch (fmt) {
        case IBMVFC_CRQ_FMT:
                fmt_size = sizeof(*queue->msgs.crq);
-               pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+               queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
+               queue->evt_depth = scsi_qdepth;
+               queue->reserved_depth = IBMVFC_NUM_INTERNAL_REQ;
                break;
        case IBMVFC_ASYNC_FMT:
                fmt_size = sizeof(*queue->msgs.async);
@@ -5662,14 +5813,17 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
        case IBMVFC_SUB_CRQ_FMT:
                fmt_size = sizeof(*queue->msgs.scrq);
                /* We need one extra event for Cancel Commands */
-               pool_size = max_requests + 1;
+               queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ;
+               queue->evt_depth = scsi_qdepth;
+               queue->reserved_depth = IBMVFC_NUM_INTERNAL_SUBQ_REQ;
                break;
        default:
                dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
                return -EINVAL;
        }
 
-       if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
+       queue->fmt = fmt;
+       if (ibmvfc_init_event_pool(vhost, queue)) {
                dev_err(dev, "Couldn't initialize event pool.\n");
                return -ENOMEM;
        }
@@ -5688,7 +5842,6 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
        }
 
        queue->cur = 0;
-       queue->fmt = fmt;
        queue->size = PAGE_SIZE / fmt_size;
 
        queue->vhost = vhost;
@@ -5757,12 +5910,13 @@ reg_crq_failed:
        return retrc;
 }
 
-static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
-                                 int index)
+static int ibmvfc_register_channel(struct ibmvfc_host *vhost,
+                                  struct ibmvfc_channels *channels,
+                                  int index)
 {
        struct device *dev = vhost->dev;
        struct vio_dev *vdev = to_vio_dev(dev);
-       struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
+       struct ibmvfc_queue *scrq = &channels->scrqs[index];
        int rc = -ENOMEM;
 
        ENTER;
@@ -5786,9 +5940,24 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
                goto irq_failed;
        }
 
-       snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
-                vdev->unit_address, index);
-       rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq);
+       switch (channels->protocol) {
+       case IBMVFC_PROTO_SCSI:
+               snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
+                        vdev->unit_address, index);
+               scrq->handler = ibmvfc_interrupt_mq;
+               break;
+       case IBMVFC_PROTO_NVME:
+               snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-nvmf%d",
+                        vdev->unit_address, index);
+               scrq->handler = ibmvfc_interrupt_mq;
+               break;
+       default:
+               dev_err(dev, "Unknown channel protocol (%d)\n",
+                       channels->protocol);
+               goto irq_failed;
+       }
+
+       rc = request_irq(scrq->irq, scrq->handler, 0, scrq->name, scrq);
 
        if (rc) {
                dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
@@ -5804,17 +5973,19 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
 irq_failed:
        do {
                rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
-       } while (rtas_busy_delay(rc));
+       } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 reg_failed:
        LEAVE;
        return rc;
 }
 
-static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
+static void ibmvfc_deregister_channel(struct ibmvfc_host *vhost,
+                                     struct ibmvfc_channels *channels,
+                                     int index)
 {
        struct device *dev = vhost->dev;
        struct vio_dev *vdev = to_vio_dev(dev);
-       struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
+       struct ibmvfc_queue *scrq = &channels->scrqs[index];
        long rc;
 
        ENTER;
@@ -5838,18 +6009,19 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
        LEAVE;
 }
 
-static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost,
+                               struct ibmvfc_channels *channels)
 {
        int i, j;
 
        ENTER;
-       if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+       if (!vhost->mq_enabled || !channels->scrqs)
                return;
 
-       for (i = 0; i < nr_scsi_hw_queues; i++) {
-               if (ibmvfc_register_scsi_channel(vhost, i)) {
+       for (i = 0; i < channels->max_queues; i++) {
+               if (ibmvfc_register_channel(vhost, channels, i)) {
                        for (j = i; j > 0; j--)
-                               ibmvfc_deregister_scsi_channel(vhost, j - 1);
+                               ibmvfc_deregister_channel(vhost, channels, j - 1);
                        vhost->do_enquiry = 0;
                        return;
                }
@@ -5858,80 +6030,105 @@ static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
        LEAVE;
 }
 
-static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost)
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost,
+                                 struct ibmvfc_channels *channels)
 {
        int i;
 
        ENTER;
-       if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+       if (!vhost->mq_enabled || !channels->scrqs)
                return;
 
-       for (i = 0; i < nr_scsi_hw_queues; i++)
-               ibmvfc_deregister_scsi_channel(vhost, i);
+       for (i = 0; i < channels->max_queues; i++)
+               ibmvfc_deregister_channel(vhost, channels, i);
 
        LEAVE;
 }
 
-static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
+static int ibmvfc_alloc_channels(struct ibmvfc_host *vhost,
+                                struct ibmvfc_channels *channels)
 {
        struct ibmvfc_queue *scrq;
        int i, j;
+       int rc = 0;
 
+       channels->scrqs = kcalloc(channels->max_queues,
+                                 sizeof(*channels->scrqs),
+                                 GFP_KERNEL);
+       if (!channels->scrqs)
+               return -ENOMEM;
+
+       for (i = 0; i < channels->max_queues; i++) {
+               scrq = &channels->scrqs[i];
+               rc = ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT);
+               if (rc) {
+                       for (j = i; j > 0; j--) {
+                               scrq = &channels->scrqs[j - 1];
+                               ibmvfc_free_queue(vhost, scrq);
+                       }
+                       kfree(channels->scrqs);
+                       channels->scrqs = NULL;
+                       channels->active_queues = 0;
+                       return rc;
+               }
+       }
+
+       return rc;
+}
+
+static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
+{
        ENTER;
        if (!vhost->mq_enabled)
                return;
 
-       vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues,
-                                         sizeof(*vhost->scsi_scrqs.scrqs),
-                                         GFP_KERNEL);
-       if (!vhost->scsi_scrqs.scrqs) {
+       if (ibmvfc_alloc_channels(vhost, &vhost->scsi_scrqs)) {
                vhost->do_enquiry = 0;
+               vhost->mq_enabled = 0;
                return;
        }
 
-       for (i = 0; i < nr_scsi_hw_queues; i++) {
-               scrq = &vhost->scsi_scrqs.scrqs[i];
-               if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) {
-                       for (j = i; j > 0; j--) {
-                               scrq = &vhost->scsi_scrqs.scrqs[j - 1];
-                               ibmvfc_free_queue(vhost, scrq);
-                       }
-                       kfree(vhost->scsi_scrqs.scrqs);
-                       vhost->scsi_scrqs.scrqs = NULL;
-                       vhost->scsi_scrqs.active_queues = 0;
-                       vhost->do_enquiry = 0;
-                       vhost->mq_enabled = 0;
-                       return;
-               }
-       }
-
-       ibmvfc_reg_sub_crqs(vhost);
+       ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
 
        LEAVE;
 }
 
-static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
+static void ibmvfc_release_channels(struct ibmvfc_host *vhost,
+                                   struct ibmvfc_channels *channels)
 {
        struct ibmvfc_queue *scrq;
        int i;
 
+       if (channels->scrqs) {
+               for (i = 0; i < channels->max_queues; i++) {
+                       scrq = &channels->scrqs[i];
+                       ibmvfc_free_queue(vhost, scrq);
+               }
+
+               kfree(channels->scrqs);
+               channels->scrqs = NULL;
+               channels->active_queues = 0;
+       }
+}
+
+static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
+{
        ENTER;
        if (!vhost->scsi_scrqs.scrqs)
                return;
 
-       ibmvfc_dereg_sub_crqs(vhost);
+       ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
 
-       for (i = 0; i < nr_scsi_hw_queues; i++) {
-               scrq = &vhost->scsi_scrqs.scrqs[i];
-               ibmvfc_free_queue(vhost, scrq);
-       }
-
-       kfree(vhost->scsi_scrqs.scrqs);
-       vhost->scsi_scrqs.scrqs = NULL;
-       vhost->scsi_scrqs.active_queues = 0;
+       ibmvfc_release_channels(vhost, &vhost->scsi_scrqs);
        LEAVE;
 }
 
+static void ibmvfc_free_disc_buf(struct device *dev, struct ibmvfc_channels *channels)
+{
+       dma_free_coherent(dev, channels->disc_buf_sz, channels->disc_buf,
+                         channels->disc_buf_dma);
+}
+
 /**
  * ibmvfc_free_mem - Free memory for vhost
  * @vhost:     ibmvfc host struct
@@ -5946,8 +6143,7 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
        ENTER;
        mempool_destroy(vhost->tgt_pool);
        kfree(vhost->trace);
-       dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
-                         vhost->disc_buf_dma);
+       ibmvfc_free_disc_buf(vhost->dev, &vhost->scsi_scrqs);
        dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
                          vhost->login_buf, vhost->login_buf_dma);
        dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
@@ -5957,6 +6153,21 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
        LEAVE;
 }
 
+static int ibmvfc_alloc_disc_buf(struct device *dev, struct ibmvfc_channels *channels)
+{
+       channels->disc_buf_sz = sizeof(*channels->disc_buf) * max_targets;
+       channels->disc_buf = dma_alloc_coherent(dev, channels->disc_buf_sz,
+                                            &channels->disc_buf_dma, GFP_KERNEL);
+
+       if (!channels->disc_buf) {
+               dev_err(dev, "Couldn't allocate %s Discover Targets buffer\n",
+                       (channels->protocol == IBMVFC_PROTO_SCSI) ? "SCSI" : "NVMe");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
 /**
  * ibmvfc_alloc_mem - Allocate memory for vhost
  * @vhost:     ibmvfc host struct
@@ -5992,21 +6203,15 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
                goto free_sg_pool;
        }
 
-       vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
-       vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
-                                            &vhost->disc_buf_dma, GFP_KERNEL);
-
-       if (!vhost->disc_buf) {
-               dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
+       if (ibmvfc_alloc_disc_buf(dev, &vhost->scsi_scrqs))
                goto free_login_buffer;
-       }
 
        vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
                               sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
        atomic_set(&vhost->trace_index, -1);
 
        if (!vhost->trace)
-               goto free_disc_buffer;
+               goto free_scsi_disc_buffer;
 
        vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
                                                      sizeof(struct ibmvfc_target));
@@ -6032,9 +6237,8 @@ free_tgt_pool:
        mempool_destroy(vhost->tgt_pool);
 free_trace:
        kfree(vhost->trace);
-free_disc_buffer:
-       dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
-                         vhost->disc_buf_dma);
+free_scsi_disc_buffer:
+       ibmvfc_free_disc_buf(dev, &vhost->scsi_scrqs);
 free_login_buffer:
        dma_free_coherent(dev, sizeof(*vhost->login_buf),
                          vhost->login_buf, vhost->login_buf_dma);
@@ -6113,7 +6317,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        struct Scsi_Host *shost;
        struct device *dev = &vdev->dev;
        int rc = -ENOMEM;
-       unsigned int max_scsi_queues = IBMVFC_MAX_SCSI_QUEUES;
+       unsigned int online_cpus = num_online_cpus();
+       unsigned int max_scsi_queues = min((unsigned int)IBMVFC_MAX_SCSI_QUEUES, online_cpus);
 
        ENTER;
        shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
@@ -6123,7 +6328,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        }
 
        shost->transportt = ibmvfc_transport_template;
-       shost->can_queue = max_requests;
+       shost->can_queue = scsi_qdepth;
        shost->max_lun = max_lun;
        shost->max_id = max_targets;
        shost->max_sectors = IBMVFC_MAX_SECTORS;
@@ -6142,7 +6347,9 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        vhost->task_set = 1;
 
        vhost->mq_enabled = mq_enabled;
-       vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
+       vhost->scsi_scrqs.desired_queues = min(shost->nr_hw_queues, nr_scsi_channels);
+       vhost->scsi_scrqs.max_queues = shost->nr_hw_queues;
+       vhost->scsi_scrqs.protocol = IBMVFC_PROTO_SCSI;
        vhost->using_channels = 0;
        vhost->do_enquiry = 1;
        vhost->scan_timeout = 0;
@@ -6282,7 +6489,9 @@ static int ibmvfc_resume(struct device *dev)
  */
 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
 {
-       unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
+       unsigned long pool_dma;
+
+       pool_dma = (IBMVFC_MAX_SCSI_QUEUES * scsi_qdepth) * sizeof(union ibmvfc_iu);
        return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
 }
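
The desired-DMA estimate above now scales with the per-queue depth rather than the legacy max_requests module parameter. A worked example of the new formula; the queue count, IU size, and cmd_per_lun values below are assumptions for illustration only, not the driver's actual constants:

#include <stdio.h>

int main(void)
{
        unsigned long max_scsi_queues = 16;     /* assumed value of IBMVFC_MAX_SCSI_QUEUES */
        unsigned long scsi_qdepth = 128;        /* IBMVFC_SCSI_QDEPTH default */
        unsigned long iu_size = 1024;           /* assumed sizeof(union ibmvfc_iu) */
        unsigned long cmd_per_lun = 16;         /* assumed driver_template.cmd_per_lun */

        unsigned long pool_dma = max_scsi_queues * scsi_qdepth * iu_size;
        unsigned long desired = pool_dma + (512UL * 1024) * cmd_per_lun;

        printf("pool_dma = %lu bytes, desired DMA = %lu bytes\n", pool_dma, desired);
        return 0;
}
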
 
index c39a245f43d0254e9e51538fc3cc0ac579e4746a..745ad5ac72517b42f4b83ac6a5a5c22453dfb0ad 100644 (file)
@@ -27,6 +27,7 @@
 #define IBMVFC_ABORT_TIMEOUT           8
 #define IBMVFC_ABORT_WAIT_TIMEOUT      40
 #define IBMVFC_MAX_REQUESTS_DEFAULT    100
+#define IBMVFC_SCSI_QDEPTH             128
 
 #define IBMVFC_DEBUG                   0
 #define IBMVFC_MAX_TARGETS             1024
@@ -57,6 +58,8 @@
  * 2 for each discovery thread
  */
 #define IBMVFC_NUM_INTERNAL_REQ        (1 + 1 + 1 + 2 + (disc_threads * 2))
+/* Reserved subset of events for cancelling channelized I/O commands */
+#define IBMVFC_NUM_INTERNAL_SUBQ_REQ 4
 
 #define IBMVFC_MAD_SUCCESS             0x00
 #define IBMVFC_MAD_NOT_SUPPORTED       0xF1
@@ -713,9 +716,15 @@ enum ibmvfc_target_action {
        IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT,
 };
 
+enum ibmvfc_protocol {
+       IBMVFC_PROTO_SCSI = 0,
+       IBMVFC_PROTO_NVME = 1,
+};
+
 struct ibmvfc_target {
        struct list_head queue;
        struct ibmvfc_host *vhost;
+       enum ibmvfc_protocol protocol;
        u64 scsi_id;
        u64 wwpn;
        u64 new_scsi_id;
@@ -758,6 +767,7 @@ struct ibmvfc_event {
        struct completion *eh_comp;
        struct timer_list timer;
        u16 hwq;
+       u8 reserved;
 };
 
 /* a pool of event structs for use */
@@ -793,6 +803,11 @@ struct ibmvfc_queue {
        struct ibmvfc_event_pool evt_pool;
        struct list_head sent;
        struct list_head free;
+       u16 total_depth;
+       u16 evt_depth;
+       u16 reserved_depth;
+       u16 evt_free;
+       u16 reserved_free;
        spinlock_t l_lock;
 
        union ibmvfc_iu cancel_rsp;
@@ -804,11 +819,18 @@ struct ibmvfc_queue {
        unsigned long irq;
        unsigned long hwq_id;
        char name[32];
+       irq_handler_t handler;
 };
 
-struct ibmvfc_scsi_channels {
+struct ibmvfc_channels {
        struct ibmvfc_queue *scrqs;
+       enum ibmvfc_protocol protocol;
        unsigned int active_queues;
+       unsigned int desired_queues;
+       unsigned int max_queues;
+       int disc_buf_sz;
+       struct ibmvfc_discover_targets_entry *disc_buf;
+       dma_addr_t disc_buf_dma;
 };
 
 enum ibmvfc_host_action {
@@ -857,37 +879,33 @@ struct ibmvfc_host {
        mempool_t *tgt_pool;
        struct ibmvfc_queue crq;
        struct ibmvfc_queue async_crq;
-       struct ibmvfc_scsi_channels scsi_scrqs;
+       struct ibmvfc_channels scsi_scrqs;
        struct ibmvfc_npiv_login login_info;
        union ibmvfc_npiv_login_data *login_buf;
        dma_addr_t login_buf_dma;
        struct ibmvfc_channel_setup *channel_setup_buf;
        dma_addr_t channel_setup_dma;
-       int disc_buf_sz;
        int log_level;
-       struct ibmvfc_discover_targets_entry *disc_buf;
        struct mutex passthru_mutex;
-       int max_vios_scsi_channels;
+       unsigned int max_vios_scsi_channels;
        int task_set;
        int init_retries;
        int discovery_threads;
        int abort_threads;
-       int client_migrated;
-       int reinit;
-       int delay_init;
-       int scan_complete;
+       unsigned int client_migrated:1;
+       unsigned int reinit:1;
+       unsigned int delay_init:1;
+       unsigned int logged_in:1;
+       unsigned int mq_enabled:1;
+       unsigned int using_channels:1;
+       unsigned int do_enquiry:1;
+       unsigned int aborting_passthru:1;
+       unsigned int scan_complete:1;
        int scan_timeout;
-       int logged_in;
-       int mq_enabled;
-       int using_channels;
-       int do_enquiry;
-       int client_scsi_channels;
-       int aborting_passthru;
        int events_to_log;
 #define IBMVFC_AE_LINKUP       0x0001
 #define IBMVFC_AE_LINKDOWN     0x0002
 #define IBMVFC_AE_RSCN         0x0004
-       dma_addr_t disc_buf_dma;
        unsigned int partition_number;
        char partition_name[97];
        void (*job_step) (struct ibmvfc_host *);
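
On the structure change above: the row of standalone int flags becomes single-bit bitfields, packing what used to be nine separate ints into a single word. A tiny illustration of the difference in footprint; the struct and member names are generic stand-ins, not the driver's own types:

#include <stdio.h>

struct flags_as_ints {
        int client_migrated, reinit, delay_init, logged_in, mq_enabled;
        int using_channels, do_enquiry, aborting_passthru, scan_complete;
};

struct flags_as_bits {
        unsigned int client_migrated:1;
        unsigned int reinit:1;
        unsigned int delay_init:1;
        unsigned int logged_in:1;
        unsigned int mq_enabled:1;
        unsigned int using_channels:1;
        unsigned int do_enquiry:1;
        unsigned int aborting_passthru:1;
        unsigned int scan_complete:1;
};

int main(void)
{
        printf("ints: %zu bytes, bitfields: %zu bytes\n",
               sizeof(struct flags_as_ints), sizeof(struct flags_as_bits));
        return 0;
}
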
index 385f812b8793c26ef3321df602b28f6123f401e9..4dc411a58107010dca5ddb73e54ebc231c679631 100644 (file)
@@ -3975,6 +3975,9 @@ static const struct target_core_fabric_ops ibmvscsis_ops = {
        .fabric_drop_tpg                = ibmvscsis_drop_tpg,
 
        .tfc_wwn_attrs                  = ibmvscsis_wwn_attrs,
+
+       .default_submit_type            = TARGET_DIRECT_SUBMIT,
+       .direct_submit_supp             = 1,
 };
 
 static void ibmvscsis_dev_release(struct device *dev) {};
index 07db98161a0365e0158d7a29b9e4f96e4aed2440..180a5ddedb2cdaa0c9f57050618707ed97045337 100644 (file)
@@ -51,10 +51,15 @@ typedef struct {
 } imm_struct;
 
 static void imm_reset_pulse(unsigned int base);
-static int device_check(imm_struct *dev);
+static int device_check(imm_struct *dev, bool autodetect);
 
 #include "imm.h"
 
+static unsigned int mode = IMM_AUTODETECT;
+module_param(mode, uint, 0644);
+MODULE_PARM_DESC(mode, "Transfer mode (0 = Autodetect, 1 = SPP 4-bit, "
+       "2 = SPP 8-bit, 3 = EPP 8-bit, 4 = EPP 16-bit, 5 = EPP 32-bit");
+
 static inline imm_struct *imm_dev(struct Scsi_Host *host)
 {
        return *(imm_struct **)&host->hostdata;
@@ -366,13 +371,10 @@ static int imm_out(imm_struct *dev, char *buffer, int len)
        case IMM_EPP_8:
                epp_reset(ppb);
                w_ctr(ppb, 0x4);
-#ifdef CONFIG_SCSI_IZIP_EPP16
-               if (!(((long) buffer | len) & 0x01))
-                       outsw(ppb + 4, buffer, len >> 1);
-#else
-               if (!(((long) buffer | len) & 0x03))
+               if (dev->mode == IMM_EPP_32 && !(((long) buffer | len) & 0x03))
                        outsl(ppb + 4, buffer, len >> 2);
-#endif
+               else if (dev->mode == IMM_EPP_16 && !(((long) buffer | len) & 0x01))
+                       outsw(ppb + 4, buffer, len >> 1);
                else
                        outsb(ppb + 4, buffer, len);
                w_ctr(ppb, 0xc);
@@ -426,13 +428,10 @@ static int imm_in(imm_struct *dev, char *buffer, int len)
        case IMM_EPP_8:
                epp_reset(ppb);
                w_ctr(ppb, 0x24);
-#ifdef CONFIG_SCSI_IZIP_EPP16
-               if (!(((long) buffer | len) & 0x01))
-                       insw(ppb + 4, buffer, len >> 1);
-#else
-               if (!(((long) buffer | len) & 0x03))
-                       insl(ppb + 4, buffer, len >> 2);
-#endif
+               if (dev->mode == IMM_EPP_32 && !(((long) buffer | len) & 0x03))
+                       insl(ppb + 4, buffer, len >> 2);
+               else if (dev->mode == IMM_EPP_16 && !(((long) buffer | len) & 0x01))
+                       insw(ppb + 4, buffer, len >> 1);
                else
                        insb(ppb + 4, buffer, len);
                w_ctr(ppb, 0x2c);
@@ -589,13 +588,28 @@ static int imm_select(imm_struct *dev, int target)
 
 static int imm_init(imm_struct *dev)
 {
+       bool autodetect = dev->mode == IMM_AUTODETECT;
+
+       if (autodetect) {
+               int modes = dev->dev->port->modes;
+
+               /* Mode detection works up the chain of speed
+                * This avoids a nasty if-then-else-if-... tree
+                */
+               dev->mode = IMM_NIBBLE;
+
+               if (modes & PARPORT_MODE_TRISTATE)
+                       dev->mode = IMM_PS2;
+       }
+
        if (imm_connect(dev, 0) != 1)
                return -EIO;
        imm_reset_pulse(dev->base);
        mdelay(1);      /* Delay to allow devices to settle */
        imm_disconnect(dev);
        mdelay(1);      /* Another delay to allow devices to settle */
-       return device_check(dev);
+
+       return device_check(dev, autodetect);
 }
 
 static inline int imm_send_command(struct scsi_cmnd *cmd)
@@ -1000,7 +1014,7 @@ static int imm_reset(struct scsi_cmnd *cmd)
        return SUCCESS;
 }
 
-static int device_check(imm_struct *dev)
+static int device_check(imm_struct *dev, bool autodetect)
 {
        /* This routine looks for a device and then attempts to use EPP
           to send a command. If all goes as planned then EPP is available. */
@@ -1012,8 +1026,8 @@ static int device_check(imm_struct *dev)
        old_mode = dev->mode;
        for (loop = 0; loop < 8; loop++) {
                /* Attempt to use EPP for Test Unit Ready */
-               if ((ppb & 0x0007) == 0x0000)
-                       dev->mode = IMM_EPP_32;
+               if (autodetect && (ppb & 0x0007) == 0x0000)
+                       dev->mode = IMM_EPP_8;
 
              second_pass:
                imm_connect(dev, CONNECT_EPP_MAYBE);
@@ -1038,7 +1052,7 @@ static int device_check(imm_struct *dev)
                        udelay(1000);
                        imm_disconnect(dev);
                        udelay(1000);
-                       if (dev->mode == IMM_EPP_32) {
+                       if (dev->mode != old_mode) {
                                dev->mode = old_mode;
                                goto second_pass;
                        }
@@ -1063,7 +1077,7 @@ static int device_check(imm_struct *dev)
                        udelay(1000);
                        imm_disconnect(dev);
                        udelay(1000);
-                       if (dev->mode == IMM_EPP_32) {
+                       if (dev->mode != old_mode) {
                                dev->mode = old_mode;
                                goto second_pass;
                        }
@@ -1150,7 +1164,6 @@ static int __imm_attach(struct parport *pb)
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting);
        DEFINE_WAIT(wait);
        int ports;
-       int modes, ppb;
        int err = -ENOMEM;
        struct pardev_cb imm_cb;
 
@@ -1162,7 +1175,7 @@ static int __imm_attach(struct parport *pb)
 
 
        dev->base = -1;
-       dev->mode = IMM_AUTODETECT;
+       dev->mode = mode < IMM_UNKNOWN ? mode : IMM_AUTODETECT;
        INIT_LIST_HEAD(&dev->list);
 
        temp = find_parent();
@@ -1197,18 +1210,9 @@ static int __imm_attach(struct parport *pb)
        }
        dev->waiting = NULL;
        finish_wait(&waiting, &wait);
-       ppb = dev->base = dev->dev->port->base;
+       dev->base = dev->dev->port->base;
        dev->base_hi = dev->dev->port->base_hi;
-       w_ctr(ppb, 0x0c);
-       modes = dev->dev->port->modes;
-
-       /* Mode detection works up the chain of speed
-        * This avoids a nasty if-then-else-if-... tree
-        */
-       dev->mode = IMM_NIBBLE;
-
-       if (modes & PARPORT_MODE_TRISTATE)
-               dev->mode = IMM_PS2;
+       w_ctr(dev->base, 0x0c);
 
        /* Done configuration */
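Editor's note: with the new mode parameter, the imm EPP copy paths above select the widest port transfer that both the configured mode and the buffer alignment permit, falling back to byte I/O otherwise. A condensed, self-contained sketch of that dispatch (the helper name epp_copy_in is illustrative; insl/insw/insb are the standard port-I/O accessors):

	static void epp_copy_in(unsigned int mode, unsigned int ppb,
				char *buffer, int len)
	{
		if (mode == IMM_EPP_32 && !(((long) buffer | len) & 0x03))
			insl(ppb + 4, buffer, len >> 2);	/* len/4 dwords */
		else if (mode == IMM_EPP_16 && !(((long) buffer | len) & 0x01))
			insw(ppb + 4, buffer, len >> 1);	/* len/2 words */
		else
			insb(ppb + 4, buffer, len);		/* byte at a time */
	}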
 
index 411cf94af5b000c39a232ca661af808ba3ea3110..398fa5b15181ce9862b980bc76b86bb0ee8a2d37 100644 (file)
@@ -100,11 +100,7 @@ static char *IMM_MODE_STRING[] =
        [IMM_PS2]        = "PS/2",
        [IMM_EPP_8]      = "EPP 8 bit",
        [IMM_EPP_16]     = "EPP 16 bit",
-#ifdef CONFIG_SCSI_IZIP_EPP16
-       [IMM_EPP_32]     = "EPP 16 bit",
-#else
        [IMM_EPP_32]     = "EPP 32 bit",
-#endif
        [IMM_UNKNOWN]    = "Unknown",
 };
 
index bb206509265e9205ac87a81dbf1f602ccb9f1370..10cf5775a939110e206829c0622dd6c1788685fc 100644 (file)
@@ -835,7 +835,6 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
        int i;
        ips_ha_t *ha;
        ips_scb_t *scb;
-       ips_copp_wait_item_t *item;
 
        METHOD_TRACE("ips_eh_reset", 1);
 
@@ -860,23 +859,6 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
        if (!ha->active)
                return (FAILED);
 
-       /* See if the command is on the copp queue */
-       item = ha->copp_waitlist.head;
-       while ((item) && (item->scsi_cmd != SC))
-               item = item->next;
-
-       if (item) {
-               /* Found it */
-               ips_removeq_copp(&ha->copp_waitlist, item);
-               return (SUCCESS);
-       }
-
-       /* See if the command is on the wait queue */
-       if (ips_removeq_wait(&ha->scb_waitlist, SC)) {
-               /* command not sent yet */
-               return (SUCCESS);
-       }
-
        /* An explanation for the casual observer:                              */
        /* Part of the function of a RAID controller is automatic error         */
        /* detection and recovery.  As such, the only problem that physically   */
index 9c02c9523c4d4d7264098b346a97aa2eb2fee9b0..ab06e9aeb613e7c7d9cf4e45ea7c02b98e7106e8 100644 (file)
@@ -241,6 +241,12 @@ static void fc_lport_ptp_setup(struct fc_lport *lport,
        }
        mutex_lock(&lport->disc.disc_mutex);
        lport->ptp_rdata = fc_rport_create(lport, remote_fid);
+       if (!lport->ptp_rdata) {
+               printk(KERN_WARNING "libfc: Failed to setup lport 0x%x\n",
+                       lport->port_id);
+               mutex_unlock(&lport->disc.disc_mutex);
+               return;
+       }
        kref_get(&lport->ptp_rdata->kref);
        lport->ptp_rdata->ids.port_name = remote_wwpn;
        lport->ptp_rdata->ids.node_name = remote_wwnn;
index ff7b63b10aeb372939d12c95fb4dc1be490c6f1e..8fb7c41c096245077c46b671209623d902fe9c3f 100644 (file)
@@ -275,7 +275,7 @@ static void sas_resume_devices(struct work_struct *work)
  *
  * See comment in sas_discover_sata().
  */
-int sas_discover_end_dev(struct domain_device *dev)
+static int sas_discover_end_dev(struct domain_device *dev)
 {
        return sas_notify_lldd_dev_found(dev);
 }
index 8586dc79f2a0bc12b78971c9bef899add77cdefb..9c8cc723170d16e6fbf701a13a6f23a8f4995094 100644 (file)
@@ -315,8 +315,8 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
 }
 EXPORT_SYMBOL_GPL(sas_phy_reset);
 
-int sas_set_phy_speed(struct sas_phy *phy,
-                     struct sas_phy_linkrates *rates)
+static int sas_set_phy_speed(struct sas_phy *phy,
+                            struct sas_phy_linkrates *rates)
 {
        int ret;
 
index a6dc7dc07fce3799d03cc74cbc1b06a926b28377..3804aef165adb20a75b269e9089060259704e8eb 100644 (file)
@@ -39,6 +39,18 @@ struct sas_phy_data {
        struct sas_work enable_work;
 };
 
+void sas_hash_addr(u8 *hashed, const u8 *sas_addr);
+
+int sas_discover_root_expander(struct domain_device *dev);
+
+int sas_ex_revalidate_domain(struct domain_device *dev);
+void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
+void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port);
+void sas_discover_event(struct asd_sas_port *port, enum discover_event ev);
+
+void sas_init_dev(struct domain_device *dev);
+void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev);
+
 void sas_scsi_recover_host(struct Scsi_Host *shost);
 
 int  sas_register_phys(struct sas_ha_struct *sas_ha);
index 7f9b221e7c34ae25e663ef30d98075f4f1e83446..ea9b42225e629dd5840e29c8bce622aef7736444 100644 (file)
@@ -6073,7 +6073,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
                                            phba->hba_debugfs_root,
                                            phba,
                                            &lpfc_debugfs_op_multixripools);
-               if (!phba->debug_multixri_pools) {
+               if (IS_ERR(phba->debug_multixri_pools)) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                                         "0527 Cannot create debugfs multixripools\n");
                        goto debug_failed;
@@ -6085,7 +6085,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
                        debugfs_create_file(name, S_IFREG | 0644,
                                            phba->hba_debugfs_root,
                                            phba, &lpfc_cgn_buffer_op);
-               if (!phba->debug_cgn_buffer) {
+               if (IS_ERR(phba->debug_cgn_buffer)) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                                         "6527 Cannot create debugfs "
                                         "cgn_buffer\n");
@@ -6098,7 +6098,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
                        debugfs_create_file(name, S_IFREG | 0644,
                                            phba->hba_debugfs_root,
                                            phba, &lpfc_rx_monitor_op);
-               if (!phba->debug_rx_monitor) {
+               if (IS_ERR(phba->debug_rx_monitor)) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                                         "6528 Cannot create debugfs "
                                         "rx_monitor\n");
@@ -6111,7 +6111,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
                        debugfs_create_file(name, 0644,
                                            phba->hba_debugfs_root,
                                            phba, &lpfc_debugfs_ras_log);
-               if (!phba->debug_ras_log) {
+               if (IS_ERR(phba->debug_ras_log)) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                                         "6148 Cannot create debugfs"
                                         " ras_log\n");
@@ -6132,7 +6132,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
                        debugfs_create_file(name, S_IFREG | 0644,
                                            phba->hba_debugfs_root,
                                            phba, &lpfc_debugfs_op_lockstat);
-               if (!phba->debug_lockstat) {
+               if (IS_ERR(phba->debug_lockstat)) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                                         "4610 Can't create debugfs lockstat\n");
                        goto debug_failed;
@@ -6358,7 +6358,7 @@ nvmeio_off:
                debugfs_create_file(name, 0644,
                                    vport->vport_debugfs_root,
                                    vport, &lpfc_debugfs_op_scsistat);
-       if (!vport->debug_scsistat) {
+       if (IS_ERR(vport->debug_scsistat)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                                 "4611 Cannot create debugfs scsistat\n");
                goto debug_failed;
@@ -6369,7 +6369,7 @@ nvmeio_off:
                debugfs_create_file(name, 0644,
                                    vport->vport_debugfs_root,
                                    vport, &lpfc_debugfs_op_ioktime);
-       if (!vport->debug_ioktime) {
+       if (IS_ERR(vport->debug_ioktime)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                                 "0815 Cannot create debugfs ioktime\n");
                goto debug_failed;
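Editor's note: the lpfc_debugfs.c hunks above switch from NULL checks to IS_ERR() because, on current kernels, debugfs_create_file() reports failure through an ERR_PTR-encoded pointer rather than NULL. Minimal sketch of the check (the file name, parent, priv and fops are placeholders):

	struct dentry *dentry;

	dentry = debugfs_create_file("example", 0644, parent, priv, &fops);
	if (IS_ERR(dentry))		/* failure is an ERR_PTR, not NULL */
		pr_err("debugfs create failed: %ld\n", PTR_ERR(dentry));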
index 36de8ccb123821b497b2f2b187634489d953fcca..7ef9841f0728885206e5f0860a5b7fcbcb5a7b24 100644 (file)
@@ -199,11 +199,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
                /* Only 1 thread can drop the initial node reference.  If
                 * another thread has set NLP_DROPPED, this thread is done.
                 */
-               if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+               if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) &&
+                   !(ndlp->nlp_flag & NLP_DROPPED)) {
                        ndlp->nlp_flag |= NLP_DROPPED;
                        spin_unlock_irqrestore(&ndlp->lock, iflags);
                        lpfc_nlp_put(ndlp);
-                       spin_lock_irqsave(&ndlp->lock, iflags);
+                       return;
                }
 
                spin_unlock_irqrestore(&ndlp->lock, iflags);
index 8f750d7d9d95caf14106a40fc38d8c04a23421e8..128fc1bab58653dc4b7f428c0a9ce58f687455d4 100644 (file)
@@ -228,8 +228,7 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
        spin_unlock_irq(&ndlp->lock);
 
        /* On a devloss timeout event, one more put is executed provided the
-        * NVME and SCSI rport unregister requests are complete.  If the vport
-        * is unloading, this extra put is executed by lpfc_drop_node.
+        * NVME and SCSI rport unregister requests are complete.
         */
        if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
@@ -2569,11 +2568,7 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
  * nvme_transport perspective.  Loss of an rport just means IO cannot
  * be sent and recovery is completely up to the initator.
  * For now, the driver just unbinds the DID and port_role so that
- * no further IO can be issued.  Changes are planned for later.
- *
- * Notes - the ndlp reference count is not decremented here since
- * since there is no nvme_transport api for devloss.  Node ref count
- * is only adjusted in driver unload.
+ * no further IO can be issued.
  */
 void
 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
@@ -2648,6 +2643,21 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                                         "6167 NVME unregister failed %d "
                                         "port_state x%x\n",
                                         ret, remoteport->port_state);
+
+                       if (vport->load_flag & FC_UNLOADING) {
+                               /* Only 1 thread can drop the initial node
+                                * reference. Check if another thread has set
+                                * NLP_DROPPED.
+                                */
+                               spin_lock_irq(&ndlp->lock);
+                               if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+                                       ndlp->nlp_flag |= NLP_DROPPED;
+                                       spin_unlock_irq(&ndlp->lock);
+                                       lpfc_nlp_put(ndlp);
+                                       return;
+                               }
+                               spin_unlock_irq(&ndlp->lock);
+                       }
                }
        }
        return;
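Editor's note: both lpfc hunks above lean on the same test-and-set idiom: only the thread that first sets NLP_DROPPED under the node lock may drop the initial node reference. A condensed sketch of that idiom (names as in the driver, locking variant simplified):

	spin_lock_irq(&ndlp->lock);
	if (!(ndlp->nlp_flag & NLP_DROPPED)) {	/* first dropper wins */
		ndlp->nlp_flag |= NLP_DROPPED;
		spin_unlock_irq(&ndlp->lock);
		lpfc_nlp_put(ndlp);		/* release the initial reference */
		return;
	}
	spin_unlock_irq(&ndlp->lock);		/* someone else already dropped it */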
index e92f1a73cc9bccdec8ad7af534f44b9a4e5f26f8..329c3da88416a83d053d8f923cfc4f13d6e86b05 100644 (file)
@@ -1898,7 +1898,7 @@ megaraid_reset(struct scsi_cmnd *cmd)
 
        spin_lock_irq(&adapter->lock);
 
-       rval =  megaraid_abort_and_reset(adapter, cmd, SCB_RESET);
+       rval =  megaraid_abort_and_reset(adapter, NULL, SCB_RESET);
 
        /*
         * This is required here to complete any completed requests
@@ -1937,7 +1937,7 @@ megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
 
                scb = list_entry(pos, scb_t, list);
 
-               if (scb->cmd == cmd) { /* Found command */
+               if (!cmd || scb->cmd == cmd) { /* Found command */
 
                        scb->state |= aor;
 
@@ -1956,31 +1956,23 @@ megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
 
                                return FAILED;
                        }
-                       else {
-
-                               /*
-                                * Not yet issued! Remove from the pending
-                                * list
-                                */
-                               dev_warn(&adapter->dev->dev,
-                                       "%s-[%x], driver owner\n",
-                                       (aor==SCB_ABORT) ? "ABORTING":"RESET",
-                                       scb->idx);
-
-                               mega_free_scb(adapter, scb);
-
-                               if( aor == SCB_ABORT ) {
-                                       cmd->result = (DID_ABORT << 16);
-                               }
-                               else {
-                                       cmd->result = (DID_RESET << 16);
-                               }
+                       /*
+                        * Not yet issued! Remove from the pending
+                        * list
+                        */
+                       dev_warn(&adapter->dev->dev,
+                                "%s-[%x], driver owner\n",
+                                (cmd) ? "ABORTING":"RESET",
+                                scb->idx);
+                       mega_free_scb(adapter, scb);
 
+                       if (cmd) {
+                               cmd->result = (DID_ABORT << 16);
                                list_add_tail(SCSI_LIST(cmd),
-                                               &adapter->completed_list);
-
-                               return SUCCESS;
+                                             &adapter->completed_list);
                        }
+
+                       return SUCCESS;
                }
        }
 
@@ -4114,8 +4106,6 @@ static const struct scsi_host_template megaraid_template = {
        .sg_tablesize                   = MAX_SGLIST,
        .cmd_per_lun                    = DEF_CMD_PER_LUN,
        .eh_abort_handler               = megaraid_abort,
-       .eh_device_reset_handler        = megaraid_reset,
-       .eh_bus_reset_handler           = megaraid_reset,
        .eh_host_reset_handler          = megaraid_reset,
        .no_write_same                  = 1,
        .cmd_size                       = sizeof(struct megaraid_cmd_priv),
index 3554f6b07727324ac5a8259be01bd4ecb8db6565..56624cbf7fa5e74e33e531d49af0121868fa93a0 100644 (file)
@@ -23,8 +23,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "07.725.01.00-rc1"
-#define MEGASAS_RELDATE                                "Mar 2, 2023"
+#define MEGASAS_VERSION                                "07.727.03.00-rc1"
+#define MEGASAS_RELDATE                                "Oct 03, 2023"
 
 #define MEGASAS_MSIX_NAME_LEN                  32
 
@@ -2332,7 +2332,7 @@ struct megasas_instance {
        u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
        bool use_seqnum_jbod_fp;   /* Added for PD sequence */
        bool smp_affinity_enable;
-       spinlock_t crashdump_lock;
+       struct mutex crashdump_lock;
 
        struct megasas_register_set __iomem *reg_set;
        u32 __iomem *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
index b9d46dcb521094fa9afc66aa09f7f9e8e73a7b4a..3d4f13da1ae873c1efdc573451071bd5693fa864 100644 (file)
@@ -263,13 +263,13 @@ u32 megasas_readl(struct megasas_instance *instance,
         * Fusion registers could intermittently return all zeroes.
         * This behavior is transient in nature and subsequent reads will
         * return valid value. As a workaround in driver, retry readl for
-        * upto three times until a non-zero value is read.
+        * up to thirty times until a non-zero value is read.
         */
        if (instance->adapter_type == AERO_SERIES) {
                do {
                        ret_val = readl(addr);
                        i++;
-               } while (ret_val == 0 && i < 3);
+               } while (ret_val == 0 && i < 30);
                return ret_val;
        } else {
                return readl(addr);
@@ -3271,14 +3271,13 @@ fw_crash_buffer_store(struct device *cdev,
        struct megasas_instance *instance =
                (struct megasas_instance *) shost->hostdata;
        int val = 0;
-       unsigned long flags;
 
        if (kstrtoint(buf, 0, &val) != 0)
                return -EINVAL;
 
-       spin_lock_irqsave(&instance->crashdump_lock, flags);
+       mutex_lock(&instance->crashdump_lock);
        instance->fw_crash_buffer_offset = val;
-       spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+       mutex_unlock(&instance->crashdump_lock);
        return strlen(buf);
 }
 
@@ -3293,24 +3292,23 @@ fw_crash_buffer_show(struct device *cdev,
        unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
        unsigned long chunk_left_bytes;
        unsigned long src_addr;
-       unsigned long flags;
        u32 buff_offset;
 
-       spin_lock_irqsave(&instance->crashdump_lock, flags);
+       mutex_lock(&instance->crashdump_lock);
        buff_offset = instance->fw_crash_buffer_offset;
        if (!instance->crash_dump_buf ||
                !((instance->fw_crash_state == AVAILABLE) ||
                (instance->fw_crash_state == COPYING))) {
                dev_err(&instance->pdev->dev,
                        "Firmware crash dump is not available\n");
-               spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+               mutex_unlock(&instance->crashdump_lock);
                return -EINVAL;
        }
 
        if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
                dev_err(&instance->pdev->dev,
                        "Firmware crash dump offset is out of range\n");
-               spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+               mutex_unlock(&instance->crashdump_lock);
                return 0;
        }
 
@@ -3322,7 +3320,7 @@ fw_crash_buffer_show(struct device *cdev,
        src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
                (buff_offset % dmachunk);
        memcpy(buf, (void *)src_addr, size);
-       spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+       mutex_unlock(&instance->crashdump_lock);
 
        return size;
 }
@@ -3347,7 +3345,6 @@ fw_crash_state_store(struct device *cdev,
        struct megasas_instance *instance =
                (struct megasas_instance *) shost->hostdata;
        int val = 0;
-       unsigned long flags;
 
        if (kstrtoint(buf, 0, &val) != 0)
                return -EINVAL;
@@ -3361,9 +3358,9 @@ fw_crash_state_store(struct device *cdev,
        instance->fw_crash_state = val;
 
        if ((val == COPIED) || (val == COPY_ERROR)) {
-               spin_lock_irqsave(&instance->crashdump_lock, flags);
+               mutex_lock(&instance->crashdump_lock);
                megasas_free_host_crash_buffer(instance);
-               spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+               mutex_unlock(&instance->crashdump_lock);
                if (val == COPY_ERROR)
                        dev_info(&instance->pdev->dev, "application failed to "
                                "copy Firmware crash dump\n");
@@ -7422,7 +7419,7 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
        init_waitqueue_head(&instance->int_cmd_wait_q);
        init_waitqueue_head(&instance->abort_cmd_wait_q);
 
-       spin_lock_init(&instance->crashdump_lock);
+       mutex_init(&instance->crashdump_lock);
        spin_lock_init(&instance->mfi_pool_lock);
        spin_lock_init(&instance->hba_lock);
        spin_lock_init(&instance->stream_lock);
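Editor's note: crashdump_lock appears to serialize only the fw_crash_* sysfs handlers shown above, which run in process context, so converting the irq-disabling spinlock to a mutex avoids keeping interrupts off while the dump buffer is copied or freed. Minimal usage sketch (names as in the hunks above):

	mutex_init(&instance->crashdump_lock);		/* once, at controller init */

	mutex_lock(&instance->crashdump_lock);
	instance->fw_crash_buffer_offset = val;		/* protected state */
	mutex_unlock(&instance->crashdump_lock);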
index 8a83f3fc2b865e7c6a430168c86b2f5fb5c5822e..c60014e07b449e2f2f5ab4abfe95bd3f472cf66b 100644 (file)
@@ -4268,6 +4268,9 @@ megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
        }
 
 out:
+       if (!retval && reason == SCSIIO_TIMEOUT_OCR)
+               dev_info(&instance->pdev->dev, "IO is completed, no OCR is required\n");
+
        return retval;
 }
 
index 89ba015c5d7e8d215d256aa92b3a65803b626a86..040031eb0c12d440663b522279fbfaf19309b183 100644 (file)
@@ -4012,20 +4012,45 @@ static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
  * mpi3mr_eh_host_reset - Host reset error handling callback
  * @scmd: SCSI command reference
  *
- * Issue controller reset if the scmd is for a Physical Device,
- * if the scmd is for RAID volume, then wait for
- * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checke whether any
- * pending I/Os prior to issuing reset to the controller.
+ * Issue controller reset
  *
  * Return: SUCCESS of successful reset else FAILED
  */
 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
+{
+       struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+       int retval = FAILED, ret;
+
+       ret = mpi3mr_soft_reset_handler(mrioc,
+           MPI3MR_RESET_FROM_EH_HOS, 1);
+       if (ret)
+               goto out;
+
+       retval = SUCCESS;
+out:
+       sdev_printk(KERN_INFO, scmd->device,
+           "Host reset is %s for scmd(%p)\n",
+           ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+       return retval;
+}
+
+/**
+ * mpi3mr_eh_bus_reset - Bus reset error handling callback
+ * @scmd: SCSI command reference
+ *
+ * Checks whether pending I/Os are present for the RAID volume;
+ * if not, there is no need to reset the adapter.
+ *
+ * Return: SUCCESS on successful reset, else FAILED
+ */
+static int mpi3mr_eh_bus_reset(struct scsi_cmnd *scmd)
 {
        struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
        struct mpi3mr_stgt_priv_data *stgt_priv_data;
        struct mpi3mr_sdev_priv_data *sdev_priv_data;
        u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
-       int retval = FAILED, ret;
+       int retval = FAILED;
 
        sdev_priv_data = scmd->device->hostdata;
        if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
@@ -4035,25 +4060,16 @@ static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
 
        if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
                mpi3mr_wait_for_host_io(mrioc,
-                   MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
-               if (!mpi3mr_get_fw_pending_ios(mrioc)) {
+                       MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
+               if (!mpi3mr_get_fw_pending_ios(mrioc))
                        retval = SUCCESS;
-                       goto out;
-               }
        }
+       if (retval == FAILED)
+               mpi3mr_print_pending_host_io(mrioc);
 
-       mpi3mr_print_pending_host_io(mrioc);
-       ret = mpi3mr_soft_reset_handler(mrioc,
-           MPI3MR_RESET_FROM_EH_HOS, 1);
-       if (ret)
-               goto out;
-
-       retval = SUCCESS;
-out:
        sdev_printk(KERN_INFO, scmd->device,
-           "Host reset is %s for scmd(%p)\n",
-           ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
-
+               "Bus reset is %s for scmd(%p)\n",
+               ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
        return retval;
 }
 
@@ -4900,6 +4916,7 @@ static const struct scsi_host_template mpi3mr_driver_template = {
        .change_queue_depth             = mpi3mr_change_queue_depth,
        .eh_device_reset_handler        = mpi3mr_eh_dev_reset,
        .eh_target_reset_handler        = mpi3mr_eh_target_reset,
+       .eh_bus_reset_handler           = mpi3mr_eh_bus_reset,
        .eh_host_reset_handler          = mpi3mr_eh_host_reset,
        .bios_param                     = mpi3mr_bios_param,
        .map_queues                     = mpi3mr_map_queues,
index 33053db5a7134c4ed230ed8fb901956e5a96ab30..dec1e2d380f171c2d61d399bc2de21ac5075dcbc 100644 (file)
@@ -1180,65 +1180,6 @@ void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
        }
 }
 
-#ifndef PM8001_USE_MSIX
-/**
- * pm8001_chip_intx_interrupt_enable - enable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- */
-static void
-pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
-{
-       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
-       pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
-}
-
-/**
- * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- */
-static void
-pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
-{
-       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
-}
-
-#else
-
-/**
- * pm8001_chip_msix_interrupt_enable - enable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- * @int_vec_idx: interrupt number to enable
- */
-static void
-pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha,
-       u32 int_vec_idx)
-{
-       u32 msi_index;
-       u32 value;
-       msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
-       msi_index += MSIX_TABLE_BASE;
-       pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE);
-       value = (1 << int_vec_idx);
-       pm8001_cw32(pm8001_ha, 0,  MSGU_ODCR, value);
-
-}
-
-/**
- * pm8001_chip_msix_interrupt_disable - disable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- * @int_vec_idx: interrupt number to disable
- */
-static void
-pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
-       u32 int_vec_idx)
-{
-       u32 msi_index;
-       msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
-       msi_index += MSIX_TABLE_BASE;
-       pm8001_cw32(pm8001_ha, 0,  msi_index, MSIX_INTERRUPT_DISABLE);
-}
-#endif
-
 /**
  * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
@@ -1247,11 +1188,14 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
 static void
 pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
-#ifdef PM8001_USE_MSIX
-       pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
-#else
-       pm8001_chip_intx_interrupt_enable(pm8001_ha);
-#endif
+       if (pm8001_ha->use_msix) {
+               pm8001_cw32(pm8001_ha, 0, MSIX_TABLE_BASE,
+                           MSIX_INTERRUPT_ENABLE);
+               pm8001_cw32(pm8001_ha, 0,  MSGU_ODCR, 1);
+       } else {
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+       }
 }
 
 /**
@@ -1262,11 +1206,11 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 static void
 pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
-#ifdef PM8001_USE_MSIX
-       pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
-#else
-       pm8001_chip_intx_interrupt_disable(pm8001_ha);
-#endif
+       if (pm8001_ha->use_msix)
+               pm8001_cw32(pm8001_ha, 0, MSIX_TABLE_BASE,
+                           MSIX_INTERRUPT_DISABLE);
+       else
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
 }
 
 /**
@@ -4180,7 +4124,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
        payload.sas_identify.dev_type = SAS_END_DEVICE;
        payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
        memcpy(payload.sas_identify.sas_addr,
-               pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+               &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
        payload.sas_identify.phy_id = phy_id;
 
        return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
@@ -4309,16 +4253,15 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
 
 static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
 {
-#ifdef PM8001_USE_MSIX
-       return 1;
-#else
        u32 value;
 
+       if (pm8001_ha->use_msix)
+               return 1;
+
        value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
        if (value)
                return 1;
        return 0;
-#endif
 }
 
 /**
index 5e5ce1e74c3b0567d411b79d7fd30d5ae222e3c9..ed6b7d954dda879a1d070586cdbe90ea2653ee87 100644 (file)
@@ -56,6 +56,18 @@ MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
                " 4: Link rate 6.0G\n"
                " 8: Link rate 12.0G\n");
 
+bool pm8001_use_msix = true;
+module_param_named(use_msix, pm8001_use_msix, bool, 0444);
+MODULE_PARM_DESC(use_msix, "Use MSIX interrupts. Default: true");
+
+static bool pm8001_use_tasklet = true;
+module_param_named(use_tasklet, pm8001_use_tasklet, bool, 0444);
+MODULE_PARM_DESC(use_tasklet, "Use tasklet for interrupt processing. Default: true");
+
+static bool pm8001_read_wwn = true;
+module_param_named(read_wwn, pm8001_read_wwn, bool, 0444);
+MODULE_PARM_DESC(read_wwn, "Get WWN from the controller. Default: true");
+
 static struct scsi_transport_template *pm8001_stt;
 static int pm8001_init_ccb_tag(struct pm8001_hba_info *);
 
@@ -200,8 +212,6 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
        kfree(pm8001_ha);
 }
 
-#ifdef PM8001_USE_TASKLET
-
 /**
  * pm8001_tasklet() - tasklet for 64 msi-x interrupt handler
  * @opaque: the passed general host adapter struct
@@ -209,16 +219,67 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
  */
 static void pm8001_tasklet(unsigned long opaque)
 {
-       struct pm8001_hba_info *pm8001_ha;
-       struct isr_param *irq_vector;
+       struct isr_param *irq_vector = (struct isr_param *)opaque;
+       struct pm8001_hba_info *pm8001_ha = irq_vector->drv_inst;
+
+       if (WARN_ON_ONCE(!pm8001_ha))
+               return;
 
-       irq_vector = (struct isr_param *)opaque;
-       pm8001_ha = irq_vector->drv_inst;
-       if (unlikely(!pm8001_ha))
-               BUG_ON(1);
        PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
 }
-#endif
+
+static void pm8001_init_tasklet(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+
+       if (!pm8001_use_tasklet)
+               return;
+
+       /* Single tasklet for non-MSI-X or SPC (chip_8001) interrupts */
+       if ((!pm8001_ha->pdev->msix_cap || !pci_msi_enabled()) ||
+           (pm8001_ha->chip_id == chip_8001)) {
+               tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
+                            (unsigned long)&(pm8001_ha->irq_vector[0]));
+               return;
+       }
+       for (i = 0; i < PM8001_MAX_MSIX_VEC; i++)
+               tasklet_init(&pm8001_ha->tasklet[i], pm8001_tasklet,
+                            (unsigned long)&(pm8001_ha->irq_vector[i]));
+}
+
+static void pm8001_kill_tasklet(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+
+       if (!pm8001_use_tasklet)
+               return;
+
+       /* For non-msix and msix interrupts */
+       if ((!pm8001_ha->pdev->msix_cap || !pci_msi_enabled()) ||
+           (pm8001_ha->chip_id == chip_8001)) {
+               tasklet_kill(&pm8001_ha->tasklet[0]);
+               return;
+       }
+
+       for (i = 0; i < PM8001_MAX_MSIX_VEC; i++)
+               tasklet_kill(&pm8001_ha->tasklet[i]);
+}
+
+static irqreturn_t pm8001_handle_irq(struct pm8001_hba_info *pm8001_ha,
+                                    int irq)
+{
+       if (unlikely(!pm8001_ha))
+               return IRQ_NONE;
+
+       if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
+               return IRQ_NONE;
+
+       if (!pm8001_use_tasklet)
+               return PM8001_CHIP_DISP->isr(pm8001_ha, irq);
+
+       tasklet_schedule(&pm8001_ha->tasklet[irq]);
+       return IRQ_HANDLED;
+}
 
 /**
  * pm8001_interrupt_handler_msix - main MSIX interrupt handler.
@@ -230,22 +291,10 @@ static void pm8001_tasklet(unsigned long opaque)
  */
 static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
 {
-       struct isr_param *irq_vector;
-       struct pm8001_hba_info *pm8001_ha;
-       irqreturn_t ret = IRQ_HANDLED;
-       irq_vector = (struct isr_param *)opaque;
-       pm8001_ha = irq_vector->drv_inst;
+       struct isr_param *irq_vector = (struct isr_param *)opaque;
+       struct pm8001_hba_info *pm8001_ha = irq_vector->drv_inst;
 
-       if (unlikely(!pm8001_ha))
-               return IRQ_NONE;
-       if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
-               return IRQ_NONE;
-#ifdef PM8001_USE_TASKLET
-       tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]);
-#else
-       ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);
-#endif
-       return ret;
+       return pm8001_handle_irq(pm8001_ha, irq_vector->irq_id);
 }
 
 /**
@@ -256,25 +305,14 @@ static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
 
 static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
 {
-       struct pm8001_hba_info *pm8001_ha;
-       irqreturn_t ret = IRQ_HANDLED;
        struct sas_ha_struct *sha = dev_id;
-       pm8001_ha = sha->lldd_ha;
-       if (unlikely(!pm8001_ha))
-               return IRQ_NONE;
-       if (!PM8001_CHIP_DISP->is_our_interrupt(pm8001_ha))
-               return IRQ_NONE;
+       struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-#ifdef PM8001_USE_TASKLET
-       tasklet_schedule(&pm8001_ha->tasklet[0]);
-#else
-       ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
-#endif
-       return ret;
+       return pm8001_handle_irq(pm8001_ha, 0);
 }
 
-static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha);
 static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha);
+static void pm8001_free_irq(struct pm8001_hba_info *pm8001_ha);
 
 /**
  * pm8001_alloc - initiate our hba structure and 6 DMAs area.
@@ -294,13 +332,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
        pm8001_dbg(pm8001_ha, INIT, "pm8001_alloc: PHY:%x\n",
                   pm8001_ha->chip->n_phy);
 
-       /* Setup Interrupt */
-       rc = pm8001_setup_irq(pm8001_ha);
-       if (rc) {
-               pm8001_dbg(pm8001_ha, FAIL,
-                          "pm8001_setup_irq failed [ret: %d]\n", rc);
-               goto err_out;
-       }
        /* Request Interrupt */
        rc = pm8001_request_irq(pm8001_ha);
        if (rc)
@@ -519,7 +550,6 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
 {
        struct pm8001_hba_info *pm8001_ha;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
-       int j;
 
        pm8001_ha = sha->lldd_ha;
        if (!pm8001_ha)
@@ -550,17 +580,8 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
        else
                pm8001_ha->iomb_size = IOMB_SIZE_SPC;
 
-#ifdef PM8001_USE_TASKLET
-       /* Tasklet for non msi-x interrupt handler */
-       if ((!pdev->msix_cap || !pci_msi_enabled())
-           || (pm8001_ha->chip_id == chip_8001))
-               tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
-                       (unsigned long)&(pm8001_ha->irq_vector[0]));
-       else
-               for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
-                       tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
-                               (unsigned long)&(pm8001_ha->irq_vector[j]));
-#endif
+       pm8001_init_tasklet(pm8001_ha);
+
        if (pm8001_ioremap(pm8001_ha))
                goto failed_pci_alloc;
        if (!pm8001_alloc(pm8001_ha, ent))
@@ -666,19 +687,30 @@ static void  pm8001_post_sas_ha_init(struct Scsi_Host *shost,
  */
 static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
 {
-       u8 i, j;
-       u8 sas_add[8];
-#ifdef PM8001_READ_VPD
-       /* For new SPC controllers WWN is stored in flash vpd
-       *  For SPC/SPCve controllers WWN is stored in EEPROM
-       *  For Older SPC WWN is stored in NVMD
-       */
        DECLARE_COMPLETION_ONSTACK(completion);
        struct pm8001_ioctl_payload payload;
+       unsigned long time_remaining;
+       u8 sas_add[8];
        u16 deviceid;
        int rc;
-       unsigned long time_remaining;
+       u8 i, j;
+
+       if (!pm8001_read_wwn) {
+               __be64 dev_sas_addr = cpu_to_be64(0x50010c600047f9d0ULL);
+
+               for (i = 0; i < pm8001_ha->chip->n_phy; i++)
+                       memcpy(&pm8001_ha->phy[i].dev_sas_addr, &dev_sas_addr,
+                              SAS_ADDR_SIZE);
+               memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
+                      SAS_ADDR_SIZE);
+               return 0;
+       }
 
+       /*
+        * For new SPC controllers WWN is stored in flash vpd. For SPC/SPCve
+        * controllers WWN is stored in EEPROM. And for Older SPC WWN is stored
+        * in NVMD.
+        */
        if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
                pm8001_dbg(pm8001_ha, FAIL, "controller is in fatal error state\n");
                return -EIO;
@@ -752,16 +784,7 @@ static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
                           pm8001_ha->phy[i].dev_sas_addr);
        }
        kfree(payload.func_specific);
-#else
-       for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
-               pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
-               pm8001_ha->phy[i].dev_sas_addr =
-                       cpu_to_be64((u64)
-                               (*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
-       }
-       memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
-               SAS_ADDR_SIZE);
-#endif
+
        return 0;
 }
 
@@ -771,13 +794,13 @@ static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
  */
 static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
 {
-
-#ifdef PM8001_READ_VPD
-       /*OPTION ROM FLASH read for the SPC cards */
        DECLARE_COMPLETION_ONSTACK(completion);
        struct pm8001_ioctl_payload payload;
        int rc;
 
+       if (!pm8001_read_wwn)
+               return 0;
+
        pm8001_ha->nvmd_completion = &completion;
        /* SAS ADDRESS read from flash / EEPROM */
        payload.minor_function = 6;
@@ -796,7 +819,7 @@ static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
        wait_for_completion(&completion);
        pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
        kfree(payload.func_specific);
-#endif
+
        return 0;
 }
 
@@ -947,7 +970,6 @@ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
        }
 }
 
-#ifdef PM8001_USE_MSIX
 /**
  * pm8001_setup_msix - enable MSI-X interrupt
  * @pm8001_ha: our ha struct.
@@ -1029,21 +1051,6 @@ static u32 pm8001_request_msix(struct pm8001_hba_info *pm8001_ha)
 
        return rc;
 }
-#endif
-
-static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha)
-{
-       struct pci_dev *pdev;
-
-       pdev = pm8001_ha->pdev;
-
-#ifdef PM8001_USE_MSIX
-       if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
-               return pm8001_setup_msix(pm8001_ha);
-       pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
-#endif
-       return 0;
-}
 
 /**
  * pm8001_request_irq - register interrupt
@@ -1051,27 +1058,59 @@ static u32 pm8001_setup_irq(struct pm8001_hba_info *pm8001_ha)
  */
 static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
 {
-       struct pci_dev *pdev;
+       struct pci_dev *pdev = pm8001_ha->pdev;
        int rc;
 
-       pdev = pm8001_ha->pdev;
+       if (pm8001_use_msix && pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
+               rc = pm8001_setup_msix(pm8001_ha);
+               if (rc) {
+                       pm8001_dbg(pm8001_ha, FAIL,
+                                  "pm8001_setup_msix failed [ret: %d]\n", rc);
+                       return rc;
+               }
 
-#ifdef PM8001_USE_MSIX
-       if (pdev->msix_cap && pci_msi_enabled())
-               return pm8001_request_msix(pm8001_ha);
-       else {
-               pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
-               goto intx;
+               if (!pdev->msix_cap || !pci_msi_enabled())
+                       goto use_intx;
+
+               rc = pm8001_request_msix(pm8001_ha);
+               if (rc)
+                       return rc;
+
+               pm8001_ha->use_msix = true;
+
+               return 0;
        }
-#endif
 
-intx:
-       /* initialize the INT-X interrupt */
+use_intx:
+       /* Initialize the INT-X interrupt */
+       pm8001_dbg(pm8001_ha, INIT, "MSIX not supported!!!\n");
+       pm8001_ha->use_msix = false;
        pm8001_ha->irq_vector[0].irq_id = 0;
        pm8001_ha->irq_vector[0].drv_inst = pm8001_ha;
-       rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
-               pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost));
-       return rc;
+
+       return request_irq(pdev->irq, pm8001_interrupt_handler_intx,
+                          IRQF_SHARED, pm8001_ha->name,
+                          SHOST_TO_SAS_HA(pm8001_ha->shost));
+}
+
+static void pm8001_free_irq(struct pm8001_hba_info *pm8001_ha)
+{
+       struct pci_dev *pdev = pm8001_ha->pdev;
+       int i;
+
+       if (pm8001_ha->use_msix) {
+               for (i = 0; i < pm8001_ha->number_of_intr; i++)
+                       synchronize_irq(pci_irq_vector(pdev, i));
+
+               for (i = 0; i < pm8001_ha->number_of_intr; i++)
+                       free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
+
+               pci_free_irq_vectors(pdev);
+               return;
+       }
+
+       /* INT-X */
+       free_irq(pm8001_ha->irq, pm8001_ha->sas);
 }
 
 /**
@@ -1269,33 +1308,17 @@ err_out:
 static void pm8001_pci_remove(struct pci_dev *pdev)
 {
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
-       struct pm8001_hba_info *pm8001_ha;
-       int i, j;
-       pm8001_ha = sha->lldd_ha;
+       struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+       int i;
+
        sas_unregister_ha(sha);
        sas_remove_host(pm8001_ha->shost);
        list_del(&pm8001_ha->list);
        PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
        PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 
-#ifdef PM8001_USE_MSIX
-       for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               synchronize_irq(pci_irq_vector(pdev, i));
-       for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
-       pci_free_irq_vectors(pdev);
-#else
-       free_irq(pm8001_ha->irq, sha);
-#endif
-#ifdef PM8001_USE_TASKLET
-       /* For non-msix and msix interrupts */
-       if ((!pdev->msix_cap || !pci_msi_enabled()) ||
-           (pm8001_ha->chip_id == chip_8001))
-               tasklet_kill(&pm8001_ha->tasklet[0]);
-       else
-               for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
-                       tasklet_kill(&pm8001_ha->tasklet[j]);
-#endif
+       pm8001_free_irq(pm8001_ha);
+       pm8001_kill_tasklet(pm8001_ha);
        scsi_host_put(pm8001_ha->shost);
 
        for (i = 0; i < pm8001_ha->ccb_count; i++) {
@@ -1326,7 +1349,7 @@ static int __maybe_unused pm8001_pci_suspend(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
-       int  i, j;
+
        sas_suspend_ha(sha);
        flush_workqueue(pm8001_wq);
        scsi_block_requests(pm8001_ha->shost);
@@ -1336,24 +1359,10 @@ static int __maybe_unused pm8001_pci_suspend(struct device *dev)
        }
        PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
        PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
-#ifdef PM8001_USE_MSIX
-       for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               synchronize_irq(pci_irq_vector(pdev, i));
-       for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
-       pci_free_irq_vectors(pdev);
-#else
-       free_irq(pm8001_ha->irq, sha);
-#endif
-#ifdef PM8001_USE_TASKLET
-       /* For non-msix and msix interrupts */
-       if ((!pdev->msix_cap || !pci_msi_enabled()) ||
-           (pm8001_ha->chip_id == chip_8001))
-               tasklet_kill(&pm8001_ha->tasklet[0]);
-       else
-               for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
-                       tasklet_kill(&pm8001_ha->tasklet[j]);
-#endif
+
+       pm8001_free_irq(pm8001_ha);
+       pm8001_kill_tasklet(pm8001_ha);
+
        pm8001_info(pm8001_ha, "pdev=0x%p, slot=%s, entering "
                      "suspended state\n", pdev,
                      pm8001_ha->name);
@@ -1372,7 +1381,7 @@ static int __maybe_unused pm8001_pci_resume(struct device *dev)
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
        struct pm8001_hba_info *pm8001_ha;
        int rc;
-       u8 i = 0, j;
+       u8 i = 0;
        DECLARE_COMPLETION_ONSTACK(completion);
 
        pm8001_ha = sha->lldd_ha;
@@ -1400,17 +1409,9 @@ static int __maybe_unused pm8001_pci_resume(struct device *dev)
        rc = pm8001_request_irq(pm8001_ha);
        if (rc)
                goto err_out_disable;
-#ifdef PM8001_USE_TASKLET
-       /*  Tasklet for non msi-x interrupt handler */
-       if ((!pdev->msix_cap || !pci_msi_enabled()) ||
-           (pm8001_ha->chip_id == chip_8001))
-               tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet,
-                       (unsigned long)&(pm8001_ha->irq_vector[0]));
-       else
-               for (j = 0; j < PM8001_MAX_MSIX_VEC; j++)
-                       tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
-                               (unsigned long)&(pm8001_ha->irq_vector[j]));
-#endif
+
+       pm8001_init_tasklet(pm8001_ha);
+
        PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
        if (pm8001_ha->chip_id != chip_8001) {
                for (i = 1; i < pm8001_ha->number_of_intr; i++)
@@ -1542,6 +1543,9 @@ static int __init pm8001_init(void)
 {
        int rc = -ENOMEM;
 
+       if (pm8001_use_tasklet && !pm8001_use_msix)
+               pm8001_use_tasklet = false;
+
        pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
        if (!pm8001_wq)
                goto err;
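Editor's note: the pm8001 changes replace the compile-time PM8001_USE_MSIX, PM8001_USE_TASKLET and PM8001_READ_VPD switches with boolean module parameters that are tested at runtime. A minimal sketch of that pattern (want_msix is an illustrative helper, not a driver function):

	static bool use_msix = true;
	module_param(use_msix, bool, 0444);
	MODULE_PARM_DESC(use_msix, "Use MSI-X interrupts. Default: true");

	/* A runtime test replaces the old #ifdef PM8001_USE_MSIX blocks. */
	static bool want_msix(struct pci_dev *pdev)
	{
		return use_msix && pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	}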
index 2fadd353f1c13df1890fcd9894a732f24f008bd9..3ccb7371902f84cb573ef2b95e69380c8505d0de 100644 (file)
@@ -83,10 +83,7 @@ do {                                                                 \
                pm8001_info(HBA, fmt, ##__VA_ARGS__);                   \
 } while (0)
 
-#define PM8001_USE_TASKLET
-#define PM8001_USE_MSIX
-#define PM8001_READ_VPD
-
+extern bool pm8001_use_msix;
 
 #define IS_SPCV_12G(dev)       ((dev->device == 0X8074)                \
                                || (dev->device == 0X8076)              \
@@ -520,14 +517,12 @@ struct pm8001_hba_info {
        struct pm8001_device    *devices;
        struct pm8001_ccb_info  *ccb_info;
        u32                     ccb_count;
-#ifdef PM8001_USE_MSIX
+
+       bool                    use_msix;
        int                     number_of_intr;/*will be used in remove()*/
        char                    intr_drvname[PM8001_MAX_MSIX_VEC]
                                [PM8001_NAME_LENGTH+1+3+1];
-#endif
-#ifdef PM8001_USE_TASKLET
        struct tasklet_struct   tasklet[PM8001_MAX_MSIX_VEC];
-#endif
        u32                     logging_level;
        u32                     link_rate;
        u32                     fw_status;
index f6857632dc7cd70d6cb3682fb43603b40d798632..a52ae6841939462dc4de6d3ffb09e696d0972395 100644 (file)
@@ -1714,27 +1714,6 @@ static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
        pm8001_dbg(pm8001_ha, INIT, "chip reset finished\n");
 }
 
-/**
- * pm80xx_chip_intx_interrupt_enable - enable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- */
-static void
-pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
-{
-       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
-       pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
-}
-
-/**
- * pm80xx_chip_intx_interrupt_disable - disable PM8001 chip interrupt
- * @pm8001_ha: our hba card information
- */
-static void
-pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
-{
-       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
-}
-
 /**
  * pm80xx_chip_interrupt_enable - enable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
@@ -1743,16 +1722,16 @@ pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
 static void
 pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
-#ifdef PM8001_USE_MSIX
+       if (!pm8001_ha->use_msix) {
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+               return;
+       }
+
        if (vec < 32)
                pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
        else
-               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
-                           1U << (vec - 32));
-       return;
-#endif
-       pm80xx_chip_intx_interrupt_enable(pm8001_ha);
-
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U, 1U << (vec - 32));
 }
 
 /**
@@ -1763,19 +1742,20 @@ pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 static void
 pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
-#ifdef PM8001_USE_MSIX
+       if (!pm8001_ha->use_msix) {
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
+               return;
+       }
+
        if (vec == 0xFF) {
                /* disable all vectors 0-31, 32-63 */
                pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
                pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
-       } else if (vec < 32)
+       } else if (vec < 32) {
                pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
-       else
-               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
-                           1U << (vec - 32));
-       return;
-#endif
-       pm80xx_chip_intx_interrupt_disable(pm8001_ha);
+       } else {
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 1U << (vec - 32));
+       }
 }
 
 /**
@@ -3671,10 +3651,12 @@ static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
                        (struct set_ctrl_cfg_resp *)(piomb + 4);
        u32 status = le32_to_cpu(pPayload->status);
        u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
+       u32 tag = le32_to_cpu(pPayload->tag);
 
        pm8001_dbg(pm8001_ha, MSG,
                   "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
                   status, err_qlfr_pgcd);
+       pm8001_tag_free(pm8001_ha, tag);
 
        return 0;
 }
@@ -4671,7 +4653,7 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
        payload.sas_identify.dev_type = SAS_END_DEVICE;
        payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
        memcpy(payload.sas_identify.sas_addr,
-         &pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+               &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
        payload.sas_identify.phy_id = phy_id;
 
        return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
@@ -4800,16 +4782,15 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
 
 static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
 {
-#ifdef PM8001_USE_MSIX
-       return 1;
-#else
        u32 value;
 
+       if (pm8001_ha->use_msix)
+               return 1;
+
        value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
        if (value)
                return 1;
        return 0;
-#endif
 }
 
 /**
index 50dc30051f221f537332ee3022d4d746b3108ae9..a831b34c08a41a5d48c1f8dbaa1cc5c46d954f89 100644 (file)
@@ -2691,7 +2691,7 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
  *     SUCCESS / FAILED
  */
 static int pmcraid_reset_device(
-       struct scsi_cmnd *scsi_cmd,
+       struct scsi_device *scsi_dev,
        unsigned long timeout,
        u8 modifier)
 {
@@ -2703,11 +2703,11 @@ static int pmcraid_reset_device(
        u32 ioasc;
 
        pinstance =
-               (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
-       res = scsi_cmd->device->hostdata;
+               (struct pmcraid_instance *)scsi_dev->host->hostdata;
+       res = scsi_dev->hostdata;
 
        if (!res) {
-               sdev_printk(KERN_ERR, scsi_cmd->device,
+               sdev_printk(KERN_ERR, scsi_dev,
                            "reset_device: NULL resource pointer\n");
                return FAILED;
        }
@@ -3018,25 +3018,67 @@ static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
 {
        scmd_printk(KERN_INFO, scmd,
                    "resetting device due to an I/O command timeout.\n");
-       return pmcraid_reset_device(scmd,
+       return pmcraid_reset_device(scmd->device,
                                    PMCRAID_INTERNAL_TIMEOUT,
                                    RESET_DEVICE_LUN);
 }
 
 static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
 {
-       scmd_printk(KERN_INFO, scmd,
+       struct Scsi_Host *host = scmd->device->host;
+       struct pmcraid_instance *pinstance =
+               (struct pmcraid_instance *)host->hostdata;
+       struct pmcraid_resource_entry *res = NULL;
+       struct pmcraid_resource_entry *temp;
+       struct scsi_device *sdev = NULL;
+       unsigned long lock_flags;
+
+       /*
+        * The reset device code insists on us passing down
+        * a device, so grab the first device on the bus.
+        */
+       spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
+       list_for_each_entry(temp, &pinstance->used_res_q, queue) {
+               if (scmd->device->channel == PMCRAID_VSET_BUS_ID &&
+                   RES_IS_VSET(temp->cfg_entry)) {
+                       res = temp;
+                       break;
+               } else if (scmd->device->channel == PMCRAID_PHYS_BUS_ID &&
+                          RES_IS_GSCSI(temp->cfg_entry)) {
+                       res = temp;
+                       break;
+               }
+       }
+       if (res)
+               sdev = res->scsi_dev;
+       spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+       if (!sdev)
+               return FAILED;
+
+       sdev_printk(KERN_INFO, sdev,
                    "Doing bus reset due to an I/O command timeout.\n");
-       return pmcraid_reset_device(scmd,
+       return pmcraid_reset_device(sdev,
                                    PMCRAID_RESET_BUS_TIMEOUT,
                                    RESET_DEVICE_BUS);
 }
 
 static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
 {
-       scmd_printk(KERN_INFO, scmd,
+       struct Scsi_Host *shost = scmd->device->host;
+       struct scsi_device *scsi_dev = NULL, *tmp;
+
+       shost_for_each_device(tmp, shost) {
+               if ((tmp->channel == scmd->device->channel) &&
+                   (tmp->id == scmd->device->id)) {
+                       scsi_dev = tmp;
+                       break;
+               }
+       }
+       if (!scsi_dev)
+               return FAILED;
+       sdev_printk(KERN_INFO, scsi_dev,
                    "Doing target reset due to an I/O command timeout.\n");
-       return pmcraid_reset_device(scmd,
+       return pmcraid_reset_device(scsi_dev,
                                    PMCRAID_INTERNAL_TIMEOUT,
                                    RESET_DEVICE_TARGET);
 }
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 19f0b93fa3d85a283bcfb77125def4d38845e3cd..d592ee9170c11f373971028a804f44a6ba6042b7 100644 (file)
@@ -307,9 +307,9 @@ static int ppa_out(ppa_struct *dev, char *buffer, int len)
        case PPA_EPP_8:
                epp_reset(ppb);
                w_ctr(ppb, 0x4);
-               if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x01))
+               if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03))
                        outsl(ppb + 4, buffer, len >> 2);
-               else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x03))
+               else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01))
                        outsw(ppb + 4, buffer, len >> 1);
                else
                        outsb(ppb + 4, buffer, len);
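
The swapped masks above restore the intended alignment rule: 32-bit EPP bursts need the buffer address and length 4-byte aligned (low two bits clear), while 16-bit bursts only need 2-byte alignment (low bit clear). A small illustrative sketch with hypothetical helper names:

#include <linux/types.h>

/* Illustrative alignment tests for the two EPP transfer widths. */
static inline bool epp32_aligned(const char *buffer, int len)
{
        return (((unsigned long)buffer | len) & 0x03) == 0;
}

static inline bool epp16_aligned(const char *buffer, int len)
{
        return (((unsigned long)buffer | len) & 0x01) == 0;
}
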
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 1619cc33034f219a882bdf6728839c721a8d6a30..5058e01b65a273084a990e1c285038c4da641c3f 100644 (file)
@@ -112,6 +112,7 @@ struct qedf_ioreq {
 #define QEDF_CMD_ERR_SCSI_DONE         0x5
        u8 io_req_flags;
        uint8_t tm_flags;
+       u64 tm_lun;
        struct qedf_rport *fcport;
 #define        QEDF_CMD_ST_INACTIVE            0
 #define        QEDFC_CMD_ST_IO_ACTIVE          1
@@ -497,7 +498,7 @@ extern void qedf_process_warning_compl(struct qedf_ctx *qedf,
        struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
 extern void qedf_process_error_detect(struct qedf_ctx *qedf,
        struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
-extern void qedf_flush_active_ios(struct qedf_rport *fcport, int lun);
+extern void qedf_flush_active_ios(struct qedf_rport *fcport, u64 lun);
 extern void qedf_release_cmd(struct kref *ref);
 extern int qedf_initiate_abts(struct qedf_ioreq *io_req,
        bool return_scsi_cmd_on_abts);
@@ -522,7 +523,7 @@ extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
        bool return_scsi_cmd_on_abts);
 extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf,
        struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
-extern int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags);
+extern int qedf_initiate_tmf(struct fc_rport *rport, u64 lun, u8 tm_flags);
 extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        struct qedf_ioreq *io_req);
 extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 4750ec5789a80db9f07060599d3705ddc6bcc062..bf921caaf6aea684d91b028a0a193f86edba0dea 100644 (file)
@@ -546,7 +546,7 @@ static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
 }
 
 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
-                                 struct fcp_cmnd *fcp_cmnd)
+                               struct fcp_cmnd *fcp_cmnd)
 {
        struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
 
@@ -554,8 +554,12 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
        memset(fcp_cmnd, 0, FCP_CMND_LEN);
 
        /* 8 bytes: SCSI LUN info */
-       int_to_scsilun(sc_cmd->device->lun,
-                       (struct scsi_lun *)&fcp_cmnd->fc_lun);
+       if (io_req->cmd_type == QEDF_TASK_MGMT_CMD)
+               int_to_scsilun(io_req->tm_lun,
+                              (struct scsi_lun *)&fcp_cmnd->fc_lun);
+       else
+               int_to_scsilun(sc_cmd->device->lun,
+                              (struct scsi_lun *)&fcp_cmnd->fc_lun);
 
        /* 4 bytes: flag info */
        fcp_cmnd->fc_pri_ta = 0;
@@ -1095,7 +1099,7 @@ static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
        }
 
        /* The sense buffer can be NULL for TMF commands */
-       if (sc_cmd->sense_buffer) {
+       if (sc_cmd && sc_cmd->sense_buffer) {
                memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
                if (fcp_sns_len)
                        memcpy(sc_cmd->sense_buffer, sense_data,
@@ -1580,7 +1584,7 @@ static void qedf_flush_els_req(struct qedf_ctx *qedf,
 /* A value of -1 for lun is a wild card that means flush all
  * active SCSI I/Os for the target.
  */
-void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
+void qedf_flush_active_ios(struct qedf_rport *fcport, u64 lun)
 {
        struct qedf_ioreq *io_req;
        struct qedf_ctx *qedf;
@@ -1768,10 +1772,6 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
                        kref_put(&io_req->refcount, qedf_release_cmd);
                        continue;
                }
-               if (lun > -1) {
-                       if (io_req->lun != lun)
-                               continue;
-               }
 
                /*
                 * Use kref_get_unless_zero in the unlikely case the command
@@ -1904,6 +1904,7 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
                goto drop_rdata_kref;
        }
 
+       spin_lock_irqsave(&fcport->rport_lock, flags);
        if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
            test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
            test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
@@ -1911,17 +1912,20 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
                         "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
                         io_req->xid, io_req->sc_cmd);
                rc = 1;
+               spin_unlock_irqrestore(&fcport->rport_lock, flags);
                goto drop_rdata_kref;
        }
 
+       /* Set the command type to abort */
+       io_req->cmd_type = QEDF_ABTS;
+       spin_unlock_irqrestore(&fcport->rport_lock, flags);
+
        kref_get(&io_req->refcount);
 
        xid = io_req->xid;
        qedf->control_requests++;
        qedf->packet_aborts++;
 
-       /* Set the command type to abort */
-       io_req->cmd_type = QEDF_ABTS;
        io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
 
        set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
@@ -2210,7 +2214,9 @@ process_els:
                  refcount, fcport, fcport->rdata->ids.port_id);
 
        /* Cleanup cmds re-use the same TID as the original I/O */
+       spin_lock_irqsave(&fcport->rport_lock, flags);
        io_req->cmd_type = QEDF_CLEANUP;
+       spin_unlock_irqrestore(&fcport->rport_lock, flags);
        io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
 
        init_completion(&io_req->cleanup_done);
@@ -2281,7 +2287,7 @@ void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        complete(&io_req->cleanup_done);
 }
 
-static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
+static int qedf_execute_tmf(struct qedf_rport *fcport, u64 tm_lun,
        uint8_t tm_flags)
 {
        struct qedf_ioreq *io_req;
@@ -2291,17 +2297,10 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
        int rc = 0;
        uint16_t xid;
        int tmo = 0;
-       int lun = 0;
        unsigned long flags;
        struct fcoe_wqe *sqe;
        u16 sqe_idx;
 
-       if (!sc_cmd) {
-               QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
-               return FAILED;
-       }
-
-       lun = (int)sc_cmd->device->lun;
        if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
                QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
                rc = FAILED;
@@ -2321,7 +2320,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
                qedf->target_resets++;
 
        /* Initialize rest of io_req fields */
-       io_req->sc_cmd = sc_cmd;
+       io_req->sc_cmd = NULL;
        io_req->fcport = fcport;
        io_req->cmd_type = QEDF_TASK_MGMT_CMD;
 
@@ -2335,6 +2334,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 
        /* Default is to return a SCSI command when an error occurs */
        io_req->return_scsi_cmd_on_abts = false;
+       io_req->tm_lun = tm_lun;
 
        /* Obtain exchange id */
        xid = io_req->xid;
@@ -2389,7 +2389,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 
 
        if (tm_flags == FCP_TMF_LUN_RESET)
-               qedf_flush_active_ios(fcport, lun);
+               qedf_flush_active_ios(fcport, tm_lun);
        else
                qedf_flush_active_ios(fcport, -1);
 
@@ -2404,23 +2404,18 @@ no_flush:
        return rc;
 }
 
-int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
+int qedf_initiate_tmf(struct fc_rport *rport, u64 lun, u8 tm_flags)
 {
-       struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
        struct fc_rport_libfc_priv *rp = rport->dd_data;
        struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
-       struct qedf_ctx *qedf;
-       struct fc_lport *lport = shost_priv(sc_cmd->device->host);
+       struct qedf_ctx *qedf = fcport->qedf;
+       struct fc_lport *lport = rp->local_port;
        int rc = SUCCESS;
-       int rval;
-       struct qedf_ioreq *io_req = NULL;
-       int ref_cnt = 0;
        struct fc_rport_priv *rdata = fcport->rdata;
 
        QEDF_ERR(NULL,
-                "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
-                tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
-                rport->scsi_target_id, (int)sc_cmd->device->lun);
+                "tm_flags 0x%x target_id = 0x%x lun=%llu\n",
+                tm_flags, rport->scsi_target_id, lun);
 
        if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
                QEDF_ERR(NULL, "stale rport\n");
@@ -2431,33 +2426,10 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
                 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
                 "LUN RESET");
 
-       if (qedf_priv(sc_cmd)->io_req) {
-               io_req = qedf_priv(sc_cmd)->io_req;
-               ref_cnt = kref_read(&io_req->refcount);
-               QEDF_ERR(NULL,
-                        "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
-                        io_req, io_req->xid, ref_cnt);
-       }
-
-       rval = fc_remote_port_chkready(rport);
-       if (rval) {
-               QEDF_ERR(NULL, "device_reset rport not ready\n");
-               rc = FAILED;
-               goto tmf_err;
-       }
-
-       rc = fc_block_scsi_eh(sc_cmd);
+       rc = fc_block_rport(rport);
        if (rc)
                goto tmf_err;
 
-       if (!fcport) {
-               QEDF_ERR(NULL, "device_reset: rport is NULL\n");
-               rc = FAILED;
-               goto tmf_err;
-       }
-
-       qedf = fcport->qedf;
-
        if (!qedf) {
                QEDF_ERR(NULL, "qedf is NULL.\n");
                rc = FAILED;
@@ -2494,7 +2466,7 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
                goto tmf_err;
        }
 
-       rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
+       rc = qedf_execute_tmf(fcport, lun, tm_flags);
 
 tmf_err:
        kref_put(&rdata->kref, fc_rport_destroy);
@@ -2511,7 +2483,6 @@ void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
        fcp_rsp = &cqe->cqe_info.rsp_info;
        qedf_parse_fcp_rsp(io_req, fcp_rsp);
 
-       io_req->sc_cmd = NULL;
        complete(&io_req->tm_done);
 }
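
Because a task-management request now carries no scsi_cmnd, the LUN travels in io_req->tm_lun and is packed into the 8-byte FCP LUN field with int_to_scsilun(). A hedged sketch of just that conversion (example_pack_tmf_lun is a hypothetical name):

#include <linux/types.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

/* Illustrative only: convert a 64-bit LUN to the wire-format SCSI LUN
 * placed in the FCP_CMND payload. */
static void example_pack_tmf_lun(u64 tm_lun, struct scsi_lun *fc_lun)
{
        int_to_scsilun(tm_lun, fc_lun);
}
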
 
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 7825765c936cd651b276af2507799664b5cedeb1..a58353b7b4e8bad1430581b09cb0c08a8b85125c 100644 (file)
@@ -774,7 +774,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
                goto drop_rdata_kref;
        }
 
-       rc = fc_block_scsi_eh(sc_cmd);
+       rc = fc_block_rport(rport);
        if (rc)
                goto drop_rdata_kref;
 
@@ -858,18 +858,19 @@ out:
 
 static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
 {
-       QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...",
-                sc_cmd->device->host->host_no, sc_cmd->device->id,
-                sc_cmd->device->lun);
-       return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+       struct scsi_target *starget = scsi_target(sc_cmd->device);
+       struct fc_rport *rport = starget_to_rport(starget);
+
+       QEDF_ERR(NULL, "TARGET RESET Issued...");
+       return qedf_initiate_tmf(rport, 0, FCP_TMF_TGT_RESET);
 }
 
 static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
 {
-       QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ",
-                sc_cmd->device->host->host_no, sc_cmd->device->id,
-                sc_cmd->device->lun);
-       return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+       struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+
+       QEDF_ERR(NULL, "LUN RESET Issued...\n");
+       return qedf_initiate_tmf(rport, sc_cmd->device->lun, FCP_TMF_LUN_RESET);
 }
 
 bool qedf_wait_for_upload(struct qedf_ctx *qedf)
@@ -2805,6 +2806,8 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
        struct qedf_ioreq *io_req;
        struct qedf_rport *fcport;
        u32 comp_type;
+       u8 io_comp_type;
+       unsigned long flags;
 
        comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
            FCOE_CQE_CQE_TYPE_MASK;
@@ -2838,11 +2841,14 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
                return;
        }
 
+       spin_lock_irqsave(&fcport->rport_lock, flags);
+       io_comp_type = io_req->cmd_type;
+       spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
        switch (comp_type) {
        case FCOE_GOOD_COMPLETION_CQE_TYPE:
                atomic_inc(&fcport->free_sqes);
-               switch (io_req->cmd_type) {
+               switch (io_comp_type) {
                case QEDF_SCSI_CMD:
                        qedf_scsi_completion(qedf, cqe, io_req);
                        break;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 6e5e89aaa283ba85ea676e9913380d2cd31392bf..27bce80262c20c26f4cf7f29f40e2489fd47ae0b 100644 (file)
@@ -716,7 +716,6 @@ enum action {
        ABORT_COMMAND,
        DEVICE_RESET,
        BUS_RESET,
-       ADAPTER_RESET,
 };
 
 
@@ -898,22 +897,9 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
                }
                break;
 
-       case ADAPTER_RESET:
        default:
-               if (qla1280_verbose) {
-                       printk(KERN_INFO
-                              "scsi(%ld): Issued ADAPTER RESET\n",
-                              ha->host_no);
-                       printk(KERN_INFO "scsi(%ld): I/O processing will "
-                              "continue automatically\n", ha->host_no);
-               }
-               ha->flags.reset_active = 1;
-
-               if (qla1280_abort_isp(ha) != 0) {       /* it's dead */
-                       result = FAILED;
-               }
-
-               ha->flags.reset_active = 0;
+               dprintk(1, "RESET invalid action %d\n", action);
+               return FAILED;
        }
 
        /*
@@ -1011,11 +997,27 @@ qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
 static int
 qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
 {
-       int rc;
+       int rc = SUCCESS;
+       struct Scsi_Host *shost = cmd->device->host;
+       struct scsi_qla_host *ha = (struct scsi_qla_host *)shost->hostdata;
 
-       spin_lock_irq(cmd->device->host->host_lock);
-       rc = qla1280_error_action(cmd, ADAPTER_RESET);
-       spin_unlock_irq(cmd->device->host->host_lock);
+       spin_lock_irq(shost->host_lock);
+       if (qla1280_verbose) {
+               printk(KERN_INFO
+                      "scsi(%ld): Issued ADAPTER RESET\n",
+                      ha->host_no);
+               printk(KERN_INFO "scsi(%ld): I/O processing will "
+                      "continue automatically\n", ha->host_no);
+       }
+       ha->flags.reset_active = 1;
+
+       if (qla1280_abort_isp(ha) != 0) {       /* it's dead */
+               rc = FAILED;
+       }
+
+       ha->flags.reset_active = 0;
+
+       spin_unlock_irq(shost->host_lock);
 
        return rc;
 }
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index f060e593685de0266d93950d3e263202b263cdde..a7a364760b8002bd9b4bb02270d5e83fe4c5b3f0 100644 (file)
@@ -116,7 +116,7 @@ qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
 
        sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
        fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
-       if (!fp->dfs_rport_dir)
+       if (IS_ERR(fp->dfs_rport_dir))
                return;
        if (NVME_TARGET(vha->hw, fp))
                debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
@@ -708,14 +708,14 @@ create_nodes:
        if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
                ha->tgt.dfs_naqp = debugfs_create_file("naqp",
                    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
-               if (!ha->tgt.dfs_naqp) {
+               if (IS_ERR(ha->tgt.dfs_naqp)) {
                        ql_log(ql_log_warn, vha, 0xd011,
                               "Unable to create debugFS naqp node.\n");
                        goto out;
                }
        }
        vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
-       if (!vha->dfs_rport_root) {
+       if (IS_ERR(vha->dfs_rport_root)) {
                ql_log(ql_log_warn, vha, 0xd012,
                       "Unable to create debugFS rports node.\n");
                goto out;
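
The checks above follow the debugfs contract: debugfs_create_dir() and debugfs_create_file() return an ERR_PTR() on failure, never NULL, so a NULL test can never catch an error. A minimal sketch of the intended pattern, with a hypothetical caller (example_create_debugfs, "example" directory name):

#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *example_dfs_dir;

/* Illustrative only: debugfs is best effort, so on failure record
 * "no directory" rather than holding on to an ERR_PTR() value. */
static void example_create_debugfs(void)
{
        example_dfs_dir = debugfs_create_dir("example", NULL);
        if (IS_ERR(example_dfs_dir))
                example_dfs_dir = NULL;
}
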
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 0556969f6dc1bb5e65a92a6bd189afe733f49498..a4a56ab0ba7473f9c6258ed56e8d8bf03ca9aa7a 100644 (file)
@@ -577,7 +577,7 @@ fcport_is_bigger(fc_port_t *fcport)
 static inline struct qla_qpair *
 qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
 {
-       int cpuid = smp_processor_id();
+       int cpuid = raw_smp_processor_id();
 
        if (qpair->cpuid != cpuid &&
            ha->qp_cpu_map[cpuid]) {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e987881918978a66bb05fe32fa53d561554fbeff..d48007e182881b1fcc3aada1327fe77e600d3e2b 100644 (file)
@@ -3965,7 +3965,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
        if (!ha->flags.fw_started)
                return;
 
-       if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
+       if (rsp->qpair->cpuid != raw_smp_processor_id() || !rsp->qpair->rcv_intr) {
                rsp->qpair->rcv_intr = 1;
 
                if (!rsp->qpair->cpu_mapped)
@@ -4468,7 +4468,7 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
        }
        ha = qpair->hw;
 
-       queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+       queue_work(ha->wq, &qpair->q_work);
 
        return IRQ_HANDLED;
 }
@@ -4494,7 +4494,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
        wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-       queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
+       queue_work(ha->wq, &qpair->q_work);
 
        return IRQ_HANDLED;
 }
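
Two assumptions sit behind these conversions: smp_processor_id() triggers a debug splat when called from preemptible context under CONFIG_DEBUG_PREEMPT, while raw_smp_processor_id() does not, and plain queue_work() lets the workqueue pick a CPU instead of pinning the item the way queue_work_on() does. A sketch with a hypothetical helper (example_queue_completion):

#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Illustrative only: the CPU number is just a hint for logging, so the
 * raw accessor is acceptable even if the task migrates right after. */
static void example_queue_completion(struct workqueue_struct *wq,
                                     struct work_struct *work,
                                     int expected_cpu)
{
        if (expected_cpu != raw_smp_processor_id())
                pr_debug("completing on CPU %d\n", raw_smp_processor_id());

        queue_work(wq, work);   /* no queue_work_on(): do not pin a CPU */
}
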
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index db753d71299109dcb0b8601f50ba9093fca03e32..a8ddf356e6626038fcaf17df560259bad772fec3 100644 (file)
@@ -399,14 +399,14 @@ static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
        nvme->u.nvme.dl = 0;
        nvme->u.nvme.timeout_sec = 0;
        nvme->u.nvme.cmd_dma = fd_resp->rspdma;
-       nvme->u.nvme.cmd_len = fd_resp->rsplen;
+       nvme->u.nvme.cmd_len = cpu_to_le32(fd_resp->rsplen);
        nvme->u.nvme.rsp_len = 0;
        nvme->u.nvme.rsp_dma = 0;
        nvme->u.nvme.exchange_address = uctx->exchange_address;
        nvme->u.nvme.nport_handle = uctx->nport_handle;
        nvme->u.nvme.ox_id = uctx->ox_id;
        dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
-                                  le32_to_cpu(fd_resp->rsplen), DMA_TO_DEVICE);
+                                  fd_resp->rsplen, DMA_TO_DEVICE);
 
        ql_dbg(ql_dbg_unsol, vha, 0x2122,
               "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
@@ -504,13 +504,13 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
        nvme->u.nvme.desc = fd;
        nvme->u.nvme.dir = 0;
        nvme->u.nvme.dl = 0;
-       nvme->u.nvme.cmd_len = fd->rqstlen;
-       nvme->u.nvme.rsp_len = fd->rsplen;
+       nvme->u.nvme.cmd_len = cpu_to_le32(fd->rqstlen);
+       nvme->u.nvme.rsp_len = cpu_to_le32(fd->rsplen);
        nvme->u.nvme.rsp_dma = fd->rspdma;
        nvme->u.nvme.timeout_sec = fd->timeout;
        nvme->u.nvme.cmd_dma = fd->rqstdma;
        dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
-           le32_to_cpu(fd->rqstlen), DMA_TO_DEVICE);
+           fd->rqstlen, DMA_TO_DEVICE);
 
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
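
The byte-order hunks above assume the firmware-bound length field is declared __le32 while the nvme-fc descriptor lengths (fd->rqstlen, fd->rsplen) are CPU-endian, so the conversion belongs at the assignment and the dma_sync_single_for_device() length needs none. An illustrative sketch (example_fw_ls_req and example_fill_ls_req are hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical firmware descriptor: the length is consumed by hardware
 * in little-endian, so convert exactly once, at the assignment. */
struct example_fw_ls_req {
        __le32 cmd_len;
};

static void example_fill_ls_req(struct example_fw_ls_req *req, u32 rqstlen)
{
        req->cmd_len = cpu_to_le32(rqstlen);
}
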
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 50db08265c51fdf373bb7d9afb8ce251c000bd2d..7e103d7118253739cc1fad2298b522d46bddb5d6 100644 (file)
@@ -5,6 +5,7 @@
  */
 #include "qla_def.h"
 
+#include <linux/bitfield.h>
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
 #include <linux/delay.h>
@@ -633,8 +634,8 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
                const char *speed_str;
 
                pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
-               lspeed = lstat & PCI_EXP_LNKCAP_SLS;
-               lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
+               lspeed = FIELD_GET(PCI_EXP_LNKCAP_SLS, lstat);
+               lwidth = FIELD_GET(PCI_EXP_LNKCAP_MLW, lstat);
 
                switch (lspeed) {
                case 1:
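
FIELD_GET() derives the shift from the mask at compile time, which is why the open-coded ">> 4" for PCI_EXP_LNKCAP_MLW disappears above. A small sketch using only the standard pci_regs.h masks (example_decode_lnkcap is a hypothetical helper):

#include <linux/bitfield.h>
#include <linux/pci.h>
#include <linux/printk.h>

/* Illustrative decode of a PCIe Link Capabilities register value. */
static void example_decode_lnkcap(u32 lstat)
{
        u32 speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, lstat);
        u32 width = FIELD_GET(PCI_EXP_LNKCAP_MLW, lstat);

        pr_info("PCIe link capability: gen %u, x%u\n", speed, width);
}
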
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2b815a9928ea3db0ecaa35172649d88b1e86bea6..2ef2dbac0db2739d82c590f4afd987e893230931 100644 (file)
@@ -4425,8 +4425,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
                queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
        } else if (ha->msix_count) {
                if (cmd->atio.u.isp24.fcp_cmnd.rddata)
-                       queue_work_on(smp_processor_id(), qla_tgt_wq,
-                           &cmd->work);
+                       queue_work(qla_tgt_wq, &cmd->work);
                else
                        queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
                            &cmd->work);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3b5ba4b47b3b2717708b64f6e85fec986513070a..7e7460a747a447efacfd06ed8b8335c223eb11c2 100644 (file)
@@ -310,7 +310,7 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
        cmd->trc_flags |= TRC_CMD_DONE;
 
        INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
-       queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+       queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }
 
 /*
@@ -547,7 +547,7 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
        cmd->trc_flags |= TRC_DATA_IN;
        cmd->cmd_in_wq = 1;
        INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
-       queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
+       queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }
 
 static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
@@ -1822,6 +1822,9 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
        .tfc_wwn_attrs                  = tcm_qla2xxx_wwn_attrs,
        .tfc_tpg_base_attrs             = tcm_qla2xxx_tpg_attrs,
        .tfc_tpg_attrib_attrs           = tcm_qla2xxx_tpg_attrib_attrs,
+
+       .default_submit_type            = TARGET_DIRECT_SUBMIT,
+       .direct_submit_supp             = 1,
 };
 
 static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
@@ -1859,6 +1862,9 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
        .fabric_init_nodeacl            = tcm_qla2xxx_init_nodeacl,
 
        .tfc_wwn_attrs                  = tcm_qla2xxx_wwn_attrs,
+
+       .default_submit_type            = TARGET_DIRECT_SUBMIT,
+       .direct_submit_supp             = 1,
 };
 
 static int tcm_qla2xxx_register_configfs(void)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d0911bc28663a8142d0fb0ae69dbad1348efa385..d1c0ba3ef1f5131edaf66d0e124bb784dc8cab9c 100644 (file)
@@ -692,7 +692,7 @@ int scsi_cdl_enable(struct scsi_device *sdev, bool enable)
                ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3,
                                       &data, &sshdr);
                if (ret) {
-                       if (scsi_sense_valid(&sshdr))
+                       if (ret > 0 && scsi_sense_valid(&sshdr))
                                scsi_print_sense_hdr(sdev,
                                        dev_name(&sdev->sdev_gendev), &sshdr);
                        return ret;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c2f647a7c1b050e0862a9bd3b0ad0a2a88ba535b..195ca80667d06131b2d8f0dd2cec060217549bba 100644 (file)
@@ -2299,10 +2299,10 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
        do {
                result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
                                          timeout, 1, &exec_args);
-               if (sdev->removable && scsi_sense_valid(sshdr) &&
+               if (sdev->removable && result > 0 && scsi_sense_valid(sshdr) &&
                    sshdr->sense_key == UNIT_ATTENTION)
                        sdev->changed = 1;
-       } while (scsi_sense_valid(sshdr) &&
+       } while (result > 0 && scsi_sense_valid(sshdr) &&
                 sshdr->sense_key == UNIT_ATTENTION && --retries);
 
        return result;
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 2442d4d2e3f38f2d08fda8548dca91c73b15a579..f668c1c0a98f20bc6b8923687ca451f10351b2aa 100644 (file)
@@ -676,10 +676,10 @@ spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
        for (r = 0; r < retries; r++) {
                result = spi_execute(sdev, spi_write_buffer, REQ_OP_DRV_OUT,
                                     buffer, len, &sshdr);
-               if(result || !scsi_device_online(sdev)) {
+               if (result || !scsi_device_online(sdev)) {
 
                        scsi_device_set_state(sdev, SDEV_QUIESCE);
-                       if (scsi_sense_valid(&sshdr)
+                       if (result > 0 && scsi_sense_valid(&sshdr)
                            && sshdr.sense_key == ILLEGAL_REQUEST
                            /* INVALID FIELD IN CDB */
                            && sshdr.asc == 0x24 && sshdr.ascq == 0x00)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c92a317ba54756667ec83256fec4304cb725fac9..ce091ffcf3de2b37960441b3d89a50dcdb70794b 100644 (file)
@@ -143,7 +143,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
        static const char temp[] = "temporary ";
-       int len;
+       int len, ret;
 
        if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
                /* no cache control on RBC devices; theoretically they
@@ -190,9 +190,10 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
         */
        data.device_specific = 0;
 
-       if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
-                            sdkp->max_retries, &data, &sshdr)) {
-               if (scsi_sense_valid(&sshdr))
+       ret = scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
+                              sdkp->max_retries, &data, &sshdr);
+       if (ret) {
+               if (ret > 0 && scsi_sense_valid(&sshdr))
                        sd_print_sense_hdr(sdkp, &sshdr);
                return -EINVAL;
        }
@@ -2180,19 +2181,21 @@ sd_spinup_disk(struct scsi_disk *sdkp)
                                                      sdkp->max_retries,
                                                      &exec_args);
 
-                       /*
-                        * If the drive has indicated to us that it
-                        * doesn't have any media in it, don't bother
-                        * with any more polling.
-                        */
-                       if (media_not_present(sdkp, &sshdr)) {
-                               if (media_was_present)
-                                       sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
-                               return;
-                       }
+                       if (the_result > 0) {
+                               /*
+                                * If the drive has indicated to us that it
+                                * doesn't have any media in it, don't bother
+                                * with any more polling.
+                                */
+                               if (media_not_present(sdkp, &sshdr)) {
+                                       if (media_was_present)
+                                               sd_printk(KERN_NOTICE, sdkp,
+                                                         "Media removed, stopped polling\n");
+                                       return;
+                               }
 
-                       if (the_result)
                                sense_valid = scsi_sense_valid(&sshdr);
+                       }
                        retries++;
                } while (retries < 3 &&
                         (!scsi_status_is_good(the_result) ||
@@ -2388,11 +2391,10 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
                the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN,
                                              buffer, RC16_LEN, SD_TIMEOUT,
                                              sdkp->max_retries, &exec_args);
-
-               if (media_not_present(sdkp, &sshdr))
-                       return -ENODEV;
-
                if (the_result > 0) {
+                       if (media_not_present(sdkp, &sshdr))
+                               return -ENODEV;
+
                        sense_valid = scsi_sense_valid(&sshdr);
                        if (sense_valid &&
                            sshdr.sense_key == ILLEGAL_REQUEST &&
@@ -2889,7 +2891,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
        }
 
 bad_sense:
-       if (scsi_sense_valid(&sshdr) &&
+       if (res == -EIO && scsi_sense_valid(&sshdr) &&
            sshdr.sense_key == ILLEGAL_REQUEST &&
            sshdr.asc == 0x24 && sshdr.ascq == 0x0)
                /* Invalid field in CDB */
@@ -2937,7 +2939,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
                sd_first_printk(KERN_WARNING, sdkp,
                          "getting Control mode page failed, assume no ATO\n");
 
-               if (scsi_sense_valid(&sshdr))
+               if (res == -EIO && scsi_sense_valid(&sshdr))
                        sd_print_sense_hdr(sdkp, &sshdr);
 
                return;
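
The sense-handling hunks in scsi.c, scsi_lib.c, scsi_transport_spi.c, sd.c above and sr.c below all rest on the same convention: a negative return from scsi_execute_cmd() (or the mode-page wrappers built on it) means the command was never issued, zero means success, and only a positive return, the SCSI status, can be accompanied by valid sense data; the -EIO checks cover wrappers that map a check condition to an errno. A minimal, hypothetical sketch of that check order:

#include <linux/printk.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_device.h>

/* Illustrative handling of a scsi_execute_cmd()-style return value. */
static void example_handle_result(struct scsi_device *sdev, int result,
                                  struct scsi_sense_hdr *sshdr)
{
        if (result < 0)
                sdev_printk(KERN_ERR, sdev,
                            "command not issued, error %d\n", result);
        else if (result > 0 && scsi_sense_valid(sshdr))
                sdev_printk(KERN_INFO, sdev, "sense key 0x%x asc 0x%x\n",
                            sshdr->sense_key, sshdr->asc);
}
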
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 07ef3db3d1a144f0225d9c8637a258b9800666e6..d093dd187b2f9f6dc798925505736f973ebbd2c9 100644 (file)
@@ -177,7 +177,8 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
 
        result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, sizeof(buf),
                                  SR_TIMEOUT, MAX_RETRIES, &exec_args);
-       if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION)
+       if (result > 0 && scsi_sense_valid(&sshdr) &&
+           sshdr.sense_key == UNIT_ATTENTION)
                return DISK_EVENT_MEDIA_CHANGE;
 
        if (result || be16_to_cpu(eh->data_len) < sizeof(*med))
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 17491ba10439c52a0959b19609723a1d7987200f..a2560cc807d34ce881f86eb9970e1f7bf57da0bf 100644 (file)
@@ -559,14 +559,15 @@ static void sym53c8xx_timer(struct timer_list *t)
  */
 #define SYM_EH_ABORT           0
 #define SYM_EH_DEVICE_RESET    1
-#define SYM_EH_BUS_RESET       2
-#define SYM_EH_HOST_RESET      3
 
 /*
  *  Generic method for our eh processing.
  *  The 'op' argument tells what we have to do.
  */
-static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
+/*
+ * Error handlers called from the eh thread (one thread per HBA).
+ */
+static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
 {
        struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
        struct Scsi_Host *shost = cmd->device->host;
@@ -578,37 +579,13 @@ static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
        int sts = -1;
        struct completion eh_done;
 
-       scmd_printk(KERN_WARNING, cmd, "%s operation started\n", opname);
+       scmd_printk(KERN_WARNING, cmd, "ABORT operation started\n");
 
-       /* We may be in an error condition because the PCI bus
-        * went down. In this case, we need to wait until the
-        * PCI bus is reset, the card is reset, and only then
-        * proceed with the scsi error recovery.  There's no
-        * point in hurrying; take a leisurely wait.
+       /*
+        * Escalate to host reset if the PCI bus went down
         */
-#define WAIT_FOR_PCI_RECOVERY  35
-       if (pci_channel_offline(pdev)) {
-               int finished_reset = 0;
-               init_completion(&eh_done);
-               spin_lock_irq(shost->host_lock);
-               /* Make sure we didn't race */
-               if (pci_channel_offline(pdev)) {
-                       BUG_ON(sym_data->io_reset);
-                       sym_data->io_reset = &eh_done;
-               } else {
-                       finished_reset = 1;
-               }
-               spin_unlock_irq(shost->host_lock);
-               if (!finished_reset)
-                       finished_reset = wait_for_completion_timeout
-                                               (sym_data->io_reset,
-                                               WAIT_FOR_PCI_RECOVERY*HZ);
-               spin_lock_irq(shost->host_lock);
-               sym_data->io_reset = NULL;
-               spin_unlock_irq(shost->host_lock);
-               if (!finished_reset)
-                       return SCSI_FAILED;
-       }
+       if (pci_channel_offline(pdev))
+               return SCSI_FAILED;
 
        spin_lock_irq(shost->host_lock);
        /* This one is queued in some place -> to wait for completion */
@@ -620,28 +597,7 @@ static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
                }
        }
 
-       /* Try to proceed the operation we have been asked for */
-       sts = -1;
-       switch(op) {
-       case SYM_EH_ABORT:
-               sts = sym_abort_scsiio(np, cmd, 1);
-               break;
-       case SYM_EH_DEVICE_RESET:
-               sts = sym_reset_scsi_target(np, cmd->device->id);
-               break;
-       case SYM_EH_BUS_RESET:
-               sym_reset_scsi_bus(np, 1);
-               sts = 0;
-               break;
-       case SYM_EH_HOST_RESET:
-               sym_reset_scsi_bus(np, 0);
-               sym_start_up(shost, 1);
-               sts = 0;
-               break;
-       default:
-               break;
-       }
-
+       sts = sym_abort_scsiio(np, cmd, 1);
        /* On error, restore everything and cross fingers :) */
        if (sts)
                cmd_queued = 0;
@@ -658,33 +614,130 @@ static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd)
                spin_unlock_irq(shost->host_lock);
        }
 
-       dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname,
+       dev_warn(&cmd->device->sdev_gendev, "ABORT operation %s.\n",
                        sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed");
        return sts ? SCSI_FAILED : SCSI_SUCCESS;
 }
 
-
-/*
- * Error handlers called from the eh thread (one thread per HBA).
- */
-static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
+static int sym53c8xx_eh_target_reset_handler(struct scsi_cmnd *cmd)
 {
-       return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
-}
+       struct scsi_target *starget = scsi_target(cmd->device);
+       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+       struct sym_data *sym_data = shost_priv(shost);
+       struct pci_dev *pdev = sym_data->pdev;
+       struct sym_hcb *np = sym_data->ncb;
+       SYM_QUEHEAD *qp;
+       int sts;
+       struct completion eh_done;
 
-static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd)
-{
-       return sym_eh_handler(SYM_EH_DEVICE_RESET, "DEVICE RESET", cmd);
+       starget_printk(KERN_WARNING, starget,
+                      "TARGET RESET operation started\n");
+
+       /*
+        * Escalate to host reset if the PCI bus went down
+        */
+       if (pci_channel_offline(pdev))
+               return SCSI_FAILED;
+
+       spin_lock_irq(shost->host_lock);
+       sts = sym_reset_scsi_target(np, starget->id);
+       if (!sts) {
+               FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
+                       struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb,
+                                                          link_ccbq);
+                       struct scsi_cmnd *cmd = cp->cmd;
+                       struct sym_ucmd *ucmd;
+
+                       if (!cmd || cmd->device->channel != starget->channel ||
+                           cmd->device->id != starget->id)
+                               continue;
+
+                       ucmd = SYM_UCMD_PTR(cmd);
+                       init_completion(&eh_done);
+                       ucmd->eh_done = &eh_done;
+                       spin_unlock_irq(shost->host_lock);
+                       if (!wait_for_completion_timeout(&eh_done, 5*HZ)) {
+                               ucmd->eh_done = NULL;
+                               sts = -2;
+                       }
+                       spin_lock_irq(shost->host_lock);
+               }
+       }
+       spin_unlock_irq(shost->host_lock);
+
+       starget_printk(KERN_WARNING, starget, "TARGET RESET operation %s.\n",
+                       sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed");
+       return SCSI_SUCCESS;
 }
 
 static int sym53c8xx_eh_bus_reset_handler(struct scsi_cmnd *cmd)
 {
-       return sym_eh_handler(SYM_EH_BUS_RESET, "BUS RESET", cmd);
+       struct Scsi_Host *shost = cmd->device->host;
+       struct sym_data *sym_data = shost_priv(shost);
+       struct pci_dev *pdev = sym_data->pdev;
+       struct sym_hcb *np = sym_data->ncb;
+
+       scmd_printk(KERN_WARNING, cmd, "BUS RESET operation started\n");
+
+       /*
+        * Escalate to host reset if the PCI bus went down
+        */
+       if (pci_channel_offline(pdev))
+               return SCSI_FAILED;
+
+       spin_lock_irq(shost->host_lock);
+       sym_reset_scsi_bus(np, 1);
+       spin_unlock_irq(shost->host_lock);
+
+       dev_warn(&cmd->device->sdev_gendev, "BUS RESET operation complete.\n");
+       return SCSI_SUCCESS;
 }
 
 static int sym53c8xx_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-       return sym_eh_handler(SYM_EH_HOST_RESET, "HOST RESET", cmd);
+       struct Scsi_Host *shost = cmd->device->host;
+       struct sym_data *sym_data = shost_priv(shost);
+       struct pci_dev *pdev = sym_data->pdev;
+       struct sym_hcb *np = sym_data->ncb;
+       struct completion eh_done;
+       int finished_reset = 1;
+
+       shost_printk(KERN_WARNING, shost, "HOST RESET operation started\n");
+
+       /* We may be in an error condition because the PCI bus
+        * went down. In this case, we need to wait until the
+        * PCI bus is reset, the card is reset, and only then
+        * proceed with the scsi error recovery.  There's no
+        * point in hurrying; take a leisurely wait.
+        */
+#define WAIT_FOR_PCI_RECOVERY  35
+       if (pci_channel_offline(pdev)) {
+               init_completion(&eh_done);
+               spin_lock_irq(shost->host_lock);
+               /* Make sure we didn't race */
+               if (pci_channel_offline(pdev)) {
+                       BUG_ON(sym_data->io_reset);
+                       sym_data->io_reset = &eh_done;
+                       finished_reset = 0;
+               }
+               spin_unlock_irq(shost->host_lock);
+               if (!finished_reset)
+                       finished_reset = wait_for_completion_timeout
+                                               (sym_data->io_reset,
+                                               WAIT_FOR_PCI_RECOVERY*HZ);
+               spin_lock_irq(shost->host_lock);
+               sym_data->io_reset = NULL;
+               spin_unlock_irq(shost->host_lock);
+       }
+
+       if (finished_reset) {
+               sym_reset_scsi_bus(np, 0);
+               sym_start_up(shost, 1);
+       }
+
+       shost_printk(KERN_WARNING, shost, "HOST RESET operation %s.\n",
+                       finished_reset==1 ? "complete" : "failed");
+       return finished_reset ? SCSI_SUCCESS : SCSI_FAILED;
 }
 
 /*
@@ -1635,7 +1688,7 @@ static const struct scsi_host_template sym2_template = {
        .slave_configure        = sym53c8xx_slave_configure,
        .slave_destroy          = sym53c8xx_slave_destroy,
        .eh_abort_handler       = sym53c8xx_eh_abort_handler,
-       .eh_device_reset_handler = sym53c8xx_eh_device_reset_handler,
+       .eh_target_reset_handler = sym53c8xx_eh_target_reset_handler,
        .eh_bus_reset_handler   = sym53c8xx_eh_bus_reset_handler,
        .eh_host_reset_handler  = sym53c8xx_eh_host_reset_handler,
        .this_id                = 7,
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
index 922484ea4e3047ac44cbbfc41b7f69f04345751e..922b207bc69dca6fe7f6833046b683d151cfbdb5 100644 (file)
@@ -1,12 +1,15 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config ISCSI_TARGET
-       tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
+       tristate "SCSI Target Mode Stack"
        depends on INET
        select CRYPTO
        select CRYPTO_CRC32C
        select CRYPTO_CRC32C_INTEL if X86
        help
-       Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
-       Target Mode Stack.
+       Say M to enable the SCSI target mode stack. A SCSI target mode stack
+       is software that makes local storage available over a storage network
+       to a SCSI initiator system. The supported storage network technologies
+       include iSCSI, Fibre Channel and the SCSI RDMA Protocol (SRP).
+       Configuration of the SCSI target mode stack happens through configfs.
 
 source "drivers/target/iscsi/cxgbit/Kconfig"
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b516c2893420bc142412061aa5db056d6732b685..1d25e64b068a02867b1320d1c29577cb8d6c81e9 100644 (file)
@@ -1234,12 +1234,6 @@ attach_cmd:
        spin_lock_bh(&conn->cmd_lock);
        list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
        spin_unlock_bh(&conn->cmd_lock);
-       /*
-        * Check if we need to delay processing because of ALUA
-        * Active/NonOptimized primary access state..
-        */
-       core_alua_check_nonop_delay(&cmd->se_cmd);
-
        return 0;
 }
 EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 1cff6052e820219eaab4f4061117154d40596d3a..88db94f382bb926d6edbba4f7ca5abbdadb6d70c 100644 (file)
@@ -1589,5 +1589,8 @@ const struct target_core_fabric_ops iscsi_ops = {
        .tfc_tpg_nacl_auth_attrs        = lio_target_nacl_auth_attrs,
        .tfc_tpg_nacl_param_attrs       = lio_target_nacl_param_attrs,
 
-       .write_pending_must_be_called   = true,
+       .write_pending_must_be_called   = 1,
+
+       .default_submit_type            = TARGET_DIRECT_SUBMIT,
+       .direct_submit_supp             = 1,
 };
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index f460a66c0e7c504132898dc2f77bb850aecd0111..6797200211836d5966027f236fd7f7d06c53821f 100644 (file)
@@ -948,7 +948,7 @@ int iscsit_execute_cmd(struct iscsit_cmd *cmd, int ooo)
 
                        iscsit_set_unsolicited_dataout(cmd);
                }
-               return transport_handle_cdb_direct(&cmd->se_cmd);
+               return target_submit(&cmd->se_cmd);
 
        case ISCSI_OP_NOOP_OUT:
        case ISCSI_OP_TEXT:
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index afc801f255f5d361c5b12bae6ae397210ab4b9b2..9c4aa01b6351b5b578705e16ee0985af0e75fa03 100644 (file)
@@ -318,7 +318,7 @@ static int iscsit_task_reassign_complete_read(
                pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
                        " transport\n", cmd->init_task_tag,
                        cmd->se_cmd.t_state);
-               transport_handle_cdb_direct(se_cmd);
+               target_submit(se_cmd);
                return 0;
        }
 
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 4ec99a55ac305a4829e894030226dacb404b0d2d..8e4035ff367485a17de0e2e609cc9ad4ff2457e3 100644 (file)
@@ -154,7 +154,7 @@ static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd)
                               GFP_ATOMIC))
                return;
 
-       target_queue_submission(se_cmd);
+       target_submit(se_cmd);
        return;
 
 out_done:
@@ -1102,6 +1102,8 @@ static const struct target_core_fabric_ops loop_ops = {
        .tfc_wwn_attrs                  = tcm_loop_wwn_attrs,
        .tfc_tpg_base_attrs             = tcm_loop_tpg_attrs,
        .tfc_tpg_attrib_attrs           = tcm_loop_tpg_attrib_attrs,
+       .default_submit_type            = TARGET_QUEUE_SUBMIT,
+       .direct_submit_supp             = 0,
 };
 
 static int __init tcm_loop_fabric_init(void)
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 2a761bc09193894e9f4a0eaf5c363e251212d58d..b604fcae21e1125bc6d1a7767f1072ef9ce9dc3c 100644 (file)
@@ -2278,6 +2278,9 @@ static const struct target_core_fabric_ops sbp_ops = {
        .tfc_wwn_attrs                  = sbp_wwn_attrs,
        .tfc_tpg_base_attrs             = sbp_tpg_base_attrs,
        .tfc_tpg_attrib_attrs           = sbp_tpg_attrib_attrs,
+
+       .default_submit_type            = TARGET_DIRECT_SUBMIT,
+       .direct_submit_supp             = 1,
 };
 
 static int __init sbp_init(void)
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 3372856319f72ffed8262b19a264ec05b118e5b4..01751faad38663ef6bd95e118c07df52d40b4fa0 100644 (file)
@@ -850,7 +850,6 @@ int core_alua_check_nonop_delay(
        msleep_interruptible(cmd->alua_nonop_delay);
        return 0;
 }
-EXPORT_SYMBOL(core_alua_check_nonop_delay);
 
 static int core_alua_write_tpg_metadata(
        const char *path,
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 936e5ff1b209ecb9df704766d214326eee5e811d..a5f58988130a15c921e45570298edfbe212273ba 100644 (file)
@@ -577,6 +577,7 @@ DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
 DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
 DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
 DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
+DEF_CONFIGFS_ATTRIB_SHOW(submit_type);
 
 #define DEF_CONFIGFS_ATTRIB_STORE_U32(_name)                           \
 static ssize_t _name##_store(struct config_item *item, const char *page,\
@@ -1231,6 +1232,24 @@ static ssize_t emulate_rsoc_store(struct config_item *item,
        return count;
 }
 
+static ssize_t submit_type_store(struct config_item *item, const char *page,
+                                size_t count)
+{
+       struct se_dev_attrib *da = to_attrib(item);
+       int ret;
+       u8 val;
+
+       ret = kstrtou8(page, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       if (val > TARGET_QUEUE_SUBMIT)
+               return -EINVAL;
+
+       da->submit_type = val;
+       return count;
+}
+
 CONFIGFS_ATTR(, emulate_model_alias);
 CONFIGFS_ATTR(, emulate_dpo);
 CONFIGFS_ATTR(, emulate_fua_write);
@@ -1266,6 +1285,7 @@ CONFIGFS_ATTR(, unmap_zeroes_data);
 CONFIGFS_ATTR(, max_write_same_len);
 CONFIGFS_ATTR(, alua_support);
 CONFIGFS_ATTR(, pgr_support);
+CONFIGFS_ATTR(, submit_type);
 
 /*
  * dev_attrib attributes for devices using the target core SBC/SPC
@@ -1308,6 +1328,7 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
        &attr_alua_support,
        &attr_pgr_support,
        &attr_emulate_rsoc,
+       &attr_submit_type,
        NULL,
 };
 EXPORT_SYMBOL(sbc_attrib_attrs);
@@ -1325,6 +1346,7 @@ struct configfs_attribute *passthrough_attrib_attrs[] = {
        &attr_emulate_pr,
        &attr_alua_support,
        &attr_pgr_support,
+       &attr_submit_type,
        NULL,
 };
 EXPORT_SYMBOL(passthrough_attrib_attrs);
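
The vendor/product/revision store conversions in the hunks that follow rely on the strscpy() contract: it returns the number of characters copied, or -E2BIG when the source does not fit, whereas strlcpy() returned the full source length; hence the signed len and the "len < 0 || len > ..." bound checks. A hedged sketch of the same pattern (example_copy_inquiry_field and its parameters are hypothetical):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative bounded copy of a user-supplied INQUIRY string. */
static int example_copy_inquiry_field(char *dst, size_t dst_size,
                                      const char *src, ssize_t field_len)
{
        ssize_t len = strscpy(dst, src, dst_size);

        if (len < 0 || len > field_len)
                return -EINVAL;         /* too long for the INQUIRY field */
        return 0;
}
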
@@ -1392,16 +1414,16 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item,
        /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
        unsigned char buf[INQUIRY_VENDOR_LEN + 2];
        char *stripped = NULL;
-       size_t len;
+       ssize_t len;
        ssize_t ret;
 
-       len = strlcpy(buf, page, sizeof(buf));
-       if (len < sizeof(buf)) {
+       len = strscpy(buf, page, sizeof(buf));
+       if (len > 0) {
                /* Strip any newline added from userspace. */
                stripped = strstrip(buf);
                len = strlen(stripped);
        }
-       if (len > INQUIRY_VENDOR_LEN) {
+       if (len < 0 || len > INQUIRY_VENDOR_LEN) {
                pr_err("Emulated T10 Vendor Identification exceeds"
                        " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
                        "\n");
@@ -1448,16 +1470,16 @@ static ssize_t target_wwn_product_id_store(struct config_item *item,
        /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
        unsigned char buf[INQUIRY_MODEL_LEN + 2];
        char *stripped = NULL;
-       size_t len;
+       ssize_t len;
        ssize_t ret;
 
-       len = strlcpy(buf, page, sizeof(buf));
-       if (len < sizeof(buf)) {
+       len = strscpy(buf, page, sizeof(buf));
+       if (len > 0) {
                /* Strip any newline added from userspace. */
                stripped = strstrip(buf);
                len = strlen(stripped);
        }
-       if (len > INQUIRY_MODEL_LEN) {
+       if (len < 0 || len > INQUIRY_MODEL_LEN) {
                pr_err("Emulated T10 Vendor exceeds INQUIRY_MODEL_LEN: "
                         __stringify(INQUIRY_MODEL_LEN)
                        "\n");
@@ -1504,16 +1526,16 @@ static ssize_t target_wwn_revision_store(struct config_item *item,
        /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
        unsigned char buf[INQUIRY_REVISION_LEN + 2];
        char *stripped = NULL;
-       size_t len;
+       ssize_t len;
        ssize_t ret;
 
-       len = strlcpy(buf, page, sizeof(buf));
-       if (len < sizeof(buf)) {
+       len = strscpy(buf, page, sizeof(buf));
+       if (len > 0) {
                /* Strip any newline added from userspace. */
                stripped = strstrip(buf);
                len = strlen(stripped);
        }
-       if (len > INQUIRY_REVISION_LEN) {
+       if (len < 0 || len > INQUIRY_REVISION_LEN) {
                pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
                         __stringify(INQUIRY_REVISION_LEN)
                        "\n");
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index b7ac60f4a21945b1545e7d7112f09efe2e906a69..0f3fd775fe6dd27ab29a4e27c19db5ce34f12429 100644 (file)
@@ -779,6 +779,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
        dev->dev_attrib.unmap_zeroes_data =
                                DA_UNMAP_ZEROES_DATA_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
+       dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;
 
        xcopy_lun = &dev->xcopy_lun;
        rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index b7c637644cd48d5ede300009bd0f4165c0523727..7156a4dc1ca7d9ddebe469fa59791b52e63b653b 100644 (file)
@@ -1065,8 +1065,32 @@ target_fabric_wwn_cmd_completion_affinity_store(struct config_item *item,
 }
 CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity);
 
+static ssize_t
+target_fabric_wwn_default_submit_type_show(struct config_item *item,
+                                          char *page)
+{
+       struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+                                         param_group);
+       return sysfs_emit(page, "%u\n",
+                         wwn->wwn_tf->tf_ops->default_submit_type);
+}
+CONFIGFS_ATTR_RO(target_fabric_wwn_, default_submit_type);
+
+static ssize_t
+target_fabric_wwn_direct_submit_supported_show(struct config_item *item,
+                                              char *page)
+{
+       struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+                                         param_group);
+       return sysfs_emit(page, "%u\n",
+                         wwn->wwn_tf->tf_ops->direct_submit_supp);
+}
+CONFIGFS_ATTR_RO(target_fabric_wwn_, direct_submit_supported);
+
 static struct configfs_attribute *target_fabric_wwn_param_attrs[] = {
        &target_fabric_wwn_attr_cmd_completion_affinity,
+       &target_fabric_wwn_attr_default_submit_type,
+       &target_fabric_wwn_attr_direct_submit_supported,
        NULL,
 };
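
Together with the per-device submit_type attribute added in target_core_configfs.c, these read-only fabric attributes let userspace see how commands will be dispatched. A sketch of how the effective mode is presumably resolved (the helper name example_effective_submit_type is hypothetical; the in-tree logic lives in the target_submit() hunk further down):

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Illustrative only: the device attribute may defer to the fabric
 * default, and direct submission is honoured only when the fabric
 * driver declares support for it. */
static u8 example_effective_submit_type(const struct se_dev_attrib *da,
                                        const struct target_core_fabric_ops *tfo)
{
        if (da->submit_type == TARGET_FABRIC_DEFAULT_SUBMIT)
                return tfo->default_submit_type;

        if (da->submit_type == TARGET_DIRECT_SUBMIT && !tfo->direct_submit_supp)
                return TARGET_QUEUE_SUBMIT;

        return da->submit_type;
}
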
 
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 687adc9e086ca942ba85dd591ab3b7bf76cc9192..c81def3c96df6b299d9dedbabf79c1103386a563 100644 (file)
@@ -264,6 +264,7 @@ void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
                percpu_ref_put(&cmd_cnt->refcnt);
 
        percpu_ref_exit(&cmd_cnt->refcnt);
+       kfree(cmd_cnt);
 }
 EXPORT_SYMBOL_GPL(target_free_cmd_counter);
 
@@ -1575,17 +1576,39 @@ target_cmd_parse_cdb(struct se_cmd *cmd)
 }
 EXPORT_SYMBOL(target_cmd_parse_cdb);
 
-/*
- * Used by fabric module frontends to queue tasks directly.
- * May only be used from process context.
- */
-int transport_handle_cdb_direct(
-       struct se_cmd *cmd)
+static int __target_submit(struct se_cmd *cmd)
 {
        sense_reason_t ret;
 
        might_sleep();
 
+       /*
+        * Check if we need to delay processing because of ALUA
+        * Active/NonOptimized primary access state..
+        */
+       core_alua_check_nonop_delay(cmd);
+
+       if (cmd->t_data_nents != 0) {
+               /*
+                * This is primarily a hack for udev and tcm loop which sends
+                * INQUIRYs with a single page and expects the data to be
+                * cleared.
+                */
+               if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
+                   cmd->data_direction == DMA_FROM_DEVICE) {
+                       struct scatterlist *sgl = cmd->t_data_sg;
+                       unsigned char *buf = NULL;
+
+                       BUG_ON(!sgl);
+
+                       buf = kmap_local_page(sg_page(sgl));
+                       if (buf) {
+                               memset(buf + sgl->offset, 0, sgl->length);
+                               kunmap_local(buf);
+                       }
+               }
+       }
+
        if (!cmd->se_lun) {
                dump_stack();
                pr_err("cmd->se_lun is NULL\n");
@@ -1613,7 +1636,6 @@ int transport_handle_cdb_direct(
                transport_generic_request_failure(cmd, ret);
        return 0;
 }
-EXPORT_SYMBOL(transport_handle_cdb_direct);
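
The INQUIRY zeroing work-around carried over from the old target_submit() now uses the local kmap API instead of kmap()/kunmap(). A minimal sketch of that pattern, assuming a single valid scatterlist entry (names are illustrative):

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Zero the payload described by one scatterlist entry via a short-lived
 * local mapping; the mapping cannot fail and must be torn down in the
 * same context it was created in, which fits this synchronous path. */
static void zero_single_sg(struct scatterlist *sg)
{
	void *va = kmap_local_page(sg_page(sg));

	memset(va + sg->offset, 0, sg->length);
	kunmap_local(va);
}

Since kmap_local_page() never returns NULL, the if (buf) test in the hunk above is purely defensive.
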
 
 sense_reason_t
 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
@@ -1780,53 +1802,6 @@ generic_fail:
 }
 EXPORT_SYMBOL_GPL(target_submit_prep);
 
-/**
- * target_submit - perform final initialization and submit cmd to LIO core
- * @se_cmd: command descriptor to submit
- *
- * target_submit_prep must have been called on the cmd, and this must be
- * called from process context.
- */
-void target_submit(struct se_cmd *se_cmd)
-{
-       struct scatterlist *sgl = se_cmd->t_data_sg;
-       unsigned char *buf = NULL;
-
-       might_sleep();
-
-       if (se_cmd->t_data_nents != 0) {
-               BUG_ON(!sgl);
-               /*
-                * A work-around for tcm_loop as some userspace code via
-                * scsi-generic do not memset their associated read buffers,
-                * so go ahead and do that here for type non-data CDBs.  Also
-                * note that this is currently guaranteed to be a single SGL
-                * for this case by target core in target_setup_cmd_from_cdb()
-                * -> transport_generic_cmd_sequencer().
-                */
-               if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
-                    se_cmd->data_direction == DMA_FROM_DEVICE) {
-                       if (sgl)
-                               buf = kmap(sg_page(sgl)) + sgl->offset;
-
-                       if (buf) {
-                               memset(buf, 0, sgl->length);
-                               kunmap(sg_page(sgl));
-                       }
-               }
-
-       }
-
-       /*
-        * Check if we need to delay processing because of ALUA
-        * Active/NonOptimized primary access state..
-        */
-       core_alua_check_nonop_delay(se_cmd);
-
-       transport_handle_cdb_direct(se_cmd);
-}
-EXPORT_SYMBOL_GPL(target_submit);
-
 /**
  * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
  *
@@ -1922,7 +1897,7 @@ void target_queued_submit_work(struct work_struct *work)
                        se_plug = target_plug_device(se_dev);
                }
 
-               target_submit(se_cmd);
+               __target_submit(se_cmd);
        }
 
        if (se_plug)
@@ -1933,7 +1908,7 @@ void target_queued_submit_work(struct work_struct *work)
  * target_queue_submission - queue the cmd to run on the LIO workqueue
  * @se_cmd: command descriptor to submit
  */
-void target_queue_submission(struct se_cmd *se_cmd)
+static void target_queue_submission(struct se_cmd *se_cmd)
 {
        struct se_device *se_dev = se_cmd->se_dev;
        int cpu = se_cmd->cpuid;
@@ -1943,7 +1918,35 @@ void target_queue_submission(struct se_cmd *se_cmd)
        llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
        queue_work_on(cpu, target_submission_wq, &sq->work);
 }
-EXPORT_SYMBOL_GPL(target_queue_submission);
+
+/**
+ * target_submit - perform final initialization and submit cmd to LIO core
+ * @se_cmd: command descriptor to submit
+ *
+ * target_submit_prep() or equivalent command setup must have been called on
+ * the cmd, and this must be called from process context.
+ */
+int target_submit(struct se_cmd *se_cmd)
+{
+       const struct target_core_fabric_ops *tfo = se_cmd->se_sess->se_tpg->se_tpg_tfo;
+       struct se_dev_attrib *da = &se_cmd->se_dev->dev_attrib;
+       u8 submit_type;
+
+       if (da->submit_type == TARGET_FABRIC_DEFAULT_SUBMIT)
+               submit_type = tfo->default_submit_type;
+       else if (da->submit_type == TARGET_DIRECT_SUBMIT &&
+                tfo->direct_submit_supp)
+               submit_type = TARGET_DIRECT_SUBMIT;
+       else
+               submit_type = TARGET_QUEUE_SUBMIT;
+
+       if (submit_type == TARGET_DIRECT_SUBMIT)
+               return __target_submit(se_cmd);
+
+       target_queue_submission(se_cmd);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(target_submit);
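
target_submit() is now the single entry point that decides, per command, between direct submission and deferral to the LIO workqueue: the device attribute (defaulting to TARGET_FABRIC_DEFAULT_SUBMIT, as set in the target_core_device.c hunk above) either defers to the fabric's default_submit_type or forces a mode, and direct submission is only honoured when the fabric advertises direct_submit_supp. A minimal fabric-side sketch, with my_fabric_ops and my_fabric_queue_cmd as hypothetical names and the mandatory callbacks elided:

#include <target/target_core_fabric.h>

/* Fabric template that can handle direct submission but prefers the
 * workqueue unless the admin overrides dev_attrib.submit_type. */
static const struct target_core_fabric_ops my_fabric_ops = {
	/* ... required callbacks elided for brevity ... */
	.default_submit_type	= TARGET_QUEUE_SUBMIT,
	.direct_submit_supp	= 1,
};

static void my_fabric_queue_cmd(struct se_cmd *se_cmd)
{
	/* target_submit_prep() has already attached CDB and data; this
	 * either runs __target_submit() here or queues the command. */
	target_submit(se_cmd);
}
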
 
 static void target_complete_tmr_failure(struct work_struct *work)
 {
index 22cc6cac0ba2b5704873aaa73bab75ea37cb235a..7eb94894bd68faf639a9fc02da004b986df89c2b 100644 (file)
@@ -201,7 +201,7 @@ struct tcmu_tmr {
 
        uint8_t tmr_type;
        uint32_t tmr_cmd_cnt;
-       int16_t tmr_cmd_ids[];
+       int16_t tmr_cmd_ids[] __counted_by(tmr_cmd_cnt);
 };
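
__counted_by() ties the flexible array's bounds to tmr_cmd_cnt, so FORTIFY_SOURCE and UBSAN bounds checks know its runtime size. The usual allocation pattern, sketched with a hypothetical helper:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* struct_size() computes sizeof(*tmr) + cnt * sizeof(tmr->tmr_cmd_ids[0])
 * with overflow checking; the counter must be set before the array is
 * indexed so the __counted_by() bound is meaningful. */
static struct tcmu_tmr *tcmu_alloc_tmr(uint32_t cnt)
{
	struct tcmu_tmr *tmr;

	tmr = kzalloc(struct_size(tmr, tmr_cmd_ids, cnt), GFP_NOIO);
	if (tmr)
		tmr->tmr_cmd_cnt = cnt;
	return tmr;
}
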
 
 /*
index 6ac3fc1a7d39bba8478bbdb1e00a27dafb44c46f..5ee03d1cba2bee5fbe04b0ba8122cf5ac4513dff 100644 (file)
@@ -432,6 +432,9 @@ static const struct target_core_fabric_ops ft_fabric_ops = {
 
        .tfc_wwn_attrs                  = ft_wwn_attrs,
        .tfc_tpg_nacl_base_attrs        = ft_nacl_base_attrs,
+
+       .default_submit_type            = TARGET_DIRECT_SUBMIT,
+       .direct_submit_supp             = 1,
 };
 
 static struct notifier_block ft_notifier = {
index 93417518c04d15c2403b5742117c909bea8be6e9..af576d3606f609a594672aafe7cd616868120929 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/regulator/consumer.h>
 #include <linux/sched/clock.h>
+#include <linux/iopoll.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_driver.h>
@@ -273,7 +274,6 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
 static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_change_power_mode(struct ufs_hba *hba,
@@ -446,8 +446,8 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
        } else {
                doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        }
-       trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
-                       doorbell, hwq_id, transfer_len, intr, lba, opcode, group_id);
+       trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
+                            transfer_len, intr, lba, opcode, group_id);
 }
 
 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
@@ -1356,9 +1356,10 @@ static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
                return;
        }
        hba->clk_scaling.is_suspended = true;
+       hba->clk_scaling.window_start_t = 0;
        spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
-       __ufshcd_suspend_clkscaling(hba);
+       devfreq_suspend_device(hba->devfreq);
 }
 
 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
@@ -1401,6 +1402,13 @@ static int ufshcd_devfreq_target(struct device *dev,
                return 0;
        }
 
+       /* Skip scaling clock when clock scaling is suspended */
+       if (hba->clk_scaling.is_suspended) {
+               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+               dev_warn(hba->dev, "clock scaling is suspended, skip\n");
+               return 0;
+       }
+
        if (!hba->clk_scaling.active_reqs)
                sched_clk_scaling_suspend_work = true;
 
@@ -1429,7 +1437,7 @@ static int ufshcd_devfreq_target(struct device *dev,
                ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
 out:
-       if (sched_clk_scaling_suspend_work)
+       if (sched_clk_scaling_suspend_work && !scale_up)
                queue_work(hba->clk_scaling.workq,
                           &hba->clk_scaling.suspend_work);
 
@@ -1535,16 +1543,6 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
        dev_pm_opp_remove(hba->dev, clki->max_freq);
 }
 
-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
-{
-       unsigned long flags;
-
-       devfreq_suspend_device(hba->devfreq);
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->clk_scaling.window_start_t = 0;
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
-
 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 {
        unsigned long flags;
@@ -1557,11 +1555,12 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
        if (!hba->clk_scaling.is_suspended) {
                suspend = true;
                hba->clk_scaling.is_suspended = true;
+               hba->clk_scaling.window_start_t = 0;
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        if (suspend)
-               __ufshcd_suspend_clkscaling(hba);
+               devfreq_suspend_device(hba->devfreq);
 }
 
 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
@@ -2164,7 +2163,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
        lrbp->compl_time_stamp = ktime_set(0, 0);
        lrbp->compl_time_stamp_local_clock = 0;
        ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
-       ufshcd_clk_scaling_start_busy(hba);
+       if (lrbp->cmd)
+               ufshcd_clk_scaling_start_busy(hba);
        if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
                ufshcd_start_monitor(hba, lrbp);
 
@@ -2299,7 +2299,11 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
  */
 static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
 {
-       return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
+       u32 val;
+       int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
+                                   500, UIC_CMD_TIMEOUT * 1000, false, hba,
+                                   REG_CONTROLLER_STATUS);
+       return ret == 0;
 }
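
Rather than sampling REG_CONTROLLER_STATUS once, the helper now polls it via read_poll_timeout() (hence the new linux/iopoll.h include), sleeping 500 us between reads for up to UIC_CMD_TIMEOUT ms. A minimal sketch of the same polling idiom against placeholder register names:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define MY_STATUS_REG	0x20		/* placeholder register offset */
#define MY_READY_BIT	BIT(3)		/* placeholder ready flag */

static u32 my_read_status(void __iomem *base)
{
	return readl(base + MY_STATUS_REG);
}

/* Poll every 500 us, give up after 100 ms; read_poll_timeout() returns 0
 * on success and -ETIMEDOUT otherwise. It may sleep, which is why this
 * series also drops the host_lock around __ufshcd_send_uic_cmd(). */
static bool my_wait_ready(void __iomem *base)
{
	u32 val;

	return read_poll_timeout(my_read_status, val, val & MY_READY_BIT,
				 500, 100 * 1000, false, base) == 0;
}
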
 
 /**
@@ -2392,7 +2396,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
                      bool completion)
 {
        lockdep_assert_held(&hba->uic_cmd_mutex);
-       lockdep_assert_held(hba->host->host_lock);
 
        if (!ufshcd_ready_for_uic_cmd(hba)) {
                dev_err(hba->dev,
@@ -2419,7 +2422,6 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
        int ret;
-       unsigned long flags;
 
        if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
                return 0;
@@ -2428,9 +2430,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
        mutex_lock(&hba->uic_cmd_mutex);
        ufshcd_add_delay_before_dme_cmd(hba);
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
        ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
        if (!ret)
                ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
 
@@ -2714,27 +2714,23 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
  *                        for SCSI Purposes
  * @hba: per adapter instance
  * @lrbp: pointer to local reference block
- *
- * Return: 0 upon success; < 0 upon failure.
  */
-static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 {
+       struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
+       unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
        u8 upiu_flags;
-       int ret = 0;
 
        if (hba->ufs_version <= ufshci_version(1, 1))
                lrbp->command_type = UTP_CMD_TYPE_SCSI;
        else
                lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
 
-       if (likely(lrbp->cmd)) {
-               ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
-               ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
-       } else {
-               ret = -EINVAL;
-       }
-
-       return ret;
+       ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
+                                   lrbp->cmd->sc_data_direction, 0);
+       if (ioprio_class == IOPRIO_CLASS_RT)
+               upiu_flags |= UPIU_CMD_FLAGS_CP;
+       ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
 }
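
Requests carrying the real-time I/O priority class are now flagged HIGH priority in the UPIU via UPIU_CMD_FLAGS_CP (see the ufs.h hunk further down that adds the flag). A minimal sketch of the class extraction used above:

#include <linux/blk-mq.h>
#include <linux/ioprio.h>
#include <scsi/scsi_cmnd.h>

/* True when the command was issued under the RT I/O class, e.g. by a
 * task started with ionice -c1. */
static bool cmd_is_realtime(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);

	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq)) == IOPRIO_CLASS_RT;
}
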
 
 /**
@@ -2822,8 +2818,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        int err = 0;
        struct ufs_hw_queue *hwq = NULL;
 
-       WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
-
        switch (hba->ufshcd_state) {
        case UFSHCD_STATE_OPERATIONAL:
                break;
@@ -4133,8 +4127,8 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
                wmb();
                reenable_intr = true;
        }
-       ret = __ufshcd_send_uic_cmd(hba, cmd, false);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
+       ret = __ufshcd_send_uic_cmd(hba, cmd, false);
        if (ret) {
                dev_err(hba->dev,
                        "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
@@ -5097,8 +5091,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
        struct request_queue *q = sdev->request_queue;
 
        blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
-       if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
-               blk_queue_update_dma_alignment(q, SZ_4K - 1);
+
        /*
         * Block runtime-pm until all consumers are added.
         * Refer ufshcd_setup_links().
@@ -5114,6 +5107,9 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
         */
        sdev->silence_suspend = 1;
 
+       if (hba->vops && hba->vops->config_scsi_dev)
+               hba->vops->config_scsi_dev(sdev);
+
        ufshcd_crypto_register(hba, q);
 
        return 0;
@@ -5404,7 +5400,6 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
                                lrbp->utr_descriptor_ptr->header.ocs = ocs;
                        }
                        complete(hba->dev_cmd.complete);
-                       ufshcd_clk_scaling_update_busy(hba);
                }
        }
 }
@@ -6923,8 +6918,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        spin_lock_irqsave(host->host_lock, flags);
 
        task_tag = req->tag;
-       WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
-                 task_tag);
        hba->tmf_rqs[req->tag] = req;
        treq->upiu_req.req_header.task_tag = task_tag;
 
@@ -7498,8 +7491,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
        bool outstanding;
        u32 reg;
 
-       WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
-
        ufshcd_hold(hba);
 
        if (!is_mcq_enabled(hba)) {
@@ -7715,6 +7706,19 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 
        hba = shost_priv(cmd->device->host);
 
+       /*
+        * If runtime PM sent SSU and got a timeout, scsi_error_handler is
+        * stuck in this function waiting for flush_work(&hba->eh_work). And
+        * ufshcd_err_handler(eh_work) is stuck waiting for runtime PM. Do
+        * ufshcd_link_recovery instead of eh_work to prevent deadlock.
+        */
+       if (hba->pm_op_in_progress) {
+               if (ufshcd_link_recovery(hba))
+                       err = FAILED;
+
+               return err;
+       }
+
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->force_reset = true;
        ufshcd_schedule_eh_work(hba);
@@ -8722,7 +8726,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
        if (ret)
                goto out;
 
-       if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
+       if (!hba->pm_op_in_progress &&
+           (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
                /* Reset the device and controller before doing reinit */
                ufshcd_device_reset(hba);
                ufshcd_hba_stop(hba);
index 2491e7e870283d29bae9bc1abaf6829bf4212fc3..bb30267da4711e345ee4b5aa19d510ca8ac0c9d6 100644 (file)
@@ -305,12 +305,11 @@ static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
  *
  * Return: 0 (success).
  */
-static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
+static void cdns_ufs_pltfrm_remove(struct platform_device *pdev)
 {
        struct ufs_hba *hba =  platform_get_drvdata(pdev);
 
        ufshcd_remove(hba);
-       return 0;
 }
 
 static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
@@ -322,7 +321,7 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
 
 static struct platform_driver cdns_ufs_pltfrm_driver = {
        .probe  = cdns_ufs_pltfrm_probe,
-       .remove = cdns_ufs_pltfrm_remove,
+       .remove_new = cdns_ufs_pltfrm_remove,
        .driver = {
                .name   = "cdns-ufshcd",
                .pm     = &cdns_ufs_dev_pm_ops,
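
This and the following UFS platform glue drivers are converted from .remove to .remove_new, whose callback returns void; the int returned by the old callback was ignored by the platform core, so nothing is lost. The converted shape, sketched for a hypothetical driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int my_ufs_probe(struct platform_device *pdev)
{
	/* allocate and register the host here */
	return 0;
}

/* void return: a remove callback has no meaningful error to report. */
static void my_ufs_remove(struct platform_device *pdev)
{
	/* tear the host down here */
}

static struct platform_driver my_ufs_driver = {
	.probe		= my_ufs_probe,
	.remove_new	= my_ufs_remove,
	.driver		= {
		.name	= "my-ufshcd",	/* placeholder name */
	},
};
module_platform_driver(my_ufs_driver);
MODULE_DESCRIPTION("Sketch of a .remove_new conversion");
MODULE_LICENSE("GPL");
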
index 4d5389dd958579e9b9eea44f3478d40942e224e3..a3877592604d5d659add23f18f1c93c22ef99341 100644 (file)
@@ -74,14 +74,12 @@ static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev)
  * @pdev: pointer to platform device structure
  *
  */
-static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
+static void tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
 {
        struct ufs_hba *hba =  platform_get_drvdata(pdev);
 
        pm_runtime_get_sync(&(pdev)->dev);
        ufshcd_remove(hba);
-
-       return 0;
 }
 
 static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
@@ -91,7 +89,7 @@ static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
 
 static struct platform_driver tc_dwc_g210_pltfm_driver = {
        .probe          = tc_dwc_g210_pltfm_probe,
-       .remove         = tc_dwc_g210_pltfm_remove,
+       .remove_new     = tc_dwc_g210_pltfm_remove,
        .driver         = {
                .name   = "tc-dwc-g210-pltfm",
                .pm     = &tc_dwc_g210_pltfm_pm_ops,
index 117eb7da92acd1ec579d98f36f91a236eeec402d..250c22df000d534632dd0b5924bade6d5467ee1b 100644 (file)
@@ -65,13 +65,11 @@ disable_pm:
        return ret;
 }
 
-static int ti_j721e_ufs_remove(struct platform_device *pdev)
+static void ti_j721e_ufs_remove(struct platform_device *pdev)
 {
        of_platform_depopulate(&pdev->dev);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-
-       return 0;
 }
 
 static const struct of_device_id ti_j721e_ufs_of_match[] = {
@@ -85,7 +83,7 @@ MODULE_DEVICE_TABLE(of, ti_j721e_ufs_of_match);
 
 static struct platform_driver ti_j721e_ufs_driver = {
        .probe  = ti_j721e_ufs_probe,
-       .remove = ti_j721e_ufs_remove,
+       .remove_new = ti_j721e_ufs_remove,
        .driver = {
                .name   = "ti-j721e-ufs",
                .of_match_table = ti_j721e_ufs_of_match,
index 3396e0388512c74e08d8f423ffe160c2bb808471..71bd6dbc0547cb4f8596aa1cc7d0f861818cbb83 100644 (file)
@@ -1511,6 +1511,11 @@ static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
        return 0;
 }
 
+static void exynos_ufs_config_scsi_dev(struct scsi_device *sdev)
+{
+       blk_queue_update_dma_alignment(sdev->request_queue, SZ_4K - 1);
+}
+
 static int fsd_ufs_post_link(struct exynos_ufs *ufs)
 {
        int i;
@@ -1579,6 +1584,7 @@ static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
        .hibern8_notify                 = exynos_ufs_hibern8_notify,
        .suspend                        = exynos_ufs_suspend,
        .resume                         = exynos_ufs_resume,
+       .config_scsi_dev                = exynos_ufs_config_scsi_dev,
 };
 
 static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
@@ -1605,7 +1611,7 @@ static int exynos_ufs_probe(struct platform_device *pdev)
        return err;
 }
 
-static int exynos_ufs_remove(struct platform_device *pdev)
+static void exynos_ufs_remove(struct platform_device *pdev)
 {
        struct ufs_hba *hba =  platform_get_drvdata(pdev);
        struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -1615,8 +1621,6 @@ static int exynos_ufs_remove(struct platform_device *pdev)
 
        phy_power_off(ufs->phy);
        phy_exit(ufs->phy);
-
-       return 0;
 }
 
 static struct exynos_ufs_uic_attr exynos7_uic_attr = {
@@ -1680,8 +1684,7 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
                                  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
                                  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
                                  UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
-                                 UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
-                                 UFSHCD_QUIRK_4KB_DMA_ALIGNMENT,
+                                 UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
        .opts                   = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
                                  EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
                                  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
@@ -1756,7 +1759,7 @@ static const struct dev_pm_ops exynos_ufs_pm_ops = {
 
 static struct platform_driver exynos_ufs_pltform = {
        .probe  = exynos_ufs_probe,
-       .remove = exynos_ufs_remove,
+       .remove_new = exynos_ufs_remove,
        .driver = {
                .name   = "exynos-ufshc",
                .pm     = &exynos_ufs_pm_ops,
index 5b3060cd0ab8b80625b309c869b805032b2e60d5..0229ac0a8dbed3176e62b117fd21d265273e7b98 100644 (file)
@@ -575,12 +575,11 @@ static int ufs_hisi_probe(struct platform_device *pdev)
        return ufshcd_pltfrm_init(pdev, of_id->data);
 }
 
-static int ufs_hisi_remove(struct platform_device *pdev)
+static void ufs_hisi_remove(struct platform_device *pdev)
 {
        struct ufs_hba *hba =  platform_get_drvdata(pdev);
 
        ufshcd_remove(hba);
-       return 0;
 }
 
 static const struct dev_pm_ops ufs_hisi_pm_ops = {
@@ -592,7 +591,7 @@ static const struct dev_pm_ops ufs_hisi_pm_ops = {
 
 static struct platform_driver ufs_hisi_pltform = {
        .probe  = ufs_hisi_probe,
-       .remove = ufs_hisi_remove,
+       .remove_new = ufs_hisi_remove,
        .driver = {
                .name   = "ufshcd-hisi",
                .pm     = &ufs_hisi_pm_ops,
index 2383ecd88f1cbacba1e112c3b018b5c09f6c0b5f..fc61790d289b2fce3848b2509ac5febcdc85a9ec 100644 (file)
@@ -806,7 +806,7 @@ static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
                return 0;
        }
 
-       err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
+       err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc, false);
        if (err)
                return err;
 
@@ -1748,13 +1748,12 @@ out:
  *
  * Always return 0
  */
-static int ufs_mtk_remove(struct platform_device *pdev)
+static void ufs_mtk_remove(struct platform_device *pdev)
 {
        struct ufs_hba *hba =  platform_get_drvdata(pdev);
 
        pm_runtime_get_sync(&(pdev)->dev);
        ufshcd_remove(hba);
-       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -1818,7 +1817,7 @@ static const struct dev_pm_ops ufs_mtk_pm_ops = {
 
 static struct platform_driver ufs_mtk_pltform = {
        .probe      = ufs_mtk_probe,
-       .remove     = ufs_mtk_remove,
+       .remove_new = ufs_mtk_remove,
        .driver = {
                .name   = "ufshcd-mtk",
                .pm     = &ufs_mtk_pm_ops,
index d1149b1c3ed50e4220227ddde7e0f9bc13e883f6..96cb8b5b4e66f5c5c7eb23ccf8cdeccf7c8da233 100644 (file)
@@ -93,8 +93,7 @@ static const struct __ufs_qcom_bw_table {
 static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
 
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
-                                                      u32 clk_cycles);
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up);
 
 static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
 {
@@ -460,7 +459,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
                return ret;
        }
 
-       phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->hs_gear);
+       phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->phy_gear);
 
        /* power on phy - start serdes and phy's power and clocks */
        ret = phy_power_on(phy);
@@ -528,11 +527,20 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
        return err;
 }
 
-/*
+/**
+ * ufs_qcom_cfg_timers - Configure ufs qcom cfg timers
+ *
+ * @hba: host controller instance
+ * @gear: Current operating gear
+ * @hs: current power mode
+ * @rate: current operating rate (A or B)
+ * @update_link_startup_timer: indicate if link startup is ongoing
+ * @is_pre_scale_up: flag indicating whether this is the pre-scale-up path
  * Return: zero for success and non-zero in case of a failure.
  */
 static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
-                              u32 hs, u32 rate, bool update_link_startup_timer)
+                              u32 hs, u32 rate, bool update_link_startup_timer,
+                              bool is_pre_scale_up)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
        struct ufs_clk_info *clki;
@@ -563,11 +571,14 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
        /*
         * The Qunipro controller does not use following registers:
         * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
-        * UFS_REG_PA_LINK_STARTUP_TIMER
-        * But UTP controller uses SYS1CLK_1US_REG register for Interrupt
+        * UFS_REG_PA_LINK_STARTUP_TIMER.
+        * However, the UTP controller uses SYS1CLK_1US_REG for Interrupt
         * Aggregation logic.
-       */
-       if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
+        * It is mandatory to write SYS1CLK_1US_REG register on UFS host
+        * controller V4.0.0 onwards.
+        */
+       if (host->hw_ver.major < 4 && ufs_qcom_cap_qunipro(host) &&
+           !ufshcd_is_intr_aggr_allowed(hba))
                return 0;
 
        if (gear == 0) {
@@ -576,8 +587,14 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
        }
 
        list_for_each_entry(clki, &hba->clk_list_head, list) {
-               if (!strcmp(clki->name, "core_clk"))
-                       core_clk_rate = clk_get_rate(clki->clk);
+               if (!strcmp(clki->name, "core_clk")) {
+                       if (is_pre_scale_up)
+                               core_clk_rate = clki->max_freq;
+                       else
+                               core_clk_rate = clk_get_rate(clki->clk);
+                       break;
+               }
+
        }
 
        /* If frequency is smaller than 1MHz, set to 1MHz */
@@ -679,20 +696,17 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
        switch (status) {
        case PRE_CHANGE:
                if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
-                                       0, true)) {
+                                       0, true, false)) {
                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                __func__);
                        return -EINVAL;
                }
 
-               if (ufs_qcom_cap_qunipro(host))
-                       /*
-                        * set unipro core clock cycles to 150 & clear clock
-                        * divider
-                        */
-                       err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
-                                                                         150);
-
+               if (ufs_qcom_cap_qunipro(host)) {
+                       err = ufs_qcom_set_core_clk_ctrl(hba, true);
+                       if (err)
+                               dev_err(hba->dev, "cfg core clk ctrl failed\n");
+               }
                /*
                 * Some UFS devices (and may be host) have issues if LCC is
                 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
@@ -909,8 +923,13 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
                        return ret;
                }
 
-               /* Use the agreed gear */
-               host->hs_gear = dev_req_params->gear_tx;
+               /*
+                * Update phy_gear only when the gears are scaled to a higher value. This is
+                * because the PHY gear settings are backwards compatible and we only need to
+                * change the PHY gear settings while scaling to higher gears.
+                */
+               if (dev_req_params->gear_tx > host->phy_gear)
+                       host->phy_gear = dev_req_params->gear_tx;
 
                /* enable the device ref clock before changing to HS mode */
                if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
@@ -926,7 +945,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
        case POST_CHANGE:
                if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
                                        dev_req_params->pwr_rx,
-                                       dev_req_params->hs_rate, false)) {
+                                       dev_req_params->hs_rate, false, false)) {
                        dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
                                __func__);
                        /*
@@ -1277,7 +1296,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
         * Power up the PHY using the minimum supported gear (UFS_HS_G2).
         * Switching to max gear will be performed during reinit if supported.
         */
-       host->hs_gear = UFS_HS_G2;
+       host->phy_gear = UFS_HS_G2;
 
        return 0;
 
@@ -1296,14 +1315,96 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
        phy_exit(host->generic_phy);
 }
 
-static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
-                                                      u32 clk_cycles)
+/**
+ * ufs_qcom_set_clk_40ns_cycles - Configure 40ns clk cycles
+ *
+ * @hba: host controller instance
+ * @cycles_in_1us: No of cycles in 1us to be configured
+ *
+ * Return: zero on success, or an error if the DME get/set of the 40ns
+ * cycles attribute fails.
+ */
+static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
+                                       u32 cycles_in_1us)
 {
+       struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+       u32 cycles_in_40ns;
+       u32 reg;
        int err;
-       u32 core_clk_ctrl_reg;
 
-       if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
+       /*
+        * UFS host controller V4.0.0 onwards needs to program the
+        * PA_VS_CORE_CLK_40NS_CYCLES attribute according to the programmed
+        * frequency of the UFS host controller's UniPro core clock.
+        */
+       if (host->hw_ver.major < 4)
+               return 0;
+
+       /*
+        * The generic formula cycles_in_40ns = (freq_unipro/25) does not
+        * hold for all frequencies. For example, ceil(37.5 MHz/25) will
+        * be 2 and ceil(403 MHz/25) will be 17, whereas the hardware
+        * specification expects 16. Hence use the exact hardware-spec
+        * mandated value for cycles_in_40ns instead of calculating it
+        * with the generic formula.
+        */
+       switch (cycles_in_1us) {
+       case UNIPRO_CORE_CLK_FREQ_403_MHZ:
+               cycles_in_40ns = 16;
+               break;
+       case UNIPRO_CORE_CLK_FREQ_300_MHZ:
+               cycles_in_40ns = 12;
+               break;
+       case UNIPRO_CORE_CLK_FREQ_201_5_MHZ:
+               cycles_in_40ns = 8;
+               break;
+       case UNIPRO_CORE_CLK_FREQ_150_MHZ:
+               cycles_in_40ns = 6;
+               break;
+       case UNIPRO_CORE_CLK_FREQ_100_MHZ:
+               cycles_in_40ns = 4;
+               break;
+       case  UNIPRO_CORE_CLK_FREQ_75_MHZ:
+               cycles_in_40ns = 3;
+               break;
+       case UNIPRO_CORE_CLK_FREQ_37_5_MHZ:
+               cycles_in_40ns = 2;
+               break;
+       default:
+               dev_err(hba->dev, "UNIPRO clk freq %u MHz not supported\n",
+                               cycles_in_1us);
                return -EINVAL;
+       }
+
+       err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), &reg);
+       if (err)
+               return err;
+
+       reg &= ~PA_VS_CORE_CLK_40NS_CYCLES_MASK;
+       reg |= cycles_in_40ns;
+
+       return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
+}
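
The switch above exists because plain ceiling division overshoots at 403 MHz. A compile-time check (illustrative only) makes the mismatch described in the comment concrete:

#include <linux/build_bug.h>
#include <linux/math.h>

/* The generic formula matches the table for most rates ... */
static_assert(DIV_ROUND_UP(38, 25) == 2);	/* the 37.5 MHz entry */
static_assert(DIV_ROUND_UP(150, 25) == 6);
/* ... but not at 403 MHz, where the spec mandates 16 cycles, not 17. */
static_assert(DIV_ROUND_UP(403, 25) == 17);
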
+
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
+{
+       struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+       struct list_head *head = &hba->clk_list_head;
+       struct ufs_clk_info *clki;
+       u32 cycles_in_1us = 0;
+       u32 core_clk_ctrl_reg;
+       int err;
+
+       list_for_each_entry(clki, head, list) {
+               if (!IS_ERR_OR_NULL(clki->clk) &&
+                       !strcmp(clki->name, "core_clk_unipro")) {
+                       if (is_scale_up)
+                               cycles_in_1us = ceil(clki->max_freq, (1000 * 1000));
+                       else
+                               cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000));
+                       break;
+               }
+       }
 
        err = ufshcd_dme_get(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
@@ -1311,32 +1412,54 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
        if (err)
                return err;
 
-       core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
-       core_clk_ctrl_reg |= clk_cycles;
+       /* Bit mask is different for UFS host controller V4.0.0 onwards */
+       if (host->hw_ver.major >= 4) {
+               if (!FIELD_FIT(CLK_1US_CYCLES_MASK_V4, cycles_in_1us))
+                       return -ERANGE;
+               core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK_V4;
+               core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK_V4, cycles_in_1us);
+       } else {
+               if (!FIELD_FIT(CLK_1US_CYCLES_MASK, cycles_in_1us))
+                       return -ERANGE;
+               core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK;
+               core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK, cycles_in_1us);
+       }
 
        /* Clear CORE_CLK_DIV_EN */
        core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
 
-       return ufshcd_dme_set(hba,
+       err = ufshcd_dme_set(hba,
                            UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                            core_clk_ctrl_reg);
-}
+       if (err)
+               return err;
 
-static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
-{
-       /* nothing to do as of now */
-       return 0;
+       /* Configure unipro core clk 40ns attribute */
+       return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us);
 }
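
The register update now goes through the bitfield helpers so the same code serves both the legacy 8-bit field and the wider field used from host controller v4 onwards. A minimal sketch of the FIELD_FIT()/FIELD_PREP() read-modify-write idiom, with a stand-in mask mirroring CLK_1US_CYCLES_MASK_V4 from the ufs-qcom.h hunk below:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_CYCLES_MASK	GENMASK(27, 16)	/* stand-in field, bits 27:16 */

/* Reject out-of-range values with FIELD_FIT(), clear the field, then
 * shift the new value into position with FIELD_PREP(); FIELD_GET()
 * would perform the inverse extraction. */
static int update_cycles_field(u32 *reg, u32 cycles)
{
	if (!FIELD_FIT(MY_CYCLES_MASK, cycles))
		return -ERANGE;

	*reg &= ~MY_CYCLES_MASK;
	*reg |= FIELD_PREP(MY_CYCLES_MASK, cycles);
	return 0;
}
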
 
-static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
+static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+       struct ufs_pa_layer_attr *attr = &host->dev_req_params;
+       int ret;
 
        if (!ufs_qcom_cap_qunipro(host))
                return 0;
 
-       /* set unipro core clock cycles to 150 and clear clock divider */
-       return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
+       ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
+                                 attr->hs_rate, false, true);
+       if (ret) {
+               dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
+               return ret;
+       }
+       /* set unipro core clock attributes and clear clock divider */
+       return ufs_qcom_set_core_clk_ctrl(hba, true);
+}
+
+static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
+{
+       return 0;
 }
 
 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
@@ -1371,15 +1494,14 @@ static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
        if (!ufs_qcom_cap_qunipro(host))
                return 0;
 
-       /* set unipro core clock cycles to 75 and clear clock divider */
-       return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
+       /* set unipro core clock attributes and clear clock divider */
+       return ufs_qcom_set_core_clk_ctrl(hba, false);
 }
 
 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
                bool scale_up, enum ufs_notify_change_status status)
 {
        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-       struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
        int err = 0;
 
        /* check the host controller state before sending hibern8 cmd */
@@ -1409,11 +1531,6 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
                        return err;
                }
 
-               ufs_qcom_cfg_timers(hba,
-                                   dev_req_params->gear_rx,
-                                   dev_req_params->pwr_rx,
-                                   dev_req_params->hs_rate,
-                                   false);
                ufs_qcom_icc_update_bw(host);
                ufshcd_uic_hibern8_exit(hba);
        }
@@ -1910,14 +2027,13 @@ static int ufs_qcom_probe(struct platform_device *pdev)
  *
  * Always returns 0
  */
-static int ufs_qcom_remove(struct platform_device *pdev)
+static void ufs_qcom_remove(struct platform_device *pdev)
 {
        struct ufs_hba *hba =  platform_get_drvdata(pdev);
 
        pm_runtime_get_sync(&(pdev)->dev);
        ufshcd_remove(hba);
        platform_msi_domain_free_irqs(hba->dev);
-       return 0;
 }
 
 static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
@@ -1949,7 +2065,7 @@ static const struct dev_pm_ops ufs_qcom_pm_ops = {
 
 static struct platform_driver ufs_qcom_pltform = {
        .probe  = ufs_qcom_probe,
-       .remove = ufs_qcom_remove,
+       .remove_new = ufs_qcom_remove,
        .driver = {
                .name   = "ufshcd-qcom",
                .pm     = &ufs_qcom_pm_ops,
index d6f8e74bd5381c8b7473179fda57b79c7bab607e..9950a0089475fca3088c3d58551600cad3cfb538 100644 (file)
@@ -129,8 +129,21 @@ enum {
 #define PA_VS_CONFIG_REG1      0x9000
 #define DME_VS_CORE_CLK_CTRL   0xD002
 /* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
-#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT               BIT(8)
-#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK      0xFF
+#define CLK_1US_CYCLES_MASK_V4                         GENMASK(27, 16)
+#define CLK_1US_CYCLES_MASK                            GENMASK(7, 0)
+#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT       BIT(8)
+#define PA_VS_CORE_CLK_40NS_CYCLES                     0x9007
+#define PA_VS_CORE_CLK_40NS_CYCLES_MASK                        GENMASK(6, 0)
+
+
+/* QCOM UFS host controller core clk frequencies */
+#define UNIPRO_CORE_CLK_FREQ_37_5_MHZ          38
+#define UNIPRO_CORE_CLK_FREQ_75_MHZ            75
+#define UNIPRO_CORE_CLK_FREQ_100_MHZ           100
+#define UNIPRO_CORE_CLK_FREQ_150_MHZ           150
+#define UNIPRO_CORE_CLK_FREQ_300_MHZ           300
+#define UNIPRO_CORE_CLK_FREQ_201_5_MHZ         202
+#define UNIPRO_CORE_CLK_FREQ_403_MHZ           403
 
 static inline void
 ufs_qcom_get_controller_revision(struct ufs_hba *hba,
@@ -227,7 +240,7 @@ struct ufs_qcom_host {
 
        struct gpio_desc *device_reset;
 
-       u32 hs_gear;
+       u32 phy_gear;
 
        bool esi_enabled;
 };
@@ -244,6 +257,7 @@ ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
 #define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
 #define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
 #define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+#define ceil(freq, div) ((freq) % (div) == 0 ? ((freq)/(div)) : ((freq)/(div) + 1))
 
 int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
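
The ceil() macro added above is ordinary ceiling division on positive integers, i.e. the same computation as the kernel's existing DIV_ROUND_UP(); it turns a Hz clock rate into a cycles-per-microsecond count, which is how the 201.5 MHz rate maps to the value 202 defined earlier. Illustrative compile-time checks using DIV_ROUND_UP() as the equivalent:

#include <linux/build_bug.h>
#include <linux/math.h>

static_assert(DIV_ROUND_UP(201500000, 1000 * 1000) == 202);
static_assert(DIV_ROUND_UP(403000000, 1000 * 1000) == 403);
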
 
index cc94970b86c9308fbad8eabd59f54b54107fc76d..8711e5cbc9680a3479a5035f4755b249fa618454 100644 (file)
@@ -388,18 +388,16 @@ static int ufs_renesas_probe(struct platform_device *pdev)
        return ufshcd_pltfrm_init(pdev, &ufs_renesas_vops);
 }
 
-static int ufs_renesas_remove(struct platform_device *pdev)
+static void ufs_renesas_remove(struct platform_device *pdev)
 {
        struct ufs_hba *hba = platform_get_drvdata(pdev);
 
        ufshcd_remove(hba);
-
-       return 0;
 }
 
 static struct platform_driver ufs_renesas_platform = {
        .probe  = ufs_renesas_probe,
-       .remove = ufs_renesas_remove,
+       .remove_new = ufs_renesas_remove,
        .driver = {
                .name   = "ufshcd-renesas",
                .of_match_table = of_match_ptr(ufs_renesas_of_match),
index 2bad75dd6d5894fd6928472a592e94154c0189b7..d8b165908809d69a7ca6bdd82f07ffc87db9def4 100644 (file)
@@ -425,13 +425,12 @@ static int ufs_sprd_probe(struct platform_device *pdev)
        return err;
 }
 
-static int ufs_sprd_remove(struct platform_device *pdev)
+static void ufs_sprd_remove(struct platform_device *pdev)
 {
        struct ufs_hba *hba =  platform_get_drvdata(pdev);
 
        pm_runtime_get_sync(&(pdev)->dev);
        ufshcd_remove(hba);
-       return 0;
 }
 
 static const struct dev_pm_ops ufs_sprd_pm_ops = {
@@ -443,7 +442,7 @@ static const struct dev_pm_ops ufs_sprd_pm_ops = {
 
 static struct platform_driver ufs_sprd_pltform = {
        .probe = ufs_sprd_probe,
-       .remove = ufs_sprd_remove,
+       .remove_new = ufs_sprd_remove,
        .driver = {
                .name = "ufshcd-sprd",
                .pm = &ufs_sprd_pm_ops,
index 248a49e5e7f3570fd2e4d15add6805b062607efc..0aca666d219947b749fe92199847111535eb9459 100644 (file)
@@ -58,11 +58,12 @@ static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
        int err = 0;
        size_t len;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
+       obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL,
+                                     ACPI_TYPE_BUFFER);
        if (!obj)
                return -EOPNOTSUPP;
 
-       if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
+       if (obj->buffer.length < 1) {
                err = -EINVAL;
                goto out;
        }
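
acpi_evaluate_dsm_typed() folds the object-type check into the evaluation and returns NULL when the _DSM result is not of the requested type, which is why only the length check remains above. A minimal sketch of the calling pattern, with the GUID, function index, and helper name as placeholders:

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/string.h>

/* Evaluate _DSM function fn and require an ACPI buffer object back. */
static int my_read_dsm_buffer(struct device *dev, const guid_t *guid,
			      u64 fn, u8 *out, size_t outlen)
{
	union acpi_object *obj;

	obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(dev), guid, 0, fn,
				      NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -EOPNOTSUPP;	/* missing, failed, or wrong type */

	memcpy(out, obj->buffer.pointer,
	       min_t(size_t, outlen, obj->buffer.length));
	ACPI_FREE(obj);
	return 0;
}
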
index 797a4dfe45d908552835f0705449fc7e20466fe1..61cf8b957da4df5fd3f3f4622e06df021159a29b 100644 (file)
@@ -121,7 +121,7 @@ static bool phandle_exists(const struct device_node *np,
 
 #define MAX_PROP_SIZE 32
 int ufshcd_populate_vreg(struct device *dev, const char *name,
-                        struct ufs_vreg **out_vreg)
+                        struct ufs_vreg **out_vreg, bool skip_current)
 {
        char prop_name[MAX_PROP_SIZE];
        struct ufs_vreg *vreg = NULL;
@@ -147,6 +147,11 @@ int ufshcd_populate_vreg(struct device *dev, const char *name,
        if (!vreg->name)
                return -ENOMEM;
 
+       if (skip_current) {
+               vreg->max_uA = 0;
+               goto out;
+       }
+
        snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
        if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
                dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
@@ -175,19 +180,19 @@ static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
        struct device *dev = hba->dev;
        struct ufs_vreg_info *info = &hba->vreg_info;
 
-       err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
+       err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba, true);
        if (err)
                goto out;
 
-       err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
+       err = ufshcd_populate_vreg(dev, "vcc", &info->vcc, false);
        if (err)
                goto out;
 
-       err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
+       err = ufshcd_populate_vreg(dev, "vccq", &info->vccq, false);
        if (err)
                goto out;
 
-       err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
+       err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2, false);
 out:
        return err;
 }
index 2df108f4ac131904b8230a8250f5996a1c041cdc..a86a3ada4befd1aa85894ffddf801d333ddc8bcf 100644 (file)
@@ -32,6 +32,6 @@ void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param);
 int ufshcd_pltfrm_init(struct platform_device *pdev,
                       const struct ufs_hba_variant_ops *vops);
 int ufshcd_populate_vreg(struct device *dev, const char *name,
-                        struct ufs_vreg **out_vreg);
+                        struct ufs_vreg **out_vreg, bool skip_current);
 
 #endif /* UFSHCD_PLTFRM_H_ */
index 79ed2e6e576aab74b4eadb18201fe483f3e5478d..ff33f31bcdf64591211f79028d6828e50097e8fa 100644 (file)
@@ -1687,6 +1687,9 @@ static const struct target_core_fabric_ops usbg_ops = {
 
        .tfc_wwn_attrs                  = usbg_wwn_attrs,
        .tfc_tpg_base_attrs             = usbg_base_attrs,
+
+       .default_submit_type            = TARGET_DIRECT_SUBMIT,
+       .direct_submit_supp             = 1,
 };
 
 /* Start gadget.c code */
index abef0619c7901af0f36232a558e46a1b1dbf16ae..4e3b2c25c7213e16083c2b43cd5e947686d292ea 100644 (file)
@@ -909,7 +909,7 @@ static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
                               cmd->tvc_prot_sgl_count, GFP_KERNEL))
                return;
 
-       target_queue_submission(se_cmd);
+       target_submit(se_cmd);
 }
 
 static void
@@ -2598,6 +2598,9 @@ static const struct target_core_fabric_ops vhost_scsi_ops = {
        .tfc_wwn_attrs                  = vhost_scsi_wwn_attrs,
        .tfc_tpg_base_attrs             = vhost_scsi_tpg_attrs,
        .tfc_tpg_attrib_attrs           = vhost_scsi_tpg_attrib_attrs,
+
+       .default_submit_type            = TARGET_QUEUE_SUBMIT,
+       .direct_submit_supp             = 1,
 };
 
 static int __init vhost_scsi_init(void)
index 8b77e4c06e43f8c28b1605f752c7bf208a31950b..0c51edfd13dcdfbb12c988c63c78cd85ea0b086d 100644 (file)
@@ -1832,6 +1832,9 @@ static const struct target_core_fabric_ops scsiback_ops = {
        .tfc_wwn_attrs                  = scsiback_wwn_attrs,
        .tfc_tpg_base_attrs             = scsiback_tpg_attrs,
        .tfc_tpg_param_attrs            = scsiback_param_attrs,
+
+       .default_submit_type            = TARGET_DIRECT_SUBMIT,
+       .direct_submit_supp             = 1,
 };
 
 static const struct xenbus_device_id scsiback_ids[] = {
index f6ef8cf5d7741f924f92600ad7dbb9bcf5fb7227..4109f1bd61285b3ada18af6ec26f34380162cff4 100644 (file)
 struct nvmefc_ls_req {
        void                    *rqstaddr;
        dma_addr_t              rqstdma;
-       __le32                  rqstlen;
+       u32                     rqstlen;
        void                    *rspaddr;
        dma_addr_t              rspdma;
-       __le32                  rsplen;
+       u32                     rsplen;
        u32                     timeout;
 
        void                    *private;
@@ -120,7 +120,7 @@ struct nvmefc_ls_req {
 struct nvmefc_ls_rsp {
        void            *rspbuf;
        dma_addr_t      rspdma;
-       __le32          rsplen;
+       u16             rsplen;
 
        void (*done)(struct nvmefc_ls_rsp *rsp);
        void            *nvme_fc_private;       /* LLDD is not to access !! */
index 8a43534eea5cb1654b2141acd83800fd2fe5d89d..f5257103fdb6d8e4bfa007f7acebf84b4f58d353 100644 (file)
@@ -404,8 +404,6 @@ cmd_to_domain_dev(struct scsi_cmnd *cmd)
        return sdev_to_domain_dev(cmd->device);
 }
 
-void sas_hash_addr(u8 *hashed, const u8 *sas_addr);
-
 /* Before calling a notify event, LLDD should use this function
  * when the link is severed (possibly from its tasklet).
  * The idea is that the Class only reads those, while the LLDD,
@@ -681,7 +679,6 @@ extern void sas_resume_ha(struct sas_ha_struct *sas_ha);
 extern void sas_resume_ha_no_sync(struct sas_ha_struct *sas_ha);
 extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
 
-int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates);
 int sas_phy_reset(struct sas_phy *phy, int hard_reset);
 int sas_phy_enable(struct sas_phy *phy, int enable);
 extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
@@ -699,20 +696,6 @@ extern struct scsi_transport_template *
 sas_domain_attach_transport(struct sas_domain_function_template *);
 extern struct device_attribute dev_attr_phy_event_threshold;
 
-int  sas_discover_root_expander(struct domain_device *);
-
-int  sas_ex_revalidate_domain(struct domain_device *);
-
-void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
-void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *);
-void sas_discover_event(struct asd_sas_port *, enum discover_event ev);
-
-int  sas_discover_end_dev(struct domain_device *);
-
-void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *);
-
-void sas_init_dev(struct domain_device *);
-
 void sas_task_abort(struct sas_task *);
 int sas_eh_abort_handler(struct scsi_cmnd *cmd);
 int sas_eh_device_reset_handler(struct scsi_cmnd *cmd);
index 159567359bbb0fea21d1ee0576fbeaca13331647..97099a5e3f6c800f94f011b93ef7bf97c28a905b 100644 (file)
 #define SE_MODE_PAGE_BUF                       512
 #define SE_SENSE_BUF                           96
 
+enum target_submit_type {
+       /* Use the fabric driver's default submission type */
+       TARGET_FABRIC_DEFAULT_SUBMIT,
+       /* Submit from the calling context */
+       TARGET_DIRECT_SUBMIT,
+       /* Defer submission to the LIO workqueue */
+       TARGET_QUEUE_SUBMIT,
+};
+
 /* struct se_hba->hba_flags */
 enum hba_flags_table {
        HBA_FLAGS_INTERNAL_USE  = 0x01,
@@ -717,6 +726,7 @@ struct se_dev_attrib {
        u32             unmap_granularity;
        u32             unmap_granularity_alignment;
        u32             max_write_same_len;
+       u8              submit_type;
        struct se_device *da_dev;
        struct config_group da_group;
 };
index b188b1e90e1edb945f0d6cee5e3e3731a16de67e..3378ff9ee271c942536f341678d5045b3c782116 100644 (file)
@@ -113,11 +113,20 @@ struct target_core_fabric_ops {
        struct configfs_attribute **tfc_tpg_nacl_param_attrs;
 
        /*
-        * Set this member variable to true if the SCSI transport protocol
+        * Set this member variable if the SCSI transport protocol
         * (e.g. iSCSI) requires that the Data-Out buffer is transferred in
         * its entirety before a command is aborted.
         */
-       bool write_pending_must_be_called;
+       unsigned int write_pending_must_be_called:1;
+       /*
+        * Set this if the driver supports submitting commands to the backend
+        * from target_submit/target_submit_cmd.
+        */
+       unsigned int direct_submit_supp:1;
+       /*
+        * Set this to a target_submit_type value.
+        */
+       u8 default_submit_type;
 };
 
 int target_register_template(const struct target_core_fabric_ops *fo);
@@ -166,20 +175,18 @@ int       target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
                struct scatterlist *sgl, u32 sgl_count,
                struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
                struct scatterlist *sgl_prot, u32 sgl_prot_count, gfp_t gfp);
-void   target_submit(struct se_cmd *se_cmd);
+int    target_submit(struct se_cmd *se_cmd);
 sense_reason_t transport_lookup_cmd_lun(struct se_cmd *);
 sense_reason_t target_cmd_init_cdb(struct se_cmd *se_cmd, unsigned char *cdb,
                                   gfp_t gfp);
 sense_reason_t target_cmd_parse_cdb(struct se_cmd *);
 void   target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
                unsigned char *, u64, u32, int, int, int);
-void   target_queue_submission(struct se_cmd *se_cmd);
 
 int    target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
                unsigned char *sense, u64 unpacked_lun,
                void *fabric_tmr_ptr, unsigned char tm_type,
                gfp_t, u64, int);
-int    transport_handle_cdb_direct(struct se_cmd *);
 sense_reason_t transport_generic_new_cmd(struct se_cmd *);
 
 void   target_put_cmd_and_wait(struct se_cmd *cmd);
@@ -197,8 +204,6 @@ void        target_stop_session(struct se_session *se_sess);
 void   target_wait_for_sess_cmds(struct se_session *);
 void   target_show_cmd(const char *pfx, struct se_cmd *cmd);
 
-int    core_alua_check_nonop_delay(struct se_cmd *);
-
 int    core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
 void   core_tmr_release_req(struct se_tmr_req *);
 int    transport_generic_handle_tmr(struct se_cmd *);
index 992517ac3292bd4f9380b6a63972df1e8d411518..b930669bd1f0518b541e982746ee665aac392182 100644 (file)
@@ -267,15 +267,15 @@ DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
             TP_ARGS(dev_name, err, usecs, dev_state, link_state));
 
 TRACE_EVENT(ufshcd_command,
-       TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t,
+       TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t,
                 unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len,
                 u32 intr, u64 lba, u8 opcode, u8 group_id),
 
-       TP_ARGS(dev_name, str_t, tag, doorbell, hwq_id, transfer_len,
-                       intr, lba, opcode, group_id),
+       TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
+               opcode, group_id),
 
        TP_STRUCT__entry(
-               __string(dev_name, dev_name)
+               __field(struct scsi_device *, sdev)
                __field(enum ufs_trace_str_t, str_t)
                __field(unsigned int, tag)
                __field(u32, doorbell)
@@ -288,7 +288,7 @@ TRACE_EVENT(ufshcd_command,
        ),
 
        TP_fast_assign(
-               __assign_str(dev_name, dev_name);
+               __entry->sdev = sdev;
                __entry->str_t = str_t;
                __entry->tag = tag;
                __entry->doorbell = doorbell;
@@ -302,8 +302,9 @@ TRACE_EVENT(ufshcd_command,
 
        TP_printk(
                "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x, hwq_id: %d",
-               show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
-               __entry->tag, __entry->doorbell, __entry->transfer_len, __entry->intr,
+               show_ufs_cmd_trace_str(__entry->str_t),
+               dev_name(&__entry->sdev->sdev_dev), __entry->tag,
+               __entry->doorbell, __entry->transfer_len, __entry->intr,
                __entry->lba, (u32)__entry->opcode, str_opcode(__entry->opcode),
                (u32)__entry->group_id, __entry->hwq_id
        )
index 0cced88f4531e87384ab676d1c01149ccb4ef36b..e77ab17868569be73e7bceb44fc5defbb58aea1c 100644 (file)
@@ -98,9 +98,10 @@ enum upiu_response_transaction {
        UPIU_TRANSACTION_REJECT_UPIU    = 0x3F,
 };
 
-/* UPIU Read/Write flags */
+/* UPIU Read/Write flags. See also table "UPIU Flags" in the UFS standard. */
 enum {
        UPIU_CMD_FLAGS_NONE     = 0x00,
+       UPIU_CMD_FLAGS_CP       = 0x04,
        UPIU_CMD_FLAGS_WRITE    = 0x20,
        UPIU_CMD_FLAGS_READ     = 0x40,
 };
index 7d07b256e906b9ceab7f79c09a458a6f014521f6..e0d6590d163d7759fab8e76716cb2b3b72bba4f7 100644 (file)
@@ -28,6 +28,7 @@
 
 #define UFSHCD "ufshcd"
 
+struct scsi_device;
 struct ufs_hba;
 
 enum dev_cmd_type {
@@ -371,6 +372,7 @@ struct ufs_hba_variant_ops {
        int     (*get_outstanding_cqs)(struct ufs_hba *hba,
                                       unsigned long *ocqs);
        int     (*config_esi)(struct ufs_hba *hba);
+       void    (*config_scsi_dev)(struct scsi_device *sdev);
 };
 
 /* clock gating state  */
@@ -596,11 +598,6 @@ enum ufshcd_quirks {
         */
        UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
 
-       /*
-        * Align DMA SG entries on a 4 KiB boundary.
-        */
-       UFSHCD_QUIRK_4KB_DMA_ALIGNMENT                  = 1 << 14,
-
        /*
         * This quirk needs to be enabled if the host controller does not
         * support UIC command