Merge tag 'block-ioctl-cleanup-5.6' into 5.6/scsi-queue
author: Martin K. Petersen <martin.petersen@oracle.com>
Fri, 10 Jan 2020 05:14:46 +0000 (00:14 -0500)
committer: Martin K. Petersen <martin.petersen@oracle.com>
Fri, 10 Jan 2020 05:14:46 +0000 (00:14 -0500)
Pull compat_ioctl cleanup from Arnd. Here's his description:

This series concludes the work I did for linux-5.5 on the compat_ioctl()
cleanup, killing off fs/compat_ioctl.c and block/compat_ioctl.c by moving
everything into drivers.

Overall this would be a reduction both in complexity and line count, but
as I'm also adding documentation the overall number of lines increases
in the end.

My plan was originally to keep the SCSI and block parts separate.
This did not work easily because of interdependencies: I cannot
do the final SCSI cleanup in a good way without first addressing the
CDROM ioctls, so this is one series that I hope could be merged through
either the block or the scsi git trees, or possibly both if you can
pull in the same branch.

The series comes in these steps:

1. clean up the sg v3 interface as suggested by Linus. I have
   talked about this with Doug Gilbert as well, and he would
   rebase his sg v4 patches on top of "compat: scsi: sg: fix v3
   compat read/write interface"

2. Actually moving handlers out of block/compat_ioctl.c and
   block/scsi_ioctl.c into drivers, mixed in with cleanup
   patches

3. Document how to do this right. I keep getting asked about this,
   and it helps to point to some documentation file.

The branch is based on another one that fixes a couple of bugs found
during the creation of this series.

Changes since v3:
  https://lore.kernel.org/lkml/20200102145552.1853992-1-arnd@arndb.de/

- Move sr_compat_ioctl fixup to correct patch (Ben Hutchings)
- Add Reviewed-by tags

Changes since v2:
  https://lore.kernel.org/lkml/20191217221708.3730997-1-arnd@arndb.de/

- Rebase to v5.5-rc4, which contains the earlier bugfixes
- Fix sr_block_compat_ioctl() error handling bug found by
  Ben Hutchings
- Fix idecd_locked_compat_ioctl() compat_ptr() bug
- Don't try to handle HDIO_DRIVE_TASKFILE in drivers/ide
- More documentation improvements

Changes since v1:
  https://lore.kernel.org/lkml/20191211204306.1207817-1-arnd@arndb.de/

- move out the bugfixes into a branch for itself
- clean up scsi sg driver further as suggested by Christoph Hellwig
- avoid some ifdefs by moving compat_ptr() out of asm/compat.h
- split out the blkdev_compat_ptr_ioctl function; bug spotted by
  Ben Hutchings
- Improve formatting of documentation

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
75 files changed:
drivers/scsi/aic7xxx/aic7xxx_core.c
drivers/scsi/csiostor/csio_scsi.c
drivers/scsi/esp_scsi.c
drivers/scsi/esp_scsi.h
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/initio.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/libsas/sas_internal.h
drivers/scsi/libsas/sas_port.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/libsas/sas_task.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/mpt3sas/mpi/mpi2.h
drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
drivers/scsi/mpt3sas/mpi/mpi2_image.h
drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_config.c
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mpt3sas/mpt3sas_transport.c
drivers/scsi/myrb.h
drivers/scsi/myrs.h
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_logging.h
drivers/scsi/sd_zbc.c
drivers/scsi/sym53c8xx_2/sym_nvram.c
drivers/scsi/ufs/cdns-pltfrm.c
drivers/scsi/ufs/ufs-mediatek.c
drivers/scsi/ufs/ufs-mediatek.h
drivers/scsi/ufs/ufs-sysfs.c
drivers/scsi/ufs/ufs-sysfs.h
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/unipro.h
drivers/scsi/vmw_pvscsi.c
drivers/target/tcm_fc/tfc_io.c
include/linux/soc/mediatek/mtk_sip_svc.h [new file with mode: 0644]
include/scsi/scsi_device.h
include/uapi/scsi/scsi_bsg_ufs.h

index a9d40d3b90efcbfc9eebf9910cff6bc2299deee0..4190a025381a50445f18fee532ae28addd76cada 100644 (file)
@@ -2314,7 +2314,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
                         * At some speeds, we only support
                         * ST transfers.
                         */
-                       if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
+                       if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
                                *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
                        break;
                }
index 469d0bc9f5fe4db6e756e4270bf522e63cd15566..00cf3357313650a3576882b1f970f7425c4fdd88 100644 (file)
@@ -1383,7 +1383,7 @@ csio_device_reset(struct device *dev,
                return -EINVAL;
 
        /* Delete NPIV lnodes */
-        csio_lnodes_exit(hw, 1);
+       csio_lnodes_exit(hw, 1);
 
        /* Block upper IOs */
        csio_lnodes_block_request(hw);
index bb88995a12c738ce4dbed487f9761a57ce756bde..89afa31e33cba889a9af63dd5253cf8a72f7ff0d 100644 (file)
@@ -243,8 +243,6 @@ static void esp_set_all_config3(struct esp *esp, u8 val)
 /* Reset the ESP chip, _not_ the SCSI bus. */
 static void esp_reset_esp(struct esp *esp)
 {
-       u8 family_code, version;
-
        /* Now reset the ESP chip */
        scsi_esp_cmd(esp, ESP_CMD_RC);
        scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
@@ -257,14 +255,19 @@ static void esp_reset_esp(struct esp *esp)
         */
        esp->max_period = ((35 * esp->ccycle) / 1000);
        if (esp->rev == FAST) {
-               version = esp_read8(ESP_UID);
-               family_code = (version & 0xf8) >> 3;
-               if (family_code == 0x02)
+               u8 family_code = ESP_FAMILY(esp_read8(ESP_UID));
+
+               if (family_code == ESP_UID_F236) {
                        esp->rev = FAS236;
-               else if (family_code == 0x0a)
+               } else if (family_code == ESP_UID_HME) {
                        esp->rev = FASHME; /* Version is usually '5'. */
-               else
+               } else if (family_code == ESP_UID_FSC) {
+                       esp->rev = FSC;
+                       /* Enable Active Negation */
+                       esp_write8(ESP_CONFIG4_RADE, ESP_CFG4);
+               } else {
                        esp->rev = FAS100A;
+               }
                esp->min_period = ((4 * esp->ccycle) / 1000);
        } else {
                esp->min_period = ((5 * esp->ccycle) / 1000);
@@ -308,7 +311,7 @@ static void esp_reset_esp(struct esp *esp)
 
        case FAS236:
        case PCSCSI:
-               /* Fast 236, AM53c974 or HME */
+       case FSC:
                esp_write8(esp->config2, ESP_CFG2);
                if (esp->rev == FASHME) {
                        u8 cfg3 = esp->target[0].esp_config3;
@@ -2373,10 +2376,11 @@ static const char *esp_chip_names[] = {
        "ESP100A",
        "ESP236",
        "FAS236",
+       "AM53C974",
+       "53CF9x-2",
        "FAS100A",
        "FAST",
        "FASHME",
-       "AM53C974",
 };
 
 static struct scsi_transport_template *esp_transport_template;
index 91b32f2a1a1b62148c8edfeb3865ebcecdadd92b..446a3d18c02246c4a14a726f6e31dc7805ca8a76 100644 (file)
 #define ESP_CONFIG3_IMS       0x80     /* ID msg chk'ng        (esp/fas236)  */
 #define ESP_CONFIG3_OBPUSH    0x80     /* Push odd-byte to dma (hme)         */
 
-/* ESP config register 4 read-write, found only on am53c974 chips */
-#define ESP_CONFIG4_RADE      0x04     /* Active negation */
-#define ESP_CONFIG4_RAE       0x08     /* Active negation on REQ and ACK */
-#define ESP_CONFIG4_PWD       0x20     /* Reduced power feature */
-#define ESP_CONFIG4_GE0       0x40     /* Glitch eater bit 0 */
-#define ESP_CONFIG4_GE1       0x80     /* Glitch eater bit 1 */
+/* ESP config register 4 read-write */
+#define ESP_CONFIG4_BBTE      0x01     /* Back-to-back transfers     (fsc)   */
+#define ESP_CONGIG4_TEST      0x02     /* Transfer counter test mode (fsc)   */
+#define ESP_CONFIG4_RADE      0x04     /* Active negation   (am53c974/fsc)   */
+#define ESP_CONFIG4_RAE       0x08     /* Act. negation REQ/ACK (am53c974)   */
+#define ESP_CONFIG4_PWD       0x20     /* Reduced power feature (am53c974)   */
+#define ESP_CONFIG4_GE0       0x40     /* Glitch eater bit 0    (am53c974)   */
+#define ESP_CONFIG4_GE1       0x80     /* Glitch eater bit 1    (am53c974)   */
 
 #define ESP_CONFIG_GE_12NS    (0)
 #define ESP_CONFIG_GE_25NS    (ESP_CONFIG_GE1)
 #define ESP_TEST_TS           0x04     /* Tristate test mode */
 
 /* ESP unique ID register read-only, found on fas236+fas100a only */
+#define ESP_UID_FAM           0xf8     /* ESP family bitmask */
+
+#define ESP_FAMILY(uid) (((uid) & ESP_UID_FAM) >> 3)
+
+/* Values for the ESP family bits */
 #define ESP_UID_F100A         0x00     /* ESP FAS100A  */
 #define ESP_UID_F236          0x02     /* ESP FAS236   */
-#define ESP_UID_REV           0x07     /* ESP revision */
-#define ESP_UID_FAM           0xf8     /* ESP family   */
+#define ESP_UID_HME           0x0a     /* FAS HME      */
+#define ESP_UID_FSC           0x14     /* NCR/Symbios Logic 53CF9x-2 */
 
 /* ESP fifo flags register read-only */
 /* Note that the following implies a 16 byte FIFO on the ESP. */
@@ -257,15 +264,17 @@ struct esp_cmd_priv {
 };
 #define ESP_CMD_PRIV(CMD)      ((struct esp_cmd_priv *)(&(CMD)->SCp))
 
+/* NOTE: this enum is ordered based on chip features! */
 enum esp_rev {
-       ESP100     = 0x00,  /* NCR53C90 - very broken */
-       ESP100A    = 0x01,  /* NCR53C90A */
-       ESP236     = 0x02,
-       FAS236     = 0x03,
-       FAS100A    = 0x04,
-       FAST       = 0x05,
-       FASHME     = 0x06,
-       PCSCSI     = 0x07,  /* AM53c974 */
+       ESP100,  /* NCR53C90 - very broken */
+       ESP100A, /* NCR53C90A */
+       ESP236,
+       FAS236,
+       PCSCSI,  /* AM53c974 */
+       FSC,     /* NCR/Symbios Logic 53CF9x-2 */
+       FAS100A,
+       FAST,
+       FASHME,
 };
 
 struct esp_cmd_entry {
index 54b8c6f9daf4bd650c804ace15025824ade5fb4f..d9e94e81da01787f28b4bbd6cd0d840f1aa833be 100644 (file)
@@ -1877,7 +1877,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
         */
        struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
        struct ibmvscsis_cmd *cmd, *nxt;
-       struct iu_entry *iue;
        long rc = ADAPT_SUCCESS;
        bool retry = false;
 
@@ -1931,8 +1930,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
                                         */
                                        vscsi->credit += 1;
                                } else {
-                                       iue = cmd->iue;
-
                                        crq->valid = VALID_CMD_RESP_EL;
                                        crq->format = cmd->rsp.format;
 
@@ -3796,7 +3793,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
                                                 se_cmd);
        struct iu_entry *iue = cmd->iue;
        struct scsi_info *vscsi = cmd->adapter;
-       char *sd;
        uint len = 0;
        int rc;
 
@@ -3804,7 +3800,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
                               1);
        if (rc) {
                dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
-               sd = se_cmd->sense_buffer;
                se_cmd->scsi_sense_length = 18;
                memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
                /* Logical Unit Communication Time-out asc/ascq = 0x0801 */
index 41fd64c9c8e96cc852c65c08453a5ac63532412e..1d39628ac947f5c21ea611b650ed8ab4a8de47b0 100644 (file)
@@ -1640,7 +1640,7 @@ static int initio_state_6(struct initio_host * host)
  *
  */
 
-int initio_state_7(struct initio_host * host)
+static int initio_state_7(struct initio_host * host)
 {
        int cnt, i;
 
index e9e00740f7ca6892a07359f172ab62b48f100dc6..c5a828a041e07ea88747f574fd10aef4e3bd4cd8 100644 (file)
@@ -137,7 +137,7 @@ static void sas_ata_task_done(struct sas_task *task)
        } else {
                ac = sas_to_ata_err(stat);
                if (ac) {
-                       pr_warn("%s: SAS error %x\n", __func__, stat->stat);
+                       pr_warn("%s: SAS error 0x%x\n", __func__, stat->stat);
                        /* We saw a SAS error. Send a vague error. */
                        if (!link->sactive) {
                                qc->err_mask = ac;
index d7302c2052f916f74c909a3f35941e5e79b3c68e..daf951b0b3f5538fa05f7c7b5e2e99626e18d5cd 100644 (file)
@@ -179,7 +179,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
 
        res = i->dft->lldd_dev_found(dev);
        if (res) {
-               pr_warn("driver on host %s cannot handle device %llx, error:%d\n",
+               pr_warn("driver on host %s cannot handle device %016llx, error:%d\n",
                        dev_name(sas_ha->dev),
                        SAS_ADDR(dev->sas_addr), res);
        }
index 9fdb9c9fbda48295c0abd6be1c2a8f476b4869cd..ab671cdd4cfb090c3797f6d18b8b2d4278e2c11d 100644 (file)
@@ -500,7 +500,7 @@ static int sas_ex_general(struct domain_device *dev)
                ex_assign_report_general(dev, rg_resp);
 
                if (dev->ex_dev.configuring) {
-                       pr_debug("RG: ex %llx self-configuring...\n",
+                       pr_debug("RG: ex %016llx self-configuring...\n",
                                 SAS_ADDR(dev->sas_addr));
                        schedule_timeout_interruptible(5*HZ);
                } else
@@ -881,7 +881,7 @@ static struct domain_device *sas_ex_discover_end_dev(
 
                res = sas_discover_end_dev(child);
                if (res) {
-                       pr_notice("sas_discover_end_dev() for device %16llx at %016llx:%02d returned 0x%x\n",
+                       pr_notice("sas_discover_end_dev() for device %016llx at %016llx:%02d returned 0x%x\n",
                                  SAS_ADDR(child->sas_addr),
                                  SAS_ADDR(parent->sas_addr), phy_id, res);
                        goto out_list_del;
index 01f1738ce6dfff2f4dd8f356fb3aa2f9dff90aeb..1f1d01901978c6a45ef3563c3e523e362a7e57d3 100644 (file)
@@ -107,7 +107,7 @@ static inline void sas_smp_host_handler(struct bsg_job *job,
 
 static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err)
 {
-       pr_warn("%s: for %s device %16llx returned %d\n",
+       pr_warn("%s: for %s device %016llx returned %d\n",
                func, dev->parent ? "exp-attached" :
                "direct-attached",
                SAS_ADDR(dev->sas_addr), err);
index 7c86fd248129a1b6d213ab5f7c0f056d639c08c3..19cf418928faae07fb58a6e911c9297c153a79fe 100644 (file)
@@ -165,7 +165,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
        }
        sas_port_add_phy(port->port, phy->phy);
 
-       pr_debug("%s added to %s, phy_mask:0x%x (%16llx)\n",
+       pr_debug("%s added to %s, phy_mask:0x%x (%016llx)\n",
                 dev_name(&phy->phy->dev), dev_name(&port->port->dev),
                 port->phy_mask,
                 SAS_ADDR(port->attached_sas_addr));
index bec83eb8ab87474a8fd08bc200d9ff5a2ade85dc..9e0975e55c27e247c04eb118d799c1f0d752cfae 100644 (file)
@@ -330,7 +330,7 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
 
        int_to_scsilun(cmd->device->lun, &lun);
 
-       pr_notice("eh: device %llx LUN %llx has the task\n",
+       pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
                  SAS_ADDR(dev->sas_addr),
                  cmd->device->lun);
 
@@ -615,7 +615,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
  reset:
                        tmf_resp = sas_recover_lu(task->dev, cmd);
                        if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
-                               pr_notice("dev %016llx LU %llx is recovered\n",
+                               pr_notice("dev %016llx LU 0x%llx is recovered\n",
                                          SAS_ADDR(task->dev),
                                          cmd->device->lun);
                                sas_eh_finish_cmd(cmd);
@@ -666,7 +666,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
                         * of effort could recover from errors.  Quite
                         * possibly the HA just disappeared.
                         */
-                       pr_err("error from  device %llx, LUN %llx couldn't be recovered in any way\n",
+                       pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
                               SAS_ADDR(task->dev->sas_addr),
                               cmd->device->lun);
 
@@ -851,7 +851,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
        if (scsi_dev->tagged_supported) {
                scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
        } else {
-               pr_notice("device %llx, LUN %llx doesn't support TCQ\n",
+               pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
                          SAS_ADDR(dev->sas_addr), scsi_dev->lun);
                scsi_change_queue_depth(scsi_dev, 1);
        }
index 1ded7d85027ec59c300ba2e25de73ee5692e48dd..e2d42593ce529faac0f586be88a24ed96fbef43e 100644 (file)
@@ -27,7 +27,7 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
                memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
 
                if (iu->status != SAM_STAT_CHECK_CONDITION)
-                       dev_warn(dev, "dev %llx sent sense data, but stat(%x) is not CHECK CONDITION\n",
+                       dev_warn(dev, "dev %016llx sent sense data, but stat(0x%x) is not CHECK CONDITION\n",
                                 SAS_ADDR(task->dev->sas_addr), iu->status);
        }
        else
index 935f988041989bb1ef667b8c7d343bd49d785ee4..04d73e2be373e3376f1e0427eb1671ea47d6c150 100644 (file)
@@ -1223,6 +1223,8 @@ struct lpfc_hba {
 #define LPFC_POLL_HB   1               /* slowpath heartbeat */
 #define LPFC_POLL_FASTPATH     0       /* called from fastpath */
 #define LPFC_POLL_SLOWPATH     1       /* called from slowpath */
+
+       char os_host_name[MAXHOSTNAMELEN];
 };
 
 static inline struct Scsi_Host *
index 4ff82b36a37a25731f3ed3229943cb3061ebe07c..46f56f30f77ef3d14b2cfb466cf1a5bc5fda73a0 100644 (file)
@@ -4123,14 +4123,13 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
                /*
                 * The 'topology' is not a configurable parameter if :
                 *   - persistent topology enabled
-                *   - G7 adapters
-                *   - G6 with no private loop support
+                *   - G7/G6 with no private loop support
                 */
 
-               if (((phba->hba_flag & HBA_PERSISTENT_TOPO) ||
+               if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
                     (!phba->sli4_hba.pc_sli4_params.pls &&
-                    phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC) ||
-                    phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+                    (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+                    phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
                    val == 4) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                                "3114 Loop mode not supported\n");
index ee353c84a0972f62eef2d8ef2754bdef743a0693..25d3dd39bc0540abacdfa99dfbd5fa8a383b668e 100644 (file)
@@ -180,7 +180,7 @@ int lpfc_issue_gidft(struct lpfc_vport *vport);
 int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
 int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
 int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
-void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
+void lpfc_fdmi_change_check(struct lpfc_vport *vport);
 void lpfc_delayed_disc_tmo(struct timer_list *);
 void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
 
index 99c9bb249758c3e69d1a008100097a00b9e64471..58b35a1442c1610f74187ed44e3a575a63e65766 100644 (file)
@@ -1493,33 +1493,35 @@ int
 lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
        size_t size)
 {
-       char fwrev[FW_REV_STR_SIZE];
-       int n;
+       char fwrev[FW_REV_STR_SIZE] = {0};
+       char tmp[MAXHOSTNAMELEN] = {0};
 
-       lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+       memset(symbol, 0, size);
 
-       n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
-       if (size < n)
-               return n;
+       scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName);
+       if (strlcat(symbol, tmp, size) >= size)
+               goto buffer_done;
 
-       n += scnprintf(symbol + n, size - n, " FV%s", fwrev);
-       if (size < n)
-               return n;
+       lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
+       scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
+       if (strlcat(symbol, tmp, size) >= size)
+               goto buffer_done;
 
-       n += scnprintf(symbol + n, size - n, " DV%s.",
-                     lpfc_release_version);
-       if (size < n)
-               return n;
+       scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version);
+       if (strlcat(symbol, tmp, size) >= size)
+               goto buffer_done;
 
-       n += scnprintf(symbol + n, size - n, " HN:%s.",
-                     init_utsname()->nodename);
-       if (size < n)
-               return n;
+       scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name);
+       if (strlcat(symbol, tmp, size) >= size)
+               goto buffer_done;
 
        /* Note :- OS name is "Linux" */
-       n += scnprintf(symbol + n, size - n, " OS:%s",
-                     init_utsname()->sysname);
-       return n;
+       scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname);
+       strlcat(symbol, tmp, size);
+
+buffer_done:
+       return strnlen(symbol, size);
+
 }
 
 static uint32_t
@@ -1998,14 +2000,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 
 /**
- * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to
+ * lpfc_fdmi_change_check - Check for changed FDMI parameters
  * @vport: pointer to a host virtual N_Port data structure.
  *
- * Called from hbeat timeout routine to check if the number of discovered
- * ports has changed. If so, re-register thar port Attribute.
+ * Check how many mapped NPorts we are connected to
+ * Check if our hostname changed
+ * Called from hbeat timeout routine to check if any FDMI parameters
+ * changed. If so, re-register those Attributes.
  */
 void
-lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
+lpfc_fdmi_change_check(struct lpfc_vport *vport)
 {
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_nodelist *ndlp;
@@ -2018,17 +2022,41 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
        if (!(vport->fc_flag & FC_FABRIC))
                return;
 
+       ndlp = lpfc_findnode_did(vport, FDMI_DID);
+       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+               return;
+
+       /* Check if system hostname changed */
+       if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
+               memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+               scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+                         init_utsname()->nodename);
+               lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+
+                * Since this affects multiple HBA and PORT attributes, we need
+                * de-register and go thru the whole FDMI registration cycle.
+                * DHBA -> DPRT -> RHBA -> RPA  (physical port)
+                * DPRT -> RPRT (vports)
+                */
+               if (vport->port_type == LPFC_PHYSICAL_PORT)
+                       lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+               else
+                       lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+
+               /* Since this code path registers all the port attributes
+                * we can just return without further checking.
+                */
+               return;
+       }
+
        if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
                return;
 
+       /* Check if the number of mapped NPorts changed */
        cnt = lpfc_find_map_node(vport);
        if (cnt == vport->fdmi_num_disc)
                return;
 
-       ndlp = lpfc_findnode_did(vport, FDMI_DID);
-       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
-               return;
-
        if (vport->port_type == LPFC_PHYSICAL_PORT) {
                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
                              LPFC_FDMI_PORT_ATTR_num_disc);
@@ -2616,8 +2644,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
        ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
        memset(ae, 0, 256);
 
-       snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
-                init_utsname()->nodename);
+       scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
+                 vport->phba->os_host_name);
 
        len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
        len += (len & 3) ? (4 - (len & 3)) : 4;
index a5ecbce4eda238a4376ef2230f966ae7eb2c5cb2..819335b16c2e4c73d3deadb88dfac5ed72a28ed7 100644 (file)
@@ -2085,6 +2085,8 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
        int copied = 0;
        struct lpfc_dmabuf *dmabuf, *next;
 
+       memset(buffer, 0, size);
+
        spin_lock_irq(&phba->hbalock);
        if (phba->ras_fwlog.state != ACTIVE) {
                spin_unlock_irq(&phba->hbalock);
@@ -2094,10 +2096,15 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba,
 
        list_for_each_entry_safe(dmabuf, next,
                                 &phba->ras_fwlog.fwlog_buff_list, list) {
+               /* Check if copying will go over size and a '\0' char */
+               if ((copied + LPFC_RAS_MAX_ENTRY_SIZE) >= (size - 1)) {
+                       memcpy(buffer + copied, dmabuf->virt,
+                              size - copied - 1);
+                       copied += size - copied - 1;
+                       break;
+               }
                memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE);
                copied += LPFC_RAS_MAX_ENTRY_SIZE;
-               if (size > copied)
-                       break;
        }
        return copied;
 }
index 85ada3deb47dc6f8d2c79177b8b4f63357ad7ee5..dcc8999c6a683ba50fa542b9a6df2a61ca2804cb 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/kthread.h>
 #include <linux/interrupt.h>
 #include <linux/lockdep.h>
+#include <linux/utsname.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -3315,6 +3316,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
                lpfc_sli4_clear_fcf_rr_bmask(phba);
        }
 
+       /* Prepare for LINK up registrations */
+       memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+       scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+                 init_utsname()->nodename);
        return;
 out:
        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
index 25cdcbc2b02f1bab4fbad71d014b52823f5cf038..9a064b96e570359e65d201a1c2383b3d7663504b 100644 (file)
@@ -3925,6 +3925,9 @@ struct lpfc_mbx_wr_object {
 #define LPFC_CHANGE_STATUS_FW_RESET            0x02
 #define LPFC_CHANGE_STATUS_PORT_MIGRATION      0x04
 #define LPFC_CHANGE_STATUS_PCI_RESET           0x05
+#define lpfc_wr_object_csf_SHIFT               8
+#define lpfc_wr_object_csf_MASK                        0x00000001
+#define lpfc_wr_object_csf_WORD                        word5
                } response;
        } u;
 };
index 6a04fdb3fbf219a073d1ac65a97dd0d33eb7ecf9..5a605773dd0a02c191ffa6f23d16f613215d6966 100644 (file)
@@ -1362,7 +1362,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        lpfc_rcv_seq_check_edtov(vports[i]);
-                       lpfc_fdmi_num_disc_check(vports[i]);
+                       lpfc_fdmi_change_check(vports[i]);
                }
        lpfc_destroy_vport_work_array(phba, vports);
 
@@ -8320,14 +8320,6 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
        phba->hba_flag |= HBA_PERSISTENT_TOPO;
        switch (phba->pcidev->device) {
        case PCI_DEVICE_ID_LANCER_G7_FC:
-               if (tf || (pt == LINK_FLAGS_LOOP)) {
-                       /* Invalid values from FW - use driver params */
-                       phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
-               } else {
-                       /* Prism only supports PT2PT topology */
-                       phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
-               }
-               break;
        case PCI_DEVICE_ID_LANCER_G6_FC:
                if (!tf) {
                        phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
@@ -10449,6 +10441,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
        case LPFC_SLI_INTF_IF_TYPE_6:
                iounmap(phba->sli4_hba.drbl_regs_memmap_p);
                iounmap(phba->sli4_hba.conf_regs_memmap_p);
+               if (phba->sli4_hba.dpp_regs_memmap_p)
+                       iounmap(phba->sli4_hba.dpp_regs_memmap_p);
                break;
        case LPFC_SLI_INTF_IF_TYPE_1:
        default:
index ae435901384650ec7d3edf090a4a495c1b4d1c6a..1c46e3adbda20ce05e0bceb759f95322b25eb988 100644 (file)
@@ -308,7 +308,7 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
                                mb->mbxStatus);
                mempool_free(login_mbox, phba->mbox_mem_pool);
                mempool_free(link_mbox, phba->mbox_mem_pool);
-               lpfc_sli_release_iocbq(phba, save_iocb);
+               kfree(save_iocb);
                return;
        }
 
@@ -325,7 +325,61 @@ lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
        }
 
        mempool_free(link_mbox, phba->mbox_mem_pool);
-       lpfc_sli_release_iocbq(phba, save_iocb);
+       kfree(save_iocb);
+}
+
+/**
+ * lpfc_defer_acc_rsp - Progress SLI4 target rcv PLOGI handler
+ * @phba: Pointer to HBA context object.
+ * @pmb: Pointer to mailbox object.
+ *
+ * This function provides the unreg rpi mailbox completion handler for a tgt.
+ * The routine frees the memory resources associated with the completed
+ * mailbox command and transmits the ELS ACC.
+ *
+ * This routine is only called if we are SLI4, acting in target
+ * mode and the remote NPort issues the PLOGI after link up.
+ **/
+void
+lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+       struct lpfc_vport *vport = pmb->vport;
+       struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
+       LPFC_MBOXQ_t *mbox = pmb->context3;
+       struct lpfc_iocbq *piocb = NULL;
+       int rc;
+
+       if (mbox) {
+               pmb->context3 = NULL;
+               piocb = mbox->context3;
+               mbox->context3 = NULL;
+       }
+
+       /*
+        * Complete the unreg rpi mbx request, and update flags.
+        * This will also restart any deferred events.
+        */
+       lpfc_nlp_get(ndlp);
+       lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
+
+       if (!piocb) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
+                                "4578 PLOGI ACC fail\n");
+               if (mbox)
+                       mempool_free(mbox, phba->mbox_mem_pool);
+               goto out;
+       }
+
+       rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
+       if (rc) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS,
+                                "4579 PLOGI ACC fail %x\n", rc);
+               if (mbox)
+                       mempool_free(mbox, phba->mbox_mem_pool);
+       }
+       kfree(piocb);
+out:
+       lpfc_nlp_put(ndlp);
 }
 
 static int
@@ -345,6 +399,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        struct lpfc_iocbq *save_iocb;
        struct ls_rjt stat;
        uint32_t vid, flag;
+       u16 rpi;
        int rc, defer_acc;
 
        memset(&stat, 0, sizeof (struct ls_rjt));
@@ -488,7 +543,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        link_mbox->vport = vport;
                        link_mbox->ctx_ndlp = ndlp;
 
-                       save_iocb = lpfc_sli_get_iocbq(phba);
+                       save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
                        if (!save_iocb)
                                goto out;
                        /* Save info from cmd IOCB used in rsp */
@@ -513,7 +568,36 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                goto out;
 
        /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
-       if (phba->sli_rev == LPFC_SLI_REV4)
+       if (phba->nvmet_support && !defer_acc) {
+               link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (!link_mbox)
+                       goto out;
+
+               /* As unique identifiers such as iotag would be overwritten
+                * with those from the cmdiocb, allocate separate temporary
+                * storage for the copy.
+                */
+               save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
+               if (!save_iocb)
+                       goto out;
+
+               /* Unreg RPI is required for SLI4. */
+               rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+               lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
+               link_mbox->vport = vport;
+               link_mbox->ctx_ndlp = ndlp;
+               link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
+
+               if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
+                   (!(vport->fc_flag & FC_OFFLINE_MODE)))
+                       ndlp->nlp_flag |= NLP_UNREG_INP;
+
+               /* Save info from cmd IOCB used in rsp */
+               memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
+
+               /* Delay sending ACC till unreg RPI completes. */
+               defer_acc = 1;
+       } else if (phba->sli_rev == LPFC_SLI_REV4)
                lpfc_unreg_rpi(vport, ndlp);
 
        rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
@@ -553,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if ((vport->port_type == LPFC_NPIV_PORT &&
             vport->cfg_restrict_login)) {
 
+               /* no deferred ACC */
+               kfree(save_iocb);
+
                /* In order to preserve RPIs, we want to cleanup
                 * the default RPI the firmware created to rcv
                 * this ELS request. The only way to do this is
@@ -571,8 +658,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        }
        if (defer_acc) {
                /* So the order here should be:
-                * Issue CONFIG_LINK mbox
-                * CONFIG_LINK cmpl
+                * SLI3 pt2pt
+                *   Issue CONFIG_LINK mbox
+                *   CONFIG_LINK cmpl
+                * SLI4 tgt
+                *   Issue UNREG RPI mbx
+                *   UNREG RPI cmpl
                 * Issue PLOGI ACC
                 * PLOGI ACC cmpl
                 * Issue REG_LOGIN mbox
@@ -596,10 +687,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 out:
        if (defer_acc)
                lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-                               "4577 pt2pt discovery failure: %p %p %p\n",
+                               "4577 discovery failure: %p %p %p\n",
                                save_iocb, link_mbox, login_mbox);
-       if (save_iocb)
-               lpfc_sli_release_iocbq(phba, save_iocb);
+       kfree(save_iocb);
        if (link_mbox)
                mempool_free(link_mbox, phba->mbox_mem_pool);
        if (login_mbox)
index b138d9fee67571552d0f9d3cce715ef853d37880..2c7e0b22db2f14211126066afe9af18ae6e04809 100644 (file)
@@ -481,7 +481,7 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
                spin_lock(&qp->abts_io_buf_list_lock);
                list_for_each_entry_safe(psb, next_psb,
                                         &qp->lpfc_abts_io_buf_list, list) {
-                       if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME)
+                       if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
                                continue;
 
                        if (psb->rdata && psb->rdata->pnode &&
@@ -528,7 +528,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
                        list_del_init(&psb->list);
                        psb->flags &= ~LPFC_SBUF_XBUSY;
                        psb->status = IOSTAT_SUCCESS;
-                       if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
+                       if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
                                qp->abts_nvme_io_bufs--;
                                spin_unlock(&qp->abts_io_buf_list_lock);
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
index 625c046ac4efae6778dc665e80395d4a886d7827..64002b0cb02d464b643d9c48b550a8fa09aa072b 100644 (file)
@@ -4918,8 +4918,17 @@ static int
 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
 {
        phba->hbq_in_use = 1;
-       phba->hbqs[LPFC_ELS_HBQ].entry_count =
-               lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
+       /**
+        * Specific case when the MDS diagnostics is enabled and supported.
+        * The receive buffer count is truncated to manage the incoming
+        * traffic.
+        **/
+       if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
+               phba->hbqs[LPFC_ELS_HBQ].entry_count =
+                       lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
+       else
+               phba->hbqs[LPFC_ELS_HBQ].entry_count =
+                       lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
        phba->hbq_count = 1;
        lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
        /* Initially populate or replenish the HBQs */
@@ -19449,7 +19458,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
        struct lpfc_mbx_wr_object *wr_object;
        LPFC_MBOXQ_t *mbox;
        int rc = 0, i = 0;
-       uint32_t shdr_status, shdr_add_status, shdr_change_status;
+       uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
        uint32_t mbox_tmo;
        struct lpfc_dmabuf *dmabuf;
        uint32_t written = 0;
@@ -19506,6 +19515,16 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
        if (check_change_status) {
                shdr_change_status = bf_get(lpfc_wr_object_change_status,
                                            &wr_object->u.response);
+
+               if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
+                   shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
+                       shdr_csf = bf_get(lpfc_wr_object_csf,
+                                         &wr_object->u.response);
+                       if (shdr_csf)
+                               shdr_change_status =
+                                                  LPFC_CHANGE_STATUS_PCI_RESET;
+               }
+
                switch (shdr_change_status) {
                case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
index 9e5ff58edacab787a0b07c9b9dfc50ce949077fb..9563c49f36ab6274bb363acb2253d62af85857b3 100644 (file)
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "12.6.0.2"
+#define LPFC_DRIVER_VERSION "12.6.0.3"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 18b1e31b5eb801c94b9a93259c1ffa045632723d..ed3923f8db4ffa149fe432c44dad84f86f11b450 100644 (file)
  * 08-28-18  02.00.53  Bumped MPI2_HEADER_VERSION_UNIT.
  *                     Added MPI2_IOCSTATUS_FAILURE
  * 12-17-18  02.00.54  Bumped MPI2_HEADER_VERSION_UNIT
+ * 06-24-19  02.00.55  Bumped MPI2_HEADER_VERSION_UNIT
+ * 08-01-19  02.00.56  Bumped MPI2_HEADER_VERSION_UNIT
+ * 10-02-19  02.00.57  Bumped MPI2_HEADER_VERSION_UNIT
  *  --------------------------------------------------------------------------
  */
 
 
 
 /* Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x36)
+#define MPI2_HEADER_VERSION_UNIT            (0x39)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
 #define MPI2_IOC_STATE_READY               (0x10000000)
 #define MPI2_IOC_STATE_OPERATIONAL         (0x20000000)
 #define MPI2_IOC_STATE_FAULT               (0x40000000)
+#define MPI2_IOC_STATE_COREDUMP            (0x50000000)
 
 #define MPI2_IOC_STATE_MASK                (0xF0000000)
 #define MPI2_IOC_STATE_SHIFT               (28)
index 3a6871aecadac414976b8a44e89d28f375dfd005..43a3bf8ff428c9576a9ede67195457d0ec87a261 100644 (file)
  * 08-28-18  02.00.46  Added NVMs Write Cache flag to IOUnitPage1
  *                     Added DMDReport Delay Time defines to PCIeIOUnitPage1
  * 12-17-18  02.00.47  Swap locations of Slotx2 and Slotx4 in ManPage 7.
+ * 08-01-19  02.00.49  Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID
+ *                     Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT
  */
 
 #ifndef MPI2_CNFG_H
@@ -891,6 +893,8 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
 #define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER       (0x00000002)
 #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO                (0x00000001)
 
+#define MPI26_MANPAGE7_FLAG_CONN_LANE_USE_PINOUT        (0x00000020)
+#define MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID       (0x00000010)
 
 /*
  *Generic structure to use for product-specific manufacturing pages
@@ -962,9 +966,10 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
 
 /* IO Unit Page 1 Flags defines */
 #define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK             (0x00030000)
-#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE           (0x00000000)
-#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE          (0x00010000)
-#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE        (0x00020000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT            (16)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE        (0x00000000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE           (0x00010000)
+#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE          (0x00020000)
 #define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK       (0x00004000)
 #define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE  (0x00002000)
 #define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH             (0x00001000)
@@ -3931,7 +3936,13 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 {
        U32     MaximumDataTransferSize;        /*0x0C */
        U32     Capabilities;           /*0x10 */
        U16     NOIOB;          /* 0x14 */
-       U16     Reserved2;              /* 0x16 */
+       U16     ShutdownLatency;        /* 0x16 */
+       U16     VendorID;               /* 0x18 */
+       U16     DeviceID;               /* 0x1A */
+       U16     SubsystemVendorID;      /* 0x1C */
+       U16     SubsystemID;            /* 0x1E */
+       U8      RevisionID;             /* 0x20 */
+       U8      Reserved21[3];          /* 0x21 */
 } MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2,
        Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t;
 
index a3f677853098958df186dd2c4e1634ecb59cd0a0..33b9c3a6fd4012b7ddecea62bb3914afa3671386 100644 (file)
  * 09-07-18  02.06.03  Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
  * 12-17-18  02.06.04  Addd MPI2_EXT_IMAGE_TYPE_PBLP
  *                     Shorten some defines to be compatible with DOS
+ * 06-24-19  02.06.05  Whitespace adjustments to help with identifier
+ *                     checking tool.
+ * 10-02-19  02.06.06  Added MPI26_IMAGE_HEADER_SIG1_COREDUMP
+ *                     Added MPI2_FLASH_REGION_COREDUMP
  */
 #ifndef MPI2_IMAGE_H
 #define MPI2_IMAGE_H
@@ -213,6 +217,8 @@ typedef struct _MPI26_COMPONENT_IMAGE_HEADER {
 #define MPI26_IMAGE_HEADER_SIG1_NVDATA                   (0x5444564E)
 #define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE                (0x20534147)
 #define MPI26_IMAGE_HEADER_SIG1_PBLP                     (0x504C4250)
+/* little-endian "DUMP" */
+#define MPI26_IMAGE_HEADER_SIG1_COREDUMP                 (0x504D5544)
 
 /**** Definitions for Signature2 field ****/
 #define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE                    (0x50584546)
@@ -359,6 +365,7 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
 #define MPI2_FLASH_REGION_MR_NVDATA             (0x14)
 #define MPI2_FLASH_REGION_CPLD                  (0x15)
 #define MPI2_FLASH_REGION_PSOC                  (0x16)
+#define MPI2_FLASH_REGION_COREDUMP              (0x17)
 
 /*ImageRevision */
 #define MPI2_FLASH_LAYOUT_IMAGE_REVISION        (0x00)
index 68ea408cd5c5cadb7d7dd73d6c5932ea6901b562..e83c7c529dc9fcf23112b2b0c117413f428db24d 100644 (file)
  *                     Moved FW image definitions ionto new mpi2_image,h
  * 08-14-18   02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16)
  * 09-07-18   02.00.37 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
+ * 10-02-19   02.00.38 Added MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE
+ *                     Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED
+ *                     Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP
+ *                     Added MPI2_FW_UPLOAD_ITYPE_COREDUMP
  * --------------------------------------------------------------------------
  */
 
@@ -248,6 +252,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
 
 /*ConfigurationFlags */
 #define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT  (0x0001)
+#define MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE  (0x0002)
 
 /*minimum depth for a Reply Descriptor Post Queue */
 #define MPI2_RDPQ_DEPTH_MIN                     (16)
@@ -377,6 +382,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
 /*ProductID field uses MPI2_FW_HEADER_PID_ */
 
 /*IOCCapabilities */
+#define MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED      (0x00200000)
 #define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV            (0x00100000)
 #define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ            (0x00080000)
 #define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE     (0x00040000)
@@ -1458,8 +1464,8 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
 /*MPI v2.6 and newer */
 #define MPI2_FW_DOWNLOAD_ITYPE_CPLD                 (0x15)
 #define MPI2_FW_DOWNLOAD_ITYPE_PSOC                 (0x16)
+#define MPI2_FW_DOWNLOAD_ITYPE_COREDUMP             (0x17)
 #define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
-#define MPI2_FW_DOWNLOAD_ITYPE_TERMINATE            (0xFF)
 
 /*MPI v2.0 FWDownload TransactionContext Element */
 typedef struct _MPI2_FW_DOWNLOAD_TCSGE {
index 45fd8dfb7c4056fba801fe880e0647ef1d40f09a..663782bb790dc5b690672aaf2658e2fdb9e5460a 100644 (file)
@@ -123,8 +123,15 @@ enum mpt3sas_perf_mode {
        MPT_PERF_MODE_LATENCY   = 2,
 };
 
+static int
+_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
+               u32 ioc_state, int timeout);
 static int
 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
+static void
+_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
+static void
+_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
 
 /**
  * mpt3sas_base_check_cmd_timeout - Function
@@ -609,7 +616,8 @@ _base_fault_reset_work(struct work_struct *work)
 
 
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-       if (ioc->shost_recovery || ioc->pci_error_recovery)
+       if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
+                       ioc->pci_error_recovery)
                goto rearm_timer;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
@@ -656,20 +664,64 @@ _base_fault_reset_work(struct work_struct *work)
                return; /* don't rearm timer */
        }
 
-       ioc->non_operational_loop = 0;
+       if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
+               u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
+                   ioc->manu_pg11.CoreDumpTOSec :
+                   MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
+
+               timeout /= (FAULT_POLLING_INTERVAL/1000);
+
+               if (ioc->ioc_coredump_loop == 0) {
+                       mpt3sas_print_coredump_info(ioc,
+                           doorbell & MPI2_DOORBELL_DATA_MASK);
+                       /* do not accept any IOs and disable the interrupts */
+                       spin_lock_irqsave(
+                           &ioc->ioc_reset_in_progress_lock, flags);
+                       ioc->shost_recovery = 1;
+                       spin_unlock_irqrestore(
+                           &ioc->ioc_reset_in_progress_lock, flags);
+                       _base_mask_interrupts(ioc);
+                       _base_clear_outstanding_commands(ioc);
+               }
+
+               ioc_info(ioc, "%s: CoreDump loop %d.",
+                   __func__, ioc->ioc_coredump_loop);
 
+               /* Wait until CoreDump completes or times out */
+               if (ioc->ioc_coredump_loop++ < timeout) {
+                       spin_lock_irqsave(
+                           &ioc->ioc_reset_in_progress_lock, flags);
+                       goto rearm_timer;
+               }
+       }
+
+       if (ioc->ioc_coredump_loop) {
+               if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
+                       ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
+                           __func__, ioc->ioc_coredump_loop);
+               else
+                       ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
+                           __func__, ioc->ioc_coredump_loop);
+               ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
+       }
+       ioc->non_operational_loop = 0;
        if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
                rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
                ioc_warn(ioc, "%s: hard reset: %s\n",
                         __func__, rc == 0 ? "success" : "failed");
                doorbell = mpt3sas_base_get_iocstate(ioc, 0);
-               if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
-                       mpt3sas_base_fault_info(ioc, doorbell &
+               if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+                       mpt3sas_print_fault_code(ioc, doorbell &
+                           MPI2_DOORBELL_DATA_MASK);
+               } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+                   MPI2_IOC_STATE_COREDUMP)
+                       mpt3sas_print_coredump_info(ioc, doorbell &
                            MPI2_DOORBELL_DATA_MASK);
                if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
                    MPI2_IOC_STATE_OPERATIONAL)
                        return; /* don't rearm timer */
        }
+       ioc->ioc_coredump_loop = 0;
 
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
  rearm_timer:
@@ -748,6 +800,49 @@ mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
        ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
 }
 
+/**
+ * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
+ * @ioc: per adapter object
+ * @fault_code: fault code
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
+{
+       ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
+}
+
+/**
+ * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
+ * completes or times out
+ * @ioc: per adapter object
+ * @caller: name of the calling function, used in the log messages
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
+               const char *caller)
+{
+       u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
+                       ioc->manu_pg11.CoreDumpTOSec :
+                       MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
+
+       int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
+                                       timeout);
+
+       if (ioc_state)
+               ioc_err(ioc,
+                   "%s: CoreDump timed out. (ioc_state=0x%x)\n",
+                   caller, ioc_state);
+       else
+               ioc_info(ioc,
+                   "%s: CoreDump completed. (ioc_state=0x%x)\n",
+                   caller, ioc_state);
+
+       return ioc_state;
+}
+
 /**
  * mpt3sas_halt_firmware - halt's mpt controller firmware
  * @ioc: per adapter object
@@ -768,9 +863,14 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
        dump_stack();
 
        doorbell = ioc->base_readl(&ioc->chip->Doorbell);
-       if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
-               mpt3sas_base_fault_info(ioc , doorbell);
-       else {
+       if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+               mpt3sas_print_fault_code(ioc, doorbell &
+                   MPI2_DOORBELL_DATA_MASK);
+       } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+           MPI2_IOC_STATE_COREDUMP) {
+               mpt3sas_print_coredump_info(ioc, doorbell &
+                   MPI2_DOORBELL_DATA_MASK);
+       } else {
                writel(0xC0FFEE00, &ioc->chip->Doorbell);
                ioc_err(ioc, "Firmware is halted due to command timeout\n");
        }
@@ -3103,6 +3203,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
         */
        if (!ioc->combined_reply_queue &&
            ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+               ioc_info(ioc,
+                   "combined ReplyQueue is off, Enabling msix load balance\n");
                ioc->msix_load_balance = true;
        }
 
@@ -3115,9 +3217,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 
        r = _base_alloc_irq_vectors(ioc);
        if (r < 0) {
-               dfailprintk(ioc,
-                           ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
-                                    r));
+               ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
                goto try_ioapic;
        }
 
@@ -3206,9 +3306,15 @@ _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
        dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
 
        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
-               mpt3sas_base_fault_info(ioc, ioc_state &
+               mpt3sas_print_fault_code(ioc, ioc_state &
                    MPI2_DOORBELL_DATA_MASK);
                rc = _base_diag_reset(ioc);
+       } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+           MPI2_IOC_STATE_COREDUMP) {
+               mpt3sas_print_coredump_info(ioc, ioc_state &
+                    MPI2_DOORBELL_DATA_MASK);
+               mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
+               rc = _base_diag_reset(ioc);
        }
 
        return rc;
@@ -3279,7 +3385,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
        }
 
        if (ioc->chip == NULL) {
-               ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
+               ioc_err(ioc,
+                   "unable to map adapter memory! or resource not found\n");
                r = -EINVAL;
                goto out_fail;
        }
@@ -3318,8 +3425,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
                     ioc->combined_reply_index_count,
                     sizeof(resource_size_t *), GFP_KERNEL);
                if (!ioc->replyPostRegisterIndex) {
-                       dfailprintk(ioc,
-                                   ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
+                       ioc_err(ioc,
+                           "allocation for replyPostRegisterIndex failed!\n");
                        r = -ENOMEM;
                        goto out_fail;
                }
@@ -3466,6 +3573,22 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
        return ioc->cpu_msix_table[raw_smp_processor_id()];
 }
 
+/**
+ * _base_sdev_nr_inflight_request - get number of inflight requests
+ *                                of a request queue.
+ * @q: request_queue object
+ *
+ * returns number of inflight request of a request queue.
+ */
+inline unsigned long
+_base_sdev_nr_inflight_request(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0];
+
+       return atomic_read(&hctx->nr_active);
+}
+
+
 /**
  * _base_get_high_iops_msix_index - get the msix index of
  *                             high iops queues
@@ -3485,7 +3608,7 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
         * reply queues in terms of batch count 16 when outstanding
         * IOs on the target device is >=8.
         */
-       if (atomic_read(&scmd->device->device_busy) >
+       if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
            MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
                return base_mod64((
                    atomic64_add_return(1, &ioc->high_iops_outstanding) /
@@ -4264,7 +4387,8 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
        fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
                        &fwpkg_data_dma, GFP_KERNEL);
        if (!fwpkg_data) {
-               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+               ioc_err(ioc,
+                   "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
                        __FILE__, __LINE__, __func__);
                return -ENOMEM;
        }
@@ -4994,12 +5118,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
        }
 
-       dinitprintk(ioc,
-                   ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
-                            ioc->max_sges_in_main_message,
-                            ioc->max_sges_in_chain_message,
-                            ioc->shost->sg_tablesize,
-                            ioc->chains_needed_per_io));
+       ioc_info(ioc,
+           "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
+           "sge_per_io(%d), chains_per_io(%d)\n",
+           ioc->max_sges_in_main_message,
+           ioc->max_sges_in_chain_message,
+           ioc->shost->sg_tablesize,
+           ioc->chains_needed_per_io);
 
        /* reply post queue, 16 byte align */
        reply_post_free_sz = ioc->reply_post_queue_depth *
@@ -5109,15 +5234,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
            ioc->request_sz);
 
-       dinitprintk(ioc,
-                   ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-                            ioc->request, ioc->hba_queue_depth,
-                            ioc->request_sz,
-                            (ioc->hba_queue_depth * ioc->request_sz) / 1024));
+       ioc_info(ioc,
+           "request pool(0x%p) - dma(0x%llx): "
+           "depth(%d), frame_size(%d), pool_size(%d kB)\n",
+           ioc->request, (unsigned long long) ioc->request_dma,
+           ioc->hba_queue_depth, ioc->request_sz,
+           (ioc->hba_queue_depth * ioc->request_sz) / 1024);
 
-       dinitprintk(ioc,
-                   ioc_info(ioc, "request pool: dma(0x%llx)\n",
-                            (unsigned long long)ioc->request_dma));
        total_sz += sz;
 
        dinitprintk(ioc,
@@ -5302,13 +5425,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                        goto out;
                }
        }
-       dinitprintk(ioc,
-                   ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
-                            ioc->sense, ioc->scsiio_depth,
-                            SCSI_SENSE_BUFFERSIZE, sz / 1024));
-       dinitprintk(ioc,
-                   ioc_info(ioc, "sense_dma(0x%llx)\n",
-                            (unsigned long long)ioc->sense_dma));
+       ioc_info(ioc,
+           "sense pool(0x%p)- dma(0x%llx): depth(%d),"
+           "element_size(%d), pool_size(%d kB)\n",
+           ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
+           SCSI_SENSE_BUFFERSIZE, sz / 1024);
+
        total_sz += sz;
 
        /* reply pool, 4 byte align */
@@ -5386,12 +5508,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                ioc_err(ioc, "config page: dma_pool_alloc failed\n");
                goto out;
        }
-       dinitprintk(ioc,
-                   ioc_info(ioc, "config page(0x%p): size(%d)\n",
-                            ioc->config_page, ioc->config_page_sz));
-       dinitprintk(ioc,
-                   ioc_info(ioc, "config_page_dma(0x%llx)\n",
-                            (unsigned long long)ioc->config_page_dma));
+
+       ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
+           ioc->config_page, (unsigned long long)ioc->config_page_dma,
+           ioc->config_page_sz);
        total_sz += ioc->config_page_sz;
 
        ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
@@ -5446,6 +5566,8 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
                        return 0;
                if (count && current_state == MPI2_IOC_STATE_FAULT)
                        break;
+               if (count && current_state == MPI2_IOC_STATE_COREDUMP)
+                       break;
 
                usleep_range(1000, 1500);
                count++;
@@ -5547,7 +5669,12 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
                        doorbell = ioc->base_readl(&ioc->chip->Doorbell);
                        if ((doorbell & MPI2_IOC_STATE_MASK) ==
                            MPI2_IOC_STATE_FAULT) {
-                               mpt3sas_base_fault_info(ioc , doorbell);
+                               mpt3sas_print_fault_code(ioc, doorbell);
+                               return -EFAULT;
+                       }
+                       if ((doorbell & MPI2_IOC_STATE_MASK) ==
+                           MPI2_IOC_STATE_COREDUMP) {
+                               mpt3sas_print_coredump_info(ioc, doorbell);
                                return -EFAULT;
                        }
                } else if (int_status == 0xFFFFFFFF)
@@ -5609,6 +5736,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
 {
        u32 ioc_state;
        int r = 0;
+       unsigned long flags;
 
        if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
                ioc_err(ioc, "%s: unknown reset_type\n", __func__);
@@ -5627,6 +5755,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
                r = -EFAULT;
                goto out;
        }
+
        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
        if (ioc_state) {
                ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -5635,6 +5764,26 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
                goto out;
        }
  out:
+       if (r != 0) {
+               ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+               spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+               /*
+                * Wait for IOC state CoreDump to clear only during
+                * HBA initialization & release time.
+                */
+               if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+                   MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
+                   ioc->fault_reset_work_q == NULL)) {
+                       spin_unlock_irqrestore(
+                           &ioc->ioc_reset_in_progress_lock, flags);
+                       mpt3sas_print_coredump_info(ioc, ioc_state);
+                       mpt3sas_base_wait_for_coredump_completion(ioc,
+                           __func__);
+                       spin_lock_irqsave(
+                           &ioc->ioc_reset_in_progress_lock, flags);
+               }
+               spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+       }
        ioc_info(ioc, "message unit reset: %s\n",
                 r == 0 ? "SUCCESS" : "FAILED");
        return r;
@@ -5782,7 +5931,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
                mfp = (__le32 *)reply;
                pr_info("\toffset:data\n");
                for (i = 0; i < reply_bytes/4; i++)
-                       pr_info("\t[0x%02x]:%08x\n", i*4,
+                       ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
                            le32_to_cpu(mfp[i]));
        }
        return 0;
@@ -5850,10 +5999,9 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
            ioc->ioc_link_reset_in_progress)
                ioc->ioc_link_reset_in_progress = 0;
        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
-               issue_reset =
-                       mpt3sas_base_check_cmd_timeout(ioc,
-                               ioc->base_cmds.status, mpi_request,
-                               sizeof(Mpi2SasIoUnitControlRequest_t)/4);
+               mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
+                   mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
+                   issue_reset);
                goto issue_host_reset;
        }
        if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -5926,10 +6074,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
        wait_for_completion_timeout(&ioc->base_cmds.done,
            msecs_to_jiffies(10000));
        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
-               issue_reset =
-                       mpt3sas_base_check_cmd_timeout(ioc,
-                               ioc->base_cmds.status, mpi_request,
-                               sizeof(Mpi2SepRequest_t)/4);
+               mpt3sas_check_cmd_timeout(ioc,
+                   ioc->base_cmds.status, mpi_request,
+                   sizeof(Mpi2SepRequest_t)/4, issue_reset);
                goto issue_host_reset;
        }
        if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
@@ -6028,9 +6175,15 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
        }
 
        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
-               mpt3sas_base_fault_info(ioc, ioc_state &
+               mpt3sas_print_fault_code(ioc, ioc_state &
                    MPI2_DOORBELL_DATA_MASK);
                goto issue_diag_reset;
+       } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+           MPI2_IOC_STATE_COREDUMP) {
+               ioc_info(ioc,
+                   "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
+                   __func__, ioc_state);
+               return -EFAULT;
        }
 
        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
@@ -6209,6 +6362,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
                    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
        }
 
+       /*
+        * Set the flag to enable CoreDump state feature in IOC firmware.
+        */
+       mpi_request.ConfigurationFlags |=
+           cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
+
        /* This time stamp specifies number of milliseconds
         * since epoch ~ midnight January 1, 1970.
         */
@@ -6220,9 +6379,9 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
                int i;
 
                mfp = (__le32 *)&mpi_request;
-               pr_info("\toffset:data\n");
+               ioc_info(ioc, "\toffset:data\n");
                for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
-                       pr_info("\t[0x%02x]:%08x\n", i*4,
+                       ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
                            le32_to_cpu(mfp[i]));
        }
 
@@ -6592,8 +6751,11 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
                /* wait 100 msec */
                msleep(100);
 
-               if (count++ > 20)
+               if (count++ > 20) {
+                       ioc_info(ioc,
+                           "Stop writing magic sequence after 20 retries\n");
                        goto out;
+               }
 
                host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
                drsprintk(ioc,
@@ -6617,8 +6779,11 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 
                host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
 
-               if (host_diagnostic == 0xFFFFFFFF)
+               if (host_diagnostic == 0xFFFFFFFF) {
+                       ioc_info(ioc,
+                           "Invalid host diagnostic register value\n");
                        goto out;
+               }
                if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
                        break;
 
@@ -6705,16 +6870,33 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
                return 0;
 
        if (ioc_state & MPI2_DOORBELL_USED) {
-               dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
+               ioc_info(ioc, "unexpected doorbell active!\n");
                goto issue_diag_reset;
        }
 
        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
-               mpt3sas_base_fault_info(ioc, ioc_state &
+               mpt3sas_print_fault_code(ioc, ioc_state &
                    MPI2_DOORBELL_DATA_MASK);
                goto issue_diag_reset;
        }
 
+       if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
+               /*
+                * if host reset is invoked while watch dog thread is waiting
+                * for IOC state to be changed to Fault state then driver has
+                * to wait here for CoreDump state to clear otherwise reset
+                * will be issued to the FW and FW move the IOC state to
+                * reset state without copying the FW logs to coredump region.
+                */
+               if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
+                       mpt3sas_print_coredump_info(ioc, ioc_state &
+                           MPI2_DOORBELL_DATA_MASK);
+                       mpt3sas_base_wait_for_coredump_completion(ioc,
+                           __func__);
+               }
+               goto issue_diag_reset;
+       }
+
        if (type == FORCE_BIG_HAMMER)
                goto issue_diag_reset;
 
@@ -6958,8 +7140,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
        ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
        ioc->reply_queue_count = 1;
        if (!ioc->cpu_msix_table) {
-               dfailprintk(ioc,
-                           ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
+               ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
                r = -ENOMEM;
                goto out_free_resources;
        }
@@ -6968,8 +7149,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
                ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
                    sizeof(resource_size_t *), GFP_KERNEL);
                if (!ioc->reply_post_host_index) {
-                       dfailprintk(ioc,
-                                   ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
+                       ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
                        r = -ENOMEM;
                        goto out_free_resources;
                }
@@ -7195,6 +7375,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
            sizeof(struct mpt3sas_facts));
 
        ioc->non_operational_loop = 0;
+       ioc->ioc_coredump_loop = 0;
        ioc->got_task_abort_from_ioctl = 0;
        return 0;
 
@@ -7276,14 +7457,14 @@ static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * _base_after_reset_handler - after reset handler
+ * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
  * @ioc: per adapter object
  */
-static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+static void
+_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
 {
-       mpt3sas_scsih_after_reset_handler(ioc);
-       mpt3sas_ctl_after_reset_handler(ioc);
-       dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+       dtmprintk(ioc,
+           ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
        if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
                ioc->transport_cmds.status |= MPT3_CMD_RESET;
                mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
@@ -7316,6 +7497,17 @@ static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
        }
 }
 
+/**
+ * _base_clear_outstanding_commands - clear all outstanding commands
+ * @ioc: per adapter object
+ */
+static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
+{
+       mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
+       mpt3sas_ctl_clear_outstanding_ioctls(ioc);
+       _base_clear_outstanding_mpt_commands(ioc);
+}
+
 /**
  * _base_reset_done_handler - reset done handler
  * @ioc: per adapter object
@@ -7474,7 +7666,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
            MPT3_DIAG_BUFFER_IS_RELEASED))) {
                is_trigger = 1;
                ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
-               if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+               if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
+                   (ioc_state & MPI2_IOC_STATE_MASK) ==
+                   MPI2_IOC_STATE_COREDUMP)
                        is_fault = 1;
        }
        _base_pre_reset_handler(ioc);
@@ -7483,7 +7677,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
        r = _base_make_ioc_ready(ioc, type);
        if (r)
                goto out;
-       _base_after_reset_handler(ioc);
+       _base_clear_outstanding_commands(ioc);
 
        /* If this hard reset is called while port enable is active, then
         * there is no reason to call make_ioc_operational
@@ -7514,9 +7708,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
                _base_reset_done_handler(ioc);
 
  out:
-       dtmprintk(ioc,
-                 ioc_info(ioc, "%s: %s\n",
-                          __func__, r == 0 ? "SUCCESS" : "FAILED"));
+       ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
 
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        ioc->shost_recovery = 0;
index 4ebf81ea4d2fcf10cbac0df6201d392ad4316db4..e7197150721fe0d0f42406eeecd69ae17e15b17d 100644 (file)
@@ -76,8 +76,8 @@
 #define MPT3SAS_DRIVER_NAME            "mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION    "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION         "32.100.00.00"
-#define MPT3SAS_MAJOR_VERSION          32
+#define MPT3SAS_DRIVER_VERSION         "33.100.00.00"
+#define MPT3SAS_MAJOR_VERSION          33
 #define MPT3SAS_MINOR_VERSION          100
 #define MPT3SAS_BUILD_VERSION          0
 #define MPT3SAS_RELEASE_VERSION        00
 #define MPT2SAS_BUILD_VERSION          0
 #define MPT2SAS_RELEASE_VERSION        00
 
+/* CoreDump: Default timeout */
+#define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS       (15) /*15 seconds*/
+#define MPT3SAS_COREDUMP_LOOP_DONE                     (0xFF)
+
 /*
  * Set MPT3SAS_SG_DEPTH value based on user input.
  */
 #define MAX_CHAIN_ELEMT_SZ             16
 #define DEFAULT_NUM_FWCHAIN_ELEMTS     8
 
+#define IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT 6
 #define FW_IMG_HDR_READ_TIMEOUT        15
 
 #define IOC_OPERATIONAL_WAIT_COUNT     10
@@ -398,7 +403,10 @@ struct Mpi2ManufacturingPage11_t {
        u8      HostTraceBufferFlags;           /* 4Fh */
        u16     HostTraceBufferMaxSizeKB;       /* 50h */
        u16     HostTraceBufferMinSizeKB;       /* 52h */
-       __le32  Reserved10[2];                  /* 54h - 5Bh */
+       u8      CoreDumpTOSec;                  /* 54h */
+       u8      Reserved8;                      /* 55h */
+       u16     Reserved9;                      /* 56h */
+       __le32  Reserved10;                     /* 58h */
 };
 
 /**
@@ -589,6 +597,7 @@ static inline void sas_device_put(struct _sas_device *s)
  * @connector_name: ASCII value of the Connector's name
  * @serial_number: pointer of serial number string allocated runtime
  * @access_status: Device's Access Status
+ * @shutdown_latency: NVMe device's RTD3 Entry Latency
  * @refcount: reference count for deletion
  */
 struct _pcie_device {
@@ -611,6 +620,7 @@ struct _pcie_device {
        u8      *serial_number;
        u8      reset_timeout;
        u8      access_status;
+       u16     shutdown_latency;
        struct kref refcount;
 };
 /**
@@ -1045,6 +1055,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
  * @cpu_msix_table: table for mapping cpus to msix index
  * @cpu_msix_table_sz: table size
  * @total_io_cnt: Gives total IO count, used to load balance the interrupts
+ * @ioc_coredump_loop: will have non-zero value when FW is in CoreDump state
  * @high_iops_outstanding: used to load balance the interrupts
  *                             within high iops reply queues
  * @msix_load_balance: Enables load balancing of interrupts across
@@ -1073,6 +1084,10 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
  * @event_context: unique id for each logged event
  * @event_log: event log pointer
  * @event_masks: events that are masked
+ * @max_shutdown_latency: timeout value for NVMe shutdown operation,
+ *                     which is equal that NVMe drive's RTD3 Entry Latency
+ *                     which has reported maximum RTD3 Entry Latency value
+ *                     among attached NVMe drives.
  * @facts: static facts data
  * @prev_fw_facts: previous fw facts data
  * @pfacts: static port facts data
@@ -1231,6 +1246,7 @@ struct MPT3SAS_ADAPTER {
        u32             ioc_reset_count;
        MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
        u32             non_operational_loop;
+       u8              ioc_coredump_loop;
        atomic64_t      total_io_cnt;
        atomic64_t      high_iops_outstanding;
        bool            msix_load_balance;
@@ -1283,7 +1299,7 @@ struct MPT3SAS_ADAPTER {
 
        u8              tm_custom_handling;
        u8              nvme_abort_timeout;
-
+       u16             max_shutdown_latency;
 
        /* static config pages */
        struct mpt3sas_facts facts;
@@ -1531,6 +1547,17 @@ void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc,
 u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked);
 
 void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code);
+#define mpt3sas_print_fault_code(ioc, fault_code) \
+do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \
+       mpt3sas_base_fault_info(ioc, fault_code); } while (0)
+
+void mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code);
+#define mpt3sas_print_coredump_info(ioc, fault_code) \
+do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \
+       mpt3sas_base_coredump_info(ioc, fault_code); } while (0)
+
+int mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
+               const char *caller);
 int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
        Mpi2SasIoUnitControlReply_t *mpi_reply,
        Mpi2SasIoUnitControlRequest_t *mpi_request);
@@ -1552,6 +1579,11 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc);
 
 u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
        u8 status, void *mpi_request, int sz);
+#define mpt3sas_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \
+do {   ioc_err(ioc, "In func: %s\n", __func__); \
+       issue_reset = mpt3sas_base_check_cmd_timeout(ioc, \
+       status, mpi_request, sz); } while (0)
+
 int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count);
 
 /* scsih shared API */
@@ -1560,7 +1592,8 @@ struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
 u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
        u32 reply);
 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
-void mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_scsih_clear_outstanding_scsi_tm_commands(
+       struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
 
 int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
@@ -1694,7 +1727,7 @@ void mpt3sas_ctl_exit(ushort hbas_to_enumerate);
 u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        u32 reply);
 void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc);
-void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
 u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc,
        u8 msix_index, u32 reply);
index 14a1a2793dd5db3049cc44a494f75584637e31ab..62ddf53ab3aea1b742d5a1373ce48202411d9ba3 100644 (file)
@@ -101,9 +101,6 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        Mpi2ConfigRequest_t *mpi_request;
        char *desc = NULL;
 
-       if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
-               return;
-
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
        switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) {
        case MPI2_CONFIG_PAGETYPE_IO_UNIT:
@@ -269,7 +266,8 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
                    mpi_reply->MsgLength*4);
        }
        ioc->config_cmds.status &= ~MPT3_CMD_PENDING;
-       _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
+       if (ioc->logging_level & MPT_DEBUG_CONFIG)
+               _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
        ioc->config_cmds.smid = USHRT_MAX;
        complete(&ioc->config_cmds.done);
        return 1;
@@ -305,6 +303,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
        u8 retry_count, issue_host_reset = 0;
        struct config_request mem;
        u32 ioc_status = UINT_MAX;
+       u8 issue_reset = 0;
 
        mutex_lock(&ioc->config_cmds.mutex);
        if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
@@ -378,14 +377,18 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
        config_request = mpt3sas_base_get_msg_frame(ioc, smid);
        ioc->config_cmds.smid = smid;
        memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
-       _config_display_some_debug(ioc, smid, "config_request", NULL);
+       if (ioc->logging_level & MPT_DEBUG_CONFIG)
+               _config_display_some_debug(ioc, smid, "config_request", NULL);
        init_completion(&ioc->config_cmds.done);
        ioc->put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
        if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
-               mpt3sas_base_check_cmd_timeout(ioc,
-                       ioc->config_cmds.status, mpi_request,
-                       sizeof(Mpi2ConfigRequest_t)/4);
+               if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+                       _config_display_some_debug(ioc,
+                           smid, "config_request", NULL);
+               mpt3sas_check_cmd_timeout(ioc,
+                   ioc->config_cmds.status, mpi_request,
+                   sizeof(Mpi2ConfigRequest_t)/4, issue_reset);
                retry_count++;
                if (ioc->config_cmds.smid == smid)
                        mpt3sas_base_free_smid(ioc, smid);
@@ -404,8 +407,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
                /* Reply Frame Sanity Checks to workaround FW issues */
                if ((mpi_request->Header.PageType & 0xF) !=
                    (mpi_reply->Header.PageType & 0xF)) {
+                       if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+                               _config_display_some_debug(ioc,
+                                   smid, "config_request", NULL);
                        _debug_dump_mf(mpi_request, ioc->request_sz/4);
-                       _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+                       _debug_dump_reply(mpi_reply, ioc->reply_sz/4);
                        panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
                              ioc->name, __func__,
                              mpi_request->Header.PageType & 0xF,
@@ -415,8 +421,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
                if (((mpi_request->Header.PageType & 0xF) ==
                    MPI2_CONFIG_PAGETYPE_EXTENDED) &&
                    mpi_request->ExtPageType != mpi_reply->ExtPageType) {
+                       if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+                               _config_display_some_debug(ioc,
+                                   smid, "config_request", NULL);
                        _debug_dump_mf(mpi_request, ioc->request_sz/4);
-                       _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+                       _debug_dump_reply(mpi_reply, ioc->reply_sz/4);
                        panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
                              ioc->name, __func__,
                              mpi_request->ExtPageType,
@@ -439,8 +448,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
                if (p) {
                        if ((mpi_request->Header.PageType & 0xF) !=
                            (p[3] & 0xF)) {
+                               if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+                                       _config_display_some_debug(ioc,
+                                           smid, "config_request", NULL);
                                _debug_dump_mf(mpi_request, ioc->request_sz/4);
-                               _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+                               _debug_dump_reply(mpi_reply, ioc->reply_sz/4);
                                _debug_dump_config(p, min_t(u16, mem.sz,
                                    config_page_sz)/4);
                                panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
@@ -452,8 +464,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
                        if (((mpi_request->Header.PageType & 0xF) ==
                            MPI2_CONFIG_PAGETYPE_EXTENDED) &&
                            (mpi_request->ExtPageType != p[6])) {
+                               if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+                                       _config_display_some_debug(ioc,
+                                           smid, "config_request", NULL);
                                _debug_dump_mf(mpi_request, ioc->request_sz/4);
-                               _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+                               _debug_dump_reply(mpi_reply, ioc->reply_sz/4);
                                _debug_dump_config(p, min_t(u16, mem.sz,
                                    config_page_sz)/4);
                                panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
index 6874cf01773950b5ab739bc1ed8961350ba74552..62e552838565fa32fd36d7e8226218845a2e516f 100644 (file)
@@ -180,6 +180,12 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        case MPI2_FUNCTION_SMP_PASSTHROUGH:
                desc = "smp_passthrough";
                break;
+       case MPI2_FUNCTION_TOOLBOX:
+               desc = "toolbox";
+               break;
+       case MPI2_FUNCTION_NVME_ENCAPSULATED:
+               desc = "nvme_encapsulated";
+               break;
        }
 
        if (!desc)
@@ -478,14 +484,15 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * mpt3sas_ctl_reset_handler - clears outstanding ioctl cmd.
  * @ioc: per adapter object
  *
  * The handler for doing any required cleanup or initialization.
  */
-void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc)
 {
-       dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+       dtmprintk(ioc,
+           ioc_info(ioc, "%s: clear outstanding ioctl cmd\n", __func__));
        if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
                ioc->ctl_cmds.status |= MPT3_CMD_RESET;
                mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
@@ -1021,10 +1028,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
                ioc->ignore_loginfos = 0;
        }
        if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
-               issue_reset =
-                       mpt3sas_base_check_cmd_timeout(ioc,
-                               ioc->ctl_cmds.status, mpi_request,
-                               karg.data_sge_offset);
+               mpt3sas_check_cmd_timeout(ioc,
+                   ioc->ctl_cmds.status, mpi_request,
+                   karg.data_sge_offset, issue_reset);
                goto issue_host_reset;
        }
 
@@ -1325,7 +1331,8 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
                                 __func__));
 
        retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-       ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
+       ioc_info(ioc,
+           "Ioctl: host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
        return 0;
 }
 
@@ -1733,10 +1740,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
            MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
        if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
-               issue_reset =
-                       mpt3sas_base_check_cmd_timeout(ioc,
-                               ioc->ctl_cmds.status, mpi_request,
-                               sizeof(Mpi2DiagBufferPostRequest_t)/4);
+               mpt3sas_check_cmd_timeout(ioc,
+                   ioc->ctl_cmds.status, mpi_request,
+                   sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
                goto issue_host_reset;
        }
 
@@ -2108,6 +2114,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
        u16 ioc_status;
        u32 ioc_state;
        int rc;
+       u8 reset_needed = 0;
 
        dctlprintk(ioc, ioc_info(ioc, "%s\n",
                                 __func__));
@@ -2115,6 +2122,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
        rc = 0;
        *issue_reset = 0;
 
+
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
                if (ioc->diag_buffer_status[buffer_type] &
@@ -2157,9 +2165,10 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
            MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
        if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
-               *issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
-                               ioc->ctl_cmds.status, mpi_request,
-                               sizeof(Mpi2DiagReleaseRequest_t)/4);
+               mpt3sas_check_cmd_timeout(ioc,
+                   ioc->ctl_cmds.status, mpi_request,
+                   sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
+		*issue_reset = reset_needed;
                rc = -EFAULT;
                goto out;
        }
@@ -2417,10 +2426,9 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
            MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
        if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
-               issue_reset =
-                       mpt3sas_base_check_cmd_timeout(ioc,
-                               ioc->ctl_cmds.status, mpi_request,
-                               sizeof(Mpi2DiagBufferPostRequest_t)/4);
+               mpt3sas_check_cmd_timeout(ioc,
+                   ioc->ctl_cmds.status, mpi_request,
+                   sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
                goto issue_host_reset;
        }
 
index a038be8a0e905fade0f5cb4314a9ad35a04cb7cb..c597d544eb3927da2a0679ef9143fbd6e74922c8 100644 (file)
@@ -1049,6 +1049,34 @@ mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        return pcie_device;
 }
 
+/**
+ * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
+ * @ioc: per adapter object
+ * Context: This function will acquire ioc->pcie_device_lock
+ *
+ * Update ioc->max_shutdown_latency to the highest RTD3 Entry Latency
+ * reported among all available NVMe drives.
+ * The minimum max_shutdown_latency is six seconds.
+ */
+static void
+_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
+{
+       struct _pcie_device *pcie_device;
+       unsigned long flags;
+       u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
+
+       spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+       list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+               if (pcie_device->shutdown_latency) {
+                       if (shutdown_latency < pcie_device->shutdown_latency)
+                               shutdown_latency =
+                                       pcie_device->shutdown_latency;
+               }
+       }
+       ioc->max_shutdown_latency = shutdown_latency;
+       spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
 /**
  * _scsih_pcie_device_remove - remove pcie_device from list.
  * @ioc: per adapter object
@@ -1063,6 +1091,7 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
 {
        unsigned long flags;
        int was_on_pcie_device_list = 0;
+       u8 update_latency = 0;
 
        if (!pcie_device)
                return;
@@ -1082,11 +1111,21 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
                list_del_init(&pcie_device->list);
                was_on_pcie_device_list = 1;
        }
+       if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+               update_latency = 1;
        spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
        if (was_on_pcie_device_list) {
                kfree(pcie_device->serial_number);
                pcie_device_put(pcie_device);
        }
+
+       /*
+        * This device's RTD3 Entry Latency matches IOC's
+        * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
+        * from the available drives, as the current drive is being removed.
+        */
+       if (update_latency)
+               _scsih_set_nvme_max_shutdown_latency(ioc);
 }
 
 
@@ -1101,6 +1140,7 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        struct _pcie_device *pcie_device;
        unsigned long flags;
        int was_on_pcie_device_list = 0;
+       u8 update_latency = 0;
 
        if (ioc->shost_recovery)
                return;
@@ -1113,12 +1153,22 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
                        was_on_pcie_device_list = 1;
                        pcie_device_put(pcie_device);
                }
+               if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+                       update_latency = 1;
        }
        spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
        if (was_on_pcie_device_list) {
                _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
                pcie_device_put(pcie_device);
        }
+
+       /*
+        * This device's RTD3 Entry Latency matches IOC's
+        * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
+        * from the available drives, as the current drive is being removed.
+        */
+       if (update_latency)
+               _scsih_set_nvme_max_shutdown_latency(ioc);
 }
 
 /**
@@ -1554,7 +1604,12 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
                max_depth = 1;
        if (qdepth > max_depth)
                qdepth = max_depth;
-       return scsi_change_queue_depth(sdev, qdepth);
+       scsi_change_queue_depth(sdev, qdepth);
+       sdev_printk(KERN_INFO, sdev,
+           "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
+           sdev->queue_depth, sdev->tagged_supported,
+           sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
+       return sdev->queue_depth;
 }
 
 /**
@@ -2673,6 +2728,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
        u16 smid = 0;
        u32 ioc_state;
        int rc;
+       u8 issue_reset = 0;
 
        lockdep_assert_held(&ioc->tm_cmds.mutex);
 
@@ -2695,7 +2751,13 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
        }
 
        if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
-               mpt3sas_base_fault_info(ioc, ioc_state &
+               mpt3sas_print_fault_code(ioc, ioc_state &
+                   MPI2_DOORBELL_DATA_MASK);
+               rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+               return (!rc) ? SUCCESS : FAILED;
+       } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+           MPI2_IOC_STATE_COREDUMP) {
+               mpt3sas_print_coredump_info(ioc, ioc_state &
                    MPI2_DOORBELL_DATA_MASK);
                rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
                return (!rc) ? SUCCESS : FAILED;
@@ -2726,9 +2788,10 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
        ioc->put_smid_hi_priority(ioc, smid, msix_task);
        wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
        if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
-               if (mpt3sas_base_check_cmd_timeout(ioc,
-                       ioc->tm_cmds.status, mpi_request,
-                       sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
+               mpt3sas_check_cmd_timeout(ioc,
+                   ioc->tm_cmds.status, mpi_request,
+                   sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
+               if (issue_reset) {
                        rc = mpt3sas_base_hard_reset_handler(ioc,
                                        FORCE_BIG_HAMMER);
                        rc = (!rc) ? SUCCESS : FAILED;
@@ -2875,15 +2938,17 @@ scsih_abort(struct scsi_cmnd *scmd)
 
        u8 timeout = 30;
        struct _pcie_device *pcie_device = NULL;
-       sdev_printk(KERN_INFO, scmd->device,
-               "attempting task abort! scmd(%p)\n", scmd);
+       sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
+           "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
+           scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
+           (scmd->request->timeout / HZ) * 1000);
        _scsih_tm_display_info(ioc, scmd);
 
        sas_device_priv_data = scmd->device->hostdata;
        if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
            ioc->remove_host) {
                sdev_printk(KERN_INFO, scmd->device,
-                       "device been deleted! scmd(%p)\n", scmd);
+                   "device been deleted! scmd(0x%p)\n", scmd);
                scmd->result = DID_NO_CONNECT << 16;
                scmd->scsi_done(scmd);
                r = SUCCESS;
@@ -2892,6 +2957,8 @@ scsih_abort(struct scsi_cmnd *scmd)
 
        /* check for completed command */
        if (st == NULL || st->cb_idx == 0xFF) {
+               sdev_printk(KERN_INFO, scmd->device, "No reference found at "
+                   "driver, assuming scmd(0x%p) might have completed\n", scmd);
                scmd->result = DID_RESET << 16;
                r = SUCCESS;
                goto out;
@@ -2920,7 +2987,7 @@ scsih_abort(struct scsi_cmnd *scmd)
        if (r == SUCCESS && st->cb_idx != 0xFF)
                r = FAILED;
  out:
-       sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+       sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
            ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
        if (pcie_device)
                pcie_device_put(pcie_device);
@@ -2949,14 +3016,14 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
        struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
 
        sdev_printk(KERN_INFO, scmd->device,
-               "attempting device reset! scmd(%p)\n", scmd);
+           "attempting device reset! scmd(0x%p)\n", scmd);
        _scsih_tm_display_info(ioc, scmd);
 
        sas_device_priv_data = scmd->device->hostdata;
        if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
            ioc->remove_host) {
                sdev_printk(KERN_INFO, scmd->device,
-                       "device been deleted! scmd(%p)\n", scmd);
+                   "device been deleted! scmd(0x%p)\n", scmd);
                scmd->result = DID_NO_CONNECT << 16;
                scmd->scsi_done(scmd);
                r = SUCCESS;
@@ -2996,7 +3063,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
        if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
                r = FAILED;
  out:
-       sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
+       sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
            ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
 
        if (sas_device)
@@ -3027,15 +3094,15 @@ scsih_target_reset(struct scsi_cmnd *scmd)
        struct scsi_target *starget = scmd->device->sdev_target;
        struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
 
-       starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
-               scmd);
+       starget_printk(KERN_INFO, starget,
+           "attempting target reset! scmd(0x%p)\n", scmd);
        _scsih_tm_display_info(ioc, scmd);
 
        sas_device_priv_data = scmd->device->hostdata;
        if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
            ioc->remove_host) {
-               starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
-                       scmd);
+               starget_printk(KERN_INFO, starget,
+                   "target been deleted! scmd(0x%p)\n", scmd);
                scmd->result = DID_NO_CONNECT << 16;
                scmd->scsi_done(scmd);
                r = SUCCESS;
@@ -3074,7 +3141,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
        if (r == SUCCESS && atomic_read(&starget->target_busy))
                r = FAILED;
  out:
-       starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
+       starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
            ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
 
        if (sas_device)
@@ -3097,7 +3164,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
        struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
        int r, retval;
 
-       ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
+       ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
        scsi_print_command(scmd);
 
        if (ioc->is_driver_loading || ioc->remove_host) {
@@ -3109,7 +3176,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
        retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        r = (retval < 0) ? FAILED : SUCCESS;
 out:
-       ioc_info(ioc, "host reset: %s scmd(%p)\n",
+       ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
                 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
 
        return r;
@@ -4475,6 +4542,7 @@ static void
 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
        Mpi2EventDataTemperature_t *event_data)
 {
+       u32 doorbell;
        if (ioc->temp_sensors_count >= event_data->SensorNum) {
                ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
                        le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
@@ -4484,6 +4552,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
                        event_data->SensorNum);
                ioc_err(ioc, "Current Temp In Celsius: %d\n",
                        event_data->CurrentTemperature);
+               if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+                       doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+                       if ((doorbell & MPI2_IOC_STATE_MASK) ==
+                           MPI2_IOC_STATE_FAULT) {
+                               mpt3sas_print_fault_code(ioc,
+                                   doorbell & MPI2_DOORBELL_DATA_MASK);
+                       } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+                           MPI2_IOC_STATE_COREDUMP) {
+                               mpt3sas_print_coredump_info(ioc,
+                                   doorbell & MPI2_DOORBELL_DATA_MASK);
+                       }
+               }
        }
 }
 
@@ -6933,6 +7013,16 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
            le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
                pcie_device->nvme_mdts =
                    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
+               pcie_device->shutdown_latency =
+                       le16_to_cpu(pcie_device_pg2.ShutdownLatency);
+               /*
+                * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
+                * if drive's RTD3 Entry Latency is greater than IOC's
+                * max_shutdown_latency.
+                */
+               if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
+                       ioc->max_shutdown_latency =
+                               pcie_device->shutdown_latency;
                if (pcie_device_pg2.ControllerResetTO)
                        pcie_device->reset_timeout =
                            pcie_device_pg2.ControllerResetTO;
@@ -7669,10 +7759,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
        wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
 
        if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
-               issue_reset =
-                       mpt3sas_base_check_cmd_timeout(ioc,
-                               ioc->scsih_cmds.status, mpi_request,
-                               sizeof(Mpi2RaidActionRequest_t)/4);
+               mpt3sas_check_cmd_timeout(ioc,
+                   ioc->scsih_cmds.status, mpi_request,
+                   sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
                rc = -EFAULT;
                goto out;
        }
@@ -9272,15 +9361,17 @@ void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 }
 
 /**
- * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
+ *                                                     scsi & tm cmds.
  * @ioc: per adapter object
  *
  * The handler for doing any required cleanup or initialization.
  */
 void
-mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
 {
-       dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+       dtmprintk(ioc,
+           ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
        if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
                ioc->scsih_cmds.status |= MPT3_CMD_RESET;
                mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
@@ -9357,6 +9448,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
                }
                _scsih_remove_unresponding_devices(ioc);
                _scsih_scan_for_devices_after_reset(ioc);
+               _scsih_set_nvme_max_shutdown_latency(ioc);
                break;
        case MPT3SAS_PORT_ENABLE_COMPLETE:
                ioc->start_scan = 0;
@@ -9659,6 +9751,75 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
        kfree(sas_expander);
 }
 
+/**
+ * _scsih_nvme_shutdown - NVMe shutdown notification
+ * @ioc: per adapter object
+ *
+ * Sends an IoUnitControl request with the shutdown operation code to alert the
+ * IOC that the host system is shutting down, so that the IOC can issue an NVMe
+ * shutdown to the NVMe drives attached to it.
+ */
+static void
+_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
+{
+       Mpi26IoUnitControlRequest_t *mpi_request;
+       Mpi26IoUnitControlReply_t *mpi_reply;
+       u16 smid;
+
+       /* are there any NVMe devices ? */
+       if (list_empty(&ioc->pcie_device_list))
+               return;
+
+       mutex_lock(&ioc->scsih_cmds.mutex);
+
+       if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+               ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
+               goto out;
+       }
+
+       ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+       smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+       if (!smid) {
+               ioc_err(ioc,
+                   "%s: failed obtaining a smid\n", __func__);
+               ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+               goto out;
+       }
+
+       mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+       ioc->scsih_cmds.smid = smid;
+       memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
+       mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
+       mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
+
+       init_completion(&ioc->scsih_cmds.done);
+       ioc->put_smid_default(ioc, smid);
+       /* Wait for max_shutdown_latency seconds */
+       ioc_info(ioc,
+               "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
+               ioc->max_shutdown_latency);
+       wait_for_completion_timeout(&ioc->scsih_cmds.done,
+                       ioc->max_shutdown_latency*HZ);
+
+       if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+               ioc_err(ioc, "%s: timeout\n", __func__);
+               goto out;
+       }
+
+       if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+               mpi_reply = ioc->scsih_cmds.reply;
+               ioc_info(ioc, "Io Unit Control shutdown (complete):"
+                       "ioc_status(0x%04x), loginfo(0x%08x)\n",
+                       le16_to_cpu(mpi_reply->IOCStatus),
+                       le32_to_cpu(mpi_reply->IOCLogInfo));
+       }
+ out:
+       ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+       mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+
 /**
  * _scsih_ir_shutdown - IR shutdown notification
  * @ioc: per adapter object
@@ -9851,6 +10012,7 @@ scsih_shutdown(struct pci_dev *pdev)
                                &ioc->ioc_pg1_copy);
 
        _scsih_ir_shutdown(ioc);
+       _scsih_nvme_shutdown(ioc);
        mpt3sas_base_detach(ioc);
 }
 
@@ -10533,6 +10695,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
        ioc->logging_level = logging_level;
        ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+       /* Host waits for a minimum of six seconds */
+       ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
        /*
         * Enable MEMORY MOVE support flag.
         */
@@ -10681,6 +10845,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
        mpt3sas_base_stop_watchdog(ioc);
        flush_scheduled_work();
        scsi_block_requests(shost);
+       _scsih_nvme_shutdown(ioc);
        device_state = pci_choose_state(pdev, state);
        ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
                 pdev, pci_name(pdev), device_state);
@@ -10715,7 +10880,7 @@ scsih_resume(struct pci_dev *pdev)
        r = mpt3sas_base_map_resources(ioc);
        if (r)
                return r;
-
+       ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
        mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
        scsi_unblock_requests(shost);
        mpt3sas_base_start_watchdog(ioc);
@@ -10784,6 +10949,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
        if (rc)
                return PCI_ERS_RESULT_DISCONNECT;
 
+       ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
        rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 
        ioc_warn(ioc, "hard reset: %s\n",
index 5324662751bf11f262a179616e7eea5decb0fc8f..6ec5b7f33dfd7e3f97fe2b2273f008f04f92ead5 100644 (file)
@@ -719,11 +719,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
                sas_device_put(sas_device);
        }
 
-       if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
-               dev_printk(KERN_INFO, &rphy->dev,
-                       "add: handle(0x%04x), sas_addr(0x%016llx)\n",
-                       handle, (unsigned long long)
-                   mpt3sas_port->remote_identify.sas_address);
+       dev_info(&rphy->dev,
+           "add: handle(0x%04x), sas_addr(0x%016llx)\n", handle,
+           (unsigned long long)mpt3sas_port->remote_identify.sas_address);
+
        mpt3sas_port->rphy = rphy;
        spin_lock_irqsave(&ioc->sas_node_lock, flags);
        list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list);
@@ -813,6 +812,8 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
        }
        if (!ioc->remove_host)
                sas_port_delete(mpt3sas_port->port);
+       ioc_info(ioc, "%s: removed: sas_addr(0x%016llx)\n",
+           __func__, (unsigned long long)sas_address);
        kfree(mpt3sas_port);
 }
 
index 9289c19fcb2f3b26d467b3c624a8ada500588cf0..fb8eacfceee88d157120acf5b7fa0dbf94e8a6d7 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
  * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
  *
  * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
index e6702ee85e9f4a47e86e61e7cbd3cd3038982009..9f6696d0ddd5f79c30c76b459c6f4277cfe0a4ad 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
  * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
  *
  * This driver supports the newer, SCSI-based firmware interface only.
index cbaf178fc9796a976b1a008afe6c0a5f1c73062e..d7169e43f5e180edd53bb1b56ad5d238723a7c5e 100644 (file)
@@ -54,7 +54,8 @@ void qla2x00_bsg_sp_free(srb_t *sp)
        if (sp->type == SRB_CT_CMD ||
            sp->type == SRB_FXIOCB_BCMD ||
            sp->type == SRB_ELS_CMD_HST)
-               kfree(sp->fcport);
+               qla2x00_free_fcport(sp->fcport);
+
        qla2x00_rel_sp(sp);
 }
 
@@ -405,7 +406,7 @@ done_unmap_sg:
 
 done_free_fcport:
        if (bsg_request->msgcode == FC_BSG_RPT_ELS)
-               kfree(fcport);
+               qla2x00_free_fcport(fcport);
 done:
        return rval;
 }
@@ -545,7 +546,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
        return rval;
 
 done_free_fcport:
-       kfree(fcport);
+       qla2x00_free_fcport(fcport);
 done_unmap_sg:
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
@@ -796,7 +797,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
 
        if (atomic_read(&vha->loop_state) == LOOP_READY &&
            (ha->current_topology == ISP_CFG_F ||
-           (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
+           (get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
             req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
            elreq.options == EXTERNAL_LOOPBACK) {
                type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
@@ -2049,7 +2050,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
        return rval;
 
 done_free_fcport:
-       kfree(fcport);
+       qla2x00_free_fcport(fcport);
 
 done_unmap_rsp_sg:
        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
index 30afc59c18701d6b87a80f4a8871b1219a3bc6cf..e5500bba06ca06c45dffcd9d484cbc71c584229e 100644 (file)
@@ -18,7 +18,7 @@
  * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
  * |                             |                    | 0x211a         |
  * |                              |                    | 0x211c-0x2128  |
- * |                              |                    | 0x212a-0x2130  |
+ * |                              |                    | 0x212a-0x2134  |
  * | Queue Command and IO tracing |       0x3074       | 0x300b         |
  * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x303d-0x3041  |
index 2edd9f7b30742e990a879d1a6e6705a1e7eecabf..ed32e9715794ab70be0da145d28b75a3465f64f2 100644 (file)
@@ -2402,6 +2402,7 @@ typedef struct fc_port {
        unsigned int scan_needed:1;
        unsigned int n2n_flag:1;
        unsigned int explicit_logout:1;
+       unsigned int prli_pend_timer:1;
 
        struct completion nvme_del_done;
        uint32_t nvme_prli_service_param;
@@ -2428,6 +2429,7 @@ typedef struct fc_port {
        struct work_struct free_work;
        struct work_struct reg_work;
        uint64_t jiffies_at_registration;
+       unsigned long prli_expired;
        struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
 
        uint16_t tgt_id;
@@ -2464,6 +2466,7 @@ typedef struct fc_port {
        struct qla_tgt_sess *tgt_session;
        struct ct_sns_desc ct_desc;
        enum discovery_state disc_state;
+       atomic_t shadow_disc_state;
        enum discovery_state next_disc_state;
        enum login_state fw_login_state;
        unsigned long dm_login_expire;
@@ -2510,6 +2513,19 @@ struct event_arg {
 
 extern const char *const port_state_str[5];
 
+static const char * const port_dstate_str[] = {
+       "DELETED",
+       "GNN_ID",
+       "GNL",
+       "LOGIN_PEND",
+       "LOGIN_FAILED",
+       "GPDB",
+       "UPD_FCPORT",
+       "LOGIN_COMPLETE",
+       "ADISC",
+       "DELETE_PEND"
+};
+
 /*
  * FC port flags.
  */
@@ -3263,7 +3279,6 @@ enum qla_work_type {
        QLA_EVT_IDC_ACK,
        QLA_EVT_ASYNC_LOGIN,
        QLA_EVT_ASYNC_LOGOUT,
-       QLA_EVT_ASYNC_LOGOUT_DONE,
        QLA_EVT_ASYNC_ADISC,
        QLA_EVT_UEVENT,
        QLA_EVT_AENFX,
@@ -3953,7 +3968,7 @@ struct qla_hw_data {
        void            *sfp_data;
        dma_addr_t      sfp_data_dma;
 
-       void            *flt;
+       struct qla_flt_header *flt;
        dma_addr_t      flt_dma;
 
 #define XGMAC_DATA_SIZE        4096
@@ -4845,6 +4860,9 @@ struct sff_8247_a0 {
        (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \
        NVME_ONLY_TARGET(fcport)) \
 
+#define PRLI_PHASE(_cls) \
+       ((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP))
+
 #include "qla_target.h"
 #include "qla_gbl.h"
 #include "qla_dbg.h"
index 9dc09c1174169b6b5f5666dbaee9b62f76fcf627..d641918cdd467f8e16b49008c1051797fa4c85eb 100644 (file)
@@ -1354,12 +1354,12 @@ struct vp_rpt_id_entry_24xx {
        uint8_t port_id[3];
        uint8_t format;
        union {
-               struct {
+               struct _f0 {
                        /* format 0 loop */
                        uint8_t vp_idx_map[16];
                        uint8_t reserved_4[32];
                } f0;
-               struct {
+               struct _f1 {
                        /* format 1 fabric */
                        uint8_t vpstat1_subcode; /* vp_status=1 subcode */
                        uint8_t flags;
@@ -1381,21 +1381,22 @@ struct vp_rpt_id_entry_24xx {
                        uint16_t bbcr;
                        uint8_t reserved_5[6];
                } f1;
-               struct { /* format 2: N2N direct connect */
-                   uint8_t vpstat1_subcode;
-                   uint8_t flags;
-                   uint16_t rsv6;
-                   uint8_t rsv2[12];
-
-                   uint8_t ls_rjt_vendor;
-                   uint8_t ls_rjt_explanation;
-                   uint8_t ls_rjt_reason;
-                   uint8_t rsv3[5];
-
-                   uint8_t port_name[8];
-                   uint8_t node_name[8];
-                   uint8_t remote_nport_id[4];
-                   uint32_t reserved_5;
+               struct _f2 { /* format 2: N2N direct connect */
+                       uint8_t vpstat1_subcode;
+                       uint8_t flags;
+                       uint16_t fip_flags;
+                       uint8_t rsv2[12];
+
+                       uint8_t ls_rjt_vendor;
+                       uint8_t ls_rjt_explanation;
+                       uint8_t ls_rjt_reason;
+                       uint8_t rsv3[5];
+
+                       uint8_t port_name[8];
+                       uint8_t node_name[8];
+                       uint16_t bbcr;
+                       uint8_t reserved_5[2];
+                       uint8_t remote_nport_id[4];
                } f2;
        } u;
 };
@@ -1470,13 +1471,6 @@ struct qla_flt_location {
        uint16_t checksum;
 };
 
-struct qla_flt_header {
-       uint16_t version;
-       uint16_t length;
-       uint16_t checksum;
-       uint16_t unused;
-};
-
 #define FLT_REG_FW             0x01
 #define FLT_REG_BOOT_CODE      0x07
 #define FLT_REG_VPD_0          0x14
@@ -1537,6 +1531,14 @@ struct qla_flt_region {
        uint32_t end;
 };
 
+struct qla_flt_header {
+       uint16_t version;
+       uint16_t length;
+       uint16_t checksum;
+       uint16_t unused;
+       struct qla_flt_region region[0];
+};
+
 #define FLT_REGION_SIZE                16
 #define FLT_MAX_REGIONS                0xFF
 #define FLT_REGIONS_SIZE       (FLT_REGION_SIZE * FLT_MAX_REGIONS)
index 5b163ad85c34f8488f21a4e6303dee93162fa542..2a64729a2bc59f3551ce7906b25b256118a468f6 100644 (file)
@@ -72,14 +72,13 @@ extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
 extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
 extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
-extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
-    uint16_t *);
 struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
     enum qla_work_type);
 extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
 int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
 extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *);
 extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
+extern int qla24xx_async_abort_cmd(srb_t *, bool);
 
 extern void qla2x00_set_fcport_state(fc_port_t *fcport, int state);
 extern fc_port_t *
@@ -182,8 +181,6 @@ extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
-extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
-    fc_port_t *, uint16_t *);
 extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
@@ -201,6 +198,7 @@ extern void qla2x00_free_host(struct scsi_qla_host *);
 extern void qla2x00_relogin(struct scsi_qla_host *);
 extern void qla2x00_do_work(struct scsi_qla_host *);
 extern void qla2x00_free_fcports(struct scsi_qla_host *);
+extern void qla2x00_free_fcport(fc_port_t *);
 
 extern void qla83xx_schedule_work(scsi_qla_host_t *, int);
 extern void qla83xx_service_idc_aen(struct work_struct *);
@@ -253,8 +251,9 @@ extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
 extern void qla2x00_sp_free_dma(srb_t *sp);
 extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
 
-extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
-extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int);
+extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int);
+extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *);
+extern int qla24xx_async_abort_cmd(srb_t *, bool);
 
 extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
 
index 446a9d6ba25506dc06ab9c4d6a6f083f4e056ebf..aaa4a5bbf2ff0115b1eda3134b6749660a4c2e07 100644 (file)
@@ -2963,7 +2963,6 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
                return QLA_FUNCTION_FAILED;
 
        e->u.fcport.fcport = fcport;
-       fcport->flags |= FCF_ASYNC_ACTIVE;
        return qla2x00_post_work(vha, e);
 }
 
@@ -3097,9 +3096,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
 
 done_free_sp:
        sp->free(sp);
-       fcport->flags &= ~FCF_ASYNC_SENT;
 done:
-       fcport->flags &= ~FCF_ASYNC_ACTIVE;
        return rval;
 }
 
@@ -4290,7 +4287,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
        if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
                return rval;
 
-       fcport->disc_state = DSC_GNN_ID;
+       qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
        sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
        if (!sp)
                goto done;
@@ -4464,7 +4461,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
 
 done_free_sp:
        sp->free(sp);
-       fcport->flags &= ~FCF_ASYNC_SENT;
 done:
        return rval;
 }
index aa5204163becadc2e88133779eefaafde4e79103..a5076f43edea84dad5eebb19be82faeea9d6ea97 100644 (file)
@@ -49,16 +49,9 @@ qla2x00_sp_timeout(struct timer_list *t)
 {
        srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
        struct srb_iocb *iocb;
-       struct req_que *req;
-       unsigned long flags;
-       struct qla_hw_data *ha = sp->vha->hw;
 
-       WARN_ON_ONCE(irqs_disabled());
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-       req = sp->qpair->req;
-       req->outstanding_cmds[sp->handle] = NULL;
+       WARN_ON(irqs_disabled());
        iocb = &sp->u.iocb_cmd;
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
        iocb->timeout(sp);
 }
 
@@ -142,7 +135,7 @@ static void qla24xx_abort_sp_done(srb_t *sp, int res)
                sp->free(sp);
 }
 
-static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
+int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
 {
        scsi_qla_host_t *vha = cmd_sp->vha;
        struct srb_iocb *abt_iocb;
@@ -242,6 +235,7 @@ qla2x00_async_iocb_timeout(void *data)
        case SRB_NACK_PRLI:
        case SRB_NACK_LOGO:
        case SRB_CTRL_VP:
+       default:
                rc = qla24xx_async_abort_cmd(sp, false);
                if (rc) {
                        spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
@@ -258,10 +252,6 @@ qla2x00_async_iocb_timeout(void *data)
                        sp->done(sp, QLA_FUNCTION_TIMEOUT);
                }
                break;
-       default:
-               WARN_ON_ONCE(true);
-               sp->done(sp, QLA_FUNCTION_TIMEOUT);
-               break;
        }
 }
 
@@ -326,10 +316,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
        if (!sp)
                goto done;
 
+       qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
        fcport->flags |= FCF_ASYNC_SENT;
        fcport->logout_completed = 0;
 
-       fcport->disc_state = DSC_LOGIN_PEND;
        sp->type = SRB_LOGIN_CMD;
        sp->name = "login";
        sp->gen1 = fcport->rscn_gen;
@@ -425,7 +415,7 @@ qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
        fcport->flags &= ~FCF_ASYNC_ACTIVE;
        /* Don't re-login in target mode */
        if (!fcport->tgt_session)
-               qla2x00_mark_device_lost(vha, fcport, 1, 0);
+               qla2x00_mark_device_lost(vha, fcport, 1);
        qlt_logo_completion_handler(fcport, data[0]);
 }
 
@@ -533,7 +523,7 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
 
        e->u.fcport.fcport = fcport;
        fcport->flags |= FCF_ASYNC_ACTIVE;
-       fcport->disc_state = DSC_LOGIN_PEND;
+       qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
        return qla2x00_post_work(vha, e);
 }
 
@@ -685,7 +675,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
        port_id_t id;
        u64 wwn;
        u16 data[2];
-       u8 current_login_state;
+       u8 current_login_state, nvme_cls;
 
        fcport = ea->fcport;
        ql_dbg(ql_dbg_disc, vha, 0xffff,
@@ -744,10 +734,17 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
 
                loop_id = le16_to_cpu(e->nport_handle);
                loop_id = (loop_id & 0x7fff);
-               if (NVME_TARGET(vha->hw, fcport))
-                       current_login_state = e->current_login_state >> 4;
-               else
-                       current_login_state = e->current_login_state & 0xf;
+               nvme_cls = e->current_login_state >> 4;
+               current_login_state = e->current_login_state & 0xf;
+
+               if (PRLI_PHASE(nvme_cls)) {
+                       current_login_state = nvme_cls;
+                       fcport->fc4_type &= ~FS_FC4TYPE_FCP;
+                       fcport->fc4_type |= FS_FC4TYPE_NVME;
+               } else if (PRLI_PHASE(current_login_state)) {
+                       fcport->fc4_type |= FS_FC4TYPE_FCP;
+                       fcport->fc4_type &= ~FS_FC4TYPE_NVME;
+               }
 
                ql_dbg(ql_dbg_disc, vha, 0x20e2,
                    "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
@@ -836,7 +833,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                                 * with GNL. Push disc_state back to DELETED
                                 * so GNL can go out again
                                 */
-                               fcport->disc_state = DSC_DELETED;
+                               qla2x00_set_fcport_disc_state(fcport,
+                                   DSC_DELETED);
                                break;
                        case DSC_LS_PRLI_COMP:
                                if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
@@ -912,7 +910,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                        qla24xx_fcport_handle_login(vha, fcport);
                        break;
                case ISP_CFG_N:
-                       fcport->disc_state = DSC_DELETED;
+                       qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
                        if (time_after_eq(jiffies, fcport->dm_login_expire)) {
                                if (fcport->n2n_link_reset_cnt < 2) {
                                        fcport->n2n_link_reset_cnt++;
@@ -992,7 +990,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
                set_bit(loop_id, vha->hw->loop_id_map);
                wwn = wwn_to_u64(e->port_name);
 
-               ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
+               ql_dbg(ql_dbg_disc, vha, 0x20e8,
                    "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
                    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
                    e->port_id[0], e->current_login_state, e->last_login_state,
@@ -1051,6 +1049,16 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
 
        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
        vha->gnl.sent = 0;
+       if (!list_empty(&vha->gnl.fcports)) {
+               /* retrigger gnl */
+               list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
+                   gnl_entry) {
+                       list_del_init(&fcport->gnl_entry);
+                       fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+                       if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
+                               break;
+               }
+       }
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
        sp->free(sp);
@@ -1072,7 +1080,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
 
        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
        fcport->flags |= FCF_ASYNC_SENT;
-       fcport->disc_state = DSC_GNL;
+       qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
        fcport->last_rscn_gen = fcport->rscn_gen;
        fcport->last_login_gen = fcport->login_gen;
 
@@ -1121,8 +1129,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
 
 done_free_sp:
        sp->free(sp);
-       fcport->flags &= ~FCF_ASYNC_SENT;
 done:
+       fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
        return rval;
 }
 
@@ -1216,12 +1224,19 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
        struct srb_iocb *lio;
        int rval = QLA_FUNCTION_FAILED;
 
-       if (!vha->flags.online)
+       if (!vha->flags.online) {
+               ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
+                   __func__, __LINE__, fcport->port_name);
                return rval;
+       }
 
-       if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
-           fcport->fw_login_state == DSC_LS_PRLI_PEND)
+       if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
+           fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
+           qla_dual_mode_enabled(vha)) {
+               ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
+                   __func__, __LINE__, fcport->port_name);
                return rval;
+       }
 
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
@@ -1295,12 +1310,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
                return rval;
        }
 
-       fcport->disc_state = DSC_GPDB;
-
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
                goto done;
 
+       qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);
+
        fcport->flags |= FCF_ASYNC_SENT;
        sp->type = SRB_MB_IOCB;
        sp->name = "gpdb";
@@ -1349,6 +1364,7 @@ done_free_sp:
        sp->free(sp);
        fcport->flags &= ~FCF_ASYNC_SENT;
 done:
+       fcport->flags &= ~FCF_ASYNC_ACTIVE;
        qla24xx_post_gpdb_work(vha, fcport, opt);
        return rval;
 }
@@ -1379,7 +1395,7 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
                ql_dbg(ql_dbg_disc, vha, 0x20d6,
                    "%s %d %8phC session revalidate success\n",
                    __func__, __LINE__, ea->fcport->port_name);
-               ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
+               qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
        }
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 }
@@ -1433,7 +1449,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
                /* Set discovery state back to GNL to Relogin attempt */
                if (qla_dual_mode_enabled(vha) ||
                    qla_ini_mode_enabled(vha)) {
-                       fcport->disc_state = DSC_GNL;
+                       qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                }
                return;
@@ -1600,6 +1616,10 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
                        break;
                default:
                        if (fcport->login_pause) {
+                               ql_dbg(ql_dbg_disc, vha, 0x20d8,
+                                   "%s %d %8phC exit\n",
+                                   __func__, __LINE__,
+                                   fcport->port_name);
                                fcport->last_rscn_gen = fcport->rscn_gen;
                                fcport->last_login_gen = fcport->login_gen;
                                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1758,9 +1778,23 @@ qla2x00_tmf_iocb_timeout(void *data)
 {
        srb_t *sp = data;
        struct srb_iocb *tmf = &sp->u.iocb_cmd;
+       int rc, h;
+       unsigned long flags;
 
-       tmf->u.tmf.comp_status = CS_TIMEOUT;
-       complete(&tmf->u.tmf.comp);
+       rc = qla24xx_async_abort_cmd(sp, false);
+       if (rc) {
+               spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+               for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+                       if (sp->qpair->req->outstanding_cmds[h] == sp) {
+                               sp->qpair->req->outstanding_cmds[h] = NULL;
+                               break;
+                       }
+               }
+               spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+               tmf->u.tmf.comp_status = CS_TIMEOUT;
+               tmf->u.tmf.data = QLA_FUNCTION_FAILED;
+               complete(&tmf->u.tmf.comp);
+       }
 }
 
 static void qla2x00_tmf_sp_done(srb_t *sp, int res)
@@ -1976,7 +2010,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                        qla24xx_post_prli_work(vha, ea->fcport);
                } else {
                        ql_dbg(ql_dbg_disc, vha, 0x20ea,
-                           "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
+                           "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
                            __func__, __LINE__, ea->fcport->port_name,
                            ea->fcport->loop_id, ea->fcport->d_id.b24);
 
@@ -1996,11 +2030,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
 
                ea->fcport->flags &= ~FCF_ASYNC_SENT;
-               ea->fcport->disc_state = DSC_LOGIN_FAILED;
+               qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
                if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                else
-                       qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
+                       qla2x00_mark_device_lost(vha, ea->fcport, 1);
                break;
        case MBS_LOOP_ID_USED:
                /* data[1] = IO PARAM 1 = nport ID  */
@@ -2047,6 +2081,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                        set_bit(lid, vha->hw->loop_id_map);
                        ea->fcport->loop_id = lid;
                        ea->fcport->keep_nport_handle = 0;
+                       ea->fcport->logout_on_delete = 1;
                        qlt_schedule_sess_for_deletion(ea->fcport);
                }
                break;
@@ -2054,16 +2089,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
        return;
 }
 
-void
-qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
-    uint16_t *data)
-{
-       qlt_logo_completion_handler(fcport, data[0]);
-       fcport->login_gen++;
-       fcport->flags &= ~FCF_ASYNC_ACTIVE;
-       return;
-}
-
 /****************************************************************************/
 /*                QLogic ISP2x00 Hardware Support Functions.                */
 /****************************************************************************/
@@ -4925,12 +4950,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
        qla2x00_get_data_rate(vha);
 
        /* Determine what we need to do */
-       if (ha->current_topology == ISP_CFG_FL &&
-           (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
-
-               set_bit(RSCN_UPDATE, &flags);
-
-       } else if (ha->current_topology == ISP_CFG_F &&
+       if ((ha->current_topology == ISP_CFG_FL ||
+           ha->current_topology == ISP_CFG_F) &&
            (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
 
                set_bit(RSCN_UPDATE, &flags);
@@ -5230,7 +5251,7 @@ skip_login:
                            qla_ini_mode_enabled(vha)) &&
                            atomic_read(&fcport->state) == FCS_ONLINE) {
                                qla2x00_mark_device_lost(vha, fcport,
-                                       ql2xplogiabsentdevice, 0);
+                                       ql2xplogiabsentdevice);
                                if (fcport->loop_id != FC_NO_LOOP_ID &&
                                    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
                                    fcport->port_type != FCT_INITIATOR &&
@@ -5251,7 +5272,7 @@ skip_login:
        }
 
 cleanup_allocation:
-       kfree(new_fcport);
+       qla2x00_free_fcport(new_fcport);
 
        if (rval != QLA_SUCCESS) {
                ql_dbg(ql_dbg_disc, vha, 0x2098,
@@ -5385,11 +5406,14 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
        ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
            __func__, fcport->port_name);
 
-       fcport->disc_state = DSC_UPD_FCPORT;
+       qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
        fcport->login_retry = vha->hw->login_retry_count;
        fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
        fcport->deleted = 0;
-       fcport->logout_on_delete = 1;
+       if (vha->hw->current_topology == ISP_CFG_NL)
+               fcport->logout_on_delete = 0;
+       else
+               fcport->logout_on_delete = 1;
        fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
 
        switch (vha->hw->current_topology) {
@@ -5405,7 +5429,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 
        if (NVME_TARGET(vha->hw, fcport)) {
                qla_nvme_register_remote(vha, fcport);
-               fcport->disc_state = DSC_LOGIN_COMPLETE;
+               qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
                qla2x00_set_fcport_state(fcport, FCS_ONLINE);
                return;
        }
@@ -5450,7 +5474,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
                }
        }
 
-       fcport->disc_state = DSC_LOGIN_COMPLETE;
+       qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
 }
 
 void qla_register_fcport_fn(struct work_struct *work)
@@ -5859,7 +5883,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
 
                if (NVME_TARGET(vha->hw, fcport)) {
                        if (fcport->disc_state == DSC_DELETE_PEND) {
-                               fcport->disc_state = DSC_GNL;
+                               qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
                                vha->fcport_count--;
                                fcport->login_succ = 0;
                        }
@@ -5905,7 +5929,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
                            qla_ini_mode_enabled(vha)) &&
                            atomic_read(&fcport->state) == FCS_ONLINE) {
                                qla2x00_mark_device_lost(vha, fcport,
-                                       ql2xplogiabsentdevice, 0);
+                                       ql2xplogiabsentdevice);
                                if (fcport->loop_id != FC_NO_LOOP_ID &&
                                    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
                                    fcport->port_type != FCT_INITIATOR &&
@@ -6071,7 +6095,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
                        ha->isp_ops->fabric_logout(vha, fcport->loop_id,
                            fcport->d_id.b.domain, fcport->d_id.b.area,
                            fcport->d_id.b.al_pa);
-                       qla2x00_mark_device_lost(vha, fcport, 1, 0);
+                       qla2x00_mark_device_lost(vha, fcport, 1);
 
                        rval = 1;
                        break;
@@ -6585,9 +6609,9 @@ qla2x00_quiesce_io(scsi_qla_host_t *vha)
        atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
-               qla2x00_mark_all_devices_lost(vha, 0);
+               qla2x00_mark_all_devices_lost(vha);
                list_for_each_entry(vp, &ha->vp_list, list)
-                       qla2x00_mark_all_devices_lost(vp, 0);
+                       qla2x00_mark_all_devices_lost(vp);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer,
@@ -6663,14 +6687,14 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
-               qla2x00_mark_all_devices_lost(vha, 0);
+               qla2x00_mark_all_devices_lost(vha);
 
                spin_lock_irqsave(&ha->vport_slock, flags);
                list_for_each_entry(vp, &ha->vp_list, list) {
                        atomic_inc(&vp->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
 
-                       qla2x00_mark_all_devices_lost(vp, 0);
+                       qla2x00_mark_all_devices_lost(vp);
 
                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vp->vref_count);
index 352aba4127f7d499117b945a5488c3159b70c250..364b3db8b2dcab08df2314b415e5c2bde2a67df3 100644 (file)
@@ -105,6 +105,30 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
        INIT_LIST_HEAD(&ctx->dsd_list);
 }
 
+static inline void
+qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
+{
+       int old_val;
+       uint8_t shiftbits, mask;
+
+       /* This will have to change when the max no. of states > 16 */
+       shiftbits = 4;
+       mask = (1 << shiftbits) - 1;
+
+       fcport->disc_state = state;
+       while (1) {
+               old_val = atomic_read(&fcport->shadow_disc_state);
+               if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
+                   old_val, (old_val << shiftbits) | state)) {
+                       ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
+                           "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
+                           fcport->port_name, port_dstate_str[old_val & mask],
+                           port_dstate_str[state], fcport->d_id.b24);
+                       return;
+               }
+       }
+}
+
 static inline int
 qla2x00_hba_err_chk_enabled(srb_t *sp)
 {
index 8b050f0b43330543f365a4f8e83eb2ae67342813..47bf60a9490a0f4ce90505a766c2aae0329234b5 100644 (file)
@@ -2537,13 +2537,32 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
        fc_port_t *fcport = sp->fcport;
        struct scsi_qla_host *vha = sp->vha;
        struct srb_iocb *lio = &sp->u.iocb_cmd;
+       unsigned long flags = 0;
+       int res, h;
 
        ql_dbg(ql_dbg_io, vha, 0x3069,
            "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
            sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
            fcport->d_id.b.al_pa);
 
-       complete(&lio->u.els_logo.comp);
+       /* Abort the exchange */
+       res = qla24xx_async_abort_cmd(sp, false);
+       if (res) {
+               ql_dbg(ql_dbg_io, vha, 0x3070,
+                   "mbx abort_command failed.\n");
+               spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+               for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+                       if (sp->qpair->req->outstanding_cmds[h] == sp) {
+                               sp->qpair->req->outstanding_cmds[h] = NULL;
+                               break;
+                       }
+               }
+               spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+               complete(&lio->u.els_logo.comp);
+       } else {
+               ql_dbg(ql_dbg_io, vha, 0x3071,
+                   "mbx abort_command success.\n");
+       }
 }
 
 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
@@ -2717,23 +2736,29 @@ qla2x00_els_dcmd2_iocb_timeout(void *data)
        srb_t *sp = data;
        fc_port_t *fcport = sp->fcport;
        struct scsi_qla_host *vha = sp->vha;
-       struct qla_hw_data *ha = vha->hw;
        unsigned long flags = 0;
-       int res;
+       int res, h;
 
        ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
            "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
            sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
 
        /* Abort the exchange */
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-       res = ha->isp_ops->abort_command(sp);
+       res = qla24xx_async_abort_cmd(sp, false);
        ql_dbg(ql_dbg_io, vha, 0x3070,
            "mbx abort_command %s\n",
            (res == QLA_SUCCESS) ? "successful" : "failed");
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-       sp->done(sp, QLA_FUNCTION_TIMEOUT);
+       if (res) {
+               spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+               for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+                       if (sp->qpair->req->outstanding_cmds[h] == sp) {
+                               sp->qpair->req->outstanding_cmds[h] = NULL;
+                               break;
+                       }
+               }
+               spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+               sp->done(sp, QLA_FUNCTION_TIMEOUT);
+       }
 }
 
 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
@@ -2852,7 +2877,8 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
                                    fw_status[0], fw_status[1], fw_status[2]);
 
                                fcport->flags &= ~FCF_ASYNC_SENT;
-                               fcport->disc_state = DSC_LOGIN_FAILED;
+                               qla2x00_set_fcport_disc_state(fcport,
+                                   DSC_LOGIN_FAILED);
                                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                                break;
                        }
@@ -2865,7 +2891,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
                            fw_status[0], fw_status[1], fw_status[2]);
 
                        sp->fcport->flags &= ~FCF_ASYNC_SENT;
-                       sp->fcport->disc_state = DSC_LOGIN_FAILED;
+                       qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                        break;
                }
@@ -2898,11 +2924,12 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
        if (!sp) {
                ql_log(ql_log_info, vha, 0x70e6,
                 "SRB allocation failed\n");
+               fcport->flags &= ~FCF_ASYNC_ACTIVE;
                return -ENOMEM;
        }
 
        fcport->flags |= FCF_ASYNC_SENT;
-       fcport->disc_state = DSC_LOGIN_PEND;
+       qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
        elsio = &sp->u.iocb_cmd;
        ql_dbg(ql_dbg_io, vha, 0x3073,
            "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
@@ -2975,7 +3002,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
        }
 
 out:
-       fcport->flags &= ~(FCF_ASYNC_SENT);
+       fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
        qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
        sp->free(sp);
 done:
index 7b8a6bfcf08d97c1cc1421cd67eb53dfbd8ea9c9..e7bad0bfffda1c09446396bc1eab1d023f4c5c89 100644 (file)
@@ -788,7 +788,7 @@ skip_rio:
                if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                        atomic_set(&vha->loop_state, LOOP_DOWN);
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
-                       qla2x00_mark_all_devices_lost(vha, 1);
+                       qla2x00_mark_all_devices_lost(vha);
                }
 
                if (vha->vp_idx) {
@@ -861,7 +861,7 @@ skip_rio:
                        }
 
                        vha->device_flags |= DFLG_NO_CABLE;
-                       qla2x00_mark_all_devices_lost(vha, 1);
+                       qla2x00_mark_all_devices_lost(vha);
                }
 
                if (vha->vp_idx) {
@@ -881,7 +881,7 @@ skip_rio:
                if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                        atomic_set(&vha->loop_state, LOOP_DOWN);
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
-                       qla2x00_mark_all_devices_lost(vha, 1);
+                       qla2x00_mark_all_devices_lost(vha);
                }
 
                if (vha->vp_idx) {
@@ -924,7 +924,7 @@ skip_rio:
                                atomic_set(&vha->loop_down_timer,
                                    LOOP_DOWN_TIME);
                        if (!N2N_TOPO(ha))
-                               qla2x00_mark_all_devices_lost(vha, 1);
+                               qla2x00_mark_all_devices_lost(vha);
                }
 
                if (vha->vp_idx) {
@@ -953,7 +953,7 @@ skip_rio:
                        if (!atomic_read(&vha->loop_down_timer))
                                atomic_set(&vha->loop_down_timer,
                                    LOOP_DOWN_TIME);
-                       qla2x00_mark_all_devices_lost(vha, 1);
+                       qla2x00_mark_all_devices_lost(vha);
                }
 
                if (vha->vp_idx) {
@@ -1022,7 +1022,6 @@ skip_rio:
                            "Marking port lost loopid=%04x portid=%06x.\n",
                            fcport->loop_id, fcport->d_id.b24);
                        if (qla_ini_mode_enabled(vha)) {
-                               qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
                                fcport->logout_on_delete = 0;
                                qlt_schedule_sess_for_deletion(fcport);
                        }
@@ -1034,14 +1033,14 @@ global_port_update:
                                atomic_set(&vha->loop_down_timer,
                                    LOOP_DOWN_TIME);
                                vha->device_flags |= DFLG_NO_CABLE;
-                               qla2x00_mark_all_devices_lost(vha, 1);
+                               qla2x00_mark_all_devices_lost(vha);
                        }
 
                        if (vha->vp_idx) {
                                atomic_set(&vha->vp_state, VP_FAILED);
                                fc_vport_set_state(vha->fc_vport,
                                    FC_VPORT_FAILED);
-                               qla2x00_mark_all_devices_lost(vha, 1);
+                               qla2x00_mark_all_devices_lost(vha);
                        }
 
                        vha->flags.management_server_logged_in = 0;
@@ -1253,11 +1252,33 @@ global_port_update:
 
        case MBA_DPORT_DIAGNOSTICS:
                ql_dbg(ql_dbg_async, vha, 0x5052,
-                   "D-Port Diagnostics: %04x result=%s\n",
-                   mb[0],
-                   mb[1] == 0 ? "start" :
-                   mb[1] == 1 ? "done (pass)" :
-                   mb[1] == 2 ? "done (error)" : "other");
+                   "D-Port Diagnostics: %04x %04x %04x %04x\n",
+                   mb[0], mb[1], mb[2], mb[3]);
+               if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+                       static char *results[] = {
+                           "start", "done(pass)", "done(error)", "undefined" };
+                       static char *types[] = {
+                           "none", "dynamic", "static", "other" };
+                       uint result = mb[1] >> 0 & 0x3;
+                       uint type = mb[1] >> 6 & 0x3;
+                       uint sw = mb[1] >> 15 & 0x1;
+                       ql_dbg(ql_dbg_async, vha, 0x5052,
+                           "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
+                           results[result], types[type], sw);
+                       if (result == 2) {
+                               static char *reasons[] = {
+                                   "reserved", "unexpected reject",
+                                   "unexpected phase", "retry exceeded",
+                                   "timed out", "not supported",
+                                   "user stopped" };
+                               uint reason = mb[2] >> 0 & 0xf;
+                               uint phase = mb[2] >> 12 & 0xf;
+                               ql_dbg(ql_dbg_async, vha, 0x5052,
+                                   "D-Port Diagnostics: reason=%s phase=%u \n",
+                                   reason < 7 ? reasons[reason] : "other",
+                                   phase >> 1);
+                       }
+               }
                break;
 
        case MBA_TEMPERATURE_ALERT:
@@ -2152,12 +2173,12 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
         * swab32 of the "data" field in the beginning of qla2x00_status_entry()
         * would make guard field appear at offset 2
         */
-       a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
-       a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
-       a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
-       e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
-       e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
-       e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
+       a_guard   = get_unaligned_le16(ap + 2);
+       a_app_tag = get_unaligned_le16(ap + 0);
+       a_ref_tag = get_unaligned_le32(ap + 4);
+       e_guard   = get_unaligned_le16(ep + 2);
+       e_app_tag = get_unaligned_le16(ep + 0);
+       e_ref_tag = get_unaligned_le32(ep + 4);
 
        ql_dbg(ql_dbg_io, vha, 0x3023,
            "iocb(s) %p Returned STATUS.\n", sts24);
@@ -2745,7 +2766,6 @@ check_scsi_status:
                                port_state_str[FCS_ONLINE],
                                comp_status);
 
-                       qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
                        qlt_schedule_sess_for_deletion(fcport);
                }
 
index b7c1108c48e208f2e634309acd84d46b182bd7e3..9e09964f5c0e4ad9819a59c5038036d8a781b294 100644 (file)
@@ -6152,9 +6152,8 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
        mcp->mb[7] = LSW(MSD(req_dma));
        mcp->mb[8] = MSW(addr);
        /* Setting RAM ID to valid */
-       mcp->mb[10] |= BIT_7;
        /* For MCTP RAM ID is 0x40 */
-       mcp->mb[10] |= 0x40;
+       mcp->mb[10] = BIT_7 | 0x40;
 
        mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
            MBX_0;
index eabc5127174ed90c0cac48c1864826dbb35f3e15..8ae639d089d13068468a4436f03d156b00ef95df 100644 (file)
@@ -147,7 +147,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
                    "Marking port dead, loop_id=0x%04x : %x.\n",
                    fcport->loop_id, fcport->vha->vp_idx);
 
-               qla2x00_mark_device_lost(vha, fcport, 0, 0);
+               qla2x00_mark_device_lost(vha, fcport, 0);
                qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        }
 }
@@ -167,7 +167,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
        list_for_each_entry(fcport, &vha->vp_fcports, list)
                fcport->logout_on_delete = 0;
 
-       qla2x00_mark_all_devices_lost(vha, 0);
+       qla2x00_mark_all_devices_lost(vha);
 
        /* Remove port id from vp target map */
        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
@@ -327,7 +327,7 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
-               qla2x00_mark_all_devices_lost(vha, 0);
+               qla2x00_mark_all_devices_lost(vha);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
index 605b59c76c9010d8c82bbbfa464a02db077f7673..bad043c406225fe0e78f4c87aa7f206925b59bf9 100644 (file)
@@ -1210,9 +1210,9 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha,
                                    " Existing TGT-ID %x did not get "
                                    " offline event from firmware.\n",
                                    fcport->old_tgt_id);
-                               qla2x00_mark_device_lost(vha, fcport, 0, 0);
+                               qla2x00_mark_device_lost(vha, fcport, 0);
                                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-                               kfree(new_fcport);
+                               qla2x00_free_fcport(new_fcport);
                                return rval;
                        }
                        break;
@@ -1230,7 +1230,7 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha,
                        return QLA_MEMORY_ALLOC_FAILED;
        }
 
-       kfree(new_fcport);
+       qla2x00_free_fcport(new_fcport);
        return rval;
 }
 
@@ -1274,7 +1274,7 @@ qlafx00_configure_all_targets(scsi_qla_host_t *vha)
 
                if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
                        if (fcport->port_type != FCT_INITIATOR)
-                               qla2x00_mark_device_lost(vha, fcport, 0, 0);
+                               qla2x00_mark_device_lost(vha, fcport, 0);
                }
        }
 
@@ -1298,7 +1298,7 @@ qlafx00_configure_all_targets(scsi_qla_host_t *vha)
        /* Free all new device structures not processed. */
        list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
                list_del(&fcport->list);
-               kfree(fcport);
+               qla2x00_free_fcport(fcport);
        }
 
        return rval;
@@ -1706,7 +1706,7 @@ qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
        if (!fcport)
                return;
 
-       qla2x00_mark_device_lost(vha, fcport, 0, 0);
+       qla2x00_mark_device_lost(vha, fcport, 0);
 
        return;
 }
@@ -1740,7 +1740,7 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
                                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                        } else if (evt->u.aenfx.mbx[2] == 2) {
                                vha->device_flags |= DFLG_NO_CABLE;
-                               qla2x00_mark_all_devices_lost(vha, 1);
+                               qla2x00_mark_all_devices_lost(vha);
                        }
                }
                break;
@@ -2513,7 +2513,7 @@ check_scsi_status:
                    atomic_read(&fcport->state));
 
                if (atomic_read(&fcport->state) == FCS_ONLINE)
-                       qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+                       qla2x00_mark_device_lost(fcport->vha, fcport, 1);
                break;
 
        case CS_ABORTED:
index 2b2028f2383e60c9cc6cf0aadad3551e294de43a..185c5f34d4c14757283a26395aadaf5ecb7131e3 100644 (file)
@@ -1612,8 +1612,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha)
        return (u8 *)&ha->hablob->fw->data[offset];
 }
 
-static __le32
-qla82xx_get_fw_size(struct qla_hw_data *ha)
+static u32 qla82xx_get_fw_size(struct qla_hw_data *ha)
 {
        struct qla82xx_uri_data_desc *uri_desc = NULL;
 
@@ -1624,7 +1623,7 @@ qla82xx_get_fw_size(struct qla_hw_data *ha)
                        return cpu_to_le32(uri_desc->size);
        }
 
-       return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
+       return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]);
 }
 
 static u8 *
@@ -1816,7 +1815,7 @@ qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
        }
 
        flashaddr = FLASH_ADDR_START;
-       size = (__force u32)qla82xx_get_fw_size(ha) / 8;
+       size = qla82xx_get_fw_size(ha) / 8;
        ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
 
        for (i = 0; i < size; i++) {
@@ -1883,7 +1882,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
 static int
 qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
 {
-       __le32 val;
+       uint32_t val;
        uint32_t min_size;
        struct qla_hw_data *ha = vha->hw;
        const struct firmware *fw = ha->hablob->fw;
@@ -1896,8 +1895,8 @@ qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
 
                min_size = QLA82XX_URI_FW_MIN_SIZE;
        } else {
-               val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
-               if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
+               val = get_unaligned_le32(&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
+               if (val != QLA82XX_BDINFO_MAGIC)
                        return -EINVAL;
 
                min_size = QLA82XX_FW_MIN_SIZE;
@@ -3030,7 +3029,7 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
        /* Set DEV_FAILED flag to disable timer */
        vha->device_flags |= DFLG_DEV_FAILED;
        qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
-       qla2x00_mark_all_devices_lost(vha, 0);
+       qla2x00_mark_all_devices_lost(vha);
        vha->flags.online = 0;
        vha->flags.init_done = 0;
 }
index 8b84bc4a6ac81d59f01cf57dc26a4adda22f07f3..b520a980d1dc2e789b969c481083abf646e105ca 100644 (file)
@@ -1110,7 +1110,7 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
        u8 i;
 
-       qla2x00_mark_all_devices_lost(vha, 0);
+       qla2x00_mark_all_devices_lost(vha);
 
        for (i = 0; i < 10; i++) {
                if (wait_event_timeout(vha->fcport_waitQ,
@@ -1667,7 +1667,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
        if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
-               qla2x00_mark_all_devices_lost(vha, 0);
+               qla2x00_mark_all_devices_lost(vha);
                ret = qla2x00_full_login_lip(vha);
                if (ret != QLA_SUCCESS) {
                        ql_dbg(ql_dbg_taskm, vha, 0x802d,
@@ -3854,37 +3854,21 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha)
 }
 
 static inline void
-qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
-    int defer)
+qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
-       struct fc_rport *rport;
-       scsi_qla_host_t *base_vha;
-       unsigned long flags;
+       int now;
 
        if (!fcport->rport)
                return;
 
-       rport = fcport->rport;
-       if (defer) {
-               base_vha = pci_get_drvdata(vha->hw->pdev);
-               spin_lock_irqsave(vha->host->host_lock, flags);
-               fcport->drport = rport;
-               spin_unlock_irqrestore(vha->host->host_lock, flags);
-               qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
-               set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
-               qla2xxx_wake_dpc(base_vha);
-       } else {
-               int now;
-
-               if (rport) {
-                       ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
-                           "%s %8phN. rport %p roles %x\n",
-                           __func__, fcport->port_name, rport,
-                           rport->roles);
-                       fc_remote_port_delete(rport);
-               }
-               qlt_do_generation_tick(vha, &now);
+       if (fcport->rport) {
+               ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
+                   "%s %8phN. rport %p roles %x\n",
+                   __func__, fcport->port_name, fcport->rport,
+                   fcport->rport->roles);
+               fc_remote_port_delete(fcport->rport);
        }
+       qlt_do_generation_tick(vha, &now);
 }
 
 /*
@@ -3897,18 +3881,18 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
  * Context:
  */
 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
-    int do_login, int defer)
+    int do_login)
 {
        if (IS_QLAFX00(vha->hw)) {
                qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
-               qla2x00_schedule_rport_del(vha, fcport, defer);
+               qla2x00_schedule_rport_del(vha, fcport);
                return;
        }
 
        if (atomic_read(&fcport->state) == FCS_ONLINE &&
            vha->vp_idx == fcport->vha->vp_idx) {
                qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
-               qla2x00_schedule_rport_del(vha, fcport, defer);
+               qla2x00_schedule_rport_del(vha, fcport);
        }
        /*
         * We may need to retry the login, so don't change the state of the
@@ -3937,7 +3921,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
  * Context:
  */
 void
-qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
+qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
 {
        fc_port_t *fcport;
 
@@ -3957,13 +3941,6 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
                 */
                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
                        continue;
-               if (atomic_read(&fcport->state) == FCS_ONLINE) {
-                       qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
-                       if (defer)
-                               qla2x00_schedule_rport_del(vha, fcport, defer);
-                       else if (vha->vp_idx == fcport->vha->vp_idx)
-                               qla2x00_schedule_rport_del(vha, fcport, defer);
-               }
        }
 }
 
@@ -4965,7 +4942,6 @@ int qla2x00_post_async_##name##_work(             \
 
 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
-qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
@@ -5032,7 +5008,7 @@ void qla24xx_sched_upd_fcport(fc_port_t *fcport)
        fcport->jiffies_at_registration = jiffies;
        fcport->sec_since_registration = 0;
        fcport->next_disc_state = DSC_DELETED;
-       fcport->disc_state = DSC_UPD_FCPORT;
+       qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
        spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
 
        queue_work(system_unbound_wq, &fcport->reg_work);
@@ -5253,10 +5229,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
                case QLA_EVT_ASYNC_LOGOUT:
                        rc = qla2x00_async_logout(vha, e->u.logio.fcport);
                        break;
-               case QLA_EVT_ASYNC_LOGOUT_DONE:
-                       qla2x00_async_logout_done(vha, e->u.logio.fcport,
-                           e->u.logio.data);
-                       break;
                case QLA_EVT_ASYNC_ADISC:
                        qla2x00_async_adisc(vha, e->u.logio.fcport,
                            e->u.logio.data);
@@ -6899,13 +6871,13 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
                qpair->online = 0;
        mutex_unlock(&ha->mq_lock);
 
-       qla2x00_mark_all_devices_lost(vha, 0);
+       qla2x00_mark_all_devices_lost(vha);
 
        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vp, &ha->vp_list, list) {
                atomic_inc(&vp->vref_count);
                spin_unlock_irqrestore(&ha->vport_slock, flags);
-               qla2x00_mark_all_devices_lost(vp, 0);
+               qla2x00_mark_all_devices_lost(vp);
                spin_lock_irqsave(&ha->vport_slock, flags);
                atomic_dec(&vp->vref_count);
        }
@@ -7270,6 +7242,8 @@ qla2x00_module_init(void)
        BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
        BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
        BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
+       BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
+       BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
 
        /* Allocate cache for SRBs. */
        srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
index bbe90354f49b09488fa2a3b126f1941694fbe7b1..76a38bf86cbc3de6e07ff51c14a8c809d0b2c036 100644 (file)
@@ -669,8 +669,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
 
        struct qla_hw_data *ha = vha->hw;
        uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0;
-       struct qla_flt_header *flt = (void *)ha->flt;
-       struct qla_flt_region *region = (void *)&flt[1];
+       struct qla_flt_header *flt = ha->flt;
+       struct qla_flt_region *region = &flt->region[0];
        uint16_t *wptr, cnt, chksum;
        uint32_t start;
 
@@ -2652,18 +2652,15 @@ qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start,
     struct qla_flt_region *region)
 {
        struct qla_hw_data *ha = vha->hw;
-       struct qla_flt_header *flt;
-       struct qla_flt_region *flt_reg;
+       struct qla_flt_header *flt = ha->flt;
+       struct qla_flt_region *flt_reg = &flt->region[0];
        uint16_t cnt;
        int rval = QLA_FUNCTION_FAILED;
 
        if (!ha->flt)
                return QLA_FUNCTION_FAILED;
 
-       flt = (struct qla_flt_header *)ha->flt;
-       flt_reg = (struct qla_flt_region *)&flt[1];
        cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
-
        for (; cnt; cnt--, flt_reg++) {
                if (flt_reg->start == start) {
                        memcpy((uint8_t *)region, flt_reg,
index 68c14143e50e4884f4057ad6071b662d9bd38378..70081b395fb2fc3d06d76add17ca7179b6ebca81 100644 (file)
@@ -596,7 +596,8 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
                        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
                } else {
                        sp->fcport->login_retry = 0;
-                       sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
+                       qla2x00_set_fcport_disc_state(sp->fcport,
+                           DSC_LOGIN_COMPLETE);
                        sp->fcport->deleted = 0;
                        sp->fcport->logout_on_delete = 1;
                }
@@ -957,7 +958,7 @@ void qlt_free_session_done(struct work_struct *work)
        struct qlt_plogi_ack_t *own =
                sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
 
-       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
+       ql_dbg(ql_dbg_disc, vha, 0xf084,
                "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
                " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
                __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
@@ -966,7 +967,7 @@ void qlt_free_session_done(struct work_struct *work)
                sess->send_els_logo);
 
        if (!IS_SW_RESV_ADDR(sess->d_id)) {
-               qla2x00_mark_device_lost(vha, sess, 0, 0);
+               qla2x00_mark_device_lost(vha, sess, 0);
 
                if (sess->send_els_logo) {
                        qlt_port_logo_t logo;
@@ -1024,7 +1025,7 @@ void qlt_free_session_done(struct work_struct *work)
 
                while (!READ_ONCE(sess->logout_completed)) {
                        if (!traced) {
-                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
+                               ql_dbg(ql_dbg_disc, vha, 0xf086,
                                        "%s: waiting for sess %p logout\n",
                                        __func__, sess);
                                traced = true;
@@ -1045,6 +1046,10 @@ void qlt_free_session_done(struct work_struct *work)
                        (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
        }
 
+       spin_lock_irqsave(&vha->work_lock, flags);
+       sess->flags &= ~FCF_ASYNC_SENT;
+       spin_unlock_irqrestore(&vha->work_lock, flags);
+
        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        if (sess->se_sess) {
                sess->se_sess = NULL;
@@ -1052,7 +1057,7 @@ void qlt_free_session_done(struct work_struct *work)
                        tgt->sess_count--;
        }
 
-       sess->disc_state = DSC_DELETED;
+       qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
        sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
        sess->deleted = QLA_SESS_DELETED;
 
@@ -1108,7 +1113,7 @@ void qlt_free_session_done(struct work_struct *work)
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
        sess->free_pending = 0;
 
-       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+       ql_dbg(ql_dbg_disc, vha, 0xf001,
            "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
                sess, sess->port_name, vha->fcport_count);
 
@@ -1151,13 +1156,18 @@ void qlt_unreg_sess(struct fc_port *sess)
                return;
        }
        sess->free_pending = 1;
+       /*
+        * Use FCF_ASYNC_SENT flag to block other cmds used in sess
+        * management from being sent.
+        */
+       sess->flags |= FCF_ASYNC_SENT;
        spin_unlock_irqrestore(&sess->vha->work_lock, flags);
 
        if (sess->se_sess)
                vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
 
        sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
-       sess->disc_state = DSC_DELETE_PEND;
+       qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
        sess->last_rscn_gen = sess->rscn_gen;
        sess->last_login_gen = sess->login_gen;
 
@@ -1257,7 +1267,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
        sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
        spin_unlock_irqrestore(&sess->vha->work_lock, flags);
 
-       sess->disc_state = DSC_DELETE_PEND;
+       sess->prli_pend_timer = 0;
+       qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
 
        qla24xx_chk_fcp_state(sess);
 
@@ -3446,13 +3457,13 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
 
        cmd->trc_flags |= TRC_DIF_ERR;
 
-       cmd->a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
-       cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
-       cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
+       cmd->a_guard   = get_unaligned_be16(ap + 0);
+       cmd->a_app_tag = get_unaligned_be16(ap + 2);
+       cmd->a_ref_tag = get_unaligned_be32(ap + 4);
 
-       cmd->e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
-       cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
-       cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
+       cmd->e_guard   = get_unaligned_be16(ep + 0);
+       cmd->e_app_tag = get_unaligned_be16(ep + 2);
+       cmd->e_ref_tag = get_unaligned_be32(ep + 4);
 
        ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
            "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
@@ -4579,7 +4590,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
                /* find other sess with nport_id collision */
                if (port_id.b24 == other_sess->d_id.b24) {
                        if (loop_id != other_sess->loop_id) {
-                               ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
+                               ql_dbg(ql_dbg_disc, vha, 0x1000c,
                                    "Invalidating sess %p loop_id %d wwn %llx.\n",
                                    other_sess, other_sess->loop_id, other_wwn);
 
@@ -4595,7 +4606,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
                                 * Another wwn used to have our s_id/loop_id
                                 * kill the session, but don't free the loop_id
                                 */
-                               ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
+                               ql_dbg(ql_dbg_disc, vha, 0xf01b,
                                    "Invalidating sess %p loop_id %d wwn %llx.\n",
                                    other_sess, other_sess->loop_id, other_wwn);
 
@@ -4610,7 +4621,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
                /* find other sess with nport handle collision */
                if ((loop_id == other_sess->loop_id) &&
                        (loop_id != FC_NO_LOOP_ID)) {
-                       ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
+                       ql_dbg(ql_dbg_disc, vha, 0x1000d,
                               "Invalidating sess %p loop_id %d wwn %llx.\n",
                               other_sess, other_sess->loop_id, other_wwn);
 
@@ -6053,7 +6064,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
                if (!IS_SW_RESV_ADDR(fcport->d_id))
                   vha->fcport_count++;
                fcport->login_gen++;
-               fcport->disc_state = DSC_LOGIN_COMPLETE;
+               qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
                fcport->login_succ = 1;
                newfcport = 1;
        }
index d006f0a97b8cf43797aa91e4c54fd3376e9dfc2a..6539499e9e956008892d52b292a6d631ef96fe56 100644 (file)
@@ -379,8 +379,7 @@ static inline int get_datalen_for_atio(struct atio_from_isp *atio)
 {
        int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
 
-       return (be32_to_cpu(get_unaligned((uint32_t *)
-           &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+       return get_unaligned_be32(&atio->u.isp24.fcp_cmnd.add_cdb[len * 4]);
 }
 
 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
index 03bd3b712b77499a1749ff9d29bad86b29a51cee..bb03c022e023a2c75478fa12452686135b07f3d3 100644 (file)
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "10.01.00.21-k"
+#define QLA2XXX_VERSION      "10.01.00.22-k"
 
 #define QLA_DRIVER_MAJOR_VER   10
 #define QLA_DRIVER_MINOR_VER   1
index 2323432a0edbcd07345235263a9bf8a610817b97..5504ab11decc7537ffb90da752868144f8734072 100644 (file)
@@ -4145,7 +4145,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
                dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
                                  ha->queues_dma);
 
-        if (ha->fw_dump)
+       if (ha->fw_dump)
                vfree(ha->fw_dump);
 
        ha->queues_len = 0;
index 3e7a45d0dacad97f8828eb01fdc997759705470a..610ee41fa54cbb55119d7275167cca117f34876c 100644 (file)
@@ -2108,6 +2108,8 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 
        memset(data, 0, sizeof(*data));
        memset(&cmd[0], 0, 12);
+
+       dbd = sdev->set_dbd_for_ms ? 8 : dbd;
        cmd[1] = dbd & 0x18;    /* allows DBD and LLBA bits */
        cmd[2] = modepage;
 
index 836185de28c4e57304c1bcc02dbed41e2574ccb5..3df877886119dadbd543f3351b2e561573f6bdb5 100644 (file)
@@ -53,7 +53,7 @@ do {                                                          \
 } while (0)
 #else
 #define SCSI_LOG_LEVEL(SHIFT, BITS) 0
-#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD)
+#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do { } while (0)
 #endif /* CONFIG_SCSI_LOGGING */
 
 /*
index e0bd4cf1723073e3b27d0158c351a83871334353..e4282bce583475a046a679093d4ddf6cc706b105 100644 (file)
@@ -325,22 +325,21 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
 }
 
 /**
- * sd_zbc_check_zones - Check the device capacity and zone sizes
+ * sd_zbc_check_capacity - Check the device capacity
  * @sdkp: Target disk
+ * @buf: command buffer
+ * @zblock: zone size in number of blocks
  *
- * Check that the device capacity as reported by READ CAPACITY matches the
- * max_lba value (plus one)of the report zones command reply. Also check that
- * all zones of the device have an equal size, only allowing the last zone of
- * the disk to have a smaller size (runt zone). The zone size must also be a
- * power of two.
+ * Get the device zone size and check that the device capacity as reported
+ * by READ CAPACITY matches the max_lba value (plus one) of the report zones
+ * command reply for devices with RC_BASIS == 0.
  *
- * Returns the zone size in number of blocks upon success or an error code
- * upon failure.
+ * Returns 0 upon success or an error code upon failure.
  */
-static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
-                             u32 *zblocks)
+static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
+                                u32 *zblocks)
 {
-       u64 zone_blocks = 0;
+       u64 zone_blocks;
        sector_t max_lba;
        unsigned char *rec;
        int ret;
@@ -363,17 +362,9 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, unsigned char *buf,
                }
        }
 
-       /* Parse REPORT ZONES header */
+       /* Get the size of the first reported zone */
        rec = buf + 64;
        zone_blocks = get_unaligned_be64(&rec[8]);
-       if (!zone_blocks || !is_power_of_2(zone_blocks)) {
-               if (sdkp->first_scan)
-                       sd_printk(KERN_NOTICE, sdkp,
-                                 "Devices with non power of 2 zone "
-                                 "size are not supported\n");
-               return -ENODEV;
-       }
-
        if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
                if (sdkp->first_scan)
                        sd_printk(KERN_NOTICE, sdkp,
@@ -405,11 +396,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
        if (ret)
                goto err;
 
-       /*
-        * Check zone size: only devices with a constant zone size (except
-        * an eventual last runt zone) that is a power of 2 are supported.
-        */
-       ret = sd_zbc_check_zones(sdkp, buf, &zone_blocks);
+       /* Check the device capacity reported by report zones */
+       ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks);
        if (ret != 0)
                goto err;
 
index 9dc17f1288f9a0daa1376a884ffd21a071578028..d37e2a69136ad62a54853df2da959e3ddba34d9c 100644 (file)
@@ -227,7 +227,7 @@ static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram)
 /*
  *  24C16 EEPROM reading.
  *
- *  GPOI0 - data in/data out
+ *  GPIO0 - data in/data out
  *  GPIO1 - clock
  *  Symbios NVRAM wiring now also used by Tekram.
  */
@@ -524,7 +524,7 @@ static int sym_read_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram)
 /*
  *  93C46 EEPROM reading.
  *
- *  GPOI0 - data in
+ *  GPIO0 - data in
  *  GPIO1 - data out
  *  GPIO2 - clock
  *  GPIO4 - chip select
index 6feeb0faf123af11818f9a84207102d67d12e2bb..56a6a1ed5ec2eb8263199fa8467473018362ccdd 100644 (file)
 
 #define CDNS_UFS_REG_HCLKDIV   0xFC
 #define CDNS_UFS_REG_PHY_XCFGD1        0x113C
+#define CDNS_UFS_MAX_L4_ATTRS 12
+
+struct cdns_ufs_host {
+       /**
+        * cdns_ufs_dme_attr_val - for storing L4 attributes
+        */
+       u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS];
+};
+
+/**
+ * cdns_ufs_get_l4_attr - get L4 attributes on local side
+ * @hba: per adapter instance
+ *
+ */
+static void cdns_ufs_get_l4_attr(struct ufs_hba *hba)
+{
+       struct cdns_ufs_host *host = ufshcd_get_variant(hba);
+
+       ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID),
+                      &host->cdns_ufs_dme_attr_val[0]);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID),
+                      &host->cdns_ufs_dme_attr_val[1]);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
+                      &host->cdns_ufs_dme_attr_val[2]);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID),
+                      &host->cdns_ufs_dme_attr_val[3]);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS),
+                      &host->cdns_ufs_dme_attr_val[4]);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
+                      &host->cdns_ufs_dme_attr_val[5]);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
+                      &host->cdns_ufs_dme_attr_val[6]);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
+                      &host->cdns_ufs_dme_attr_val[7]);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
+                      &host->cdns_ufs_dme_attr_val[8]);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
+                      &host->cdns_ufs_dme_attr_val[9]);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE),
+                      &host->cdns_ufs_dme_attr_val[10]);
+       ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
+                      &host->cdns_ufs_dme_attr_val[11]);
+}
+
+/**
+ * cdns_ufs_set_l4_attr - set L4 attributes on local side
+ * @hba: per adapter instance
+ *
+ */
+static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
+{
+       struct cdns_ufs_host *host = ufshcd_get_variant(hba);
+
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID),
+                      host->cdns_ufs_dme_attr_val[0]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID),
+                      host->cdns_ufs_dme_attr_val[1]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
+                      host->cdns_ufs_dme_attr_val[2]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID),
+                      host->cdns_ufs_dme_attr_val[3]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS),
+                      host->cdns_ufs_dme_attr_val[4]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
+                      host->cdns_ufs_dme_attr_val[5]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
+                      host->cdns_ufs_dme_attr_val[6]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
+                      host->cdns_ufs_dme_attr_val[7]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
+                      host->cdns_ufs_dme_attr_val[8]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
+                      host->cdns_ufs_dme_attr_val[9]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE),
+                      host->cdns_ufs_dme_attr_val[10]);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
+                      host->cdns_ufs_dme_attr_val[11]);
+}
 
 /**
  * Sets HCLKDIV register value based on the core_clk
@@ -77,6 +156,22 @@ static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
        return cdns_ufs_set_hclkdiv(hba);
 }
 
+/**
+ * Called around hibern8 enter/exit.
+ * @hba: host controller instance
+ * @cmd: UIC Command
+ * @status: notify stage (pre, post change)
+ *
+ */
+static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
+                                   enum ufs_notify_change_status status)
+{
+       if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER)
+               cdns_ufs_get_l4_attr(hba);
+       if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT)
+               cdns_ufs_set_l4_attr(hba);
+}
+
 /**
  * Called before and after Link startup is carried out.
  * @hba: host controller instance
@@ -117,6 +212,14 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
 static int cdns_ufs_init(struct ufs_hba *hba)
 {
        int status = 0;
+       struct cdns_ufs_host *host;
+       struct device *dev = hba->dev;
+
+       host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+
+       if (!host)
+               return -ENOMEM;
+       ufshcd_set_variant(hba, host);
 
        if (hba->vops && hba->vops->phy_initialization)
                status = hba->vops->phy_initialization(hba);
@@ -144,8 +247,10 @@ static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
 
 static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
        .name = "cdns-ufs-pltfm",
+       .init = cdns_ufs_init,
        .hce_enable_notify = cdns_ufs_hce_enable_notify,
        .link_startup_notify = cdns_ufs_link_startup_notify,
+       .hibern8_notify = cdns_ufs_hibern8_notify,
 };
 
 static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
@@ -154,6 +259,7 @@ static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
        .hce_enable_notify = cdns_ufs_hce_enable_notify,
        .link_startup_notify = cdns_ufs_link_startup_notify,
        .phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
+       .hibern8_notify = cdns_ufs_hibern8_notify,
 };
 
 static const struct of_device_id cdns_ufs_of_match[] = {
@@ -219,6 +325,7 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
 static struct platform_driver cdns_ufs_pltfrm_driver = {
        .probe  = cdns_ufs_pltfrm_probe,
        .remove = cdns_ufs_pltfrm_remove,
+       .shutdown = ufshcd_pltfrm_shutdown,
        .driver = {
                .name   = "cdns-ufshcd",
                .pm     = &cdns_ufs_dev_pm_ops,
index 83e28edc3ac5b9408b20ee2c635907cfe2e51281..41f80eeada463294aa52a5924ca57cdee6d15cc8 100644 (file)
@@ -6,16 +6,29 @@
  *     Peter Wang <peter.wang@mediatek.com>
  */
 
+#include <linux/arm-smccc.h>
+#include <linux/bitfield.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
 
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
 #include "unipro.h"
 #include "ufs-mediatek.h"
 
+#define ufs_mtk_smc(cmd, val, res) \
+       arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
+                     cmd, val, 0, 0, 0, 0, 0, &(res))
+
+#define ufs_mtk_ref_clk_notify(on, res) \
+       ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
+
+#define ufs_mtk_device_reset_ctrl(high, res) \
+       ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
+
 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
 {
        u32 tmp;
@@ -81,6 +94,49 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
        return err;
 }
 
+static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
+{
+       struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+       struct arm_smccc_res res;
+       unsigned long timeout;
+       u32 value;
+
+       if (host->ref_clk_enabled == on)
+               return 0;
+
+       if (on) {
+               ufs_mtk_ref_clk_notify(on, res);
+               ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
+       } else {
+               ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
+       }
+
+       /* Wait for ack */
+       timeout = jiffies + msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS);
+       do {
+               value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
+
+               /* Wait until ack bit equals to req bit */
+               if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
+                       goto out;
+
+               usleep_range(100, 200);
+       } while (time_before(jiffies, timeout));
+
+       dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
+
+       ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);
+
+       return -ETIMEDOUT;
+
+out:
+       host->ref_clk_enabled = on;
+       if (!on)
+               ufs_mtk_ref_clk_notify(on, res);
+
+       return 0;
+}
+
 /**
  * ufs_mtk_setup_clocks - enables/disable clocks
  * @hba: host controller instance
@@ -105,12 +161,16 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
 
        switch (status) {
        case PRE_CHANGE:
-               if (!on)
+               if (!on) {
+                       ufs_mtk_setup_ref_clk(hba, on);
                        ret = phy_power_off(host->mphy);
+               }
                break;
        case POST_CHANGE:
-               if (on)
+               if (on) {
                        ret = phy_power_on(host->mphy);
+                       ufs_mtk_setup_ref_clk(hba, on);
+               }
                break;
        }
 
@@ -150,6 +210,9 @@ static int ufs_mtk_init(struct ufs_hba *hba)
        /* Enable runtime autosuspend */
        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
 
+       /* Enable clock-gating */
+       hba->caps |= UFSHCD_CAP_CLK_GATING;
+
        /*
         * ufshcd_vops_init() is invoked after
         * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
@@ -238,6 +301,23 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
        return ret;
 }
 
+static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+{
+       unsigned long flags;
+       u32 ah_ms;
+
+       if (ufshcd_is_clkgating_allowed(hba)) {
+               if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
+                       ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
+                                         hba->ahit);
+               else
+                       ah_ms = 10;
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               hba->clk_gating.delay_ms = ah_ms + 5;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       }
+}
+
 static int ufs_mtk_post_link(struct ufs_hba *hba)
 {
        /* disable device LCC */
@@ -246,6 +326,15 @@ static int ufs_mtk_post_link(struct ufs_hba *hba)
        /* enable unipro clock gating feature */
        ufs_mtk_cfg_unipro_cg(hba, true);
 
+       /* configure auto-hibern8 timer to 10ms */
+       if (ufshcd_is_auto_hibern8_supported(hba)) {
+               ufshcd_auto_hibern8_update(hba,
+                       FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
+                       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
+       }
+
+       ufs_mtk_setup_clk_gating(hba);
+
        return 0;
 }
 
@@ -269,12 +358,37 @@ static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
        return ret;
 }
 
+static void ufs_mtk_device_reset(struct ufs_hba *hba)
+{
+       struct arm_smccc_res res;
+
+       ufs_mtk_device_reset_ctrl(0, res);
+
+       /*
+        * The reset signal is active low. UFS devices shall detect
+        * more than or equal to 1us of positive or negative RST_n
+        * pulse width.
+        *
+        * To be on safe side, keep the reset low for at least 10us.
+        */
+       usleep_range(10, 15);
+
+       ufs_mtk_device_reset_ctrl(1, res);
+
+       /* Some devices may need time to respond to rst_n */
+       usleep_range(10000, 15000);
+
+       dev_info(hba->dev, "device reset done\n");
+}
+
 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
-       if (ufshcd_is_link_hibern8(hba))
+       if (ufshcd_is_link_hibern8(hba)) {
                phy_power_off(host->mphy);
+               ufs_mtk_setup_ref_clk(hba, false);
+       }
 
        return 0;
 }
@@ -283,8 +397,10 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
-       if (ufshcd_is_link_hibern8(hba))
+       if (ufshcd_is_link_hibern8(hba)) {
+               ufs_mtk_setup_ref_clk(hba, true);
                phy_power_on(host->mphy);
+       }
 
        return 0;
 }
@@ -303,6 +419,7 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
        .pwr_change_notify   = ufs_mtk_pwr_change_notify,
        .suspend             = ufs_mtk_suspend,
        .resume              = ufs_mtk_resume,
+       .device_reset        = ufs_mtk_device_reset,
 };
 
 /**
index 19f8c42fe06ff3c363299efc11413f2f1712bd82..31b7fead19eb954fc8ae69e6273cae9e3c219b05 100644 (file)
@@ -6,6 +6,25 @@
 #ifndef _UFS_MEDIATEK_H
 #define _UFS_MEDIATEK_H
 
+#include <linux/bitops.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
+
+/*
+ * Vendor specific UFSHCI Registers
+ */
+#define REG_UFS_REFCLK_CTRL         0x144
+
+/*
+ * Ref-clk control
+ *
+ * Values for register REG_UFS_REFCLK_CTRL
+ */
+#define REFCLK_RELEASE              0x0
+#define REFCLK_REQUEST              BIT(0)
+#define REFCLK_ACK                  BIT(1)
+
+#define REFCLK_REQ_TIMEOUT_MS       3
+
 /*
  * Vendor specific pre-defined parameters
  */
 #define VS_SAVEPOWERCONTROL         0xD0A6
 #define VS_UNIPROPOWERDOWNCONTROL   0xD0A8
 
+/*
+ * SiP commands
+ */
+#define MTK_SIP_UFS_CONTROL               MTK_SIP_SMC_CMD(0x276)
+#define UFS_MTK_SIP_DEVICE_RESET          BIT(1)
+#define UFS_MTK_SIP_REF_CLK_NOTIFICATION  BIT(3)
+
 /*
  * VS_DEBUGCLOCKENABLE
  */
@@ -48,6 +74,7 @@ enum {
 struct ufs_mtk_host {
        struct ufs_hba *hba;
        struct phy *mphy;
+       bool ref_clk_enabled;
 };
 
 #endif /* !_UFS_MEDIATEK_H */
index ad2abc96c0f19cc36008632d0adf0606f6b6c429..720be3f64be70a1640f15108fb7711d5bc77cfa1 100644 (file)
@@ -118,26 +118,6 @@ static ssize_t spm_target_link_state_show(struct device *dev,
                                ufs_pm_lvl_states[hba->spm_lvl].link_state));
 }
 
-static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
-{
-       unsigned long flags;
-
-       if (!ufshcd_is_auto_hibern8_supported(hba))
-               return;
-
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->ahit != ahit)
-               hba->ahit = ahit;
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-       if (!pm_runtime_suspended(hba->dev)) {
-               pm_runtime_get_sync(hba->dev);
-               ufshcd_hold(hba, false);
-               ufshcd_auto_hibern8_enable(hba);
-               ufshcd_release(hba);
-               pm_runtime_put(hba->dev);
-       }
-}
-
 /* Convert Auto-Hibernate Idle Timer register value to microseconds */
 static int ufshcd_ahit_to_us(u32 ahit)
 {
index e5621e59a432994d897cfb785d0932accb781d9d..0f4e750a6748146198bc9dc24a9553dd80dbf60e 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (C) 2018 Western Digital Corporation
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Western Digital Corporation
  */
 
 #ifndef __UFS_SYSFS_H__
index 3327981ef894e935f56086f6d3ffb3c504fe52de..c89f21698629fe00a6d5c564014895e489258e5a 100644 (file)
@@ -499,9 +499,9 @@ struct ufs_query_res {
 #define UFS_VREG_VCC_MAX_UV       3600000 /* uV */
 #define UFS_VREG_VCC_1P8_MIN_UV    1700000 /* uV */
 #define UFS_VREG_VCC_1P8_MAX_UV    1950000 /* uV */
-#define UFS_VREG_VCCQ_MIN_UV      1100000 /* uV */
-#define UFS_VREG_VCCQ_MAX_UV      1300000 /* uV */
-#define UFS_VREG_VCCQ2_MIN_UV     1650000 /* uV */
+#define UFS_VREG_VCCQ_MIN_UV      1140000 /* uV */
+#define UFS_VREG_VCCQ_MAX_UV      1260000 /* uV */
+#define UFS_VREG_VCCQ2_MIN_UV     1700000 /* uV */
 #define UFS_VREG_VCCQ2_MAX_UV     1950000 /* uV */
 
 /*
index b5966faf3e984190f7af8f60daebcc32cbccf5dc..1b97f2dc0b636b88f4cf7fe5d524b14d469c1b35 100644 (file)
@@ -266,26 +266,18 @@ static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
        return tag >= 0 && tag < hba->nutrs;
 }
 
-static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+static inline void ufshcd_enable_irq(struct ufs_hba *hba)
 {
-       int ret = 0;
-
        if (!hba->is_irq_enabled) {
-               ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
-                               hba);
-               if (ret)
-                       dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
-                               __func__, ret);
+               enable_irq(hba->irq);
                hba->is_irq_enabled = true;
        }
-
-       return ret;
 }
 
 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 {
        if (hba->is_irq_enabled) {
-               free_irq(hba->irq, hba);
+               disable_irq(hba->irq);
                hba->is_irq_enabled = false;
        }
 }
@@ -335,27 +327,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
        u8 opcode = 0;
        u32 intr, doorbell;
        struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+       struct scsi_cmnd *cmd = lrbp->cmd;
        int transfer_len = -1;
 
        if (!trace_ufshcd_command_enabled()) {
                /* trace UPIU W/O tracing command */
-               if (lrbp->cmd)
+               if (cmd)
                        ufshcd_add_cmd_upiu_trace(hba, tag, str);
                return;
        }
 
-       if (lrbp->cmd) { /* data phase exists */
+       if (cmd) { /* data phase exists */
                /* trace UPIU also */
                ufshcd_add_cmd_upiu_trace(hba, tag, str);
-               opcode = (u8)(*lrbp->cmd->cmnd);
+               opcode = cmd->cmnd[0];
                if ((opcode == READ_10) || (opcode == WRITE_10)) {
                        /*
                         * Currently we only fully trace read(10) and write(10)
                         * commands
                         */
-                       if (lrbp->cmd->request && lrbp->cmd->request->bio)
-                               lba =
-                                 lrbp->cmd->request->bio->bi_iter.bi_sector;
+                       if (cmd->request && cmd->request->bio)
+                               lba = cmd->request->bio->bi_iter.bi_sector;
                        transfer_len = be32_to_cpu(
                                lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
                }
@@ -436,8 +428,7 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba)
 
        ufshcd_print_clk_freqs(hba);
 
-       if (hba->vops && hba->vops->dbg_register_dump)
-               hba->vops->dbg_register_dump(hba);
+       ufshcd_vops_dbg_register_dump(hba);
 }
 
 static
@@ -497,8 +488,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
 static void ufshcd_print_host_state(struct ufs_hba *hba)
 {
        dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
-       dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
-               hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
+       dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
+               hba->outstanding_reqs, hba->outstanding_tasks);
        dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
                hba->saved_err, hba->saved_uic_err);
        dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -645,40 +636,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
        return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
 }
 
-/**
- * ufshcd_get_tm_free_slot - get a free slot for task management request
- * @hba: per adapter instance
- * @free_slot: pointer to variable with available slot value
- *
- * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
- * Returns 0 if free slot is not available, else return 1 with tag value
- * in @free_slot.
- */
-static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
-{
-       int tag;
-       bool ret = false;
-
-       if (!free_slot)
-               goto out;
-
-       do {
-               tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
-               if (tag >= hba->nutmrs)
-                       goto out;
-       } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
-
-       *free_slot = tag;
-       ret = true;
-out:
-       return ret;
-}
-
-static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
-{
-       clear_bit_unlock(slot, &hba->tm_slots_in_use);
-}
-
 /**
  * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
  * @hba: per adapter instance
@@ -1273,6 +1230,24 @@ out:
        return ret;
 }
 
+static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
+{
+       int *busy = priv;
+
+       WARN_ON_ONCE(reserved);
+       (*busy)++;
+       return false;
+}
+
+/* Whether or not any tag is in use by a request that is in progress. */
+static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
+{
+       struct request_queue *q = hba->cmd_queue;
+       int busy = 0;
+
+       blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
+       return busy;
+}
 
 static int ufshcd_devfreq_get_dev_status(struct device *dev,
                struct devfreq_dev_status *stat)
@@ -1490,6 +1465,8 @@ static void ufshcd_ungate_work(struct work_struct *work)
        spin_unlock_irqrestore(hba->host->host_lock, flags);
        ufshcd_setup_clocks(hba, true);
 
+       ufshcd_enable_irq(hba);
+
        /* Exit from hibern8 */
        if (ufshcd_can_hibern8_during_gating(hba)) {
                /* Prevent gating in this path */
@@ -1619,7 +1596,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 
        if (hba->clk_gating.active_reqs
                || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
-               || hba->lrb_in_use || hba->outstanding_tasks
+               || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
                || hba->active_uic_cmd || hba->uic_async_done)
                goto rel_lock;
 
@@ -1636,6 +1613,8 @@ static void ufshcd_gate_work(struct work_struct *work)
                ufshcd_set_link_hibern8(hba);
        }
 
+       ufshcd_disable_irq(hba);
+
        if (!ufshcd_is_link_active(hba))
                ufshcd_setup_clocks(hba, false);
        else
@@ -1673,7 +1652,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
 
        if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
                || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
-               || hba->lrb_in_use || hba->outstanding_tasks
+               || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
                || hba->active_uic_cmd || hba->uic_async_done
                || ufshcd_eh_in_progress(hba))
                return;
@@ -1881,12 +1860,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
        hba->lrb[task_tag].issue_time_stamp = ktime_get();
        hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
+       ufshcd_add_command_trace(hba, task_tag, "send");
        ufshcd_clk_scaling_start_busy(hba);
        __set_bit(task_tag, &hba->outstanding_reqs);
        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        /* Make sure that doorbell is committed immediately */
        wmb();
-       ufshcd_add_command_trace(hba, task_tag, "send");
 }
 
 /**
@@ -2239,6 +2218,7 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
 static
 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
 {
+       struct scsi_cmnd *cmd = lrbp->cmd;
        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
        unsigned short cdb_len;
 
@@ -2252,12 +2232,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
        /* Total EHS length and Data segment length will be zero */
        ucd_req_ptr->header.dword_2 = 0;
 
-       ucd_req_ptr->sc.exp_data_transfer_len =
-               cpu_to_be32(lrbp->cmd->sdb.length);
+       ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
 
-       cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
+       cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
        memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
-       memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+       memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
 
        memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 }
@@ -2443,22 +2422,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
        hba->req_abort_count = 0;
 
-       /* acquire the tag to make sure device cmds don't use it */
-       if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
-               /*
-                * Dev manage command in progress, requeue the command.
-                * Requeuing the command helps in cases where the request *may*
-                * find different tag instead of waiting for dev manage command
-                * completion.
-                */
-               err = SCSI_MLQUEUE_HOST_BUSY;
-               goto out;
-       }
-
        err = ufshcd_hold(hba, true);
        if (err) {
                err = SCSI_MLQUEUE_HOST_BUSY;
-               clear_bit_unlock(tag, &hba->lrb_in_use);
                goto out;
        }
        WARN_ON(hba->clk_gating.state != CLKS_ON);
@@ -2479,7 +2445,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        err = ufshcd_map_sg(hba, lrbp);
        if (err) {
                lrbp->cmd = NULL;
-               clear_bit_unlock(tag, &hba->lrb_in_use);
+               ufshcd_release(hba);
                goto out;
        }
        /* Make sure descriptors are ready before ringing the doorbell */
@@ -2626,44 +2592,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
        return err;
 }
 
-/**
- * ufshcd_get_dev_cmd_tag - Get device management command tag
- * @hba: per-adapter instance
- * @tag_out: pointer to variable with available slot value
- *
- * Get a free slot and lock it until device management command
- * completes.
- *
- * Returns false if free slot is unavailable for locking, else
- * return true with tag value in @tag.
- */
-static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
-{
-       int tag;
-       bool ret = false;
-       unsigned long tmp;
-
-       if (!tag_out)
-               goto out;
-
-       do {
-               tmp = ~hba->lrb_in_use;
-               tag = find_last_bit(&tmp, hba->nutrs);
-               if (tag >= hba->nutrs)
-                       goto out;
-       } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
-
-       *tag_out = tag;
-       ret = true;
-out:
-       return ret;
-}
-
-static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
-{
-       clear_bit_unlock(tag, &hba->lrb_in_use);
-}
-
 /**
  * ufshcd_exec_dev_cmd - API for sending device management requests
  * @hba: UFS hba
@@ -2676,6 +2604,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
                enum dev_cmd_type cmd_type, int timeout)
 {
+       struct request_queue *q = hba->cmd_queue;
+       struct request *req;
        struct ufshcd_lrb *lrbp;
        int err;
        int tag;
@@ -2689,7 +2619,13 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
         * Even though we use wait_event() which sleeps indefinitely,
         * the maximum wait time is bounded by SCSI request timeout.
         */
-       wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+       req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto out_unlock;
+       }
+       tag = req->tag;
+       WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
 
        init_completion(&wait);
        lrbp = &hba->lrb[tag];
@@ -2714,8 +2650,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
                        err ? "query_complete_err" : "query_complete");
 
 out_put_tag:
-       ufshcd_put_dev_cmd_tag(hba, tag);
-       wake_up(&hba->dev_cmd.tag_wq);
+       blk_put_request(req);
+out_unlock:
        up_read(&hba->clk_scaling_lock);
        return err;
 }
@@ -2918,7 +2854,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba,
        int ret = 0;
        u32 retries;
 
-        for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+       for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
                ret = ufshcd_query_attr(hba, opcode, idn, index,
                                                selector, attr_val);
                if (ret)
@@ -3956,6 +3892,24 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
        return ret;
 }
 
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
+{
+       unsigned long flags;
+
+       if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+               return;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (hba->ahit == ahit)
+               goto out_unlock;
+       hba->ahit = ahit;
+       if (!pm_runtime_suspended(hba->dev))
+               ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+out_unlock:
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
+
 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
 {
        unsigned long flags;
@@ -4095,6 +4049,26 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
                                                pwr_mode->hs_rate);
 
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+                       DL_FC0ProtectionTimeOutVal_Default);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+                       DL_TC0ReplayTimeOutVal_Default);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+                       DL_AFC0ReqTimeOutVal_Default);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+                       DL_FC1ProtectionTimeOutVal_Default);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+                       DL_TC1ReplayTimeOutVal_Default);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+                       DL_AFC1ReqTimeOutVal_Default);
+
+       ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+                       DL_FC0ProtectionTimeOutVal_Default);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+                       DL_TC0ReplayTimeOutVal_Default);
+       ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+                       DL_AFC0ReqTimeOutVal_Default);
+
        ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
                        | pwr_mode->pwr_tx);
 
@@ -4608,6 +4582,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
        /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
        sdev->use_10_for_ms = 1;
 
+       /* DBD field should be set to 1 in mode sense(10) */
+       sdev->set_dbd_for_ms = 1;
+
        /* allow SCSI layer to restart the device in case of errors */
        sdev->allow_restart = 1;
 
@@ -4799,7 +4776,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                break;
        } /* end of switch */
 
-       if (host_byte(result) != DID_OK)
+       if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
                ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
        return result;
 }
@@ -4856,12 +4833,13 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
                        cmd->result = result;
                        /* Mark completed command as NULL in LRB */
                        lrbp->cmd = NULL;
-                       clear_bit_unlock(index, &hba->lrb_in_use);
+                       lrbp->compl_time_stamp = ktime_get();
                        /* Do not touch lrbp after scsi done */
                        cmd->scsi_done(cmd);
                        __ufshcd_release(hba);
                } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
                        lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+                       lrbp->compl_time_stamp = ktime_get();
                        if (hba->dev_cmd.complete) {
                                ufshcd_add_command_trace(hba, index,
                                                "dev_complete");
@@ -4870,17 +4848,12 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
                }
                if (ufshcd_is_clkscaling_supported(hba))
                        hba->clk_scaling.active_reqs--;
-
-               lrbp->compl_time_stamp = ktime_get();
        }
 
        /* clear corresponding bits of completed commands */
        hba->outstanding_reqs ^= completed_reqs;
 
        ufshcd_clk_scaling_update_busy(hba);
-
-       /* we might have free'd some tags above */
-       wake_up(&hba->dev_cmd.tag_wq);
 }
 
 /**
@@ -5053,6 +5026,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
 
        hba->auto_bkops_enabled = false;
        trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+       hba->is_urgent_bkops_lvl_checked = false;
 out:
        return err;
 }
@@ -5077,6 +5051,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
                hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
                ufshcd_disable_auto_bkops(hba);
        }
+       hba->is_urgent_bkops_lvl_checked = false;
 }
 
 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
@@ -5123,6 +5098,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
                err = ufshcd_enable_auto_bkops(hba);
        else
                err = ufshcd_disable_auto_bkops(hba);
+       hba->urgent_bkops_lvl = curr_status;
 out:
        return err;
 }
@@ -5200,7 +5176,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
        hba = container_of(work, struct ufs_hba, eeh_work);
 
        pm_runtime_get_sync(hba->dev);
-       scsi_block_requests(hba->host);
+       ufshcd_scsi_block_requests(hba);
        err = ufshcd_get_ee_status(hba, &status);
        if (err) {
                dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -5214,7 +5190,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
                ufshcd_bkops_exception_event_handler(hba);
 
 out:
-       scsi_unblock_requests(hba->host);
+       ufshcd_scsi_unblock_requests(hba);
        pm_runtime_put_sync(hba->dev);
        return;
 }
@@ -5348,8 +5324,8 @@ static void ufshcd_err_handler(struct work_struct *work)
 
        /*
         * if host reset is required then skip clearing the pending
-        * transfers forcefully because they will automatically get
-        * cleared after link startup.
+        * transfers forcefully because they will get cleared during
+        * host reset and restore
         */
        if (needs_reset)
                goto skip_pending_xfer_clear;
@@ -5603,6 +5579,27 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
        return retval;
 }
 
+struct ctm_info {
+       struct ufs_hba  *hba;
+       unsigned long   pending;
+       unsigned int    ncpl;
+};
+
+static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
+{
+       struct ctm_info *const ci = priv;
+       struct completion *c;
+
+       WARN_ON_ONCE(reserved);
+       if (test_bit(req->tag, &ci->pending))
+               return true;
+       ci->ncpl++;
+       c = req->end_io_data;
+       if (c)
+               complete(c);
+       return true;
+}
+
 /**
  * ufshcd_tmc_handler - handle task management function completion
  * @hba: per adapter instance
@@ -5613,16 +5610,14 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
  */
 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
-       u32 tm_doorbell;
+       struct request_queue *q = hba->tmf_queue;
+       struct ctm_info ci = {
+               .hba     = hba,
+               .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
+       };
 
-       tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-       hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
-       if (hba->tm_condition) {
-               wake_up(&hba->tm_wq);
-               return IRQ_HANDLED;
-       } else {
-               return IRQ_NONE;
-       }
+       blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
+       return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
 }
 
 /**
@@ -5728,7 +5723,10 @@ out:
 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
                struct utp_task_req_desc *treq, u8 tm_function)
 {
+       struct request_queue *q = hba->tmf_queue;
        struct Scsi_Host *host = hba->host;
+       DECLARE_COMPLETION_ONSTACK(wait);
+       struct request *req;
        unsigned long flags;
        int free_slot, task_tag, err;
 
@@ -5737,7 +5735,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
         * Even though we use wait_event() which sleeps indefinitely,
         * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
         */
-       wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+       req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+       req->end_io_data = &wait;
+       free_slot = req->tag;
+       WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
        ufshcd_hold(hba, false);
 
        spin_lock_irqsave(host->host_lock, flags);
@@ -5763,10 +5764,14 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
 
        /* wait until the task management command is completed */
-       err = wait_event_timeout(hba->tm_wq,
-                       test_bit(free_slot, &hba->tm_condition),
+       err = wait_for_completion_io_timeout(&wait,
                        msecs_to_jiffies(TM_CMD_TIMEOUT));
        if (!err) {
+               /*
+                * Make sure that ufshcd_compl_tm() does not trigger a
+                * use-after-free.
+                */
+               req->end_io_data = NULL;
                ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
                dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
                                __func__, tm_function);
@@ -5785,9 +5790,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        __clear_bit(free_slot, &hba->outstanding_tasks);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-       clear_bit(free_slot, &hba->tm_condition);
-       ufshcd_put_tm_slot(hba, free_slot);
-       wake_up(&hba->tm_tag_wq);
+       blk_put_request(req);
 
        ufshcd_release(hba);
        return err;
@@ -5863,6 +5866,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
                                        enum dev_cmd_type cmd_type,
                                        enum query_opcode desc_op)
 {
+       struct request_queue *q = hba->cmd_queue;
+       struct request *req;
        struct ufshcd_lrb *lrbp;
        int err = 0;
        int tag;
@@ -5872,7 +5877,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 
        down_read(&hba->clk_scaling_lock);
 
-       wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+       req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+       if (IS_ERR(req)) {
+               err = PTR_ERR(req);
+               goto out_unlock;
+       }
+       tag = req->tag;
+       WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
 
        init_completion(&wait);
        lrbp = &hba->lrb[tag];
@@ -5948,8 +5959,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
                }
        }
 
-       ufshcd_put_dev_cmd_tag(hba, tag);
-       wake_up(&hba->dev_cmd.tag_wq);
+       blk_put_request(req);
+out_unlock:
        up_read(&hba->clk_scaling_lock);
        return err;
 }
@@ -6244,9 +6255,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
        hba->lrb[tag].cmd = NULL;
        spin_unlock_irqrestore(host->host_lock, flags);
 
-       clear_bit_unlock(tag, &hba->lrb_in_use);
-       wake_up(&hba->dev_cmd.tag_wq);
-
 out:
        if (!err) {
                err = SUCCESS;
@@ -6279,9 +6287,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
        int err;
        unsigned long flags;
 
-       /* Reset the host controller */
+       /*
+        * Stop the host controller and complete the requests
+        * cleared by h/w
+        */
        spin_lock_irqsave(hba->host->host_lock, flags);
        ufshcd_hba_stop(hba, false);
+       hba->silence_err_logs = true;
+       ufshcd_complete_requests(hba);
+       hba->silence_err_logs = false;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        /* scale up clocks to max frequency before full reinitialization */
@@ -6315,7 +6329,6 @@ out:
 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
 {
        int err = 0;
-       unsigned long flags;
        int retries = MAX_HOST_RESET_RETRIES;
 
        do {
@@ -6325,15 +6338,6 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
                err = ufshcd_host_reset_and_restore(hba);
        } while (err && --retries);
 
-       /*
-        * After reset the door-bell might be cleared, complete
-        * outstanding requests in s/w here.
-        */
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       ufshcd_transfer_req_compl(hba);
-       ufshcd_tmc_handler(hba);
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-
        return err;
 }
 
@@ -7082,41 +7086,6 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
        ufshcd_probe_hba(hba);
 }
 
-static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
-{
-       unsigned long flags;
-       struct Scsi_Host *host;
-       struct ufs_hba *hba;
-       int index;
-       bool found = false;
-
-       if (!scmd || !scmd->device || !scmd->device->host)
-               return BLK_EH_DONE;
-
-       host = scmd->device->host;
-       hba = shost_priv(host);
-       if (!hba)
-               return BLK_EH_DONE;
-
-       spin_lock_irqsave(host->host_lock, flags);
-
-       for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
-               if (hba->lrb[index].cmd == scmd) {
-                       found = true;
-                       break;
-               }
-       }
-
-       spin_unlock_irqrestore(host->host_lock, flags);
-
-       /*
-        * Bypass SCSI error handling and reset the block layer timer if this
-        * SCSI command was not actually dispatched to UFS driver, otherwise
-        * let SCSI layer handle the error as usual.
-        */
-       return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
-}
-
 static const struct attribute_group *ufshcd_driver_groups[] = {
        &ufs_sysfs_unit_descriptor_group,
        &ufs_sysfs_lun_attributes_group,
@@ -7135,7 +7104,6 @@ static struct scsi_host_template ufshcd_driver_template = {
        .eh_abort_handler       = ufshcd_abort,
        .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
        .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
-       .eh_timed_out           = ufshcd_eh_timed_out,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
@@ -7701,8 +7669,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
         * turning off the link would also turn off the device.
         */
        else if ((req_link_state == UIC_LINK_OFF_STATE) &&
-                  (!check_for_bkops || (check_for_bkops &&
-                   !hba->auto_bkops_enabled))) {
+                (!check_for_bkops || !hba->auto_bkops_enabled)) {
                /*
                 * Let's make sure that link is in low power mode, we are doing
                 * this currently by putting the link in Hibern8. Otherway to
@@ -7908,6 +7875,11 @@ disable_clks:
        ret = ufshcd_vops_suspend(hba, pm_op);
        if (ret)
                goto set_link_active;
+       /*
+        * Disable the host irq, as there won't be any host
+        * controller transaction expected till resume.
+        */
+       ufshcd_disable_irq(hba);
 
        if (!ufshcd_is_link_active(hba))
                ufshcd_setup_clocks(hba, false);
@@ -7917,11 +7889,7 @@ disable_clks:
 
        hba->clk_gating.state = CLKS_OFF;
        trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
-       /*
-        * Disable the host irq as host controller as there won't be any
-        * host controller transaction expected till resume.
-        */
-       ufshcd_disable_irq(hba);
+
        /* Put the host controller in low power mode if possible */
        ufshcd_hba_vreg_set_lpm(hba);
        goto out;
@@ -7974,9 +7942,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                goto out;
 
        /* enable the host irq as host controller would be active soon */
-       ret = ufshcd_enable_irq(hba);
-       if (ret)
-               goto disable_irq_and_vops_clks;
+       ufshcd_enable_irq(hba);
 
        ret = ufshcd_vreg_set_hpm(hba);
        if (ret)
@@ -8250,6 +8216,9 @@ void ufshcd_remove(struct ufs_hba *hba)
 {
        ufs_bsg_remove(hba);
        ufs_sysfs_remove_nodes(hba->dev);
+       blk_cleanup_queue(hba->tmf_queue);
+       blk_mq_free_tag_set(&hba->tmf_tag_set);
+       blk_cleanup_queue(hba->cmd_queue);
        scsi_remove_host(hba->host);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
@@ -8328,6 +8297,18 @@ out_error:
 }
 EXPORT_SYMBOL(ufshcd_alloc_host);
 
+/* This function exists because blk_mq_alloc_tag_set() requires this. */
+static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
+                                    const struct blk_mq_queue_data *qd)
+{
+       WARN_ON_ONCE(true);
+       return BLK_STS_NOTSUPP;
+}
+
+static const struct blk_mq_ops ufshcd_tmf_ops = {
+       .queue_rq = ufshcd_queue_tmf,
+};
+
 /**
  * ufshcd_init - Driver initialization routine
  * @hba: per-adapter instance
@@ -8397,10 +8378,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
        hba->max_pwr_info.is_valid = false;
 
-       /* Initailize wait queue for task management */
-       init_waitqueue_head(&hba->tm_wq);
-       init_waitqueue_head(&hba->tm_tag_wq);
-
        /* Initialize work queues */
        INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
@@ -8413,9 +8390,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
        init_rwsem(&hba->clk_scaling_lock);
 
-       /* Initialize device management tag acquire wait queue */
-       init_waitqueue_head(&hba->dev_cmd.tag_wq);
-
        ufshcd_init_clk_gating(hba);
 
        ufshcd_init_clk_scaling(hba);
@@ -8449,6 +8423,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
                goto exit_gating;
        }
 
+       hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
+       if (IS_ERR(hba->cmd_queue)) {
+               err = PTR_ERR(hba->cmd_queue);
+               goto out_remove_scsi_host;
+       }
+
+       hba->tmf_tag_set = (struct blk_mq_tag_set) {
+               .nr_hw_queues   = 1,
+               .queue_depth    = hba->nutmrs,
+               .ops            = &ufshcd_tmf_ops,
+               .flags          = BLK_MQ_F_NO_SCHED,
+       };
+       err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
+       if (err < 0)
+               goto free_cmd_queue;
+       hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
+       if (IS_ERR(hba->tmf_queue)) {
+               err = PTR_ERR(hba->tmf_queue);
+               goto free_tmf_tag_set;
+       }
+
        /* Reset the attached device */
        ufshcd_vops_device_reset(hba);
 
@@ -8458,7 +8453,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
                dev_err(hba->dev, "Host controller enable failed\n");
                ufshcd_print_host_regs(hba);
                ufshcd_print_host_state(hba);
-               goto out_remove_scsi_host;
+               goto free_tmf_queue;
        }
 
        /*
@@ -8495,6 +8490,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
        return 0;
 
+free_tmf_queue:
+       blk_cleanup_queue(hba->tmf_queue);
+free_tmf_tag_set:
+       blk_mq_free_tag_set(&hba->tmf_tag_set);
+free_cmd_queue:
+       blk_cleanup_queue(hba->cmd_queue);
 out_remove_scsi_host:
        scsi_remove_host(hba->host);
 exit_gating:
index 2740f6941ec69b9c68ed57bf25741b2b397a8ec4..e05cafddc87b72c458fcd5923d2b19b61d0eb19d 100644 (file)
@@ -212,13 +212,11 @@ struct ufs_query {
  * @type: device management command type - Query, NOP OUT
  * @lock: lock to allow one command at a time
  * @complete: internal commands completion
- * @tag_wq: wait queue until free command slot is available
  */
 struct ufs_dev_cmd {
        enum dev_cmd_type type;
        struct mutex lock;
        struct completion *complete;
-       wait_queue_head_t tag_wq;
        struct ufs_query query;
 };
 
@@ -483,7 +481,7 @@ struct ufs_stats {
  * @host: Scsi_Host instance of the driver
  * @dev: device handle
  * @lrb: local reference block
- * @lrb_in_use: lrb in use
+ * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
  * @outstanding_tasks: Bits representing outstanding task requests
  * @outstanding_reqs: Bits representing outstanding transfer requests
  * @capabilities: UFS Controller Capabilities
@@ -495,11 +493,9 @@ struct ufs_stats {
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
  * @uic_cmd_mutex: mutex for uic command
- * @tm_wq: wait queue for task management
- * @tm_tag_wq: wait queue for free task management slots
- * @tm_slots_in_use: bit map of task management request slots in use
+ * @tmf_tag_set: TMF tag set.
+ * @tmf_queue: Used to allocate TMF tags.
  * @pwr_done: completion for power mode change
- * @tm_condition: condition variable for task management
  * @ufshcd_state: UFSHCD states
  * @eh_flags: Error handling flags
  * @intr_mask: Interrupt Mask Bits
@@ -513,6 +509,7 @@ struct ufs_stats {
  * @uic_error: UFS interconnect layer error status
  * @saved_err: sticky error mask
  * @saved_uic_err: sticky UIC error mask
+ * @silence_err_logs: flag to silence error logs
  * @dev_cmd: ufs device management command information
  * @last_dme_cmd_tstamp: time stamp of the last completed DME command
  * @auto_bkops_enabled: to track whether bkops is enabled in device
@@ -541,6 +538,7 @@ struct ufs_hba {
 
        struct Scsi_Host *host;
        struct device *dev;
+       struct request_queue *cmd_queue;
        /*
         * This field is to keep a reference to "scsi_device" corresponding to
         * "UFS device" W-LU.
@@ -561,7 +559,6 @@ struct ufs_hba {
        u32 ahit;
 
        struct ufshcd_lrb *lrb;
-       unsigned long lrb_in_use;
 
        unsigned long outstanding_tasks;
        unsigned long outstanding_reqs;
@@ -643,10 +640,8 @@ struct ufs_hba {
        /* Device deviations from standard UFS device spec. */
        unsigned int dev_quirks;
 
-       wait_queue_head_t tm_wq;
-       wait_queue_head_t tm_tag_wq;
-       unsigned long tm_condition;
-       unsigned long tm_slots_in_use;
+       struct blk_mq_tag_set tmf_tag_set;
+       struct request_queue *tmf_queue;
 
        struct uic_command *active_uic_cmd;
        struct mutex uic_cmd_mutex;
@@ -670,6 +665,7 @@ struct ufs_hba {
        u32 saved_err;
        u32 saved_uic_err;
        struct ufs_stats ufs_stats;
+       bool silence_err_logs;
 
        /* Device management request data */
        struct ufs_dev_cmd dev_cmd;
@@ -927,6 +923,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
        enum flag_idn idn, bool *flag_res);
 
 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
+void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
 
 #define SD_ASCII_STD true
 #define SD_RAW false
index f539f873f94da4accf6d903909c1492ed5e2b21f..3dc4d8b7650939895b785dd5b0b0d56792a34c1c 100644 (file)
 /* PHY Adapter Protocol Constants */
 #define PA_MAXDATALANES        4
 
+#define DL_FC0ProtectionTimeOutVal_Default     8191
+#define DL_TC0ReplayTimeOutVal_Default         65535
+#define DL_AFC0ReqTimeOutVal_Default           32767
+#define DL_FC1ProtectionTimeOutVal_Default     8191
+#define DL_TC1ReplayTimeOutVal_Default         65535
+#define DL_AFC1ReqTimeOutVal_Default           32767
+
+#define DME_LocalFC0ProtectionTimeOutVal       0xD041
+#define DME_LocalTC0ReplayTimeOutVal           0xD042
+#define DME_LocalAFC0ReqTimeOutVal             0xD043
+
 /* PA power modes */
 enum {
        FAST_MODE       = 1,
index 70008816c91f283200431e2ed8c4f17389527e5c..c3f010df641efa1b569fb014046af4b954b3243d 100644 (file)
@@ -365,7 +365,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
                int segs = scsi_dma_map(cmd);
 
                if (segs == -ENOMEM) {
-                       scmd_printk(KERN_ERR, cmd,
+                       scmd_printk(KERN_DEBUG, cmd,
                                    "vmw_pvscsi: Failed to map cmd sglist for DMA.\n");
                        return -ENOMEM;
                } else if (segs > 1) {
@@ -392,7 +392,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
                ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
                                             cmd->sc_data_direction);
                if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
-                       scmd_printk(KERN_ERR, cmd,
+                       scmd_printk(KERN_DEBUG, cmd,
                                    "vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
                        return -ENOMEM;
                }
@@ -402,6 +402,17 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
        return 0;
 }
 
+/*
+ * The device incorrectly doesn't clear the first byte of the sense
+ * buffer in some cases. We have to do it ourselves.
+ * Otherwise we run into trouble when SWIOTLB is forced.
+ */
+static void pvscsi_patch_sense(struct scsi_cmnd *cmd)
+{
+       if (cmd->sense_buffer)
+               cmd->sense_buffer[0] = 0;
+}
+
 static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
                                 struct pvscsi_ctx *ctx)
 {
@@ -544,6 +555,8 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
        cmd = ctx->cmd;
        abort_cmp = ctx->abort_cmp;
        pvscsi_unmap_buffers(adapter, ctx);
+       if (sdstat != SAM_STAT_CHECK_CONDITION)
+               pvscsi_patch_sense(cmd);
        pvscsi_release_context(adapter, ctx);
        if (abort_cmp) {
                /*
@@ -712,7 +725,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
                                cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
                                DMA_FROM_DEVICE);
                if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
-                       scmd_printk(KERN_ERR, cmd,
+                       scmd_printk(KERN_DEBUG, cmd,
                                    "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
                        ctx->sensePA = 0;
                        return -ENOMEM;
@@ -873,6 +886,7 @@ static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
                        scmd_printk(KERN_ERR, cmd,
                                    "Forced reset on cmd %p\n", cmd);
                        pvscsi_unmap_buffers(adapter, ctx);
+                       pvscsi_patch_sense(cmd);
                        pvscsi_release_context(adapter, ctx);
                        cmd->result = (DID_RESET << 16);
                        cmd->scsi_done(cmd);
index 1354a157e9afcfc6d743649bbe170b3436e7253c..6a38ff936389beea89cbbb3cf2cc7f5c9e48ec9d 100644 (file)
@@ -221,7 +221,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
        ep = fc_seq_exch(seq);
        lport = ep->lp;
        if (cmd->was_ddp_setup) {
-               BUG_ON(!ep);
                BUG_ON(!lport);
                /*
                 * Since DDP (Large Rx offload) was setup for this request,
diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h
new file mode 100644 (file)
index 0000000..082398e
--- /dev/null
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ */
+#ifndef __MTK_SIP_SVC_H
+#define __MTK_SIP_SVC_H
+
+/* Error Code */
+#define SIP_SVC_E_SUCCESS               0
+#define SIP_SVC_E_NOT_SUPPORTED         -1
+#define SIP_SVC_E_INVALID_PARAMS        -2
+#define SIP_SVC_E_INVALID_RANGE         -3
+#define SIP_SVC_E_PERMISSION_DENIED     -4
+
+#ifdef CONFIG_ARM64
+#define MTK_SIP_SMC_CONVENTION          ARM_SMCCC_SMC_64
+#else
+#define MTK_SIP_SMC_CONVENTION          ARM_SMCCC_SMC_32
+#endif
+
+#define MTK_SIP_SMC_CMD(fn_id) \
+       ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \
+                          ARM_SMCCC_OWNER_SIP, fn_id)
+
+#endif
index 3ed836db530697ebf76fd40b1fb9e865fcb34b28..f8312a3e5b429db11d2a9ee4cb1767baa17176e6 100644 (file)
@@ -172,6 +172,7 @@ struct scsi_device {
                                     * because we did a bus reset. */
        unsigned use_10_for_rw:1; /* first try 10-byte read / write */
        unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
+       unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */
        unsigned no_report_opcodes:1;   /* no REPORT SUPPORTED OPERATION CODES */
        unsigned no_write_same:1;       /* no WRITE SAME command */
        unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
index 9988db6ad244fa9bccc40841081d4c81f89915fd..d55f2176dfd470527b694fbb42a011fa4a518278 100644 (file)
@@ -68,14 +68,13 @@ struct utp_upiu_cmd {
  * @header:UPIU header structure DW-0 to DW-2
  * @sc: fields structure for scsi command DW-3 to DW-7
  * @qr: fields structure for query request DW-3 to DW-7
+ * @uc: use utp_upiu_query to host the 4 dwords of uic command
  */
 struct utp_upiu_req {
        struct utp_upiu_header header;
        union {
                struct utp_upiu_cmd             sc;
                struct utp_upiu_query           qr;
-               struct utp_upiu_query           tr;
-               /* use utp_upiu_query to host the 4 dwords of uic command */
                struct utp_upiu_query           uc;
        };
 };