scsi: lpfc: Clean up SLI-4 CQE status handling
author		Justin Tee <justin.tee@broadcom.com>
		Tue, 23 May 2023 18:32:03 +0000 (11:32 -0700)
committer	Martin K. Petersen <martin.petersen@oracle.com>
		Wed, 31 May 2023 22:14:20 +0000 (18:14 -0400)
SLI-4 CQE status values larger than the 4 bits allowed by
LPFC_IOCB_STATUS_MASK are mishandled.  LPFC_IOCB_STATUS_MASK is a
leftover SLI-3 construct and serves no purpose in the SLI-4 path.

Remove the LPFC_IOCB_STATUS_MASK and clean up general CQE status handling
in SLI-4 completion paths.
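
For illustration, a minimal user-space sketch of the truncation (the two
#define values are copied from the lpfc_hw4.h hunk below; the program
itself is only a demonstration, not driver code):

  #include <stdio.h>

  #define LPFC_IOCB_STATUS_MASK  0xf   /* 4-bit SLI-3 leftover */
  #define CQE_STATUS_DI_ERROR    0x16  /* SLI-4 status wider than 4 bits */

  int main(void)
  {
          unsigned int status = CQE_STATUS_DI_ERROR;

          /* 0x16 & 0xf == 0x6: the DI error aliases to an unrelated,
           * valid-looking completion status once it is masked.
           */
          printf("raw 0x%x, masked 0x%x\n",
                 status, status & LPFC_IOCB_STATUS_MASK);
          return 0;
  }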

Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20230523183206.7728-7-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_scsi.c

diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 5e3a93d13a91d0f66619d224429230e2587768cc..dcb87bb5f88b2afe969cb252a602cbd06e47fb66 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -932,8 +932,6 @@ struct lpfc_hba {
        void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
                         struct lpfc_iocbq *);
        int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
-       void (*lpfc_scsi_cmd_iocb_cmpl)
-               (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
 
        /* MBOX interface function jump table entries */
        int (*lpfc_sli_issue_mbox)
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 082f8a109e553b81bc8e5c3be6ba62f18559bcca..5d4f9f27084d6d1fb356d2034bcf22577eb689f5 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -395,9 +395,6 @@ struct lpfc_cqe {
 #define CQE_STATUS_NEED_BUFF_ENTRY     0xf
 #define CQE_STATUS_DI_ERROR            0x16
 
-/* Used when mapping CQE status to IOCB */
-#define LPFC_IOCB_STATUS_MASK          0xf
-
 /* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
 #define CQE_HW_STATUS_NO_ERR           0x0
 #define CQE_HW_STATUS_UNDERRUN         0x1
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 82730a89ecb5412f4a779c82d8bec025985a6ca8..8db7cb99903dbdf45e070f8a1a5e62450e867333 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -310,20 +310,20 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
  * for the LS request.
  **/
 void
-__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba,  struct lpfc_vport *vport,
+__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
                        struct lpfc_iocbq *cmdwqe,
                        struct lpfc_wcqe_complete *wcqe)
 {
        struct nvmefc_ls_req *pnvme_lsreq;
        struct lpfc_dmabuf *buf_ptr;
        struct lpfc_nodelist *ndlp;
-       uint32_t status;
+       int status;
 
        pnvme_lsreq = cmdwqe->context_un.nvme_lsreq;
        ndlp = cmdwqe->ndlp;
        buf_ptr = cmdwqe->bpl_dmabuf;
 
-       status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+       status = bf_get(lpfc_wcqe_c_status, wcqe);
 
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                         "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x "
@@ -343,14 +343,17 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba,  struct lpfc_vport *vport,
                kfree(buf_ptr);
                cmdwqe->bpl_dmabuf = NULL;
        }
-       if (pnvme_lsreq->done)
+       if (pnvme_lsreq->done) {
+               if (status != CQE_STATUS_SUCCESS)
+                       status = -ENXIO;
                pnvme_lsreq->done(pnvme_lsreq, status);
-       else
+       } else {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6046 NVMEx cmpl without done call back? "
                                 "Data x%px DID %x Xri: %x status %x\n",
                                pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
                                cmdwqe->sli4_xritag, status);
+       }
        if (ndlp) {
                lpfc_nlp_put(ndlp);
                cmdwqe->ndlp = NULL;
@@ -367,7 +370,7 @@ lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        uint32_t status;
        struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
 
-       status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+       status = bf_get(lpfc_wcqe_c_status, wcqe);
 
        if (vport->localport) {
                lport = (struct lpfc_nvme_lport *)vport->localport->private;
@@ -1040,7 +1043,7 @@ lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
                nCmd->transferred_length = nCmd->payload_length;
        } else {
-               lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
+               lpfc_ncmd->status = status;
                lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
 
                /* For NVME, the only failure path that results in an
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 7517dd55fe91953375e91054bb72e2cb5f8ab620..ce201465dc6f84279cd34558f2d8dab7082d92f3 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -300,7 +300,7 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
        uint32_t status, result;
 
-       status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+       status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
 
        if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
@@ -350,7 +350,7 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
        if (!phba->targetport)
                goto finish;
 
-       status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+       status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
 
        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 49aa86c477c66669b0fe7240f0675be7401f8295..a62e091894f6e5951fd4d8a2a26b330ab0e4dd6f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4026,7 +4026,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
        struct lpfc_fast_path_event *fast_path_evt;
        struct Scsi_Host *shost;
        u32 logit = LOG_FCP;
-       u32 status, idx;
+       u32 idx;
        u32 lat;
        u8 wait_xb_clr = 0;
 
@@ -4061,8 +4061,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 #endif
        shost = cmd->device->host;
 
-       status = bf_get(lpfc_wcqe_c_status, wcqe);
-       lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
+       lpfc_cmd->status = bf_get(lpfc_wcqe_c_status, wcqe);
        lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
 
        lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
@@ -4104,11 +4103,6 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
        }
 #endif
        if (unlikely(lpfc_cmd->status)) {
-               if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
-                   (lpfc_cmd->result & IOERR_DRVR_MASK))
-                       lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
-               else if (lpfc_cmd->status >= IOSTAT_CNT)
-                       lpfc_cmd->status = IOSTAT_DEFAULT;
                if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
                    !lpfc_cmd->fcp_rsp->rspStatus3 &&
                    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
@@ -4133,16 +4127,16 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
        }
 
        switch (lpfc_cmd->status) {
-       case IOSTAT_SUCCESS:
+       case CQE_STATUS_SUCCESS:
                cmd->result = DID_OK << 16;
                break;
-       case IOSTAT_FCP_RSP_ERROR:
+       case CQE_STATUS_FCP_RSP_FAILURE:
                lpfc_handle_fcp_err(vport, lpfc_cmd,
                                    pwqeIn->wqe.fcp_iread.total_xfer_len -
                                    wcqe->total_data_placed);
                break;
-       case IOSTAT_NPORT_BSY:
-       case IOSTAT_FABRIC_BSY:
+       case CQE_STATUS_NPORT_BSY:
+       case CQE_STATUS_FABRIC_BSY:
                cmd->result = DID_TRANSPORT_DISRUPTED << 16;
                fast_path_evt = lpfc_alloc_fast_evt(phba);
                if (!fast_path_evt)
@@ -4185,7 +4179,27 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                                 wcqe->total_data_placed,
                                 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
                break;
-       case IOSTAT_REMOTE_STOP:
+       case CQE_STATUS_DI_ERROR:
+               if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
+                       lpfc_cmd->result = IOERR_RX_DMA_FAILED;
+               else
+                       lpfc_cmd->result = IOERR_TX_DMA_FAILED;
+               lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_BG,
+                                "9048 DI Error xri x%x status x%x DI ext "
+                                "status x%x data placed x%x\n",
+                                lpfc_cmd->cur_iocbq.sli4_xritag,
+                                lpfc_cmd->status, wcqe->parameter,
+                                wcqe->total_data_placed);
+               if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
+                       /* BG enabled cmd. Parse BG error */
+                       lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
+                       break;
+               }
+               cmd->result = DID_ERROR << 16;
+               lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
+                                "9040 DI Error on unprotected cmd\n");
+               break;
+       case CQE_STATUS_REMOTE_STOP:
                if (ndlp) {
                        /* This I/O was aborted by the target, we don't
                         * know the rxid and because we did not send the
@@ -4196,7 +4210,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                                            0, 0);
                }
                fallthrough;
-       case IOSTAT_LOCAL_REJECT:
+       case CQE_STATUS_LOCAL_REJECT:
                if (lpfc_cmd->result & IOERR_DRVR_MASK)
                        lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
                if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
@@ -4217,24 +4231,6 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                        cmd->result = DID_TRANSPORT_DISRUPTED << 16;
                        break;
                }
-               if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
-                    lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
-                    status == CQE_STATUS_DI_ERROR) {
-                       if (scsi_get_prot_op(cmd) !=
-                           SCSI_PROT_NORMAL) {
-                               /*
-                                * This is a response for a BG enabled
-                                * cmd. Parse BG error
-                                */
-                               lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
-                               break;
-                       } else {
-                               lpfc_printf_vlog(vport, KERN_WARNING,
-                                                LOG_BG,
-                                                "9040 non-zero BGSTAT "
-                                                "on unprotected cmd\n");
-                       }
-               }
                lpfc_printf_vlog(vport, KERN_WARNING, logit,
                                 "9036 Local Reject FCP cmd x%x failed"
                                 " <%d/%lld> "
@@ -4253,10 +4249,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                                 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
                fallthrough;
        default:
-               if (lpfc_cmd->status >= IOSTAT_CNT)
-                       lpfc_cmd->status = IOSTAT_DEFAULT;
                cmd->result = DID_ERROR << 16;
-               lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                                 "9037 FCP Completion Error: xri %x "
                                 "status x%x result x%x [x%x] "
                                 "placed x%x\n",
@@ -5010,7 +5004,6 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
                return -ENODEV;
        }
        phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
-       phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
        return 0;
 }