scsi: lpfc: Handle XRI_ABORTED_CQE in soft IRQ
author: James Smart <jsmart2021@gmail.com>
Tue, 21 Nov 2017 00:00:30 +0000 (16:00 -0800)
committer: Martin K. Petersen <martin.petersen@oracle.com>
Tue, 5 Dec 2017 01:32:53 +0000 (20:32 -0500)
XRI_ABORTED_CQE completions were not being handled in the fast path.
They were being queued and deferred to the lpfc worker thread for
processing. This is an artifact of the driver design prior to moving
queue processing out of the isr and into a workq element. Now that queue
processing is already in a deferred context, remove this artifact and
process them directly.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h

index 231302273257e2be64041cf220be9d23f8f1c45d..7219b6ce5dc71151e70ca446ca6fb9b0a7b6d0bf 100644 (file)
@@ -705,7 +705,6 @@ struct lpfc_hba {
                                         * capability
                                         */
 #define HBA_NVME_IOQ_FLUSH      0x80000 /* NVME IO queues flushed. */
-#define NVME_XRI_ABORT_EVENT   0x100000
 
        uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
        struct lpfc_dmabuf slim2p;
index 2bafde2b7cfed6c0d2d151b53011cb176dd8c142..0b2c542011a331dd6379a805f4143d58077e523f 100644 (file)
@@ -640,8 +640,6 @@ lpfc_work_done(struct lpfc_hba *phba)
                        lpfc_handle_rrq_active(phba);
                if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
                        lpfc_sli4_fcp_xri_abort_event_proc(phba);
-               if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
-                       lpfc_sli4_nvme_xri_abort_event_proc(phba);
                if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
                        lpfc_sli4_els_xri_abort_event_proc(phba);
                if (phba->hba_flag & ASYNC_EVENT)
index e98fea93e518c5513366ea041a4c9b7e1ecf8f8c..d6cf28cb3e5fb2812b0f3b106ca528cc4ff44375 100644 (file)
@@ -5947,9 +5947,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
                INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
-
-               /* Fast-path XRI aborted CQ Event work queue list */
-               INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
        }
 
        /* This abort list used by worker thread */
@@ -9193,11 +9190,6 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
        /* Pending ELS XRI abort events */
        list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
                         &cqelist);
-       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-               /* Pending NVME XRI abort events */
-               list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
-                                &cqelist);
-       }
        /* Pending asynnc events */
        list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
                         &cqelist);
index ddc23428498004a3846847dd4c6064e99743fcb1..e5880521be2a8c3925437dad80acc66207336a10 100644 (file)
@@ -12317,41 +12317,6 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
        }
 }
 
-/**
- * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked by the worker thread to process all the pending
- * SLI4 NVME abort XRI events.
- **/
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
-{
-       struct lpfc_cq_event *cq_event;
-
-       /* First, declare the fcp xri abort event has been handled */
-       spin_lock_irq(&phba->hbalock);
-       phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
-       spin_unlock_irq(&phba->hbalock);
-       /* Now, handle all the fcp xri abort events */
-       while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
-               /* Get the first event from the head of the event queue */
-               spin_lock_irq(&phba->hbalock);
-               list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
-                                cq_event, struct lpfc_cq_event, list);
-               spin_unlock_irq(&phba->hbalock);
-               /* Notify aborted XRI for NVME work queue */
-               if (phba->nvmet_support) {
-                       lpfc_sli4_nvmet_xri_aborted(phba,
-                                                   &cq_event->cqe.wcqe_axri);
-               } else {
-                       lpfc_sli4_nvme_xri_aborted(phba,
-                                                  &cq_event->cqe.wcqe_axri);
-               }
-               /* Free the event processed back to the free pool */
-               lpfc_sli4_cq_event_release(phba, cq_event);
-       }
-}
-
 /**
  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
  * @phba: pointer to lpfc hba data structure.
@@ -12548,6 +12513,24 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
        return irspiocbq;
 }
 
+inline struct lpfc_cq_event *
+lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
+{
+       struct lpfc_cq_event *cq_event;
+
+       /* Allocate a new internal CQ_EVENT entry */
+       cq_event = lpfc_sli4_cq_event_alloc(phba);
+       if (!cq_event) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0602 Failed to alloc CQ_EVENT entry\n");
+               return NULL;
+       }
+
+       /* Move the CQE into the event */
+       memcpy(&cq_event->cqe, entry, size);
+       return cq_event;
+}
+
 /**
  * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
  * @phba: Pointer to HBA context object.
@@ -12569,16 +12552,9 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
                        "word2:x%x, word3:x%x\n", mcqe->word0,
                        mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
 
-       /* Allocate a new internal CQ_EVENT entry */
-       cq_event = lpfc_sli4_cq_event_alloc(phba);
-       if (!cq_event) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                               "0394 Failed to allocate CQ_EVENT entry\n");
+       cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
+       if (!cq_event)
                return false;
-       }
-
-       /* Move the CQE into an asynchronous event entry */
-       memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
        /* Set the async event flag */
@@ -12824,18 +12800,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
        struct lpfc_cq_event *cq_event;
        unsigned long iflags;
 
-       /* Allocate a new internal CQ_EVENT entry */
-       cq_event = lpfc_sli4_cq_event_alloc(phba);
-       if (!cq_event) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                               "0602 Failed to allocate CQ_EVENT entry\n");
-               return false;
-       }
-
-       /* Move the CQE into the proper xri abort event list */
-       memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
        switch (cq->subtype) {
        case LPFC_FCP:
+               cq_event = lpfc_cq_event_setup(
+                       phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+               if (!cq_event)
+                       return false;
                spin_lock_irqsave(&phba->hbalock, iflags);
                list_add_tail(&cq_event->list,
                              &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
@@ -12845,6 +12815,10 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
                workposted = true;
                break;
        case LPFC_ELS:
+               cq_event = lpfc_cq_event_setup(
+                       phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+               if (!cq_event)
+                       return false;
                spin_lock_irqsave(&phba->hbalock, iflags);
                list_add_tail(&cq_event->list,
                              &phba->sli4_hba.sp_els_xri_aborted_work_queue);
@@ -12854,13 +12828,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
                workposted = true;
                break;
        case LPFC_NVME:
-               spin_lock_irqsave(&phba->hbalock, iflags);
-               list_add_tail(&cq_event->list,
-                             &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
-               /* Set the nvme xri abort event flag */
-               phba->hba_flag |= NVME_XRI_ABORT_EVENT;
-               spin_unlock_irqrestore(&phba->hbalock, iflags);
-               workposted = true;
+               /* Notify aborted XRI for NVME work queue */
+               if (phba->nvmet_support)
+                       lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
+               else
+                       lpfc_sli4_nvme_xri_aborted(phba, wcqe);
+
+               workposted = false;
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12868,7 +12842,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
                                "%08x %08x %08x %08x\n",
                                cq->subtype, wcqe->word0, wcqe->parameter,
                                wcqe->word2, wcqe->word3);
-               lpfc_sli4_cq_event_release(phba, cq_event);
                workposted = false;
                break;
        }
index 301ce46d2d70df1a8a568be7cf818202bc66046e..da302bfb02238c231c3a2d9544d58ae8efd15e86 100644 (file)
@@ -672,7 +672,6 @@ struct lpfc_sli4_hba {
        struct list_head sp_asynce_work_queue;
        struct list_head sp_fcp_xri_aborted_work_queue;
        struct list_head sp_els_xri_aborted_work_queue;
-       struct list_head sp_nvme_xri_aborted_work_queue;
        struct list_head sp_unsol_work_queue;
        struct lpfc_sli4_link link_state;
        struct lpfc_sli4_lnk_info lnk_info;
@@ -824,7 +823,6 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
 int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
                        void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
 void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
                               struct sli4_wcqe_xri_aborted *);